[ { "id": 232037, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/mapbox/_layer.py", "file_name": "_layer.py", "fun_name": "minzoom", "commit_message": "switch to black .22", "code": "def minzoom(self):\n \n return self[\"minzoom\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 63481, "documentation": { "docstring": "\n Sets the minimum zoom level (mapbox.layer.minzoom). At zoom\n levels less than the minzoom, the layer will be hidden.\n\n The 'minzoom' property is a number and may be specified as:\n - An int or float in the interval [0, 24]\n\n Returns\n -------\n int|float\n ", "n_words": 42, "vocab_size": 37, "n_whitespaces": 101, "language": "en" } }, { "id": 196771, "commit_id": "f757f3daae6e11ea0cfb7dadc133274d8d74315f", "repo": "sympy", "path": "sympy/concrete/summations.py", "file_name": "summations.py", "fun_name": "telescopic", "commit_message": "Reordered imports 2", "code": "def telescopic(L, R, limits):\n \n (i, a, b) = limits\n if L.is_Add or R.is_Add:\n return None\n\n # We want to solve(L.subs(i, i + m) + R, m)\n # First we try a simple match since this does things that\n # solve doesn't do, e.g. solve(f(k+m)-f(k), m) fails\n\n k = Wild(\"k\")\n sol = (-R).match(L.subs(i, i + k))\n s = None\n if sol and k in sol:\n s = sol[k]\n if not (s.is_Integer and L.subs(i, i + s) == -R):\n # sometimes match fail(f(x+2).match(-f(x+k))->{k: -2 - 2x}))\n s = None\n\n # But there are things that match doesn't do that solve\n # can do, e.g. determine that 1/(x + m) = 1/(1 - x) when m = 1\n\n if s is None:\n m = Dummy('m')\n try:\n from sympy.solvers.solvers import solve\n sol = solve(L.subs(i, i + m) + R, m) or []\n except NotImplementedError:\n return None\n sol = [si for si in sol if si.is_Integer and\n (L.subs(i, i + si) + R).expand().is_zero]\n if len(sol) != 1:\n return None\n s = sol[0]\n\n if s < 0:\n return telescopic_direct(R, L, abs(s), (i, a, b))\n elif s > 0:\n return telescopic_direct(L, R, s, (i, a, b))\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 391, "n_words": 189, "vocab_size": 104, "complexity": 16, "nloc": 27, "token_counts": 242, "n_ast_nodes": 374, "n_identifiers": 27, "d_id": 48161, "documentation": { "docstring": "\n Tries to perform the summation using the telescopic property.\n\n Return None if not possible.\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 24, "language": "en" } }, { "id": 230902, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/_annotation.py", "file_name": "_annotation.py", "fun_name": "startarrowsize", "commit_message": "switch to black .22", "code": "def startarrowsize(self):\n \n return self[\"startarrowsize\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 62575, "documentation": { "docstring": "\n Sets the size of the start annotation arrow head, relative to\n `arrowwidth`. 
A value of 1 (default) gives a head about 3x as\n wide as the line.\n\n The 'startarrowsize' property is a number and may be specified as:\n - An int or float in the interval [0.3, inf]\n\n Returns\n -------\n int|float\n ", "n_words": 51, "vocab_size": 45, "n_whitespaces": 117, "language": "en" } }, { "id": 266879, "commit_id": "8b2e6285650ec42ec4a19075a8567047e8304ba2", "repo": "ansible", "path": "lib/ansible/galaxy/dependency_resolution/providers.py", "file_name": "providers.py", "fun_name": "get_dependencies", "commit_message": "galaxy - Clean up type hints and imports.", "code": "def get_dependencies(self, candidate):\n # type: (Candidate) -> list[Candidate]\n r\n # FIXME: If there's several galaxy servers set, there may be a\n # FIXME: situation when the metadata of the same collection\n # FIXME: differs. So how do we resolve this case? Priority?\n # FIXME: Taking into account a pinned hash? Exploding on\n # FIXME: any differences?\n # NOTE: The underlying implmentation currently uses first found\n req_map = self._api_proxy.get_collection_dependencies(candidate)\n\n # NOTE: This guard expression MUST perform an early exit only\n # NOTE: after the `get_collection_dependencies()` call because\n # NOTE: internally it polulates the artifact URL of the candidate,\n # NOTE: its SHA hash and the Galaxy API token. These are still\n # NOTE: necessary with `--no-deps` because even with the disabled\n # NOTE: dependency resolution the outer layer will still need to\n # NOTE: know how to download and validate the artifact.\n #\n # NOTE: Virtual candidates should always return dependencies\n # NOTE: because they are ephemeral and non-installable.\n if not self._with_deps and not candidate.is_virtual:\n return []\n\n return [\n self._make_req_from_dict({'name': dep_name, 'version': dep_req})\n for dep_name, dep_req in req_map.items()\n ]\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 364, "n_words": 178, "vocab_size": 125, "complexity": 4, "nloc": 13, "token_counts": 60, "n_ast_nodes": 115, "n_identifiers": 12, "d_id": 78638, "documentation": { "docstring": "Get direct dependencies of a candidate.\n\n :returns: A collection of requirements that `candidate` \\\n specifies as its dependencies.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 49, "language": "en" } }, { "id": 159564, "commit_id": "e798bf049f036a5865c14d4343ed8a833864aabe", "repo": "rasa", "path": "rasa/shared/core/trackers.py", "file_name": "trackers.py", "fun_name": "active_loop_name", "commit_message": "convert TrackerActiveLoop to a dataclass", "code": "def active_loop_name(self) -> Optional[Text]:\n \n if not self.active_loop or self.active_loop.name == SHOULD_NOT_BE_SET:\n return None\n\n return self.active_loop.name\n", "url": "https://github.com/RasaHQ/rasa.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 47, "n_words": 15, "vocab_size": 13, "complexity": 3, "nloc": 8, "token_counts": 33, "n_ast_nodes": 54, "n_identifiers": 7, "d_id": 38336, "documentation": { "docstring": "Get the name of the currently active loop.\n\n Returns: `None` if no active loop or the name of the currently active loop.\n ", "n_words": 22, "vocab_size": 13, "n_whitespaces": 36, "language": "en" } }, { "id": 262246, "commit_id": "00c7600103ee34ac50506af88f1b34b713f849e7", "repo": "TTS", "path": "TTS/tts/models/vits.py", "file_name": "vits.py", "fun_name": "get_lr", "commit_message": "Update Vits 
model API", "code": "def get_lr(self) -> List:\n \n return [self.config.lr_disc, self.config.lr_gen]\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 7, "token_counts": 22, "n_ast_nodes": 36, "n_identifiers": 6, "d_id": 77157, "documentation": { "docstring": "Set the initial learning rates for each optimizer.\n\n Returns:\n List: learning rates for each optimizer.\n ", "n_words": 15, "vocab_size": 10, "n_whitespaces": 40, "language": "en" } }, { "id": 80333, "commit_id": "a4a3ba65d736045733cb49430d7076b73aec23bb", "repo": "awx", "path": "awx/main/tasks/receptor.py", "file_name": "receptor.py", "fun_name": "_convert_args_to_cli", "commit_message": "Refactored tasks.py to a package\n--- Added 3 new sub-package : awx.main.tasks.system , awx.main.tasks.jobs , awx.main.tasks.receptor\n--- Modified the functional tests and unit tests accordingly", "code": "def _convert_args_to_cli(vargs):\n \n args = ['cleanup']\n for option in ('exclude_strings', 'remove_images'):\n if vargs.get(option):\n args.append('--{}={}'.format(option.replace('_', '-'), ' '.join(vargs.get(option))))\n for option in ('file_pattern', 'image_prune', 'process_isolation_executable', 'grace_period'):\n if vargs.get(option) is True:\n args.append('--{}'.format(option.replace('_', '-')))\n elif vargs.get(option) not in (None, ''):\n args.append('--{}={}'.format(option.replace('_', '-'), vargs.get(option)))\n return args\n\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 109, "n_words": 40, "vocab_size": 31, "complexity": 6, "nloc": 11, "token_counts": 141, "n_ast_nodes": 251, "n_identifiers": 9, "d_id": 17051, "documentation": { "docstring": "\n For the ansible-runner worker cleanup command\n converts the dictionary (parsed argparse variables) used for python interface\n into a string of CLI options, which has to be used on execution nodes.\n ", "n_words": 30, "vocab_size": 28, "n_whitespaces": 43, "language": "en" } }, { "id": 189683, "commit_id": "e040bcacd38378386749db18aeba575b93f4ebca", "repo": "manim", "path": "manim/mobject/geometry/line.py", "file_name": "line.py", "fun_name": "get_normal_vector", "commit_message": "Improved structure of the :mod:`.mobject` module (#2476)\n\n* group graphing and update its references\r\n\r\n* group text and update its references\r\n\r\n* group opengl and update its references\r\n\r\n* group three_d and update its references\r\n\r\n* group geometry and update (most) references\r\n\r\n* move some chaning.py + updater files into animation\r\n\r\n* refactor arc.py\r\n\r\n* refactor line.py\r\n\r\n* refactor polygram.py\r\n\r\n* refactor tips.py\r\n\r\n* black + isort\r\n\r\n* import new files in __init__.py\r\n\r\n* refactor places where geometry was used\r\n\r\n* black + isort again\r\n\r\n* remove unused imports\r\n\r\n* update reference.rst\r\n\r\n* add descriptions to files\r\n\r\n* fix circular imports\r\n\r\n* forgot ArrowTip\r\n\r\n* fix tests\r\n\r\n* fix doctests\r\n\r\n* satisfy mypy?\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fix ALL merge conflicts\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, 
see https://pre-commit.ci\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* one VMobject import slipped through\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* re-add imports to `manim/opengl/__init__.py`\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fix reference manual\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* ignore unknown directive type\r\n\r\n* fix arrow tip imports in docstrings\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Benjamin Hackl ", "code": "def get_normal_vector(self) -> np.ndarray:\n \n\n p0, p1, p2 = self.tip.get_start_anchors()[:3]\n return normalize(np.cross(p2 - p1, p1 - p0))\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 37, "n_words": 16, "vocab_size": 14, "complexity": 1, "nloc": 12, "token_counts": 43, "n_ast_nodes": 69, "n_identifiers": 11, "d_id": 46164, "documentation": { "docstring": "Returns the normal of a vector.\n\n Examples\n --------\n ::\n\n >>> np.round(Arrow().get_normal_vector()) + 0. # add 0. to avoid negative 0 in output\n array([ 0., 0., -1.])\n ", "n_words": 26, "vocab_size": 24, "n_whitespaces": 77, "language": "en" } }, { "id": 66352, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/loan_management/report/loan_interest_report/loan_interest_report.py", "file_name": "loan_interest_report.py", "fun_name": "get_loan_wise_pledges", "commit_message": "style: format code with black", "code": "def get_loan_wise_pledges(filters):\n\tloan_wise_unpledges = {}\n\tcurrent_pledges = {}\n\n\tconditions = \"\"\n\n\tif filters.get(\"company\"):\n\t\tconditions = \"AND company = %(company)s\"\n\n\tunpledges = frappe.db.sql(\n\t\t.format(\n\t\t\tconditions=conditions\n\t\t),\n\t\tfilters,\n\t\tas_dict=1,\n\t)\n\n\tfor unpledge in unpledges:\n\t\tloan_wise_unpledges.setdefault((unpledge.loan, unpledge.loan_security), unpledge.qty)\n\n\tpledges = frappe.db.sql(\n\t\t.format(\n\t\t\tconditions=conditions\n\t\t),\n\t\tfilters,\n\t\tas_dict=1,\n\t)\n\n\tfor security in pledges:\n\t\tcurrent_pledges.setdefault((security.loan, security.loan_security), security.qty)\n\t\tcurrent_pledges[(security.loan, security.loan_security)] -= loan_wise_unpledges.get(\n\t\t\t(security.loan, security.loan_security), 0.0\n\t\t)\n\n\treturn current_pledges\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 33, "n_words": 61, "vocab_size": 41, "complexity": 4, "nloc": 42, "token_counts": 154, "n_ast_nodes": 236, "n_identifiers": 19, "d_id": 14173, "documentation": { "docstring": "\n\t\tSELECT up.loan, u.loan_security, sum(u.qty) as qty\n\t\tFROM `tabLoan Security Unpledge` up, `tabUnpledge` u\n\t\tWHERE u.parent = up.name\n\t\tAND up.status = 'Approved'\n\t\t{conditions}\n\t\tGROUP BY up.loan, u.loan_security\n\t\n\t\tSELECT lp.loan, p.loan_security, sum(p.qty) as qty\n\t\tFROM `tabLoan Security Pledge` lp, `tabPledge`p\n\t\tWHERE p.parent = lp.name\n\t\tAND lp.status = 'Pledged'\n\t\t{conditions}\n\t\tGROUP BY lp.loan, p.loan_security\n\t", "n_words": 51, 
"vocab_size": 35, "n_whitespaces": 39, "language": "en" } }, { "id": 261494, "commit_id": "bb080aa690364d84d11232c73dc8db2f0dde3578", "repo": "scikit-learn", "path": "sklearn/linear_model/_logistic.py", "file_name": "_logistic.py", "fun_name": "_check_multi_class", "commit_message": "ENH add newton-cholesky solver to LogisticRegression (#24767)", "code": "def _check_multi_class(multi_class, solver, n_classes):\n \n if multi_class == \"auto\":\n if solver in (\"liblinear\", \"newton-cholesky\"):\n multi_class = \"ovr\"\n elif n_classes > 2:\n multi_class = \"multinomial\"\n else:\n multi_class = \"ovr\"\n if multi_class == \"multinomial\" and solver in (\"liblinear\", \"newton-cholesky\"):\n raise ValueError(\"Solver %s does not support a multinomial backend.\" % solver)\n return multi_class\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 122, "n_words": 49, "vocab_size": 33, "complexity": 6, "nloc": 11, "token_counts": 62, "n_ast_nodes": 118, "n_identifiers": 5, "d_id": 76838, "documentation": { "docstring": "Computes the multi class type, either \"multinomial\" or \"ovr\".\n\n For `n_classes` > 2 and a solver that supports it, returns \"multinomial\".\n For all other cases, in particular binary classification, return \"ovr\".\n ", "n_words": 31, "vocab_size": 29, "n_whitespaces": 40, "language": "en" } }, { "id": 66953, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/payroll/report/salary_payments_based_on_payment_mode/salary_payments_based_on_payment_mode.py", "file_name": "salary_payments_based_on_payment_mode.py", "fun_name": "get_data", "commit_message": "style: format code with black", "code": "def get_data(filters, mode_of_payments):\n\tdata = []\n\n\tconditions = get_conditions(filters)\n\n\tentry = frappe.db.sql(\n\t\t\n\t\t% (conditions),\n\t\tas_dict=1,\n\t)\n\n\tbranch_wise_entries, gross_pay = prepare_data(entry)\n\n\tbranches = frappe.db.sql_list(\n\t\t\n\t\t% (conditions)\n\t)\n\n\ttotal_row = {\"total\": 0, \"branch\": \"Total\"}\n\n\tfor branch in branches:\n\t\ttotal = 0\n\t\trow = {\"branch\": branch}\n\t\tfor mode in mode_of_payments:\n\t\t\tif branch_wise_entries.get(branch).get(mode):\n\t\t\t\trow[mode] = branch_wise_entries.get(branch).get(mode)\n\t\t\t\ttotal += branch_wise_entries.get(branch).get(mode)\n\n\t\trow[\"total\"] = total\n\t\tdata.append(row)\n\n\ttotal_row = get_total_based_on_mode_of_payment(data, mode_of_payments)\n\ttotal_deductions = gross_pay - total_row.get(\"total\")\n\n\treport_summary = []\n\n\tif data:\n\t\tdata.append(total_row)\n\t\tdata.append({})\n\t\tdata.append({\"branch\": \"Total Gross Pay\", mode_of_payments[0]: gross_pay})\n\t\tdata.append({\"branch\": \"Total Deductions\", mode_of_payments[0]: total_deductions})\n\t\tdata.append({\"branch\": \"Total Net Pay\", mode_of_payments[0]: total_row.get(\"total\")})\n\n\t\tcurrency = erpnext.get_company_currency(filters.company)\n\t\treport_summary = get_report_summary(\n\t\t\tgross_pay, total_deductions, total_row.get(\"total\"), currency\n\t\t)\n\n\treturn data, total_row, report_summary\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 72, "n_words": 107, "vocab_size": 71, "complexity": 5, "nloc": 45, "token_counts": 270, "n_ast_nodes": 448, "n_identifiers": 31, "d_id": 14387, "documentation": { "docstring": "\n\t\tselect branch, 
mode_of_payment, sum(net_pay) as net_pay, sum(gross_pay) as gross_pay\n\t\tfrom `tabSalary Slip` sal\n\t\twhere docstatus = 1 %s\n\t\tgroup by branch, mode_of_payment\n\t\t\n\t\tselect distinct branch from `tabSalary Slip` sal\n\t\twhere docstatus = 1 %s\n\t", "n_words": 34, "vocab_size": 22, "n_whitespaces": 28, "language": "en" } }, { "id": 74706, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/whitelist.py", "file_name": "whitelist.py", "fun_name": "attribute_rule", "commit_message": "Reformat with black", "code": "def attribute_rule(allowed_attrs):\n \n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 3, "token_counts": 10, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 16302, "documentation": { "docstring": "\n Generator for functions that can be used as entries in Whitelister.element_rules.\n These functions accept a tag, and modify its attributes by looking each attribute\n up in the 'allowed_attrs' dict defined here:\n * if the lookup fails, drop the attribute\n * if the lookup returns a callable, replace the attribute with the result of calling\n it - e.g. {'title': uppercase} will replace 'title' with the result of uppercasing\n the title. If the callable returns None, the attribute is dropped\n * if the lookup returns a truthy value, keep the attribute; if falsy, drop it\n ", "n_words": 93, "vocab_size": 60, "n_whitespaces": 125, "language": "en" } }, { "id": 87145, "commit_id": "fe07466a1449a5ae60526528ce7bf9399b59b47d", "repo": "sentry", "path": "src/sentry/region_to_control/producer.py", "file_name": "producer.py", "fun_name": "get_region_to_control_producer", "commit_message": "chore(hybrid-cloud): Extract region to control silo into service abstraction (#40353)\n\n1. Use the `silo_mode_delegator` to make the silo conditional sensitive\r\nlogic of region to control processing like other services that need to\r\nbe conditional based on deployment.\r\n2. Leverage the lifecycle management offered by the\r\n`DelegatedBySiloMode` to stop arroyo kafka producer between tests or\r\nafter test failures (rather than requiring explicit test fixture clean\r\nup, it's now 'implicit' to the lifecycle of the mocks introduced at the\r\ntop level).\r\n3. Add default mocks for the region to control kafka producer so that\r\nmost tests do not require kafka running (also improves performance\r\nsignificantly). There is still the integration test that uses the real\r\nproducer.\r\n4. *Attempt* to fix ModuleDeadlock error with more granular importing. 
I\r\ncould not reproduce this issue locally, unfortunately, so this is a best\r\neffort attempt to reduce any circular import possibilities.\r\n\r\nCo-authored-by: getsantry[bot] <66042841+getsantry[bot]@users.noreply.github.com>", "code": "def get_region_to_control_producer(self) -> KafkaProducer:\n \n if self._publisher is None:\n config = settings.KAFKA_TOPICS.get(settings.KAFKA_REGION_TO_CONTROL)\n self._publisher = KafkaProducer(\n kafka_config.get_kafka_producer_cluster_options(config[\"cluster\"])\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 78, "n_words": 16, "vocab_size": 14, "complexity": 2, "nloc": 13, "token_counts": 53, "n_ast_nodes": 73, "n_identifiers": 11, "d_id": 18234, "documentation": { "docstring": "\n Creates, if necessary, an arroyo.KafkaProducer client configured for region to control communication and returns\n it, caching it for future calls. Installs an exit handler to close the worker thread processes.\n ", "n_words": 30, "vocab_size": 27, "n_whitespaces": 53, "language": "en" } }, { "id": 63636, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/requests/utils.py", "file_name": "utils.py", "fun_name": "dict_from_cookiejar", "commit_message": "upd; format", "code": "def dict_from_cookiejar(cj):\n \n\n cookie_dict = {}\n\n for cookie in cj:\n cookie_dict[cookie.name] = cookie.value\n\n return cookie_dict\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 33, "n_words": 14, "vocab_size": 12, "complexity": 2, "nloc": 5, "token_counts": 27, "n_ast_nodes": 45, "n_identifiers": 6, "d_id": 13432, "documentation": { "docstring": "Returns a key/value dictionary from a CookieJar.\n\n :param cj: CookieJar object to extract cookies from.\n :rtype: dict\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 26, "language": "en" } }, { "id": 195681, "commit_id": "d3c0fc825c4a80904a1fb9a2092137c3d9e0c3fe", "repo": "sympy", "path": "sympy/polys/numberfields/galoisgroups.py", "file_name": "galoisgroups.py", "fun_name": "galois_group", "commit_message": "Add a `galois_group()` function", "code": "def galois_group(T, max_tries=30, randomize=False):\n r\n from sympy.combinatorics.named_groups import CyclicGroup\n gg = {\n 3: _galois_group_degree_3,\n 4: _galois_group_degree_4,\n 5: _galois_group_degree_5,\n }\n max_supported = max(gg.keys())\n n = T.degree()\n if n > max_supported:\n raise ValueError(f\"Only polynomials up to degree {max_supported} are supported.\")\n if n < 1:\n raise ValueError(\"Constant polynomial has no Galois group.\")\n if n < 3:\n return (CyclicGroup(n), n == 1)\n return gg[n](T, max_tries=max_tries, randomize=randomize)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 133, "n_words": 62, "vocab_size": 50, "complexity": 4, "nloc": 52, "token_counts": 109, "n_ast_nodes": 171, "n_identifiers": 18, "d_id": 47364, "documentation": { "docstring": "\n Compute the Galois group for polynomials *T* up to degree 5.\n\n Parameters\n ==========\n\n T : Poly\n Irreducible, monic polynomial over :ref:`ZZ`, whose Galois group\n is to be determined.\n max_tries : int, default 30\n Make at most this many attempts in those steps that involve\n generating Tschirnhausen 
transformations.\n randomize : bool, default False\n If ``True``, then use random coefficients when generating Tschirnhausen\n transformations. Otherwise try transformations in a fixed order,\n starting with small coefficients and degrees and working upward.\n\n Returns\n =======\n\n Pair ``(PermutationGroup, bool)``\n The first element is the Galois group, and the second says whether the\n group is contained in the alternating group $A_n$ ($n$ the degree of\n *T*).\n\n Raises\n ======\n\n ValueError\n if *T* is of an unsupported degree.\n\n MaxTriesException\n if could not complete before exceeding *max_tries* in those steps\n that involve generating Tschirnhausen transformations.\n\n ", "n_words": 135, "vocab_size": 98, "n_whitespaces": 269, "language": "en" } }, { "id": 219892, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pyio.py", "file_name": "_pyio.py", "fun_name": "seek", "commit_message": "add python 3.10.4 for windows", "code": "def seek(self, pos, whence=SEEK_SET):\n \n if isinstance(pos, float):\n raise TypeError('an integer is required')\n self._checkClosed()\n return os.lseek(self._fd, pos, whence)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 56, "n_words": 17, "vocab_size": 16, "complexity": 2, "nloc": 5, "token_counts": 43, "n_ast_nodes": 69, "n_identifiers": 12, "d_id": 55884, "documentation": { "docstring": "Move to new file position.\n\n Argument offset is a byte count. Optional argument whence defaults to\n SEEK_SET or 0 (offset from start of file, offset should be >= 0); other values\n are SEEK_CUR or 1 (move relative to current position, positive or negative),\n and SEEK_END or 2 (move relative to end of file, usually negative, although\n many platforms allow seeking beyond the end of a file).\n\n Note that not all file objects are seekable.\n ", "n_words": 74, "vocab_size": 58, "n_whitespaces": 124, "language": "en" } }, { "id": 154492, "commit_id": "d6d503ac7c3028d871c34d9e99e925ddb0746df6", "repo": "modin", "path": "modin/core/execution/dask/implementations/pandas_on_dask/partitioning/virtual_partition.py", "file_name": "virtual_partition.py", "fun_name": "deploy_dask_func", "commit_message": "FIX-#4597: Refactor Partition handling of func, args, kwargs (#4715)\n\nCo-authored-by: Iaroslav Igoshev \r\nSigned-off-by: Jonathan Shi ", "code": "def deploy_dask_func(deployer, axis, f_to_deploy, f_args, f_kwargs, *args, **kwargs):\n \n result = deployer(axis, f_to_deploy, f_args, f_kwargs, *args, **kwargs)\n ip = get_ip()\n if isinstance(result, pandas.DataFrame):\n return result, len(result), len(result.columns), ip\n elif all(isinstance(r, pandas.DataFrame) for r in result):\n return [i for r in result for i in [r, len(r), len(r.columns), ip]]\n else:\n return [i for r in result for i in [r, None, None, ip]]\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 100, "n_words": 61, "vocab_size": 36, "complexity": 8, "nloc": 9, "token_counts": 136, "n_ast_nodes": 192, "n_identifiers": 19, "d_id": 36015, "documentation": { "docstring": "\n Execute a function on an axis partition in a worker process.\n\n This is ALWAYS called on either ``PandasDataframeAxisPartition.deploy_axis_func``\n or ``PandasDataframeAxisPartition.deploy_func_between_two_axis_partitions``, which both\n serve to deploy another dataframe function on a 
Dask worker process.\n\n Parameters\n ----------\n deployer : callable\n A `PandasDataFrameAxisPartition.deploy_*` method that will call `deploy_f`.\n axis : {0, 1}\n The axis to perform the function along.\n f_to_deploy : callable or RayObjectID\n The function to deploy.\n f_args : list or tuple\n Positional arguments to pass to ``f_to_deploy``.\n f_kwargs : dict\n Keyword arguments to pass to ``f_to_deploy``.\n *args : list\n Positional arguments to pass to ``func``.\n **kwargs : dict\n Keyword arguments to pass to ``func``.\n\n Returns\n -------\n list\n The result of the function ``func`` and metadata for it.\n ", "n_words": 116, "vocab_size": 69, "n_whitespaces": 224, "language": "en" } }, { "id": 46862, "commit_id": "bca849b4586c7446438f959b62903da4b997b9ea", "repo": "airflow", "path": "dev/breeze/src/airflow_breeze/utils/path_utils.py", "file_name": "path_utils.py", "fun_name": "get_used_airflow_sources", "commit_message": "Switch to `pipx` as the only installation Breeze2 method (#22740)\n\nSwitching Breeze2 to only use `pipx` for installation of Breeze2\r\ndue to problems it might cause for autocompletion if entrypoint\r\nis not avaiable on PATH.", "code": "def get_used_airflow_sources() -> Path:\n \n current_sources = search_upwards_for_airflow_sources_root(Path.cwd())\n if current_sources is None:\n current_sources = get_installation_airflow_sources()\n if current_sources is None:\n warn_non_editable()\n sys.exit(1)\n return current_sources\n\n\n@lru_cache(maxsize=None)", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "@lru_cache(maxsize=None)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 70, "n_words": 23, "vocab_size": 15, "complexity": 3, "nloc": 13, "token_counts": 43, "n_ast_nodes": 88, "n_identifiers": 11, "d_id": 9023, "documentation": { "docstring": "\n Retrieves the Root of used Airflow Sources which we operate on. 
Those are either Airflow sources found\n upwards in directory tree or sources where Breeze was installed from.\n :return: the Path for Airflow sources we use.\n ", "n_words": 36, "vocab_size": 30, "n_whitespaces": 49, "language": "en" } }, { "id": 105894, "commit_id": "0d9c12ad5155c6d505e70813a07c0aecd7120405", "repo": "datasets", "path": "tests/utils.py", "file_name": "utils.py", "fun_name": "require_spacy_model", "commit_message": "Make torch.Tensor and spacy models cacheable (#5191)\n\n* Make torch.Tensor and spacy models cacheable\r\n\r\n* Use newest models\r\n\r\n* Address comments\r\n\r\n* Small optim", "code": "def require_spacy_model(model):\n \n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 3, "token_counts": 10, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 22215, "documentation": { "docstring": "\n Decorator marking a test that requires a spacy model.\n\n These tests are skipped when they aren't installed.\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 27, "language": "en" } }, { "id": 176426, "commit_id": "f11068c0115ede0c7b631f771c10be7efd0b950b", "repo": "networkx", "path": "networkx/algorithms/polynomials.py", "file_name": "polynomials.py", "fun_name": "tutte_polynomial", "commit_message": "Add Tutte polynomial (#5265)\n\nAdd a new polynomial module to algorithms for characteristic polynomials.\r\nAdds the Tutte polynomial, which is computed and ultimate represented as a\r\nsympy expression.\r\n\r\nCo-authored-by: Dan Schult \r\nCo-authored-by: Ross Barnowski ", "code": "def tutte_polynomial(G):\n r\n import sympy\n\n x = sympy.Symbol(\"x\")\n y = sympy.Symbol(\"y\")\n stack = deque()\n stack.append(nx.MultiGraph(G))\n\n polynomial = 0\n while stack:\n G = stack.pop()\n bridges = set(nx.bridges(G))\n\n e = None\n for i in G.edges:\n if (i[0], i[1]) not in bridges and i[0] != i[1]:\n e = i\n break\n if not e:\n loops = list(nx.selfloop_edges(G, keys=True))\n polynomial += x ** len(bridges) * y ** len(loops)\n else:\n # deletion-contraction\n C = nx.contracted_edge(G, e, self_loops=True)\n C.remove_edge(e[0], e[0])\n G.remove_edge(*e)\n stack.append(G)\n stack.append(C)\n return sympy.simplify(polynomial)\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 275, "n_words": 78, "vocab_size": 59, "complexity": 6, "nloc": 142, "token_counts": 195, "n_ast_nodes": 314, "n_identifiers": 28, "d_id": 41889, "documentation": { "docstring": "Returns the Tutte polynomial of `G`\n \n This function computes the Tutte polynomial via an iterative version of\n the deletion-contraction algorithm.\n\n The Tutte polynomial `T_G(x, y)` is a fundamental graph polynomial invariant in\n two variables. 
It encodes a wide array of information related to the\n edge-connectivity of a graph; \"Many problems about graphs can be reduced to\n problems of finding and evaluating the Tutte polynomial at certain values\" [1]_.\n In fact, every deletion-contraction-expressible feature of a graph is a\n specialization of the Tutte polynomial [2]_ (see Notes for examples).\n\n There are several equivalent definitions; here are three:\n\n Def 1 (rank-nullity expansion): For `G` an undirected graph, `n(G)` the\n number of vertices of `G`, `E` the edge set of `G`, `V` the vertex set of\n `G`, and `c(A)` the number of connected components of the graph with vertex\n set `V` and edge set `A` [3]_:\n\n .. math::\n\n T_G(x, y) = \\sum_{A \\in E} (x-1)^{c(A) - c(E)} (y-1)^{c(A) + |A| - n(G)}\n\n Def 2 (spanning tree expansion): Let `G` be an undirected graph, `T` a spanning\n tree of `G`, and `E` the edge set of `G`. Let `E` have an arbitrary strict\n linear order `L`. Let `B_e` be the unique minimal nonempty edge cut of\n $E \\setminus T \\cup {e}$. An edge `e` is internally active with respect to\n `T` and `L` if `e` is the least edge in `B_e` according to the linear order\n `L`. The internal activity of `T` (denoted `i(T)`) is the number of edges\n in $E \\setminus T$ that are internally active with respect to `T` and `L`.\n Let `P_e` be the unique path in $T \\cup {e}$ whose source and target vertex\n are the same. An edge `e` is externally active with respect to `T` and `L`\n if `e` is the least edge in `P_e` according to the linear order `L`. The\n external activity of `T` (denoted `e(T)`) is the number of edges in\n $E \\setminus T$ that are externally active with respect to `T` and `L`.\n Then [4]_ [5]_:\n\n .. math::\n\n T_G(x, y) = \\sum_{T \\text{ a spanning tree of } G} x^{i(T)} y^{e(T)}\n\n Def 3 (deletion-contraction recurrence): For `G` an undirected graph, `G-e`\n the graph obtained from `G` by deleting edge `e`, `G/e` the graph obtained\n from `G` by contracting edge `e`, `k(G)` the number of cut-edges of `G`,\n and `l(G)` the number of self-loops of `G`:\n\n .. math::\n T_G(x, y) = \\begin{cases}\n \t x^{k(G)} y^{l(G)}, & \\text{if all edges are cut-edges or self-loops} \\\\\n T_{G-e}(x, y) + T_{G/e}(x, y), & \\text{otherwise, for an arbitrary edge $e$ not a cut-edge or loop}\n \\end{cases}\n\n Parameters\n ----------\n G : NetworkX graph\n\n Returns\n -------\n instance of `sympy.core.add.Add`\n A Sympy expression representing the Tutte polynomial for `G`.\n\n Examples\n --------\n >>> C = nx.cycle_graph(5)\n >>> nx.tutte_polynomial(C)\n x**4 + x**3 + x**2 + x + y\n\n >>> D = nx.diamond_graph()\n >>> nx.tutte_polynomial(D)\n x**3 + 2*x**2 + 2*x*y + x + y**2 + y\n\n Notes\n -----\n Some specializations of the Tutte polynomial:\n\n - `T_G(1, 1)` counts the number of spanning trees of `G`\n - `T_G(1, 2)` counts the number of connected spanning subgraphs of `G`\n - `T_G(2, 1)` counts the number of spanning forests in `G`\n - `T_G(0, 2)` counts the number of strong orientations of `G`\n - `T_G(2, 0)` counts the number of acyclic orientations of `G`\n\n Edge contraction is defined and deletion-contraction is introduced in [6]_.\n Combinatorial meaning of the coefficients is introduced in [7]_.\n Universality, properties, and applications are discussed in [8]_.\n\n Practically, up-front computation of the Tutte polynomial may be useful when\n users wish to repeatedly calculate edge-connectivity-related information\n about one or more graphs.\n\n References\n ----------\n .. [1] M. 
Brandt,\n \"The Tutte Polynomial.\"\n Talking About Combinatorial Objects Seminar, 2015\n https://math.berkeley.edu/~brandtm/talks/tutte.pdf\n .. [2] A. Björklund, T. Husfeldt, P. Kaski, M. Koivisto,\n \"Computing the Tutte polynomial in vertex-exponential time\"\n 49th Annual IEEE Symposium on Foundations of Computer Science, 2008\n https://ieeexplore.ieee.org/abstract/document/4691000\n .. [3] Y. Shi, M. Dehmer, X. Li, I. Gutman,\n \"Graph Polynomials,\" p. 14\n .. [4] Y. Shi, M. Dehmer, X. Li, I. Gutman,\n \"Graph Polynomials,\" p. 46\n .. [5] A. Nešetril, J. Goodall,\n \"Graph invariants, homomorphisms, and the Tutte polynomial\"\n https://iuuk.mff.cuni.cz/~andrew/Tutte.pdf\n .. [6] D. B. West,\n \"Introduction to Graph Theory,\" p. 84\n .. [7] G. Coutinho,\n \"A brief introduction to the Tutte polynomial\"\n Structural Analysis of Complex Networks, 2011\n https://homepages.dcc.ufmg.br/~gabriel/seminars/coutinho_tuttepolynomial_seminar.pdf\n .. [8] J. A. Ellis-Monaghan, C. Merino,\n \"Graph polynomials and their applications I: The Tutte polynomial\"\n Structural Analysis of Complex Networks, 2011\n https://arxiv.org/pdf/0803.3079.pdf\n ", "n_words": 732, "vocab_size": 354, "n_whitespaces": 1105, "language": "en" } }, { "id": 153822, "commit_id": "57e29bc5d82348006c5170ef9ac0a9eedcd9acf9", "repo": "modin", "path": "modin/core/storage_formats/base/query_compiler.py", "file_name": "query_compiler.py", "fun_name": "idxmax", "commit_message": "REFACTOR-#4513: Fix spelling mistakes in docs and docstrings (#4514)\n\nCo-authored-by: Rehan Sohail Durrani \r\nSigned-off-by: jeffreykennethli ", "code": "def idxmax(self, **kwargs): # noqa: PR02\n \n return DataFrameDefault.register(pandas.DataFrame.idxmax)(self, **kwargs)\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 24, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 26, "n_ast_nodes": 44, "n_identifiers": 7, "d_id": 35637, "documentation": { "docstring": "\n Get position of the first occurrence of the maximum for each row or column.\n\n Parameters\n ----------\n axis : {0, 1}\n skipna : bool\n **kwargs : dict\n Serves the compatibility purpose. 
Does not affect the result.\n\n Returns\n -------\n BaseQueryCompiler\n One-column QueryCompiler with index labels of the specified axis,\n where each row contains position of the maximum element for the\n corresponding row or column.\n ", "n_words": 62, "vocab_size": 43, "n_whitespaces": 177, "language": "en" } }, { "id": 65569, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/buying/report/procurement_tracker/procurement_tracker.py", "file_name": "procurement_tracker.py", "fun_name": "get_mapped_pr_records", "commit_message": "style: format code with black", "code": "def get_mapped_pr_records():\n\treturn frappe._dict(\n\t\tfrappe.db.sql(\n\t\t\t\n\t\t)\n\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 2, "n_words": 7, "vocab_size": 6, "complexity": 1, "nloc": 16, "token_counts": 18, "n_ast_nodes": 32, "n_identifiers": 5, "d_id": 13945, "documentation": { "docstring": "\n\t\tSELECT\n\t\t\tpr_item.purchase_order_item,\n\t\t\tpr.posting_date\n\t\tFROM `tabPurchase Receipt` pr, `tabPurchase Receipt Item` pr_item\n\t\tWHERE\n\t\t\tpr.docstatus=1\n\t\t\tAND pr.name=pr_item.parent\n\t\t\tAND pr_item.purchase_order_item IS NOT NULL\n\t\t\tAND pr.status not in (\"Closed\",\"Completed\",\"Cancelled\")\n\t\t", "n_words": 25, "vocab_size": 22, "n_whitespaces": 17, "language": "en" } }, { "id": 152171, "commit_id": "6a9b33c848281cb02f38764e4f91ef767f5e3edd", "repo": "stable-diffusion-webui", "path": "modules/codeformer/codeformer_arch.py", "file_name": "codeformer_arch.py", "fun_name": "calc_mean_std", "commit_message": "codeformer support", "code": "def calc_mean_std(feat, eps=1e-5):\n \n size = feat.size()\n assert len(size) == 4, 'The input feature should be 4D tensor.'\n b, c = size[:2]\n feat_var = feat.view(b, c, -1).var(dim=2) + eps\n feat_std = feat_var.sqrt().view(b, c, 1, 1)\n feat_mean = feat.view(b, c, -1).mean(dim=2).view(b, c, 1, 1)\n return feat_mean, feat_std\n\n", "url": "https://github.com/AUTOMATIC1111/stable-diffusion-webui.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 69, "n_words": 45, "vocab_size": 34, "complexity": 1, "nloc": 8, "token_counts": 112, "n_ast_nodes": 168, "n_identifiers": 15, "d_id": 35175, "documentation": { "docstring": "Calculate mean and std for adaptive_instance_normalization.\n\n Args:\n feat (Tensor): 4D tensor.\n eps (float): A small value added to the variance to avoid\n divide-by-zero. 
Default: 1e-5.\n ", "n_words": 25, "vocab_size": 24, "n_whitespaces": 56, "language": "en" } }, { "id": 176348, "commit_id": "e308b80f17264b89acf8defe185c71c6656d5105", "repo": "networkx", "path": "networkx/generators/line.py", "file_name": "line.py", "fun_name": "_lg_directed", "commit_message": "MAINT: Remove unnecessary helper functions, use inbuilt methods for line graph generator (#5327)\n\n* MAINT: Remove unnecessary helper functions, use inbuilt methods\r\n\r\n* Use multigraph key to create node, add tests for multi(di)graphs", "code": "def _lg_directed(G, create_using=None):\n \n L = nx.empty_graph(0, create_using, default=G.__class__)\n\n # Create a graph specific edge function.\n get_edges = partial(G.edges, keys=True) if G.is_multigraph() else G.edges\n\n for from_node in get_edges():\n # from_node is: (u,v) or (u,v,key)\n L.add_node(from_node)\n for to_node in get_edges(from_node[1]):\n L.add_edge(from_node, to_node)\n\n return L\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 92, "n_words": 42, "vocab_size": 36, "complexity": 4, "nloc": 8, "token_counts": 82, "n_ast_nodes": 128, "n_identifiers": 17, "d_id": 41851, "documentation": { "docstring": "Returns the line graph L of the (multi)digraph G.\n\n Edges in G appear as nodes in L, represented as tuples of the form (u,v)\n or (u,v,key) if G is a multidigraph. A node in L corresponding to the edge\n (u,v) is connected to every node corresponding to an edge (v,w).\n\n Parameters\n ----------\n G : digraph\n A directed graph or directed multigraph.\n create_using : NetworkX graph constructor, optional\n Graph type to create. If graph instance, then cleared before populated.\n Default is to use the same graph class as `G`.\n\n ", "n_words": 88, "vocab_size": 58, "n_whitespaces": 131, "language": "en" } }, { "id": 206039, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/forms/widgets.py", "file_name": "widgets.py", "fun_name": "id_for_label", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def id_for_label(self, id_, index=\"0\"):\n \n if id_ and self.add_id_index:\n id_ = \"%s_%s\" % (id_, index)\n return id_\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 48, "n_words": 16, "vocab_size": 14, "complexity": 3, "nloc": 4, "token_counts": 30, "n_ast_nodes": 51, "n_identifiers": 5, "d_id": 51334, "documentation": { "docstring": "\n Use an incremented id for each option where the main widget\n references the zero index.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 37, "language": "en" } }, { "id": 85629, "commit_id": "2f6716c264bbd916c2773edb8b75cf2e9b26c51b", "repo": "sentry", "path": "src/sentry/runner/initializer.py", "file_name": "initializer.py", "fun_name": "validate_snuba", "commit_message": "ref: type devserver startup (#38598)\n\nI noticed `sentry devserver 127.0.0.1` produced this error and decided\r\nto prevent it using typing:\r\n\r\n```console\r\n$ sentry devserver 127.0.0.1\r\nINFO:The Sentry runner will report development issues to Sentry.io. Use SENTRY_DEVENV_NO_REPORT to avoid reporting issues.\r\n16:33:40 [WARNING] sentry.utils.geo: settings.GEOIP_PATH_MMDB not configured.\r\n/Users/armenzg/code/sentry/src/sentry/runner/initializer.py:571: DeprecatedSettingWarning: The SENTRY_URL_PREFIX setting is deprecated. 
Please use SENTRY_OPTIONS['system.url-prefix'] instead.\r\n warnings.warn(DeprecatedSettingWarning(old, \"SENTRY_OPTIONS['%s']\" % new))\r\n16:33:41 [INFO] sentry.plugins.github: apps-not-configured\r\n16:33:41 [INFO] sentry.runner: We have reported the error below to Sentry\r\n/Users/armenzg/code/sentry/.venv/lib/python3.8/site-packages/sentry_sdk/worker.py:123: ResourceWarning: unclosed \r\n callback = self._queue.get()\r\nResourceWarning: Enable tracemalloc to get the object allocation traceback\r\nTraceback (most recent call last):\r\n File \"/Users/armenzg/code/sentry/.venv/bin/sentry\", line 33, in \r\n sys.exit(load_entry_point('sentry', 'console_scripts', 'sentry')())\r\n File \"/Users/armenzg/code/sentry/src/sentry/runner/__init__.py\", line 186, in main\r\n raise e\r\n File \"/Users/armenzg/code/sentry/src/sentry/runner/__init__.py\", line 178, in main\r\n func(**kwargs)\r\n File \"/Users/armenzg/code/sentry/.venv/lib/python3.8/site-packages/click/core.py\", line 1128, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/Users/armenzg/code/sentry/.venv/lib/python3.8/site-packages/click/core.py\", line 1053, in main\r\n rv = self.invoke(ctx)\r\n File \"/Users/armenzg/code/sentry/.venv/lib/python3.8/site-packages/click/core.py\", line 1659, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/Users/armenzg/code/sentry/.venv/lib/python3.8/site-packages/click/core.py\", line 1395, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/Users/armenzg/code/sentry/.venv/lib/python3.8/site-packages/click/core.py\", line 754, in invoke\r\n return __callback(*args, **kwargs)\r\n File \"/Users/armenzg/code/sentry/.venv/lib/python3.8/site-packages/click/decorators.py\", line 26, in new_func\r\n return f(get_current_context(), *args, **kwargs)\r\n File \"/Users/armenzg/code/sentry/src/sentry/runner/decorators.py\", line 69, in inner\r\n return ctx.invoke(f, *args, **kwargs)\r\n File \"/Users/armenzg/code/sentry/.venv/lib/python3.8/site-packages/click/core.py\", line 754, in invoke\r\n return __callback(*args, **kwargs)\r\n File \"/Users/armenzg/code/sentry/.venv/lib/python3.8/site-packages/click/decorators.py\", line 26, in new_func\r\n return f(get_current_context(), *args, **kwargs)\r\n File \"/Users/armenzg/code/sentry/src/sentry/runner/decorators.py\", line 29, in inner\r\n return ctx.invoke(f, *args, **kwargs)\r\n File \"/Users/armenzg/code/sentry/.venv/lib/python3.8/site-packages/click/core.py\", line 754, in invoke\r\n return __callback(*args, **kwargs)\r\n File \"/Users/armenzg/code/sentry/src/sentry/runner/commands/devserver.py\", line 215, in devserver\r\n port = port + 1\r\nTypeError: unsupported operand type(s) for +: 'NoneType' and 'int\r\n```", "code": "def validate_snuba() -> None:\n \n if not settings.DEBUG:\n return\n\n has_all_snuba_required_backends = (\n settings.SENTRY_SEARCH\n in (\n \"sentry.search.snuba.EventsDatasetSnubaSearchBackend\",\n \"sentry.utils.services.ServiceDelegator\",\n )\n and settings.SENTRY_TAGSTORE == \"sentry.tagstore.snuba.SnubaTagStorage\"\n and\n # TODO(mattrobenolt): Remove ServiceDelegator check\n settings.SENTRY_TSDB\n in (\"sentry.tsdb.redissnuba.RedisSnubaTSDB\", \"sentry.utils.services.ServiceDelegator\")\n )\n\n eventstream_is_snuba = (\n settings.SENTRY_EVENTSTREAM == \"sentry.eventstream.snuba.SnubaEventStream\"\n or settings.SENTRY_EVENTSTREAM == \"sentry.eventstream.kafka.KafkaEventStream\"\n )\n\n # All good here, it doesn't matter what else is going on\n if 
has_all_snuba_required_backends and eventstream_is_snuba:\n return\n\n from sentry.features import requires_snuba as snuba_features\n\n snuba_enabled_features = set()\n\n for feature in snuba_features:\n if settings.SENTRY_FEATURES.get(feature, False):\n snuba_enabled_features.add(feature)\n\n if snuba_enabled_features and not eventstream_is_snuba:\n from .importer import ConfigurationError\n\n show_big_error(\n \n % \"\\n\".join(snuba_enabled_features)\n )\n raise ConfigurationError(\"Cannot continue without Snuba configured.\")\n\n if not eventstream_is_snuba:\n from .importer import ConfigurationError\n\n show_big_error(\n \n % (\n settings.SENTRY_SEARCH,\n settings.SENTRY_TAGSTORE,\n settings.SENTRY_TSDB,\n settings.SENTRY_EVENTSTREAM,\n )\n )\n raise ConfigurationError(\"Cannot continue without Snuba configured correctly.\")\n\n if eventstream_is_snuba and not has_all_snuba_required_backends:\n show_big_error(\n \n % (\n settings.SENTRY_SEARCH,\n settings.SENTRY_TAGSTORE,\n settings.SENTRY_TSDB,\n settings.SENTRY_EVENTSTREAM,\n )\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 580, "n_words": 133, "vocab_size": 77, "complexity": 14, "nloc": 98, "token_counts": 194, "n_ast_nodes": 333, "n_identifiers": 23, "d_id": 18018, "documentation": { "docstring": "\n Make sure everything related to Snuba is in sync.\n\n This covers a few cases:\n\n * When you have features related to Snuba, you must also\n have Snuba fully configured correctly to continue.\n * If you have Snuba specific search/tagstore/tsdb backends,\n you must also have a Snuba compatible eventstream backend\n otherwise no data will be written into Snuba.\n * If you only have Snuba related eventstream, yell that you\n probably want the other backends otherwise things are weird.\n \nYou have features enabled which require Snuba,\nbut you don't have any Snuba compatible configuration.\n\nFeatures you have enabled:\n%s\n\nSee: https://github.com/getsentry/snuba#sentry--snuba\n\nIt appears that you are requiring Snuba,\nbut your SENTRY_EVENTSTREAM is not compatible.\n\nCurrent settings:\n\nSENTRY_SEARCH = %r\nSENTRY_TAGSTORE = %r\nSENTRY_TSDB = %r\nSENTRY_EVENTSTREAM = %r\n\nSee: https://github.com/getsentry/snuba#sentry--snuba\nYou are using a Snuba compatible eventstream\nwithout configuring search/tagstore/tsdb also to use Snuba.\nThis is probably not what you want.\n\nCurrent settings:\n\nSENTRY_SEARCH = %r\nSENTRY_TAGSTORE = %r\nSENTRY_TSDB = %r\nSENTRY_EVENTSTREAM = %r\n\nSee: https://github.com/getsentry/snuba#sentry--snuba", "n_words": 165, "vocab_size": 86, "n_whitespaces": 182, "language": "en" } }, { "id": 104578, "commit_id": "3804442bb7cfcb9d52044d92688115cfdc69c2da", "repo": "datasets", "path": "src/datasets/features/image.py", "file_name": "image.py", "fun_name": "flatten", "commit_message": "Fix flatten of complex feature types (#3723)\n\n* Flatten Translation and TranslationVariableLanguages\r\n\r\n* Add tests\r\n\r\n* Style\r\n\r\n* Flatten for decodable features\r\n\r\n* Fix flatten for non-dict types\r\n\r\n* Add test\r\n\r\n* Descriptive message in flatten for Audio feature\r\n\r\n* Small refactor\r\n\r\n* Add flatten to features\r\n\r\n* Update table_flatten\r\n\r\n* Revert changes in Dataset.flatten_/flatten\r\n\r\n* Apply Quentin's suggestions from code review\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* Improve table_flatten docstring\r\n\r\n* Fix 
tests\r\n\r\n* Add nested test\r\n\r\n* Minor fix\r\n\r\n* Remove comment\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>", "code": "def flatten(self) -> Union[\"FeatureType\", Dict[str, \"FeatureType\"]]:\n \n from .features import Value\n\n return (\n self\n if self.decode\n else {\n \"bytes\": Value(\"binary\"),\n \"path\": Value(\"string\"),\n }\n )\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 125, "n_words": 23, "vocab_size": 23, "complexity": 2, "nloc": 11, "token_counts": 48, "n_ast_nodes": 86, "n_identifiers": 8, "d_id": 21903, "documentation": { "docstring": "If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary.", "n_words": 16, "vocab_size": 13, "n_whitespaces": 15, "language": "en" } }, { "id": 156733, "commit_id": "2820bae493a49cb1d0a6e376985c5473b8f04fa8", "repo": "dask", "path": "dask/array/core.py", "file_name": "core.py", "fun_name": "dot", "commit_message": "Don't include docs in ``Array`` methods, just refer to module docs (#9244)\n\nCo-authored-by: James Bourbeau ", "code": "def dot(self, other):\n \n from dask.array.routines import tensordot\n\n return tensordot(self, other, axes=((self.ndim - 1,), (other.ndim - 2,)))\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 37, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 3, "token_counts": 45, "n_ast_nodes": 66, "n_identifiers": 9, "d_id": 36743, "documentation": { "docstring": "Dot product of self and other.\n\n Refer to :func:`dask.array.tensordot` for full documentation.\n\n See Also\n --------\n dask.array.dot : equivalent function\n ", "n_words": 19, "vocab_size": 19, "n_whitespaces": 54, "language": "en" } }, { "id": 101615, "commit_id": "98d01760e469fd2108eed8d0b0a1ba6297c3177c", "repo": "faceswap", "path": "tools/sort/sort_methods.py", "file_name": "sort_methods.py", "fun_name": "sort", "commit_message": "Overhaul sort:\n - Standardize image data reading and writing\n - Optimize loading (just one pass required)\n - Make all sort groups binnable (to greater or lesser results)\n - Add sort by pitch\n - Deprecate multiple options\n - linting, docs + locales", "code": "def sort(self) -> None:\n \n raise NotImplementedError()\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 6, "token_counts": 12, "n_ast_nodes": 23, "n_identifiers": 3, "d_id": 21023, "documentation": { "docstring": " Override for method specific logic for sorting the loaded statistics\n\n The scored list :attr:`_result` should be sorted in place\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 34, "language": "en" } }, { "id": 277252, "commit_id": "fa6d9107a498f7c2403ff28c7b389a1a0c5cc083", "repo": "keras", "path": "keras/engine/base_layer.py", "file_name": "base_layer.py", "fun_name": "losses", "commit_message": "reduct too long lines", "code": "def losses(self):\n \n collected_losses = []\n for layer in self._flatten_layers():\n # If any eager losses are present, we assume the model to be part of\n # an eager training loop (either a custom one or the one used when\n # `run_eagerly=True`) and so we always return just the eager losses.\n if layer._eager_losses:\n # Filter placeholder losses that may have 
been added by revived\n # layers. (see base_layer_utils for details).\n if (\n layer._eager_losses[0]\n is not base_layer_utils.REVIVED_LOSS_PLACEHOLDER\n ):\n collected_losses.extend(layer._eager_losses)\n else:\n collected_losses.extend(layer._losses)\n for regularizer in layer._callable_losses:\n loss_tensor = regularizer()\n if loss_tensor is not None:\n collected_losses.append(loss_tensor)\n return collected_losses\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 369, "n_words": 93, "vocab_size": 71, "complexity": 6, "nloc": 16, "token_counts": 83, "n_ast_nodes": 140, "n_identifiers": 14, "d_id": 81916, "documentation": { "docstring": "List of losses added using the `add_loss()` API.\n\n Variable regularization tensors are created when this property is\n accessed, so it is eager safe: accessing `losses` under a\n `tf.GradientTape` will propagate gradients back to the corresponding\n variables.\n\n Examples:\n\n >>> class MyLayer(tf.keras.layers.Layer):\n ... def call(self, inputs):\n ... self.add_loss(tf.abs(tf.reduce_mean(inputs)))\n ... return inputs\n >>> l = MyLayer()\n >>> l(np.ones((10, 1)))\n >>> l.losses\n [1.0]\n\n >>> inputs = tf.keras.Input(shape=(10,))\n >>> x = tf.keras.layers.Dense(10)(inputs)\n >>> outputs = tf.keras.layers.Dense(1)(x)\n >>> model = tf.keras.Model(inputs, outputs)\n >>> # Activity regularization.\n >>> len(model.losses)\n 0\n >>> model.add_loss(tf.abs(tf.reduce_mean(x)))\n >>> len(model.losses)\n 1\n\n >>> inputs = tf.keras.Input(shape=(10,))\n >>> d = tf.keras.layers.Dense(10, kernel_initializer='ones')\n >>> x = d(inputs)\n >>> outputs = tf.keras.layers.Dense(1)(x)\n >>> model = tf.keras.Model(inputs, outputs)\n >>> # Weight regularization.\n >>> model.add_loss(lambda: tf.reduce_mean(d.kernel))\n >>> model.losses\n []\n\n Returns:\n A list of tensors.\n ", "n_words": 128, "vocab_size": 83, "n_whitespaces": 385, "language": "en" } }, { "id": 206346, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/test/client.py", "file_name": "client.py", "fun_name": "store_rendered_templates", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def store_rendered_templates(store, signal, sender, template, context, **kwargs):\n \n store.setdefault(\"templates\", []).append(template)\n if \"context\" not in store:\n store[\"context\"] = ContextList()\n store[\"context\"].append(copy(context))\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 37, "n_words": 18, "vocab_size": 18, "complexity": 2, "nloc": 5, "token_counts": 57, "n_ast_nodes": 96, "n_identifiers": 11, "d_id": 51498, "documentation": { "docstring": "\n Store templates and contexts that are rendered.\n\n The context is copied so that it is an accurate representation at the time\n of rendering.\n ", "n_words": 23, "vocab_size": 21, "n_whitespaces": 36, "language": "en" } }, { "id": 190666, "commit_id": "b21b008118fc8cf65b4bcd9b059f1cd704e05c68", "repo": "pytest", "path": "testing/python/metafunc.py", "file_name": "metafunc.py", "fun_name": "test_unicode_idval", "commit_message": "Refactor idmaker functions into class IdMaker\n\nThis commit only refactors, it does not change or add functionality yet. Public\nAPI is retained. Reason or refactoring:\n\nUser provided parameter IDs (e.g. 
Metafunc.parametrize(ids=...)) had so far\nonly been used to calculate a unique test ID for each test invocation. That\ntest ID was a joined string where each parameter contributed some partial ID.\n\nWe're soon going to reuse functionality to generate parameter keys for\nreorder_items and FixtureDef cache. We will be interested in the partial\nIDs, and only if they originate from explicit user information. Refactoring\nmakes logic and data accessible for reuse, and increases cohesion in general.", "code": "def test_unicode_idval(self) -> None:\n \n values = [\n (\"\", r\"\"),\n (\"ascii\", r\"ascii\"),\n (\"ação\", r\"a\\xe7\\xe3o\"),\n (\"josé@blah.com\", r\"jos\\xe9@blah.com\"),\n (\n r\"δοκ.ιμή@παράδειγμα.δοκιμή\",\n r\"\\u03b4\\u03bf\\u03ba.\\u03b9\\u03bc\\u03ae@\\u03c0\\u03b1\\u03c1\\u03ac\\u03b4\\u03b5\\u03b9\\u03b3\"\n r\"\\u03bc\\u03b1.\\u03b4\\u03bf\\u03ba\\u03b9\\u03bc\\u03ae\",\n ),\n ]\n for val, expected in values:\n assert (\n IdMaker([], [], None, None, None, None)._idval(val, \"a\", 6) == expected\n )\n", "url": "https://github.com/pytest-dev/pytest.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 215, "n_words": 39, "vocab_size": 35, "complexity": 2, "nloc": 21, "token_counts": 88, "n_ast_nodes": 135, "n_identifiers": 7, "d_id": 46373, "documentation": { "docstring": "Test that Unicode strings outside the ASCII character set get\n escaped, using byte escapes if they're in that range or unicode\n escapes if they're not.\n\n ", "n_words": 25, "vocab_size": 21, "n_whitespaces": 46, "language": "en" } }, { "id": 321750, "commit_id": "218f490484066660dd4e899da600b252f7edd468", "repo": "qutebrowser", "path": "qutebrowser/config/configfiles.py", "file_name": "configfiles.py", "fun_name": "_has_webengine", "commit_message": "Warn on QtWebEngine downgrade and Qt 5 -> 6 upgrade", "code": "def _has_webengine(self) -> bool:\n \n try:\n import qutebrowser.qt.webenginewidgets # pylint: disable=unused-import\n except ImportError:\n return False\n return True\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 67, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 10, "token_counts": 23, "n_ast_nodes": 40, "n_identifiers": 7, "d_id": 117884, "documentation": { "docstring": "Check if QtWebEngine is available.\n\n Note that it's too early to use objects.backend here...\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 28, "language": "en" } }, { "id": 130357, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/autoscaler/_private/aliyun/utils.py", "file_name": "utils.py", "fun_name": "tag_resource", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def tag_resource(self, resource_ids, tags, resource_type=\"instance\"):\n \n request = TagResourcesRequest()\n request.set_Tags(tags)\n request.set_ResourceType(resource_type)\n request.set_ResourceIds(resource_ids)\n response = self._send_request(request)\n if response is not None:\n logging.info(\"instance %s create tag successfully.\", resource_ids)\n else:\n logging.error(\"instance %s create tag failed.\", resource_ids)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 110, "n_words": 32, "vocab_size": 26, "complexity": 2, "nloc": 10, "token_counts": 69, 
"n_ast_nodes": 117, "n_identifiers": 15, "d_id": 29243, "documentation": { "docstring": "Create and bind tags to specified ECS resources.\n\n :param resource_ids: The IDs of N resources.\n :param tags: The tags of the resource.\n :param resource_type: The type of the resource.\n ", "n_words": 29, "vocab_size": 19, "n_whitespaces": 57, "language": "en" } }, { "id": 308401, "commit_id": "d0c4f0fec4216e4193da716001b5e13e1e3f2106", "repo": "core", "path": "homeassistant/components/mqtt/cover.py", "file_name": "cover.py", "fun_name": "async_close_cover", "commit_message": "Add mqtt encoding support for publishing (#62739)\n\n* encoding support for mqtt publishing - todo tests\r\n\r\n* signature allows None values for qos and retain\r\n\r\n* common test for mqtt publishing encoding\r\n\r\n* better test with command templates\r\n\r\n* more tests\r\n\r\n* fix tests alarm control panel+tests light basic\r\n\r\n* tests light json and template\r\n\r\n* add tests vacuum and fix tests light_template", "code": "async def async_close_cover(self, **kwargs):\n \n await mqtt.async_publish(\n self.hass,\n self._config.get(CONF_COMMAND_TOPIC),\n self._config[CONF_PAYLOAD_CLOSE],\n self._config[CONF_QOS],\n self._config[CONF_RETAIN],\n self._config[CONF_ENCODING],\n )\n if self._optimistic:\n # Optimistically assume that cover has changed state.\n self._state = STATE_CLOSED\n if self._config.get(CONF_GET_POSITION_TOPIC):\n self._position = self.find_percentage_in_range(\n self._config[CONF_POSITION_CLOSED], COVER_PAYLOAD\n )\n self.async_write_ha_state()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 222, "n_words": 35, "vocab_size": 32, "complexity": 3, "nloc": 16, "token_counts": 98, "n_ast_nodes": 150, "n_identifiers": 22, "d_id": 107158, "documentation": { "docstring": "Move the cover down.\n\n This method is a coroutine.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 23, "language": "en" } }, { "id": 22098, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/models.py", "file_name": "models.py", "fun_name": "links", "commit_message": "Rename notpip to pip. 
Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def links(self):\n \n\n header = self.headers.get(\"link\")\n\n resolved_links = {}\n\n if header:\n links = parse_header_links(header)\n\n for link in links:\n key = link.get(\"rel\") or link.get(\"url\")\n resolved_links[key] = link\n\n return resolved_links\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 114, "n_words": 27, "vocab_size": 21, "complexity": 4, "nloc": 9, "token_counts": 57, "n_ast_nodes": 100, "n_identifiers": 9, "d_id": 4177, "documentation": { "docstring": "Returns the parsed header links of the response, if any.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 118743, "commit_id": "72703b38029f9358a0ec7ca5ed875a6b438ece19", "repo": "streamlit", "path": "lib/streamlit/elements/text.py", "file_name": "text.py", "fun_name": "text", "commit_message": "Replace static apps with live Cloud apps (#4317)\n\nCo-authored-by: kajarenc ", "code": "def text(self, body):\n \n text_proto = TextProto()\n text_proto.body = clean_text(body)\n return self.dg._enqueue(\"text\", text_proto)\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 40, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 32, "n_ast_nodes": 55, "n_identifiers": 8, "d_id": 26400, "documentation": { "docstring": "Write fixed-width and preformatted text.\n\n Parameters\n ----------\n body : str\n The string to display.\n\n Example\n -------\n >>> st.text('This is some text.')\n\n ", "n_words": 21, "vocab_size": 21, "n_whitespaces": 81, "language": "en" } }, { "id": 226825, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_funnelarea.py", "file_name": "_funnelarea.py", "fun_name": "baseratio", "commit_message": "switch to black .22", "code": "def baseratio(self):\n \n return self[\"baseratio\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 58498, "documentation": { "docstring": "\n Sets the ratio between bottom length and maximum top length.\n\n The 'baseratio' property is a number and may be specified as:\n - An int or float in the interval [0, 1]\n\n Returns\n -------\n int|float\n ", "n_words": 34, "vocab_size": 32, "n_whitespaces": 86, "language": "en" } }, { "id": 200894, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/aggregation/tests.py", "file_name": "tests.py", "fun_name": "test_sum_distinct_aggregate", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_sum_distinct_aggregate(self):\n \n authors = Author.objects.filter(book__in=[self.b5, self.b6])\n self.assertEqual(authors.count(), 3)\n\n distinct_authors = authors.distinct()\n self.assertEqual(distinct_authors.count(), 2)\n\n # Selected author ages are 57 and 46\n age_sum = distinct_authors.aggregate(Sum(\"age\"))\n self.assertEqual(age_sum[\"age__sum\"], 103)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 82, "n_words": 26, "vocab_size": 24, "complexity": 1, "nloc": 
7, "token_counts": 79, "n_ast_nodes": 132, "n_identifiers": 16, "d_id": 49822, "documentation": { "docstring": "\n Sum on a distinct() QuerySet should aggregate only the distinct items.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 26, "language": "en" } }, { "id": 126148, "commit_id": "acf2bf9b2fa9f6cac8c599ec1eea6a9d5249905f", "repo": "ray", "path": "rllib/utils/replay_buffers/tests/test_reservoir_buffer.py", "file_name": "test_reservoir_buffer.py", "fun_name": "test_episodes_unit", "commit_message": "[RLlib] Get rid of all these deprecation warnings. (#27085)", "code": "def test_episodes_unit(self):\n \n self.batch_id = 0\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 2, "nloc": 14, "token_counts": 104, "n_ast_nodes": 21, "n_identifiers": 3, "d_id": 28072, "documentation": { "docstring": "Tests adding, sampling, get-/set state, and eviction with\n experiences stored by timesteps.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 18, "language": "en" } }, { "id": 133142, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/dask/tests/test_dask_callback.py", "file_name": "test_dask_callback.py", "fun_name": "test_presubmit_shortcircuit", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_presubmit_shortcircuit(ray_start_1_cpu):\n \n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 8, "token_counts": 43, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 29941, "documentation": { "docstring": "\n Test that presubmit return short-circuits task submission, and that task's\n result is set to the presubmit return value.\n ", "n_words": 18, "vocab_size": 15, "n_whitespaces": 28, "language": "en" } }, { "id": 203345, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admin/checks.py", "file_name": "checks.py", "fun_name": "_check_prepopulated_fields_value", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _check_prepopulated_fields_value(self, obj, val, label):\n \n\n if not isinstance(val, (list, tuple)):\n return must_be(\"a list or tuple\", option=label, obj=obj, id=\"admin.E029\")\n else:\n return list(\n chain.from_iterable(\n self._check_prepopulated_fields_value_item(\n obj, subfield_name, \"%s[%r]\" % (label, index)\n )\n for index, subfield_name in enumerate(val)\n )\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 201, "n_words": 37, "vocab_size": 33, "complexity": 3, "nloc": 12, "token_counts": 78, "n_ast_nodes": 120, "n_identifiers": 17, "d_id": 50319, "documentation": { "docstring": "Check a value of `prepopulated_fields` dictionary, i.e. 
it's an\n iterable of existing fields.", "n_words": 13, "vocab_size": 12, "n_whitespaces": 19, "language": "en" } }, { "id": 33998, "commit_id": "efb35a4107478f7d2ebcf56572c0967e68536e15", "repo": "transformers", "path": "src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py", "file_name": "processing_wav2vec2_with_lm.py", "fun_name": "from_pretrained", "commit_message": "[Wav2Vec2ProcessorWithLM] improve decoder downlaod (#15040)", "code": "def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):\n r\n requires_backends(cls, \"pyctcdecode\")\n from pyctcdecode import BeamSearchDecoderCTC\n\n feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)\n tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)\n\n if os.path.isdir(pretrained_model_name_or_path):\n decoder = BeamSearchDecoderCTC.load_from_dir(pretrained_model_name_or_path)\n else:\n # BeamSearchDecoderCTC has no auto class\n kwargs.pop(\"_from_auto\", None)\n\n # make sure that only relevant filenames are downloaded\n language_model_filenames = os.path.join(BeamSearchDecoderCTC._LANGUAGE_MODEL_SERIALIZED_DIRECTORY, \"*\")\n alphabet_filename = BeamSearchDecoderCTC._ALPHABET_SERIALIZED_FILENAME\n allow_regex = [language_model_filenames, alphabet_filename]\n\n decoder = BeamSearchDecoderCTC.load_from_hf_hub(\n pretrained_model_name_or_path, allow_regex=allow_regex, **kwargs\n )\n\n # set language model attributes\n for attribute in [\"alpha\", \"beta\", \"unk_score_offset\", \"score_boundary\"]:\n value = kwargs.pop(attribute, None)\n\n if value is not None:\n cls._set_language_model_attribute(decoder, attribute, value)\n\n # make sure that decoder's alphabet and tokenizer's vocab match in content\n missing_decoder_tokens = cls.get_missing_alphabet_tokens(decoder, tokenizer)\n if len(missing_decoder_tokens) > 0:\n raise ValueError(\n f\"The tokens {missing_decoder_tokens} are defined in the tokenizer's \"\n \"vocabulary, but not in the decoder's alphabet. \"\n f\"Make sure to include {missing_decoder_tokens} in the decoder's alphabet.\"\n )\n\n return cls(feature_extractor=feature_extractor, tokenizer=tokenizer, decoder=decoder)\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 445, "n_words": 137, "vocab_size": 100, "complexity": 5, "nloc": 56, "token_counts": 194, "n_ast_nodes": 321, "n_identifiers": 31, "d_id": 6183, "documentation": { "docstring": "\n Instantiate a [`Wav2Vec2ProcessorWithLM`] from a pretrained Wav2Vec2 processor.\n\n \n\n This class method is simply calling Wav2Vec2FeatureExtractor's\n [`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`], Wav2Vec2CTCTokenizer's\n [`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`], and\n [`pyctcdecode.BeamSearchDecoderCTC.load_from_hf_hub`].\n\n Please refer to the docstrings of the methods above for more information.\n\n \n\n Args:\n pretrained_model_name_or_path (`str` or `os.PathLike`):\n This can be either:\n\n - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on\n huggingface.co. 
Valid model ids can be located at the root-level, like `bert-base-uncased`, or\n namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`.\n - a path to a *directory* containing a feature extractor file saved using the\n [`~SequenceFeatureExtractor.save_pretrained`] method, e.g., `./my_model_directory/`.\n - a path or url to a saved feature extractor JSON *file*, e.g.,\n `./my_model_directory/preprocessor_config.json`.\n **kwargs\n Additional keyword arguments passed along to both [`SequenceFeatureExtractor`] and\n [`PreTrainedTokenizer`]\n ", "n_words": 124, "vocab_size": 89, "n_whitespaces": 375, "language": "en" } }, { "id": 150412, "commit_id": "9f6bba40af1a407f190a89f5c0c8b4e3f528ba46", "repo": "freqtrade", "path": "freqtrade/rpc/replicate/__init__.py", "file_name": "__init__.py", "fun_name": "follower_loop", "commit_message": "initial concept for replicate, basic leader and follower logic", "code": "async def follower_loop(self):\n \n try:\n await self._connect_to_leaders()\n except Exception as e:\n logger.error(\"Exception occurred in follower loop: \")\n logger.exception(e)\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 71, "n_words": 17, "vocab_size": 17, "complexity": 2, "nloc": 6, "token_counts": 31, "n_ast_nodes": 60, "n_identifiers": 8, "d_id": 34736, "documentation": { "docstring": "\n Main follower coroutine\n\n This starts all of the leader connection coros\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 33, "language": "en" } }, { "id": 101373, "commit_id": "1022651eb8a7741014f5d2ec7cbfe882120dfa5f", "repo": "faceswap", "path": "scripts/convert.py", "file_name": "convert.py", "fun_name": "coverage_ratio", "commit_message": "Bugfix: convert - Gif Writer\n - Fix non-launch error on Gif Writer\n - convert plugins - linting\n - convert/fs_media/preview/queue_manager - typing\n - Change convert items from dict to Dataclass", "code": "def coverage_ratio(self) -> float:\n \n return self._coverage_ratio\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 12, "n_ast_nodes": 22, "n_identifiers": 4, "d_id": 20788, "documentation": { "docstring": " float: The coverage ratio that the model was trained at. 
", "n_words": 10, "vocab_size": 10, "n_whitespaces": 11, "language": "en" } }, { "id": 74850, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/documents/tests/test_models.py", "file_name": "test_models.py", "fun_name": "test_standard_get_document_model_string", "commit_message": "Reformat with black", "code": "def test_standard_get_document_model_string(self):\n \n del settings.WAGTAILDOCS_DOCUMENT_MODEL\n self.assertEqual(get_document_model_string(), \"wagtaildocs.Document\")\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 27, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 37, "n_identifiers": 6, "d_id": 16328, "documentation": { "docstring": "Test get_document_model_string with no WAGTAILDOCS_DOCUMENT_MODEL", "n_words": 5, "vocab_size": 5, "n_whitespaces": 4, "language": "en" } }, { "id": 285833, "commit_id": "07c08df84e2af99be4ee32ab276128cafb9e7986", "repo": "OpenBBTerminal", "path": "openbb_terminal/helpers_denomination.py", "file_name": "helpers_denomination.py", "fun_name": "get_denominations", "commit_message": "Bug/2583 (#2671)\n\n* #2583 [CT] Add and use denomination helper\r\n\r\n* #2583 [CT] Fix Yahoo Finance denomination\r\n\r\n* #2583 [CT] Fix typings for dict\r\n\r\n* #2583 [CT] Add YF model get financials tests\r\n\r\n* #2583 [CT] Fix stubbed currency\r\n\r\n* #2583 [CT] Add test coverage for denomination helpers\r\n\r\n* #2583 [CT] Fix YF view not exporting raw data\r\n\r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>", "code": "def get_denominations() -> Dict[DENOMINATION, float]:\n \n return {\n \"Trillions\": 1_000_000_000_000,\n \"Billions\": 1_000_000_000,\n \"Millions\": 1_000_000,\n \"Thousands\": 1_000,\n \"Units\": 1,\n }\n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 62, "n_words": 18, "vocab_size": 18, "complexity": 1, "nloc": 13, "token_counts": 35, "n_ast_nodes": 61, "n_identifiers": 4, "d_id": 85447, "documentation": { "docstring": "Gets all supported denominations and their lower bound value\n\n Returns:\n Dict[DENOMINATION, int]: All supported denominations and their lower bound value\n ", "n_words": 20, "vocab_size": 13, "n_whitespaces": 33, "language": "en" } }, { "id": 65199, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/consolidated_financial_statement/consolidated_financial_statement.py", "file_name": "consolidated_financial_statement.py", "fun_name": "update_parent_account_names", "commit_message": "style: format code with black", "code": "def update_parent_account_names(accounts):\n\t\n\tname_to_account_map = {}\n\n\tfor d in accounts:\n\t\tif d.account_number:\n\t\t\taccount_name = d.account_number + \" - \" + d.account_name\n\t\telse:\n\t\t\taccount_name = d.account_name\n\t\tname_to_account_map[d.name] = account_name\n\n\tfor account in accounts:\n\t\tif account.parent_account:\n\t\t\taccount[\"parent_account_name\"] = name_to_account_map.get(account.parent_account)\n\n\treturn accounts\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 26, "n_words": 38, "vocab_size": 25, "complexity": 5, "nloc": 12, "token_counts": 71, 
"n_ast_nodes": 118, "n_identifiers": 10, "d_id": 13822, "documentation": { "docstring": "Update parent_account_name in accounts list.\n\n\tparent_name is `name` of parent account which could have other prefix\n\tof account_number and suffix of company abbr. This function adds key called\n\t`parent_account_name` which does not have such prefix/suffix.\n\t", "n_words": 35, "vocab_size": 31, "n_whitespaces": 31, "language": "en" } }, { "id": 132452, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tune/tests/test_cluster_searcher.py", "file_name": "test_cluster_searcher.py", "fun_name": "test_cluster_interrupt_searcher", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_cluster_interrupt_searcher(start_connected_cluster, tmpdir, searcher):\n \n cluster = start_connected_cluster\n dirpath = str(tmpdir)\n local_checkpoint_dir = os.path.join(dirpath, \"experiment\")\n from ray.tune import register_trainable\n\n register_trainable(\"trainable\", MyTrainableClass)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 38, "n_words": 20, "vocab_size": 18, "complexity": 12, "nloc": 60, "token_counts": 313, "n_ast_nodes": 72, "n_identifiers": 15, "d_id": 29762, "documentation": { "docstring": "Tests restoration of HyperOptSearch experiment on cluster shutdown\n with actual interrupt.\n\n Restoration should restore both state of trials\n and previous search algorithm (HyperOptSearch) state.\n This is an end-to-end test.\n ", "n_words": 29, "vocab_size": 28, "n_whitespaces": 44, "language": "en" } }, { "id": 63387, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", "file_name": "pyparsing.py", "fun_name": "__invert__", "commit_message": "upd; format", "code": "def __invert__(self):\n \n return NotAny(self)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 21, "n_identifiers": 3, "d_id": 13282, "documentation": { "docstring": "\n Implementation of ~ operator - returns :class:`NotAny`\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 228784, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/bar/marker/_pattern.py", "file_name": "_pattern.py", "fun_name": "solidity", "commit_message": "switch to black .22", "code": "def solidity(self):\n \n return self[\"solidity\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 60457, "documentation": { "docstring": "\n Sets the solidity of the pattern fill. Solidity is roughly the\n fraction of the area filled by the pattern. 
Solidity of 0 shows\n only the background color without pattern and solidty of 1\n shows only the foreground color without pattern.\n\n The 'solidity' property is a number and may be specified as:\n - An int or float in the interval [0, 1]\n - A tuple, list, or one-dimensional numpy array of the above\n\n Returns\n -------\n int|float|numpy.ndarray\n ", "n_words": 75, "vocab_size": 52, "n_whitespaces": 157, "language": "en" } }, { "id": 81964, "commit_id": "68a44529b6b77d2d43d7099b654560bfd8bbf518", "repo": "awx", "path": "awxkit/awxkit/api/pages/page.py", "file_name": "page.py", "fun_name": "extract_data", "commit_message": "Register pages for the Instance peers and install bundle endpoints\n\nThis includes exposing a new interface for Page objects, Page.bytes,\nto return the full bytestring contents of the response.", "code": "def extract_data(self, response):\n \n try:\n data = response.json()\n except ValueError as e: # If there was no json to parse\n data = {}\n if response.text or response.status_code not in (200, 202, 204):\n text = response.text\n if len(text) > 1024:\n text = text[:1024] + '... <<< Truncated >>> ...'\n log.debug(\"Unable to parse JSON response ({0.status_code}): {1} - '{2}'\".format(response, e, text))\n\n return data\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 186, "n_words": 60, "vocab_size": 50, "complexity": 5, "nloc": 11, "token_counts": 83, "n_ast_nodes": 137, "n_identifiers": 13, "d_id": 17284, "documentation": { "docstring": "Takes a `requests.Response` and returns a data dict.", "n_words": 8, "vocab_size": 7, "n_whitespaces": 7, "language": "en" } }, { "id": 153874, "commit_id": "4ec7f6347903f9133c65ebc5b6e0e15553b98577", "repo": "modin", "path": "modin/core/execution/python/implementations/pandas_on_python/partitioning/partition.py", "file_name": "partition.py", "fun_name": "add_to_apply_calls", "commit_message": "REFACTOR-#4530: Standardize access to physical data in partitions (#4563)\n\nSigned-off-by: Alexey Prutskov ", "code": "def add_to_apply_calls(self, func, *args, **kwargs):\n \n return PandasOnPythonDataframePartition(\n self._data.copy(),\n call_queue=self.call_queue + [(func, args, kwargs)],\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 57, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 5, "token_counts": 42, "n_ast_nodes": 63, "n_identifiers": 9, "d_id": 35677, "documentation": { "docstring": "\n Add a function to the call queue.\n\n Parameters\n ----------\n func : callable\n Function to be added to the call queue.\n *args : iterable\n Additional positional arguments to be passed in `func`.\n **kwargs : dict\n Additional keyword arguments to be passed in `func`.\n\n Returns\n -------\n PandasOnPythonDataframePartition\n New ``PandasOnPythonDataframePartition`` object with extended call queue.\n ", "n_words": 52, "vocab_size": 34, "n_whitespaces": 167, "language": "en" } }, { "id": 88279, "commit_id": "10fbaf4b856f85879611d50b714fa47eb4a358c3", "repo": "sentry", "path": "src/sentry/integrations/slack/requests/action.py", "file_name": "action.py", "fun_name": "callback_data", "commit_message": "ref: add src/sentry/utils/json.py to mypy.ini (#41133)\n\nfirst commit I sorted some of the mypy files (separated out to make the\r\ndiff of the second commit easier to follow)", "code": "def callback_data(self) -> JSONData:\n \n return 
json.loads(self.data[\"callback_id\"])\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 12, "token_counts": 20, "n_ast_nodes": 36, "n_identifiers": 6, "d_id": 18370, "documentation": { "docstring": "\n We store certain data in ``callback_id`` as JSON. It's a bit hacky, but\n it's the simplest way to store state without saving it on the Sentry\n side.\n\n Data included in this field:\n - issue: the ID of the corresponding Issue\n - orig_response_url: URL from the original message we received\n - is_message: did the original message have a 'message' type\n ", "n_words": 59, "vocab_size": 47, "n_whitespaces": 128, "language": "en" } }, { "id": 177582, "commit_id": "35125cca12ba1e8703c4284894e4e2db44ce7009", "repo": "label-studio", "path": "label_studio/tests/test_next_task.py", "file_name": "test_next_task.py", "fun_name": "test_overlap_first", "commit_message": "fix: DEV-1348: Fix _rearrange_overlap_cohort filter condition for overlap bulk update with concurrent import (#1844)\n\n* [fix] Rearrange overlap depending in annotations count\r\n\r\n* Fix next task test for not random overlap assignment\r\n\r\n* Delete unused method\r\n\r\n* Rename rearrange method to have back compatibility\r\n\r\n* Refactor to Q_finished_annotations from tasks.models\r\n\r\n* Fix filter for tasks with max annotations\r\n\r\n* Change filter for tasks with max annotations\r\n\r\n* Change project stats recalculation condition\r\n\r\n* Fix rearrange during import from storage\r\n\r\n* Change _rearrange_overlap_cohort filter condition\r\n\r\n* Switching to bulk_update in _rearrange_overlap_cohort\r\n\r\n* Stylize code\r\n\r\n* Add is_labeled on import\r\n\r\n* Fix tests\r\n\r\n* Fix tests\r\n\r\n* Fix tests more\r\n\r\nCo-authored-by: nik \r\nCo-authored-by: Sergei Ivashchenko \r\nCo-authored-by: niklub \r\nCo-authored-by: Max Tkachenko ", "code": "def test_overlap_first(business_client, setup_before_upload, show_overlap_first):\n c = business_client\n config = dict(\n title='test_overlap_first',\n is_published=True,\n maximum_annotations=1,\n show_overlap_first=show_overlap_first,\n sampling=\"Uniform sampling\",\n label_config=\n )\n\n project = make_project(config, business_client.user)\n\n annotation_result = json.dumps([{\n 'from_name': 'text_class',\n 'to_name': 'text',\n 'type': 'choices',\n 'value': {'choices': ['class_A']}\n }])\n\n num_tasks = 1000\n overlap_cohort_percentage = 1\n\n # set up tasks overlap\n setup_after_upload = True\n if setup_before_upload:\n r = c.patch(\n f'/api/projects/{project.id}/',\n data=json.dumps({'maximum_annotations': 2, 'overlap_cohort_percentage': overlap_cohort_percentage}),\n content_type='application/json'\n )\n assert r.status_code == 200\n setup_after_upload = False\n\n # create tasks\n tasks = []\n for i in range(num_tasks):\n tasks.append({'data': {'text': f'this is {str(i)}'}})\n r = business_client.post(\n f'/api/projects/{project.id}/tasks/bulk/', data=json.dumps(tasks), content_type='application/json')\n assert r.status_code == 201\n\n if setup_after_upload:\n r = c.patch(\n f'/api/projects/{project.id}/',\n data=json.dumps({'maximum_annotations': 2, 'overlap_cohort_percentage': overlap_cohort_percentage}),\n content_type='application/json'\n )\n assert r.status_code == 200\n\n expected_tasks_with_overlap = int(overlap_cohort_percentage / 100. 
* num_tasks)\n\n assert Task.objects.filter(Q(project_id=project.id) & Q(overlap__gt=1)).count() == expected_tasks_with_overlap\n", "url": "https://github.com/heartexlabs/label-studio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 377, "n_words": 122, "vocab_size": 84, "complexity": 8, "nloc": 63, "token_counts": 396, "n_ast_nodes": 474, "n_identifiers": 42, "d_id": 42449, "documentation": { "docstring": "\n \n \n \n \n \n \n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 104, "language": "en" } }, { "id": 65637, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/controllers/item_variant.py", "file_name": "item_variant.py", "fun_name": "get_variant", "commit_message": "style: format code with black", "code": "def get_variant(template, args=None, variant=None, manufacturer=None, manufacturer_part_no=None):\n\t\n\titem_template = frappe.get_doc(\"Item\", template)\n\n\tif item_template.variant_based_on == \"Manufacturer\" and manufacturer:\n\t\treturn make_variant_based_on_manufacturer(item_template, manufacturer, manufacturer_part_no)\n\telse:\n\t\tif isinstance(args, str):\n\t\t\targs = json.loads(args)\n\n\t\tif not args:\n\t\t\tfrappe.throw(_(\"Please specify at least one attribute in the Attributes table\"))\n\t\treturn find_variant(template, args, variant)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 34, "n_words": 44, "vocab_size": 40, "complexity": 5, "nloc": 10, "token_counts": 90, "n_ast_nodes": 143, "n_identifiers": 18, "d_id": 13965, "documentation": { "docstring": "Validates Attributes and their Values, then looks for an exactly\n\tmatching Item Variant\n\n\t:param item: Template Item\n\t:param args: A dictionary with \"Attribute\" as key and \"Attribute Value\" as value\n\t", "n_words": 30, "vocab_size": 26, "n_whitespaces": 26, "language": "en" } }, { "id": 63296, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", "file_name": "pyparsing.py", "fun_name": "replaceHTMLEntity", "commit_message": "upd; format", "code": "def replaceHTMLEntity(t):\n \n return _htmlEntityMap.get(t.entity)\n\n# it's easy to get these comment structures wrong - they're very common, so may as well make them available\ncStyleComment = Combine(Regex(r\"/\\*(?:[^*]|\\*(?!/))*\") + '*/').setName(\"C style comment\")\n\"Comment of the form ``/* ... */``\"\n\nhtmlComment = Regex(r\"\").setName(\"HTML comment\")\n\"Comment of the form ````\"\n\nrestOfLine = Regex(r\".*\").leaveWhitespace().setName(\"rest of line\")\ndblSlashComment = Regex(r\"//(?:\\\\\\n|[^\\n])*\").setName(\"// comment\")\n\"Comment of the form ``// ... (to end of line)``\"\n\ncppStyleComment = Combine(Regex(r\"/\\*(?:[^*]|\\*(?!/))*\") + '*/' | dblSlashComment).setName(\"C++ style comment\")\n\"Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`\"\n\njavaStyleComment = cppStyleComment\n\"Same as :class:`cppStyleComment`\"\n\npythonStyleComment = Regex(r\"#.*\").setName(\"Python style comment\")\n\"Comment of the form ``# ... 
(to end of line)``\"\n\n_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',')\n + Optional(Word(\" \\t\")\n + ~Literal(\",\") + ~LineEnd()))).streamline().setName(\"commaItem\")\ncommaSeparatedList = delimitedList(Optional(quotedString.copy() | _commasepitem, default=\"\")).setName(\"commaSeparatedList\")\n\n\n# some other useful expressions - using lower-case class name since we are really using this as a namespace", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 207, "n_words": 141, "vocab_size": 91, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 347, "n_identifiers": 30, "d_id": 13236, "documentation": { "docstring": "Helper parser action to replace common HTML entities with their special characters(Deprecated) Predefined expression of 1 or more printable words or\nquoted strings, separated by commas.\n\nThis expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`.\n", "n_words": 34, "vocab_size": 31, "n_whitespaces": 31, "language": "en" } }, { "id": 214487, "commit_id": "a3120b5179f51308d4c0c1f4865873debb566bbd", "repo": "flair", "path": "tests/test_datasets.py", "file_name": "test_datasets.py", "fun_name": "test_reading_jsonl_dataset_should_be_successful", "commit_message": "refactor: :recycle: make label_type configurable for Jsonl corpora", "code": "def test_reading_jsonl_dataset_should_be_successful(tasks_base_path):\n \n dataset = JsonlDataset(tasks_base_path / \"jsonl/train.jsonl\")\n\n assert len(dataset.sentences) == 5\n assert dataset.sentences[0].to_tagged_string() == \"This is New Berlin \"\n assert dataset.sentences[1].to_tagged_string() == \"This is New Berlin .\"\n assert dataset.sentences[2].to_tagged_string() == \"This is New Berlin . 
\"\n assert (\n dataset.sentences[3].to_tagged_string()\n == \"EU rejects German call to boycott British lamb .\"\n )\n\n", "url": "https://github.com/flairNLP/flair.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 97, "n_words": 59, "vocab_size": 37, "complexity": 1, "nloc": 10, "token_counts": 77, "n_ast_nodes": 133, "n_identifiers": 7, "d_id": 53743, "documentation": { "docstring": "\n Tests reading a JsonlDataset containing multiple tagged entries\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 15, "language": "en" } }, { "id": 167582, "commit_id": "f538568afc2c76c2d738d32e3544cf9fe6742960", "repo": "pandas", "path": "pandas/_testing/_random.py", "file_name": "_random.py", "fun_name": "rands", "commit_message": "TYP: misc return type annotations (#47558)", "code": "def rands(nchars) -> str:\n \n return \"\".join(np.random.choice(RANDS_CHARS, nchars))\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 8, "token_counts": 24, "n_ast_nodes": 42, "n_identifiers": 8, "d_id": 40041, "documentation": { "docstring": "\n Generate one random byte string.\n\n See `rands_array` if you want to create an array of random strings.\n\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 27, "language": "en" } }, { "id": 207506, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/test_adminsite.py", "file_name": "test_adminsite.py", "fun_name": "test_get_action", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_get_action(self):\n \n action_name = \"delete_selected\"\n self.assertEqual(self.site.get_action(action_name), delete_selected)\n self.site.disable_action(action_name)\n self.assertEqual(self.site.get_action(action_name), delete_selected)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 45, "n_words": 10, "vocab_size": 8, "complexity": 1, "nloc": 5, "token_counts": 47, "n_ast_nodes": 79, "n_identifiers": 8, "d_id": 51992, "documentation": { "docstring": "AdminSite.get_action() returns an action even if it's disabled.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 230881, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/_annotation.py", "file_name": "_annotation.py", "fun_name": "arrowsize", "commit_message": "switch to black .22", "code": "def arrowsize(self):\n \n return self[\"arrowsize\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 62554, "documentation": { "docstring": "\n Sets the size of the end annotation arrow head, relative to\n `arrowwidth`. 
A value of 1 (default) gives a head about 3x as\n wide as the line.\n\n The 'arrowsize' property is a number and may be specified as:\n - An int or float in the interval [0.3, inf]\n\n Returns\n -------\n int|float\n ", "n_words": 51, "vocab_size": 45, "n_whitespaces": 117, "language": "en" } }, { "id": 67797, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/get_item_details.py", "file_name": "get_item_details.py", "fun_name": "get_basic_details", "commit_message": "style: format code with black", "code": "def get_basic_details(args, item, overwrite_warehouse=True):\n\t\n\n\tif not item:\n\t\titem = frappe.get_doc(\"Item\", args.get(\"item_code\"))\n\n\tif item.variant_of:\n\t\titem.update_template_tables()\n\n\titem_defaults = get_item_defaults(item.name, args.company)\n\titem_group_defaults = get_item_group_defaults(item.name, args.company)\n\tbrand_defaults = get_brand_defaults(item.name, args.company)\n\n\tdefaults = frappe._dict(\n\t\t{\n\t\t\t\"item_defaults\": item_defaults,\n\t\t\t\"item_group_defaults\": item_group_defaults,\n\t\t\t\"brand_defaults\": brand_defaults,\n\t\t}\n\t)\n\n\twarehouse = get_item_warehouse(item, args, overwrite_warehouse, defaults)\n\n\tif args.get(\"doctype\") == \"Material Request\" and not args.get(\"material_request_type\"):\n\t\targs[\"material_request_type\"] = frappe.db.get_value(\n\t\t\t\"Material Request\", args.get(\"name\"), \"material_request_type\", cache=True\n\t\t)\n\n\texpense_account = None\n\n\tif args.get(\"doctype\") == \"Purchase Invoice\" and item.is_fixed_asset:\n\t\tfrom erpnext.assets.doctype.asset_category.asset_category import get_asset_category_account\n\n\t\texpense_account = get_asset_category_account(\n\t\t\tfieldname=\"fixed_asset_account\", item=args.item_code, company=args.company\n\t\t)\n\n\t# Set the UOM to the Default Sales UOM or Default Purchase UOM if configured in the Item Master\n\tif not args.get(\"uom\"):\n\t\tif args.get(\"doctype\") in sales_doctypes:\n\t\t\targs.uom = item.sales_uom if item.sales_uom else item.stock_uom\n\t\telif (args.get(\"doctype\") in [\"Purchase Order\", \"Purchase Receipt\", \"Purchase Invoice\"]) or (\n\t\t\targs.get(\"doctype\") == \"Material Request\" and args.get(\"material_request_type\") == \"Purchase\"\n\t\t):\n\t\t\targs.uom = item.purchase_uom if item.purchase_uom else item.stock_uom\n\t\telse:\n\t\t\targs.uom = item.stock_uom\n\n\tif args.get(\"batch_no\") and item.name != frappe.get_cached_value(\n\t\t\"Batch\", args.get(\"batch_no\"), \"item\"\n\t):\n\t\targs[\"batch_no\"] = \"\"\n\n\tout = frappe._dict(\n\t\t{\n\t\t\t\"item_code\": item.name,\n\t\t\t\"item_name\": item.item_name,\n\t\t\t\"description\": cstr(item.description).strip(),\n\t\t\t\"image\": cstr(item.image).strip(),\n\t\t\t\"warehouse\": warehouse,\n\t\t\t\"income_account\": get_default_income_account(\n\t\t\t\targs, item_defaults, item_group_defaults, brand_defaults\n\t\t\t),\n\t\t\t\"expense_account\": expense_account\n\t\t\tor get_default_expense_account(args, item_defaults, item_group_defaults, brand_defaults),\n\t\t\t\"discount_account\": get_default_discount_account(args, item_defaults),\n\t\t\t\"cost_center\": get_default_cost_center(\n\t\t\t\targs, item_defaults, item_group_defaults, brand_defaults\n\t\t\t),\n\t\t\t\"has_serial_no\": item.has_serial_no,\n\t\t\t\"has_batch_no\": item.has_batch_no,\n\t\t\t\"batch_no\": args.get(\"batch_no\"),\n\t\t\t\"uom\": args.uom,\n\t\t\t\"min_order_qty\": flt(item.min_order_qty) if args.doctype == \"Material Request\" 
else \"\",\n\t\t\t\"qty\": flt(args.qty) or 1.0,\n\t\t\t\"stock_qty\": flt(args.qty) or 1.0,\n\t\t\t\"price_list_rate\": 0.0,\n\t\t\t\"base_price_list_rate\": 0.0,\n\t\t\t\"rate\": 0.0,\n\t\t\t\"base_rate\": 0.0,\n\t\t\t\"amount\": 0.0,\n\t\t\t\"base_amount\": 0.0,\n\t\t\t\"net_rate\": 0.0,\n\t\t\t\"net_amount\": 0.0,\n\t\t\t\"discount_percentage\": 0.0,\n\t\t\t\"discount_amount\": 0.0,\n\t\t\t\"supplier\": get_default_supplier(args, item_defaults, item_group_defaults, brand_defaults),\n\t\t\t\"update_stock\": args.get(\"update_stock\")\n\t\t\tif args.get(\"doctype\") in [\"Sales Invoice\", \"Purchase Invoice\"]\n\t\t\telse 0,\n\t\t\t\"delivered_by_supplier\": item.delivered_by_supplier\n\t\t\tif args.get(\"doctype\") in [\"Sales Order\", \"Sales Invoice\"]\n\t\t\telse 0,\n\t\t\t\"is_fixed_asset\": item.is_fixed_asset,\n\t\t\t\"last_purchase_rate\": item.last_purchase_rate\n\t\t\tif args.get(\"doctype\") in [\"Purchase Order\"]\n\t\t\telse 0,\n\t\t\t\"transaction_date\": args.get(\"transaction_date\"),\n\t\t\t\"against_blanket_order\": args.get(\"against_blanket_order\"),\n\t\t\t\"bom_no\": item.get(\"default_bom\"),\n\t\t\t\"weight_per_unit\": args.get(\"weight_per_unit\") or item.get(\"weight_per_unit\"),\n\t\t\t\"weight_uom\": args.get(\"weight_uom\") or item.get(\"weight_uom\"),\n\t\t\t\"grant_commission\": item.get(\"grant_commission\"),\n\t\t}\n\t)\n\n\tif item.get(\"enable_deferred_revenue\") or item.get(\"enable_deferred_expense\"):\n\t\tout.update(calculate_service_end_date(args, item))\n\n\t# calculate conversion factor\n\tif item.stock_uom == args.uom:\n\t\tout.conversion_factor = 1.0\n\telse:\n\t\tout.conversion_factor = args.conversion_factor or get_conversion_factor(item.name, args.uom).get(\n\t\t\t\"conversion_factor\"\n\t\t)\n\n\targs.conversion_factor = out.conversion_factor\n\tout.stock_qty = out.qty * out.conversion_factor\n\targs.stock_qty = out.stock_qty\n\n\t# calculate last purchase rate\n\tif args.get(\"doctype\") in purchase_doctypes:\n\t\tfrom erpnext.buying.doctype.purchase_order.purchase_order import item_last_purchase_rate\n\n\t\tout.last_purchase_rate = item_last_purchase_rate(\n\t\t\targs.name, args.conversion_rate, item.name, out.conversion_factor\n\t\t)\n\n\t# if default specified in item is for another company, fetch from company\n\tfor d in [\n\t\t[\"Account\", \"income_account\", \"default_income_account\"],\n\t\t[\"Account\", \"expense_account\", \"default_expense_account\"],\n\t\t[\"Cost Center\", \"cost_center\", \"cost_center\"],\n\t\t[\"Warehouse\", \"warehouse\", \"\"],\n\t]:\n\t\tif not out[d[1]]:\n\t\t\tout[d[1]] = frappe.get_cached_value(\"Company\", args.company, d[2]) if d[2] else None\n\n\tfor fieldname in (\"item_name\", \"item_group\", \"brand\", \"stock_uom\"):\n\t\tout[fieldname] = item.get(fieldname)\n\n\tif args.get(\"manufacturer\"):\n\t\tpart_no = get_item_manufacturer_part_no(args.get(\"item_code\"), args.get(\"manufacturer\"))\n\t\tif part_no:\n\t\t\tout[\"manufacturer_part_no\"] = part_no\n\t\telse:\n\t\t\tout[\"manufacturer_part_no\"] = None\n\t\t\tout[\"manufacturer\"] = None\n\telse:\n\t\tdata = frappe.get_value(\n\t\t\t\"Item\", item.name, [\"default_item_manufacturer\", \"default_manufacturer_part_no\"], as_dict=1\n\t\t)\n\n\t\tif data:\n\t\t\tout.update(\n\t\t\t\t{\n\t\t\t\t\t\"manufacturer\": data.default_item_manufacturer,\n\t\t\t\t\t\"manufacturer_part_no\": data.default_manufacturer_part_no,\n\t\t\t\t}\n\t\t\t)\n\n\tchild_doctype = args.doctype + \" Item\"\n\tmeta = frappe.get_meta(child_doctype)\n\tif 
meta.get_field(\"barcode\"):\n\t\tupdate_barcode_value(out)\n\n\tif out.get(\"weight_per_unit\"):\n\t\tout[\"total_weight\"] = out.weight_per_unit * out.stock_qty\n\n\treturn out\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 322, "n_words": 468, "vocab_size": 274, "complexity": 39, "nloc": 142, "token_counts": 1097, "n_ast_nodes": 1809, "n_identifiers": 80, "d_id": 14620, "documentation": { "docstring": "\n\t:param args: {\n\t \"item_code\": \"\",\n\t \"warehouse\": None,\n\t \"customer\": \"\",\n\t \"conversion_rate\": 1.0,\n\t \"selling_price_list\": None,\n\t \"price_list_currency\": None,\n\t \"price_list_uom_dependant\": None,\n\t \"plc_conversion_rate\": 1.0,\n\t \"doctype\": \"\",\n\t \"name\": \"\",\n\t \"supplier\": None,\n\t \"transaction_date\": None,\n\t \"conversion_rate\": 1.0,\n\t \"buying_price_list\": None,\n\t \"is_subcontracted\": \"Yes\" / \"No\",\n\t \"ignore_pricing_rule\": 0/1\n\t \"project\": \"\",\n\t barcode: \"\",\n\t serial_no: \"\",\n\t currency: \"\",\n\t update_stock: \"\",\n\t price_list: \"\",\n\t company: \"\",\n\t order_type: \"\",\n\t is_pos: \"\",\n\t project: \"\",\n\t qty: \"\",\n\t stock_qty: \"\",\n\t conversion_factor: \"\",\n\t against_blanket_order: 0/1\n\t }\n\t:param item: `item_code` of Item object\n\t:return: frappe._dict\n\t", "n_words": 74, "vocab_size": 47, "n_whitespaces": 528, "language": "en" } }, { "id": 184066, "commit_id": "0ba3ffb1718bdd01a5136fd1bc30e8ed58e6a47c", "repo": "textual", "path": "src/textual/widget.py", "file_name": "widget.py", "fun_name": "outer_size", "commit_message": "size properties", "code": "def outer_size(self) -> Size:\n \n return self._size\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 12, "n_ast_nodes": 22, "n_identifiers": 4, "d_id": 44455, "documentation": { "docstring": "The size of the widget (including padding and border).", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 245233, "commit_id": "665b55f6768dd0c2c32f8e73cd3069eddc1677b0", "repo": "mmdetection", "path": "tests/test_models/test_dense_heads/test_retina_sepBN_head.py", "file_name": "test_retina_sepBN_head.py", "fun_name": "test_retina_sepbn_head_loss", "commit_message": "[Refactor] Refactor NAS-FPN and anchor-free", "code": "def test_retina_sepbn_head_loss(self):\n \n s = 256\n img_metas = [{\n 'img_shape': (s, s, 3),\n 'pad_shape': (s, s, 3),\n 'scale_factor': 1,\n }]\n\n cfg = Config(\n dict(\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.5,\n neg_iou_thr=0.4,\n min_pos_iou=0,\n ignore_iof_thr=-1),\n sampler=dict(type='PseudoSampler'\n ), # Focal loss should use PseudoSampler\n allowed_border=-1,\n pos_weight=-1,\n debug=False))\n anchor_head = RetinaSepBNHead(\n num_classes=4, num_ins=5, in_channels=1, train_cfg=cfg)\n\n # Anchor head expects a multiple levels of features per image\n feats = []\n for i in range(len(anchor_head.prior_generator.strides)):\n feats.append(\n torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2))))\n\n cls_scores, bbox_preds = anchor_head.forward(tuple(feats))\n\n # Test that empty ground truth encourages the network to\n # predict background\n gt_instances = InstanceData()\n gt_instances.bboxes = torch.empty((0, 4))\n gt_instances.labels = torch.LongTensor([])\n\n empty_gt_losses = 
anchor_head.loss_by_feat(cls_scores, bbox_preds,\n [gt_instances], img_metas)\n # When there is no truth, the cls loss should be nonzero but\n # there should be no box loss.\n empty_cls_loss = sum(empty_gt_losses['loss_cls'])\n empty_box_loss = sum(empty_gt_losses['loss_bbox'])\n self.assertGreater(empty_cls_loss.item(), 0,\n 'cls loss should be non-zero')\n self.assertEqual(\n empty_box_loss.item(), 0,\n 'there should be no box loss when there are no true boxes')\n\n # When truth is non-empty then both cls and box loss\n # should be nonzero for random inputs\n gt_instances = InstanceData()\n gt_instances.bboxes = torch.Tensor(\n [[23.6667, 23.8757, 238.6326, 151.8874]])\n gt_instances.labels = torch.LongTensor([2])\n\n one_gt_losses = anchor_head.loss_by_feat(cls_scores, bbox_preds,\n [gt_instances], img_metas)\n onegt_cls_loss = sum(one_gt_losses['loss_cls'])\n onegt_box_loss = sum(one_gt_losses['loss_bbox'])\n self.assertGreater(onegt_cls_loss.item(), 0,\n 'cls loss should be non-zero')\n self.assertGreater(onegt_box_loss.item(), 0,\n 'box loss should be non-zero')\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 929, "n_words": 216, "vocab_size": 136, "complexity": 2, "nloc": 51, "token_counts": 364, "n_ast_nodes": 592, "n_identifiers": 54, "d_id": 70717, "documentation": { "docstring": "Tests RetinaSepBN head loss when truth is empty and non-empty.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 248471, "commit_id": "5949ab86f8db0ef3dac2063e42210030f17786fb", "repo": "synapse", "path": "synapse/rest/media/v1/thumbnailer.py", "file_name": "thumbnailer.py", "fun_name": "transpose", "commit_message": "Fix potential thumbnail memory leaks. (#12932)", "code": "def transpose(self) -> Tuple[int, int]:\n \n if self.transpose_method is not None:\n # Safety: `transpose` takes an int rather than e.g. 
an IntEnum.\n # self.transpose_method is set above to be a value in\n # EXIF_TRANSPOSE_MAPPINGS, and that only contains correct values.\n with self.image:\n self.image = self.image.transpose(self.transpose_method) # type: ignore[arg-type]\n self.width, self.height = self.image.size\n self.transpose_method = None\n # We don't need EXIF any more\n self.image.info[\"exif\"] = None\n return self.image.size\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 191, "n_words": 66, "vocab_size": 53, "complexity": 2, "nloc": 13, "token_counts": 74, "n_ast_nodes": 125, "n_identifiers": 10, "d_id": 72299, "documentation": { "docstring": "Transpose the image using its EXIF Orientation tag\n\n Returns:\n A tuple containing the new image size in pixels as (width, height).\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 46, "language": "en" } }, { "id": 118707, "commit_id": "5f39da13c0c551533a6d313dd0e2f6f9f0f9a5ac", "repo": "streamlit", "path": "lib/tests/streamlit/cli_test.py", "file_name": "cli_test.py", "fun_name": "test_get_command_line", "commit_message": "Get rid of preheated script runs (#4259)\n\n* Get rid of preheated script runs\r\n\r\nWhen a streamlit server is first started, we currently trigger a run of the\r\nscript defining an app and save the resulting deltas so that the very first\r\npage load of an app can be more or less instantaneous.\r\n\r\nThis optimization is currently not too helpful given how streamlit is used in\r\npractice today (it was originally added to make long-running jobs started\r\nvia `streamlit run` feel fast, but people generally don't use streamlit to kick\r\noff long-running computations). Furthermore, we'll soon be adding some features\r\nthat won't play nicely with the optimization. 
In particular, the upcoming\r\n`st.user` feature interacts with script preheats weirdly as the information\r\nrequired to populate `st.user` doesn't exist in a preheat run.\r\n\r\nGiven complications that will arise in the near-future as well as the fact that\r\nthe optimization itself is a vestigial one, it seems like it's time to remove\r\npreheated script runs.\r\n\r\n* Rework cli_smoke_tests to no longer rely on script preheats\r\n\r\n* Try making tests less timing sensitive\r\n\r\n* Revert an unintended change in an e2e test script\r\n\r\n* Replace `%` usage with an f-string", "code": "def test_get_command_line(self):\n \n mock_context = MagicMock()\n mock_context.parent.command_path = \"streamlit\"\n with patch(\"click.get_current_context\", return_value=mock_context):\n with patch(\"click.get_os_args\", return_value=[\"os_arg1\", \"os_arg2\"]):\n result = cli._get_command_line_as_string()\n self.assertEqual(\"streamlit os_arg1 os_arg2\", result)\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 91, "n_words": 22, "vocab_size": 19, "complexity": 1, "nloc": 7, "token_counts": 57, "n_ast_nodes": 108, "n_identifiers": 12, "d_id": 26370, "documentation": { "docstring": "Test that _get_command_line_as_string correctly concatenates values\n from click.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 22, "language": "en" } }, { "id": 155388, "commit_id": "dc7abf04518230d102bb5272c5ebf9fe20092338", "repo": "modin", "path": "modin/core/storage_formats/base/query_compiler.py", "file_name": "query_compiler.py", "fun_name": "get_positions_from_labels", "commit_message": "REFACTOR-#5202: Pass loc arguments to query compiler. (#5305)\n\nSome Modin implementations may prefer to take rows and columns by label rather than by position.\r\n\r\nSigned-off-by: mvashishtha ", "code": "def get_positions_from_labels(self, row_loc, col_loc):\n \n from modin.pandas.indexing import (\n is_boolean_array,\n is_list_like,\n is_range_like,\n boolean_mask_to_numeric,\n )\n\n lookups = []\n for axis, axis_loc in enumerate((row_loc, col_loc)):\n if is_scalar(axis_loc):\n axis_loc = np.array([axis_loc])\n if isinstance(axis_loc, slice) or is_range_like(axis_loc):\n if isinstance(axis_loc, slice) and axis_loc == slice(None):\n axis_lookup = axis_loc\n else:\n axis_labels = self.get_axis(axis)\n # `slice_indexer` returns a fully-defined numeric slice for a non-fully-defined labels-based slice\n axis_lookup = axis_labels.slice_indexer(\n axis_loc.start, axis_loc.stop, axis_loc.step\n )\n # Converting negative indices to their actual positions:\n axis_lookup = pandas.RangeIndex(\n start=(\n axis_lookup.start\n if axis_lookup.start >= 0\n else axis_lookup.start + len(axis_labels)\n ),\n stop=(\n axis_lookup.stop\n if axis_lookup.stop >= 0\n else axis_lookup.stop + len(axis_labels)\n ),\n step=axis_lookup.step,\n )\n elif self.has_multiindex(axis):\n # `Index.get_locs` raises an IndexError by itself if missing labels were provided,\n # we don't have to do missing-check for the received `axis_lookup`.\n if isinstance(axis_loc, pandas.MultiIndex):\n axis_lookup = self.get_axis(axis).get_indexer_for(axis_loc)\n else:\n axis_lookup = self.get_axis(axis).get_locs(axis_loc)\n elif is_boolean_array(axis_loc):\n axis_lookup = boolean_mask_to_numeric(axis_loc)\n else:\n axis_labels = self.get_axis(axis)\n if is_list_like(axis_loc) and not isinstance(\n axis_loc, (np.ndarray, pandas.Index)\n ):\n # `Index.get_indexer_for` works 
much faster with numpy arrays than with python lists,\n # so although we lose some time here on converting to numpy, `Index.get_indexer_for`\n # speedup covers the loss that we gain here.\n axis_loc = np.array(axis_loc, dtype=axis_labels.dtype)\n axis_lookup = axis_labels.get_indexer_for(axis_loc)\n # `Index.get_indexer_for` sets -1 value for missing labels, we have to verify whether\n # there are any -1 in the received indexer to raise a KeyError here.\n missing_mask = axis_lookup == -1\n if missing_mask.any():\n missing_labels = (\n axis_loc[missing_mask]\n if is_list_like(axis_loc)\n # If `axis_loc` is not a list-like then we can't select certain\n # labels that are missing and so printing the whole indexer\n else axis_loc\n )\n raise KeyError(missing_labels)\n\n if isinstance(axis_lookup, pandas.Index) and not is_range_like(axis_lookup):\n axis_lookup = axis_lookup.values\n\n lookups.append(axis_lookup)\n return lookups\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 1449, "n_words": 274, "vocab_size": 160, "complexity": 18, "nloc": 58, "token_counts": 353, "n_ast_nodes": 557, "n_identifiers": 42, "d_id": 36372, "documentation": { "docstring": "\n Compute index and column positions from their respective locators.\n\n Inputs to this method are arguments the the pandas user could pass to loc.\n This function will compute the corresponding index and column positions\n that the user could equivalently pass to iloc.\n\n Parameters\n ----------\n row_loc : scalar, slice, list, array or tuple\n Row locator.\n col_loc : scalar, slice, list, array or tuple\n Columns locator.\n\n Returns\n -------\n row_lookup : slice(None) if full axis grab, pandas.RangeIndex if repetition is detected, numpy.ndarray otherwise\n List of index labels.\n col_lookup : slice(None) if full axis grab, pandas.RangeIndex if repetition is detected, numpy.ndarray otherwise\n List of columns labels.\n\n Notes\n -----\n Usage of `slice(None)` as a resulting lookup is a hack to pass information about\n full-axis grab without computing actual indices that triggers lazy computations.\n Ideally, this API should get rid of using slices as indexers and either use a\n common ``Indexer`` object or range and ``np.ndarray`` only.\n ", "n_words": 150, "vocab_size": 98, "n_whitespaces": 328, "language": "en" } }, { "id": 215907, "commit_id": "a8d2d1e1397cdc79b2c5f1ad7f6e3b729dcf8857", "repo": "salt", "path": "tests/pytests/functional/modules/test_win_certutil.py", "file_name": "test_win_certutil.py", "fun_name": "test_get_stored_cert_serials", "commit_message": "Add tests, fix state module", "code": "def test_get_stored_cert_serials(certutil, populate_store):\n \n serials = certutil.get_stored_cert_serials(\"TrustedPublisher\")\n assert \"5be1cc5d51b78dbd49a0b7c00d44806d\" in serials\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 19, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 38, "n_identifiers": 5, "d_id": 54240, "documentation": { "docstring": "\n Test get_stored_cert_serials with a certificate we put in\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 15, "language": "en" } }, { "id": 176499, "commit_id": "f6755ffa00211b523c6c0bec5398bc6c3c43c8b1", "repo": "networkx", "path": "networkx/readwrite/sparse6.py", "file_name": "sparse6.py", "fun_name": "_generate_sparse6_bytes", 
"commit_message": "Update black (#5438)\n\n* CI: sync up black dev requirements version with precommit\r\n\r\n* Run black\r\n\r\nCo-authored-by: Jarrod Millman ", "code": "def _generate_sparse6_bytes(G, nodes, header):\n \n n = len(G)\n if n >= 2**36:\n raise ValueError(\n \"sparse6 is only defined if number of nodes is less \" \"than 2 ** 36\"\n )\n if header:\n yield b\">>sparse6<<\"\n yield b\":\"\n for d in n_to_data(n):\n yield str.encode(chr(d + 63))\n\n k = 1\n while 1 << k < n:\n k += 1\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 125, "n_words": 55, "vocab_size": 44, "complexity": 15, "nloc": 49, "token_counts": 393, "n_ast_nodes": 122, "n_identifiers": 13, "d_id": 41938, "documentation": { "docstring": "Yield bytes in the sparse6 encoding of a graph.\n\n `G` is an undirected simple graph. `nodes` is the list of nodes for\n which the node-induced subgraph will be encoded; if `nodes` is the\n list of all nodes in the graph, the entire graph will be\n encoded. `header` is a Boolean that specifies whether to generate\n the header ``b'>>sparse6<<'`` before the remaining data.\n\n This function generates `bytes` objects in the following order:\n\n 1. the header (if requested),\n 2. the encoding of the number of nodes,\n 3. each character, one-at-a-time, in the encoding of the requested\n node-induced subgraph,\n 4. a newline character.\n\n This function raises :exc:`ValueError` if the graph is too large for\n the graph6 format (that is, greater than ``2 ** 36`` nodes).\n\n ", "n_words": 122, "vocab_size": 78, "n_whitespaces": 167, "language": "en" } }, { "id": 261741, "commit_id": "f2c78fe8c5cf2576f8351238c55dace23fb1d691", "repo": "scikit-learn", "path": "sklearn/datasets/_base.py", "file_name": "_base.py", "fun_name": "load_linnerud", "commit_message": "MAINT handle deprecations from `importlib.resources` (#25157)\n\n\r\n\r\nCo-authored-by: Guillaume Lemaitre ", "code": "def load_linnerud(*, return_X_y=False, as_frame=False):\n \n data_filename = \"linnerud_exercise.csv\"\n target_filename = \"linnerud_physiological.csv\"\n\n # Read header and data\n with _open_text(DATA_MODULE, data_filename) as f:\n header_exercise = f.readline().split()\n f.seek(0) # reset file obj\n data_exercise = np.loadtxt(f, skiprows=1)\n\n with _open_text(DATA_MODULE, target_filename) as f:\n header_physiological = f.readline().split()\n f.seek(0) # reset file obj\n data_physiological = np.loadtxt(f, skiprows=1)\n\n fdescr = load_descr(\"linnerud.rst\")\n\n frame = None\n if as_frame:\n (frame, data_exercise, data_physiological) = _convert_data_dataframe(\n \"load_linnerud\",\n data_exercise,\n data_physiological,\n header_exercise,\n header_physiological,\n )\n if return_X_y:\n return data_exercise, data_physiological\n\n return Bunch(\n data=data_exercise,\n feature_names=header_exercise,\n target=data_physiological,\n target_names=header_physiological,\n frame=frame,\n DESCR=fdescr,\n data_filename=data_filename,\n target_filename=target_filename,\n data_module=DATA_MODULE,\n )\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 304, "n_words": 85, "vocab_size": 58, "complexity": 3, "nloc": 34, "token_counts": 178, "n_ast_nodes": 284, "n_identifiers": 29, "d_id": 76971, "documentation": { "docstring": "Load and return the physical exercise Linnerud dataset.\n\n This dataset is suitable for 
multi-output regression tasks.\n\n ============== ============================\n Samples total 20\n Dimensionality 3 (for both data and target)\n Features integer\n Targets integer\n ============== ============================\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n return_X_y : bool, default=False\n If True, returns ``(data, target)`` instead of a Bunch object.\n See below for more information about the `data` and `target` object.\n\n .. versionadded:: 0.18\n\n as_frame : bool, default=False\n If True, the data is a pandas DataFrame including columns with\n appropriate dtypes (numeric, string or categorical). The target is\n a pandas DataFrame or Series depending on the number of target columns.\n If `return_X_y` is True, then (`data`, `target`) will be pandas\n DataFrames or Series as described below.\n\n .. versionadded:: 0.23\n\n Returns\n -------\n data : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n data : {ndarray, dataframe} of shape (20, 3)\n The data matrix. If `as_frame=True`, `data` will be a pandas\n DataFrame.\n target: {ndarray, dataframe} of shape (20, 3)\n The regression targets. If `as_frame=True`, `target` will be\n a pandas DataFrame.\n feature_names: list\n The names of the dataset columns.\n target_names: list\n The names of the target columns.\n frame: DataFrame of shape (20, 6)\n Only present when `as_frame=True`. DataFrame with `data` and\n `target`.\n\n .. versionadded:: 0.23\n DESCR: str\n The full description of the dataset.\n data_filename: str\n The path to the location of the data.\n target_filename: str\n The path to the location of the target.\n\n .. versionadded:: 0.20\n\n (data, target) : tuple if ``return_X_y`` is True\n Returns a tuple of two ndarrays or dataframe of shape\n `(20, 3)`. Each row represents one sample and each column represents the\n features in `X` and a target in `y` of a given sample.\n\n .. versionadded:: 0.18\n ", "n_words": 284, "vocab_size": 153, "n_whitespaces": 658, "language": "en" } }, { "id": 133361, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/sgd/torch/training_operator.py", "file_name": "training_operator.py", "fun_name": "train_epoch", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def train_epoch(self, iterator, info=None, num_steps=None, epoch_idx=0):\n ", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "\"\"\"Runs one standard training pass over the training dataloader.\n\n Bythis method will iterate over the givencall ``self.train_batch`` over each batch. Ifscheduler_step_freqis set, this default method will also step the scheduler accordingly.\n\n You do not need to call ``train_batch`` in this method if you plan\n to implement a custom optimization/training routine here.\n\n You may find ``ray.util.sgd.utils.AverageMeterCollection`` useful\n when overriding this method. See example below:\n\n .. code-block:: pythonthis default method will also step the scheduler accordingly.\n\n You do not need to calltrain_batchthisyou plan\n to implement a custom optimization/training routine here.\n\n You may find ``ray.util.sgd.utils.AverageMeterCollection`` useful\n when overriding this method. See example below:\n\n .. code-block::training routine here.\n\n You may find ``ray.util.sgd.utils.AverageMeterCollection`` useful\n when overriding this method. 
See exampleroutine here", "n_ast_errors": 11, "ast_levels": 12, "n_whitespaces": 13, "n_words": 6, "vocab_size": 6, "complexity": 19, "nloc": 46, "token_counts": 318, "n_ast_nodes": 177, "n_identifiers": 61, "d_id": 29990, "documentation": { "docstring": "Runs one standard training pass over the training dataloader.\n\n By default, this method will iterate over the given iterator and\n call ``self.train_batch`` over each batch. If ``scheduler_step_freq``\n is set, this default method will also step the scheduler accordingly.\n\n You do not need to call ``train_batch`` in this method if you plan\n to implement a custom optimization/training routine here.\n\n You may find ``ray.util.sgd.utils.AverageMeterCollection`` useful\n when overriding this method. See example below:\n\n .. code-block:: python\n", "n_words": 73, "vocab_size": 59, "n_whitespaces": 128, "language": "en" } }, { "id": 196428, "commit_id": "59d22b6bb7287613d598611027f640d068ca5748", "repo": "sympy", "path": "sympy/solvers/solvers.py", "file_name": "solvers.py", "fun_name": "solve_linear_system_LU", "commit_message": "Moved imports to higher level", "code": "def solve_linear_system_LU(matrix, syms):\n \n if matrix.rows != matrix.cols - 1:\n raise ValueError(\"Rows should be equal to columns - 1\")\n A = matrix[:matrix.rows, :matrix.rows]\n b = matrix[:, matrix.cols - 1:]\n soln = A.LUsolve(b)\n solutions = {}\n for i in range(soln.rows):\n solutions[syms[i]] = soln[i, 0]\n return solutions\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 82, "n_words": 44, "vocab_size": 36, "complexity": 3, "nloc": 10, "token_counts": 89, "n_ast_nodes": 140, "n_identifiers": 13, "d_id": 47928, "documentation": { "docstring": "\n Solves the augmented matrix system using ``LUsolve`` and returns a\n dictionary in which solutions are keyed to the symbols of *syms* as ordered.\n\n Explanation\n ===========\n\n The matrix must be invertible.\n\n Examples\n ========\n\n >>> from sympy import Matrix, solve_linear_system_LU\n >>> from sympy.abc import x, y, z\n\n >>> solve_linear_system_LU(Matrix([\n ... [1, 2, 0, 1],\n ... [3, 2, 2, 1],\n ... [2, 0, 0, 1]]), [x, y, z])\n {x: 1/2, y: 1/4, z: -1/2}\n\n See Also\n ========\n\n LUsolve\n\n ", "n_words": 75, "vocab_size": 60, "n_whitespaces": 130, "language": "en" } }, { "id": 232763, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/updatemenu/_button.py", "file_name": "_button.py", "fun_name": "args2", "commit_message": "switch to black .22", "code": "def args2(self):\n \n return self[\"args2\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 64207, "documentation": { "docstring": "\n Sets a 2nd set of `args`, these arguments values are passed to\n the Plotly method set in `method` when clicking this button\n while in the active state. 
Use this to create toggle buttons.\n\n The 'args2' property is an info array that may be specified as:\n\n * a list or tuple of up to 3 elements where:\n (0) The 'args2[0]' property accepts values of any type\n (1) The 'args2[1]' property accepts values of any type\n (2) The 'args2[2]' property accepts values of any type\n\n Returns\n -------\n list\n ", "n_words": 86, "vocab_size": 59, "n_whitespaces": 203, "language": "en" } }, { "id": 247579, "commit_id": "90b2327066d2343faa86c464a182b6f3c4422ecd", "repo": "synapse", "path": "tests/util/test_async_helpers.py", "file_name": "test_async_helpers.py", "fun_name": "test_suppresses_second_cancellation", "commit_message": "Add `delay_cancellation` utility function (#12180)\n\n`delay_cancellation` behaves like `stop_cancellation`, except it\r\ndelays `CancelledError`s until the original `Deferred` resolves.\r\nThis is handy for unifying cleanup paths and ensuring that uncancelled\r\ncoroutines don't use finished logcontexts.\r\n\r\nSigned-off-by: Sean Quah ", "code": "def test_suppresses_second_cancellation(self):\n \n deferred: \"Deferred[str]\" = Deferred()\n wrapper_deferred = delay_cancellation(deferred)\n\n # Cancel the new `Deferred`, twice.\n wrapper_deferred.cancel()\n wrapper_deferred.cancel()\n self.assertNoResult(wrapper_deferred)\n self.assertFalse(\n deferred.called, \"Original `Deferred` was unexpectedly cancelled\"\n )\n\n # Now make the original `Deferred` fail.\n # The `Failure` must be consumed, otherwise unwanted tracebacks will be printed\n # in logs.\n deferred.errback(ValueError(\"abc\"))\n self.assertIsNone(deferred.result, \"`Failure` was not consumed\")\n\n # Now that the original `Deferred` has failed, we should get a `CancelledError`.\n self.failureResultOf(wrapper_deferred, CancelledError)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 192, "n_words": 69, "vocab_size": 55, "complexity": 1, "nloc": 12, "token_counts": 72, "n_ast_nodes": 133, "n_identifiers": 16, "d_id": 71755, "documentation": { "docstring": "Test that a second cancellation is suppressed.\n\n Identical to `test_cancellation` except the new `Deferred` is cancelled twice.\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 31, "language": "en" } }, { "id": 206284, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/template/loader_tags.py", "file_name": "loader_tags.py", "fun_name": "do_extends", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def do_extends(parser, token):\n \n bits = token.split_contents()\n if len(bits) != 2:\n raise TemplateSyntaxError(\"'%s' takes one argument\" % bits[0])\n bits[1] = construct_relative_path(parser.origin.template_name, bits[1])\n parent_name = parser.compile_filter(bits[1])\n nodelist = parser.parse()\n if nodelist.get_nodes_by_type(ExtendsNode):\n raise TemplateSyntaxError(\n \"'%s' cannot appear more than once in the same template\" % bits[0]\n )\n return ExtendsNode(nodelist, parent_name)\n\n\n@register.tag(\"include\")", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "@register.tag(\"include\")", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 103, "n_words": 48, "vocab_size": 42, "complexity": 3, "nloc": 12, "token_counts": 94, "n_ast_nodes": 166, "n_identifiers": 18, "d_id": 51466, "documentation": { "docstring": "\n Signal that this template extends a parent template.\n\n This tag may be used in 
two ways: ``{% extends \"base\" %}`` (with quotes)\n uses the literal value \"base\" as the name of the parent template to extend,\n or ``{% extends variable %}`` uses the value of ``variable`` as either the\n name of the parent template to extend (if it evaluates to a string) or as\n the parent template itself (if it evaluates to a Template object).\n ", "n_words": 75, "vocab_size": 42, "n_whitespaces": 97, "language": "en" } }, { "id": 276298, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/utils_v1/export_utils.py", "file_name": "export_utils.py", "fun_name": "get_temp_export_dir", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def get_temp_export_dir(timestamped_export_dir):\n \n (dirname, basename) = os.path.split(timestamped_export_dir)\n if isinstance(basename, bytes):\n str_name = basename.decode(\"utf-8\")\n else:\n str_name = str(basename)\n temp_export_dir = tf.io.gfile.join(\n tf.compat.as_bytes(dirname),\n tf.compat.as_bytes(\"temp-{}\".format(str_name)),\n )\n return temp_export_dir\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 73, "n_words": 24, "vocab_size": 19, "complexity": 2, "nloc": 11, "token_counts": 80, "n_ast_nodes": 132, "n_identifiers": 20, "d_id": 81620, "documentation": { "docstring": "Builds a directory name based on the argument but starting with 'temp-'.\n\n This relies on the fact that TensorFlow Serving ignores subdirectories of\n the base directory that can't be parsed as integers.\n\n Args:\n timestamped_export_dir: the name of the eventual export directory, e.g.\n /foo/bar/\n\n Returns:\n A sister directory prefixed with 'temp-', e.g. /foo/bar/temp-.\n ", "n_words": 52, "vocab_size": 40, "n_whitespaces": 84, "language": "en" } }, { "id": 249857, "commit_id": "4ae967cf6308e80b03da749f0cbaed36988e235e", "repo": "synapse", "path": "tests/util/caches/test_deferred_cache.py", "file_name": "test_deferred_cache.py", "fun_name": "test_callbacks", "commit_message": "Add missing type hints to test.util.caches (#14529)", "code": "def test_callbacks(self) -> None:\n \n cache: DeferredCache[str, int] = DeferredCache(\"test\")\n callbacks = set()\n\n # start with an entry, with a callback\n cache.prefill(\"k1\", 10, callback=lambda: callbacks.add(\"prefill\"))\n\n # now replace that entry with a pending result\n origin_d: \"defer.Deferred[int]\" = defer.Deferred()\n set_d = cache.set(\"k1\", origin_d, callback=lambda: callbacks.add(\"set\"))\n\n # ... 
and also make a get request\n get_d = cache.get(\"k1\", callback=lambda: callbacks.add(\"get\"))\n\n # we don't expect the invalidation callback for the original value to have\n # been called yet, even though get() will now return a different result.\n # I'm not sure if that is by design or not.\n self.assertEqual(callbacks, set())\n\n # now fire off all the deferreds\n origin_d.callback(20)\n self.assertEqual(self.successResultOf(set_d), 20)\n self.assertEqual(self.successResultOf(get_d), 20)\n\n # now the original invalidation callback should have been called, but none of\n # the others\n self.assertEqual(callbacks, {\"prefill\"})\n callbacks.clear()\n\n # another update should invalidate both the previous results\n cache.prefill(\"k1\", 30)\n self.assertEqual(callbacks, {\"set\", \"get\"})\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 315, "n_words": 140, "vocab_size": 100, "complexity": 1, "nloc": 16, "token_counts": 171, "n_ast_nodes": 300, "n_identifiers": 20, "d_id": 73173, "documentation": { "docstring": "Invalidation callbacks are called at the right time", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 257346, "commit_id": "f6e3a639063887f9f5b27f574a04c7fe602b3185", "repo": "haystack", "path": "haystack/pipelines/base.py", "file_name": "base.py", "fun_name": "components", "commit_message": "Prevent losing names of utilized components when loaded from config (#2525)\n\n* Prevent losing names of utilized components when loaded from config\r\n\r\n* Update Documentation & Code Style\r\n\r\n* update test\r\n\r\n* fix failing tests\r\n\r\n* Update Documentation & Code Style\r\n\r\n* fix even more tests\r\n\r\n* Update Documentation & Code Style\r\n\r\n* incorporate review feedback\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def components(self) -> Dict[str, BaseComponent]:\n \n all_components = self._find_all_components()\n return {component.name: component for component in all_components if component.name is not None}\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 41, "n_words": 20, "vocab_size": 18, "complexity": 3, "nloc": 7, "token_counts": 39, "n_ast_nodes": 61, "n_identifiers": 9, "d_id": 75070, "documentation": { "docstring": "\n Returns all components used by this pipeline.\n Note that this also includes such components that are being utilized by other components only and are not being used as a pipeline node directly.\n ", "n_words": 32, "vocab_size": 24, "n_whitespaces": 54, "language": "en" } }, { "id": 44076, "commit_id": "10f5db863e387c0fd7369cf521d624b6df77a65d", "repo": "airflow", "path": "airflow/models/baseoperator.py", "file_name": "baseoperator.py", "fun_name": "set_xcomargs_dependencies", "commit_message": "Set dependencies in MappedOperator via XComArgs (#20931)\n\nCo-authored-by: Kaxil Naik \r\nCo-authored-by: Ephraim Anierobi ", "code": "def set_xcomargs_dependencies(self) -> None:\n \n from airflow.models.xcom_arg import XComArg\n\n for field in self.template_fields:\n if hasattr(self, field):\n arg = getattr(self, field)\n XComArg.apply_upstream_relationship(self, arg)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 83, "n_words": 21, "vocab_size": 21, 
"complexity": 3, "nloc": 26, "token_counts": 47, "n_ast_nodes": 73, "n_identifiers": 12, "d_id": 8139, "documentation": { "docstring": "\n Resolves upstream dependencies of a task. In this way passing an ``XComArg``\n as value for a template field will result in creating upstream relation between\n two tasks.\n\n **Example**: ::\n\n with DAG(...):\n generate_content = GenerateContentOperator(task_id=\"generate_content\")\n send_email = EmailOperator(..., html_content=generate_content.output)\n\n # This is equivalent to\n with DAG(...):\n generate_content = GenerateContentOperator(task_id=\"generate_content\")\n send_email = EmailOperator(\n ..., html_content=\"{{ task_instance.xcom_pull('generate_content') }}\"\n )\n generate_content >> send_email\n\n ", "n_words": 59, "vocab_size": 47, "n_whitespaces": 237, "language": "en" } }, { "id": 101059, "commit_id": "582c2ce40c11ef235dd3f9100f70e1e2832f8dd3", "repo": "faceswap", "path": "lib/model/loss/perceptual_loss_plaid.py", "file_name": "perceptual_loss_plaid.py", "fun_name": "_hyab", "commit_message": "Add Flip Loss Function\n - Add Flip for AMD and TF\n - Split Perceptual Loss functions to own modules\n - Fix allowed input shape for models\n - Allow GUI tooltip to display at higher width", "code": "def _hyab(self, y_true, y_pred):\n \n delta = y_true - y_pred\n root = K.sqrt(K.clip(K.pow(delta[..., 0:1], 2), self._epsilon, None))\n delta_norm = frobenius_norm(delta[..., 1:3])\n return root + delta_norm\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 59, "n_words": 24, "vocab_size": 20, "complexity": 1, "nloc": 5, "token_counts": 65, "n_ast_nodes": 97, "n_identifiers": 13, "d_id": 20496, "documentation": { "docstring": " Compute the HyAB distance between true and predicted images.\n\n Parameters\n ----------\n y_true: :class:`plaidml.tile.Value`\n The ground truth batch of images in standard or Hunt-adjusted L*A*B* color space\n y_pred: :class:`plaidml.tile.Value`\n The predicted batch of images in in standard or Hunt-adjusted L*A*B* color space\n\n Returns\n -------\n :class:`plaidml.tile.Value`\n image tensor containing the per-pixel HyAB distances between true and predicted images\n ", "n_words": 56, "vocab_size": 34, "n_whitespaces": 146, "language": "en" } }, { "id": 3357, "commit_id": "f83eca58eaf2129d21b5796a301732ab22675130", "repo": "airbyte", "path": "airbyte-cdk/python/unit_tests/sources/test_abstract_source.py", "file_name": "test_abstract_source.py", "fun_name": "test_valid_full_refresh_read_no_slices", "commit_message": "CDK: Fix typing errors (#9037)\n\n* fix typing, drop AirbyteLogger\r\n\r\n* format\r\n\r\n* bump the version\r\n\r\n* use logger instead of fixture logger\r\n\r\nCo-authored-by: Eugene Kulak \r\nCo-authored-by: auganbay ", "code": "def test_valid_full_refresh_read_no_slices(mocker):\n \n stream_output = [{\"k1\": \"v1\"}, {\"k2\": \"v2\"}]\n s1 = MockStream([({\"sync_mode\": SyncMode.full_refresh}, stream_output)], name=\"s1\")\n s2 = MockStream([({\"sync_mode\": SyncMode.full_refresh}, stream_output)], name=\"s2\")\n\n mocker.patch.object(MockStream, \"get_json_schema\", return_value={})\n\n src = MockSource(streams=[s1, s2])\n catalog = ConfiguredAirbyteCatalog(\n streams=[_configured_stream(s1, SyncMode.full_refresh), _configured_stream(s2, SyncMode.full_refresh)]\n )\n\n expected = _as_records(\"s1\", stream_output) + _as_records(\"s2\", stream_output)\n messages = _fix_emitted_at(list(src.read(logger, {}, 
catalog)))\n\n assert expected == messages\n\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 91, "n_words": 51, "vocab_size": 39, "complexity": 1, "nloc": 12, "token_counts": 156, "n_ast_nodes": 256, "n_identifiers": 25, "d_id": 459, "documentation": { "docstring": "Tests that running a full refresh sync on streams which don't specify slices produces the expected AirbyteMessages", "n_words": 17, "vocab_size": 17, "n_whitespaces": 16, "language": "en" } }, { "id": 22419, "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", "repo": "Python", "path": "Eight_Puzzle_Solver/eight_puzzle.py", "file_name": "eight_puzzle.py", "fun_name": "depth_first_search", "commit_message": "refactor: clean code\n\nSigned-off-by: slowy07 ", "code": "def depth_first_search(self):\n \n if self.isSolvable() == False:\n return (None, None)\n closed = list()\n q = list()\n q.append(Node(state=self.state, depth=0))\n while q:\n node = q.pop()\n if node.isGoalState():\n return (node.moves, len(closed))\n if node.state not in closed:\n closed.append(node.state)\n for action in node.getAvailableActions():\n q.append(node.getResultFromAction(action))\n\n return (None, None)\n", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 198, "n_words": 41, "vocab_size": 31, "complexity": 6, "nloc": 15, "token_counts": 118, "n_ast_nodes": 190, "n_identifiers": 18, "d_id": 4325, "documentation": { "docstring": "\n Parameters: State\n Returns: List of Moves to solve the state, otherwise None if unsolvable\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 36, "language": "en" } }, { "id": 128240, "commit_id": "65d0c0aa48be8f9f7faae857d3ab71444997755a", "repo": "ray", "path": "python/ray/serve/_private/deployment_state.py", "file_name": "deployment_state.py", "fun_name": "update", "commit_message": "[Serve] add alpha gRPC support (#28175)", "code": "def update(self) -> bool:\n \n try:\n # Add or remove DeploymentReplica instances in self._replicas.\n # This should be the only place we adjust total number of replicas\n # we manage.\n\n running_replicas_changed = self._scale_deployment_replicas()\n\n # Check the state of existing replicas and transition if necessary.\n running_replicas_changed |= self._check_and_update_replicas()\n\n if running_replicas_changed:\n self._notify_running_replicas_changed()\n\n deleted = self._check_curr_status()\n except Exception:\n self._curr_status_info = DeploymentStatusInfo(\n name=self._name,\n status=DeploymentStatus.UNHEALTHY,\n message=\"Failed to update deployment:\" f\"\\n{traceback.format_exc()}\",\n )\n deleted = False\n\n return deleted\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 279, "n_words": 70, "vocab_size": 56, "complexity": 3, "nloc": 24, "token_counts": 72, "n_ast_nodes": 138, "n_identifiers": 20, "d_id": 28641, "documentation": { "docstring": "Attempts to reconcile this deployment to match its goal state.\n\n This is an asynchronous call; it's expected to be called repeatedly.\n\n Also updates the internal DeploymentStatusInfo based on the current\n state of the system.\n\n Returns true if this deployment was successfully deleted.\n ", "n_words": 42, "vocab_size": 36, "n_whitespaces": 77, "language": "en" } }, { "id": 246117, "commit_id": 
"d8df8e6c1432d25ea1c0310a5f2dc48d1688345f", "repo": "synapse", "path": "synapse/http/site.py", "file_name": "site.py", "fun_name": "_finished_processing", "commit_message": "Don't print HTTPStatus.* in \"Processed...\" logs (#11827)\n\n* Don't print HTTPStatus.* in \"Processed...\" logs\r\n\r\nFixes #11812. See also #7118 and\r\nhttps://github.com/matrix-org/synapse/pull/7188#r401719326 in\r\nparticular.\r\n\r\nCo-authored-by: Brendan Abolivier ", "code": "def _finished_processing(self) -> None:\n \n assert self.logcontext is not None\n assert self.finish_time is not None\n\n usage = self.logcontext.get_resource_usage()\n\n if self._processing_finished_time is None:\n # we completed the request without anything calling processing()\n self._processing_finished_time = time.time()\n\n # the time between receiving the request and the request handler finishing\n processing_time = self._processing_finished_time - self.start_time\n\n # the time between the request handler finishing and the response being sent\n # to the client (nb may be negative)\n response_send_time = self.finish_time - self._processing_finished_time\n\n user_agent = get_request_user_agent(self, \"-\")\n\n # int(self.code) looks redundant, because self.code is already an int.\n # But self.code might be an HTTPStatus (which inherits from int)---which has\n # a different string representation. So ensure we really have an integer.\n code = str(int(self.code))\n if not self.finished:\n # we didn't send the full response before we gave up (presumably because\n # the connection dropped)\n code += \"!\"\n\n log_level = logging.INFO if self._should_log_request() else logging.DEBUG\n\n # If this is a request where the target user doesn't match the user who\n # authenticated (e.g. and admin is puppetting a user) then we log both.\n requester, authenticated_entity = self.get_authenticated_entity()\n if authenticated_entity:\n requester = f\"{authenticated_entity}|{requester}\"\n\n self.synapse_site.access_logger.log(\n log_level,\n \"%s - %s - {%s}\"\n \" Processed request: %.3fsec/%.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)\"\n ' %sB %s \"%s %s %s\" \"%s\" [%d dbevts]',\n self.getClientIP(),\n self.synapse_site.site_tag,\n requester,\n processing_time,\n response_send_time,\n usage.ru_utime,\n usage.ru_stime,\n usage.db_sched_duration_sec,\n usage.db_txn_duration_sec,\n int(usage.db_txn_count),\n self.sentLength,\n code,\n self.get_method(),\n self.get_redacted_uri(),\n self.clientproto.decode(\"ascii\", errors=\"replace\"),\n user_agent,\n usage.evt_db_fetch_count,\n )\n\n # complete the opentracing span, if any.\n if self._opentracing_span:\n self._opentracing_span.finish()\n\n try:\n self.request_metrics.stop(self.finish_time, self.code, self.sentLength)\n except Exception as e:\n logger.warning(\"Failed to stop metrics: %r\", e)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 769, "n_words": 250, "vocab_size": 168, "complexity": 7, "nloc": 46, "token_counts": 262, "n_ast_nodes": 432, "n_identifiers": 50, "d_id": 71021, "documentation": { "docstring": "Log the completion of this request and update the metrics", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 134271, "commit_id": "813e1a857d5dfc060b3b6cb846157fdca425e6b0", "repo": "ray", "path": "dashboard/modules/job/tests/test_cli_integration.py", "file_name": "test_cli_integration.py", "fun_name": "test_submit_with_logs_instant_job", 
"commit_message": "Revert \"Revert \"[Job Submission][refactor 5/N] Remove the head node dependency on the `Raylet` process\"\" (#29008)\n\nReverts #28931 and fixes the tests that were made flaky by that PR.\r\n\r\n Fix address=\"auto\" in cpp job test (fixed by @Catch-Bull )\r\n Fix len_new_owner_port flakiness in test_sdk(fixed by @Catch-Bull )\r\n Fix int conversion flakiness\r\nAdditionally, this PR updates the log tailing behavior from the previous PR to return logs instantly when the job exits, to match the current behavior on master, including the case where the runtime env fails to set up. (In the previous PR, there was a timeout for waiting for the supervisor actor to start, so if the runtime env failed to set up instantly,ray job submit would still wait for the entire 60s timeout before closing the log stream and returning.)\r\n\r\nFinally, this PR updates the default scheduling behavior from the previous PR to make jobs run on the head node by default (configurable via the environment variable RAY_JOB_ALLOW_DRIVERS_ON_HEAD_NODE.). This is to avoid making a breaking behavior change unless absolutely necessary. We can update this default in the future after more discussion.\r\n\r\nIn this PR, the head node id is passed to the agent via internal KV. This is a workaround for the fact that there is no way to retrieve the head node id from within Ray (#29607)", "code": "def test_submit_with_logs_instant_job(self, ray_start_stop):\n \n cmd = \"echo hello\"\n stdout, _ = _run_cmd(f\"ray job submit -- bash -c '{cmd}'\")\n assert \"hello\" in stdout\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 49, "n_words": 21, "vocab_size": 20, "complexity": 1, "nloc": 4, "token_counts": 24, "n_ast_nodes": 49, "n_identifiers": 7, "d_id": 30235, "documentation": { "docstring": "Should exit immediately and print logs even if job returns instantly.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 279795, "commit_id": "2ed044d06d0ae552477672aa8b778f8edafb52f1", "repo": "keras", "path": "keras/saving/pickle_utils.py", "file_name": "pickle_utils.py", "fun_name": "serialize_model_as_bytecode", "commit_message": "Use new saving logic for pickling. This is somewhat cleaner since it restores the exact same model (no usage of traces). 
It may however be less convenient since it requires get_config() to be implemented and the use of a custom_object_scope.\n\nPiperOrigin-RevId: 474146108", "code": "def serialize_model_as_bytecode(model):\n \n # Note: we don't use a RAM path for this because zipfile cannot write\n # to such paths.\n temp_dir = tempfile.mkdtemp()\n try:\n filepath = os.path.join(temp_dir, \"model.keras\")\n saving_lib.save_model(model, filepath)\n with open(filepath, \"rb\") as f:\n data = f.read()\n except Exception as e:\n raise e\n else:\n return data\n finally:\n tf.io.gfile.rmtree(temp_dir)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 126, "n_words": 49, "vocab_size": 44, "complexity": 3, "nloc": 13, "token_counts": 75, "n_ast_nodes": 134, "n_identifiers": 21, "d_id": 83134, "documentation": { "docstring": "Convert a Keras Model into a bytecode representation for pickling.\n\n Args:\n model: Keras Model instance.\n\n Returns:\n Tuple that can be read by `deserialize_from_bytecode`.\n ", "n_words": 23, "vocab_size": 20, "n_whitespaces": 46, "language": "en" } }, { "id": 77547, "commit_id": "39f7886a6f8ee98db7e73ce33d94c06139f35bd8", "repo": "wagtail", "path": "wagtail/admin/widgets/chooser.py", "file_name": "chooser.py", "fun_name": "get_value_data_from_instance", "commit_message": "Split out common logic from get_value_data", "code": "def get_value_data_from_instance(self, instance):\n \n return {\n \"id\": instance.pk,\n \"edit_url\": AdminURLFinder().get_edit_url(instance),\n }\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 53, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 5, "token_counts": 28, "n_ast_nodes": 49, "n_identifiers": 6, "d_id": 16673, "documentation": { "docstring": "\n Given a model instance, return a value that we can pass to both the server-side template\n and the client-side rendering code (via telepath) that contains all the information needed\n for display. 
Typically this is a dict of id, title etc; it must be JSON-serialisable.\n ", "n_words": 44, "vocab_size": 39, "n_whitespaces": 73, "language": "en" } }, { "id": 285729, "commit_id": "c4658b63a936ad219625d30dcbd12a1aa798af09", "repo": "OpenBBTerminal", "path": "openbb_terminal/core/config/paths_helper.py", "file_name": "paths_helper.py", "fun_name": "copy_files", "commit_message": "Add path for custom_imports outside the terminal (#2567)\n\n* add log path\r\n\r\n* add test to check if log file is in correct dir\r\n\r\n* env path\r\n\r\n* black\r\n\r\n* mypy fix\r\n\r\n* add styles folder and styles from repo\r\n\r\n* add timezone as env variable\r\n\r\n* fix changes with main\r\n\r\n* fix test\r\n\r\n* flake8\r\n\r\n* fix linting\r\n\r\n* fix linting\r\n\r\n* changes\r\n\r\n* custom changes\r\n\r\n* add custom_imports outside terminal\r\n\r\n* black\r\n\r\n* black terminal\r\n\r\n* fix test\r\n\r\n* fix merge and remove styles/user\r\n\r\n* some stylistic changes and remove move_files\r\n\r\n* flake8\r\n\r\n* merge main\r\n\r\n* merge move and make into paths_helper\r\n\r\nCo-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com>", "code": "def copy_files(from_dir, to_dir):\n \n\n if from_dir.exists():\n shutil.copytree(from_dir, to_dir, dirs_exist_ok=True)\n\n\ndirs_list = [\n SETTINGS_DIRECTORY,\n USER_DATA_DIRECTORY,\n USER_DATA_DIRECTORY / \"styles\",\n CUSTOM_IMPORTS_DIRECTORY,\n CUSTOM_IMPORTS_DIRECTORY / \"econometrics\",\n]\ndirs_files = [USER_ENV_FILE, REPOSITORY_ENV_FILE]\ncreate_paths(dirs_list)\ncreate_files(dirs_files)\ncopy_files(REPOSITORY_DIRECTORY / \"custom_imports\", CUSTOM_IMPORTS_DIRECTORY)\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 53, "n_words": 31, "vocab_size": 28, "complexity": 2, "nloc": 3, "token_counts": 27, "n_ast_nodes": 109, "n_identifiers": 17, "d_id": 85399, "documentation": { "docstring": "\n Copy default/example files from the repo\n to the user data folder", "n_words": 11, "vocab_size": 10, "n_whitespaces": 17, "language": "en" } }, { "id": 154122, "commit_id": "8864bc197974da6d8cda2de2f35ca31d561be1cc", "repo": "modin", "path": "modin/core/io/column_stores/parquet_dispatcher.py", "file_name": "parquet_dispatcher.py", "fun_name": "build_query_compiler", "commit_message": "PERF-#4305: Parallelize `read_parquet` over row groups (#4700)\n\nCo-authored-by: mvashishtha ", "code": "def build_query_compiler(cls, path, columns, index_columns, **kwargs):\n \n col_partitions, column_widths = cls.build_columns(columns)\n partition_ids = cls.call_deploy(path, col_partitions, **kwargs)\n index, sync_index = cls.build_index(path, partition_ids, index_columns)\n remote_parts = cls.build_partition(partition_ids, column_widths)\n if len(partition_ids) > 0:\n row_lengths = [part.length() for part in remote_parts.T[0]]\n else:\n row_lengths = None\n frame = cls.frame_cls(\n remote_parts,\n index,\n columns,\n row_lengths=row_lengths,\n column_widths=column_widths,\n dtypes=None,\n )\n if sync_index:\n frame.synchronize_labels(axis=0)\n return cls.query_compiler_cls(frame)\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 231, "n_words": 55, "vocab_size": 44, "complexity": 4, "nloc": 20, "token_counts": 136, "n_ast_nodes": 204, "n_identifiers": 27, "d_id": 35795, "documentation": { "docstring": "\n Build query compiler from deployed 
tasks outputs.\n\n Parameters\n ----------\n path : str, path object or file-like object\n Path to the file to read.\n columns : list\n List of columns that should be read from file.\n index_columns : list\n List of index columns specified by pandas metadata.\n **kwargs : dict\n Parameters of deploying read_* function.\n\n Returns\n -------\n new_query_compiler : BaseQueryCompiler\n Query compiler with imported data for further processing.\n ", "n_words": 67, "vocab_size": 51, "n_whitespaces": 200, "language": "en" } }, { "id": 235599, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/treemap/_tiling.py", "file_name": "_tiling.py", "fun_name": "packing", "commit_message": "switch to black .22", "code": "def packing(self):\n \n return self[\"packing\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 67043, "documentation": { "docstring": "\n Determines d3 treemap solver. For more info please refer to\n https://github.com/d3/d3-hierarchy#treemap-tiling\n\n The 'packing' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['squarify', 'binary', 'dice', 'slice', 'slice-dice',\n 'dice-slice']\n\n Returns\n -------\n Any\n ", "n_words": 38, "vocab_size": 37, "n_whitespaces": 127, "language": "en" } }, { "id": 266144, "commit_id": "87fd09ca8b5a0d3ec692e241351e1bbc4ac298a7", "repo": "netbox", "path": "netbox/utilities/templatetags/helpers.py", "file_name": "helpers.py", "fun_name": "kg_to_pounds", "commit_message": "Cleanup for #9654", "code": "def kg_to_pounds(n):\n \n return float(n) * 2.204623\n\n\n@register.filter(\"startswith\")", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "@register.filter(\"startswith\")", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 12, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 38, "n_identifiers": 5, "d_id": 78308, "documentation": { "docstring": "\n Convert a weight from kilograms to pounds.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 14, "language": "en" } }, { "id": 279802, "commit_id": "571d8786df580d6daa5c57c77b5b15a125631c8f", "repo": "keras", "path": "keras/optimizers/optimizer_experimental/optimizer.py", "file_name": "optimizer.py", "fun_name": "set_weights", "commit_message": "Add method `set_weights` for optimizer backward compatibility.\n\nRemove @doc_controls.do_not_generate_docs for `variables()` method because optimizer is no longer a `tf.Module`.\n\nPiperOrigin-RevId: 474149115", "code": "def set_weights(self, weights):\n \n if not getattr(self, \"_built\", False):\n raise ValueError(\n \"You are calling `set_weights()` on an optimizer that has not \"\n \"yet been built. 
Please call \"\n \"`optimizer.build(trainable_variables)` to create the \"\n \"optimizer weights before calling `set_weights()`.\"\n )\n\n for variable, weight in zip(self._variables, weights):\n if variable.shape != weight.shape:\n raise ValueError(\n f\"Optimizer variable {self._var_key(variable)} has shape \"\n f\"{str(variable.shape)} not compatible with provided \"\n f\"weight shape {str(weight.shape)}.\"\n )\n variable.assign(weight)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 279, "n_words": 67, "vocab_size": 53, "complexity": 4, "nloc": 16, "token_counts": 66, "n_ast_nodes": 150, "n_identifiers": 13, "d_id": 83138, "documentation": { "docstring": "Set the weights of the optimizer.\n\n Args:\n weights: a list of `tf.Variable`s or numpy arrays, the target values\n of optimizer variables. It should have the same order as\n `self._variables`.\n ", "n_words": 29, "vocab_size": 24, "n_whitespaces": 84, "language": "en" } }, { "id": 47650, "commit_id": "49e336ae0302b386a2f47269a6d13988382d975f", "repo": "airflow", "path": "tests/operators/test_subdag_operator.py", "file_name": "test_subdag_operator.py", "fun_name": "test_subdag_pools", "commit_message": "Replace usage of `DummyOperator` with `EmptyOperator` (#22974)\n\n* Replace usage of `DummyOperator` with `EmptyOperator`", "code": "def test_subdag_pools(self):\n \n dag = DAG('parent', default_args=default_args)\n subdag = DAG('parent.child', default_args=default_args)\n\n session = airflow.settings.Session()\n pool_1 = airflow.models.Pool(pool='test_pool_1', slots=1)\n pool_10 = airflow.models.Pool(pool='test_pool_10', slots=10)\n session.add(pool_1)\n session.add(pool_10)\n session.commit()\n\n EmptyOperator(task_id='dummy', dag=subdag, pool='test_pool_1')\n\n with pytest.raises(AirflowException):\n SubDagOperator(task_id='child', dag=dag, subdag=subdag, pool='test_pool_1')\n\n # recreate dag because failed subdagoperator was already added\n dag = DAG('parent', default_args=default_args)\n SubDagOperator(task_id='child', dag=dag, subdag=subdag, pool='test_pool_10')\n\n session.delete(pool_1)\n session.delete(pool_10)\n session.commit()\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 183, "n_words": 53, "vocab_size": 38, "complexity": 1, "nloc": 17, "token_counts": 169, "n_ast_nodes": 287, "n_identifiers": 25, "d_id": 9191, "documentation": { "docstring": "\n Subdags and subdag tasks can't both have a pool with 1 slot\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 27, "language": "en" } }, { "id": 244183, "commit_id": "9a166a380229d2aaf5986fa1ff303a941865961a", "repo": "mmdetection", "path": "mmdet/datasets/pipelines/transforms.py", "file_name": "transforms.py", "fun_name": "__call__", "commit_message": "[Feature] Support simple copy paste with some configs. 
(#7501)\n\n* Testing pre-commit hooks\r\n\r\n* Added base code in transforms\r\n\r\n* Added Simple Copy Paste working version\r\n\r\n* Added checks to simple copy paste\r\n\r\n* refactor simplecopypaste and provide some configs\r\n\r\n* remove lvis-api in .gitignore\r\n\r\n* refactor simplecopypaste and use resize/flip/pad in load_pipeline\r\n\r\n* pre-commit\r\n\r\n* add README.md for simplecopypaste\r\n\r\n* add some unit tests\r\n\r\n* rename some variables\r\n\r\n* add a blend_fn\r\n\r\n* add some unit tests\r\n\r\n* add some comments\r\n\r\n* delete blend_fn\r\n\r\n* simplify some commits\r\n\r\nCo-authored-by: Sudarshan Kamath ", "code": "def __call__(self, results):\n \n\n assert 'mix_results' in results\n num_images = len(results['mix_results'])\n assert num_images == 1, \\\n f'CopyPaste only supports processing 2 images, got {num_images}'\n if self.selected:\n selected_results = self._select_object(results['mix_results'][0])\n else:\n selected_results = results['mix_results'][0]\n return self._copy_paste(results, selected_results)\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 117, "n_words": 35, "vocab_size": 30, "complexity": 2, "nloc": 10, "token_counts": 68, "n_ast_nodes": 116, "n_identifiers": 9, "d_id": 70272, "documentation": { "docstring": "Call function to make a copy-paste of image.\n\n Args:\n results (dict): Result dict.\n Returns:\n dict: Result dict with copy-paste transformed.\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 63, "language": "en" } }, { "id": 149525, "commit_id": "8e98a2ff9f4fabf81bf5a4f4e1f772f5c4a091ec", "repo": "freqtrade", "path": "freqtrade/persistence/models.py", "file_name": "models.py", "fun_name": "safe_quote_currency", "commit_message": "api - provide assset_currency via API", "code": "def safe_quote_currency(self) -> str:\n \n try:\n return self.stake_currency or self.pair.split('/')[1].split(':')[0]\n except IndexError:\n return ''\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 56, "n_words": 13, "vocab_size": 12, "complexity": 3, "nloc": 8, "token_counts": 39, "n_ast_nodes": 70, "n_identifiers": 7, "d_id": 34441, "documentation": { "docstring": "\n Compatibility layer for asset - which can be empty for old trades.\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 27, "language": "en" } }, { "id": 109447, "commit_id": "c73f4c455514cf5422d27bf38c93250de8316b21", "repo": "matplotlib", "path": "lib/matplotlib/axes/_base.py", "file_name": "_base.py", "fun_name": "_make_twin_axes", "commit_message": "Merge SubplotBase into AxesBase.", "code": "def _make_twin_axes(self, *args, **kwargs):\n \n if 'sharex' in kwargs and 'sharey' in kwargs:\n # The following line is added in v2.2 to avoid breaking Seaborn,\n # which currently uses this internal API.\n if kwargs[\"sharex\"] is not self and kwargs[\"sharey\"] is not self:\n raise ValueError(\"Twinned Axes may share only one axis\")\n ss = self.get_subplotspec()\n if ss:\n twin = self.figure.add_subplot(ss, *args, **kwargs)\n else:\n twin = self.figure.add_axes(\n self.get_position(True), *args, **kwargs,\n axes_locator=_TransformedBoundsLocator(\n [0, 0, 1, 1], self.transAxes))\n self.set_adjustable('datalim')\n twin.set_adjustable('datalim')\n self._twinned_axes.join(self, twin)\n return twin\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", 
"ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 260, "n_words": 78, "vocab_size": 63, "complexity": 6, "nloc": 16, "token_counts": 135, "n_ast_nodes": 222, "n_identifiers": 18, "d_id": 23592, "documentation": { "docstring": "Make a twinx Axes of self. This is used for twinx and twiny.", "n_words": 13, "vocab_size": 12, "n_whitespaces": 12, "language": "en" } }, { "id": 248078, "commit_id": "646324437543c096e737777c81b4fe4b45c3e1a7", "repo": "synapse", "path": "tests/test_utils/__init__.py", "file_name": "__init__.py", "fun_name": "setup_awaitable_errors", "commit_message": "Remove unused `# type: ignore`s (#12531)\n\nOver time we've begun to use newer versions of mypy, typeshed, stub\r\npackages---and of course we've improved our own annotations. This makes\r\nsome type ignore comments no longer necessary. I have removed them.\r\n\r\nThere was one exception: a module that imports `select.epoll`. The\r\nignore is redundant on Linux, but I've kept it ignored for those of us\r\nwho work on the source tree using not-Linux. (#11771)\r\n\r\nI'm more interested in the config line which enforces this. I want\r\nunused ignores to be reported, because I think it's useful feedback when\r\nannotating to know when you've fixed a problem you had to previously\r\nignore.\r\n\r\n* Installing extras before typechecking\r\n\r\nLacking an easy way to install all extras generically, let's bite the bullet and\r\nmake install the hand-maintained `all` extra before typechecking.\r\n\r\nNow that https://github.com/matrix-org/backend-meta/pull/6 is merged to\r\nthe release/v1 branch.", "code": "def setup_awaitable_errors() -> Callable[[], None]:\n \n warnings.simplefilter(\"error\", RuntimeWarning)\n\n # unraisablehook was added in Python 3.8.\n if not hasattr(sys, \"unraisablehook\"):\n return lambda: None\n\n # State shared between unraisablehook and check_for_unraisable_exceptions.\n unraisable_exceptions = []\n orig_unraisablehook = sys.unraisablehook\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 62, "n_words": 34, "vocab_size": 31, "complexity": 2, "nloc": 13, "token_counts": 54, "n_ast_nodes": 76, "n_identifiers": 10, "d_id": 72089, "documentation": { "docstring": "\n Convert warnings from a non-awaited coroutines into errors.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 15, "language": "en" } }, { "id": 8629, "commit_id": "d85269cd60734790a65c11673bfdd98516b62b6c", "repo": "ludwig", "path": "ludwig/data/split.py", "file_name": "split.py", "fun_name": "required_columns", "commit_message": "Use clearer error messages in ludwig serving, and enable serving to work with configs that have stratified splitting on target columns. 
(#2740)\n\n* Use clearer serving error messages, and enable serving to work with configs that have stratified splitting on target columns.\n\n* Adjust warning message", "code": "def required_columns(self) -> List[str]:\n \n return []\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 14, "n_ast_nodes": 25, "n_identifiers": 4, "d_id": 1468, "documentation": { "docstring": "Returns the list of columns that are required for splitting.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 181347, "commit_id": "51824608865b66ab04b018f55055124edbe603f3", "repo": "gradio", "path": "gradio/utils.py", "file_name": "utils.py", "fun_name": "get_local_ip_address", "commit_message": "Patching `test_get_ip` attempt 2 (#2810)\n\n* ip-patch-2\r\n\r\n* formatting\r\n\r\n* patch 2", "code": "def get_local_ip_address() -> str:\n \n try:\n ip_address = requests.get(\n \"https://checkip.amazonaws.com/\", timeout=3\n ).text.strip()\n except (requests.ConnectionError, requests.exceptions.ReadTimeout):\n ip_address = \"No internet connection\"\n return ip_address\n\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 65, "n_words": 21, "vocab_size": 18, "complexity": 2, "nloc": 9, "token_counts": 45, "n_ast_nodes": 78, "n_identifiers": 11, "d_id": 43310, "documentation": { "docstring": "Gets the public IP address or returns the string \"No internet connection\" if unable to obtain it.", "n_words": 17, "vocab_size": 16, "n_whitespaces": 16, "language": "en" } }, { "id": 164688, "commit_id": "047137ce2619cfe2027e3999dfb92eb614d9a485", "repo": "pandas", "path": "pandas/io/excel/_base.py", "file_name": "_base.py", "fun_name": "path", "commit_message": "DEP: Protect some ExcelWriter attributes (#45795)\n\n* DEP: Deprecate ExcelWriter attributes\r\n\r\n* DEP: Deprecate ExcelWriter attributes\r\n\r\n* Fixup for test\r\n\r\n* Move tests and restore check_extension\r\n\r\ny\r\n\r\n* Deprecate xlwt fm_date and fm_datetime; doc improvements", "code": "def path(self):\n \n self._deprecate(\"path\")\n return self._path\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 26, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 3, "token_counts": 16, "n_ast_nodes": 31, "n_identifiers": 4, "d_id": 39592, "documentation": { "docstring": "\n Path to Excel file.\n\n .. 
deprecated:: 1.5.0\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 29, "language": "en" } }, { "id": 286657, "commit_id": "963ca9b2b924d0514e0e65243dc8d9d7af023ad1", "repo": "OpenBBTerminal", "path": "openbb_terminal/core/scripts/sdk_audit.py", "file_name": "sdk_audit.py", "fun_name": "functions_df", "commit_message": "Audit SDK and View/Model functions (#3384)\n\n* Initial commit\r\n\r\n* Finalized functionality\r\n\r\n* update script\r\n\r\n* Allow using it without forecasting\r\n\r\n* Update gitignore\r\n\r\n* Update `sdk_audit.py`\r\n\r\n* Fixed issues, found more\r\n\r\n* Added fix for helper functions, and column for SDK type\r\n\r\n* Checked one more thing\r\n\r\n* Moved file\r\n\r\n* Move files ending with models/views\r\n\r\n* Added fix of name\r\n\r\n* Added file path fixes\r\n\r\n* Patch to fix sdk_audit for windows\r\n\r\n* fix\r\n\r\nCo-authored-by: Chavithra PARANA ", "code": "def functions_df() -> pd.DataFrame:\n \n modules = all_view_models()\n all_formatted = []\n for module in modules:\n if not FORECASTING and \"forecast\" in str(module):\n continue\n loaded = load_modules(module)\n # Gets all of a module's functions, but ignores imported functions\n func_list = [\n x[1]\n for x in getmembers(loaded, isfunction)\n if x[1].__module__ == loaded.__name__\n ]\n formatted_list = [format_function(x) for x in func_list]\n all_formatted.extend(formatted_list)\n func_df = pd.DataFrame()\n func_df[\"name\"] = [x[0] for x in all_formatted]\n func_df[\"docstring\"] = [x[1] for x in all_formatted]\n func_dups = len(func_df[\"name\"]) - len(func_df[\"name\"].drop_duplicates())\n if func_dups > 0:\n print(f\"Number of duplicate functions found: {func_dups}\")\n print(\n \"This may indicate that functions are defined several times in the terminal.\\n\"\n )\n func_df = func_df.set_index(\"name\")\n return func_df\n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 267, "n_words": 109, "vocab_size": 78, "complexity": 10, "nloc": 32, "token_counts": 169, "n_ast_nodes": 286, "n_identifiers": 26, "d_id": 85962, "documentation": { "docstring": "Creates a dataframe for all functions in 'models' and 'views'.\n\n Returns:\n ----------\n pd.DataFrame\n Information for all view and model functions\n ", "n_words": 20, "vocab_size": 16, "n_whitespaces": 39, "language": "en" } }, { "id": 150527, "commit_id": "96d8882f1e6740f6c0a859c6e5f52a5a30ddb007", "repo": "freqtrade", "path": "freqtrade/freqai/freqai_interface.py", "file_name": "freqai_interface.py", "fun_name": "train_timer", "commit_message": "Plug mem leak, add training timer", "code": "def train_timer(self, do='start'):\n \n if do == 'start':\n self.pair_it_train += 1\n self.begin_time_train = time.time()\n elif do == 'stop':\n end = time.time()\n self.train_time += (end - self.begin_time_train)\n if self.pair_it_train == self.total_pairs:\n logger.info(\n f'Total time spent training pairlist {self.train_time:.2f} seconds')\n self.pair_it_train = 0\n self.train_time = 0\n return\n\n # Following methods which are overridden by user made prediction models.\n # See freqai/prediction_models/CatboostPredictionModel.py for an example.\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 214, "n_words": 61, "vocab_size": 47, "complexity": 4, "nloc": 13, "token_counts": 79, "n_ast_nodes": 147, "n_identifiers": 11, "d_id": 34788, 
"documentation": { "docstring": "\n Timer designed to track the cumulative time spent training the full pairlist in\n FreqAI.\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 36, "language": "en" } }, { "id": 268981, "commit_id": "01c906c4178db5ae03b7eb2d298a052c952a0667", "repo": "keras", "path": "keras/layers/rnn/rnn_utils.py", "file_name": "rnn_utils.py", "fun_name": "caching_device", "commit_message": "Reorganize RNN layers, cells and wrappers into smaller logically organized files hosted under an `rnn` directory.\n\nPiperOrigin-RevId: 428841673", "code": "def caching_device(rnn_cell):\n \n if tf.executing_eagerly():\n # caching_device is not supported in eager mode.\n return None\n if not getattr(rnn_cell, '_enable_caching_device', False):\n return None\n # Don't set a caching device when running in a loop, since it is possible that\n # train steps could be wrapped in a tf.while_loop. In that scenario caching\n # prevents forward computations in loop iterations from re-reading the\n # updated weights.\n if control_flow_util.IsInWhileLoop(tf.compat.v1.get_default_graph()):\n logging.warning(\n 'Variable read device caching has been disabled because the '\n 'RNN is in tf.while_loop loop context, which will cause '\n 'reading stalled value in forward path. This could slow down '\n 'the training due to duplicated variable reads. Please '\n 'consider updating your code to remove tf.while_loop if possible.')\n return None\n if (rnn_cell._dtype_policy.compute_dtype !=\n rnn_cell._dtype_policy.variable_dtype):\n logging.warning(\n 'Variable read device caching has been disabled since it '\n 'doesn\\'t work with the mixed precision API. This is '\n 'likely to cause a slowdown for RNN training due to '\n 'duplicated read of variable for each timestep, which '\n 'will be significant in a multi remote worker setting. '\n 'Please consider disabling mixed precision API if '\n 'the performance has been affected.')\n return None\n # Cache the value on the device that access the variable.\n return lambda op: op.device\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 323, "n_words": 202, "vocab_size": 119, "complexity": 5, "nloc": 25, "token_counts": 92, "n_ast_nodes": 178, "n_identifiers": 17, "d_id": 79802, "documentation": { "docstring": "Returns the caching device for the RNN variable.\n\n This is useful for distributed training, when variable is not located as same\n device as the training worker. By enabling the device cache, this allows\n worker to read the variable once and cache locally, rather than read it every\n time step from remote when it is needed.\n\n Note that this is assuming the variable that cell needs for each time step is\n having the same value in the forward path, and only gets updated in the\n backprop. It is true for all the default cells (SimpleRNN, GRU, LSTM). 
If the\n cell body relies on any variable that gets updated every time step, then\n caching device will cause it to read the stall value.\n\n Args:\n rnn_cell: the rnn cell instance.\n ", "n_words": 127, "vocab_size": 79, "n_whitespaces": 141, "language": "en" } }, { "id": 116966, "commit_id": "257dfe6bac18d28088c7bfc79ca22cde682f9cd6", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/tdengine_handler/tdengine_handler.py", "file_name": "tdengine_handler.py", "fun_name": "get_tables", "commit_message": "Added TDENgine Handler", "code": "def get_tables(self) -> Response:\n \n q = 'SHOW TABLES;'\n\n \n return self.native_query(q)\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 40, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 6, "token_counts": 18, "n_ast_nodes": 34, "n_identifiers": 5, "d_id": 25874, "documentation": { "docstring": "\n Get a list with all of the tabels in TDEngine\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 268016, "commit_id": "3eb0485dd92c88cc92152d3656d94492db44b183", "repo": "ansible", "path": "test/lib/ansible_test/_internal/host_profiles.py", "file_name": "host_profiles.py", "fun_name": "get_instance", "commit_message": "ansible-test - Use more native type hints. (#78435)\n\n* ansible-test - Use more native type hints.\r\n\r\nSimple search and replace to switch from comments to native type hints for return types of functions with no arguments.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of simple single-line function annotation type comments to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of single-line function annotation type comments with default values to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nManual conversion of type annotation comments for functions which have pylint directives.", "code": "def get_instance(self) -> t.Optional[AnsibleCoreCI]:\n \n if not self.core_ci and self.core_ci_state:\n self.core_ci = self.create_core_ci(load=False)\n self.core_ci.load(self.core_ci_state)\n\n return self.core_ci\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 58, "n_words": 15, "vocab_size": 13, "complexity": 3, "nloc": 6, "token_counts": 49, "n_ast_nodes": 80, "n_identifiers": 9, "d_id": 79290, "documentation": { "docstring": "Return the current AnsibleCoreCI instance, loading it if not already loaded.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 24491, "commit_id": "ddaa2c2552e19635cd6cdf38619f1f176c358f89", "repo": "PaddleOCR", "path": "ppstructure/table/table_master_match.py", "file_name": "table_master_match.py", "fun_name": "sort_line_bbox", "commit_message": "add SLANet", "code": "def sort_line_bbox(g, bg):\n \n\n xs = [bg_item[0] for bg_item in bg]\n xs_sorted = sorted(xs)\n\n g_sorted = [None] * len(xs_sorted)\n bg_sorted = [None] * len(xs_sorted)\n for g_item, bg_item in zip(g, bg):\n idx = xs_sorted.index(bg_item[0])\n bg_sorted[idx] = bg_item\n g_sorted[idx] = g_item\n\n return g_sorted, bg_sorted\n\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 83, "n_words": 41, "vocab_size": 26, "complexity": 3, "nloc": 10, "token_counts": 85, "n_ast_nodes": 132, 
"n_identifiers": 14, "d_id": 4742, "documentation": { "docstring": "\n Sorted the bbox in the same line(group)\n compare coord 'x' value, where 'y' value is closed in the same group.\n :param g: index in the same group\n :param bg: bbox in the same group\n :return:\n ", "n_words": 35, "vocab_size": 22, "n_whitespaces": 54, "language": "en" } }, { "id": 284462, "commit_id": "33a041e5bf93ce93ab1a19adbc5ed74c2f1eb337", "repo": "OpenBBTerminal", "path": "openbb_terminal/stocks/tradinghours/bursa_model.py", "file_name": "bursa_model.py", "fun_name": "get_closed", "commit_message": "Trading hours stock feature (#1697)", "code": "def get_closed() -> pd.DataFrame:\n \n bursa = all_bursa()\n is_open_list = []\n for exchange in bursa.index:\n is_open = check_if_open(bursa, exchange)\n is_open_list.append(is_open)\n bursa[\"open\"] = is_open_list\n bursa = bursa.loc[~bursa[\"open\"]]\n return bursa[[\"name\", \"short_name\"]]\n\n\n@log_start_end(log=logger)", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@log_start_end(log=logger)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 63, "n_words": 29, "vocab_size": 23, "complexity": 2, "nloc": 19, "token_counts": 66, "n_ast_nodes": 125, "n_identifiers": 15, "d_id": 84732, "documentation": { "docstring": "Get closed exchanges.\n\n Parameters\n ----------\n\n Returns\n -------\n pd.DataFrame\n Currently closed exchanges\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 36, "language": "en" } }, { "id": 33795, "commit_id": "31be02f14b1724c677bb2e32a5101c7cb6448556", "repo": "transformers", "path": "src/transformers/models/flaubert/modeling_tf_flaubert.py", "file_name": "modeling_tf_flaubert.py", "fun_name": "get_masks", "commit_message": "TF: tf.debugging assertions without tf.running_eagerly() protection (#19030)", "code": "def get_masks(slen, lengths, causal, padding_mask=None):\n \n bs = shape_list(lengths)[0]\n if padding_mask is not None:\n mask = padding_mask\n else:\n # assert lengths.max().item() <= slen\n alen = tf.range(slen, dtype=lengths.dtype)\n mask = alen < tf.expand_dims(lengths, axis=1)\n\n # attention mask is the same as mask, or triangular inferior attention (causal)\n if causal:\n attn_mask = tf.less_equal(\n tf.tile(tf.reshape(alen, (1, 1, slen)), (bs, slen, 1)), tf.reshape(alen, (1, slen, 1))\n )\n else:\n attn_mask = mask\n\n # sanity check\n # assert shape_list(mask) == [bs, slen]\n tf.debugging.assert_equal(shape_list(mask), [bs, slen])\n if causal:\n tf.debugging.assert_equal(shape_list(attn_mask), [bs, slen, slen])\n\n return mask, attn_mask\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 190, "n_words": 87, "vocab_size": 58, "complexity": 4, "nloc": 17, "token_counts": 162, "n_ast_nodes": 243, "n_identifiers": 20, "d_id": 6152, "documentation": { "docstring": "\n Generate hidden states mask, and optionally an attention mask.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 16, "language": "en" } }, { "id": 70553, "commit_id": "e9183a95c88fe2eaf4c1d3aff9833633509713f3", "repo": "wagtail", "path": "wagtail/admin/checks.py", "file_name": "checks.py", "fun_name": "css_install_check", "commit_message": "Update docs links to reference new domain", "code": "def css_install_check(app_configs, **kwargs):\n errors = []\n\n css_path = os.path.join(\n os.path.dirname(__file__), 'static', 'wagtailadmin', 'css', 'normalize.css'\n )\n\n if not 
os.path.isfile(css_path):\n error_hint = % css_path\n\n errors.append(\n Warning(\n \"CSS for the Wagtail admin is missing\",\n hint=error_hint,\n id='wagtailadmin.W001',\n )\n )\n return errors\n\n\n@register(Tags.admin)", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "@register(Tags.admin)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 147, "n_words": 38, "vocab_size": 32, "complexity": 2, "nloc": 21, "token_counts": 73, "n_ast_nodes": 136, "n_identifiers": 19, "d_id": 15519, "documentation": { "docstring": "\n Most likely you are running a development (non-packaged) copy of\n Wagtail and have not built the static assets -\n see https://docs.wagtail.org/en/latest/contributing/developing.html\n\n File not found: %s\n ", "n_words": 25, "vocab_size": 24, "n_whitespaces": 77, "language": "en" } }, { "id": 310881, "commit_id": "7112c5b52a1e0016961a725d4ca90b57ddb350de", "repo": "core", "path": "homeassistant/components/apple_tv/config_flow.py", "file_name": "config_flow.py", "fun_name": "device_scan", "commit_message": "Use zeroconf for scanning in apple_tv (#64528)", "code": "async def device_scan(hass, identifier, loop):\n \n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 8, "n_words": 5, "vocab_size": 5, "complexity": 4, "nloc": 9, "token_counts": 77, "n_ast_nodes": 18, "n_identifiers": 4, "d_id": 109552, "documentation": { "docstring": "Scan for a specific device using identifier as filter.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 60156, "commit_id": "e51b790b7717e2603c1ea480c75e9ee02df3c869", "repo": "prefect", "path": "src/prefect/blocks/abstract.py", "file_name": "abstract.py", "fun_name": "__exit__", "commit_message": "Abstract database block (#7866)\n\nCo-authored-by: Alexander Streed \r\nCo-authored-by: Bill Palombi ", "code": "def __exit__(self, *args) -> None:\n \n raise NotImplementedError(\n f\"{self.__class__.__name__} does not support context management.\"\n )\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 46, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 7, "token_counts": 17, "n_ast_nodes": 40, "n_identifiers": 6, "d_id": 11998, "documentation": { "docstring": "\n Context management method for databases.\n ", "n_words": 5, "vocab_size": 5, "n_whitespaces": 20, "language": "en" } }, { "id": 120630, "commit_id": "b64e36b60fca9661ca2c8ae51a56fae07bf5efe6", "repo": "jax", "path": "jax/_src/lax/eigh.py", "file_name": "eigh.py", "fun_name": "split_spectrum", "commit_message": "Make QDWH-eig implementation jit-table.\n\nMove QDWH-eig from jax._src.scipy.eigh to jax._src.lax.eigh, in preparation for using it to back `lax.eigh` in a future change.\n\nPiperOrigin-RevId: 449362382", "code": "def split_spectrum(H, n, split_point, V0=None):\n \n N, _ = H.shape\n H_shift = H - split_point * jnp.eye(N, dtype=H.dtype)\n U, _, _, _ = qdwh.qdwh(H_shift, is_hermitian=True, dynamic_shape=(n, n))\n P = -0.5 * (U - _mask(jnp.eye(N, dtype=H.dtype), (n, n)))\n rank = jnp.round(jnp.trace(P)).astype(jnp.int32)\n\n V_minus, V_plus = _projector_subspace(P, H, n, rank)\n H_minus = (V_minus.conj().T @ H) @ V_minus\n H_plus = (V_plus.conj().T @ H) @ V_plus\n if V0 is not None:\n V_minus = jnp.dot(V0, V_minus)\n V_plus = jnp.dot(V0, V_plus)\n return H_minus, V_minus, H_plus, V_plus, rank\n\n\n# To help 
understand the iterative version of the algorithm, the original\n# recursive formulation follows.\n#\n# def _eigh_work(H, V=None, termination_size=128):\n# \n# if H.shape[0] <= termination_size:\n# evals, evecs = jnp_linalg.eigh(H)\n# if V is not None:\n# evecs = jnp.dot(V, evecs)\n# return evals, evecs\n#\n# split_point = jnp.median(jnp.diag(H)) # TODO: Improve this?\n# H_minus, V_minus, H_plus, V_plus = split_spectrum(H, split_point, V0=V)\n# H_minus, V_minus = _eigh_work(H_minus, V=V_minus, termination_size=termination_size)\n# H_plus, V_plus = _eigh_work(H_plus, V=V_plus, termination_size=termination_size)\n#\n# evals = jnp.hstack((H_minus, H_plus))\n# evecs = jnp.hstack((V_minus, V_plus))\n# return evals, evecs\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 210, "n_words": 174, "vocab_size": 95, "complexity": 2, "nloc": 13, "token_counts": 197, "n_ast_nodes": 323, "n_identifiers": 31, "d_id": 26905, "documentation": { "docstring": " The Hermitian matrix `H` is split into two matrices `H_minus`\n `H_plus`, respectively sharing its eigenspaces beneath and above\n its `split_point`th eigenvalue.\n\n Returns, in addition, `V_minus` and `V_plus`, isometries such that\n `Hi = Vi.conj().T @ H @ Vi`. If `V0` is not None, `V0 @ Vi` are\n returned instead; this allows the overall isometries mapping from\n an initial input matrix to progressively smaller blocks to be formed.\n\n Args:\n H: The Hermitian matrix to split.\n split_point: The eigenvalue to split along.\n V0: Matrix of isometries to be updated.\n Returns:\n H_minus: A Hermitian matrix sharing the eigenvalues of `H` beneath\n `split_point`.\n V_minus: An isometry from the input space of `V0` to `H_minus`.\n H_plus: A Hermitian matrix sharing the eigenvalues of `H` above\n `split_point`.\n V_plus: An isometry from the input space of `V0` to `H_plus`.\n rank: The dynamic size of the m subblock.\n The main work loop performing the symmetric eigendecomposition of H.\n# Each step recursively computes a projector into the space of eigenvalues\n# above jnp.mean(jnp.diag(H)). The result of the projections into and out of\n# that space, along with the isometries accomplishing these, are then computed.\n# This is performed recursively until the projections have size 1, and thus\n# store an eigenvalue of the original input; the corresponding isometry is\n# the related eigenvector. 
The results are then composed.\n#\n# Args:\n# H: The Hermitian input.\n# V: Stores the isometries projecting H into its subspaces.\n# precision: :class:`~jax.lax.Precision` object specifying the matmul precision.\n#\n# Returns:\n# H, V: The result of the projection.\n# ", "n_words": 257, "vocab_size": 138, "n_whitespaces": 321, "language": "en" } }, { "id": 107221, "commit_id": "4c2662ad6f8c7b3c06554dfa3633f50dd011beb0", "repo": "matplotlib", "path": "lib/matplotlib/lines.py", "file_name": "lines.py", "fun_name": "set_dash_joinstyle", "commit_message": "DOC: Document default join style\n\nin the same way as the default cap styles.", "code": "def set_dash_joinstyle(self, s):\n \n js = JoinStyle(s)\n if self._dashjoinstyle != js:\n self.stale = True\n self._dashjoinstyle = js\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 55, "n_words": 16, "vocab_size": 12, "complexity": 2, "nloc": 5, "token_counts": 31, "n_ast_nodes": 52, "n_identifiers": 7, "d_id": 22649, "documentation": { "docstring": "\n How to join segments of the line if it `~Line2D.is_dashed`.\n\n The default joinstyle is :rc:`lines.dash_joinstyle`.\n\n Parameters\n ----------\n s : `.JoinStyle` or %(JoinStyle)s\n ", "n_words": 22, "vocab_size": 22, "n_whitespaces": 65, "language": "en" } }, { "id": 136708, "commit_id": "7c8859f1428224710e4c2db2abf0d9ec28536301", "repo": "ray", "path": "python/ray/_private/utils.py", "file_name": "utils.py", "fun_name": "set_omp_num_threads_if_unset", "commit_message": "[core] Set OMP_NUM_THREADS to `num_cpus` required by task/actors by default (#30496)\n\nRay currently sets OMP_NUM_THREADS=1 when the environ variable is not set.\r\nThis PR:\r\n\r\nSets OMP_NUM_THREADS to the number of cpus assigned to the worker that runs a task before running, and reset it after running.\r\nIf num_cpus is a fractional smaller than 1, it will set OMP_NUM_THREADS to 1.\r\nDoesn't override OMP_NUM_THREADS if it's already being specified in runtime env or through os.environ.\r\nSigned-off-by: Ricky Xu \r\nCo-authored-by: Eric Liang \r\nCo-authored-by: Simon Mo ", "code": "def set_omp_num_threads_if_unset() -> bool:\n \n num_threads_from_env = os.environ.get(\"OMP_NUM_THREADS\")\n if num_threads_from_env is not None:\n # No ops if it's set\n return False\n\n # If unset, try setting the correct CPU count assigned.\n runtime_ctx = ray.get_runtime_context()\n if runtime_ctx.worker.mode != ray._private.worker.WORKER_MODE:\n # Non worker mode, no ops.\n return False\n\n num_assigned_cpus = runtime_ctx.get_assigned_resources().get(\"CPU\")\n\n if num_assigned_cpus is None:\n # This is an actor task w/o any num_cpus specified, set it to 1\n logger.debug(\n \"[ray] Forcing OMP_NUM_THREADS=1 to avoid performance \"\n \"degradation with many workers (issue #6998). 
You can override this \"\n \"by explicitly setting OMP_NUM_THREADS, or changing num_cpus.\"\n )\n num_assigned_cpus = 1\n\n import math\n\n # For num_cpu < 1: Set to 1.\n # For num_cpus >= 1: Set to the floor of the actual assigned cpus.\n omp_num_threads = max(math.floor(num_assigned_cpus), 1)\n os.environ[\"OMP_NUM_THREADS\"] = str(omp_num_threads)\n return True\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 260, "n_words": 129, "vocab_size": 94, "complexity": 4, "nloc": 27, "token_counts": 105, "n_ast_nodes": 189, "n_identifiers": 22, "d_id": 30974, "documentation": { "docstring": "Set the OMP_NUM_THREADS to default to num cpus assigned to the worker\n\n This function sets the environment variable OMP_NUM_THREADS for the worker,\n if the env is not previously set and it's running in worker (WORKER_MODE).\n\n Returns True if OMP_NUM_THREADS is set in this function.\n\n ", "n_words": 44, "vocab_size": 31, "n_whitespaces": 56, "language": "en" } }, { "id": 73773, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/migrations/0066_collection_management_permissions.py", "file_name": "0066_collection_management_permissions.py", "fun_name": "grant_instance_level_collection_management_permissions", "commit_message": "Reformat with black", "code": "def grant_instance_level_collection_management_permissions(apps, schema_editor):\n \n Collection = apps.get_model(\"wagtailcore.Collection\")\n Group = apps.get_model(\"auth.Group\")\n GroupCollectionPermission = apps.get_model(\"wagtailcore.GroupCollectionPermission\")\n Permission = apps.get_model(\"auth.Permission\")\n\n groups_w_permissions = Group.objects.filter(\n permissions__content_type__app_label=\"wagtailcore\",\n permissions__content_type__model=\"collection\",\n permissions__codename__in=[\n \"add_collection\",\n \"change_collection\",\n \"delete_collection\",\n ],\n ).values(\"id\", \"name\", \"permissions__id\", \"permissions__codename\")\n\n for root_collection in Collection.objects.filter(depth=1).all():\n for row in groups_w_permissions:\n GroupCollectionPermission.objects.create(\n group_id=row[\"id\"],\n permission_id=row[\"permissions__id\"],\n collection_id=root_collection.id,\n )\n # Now remove the model-level permissions for collections\n collection_permissions = Permission.objects.filter(\n content_type__app_label=\"wagtailcore\",\n content_type__model=\"collection\",\n codename__in=[\"add_collection\", \"change_collection\", \"delete_collection\"],\n )\n for perm in collection_permissions.all():\n perm.group_set.clear()\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 263, "n_words": 64, "vocab_size": 52, "complexity": 4, "nloc": 28, "token_counts": 172, "n_ast_nodes": 296, "n_identifiers": 31, "d_id": 16100, "documentation": { "docstring": "\n Give the groups who currently manage all collections permission to manage root collections\n ", "n_words": 13, "vocab_size": 11, "n_whitespaces": 20, "language": "en" } }, { "id": 247062, "commit_id": "1901cb1d4a8b7d9af64493fbd336e9aa2561c20c", "repo": "synapse", "path": "tests/rest/client/test_retention.py", "file_name": "test_retention.py", "fun_name": "test_no_default_policy", "commit_message": "Add type hints to `tests/rest/client` (#12084)", "code": "def test_no_default_policy(self) -> None:\n \n room_id = 
self.helper.create_room_as(self.user_id, tok=self.token)\n\n self._test_retention(room_id)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 6, "token_counts": 32, "n_ast_nodes": 53, "n_identifiers": 9, "d_id": 71472, "documentation": { "docstring": "Tests that an event doesn't get expired if there is neither a default retention\n policy nor a policy specific to the room.\n ", "n_words": 22, "vocab_size": 20, "n_whitespaces": 36, "language": "en" } }, { "id": 47660, "commit_id": "49e336ae0302b386a2f47269a6d13988382d975f", "repo": "airflow", "path": "tests/sensors/test_external_task_sensor.py", "file_name": "test_external_task_sensor.py", "fun_name": "dag_bag_ext", "commit_message": "Replace usage of `DummyOperator` with `EmptyOperator` (#22974)\n\n* Replace usage of `DummyOperator` with `EmptyOperator`", "code": "def dag_bag_ext():\n \n clear_db_runs()\n\n dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False)\n\n dag_0 = DAG(\"dag_0\", start_date=DEFAULT_DATE, schedule_interval=None)\n task_a_0 = EmptyOperator(task_id=\"task_a_0\", dag=dag_0)\n task_b_0 = ExternalTaskMarker(\n task_id=\"task_b_0\", external_dag_id=\"dag_1\", external_task_id=\"task_a_1\", recursion_depth=3, dag=dag_0\n )\n task_a_0 >> task_b_0\n\n dag_1 = DAG(\"dag_1\", start_date=DEFAULT_DATE, schedule_interval=None)\n task_a_1 = ExternalTaskSensor(\n task_id=\"task_a_1\", external_dag_id=dag_0.dag_id, external_task_id=task_b_0.task_id, dag=dag_1\n )\n task_b_1 = ExternalTaskMarker(\n task_id=\"task_b_1\", external_dag_id=\"dag_2\", external_task_id=\"task_a_2\", recursion_depth=2, dag=dag_1\n )\n task_a_1 >> task_b_1\n\n dag_2 = DAG(\"dag_2\", start_date=DEFAULT_DATE, schedule_interval=None)\n task_a_2 = ExternalTaskSensor(\n task_id=\"task_a_2\", external_dag_id=dag_1.dag_id, external_task_id=task_b_1.task_id, dag=dag_2\n )\n task_b_2 = ExternalTaskMarker(\n task_id=\"task_b_2\", external_dag_id=\"dag_3\", external_task_id=\"task_a_3\", recursion_depth=1, dag=dag_2\n )\n task_a_2 >> task_b_2\n\n dag_3 = DAG(\"dag_3\", start_date=DEFAULT_DATE, schedule_interval=None)\n task_a_3 = ExternalTaskSensor(\n task_id=\"task_a_3\", external_dag_id=dag_2.dag_id, external_task_id=task_b_2.task_id, dag=dag_3\n )\n task_b_3 = EmptyOperator(task_id=\"task_b_3\", dag=dag_3)\n task_a_3 >> task_b_3\n\n for dag in [dag_0, dag_1, dag_2, dag_3]:\n dag_bag.bag_dag(dag=dag, root_dag=dag)\n\n yield dag_bag\n\n clear_db_runs()\n\n\n@pytest.fixture", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 243, "n_words": 111, "vocab_size": 69, "complexity": 2, "nloc": 35, "token_counts": 290, "n_ast_nodes": 460, "n_identifiers": 36, "d_id": 9197, "documentation": { "docstring": "\n Create a DagBag with DAGs looking like this. 
The dotted lines represent external dependencies\n set up using ExternalTaskMarker and ExternalTaskSensor.\n\n dag_0: task_a_0 >> task_b_0\n |\n |\n dag_1: ---> task_a_1 >> task_b_1\n |\n |\n dag_2: ---> task_a_2 >> task_b_2\n |\n |\n dag_3: ---> task_a_3 >> task_b_3\n ", "n_words": 45, "vocab_size": 35, "n_whitespaces": 480, "language": "en" } }, { "id": 279741, "commit_id": "e3e3a428f0a7955040c8a8fb8b2ad6f3e16d29eb", "repo": "keras", "path": "keras/saving/experimental/serialization_lib.py", "file_name": "serialization_lib.py", "fun_name": "deserialize_keras_object", "commit_message": "Remaster serialization logic.\n\nThere were several significant flaws, most prominently:\n\n- We had 2 separate serialization systems partially overlapping and interacting with each other: the JSON encoder/decoder one, and serialize/deserialize_keras_objects. The new system is fully standalone.\n- We ignored objects passed via `custom_objects` most of the time.\n\nPiperOrigin-RevId: 473794783", "code": "def deserialize_keras_object(config, custom_objects=None):\n ", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "\"\"\"Retrieve the object by deserializing the config dict.\n\n The config dict is a Python dictionary that consists of a set of key-value\n pairs, and represents a Keras object, such as an `Optimizer`, `Layer`,\n `Metrics`, etc. The saving and loading library uses the following keys to\n record information of a Keras object:\n\n - `class_name`: String. This is the name of the class,\n as exactly defined in the source\n code, such as \"LossesContainer\".\n - `config`: Dict. Library-defined or user-defined key-value pairs that store\n the configuration of the object, as obtained by `object.get_config()`.\n - `module`: String. The path of the python module, such as\n \"keras.engine.compile_utils\". Built-in Keras classes\n expect to have prefix `keras`.\n - `registered_name`: String. The key the class is registered under via\n `keras.utils.register_keras_serializable(package, name)` API. The key has\n the format of '{package}>{name}', where `package` and `name` are the\n arguments passed to `register_keras_serializable()`. If `name` is not\n provided, it defaults to the class name. If `registered_name` successfully\n resolves to a class (that was registered), the `class_name` and `config`\n values in the dict will not be used. `registered_name` is only used for\n non-built-in classes.\n\n For example, the following dictionary represents the built-in Adam optimizer\n with the relevant config:\n\n ```python\n dict_structure = {\n \"class_name\": \"Adam\",\n \"config\": {\n \"amsgrad\": false,\n \"beta_1\": 0.8999999761581421,\n \"beta_2\": 0.9990000128746033,\n \"decay\": 0.0,\n \"epsilon\": 1e-07,\n \"learning_rate\": 0.0010000000474974513,\n \"name\": \"Adam\"\n },\n \"module\": \"keras.optimizers\",\n \"registered_name\": None\n }\n # Returns an `Adam` instance identical to the original one.\n deserialize_keras_object(dict_structure)\n ```\n\n If the class does not have an exported Keras namespace, the library tracks\n it by its `module` and `class_name`. 
For example:\n\n ```python\n dict_structure = {\n \"class_name\": \"LossesContainer\",\n \"config\": {\n \"losses\": [...],\n \"total_loss_mean\": {...},\n },\n \"module\": \"keras.engine.compile_utils\",\n \"registered_name\": \"LossesContainer\"\n }\n\n # Returns a `LossesContainer` instance identical to the original one.\n deserialize_keras_object(dict_structure)\n ```\n\n And the following dictionary represents a user-customized````", "n_ast_errors": 3, "ast_levels": 8, "n_whitespaces": 6, "n_words": 3, "vocab_size": 3, "complexity": 15, "nloc": 54, "token_counts": 281, "n_ast_nodes": 49, "n_identifiers": 14, "d_id": 83118, "documentation": { "docstring": "Retrieve the object by deserializing the config dict.\n\n The config dict is a Python dictionary that consists of a set of key-value\n pairs, and represents a Keras object, such as an `Optimizer`, `Layer`,\n `Metrics`, etc. The saving and loading library uses the following keys to\n record information of a Keras object:\n\n - `class_name`: String. This is the name of the class,\n as exactly defined in the source\n code, such as \"LossesContainer\".\n - `config`: Dict. Library-defined or user-defined key-value pairs that store\n the configuration of the object, as obtained by `object.get_config()`.\n - `module`: String. The path of the python module, such as\n \"keras.engine.compile_utils\". Built-in Keras classes\n expect to have prefix `keras`.\n - `registered_name`: String. The key the class is registered under via\n `keras.utils.register_keras_serializable(package, name)` API. The key has\n the format of '{package}>{name}', where `package` and `name` are the\n arguments passed to `register_keras_serializable()`. If `name` is not\n provided, it defaults to the class name. If `registered_name` successfully\n resolves to a class (that was registered), the `class_name` and `config`\n values in the dict will not be used. `registered_name` is only used for\n non-built-in classes.\n\n For example, the following dictionary represents the built-in Adam optimizer\n with the relevant config:\n\n ```python\n dict_structure = {\n \"class_name\": \"Adam\",\n \"config\": {\n \"amsgrad\": false,\n \"beta_1\": 0.8999999761581421,\n \"beta_2\": 0.9990000128746033,\n \"decay\": 0.0,\n \"epsilon\": 1e-07,\n \"learning_rate\": 0.0010000000474974513,\n \"name\": \"Adam\"\n },\n \"module\": \"keras.optimizers\",\n \"registered_name\": None\n }\n # Returns an `Adam` instance identical to the original one.\n deserialize_keras_object(dict_structure)\n ```\n\n If the class does not have an exported Keras namespace, the library tracks\n it by its `module` and `class_name`. 
For example:\n\n ```python\n dict_structure = {\n \"class_name\": \"LossesContainer\",\n \"config\": {\n \"losses\": [...],\n \"total_loss_mean\": {...},\n },\n \"module\": \"keras.engine.compile_utils\",\n \"registered_name\": \"LossesContainer\"\n }\n\n # Returns a `LossesContainer` instance identical to the original one.\n deserialize_keras_object(dict_structure)\n ```\n\n And the following dictionary represents a user-customized `MeanSquaredError`\n loss:\n\n ```python", "n_words": 296, "vocab_size": 179, "n_whitespaces": 591, "language": "en" } }, { "id": 155368, "commit_id": "219edb5fb772609d3fafaac02ded0294ea434aa8", "repo": "modin", "path": "modin/experimental/core/execution/native/implementations/hdk_on_native/dataframe/dataframe.py", "file_name": "dataframe.py", "fun_name": "from_arrow", "commit_message": "FIX-#4859: Add support for PyArrow Dictionary Arrays to type mapping (#5271)\n\nCo-authored-by: Iaroslav Igoshev \r\nSigned-off-by: Andrey Pavlenko ", "code": "def from_arrow(cls, at, index_cols=None, index=None, columns=None):\n \n (\n new_frame,\n new_lengths,\n new_widths,\n unsupported_cols,\n ) = cls._partition_mgr_cls.from_arrow(at, return_dims=True)\n\n if columns is not None:\n new_columns = columns\n new_index = pd.RangeIndex(at.num_rows) if index is None else index\n elif index_cols:\n data_cols = [col for col in at.column_names if col not in index_cols]\n new_columns = pd.Index(data=data_cols, dtype=\"O\")\n new_index = index\n else:\n assert index is None\n new_columns = pd.Index(data=at.column_names, dtype=\"O\")\n new_index = pd.RangeIndex(at.num_rows)\n\n new_dtypes = []\n\n for col in at.columns:\n if pyarrow.types.is_dictionary(col.type):\n new_dtypes.append(LazyProxyCategoricalDtype(at, col._name))\n else:\n new_dtypes.append(cls._arrow_type_to_dtype(col.type))\n\n if len(unsupported_cols) > 0:\n ErrorMessage.single_warning(\n f\"Frame contain columns with unsupported data-types: {unsupported_cols}. \"\n + \"All operations with this frame will be default to pandas!\"\n )\n\n return cls(\n partitions=new_frame,\n index=new_index,\n columns=new_columns,\n row_lengths=new_lengths,\n column_widths=new_widths,\n dtypes=pd.Series(data=new_dtypes, index=at.column_names),\n index_cols=index_cols,\n has_unsupported_data=len(unsupported_cols) > 0,\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 518, "n_words": 117, "vocab_size": 80, "complexity": 9, "nloc": 39, "token_counts": 258, "n_ast_nodes": 396, "n_identifiers": 41, "d_id": 36363, "documentation": { "docstring": "\n Build a frame from an Arrow table.\n\n Parameters\n ----------\n at : pyarrow.Table\n Source table.\n index_cols : list of str, optional\n List of index columns in the source table which\n are ignored in transformation.\n index : pandas.Index, optional\n An index to be used by the new frame. 
Should present\n if `index_cols` is not None.\n columns : Index or array-like, optional\n Column labels to use for resulting frame.\n\n Returns\n -------\n HdkOnNativeDataframe\n The new frame.\n ", "n_words": 72, "vocab_size": 56, "n_whitespaces": 227, "language": "en" } }, { "id": 247058, "commit_id": "1901cb1d4a8b7d9af64493fbd336e9aa2561c20c", "repo": "synapse", "path": "tests/rest/client/test_retention.py", "file_name": "test_retention.py", "fun_name": "test_retention_event_purged_with_state_event_outside_allowed", "commit_message": "Add type hints to `tests/rest/client` (#12084)", "code": "def test_retention_event_purged_with_state_event_outside_allowed(self) -> None:\n \n room_id = self.helper.create_room_as(self.user_id, tok=self.token)\n\n # Set a max_lifetime higher than the maximum allowed value.\n self.helper.send_state(\n room_id=room_id,\n event_type=EventTypes.Retention,\n body={\"max_lifetime\": one_day_ms * 4},\n tok=self.token,\n )\n\n # Check that the event is purged after waiting for the maximum allowed duration\n # instead of the one specified in the room's policy.\n self._test_retention_event_purged(room_id, one_day_ms * 1.5)\n\n # Set a max_lifetime lower than the minimum allowed value.\n self.helper.send_state(\n room_id=room_id,\n event_type=EventTypes.Retention,\n body={\"max_lifetime\": one_hour_ms},\n tok=self.token,\n )\n\n # Check that the event is purged after waiting for the minimum allowed duration\n # instead of the one specified in the room's policy.\n self._test_retention_event_purged(room_id, one_day_ms * 0.5)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 286, "n_words": 100, "vocab_size": 49, "complexity": 1, "nloc": 19, "token_counts": 114, "n_ast_nodes": 174, "n_identifiers": 16, "d_id": 71468, "documentation": { "docstring": "Tests that the server configuration can override the policy for a room when\n running the purge jobs.\n ", "n_words": 17, "vocab_size": 15, "n_whitespaces": 31, "language": "en" } }, { "id": 55891, "commit_id": "f166d70fcfcdf4fceeb222f273b8e0eab6fb1b26", "repo": "prefect", "path": "src/prefect/client.py", "file_name": "client.py", "fun_name": "raise_for_status", "commit_message": "Create PrefectResponse", "code": "def raise_for_status(self) -> None:\n \n try:\n return super().raise_for_status()\n except HTTPStatusError as exc:\n raise PrefectHTTPStatusError.from_httpx_error(exc) from None\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 58, "n_words": 15, "vocab_size": 15, "complexity": 2, "nloc": 11, "token_counts": 32, "n_ast_nodes": 56, "n_identifiers": 7, "d_id": 11416, "documentation": { "docstring": "\n Raise an exception if the response contains an HTTPStatusError.\n\n The `PrefectHTTPStatusError` contains useful additional information that\n is not contained in the `HTTPStatusError`.\n ", "n_words": 22, "vocab_size": 19, "n_whitespaces": 51, "language": "en" } }, { "id": 146277, "commit_id": "b1496d235fce4f19fb53553e7fb78e97e1d19054", "repo": "ray", "path": "python/ray/tune/tests/test_tune_restore.py", "file_name": "test_tune_restore.py", "fun_name": "test", "commit_message": "[tune] fix error handling for fail_fast case. 
(#22982)", "code": "def test(self):\n \n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 9, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 4, "token_counts": 30, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 33646, "documentation": { "docstring": "Trainable crashes with fail_fast flag and the original crash message\n should bubble up.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 19, "language": "en" } }, { "id": 205870, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/sql/query.py", "file_name": "query.py", "fun_name": "check_related_objects", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def check_related_objects(self, field, value, opts):\n \n if field.is_relation:\n # Check that the field and the queryset use the same model in a\n # query like .filter(author=Author.objects.all()). For example, the\n # opts would be Author's (from the author field) and value.model\n # would be Author.objects.all() queryset's .model (Author also).\n # The field is the related field on the lhs side.\n if (\n isinstance(value, Query)\n and not value.has_select_fields\n and not check_rel_lookup_compatibility(value.model, opts, field)\n ):\n raise ValueError(\n 'Cannot use QuerySet for \"%s\": Use a QuerySet for \"%s\".'\n % (value.model._meta.object_name, opts.object_name)\n )\n elif hasattr(value, \"_meta\"):\n self.check_query_object_type(value, opts, field)\n elif hasattr(value, \"__iter__\"):\n for v in value:\n self.check_query_object_type(v, opts, field)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 378, "n_words": 103, "vocab_size": 71, "complexity": 8, "nloc": 16, "token_counts": 104, "n_ast_nodes": 165, "n_identifiers": 17, "d_id": 51255, "documentation": { "docstring": "Check the type of object passed to query relations.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 177506, "commit_id": "d82815dba6c8ddce19cd49f700298dc82a58f066", "repo": "networkx", "path": "networkx/algorithms/shortest_paths/weighted.py", "file_name": "weighted.py", "fun_name": "single_source_dijkstra_path_length", "commit_message": "Hide edges with a weight of None in A*. 
(#5945)\n\n* Hide edges with a weight of None in A*.\r\n\r\nThis matches the Dijkstra's weight interface.\r\n\r\n* Update Dijkstra's and A* docs for weights of None.\r\n\r\n* Add tests for A* with weight of None.\r\n\r\n* Add another test for A* with a weight function.\r\n\r\n* Document that None indicates a hidden edge.", "code": "def single_source_dijkstra_path_length(G, source, cutoff=None, weight=\"weight\"):\n \n return multi_source_dijkstra_path_length(G, {source}, cutoff=cutoff, weight=weight)\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 16, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 2, "token_counts": 33, "n_ast_nodes": 50, "n_identifiers": 6, "d_id": 42410, "documentation": { "docstring": "Find shortest weighted path lengths in G from a source node.\n\n Compute the shortest path length between source and all other\n reachable nodes for a weighted graph.\n\n Parameters\n ----------\n G : NetworkX graph\n\n source : node label\n Starting node for path\n\n cutoff : integer or float, optional\n Length (sum of edge weights) at which the search is stopped.\n If cutoff is provided, only return paths with summed weight <= cutoff.\n\n weight : string or function\n If this is a string, then edge weights will be accessed via the\n edge attribute with this key (that is, the weight of the edge\n joining `u` to `v` will be ``G.edges[u, v][weight]``). If no\n such edge attribute exists, the weight of the edge is assumed to\n be one.\n\n If this is a function, the weight of an edge is the value\n returned by the function. The function must accept exactly three\n positional arguments: the two endpoints of an edge and the\n dictionary of edge attributes for that edge. The function must\n return a number or None to indicate a hidden edge.\n\n Returns\n -------\n length : dict\n Dict keyed by node to shortest path length from source.\n\n Raises\n ------\n NodeNotFound\n If `source` is not in `G`.\n\n Examples\n --------\n >>> G = nx.path_graph(5)\n >>> length = nx.single_source_dijkstra_path_length(G, 0)\n >>> length[4]\n 4\n >>> for node in [0, 1, 2, 3, 4]:\n ... 
print(f\"{node}: {length[node]}\")\n 0: 0\n 1: 1\n 2: 2\n 3: 3\n 4: 4\n\n Notes\n -----\n Edge weight attributes must be numerical.\n Distances are calculated as sums of weighted edges traversed.\n\n The weight function can be used to hide edges by returning None.\n So ``weight = lambda u, v, d: 1 if d['color']==\"red\" else None``\n will find the shortest red path.\n\n See Also\n --------\n single_source_dijkstra, single_source_bellman_ford_path_length\n\n ", "n_words": 289, "vocab_size": 174, "n_whitespaces": 512, "language": "en" } }, { "id": 125445, "commit_id": "b856daebbdc923a216ce412be477c61e6cc5707e", "repo": "ray", "path": "python/ray/serve/tests/test_cli.py", "file_name": "test_cli.py", "fun_name": "test_status_error_msg_format", "commit_message": "[Serve] Fix Formatting of Error Messages printed in `serve status` (#26578)", "code": "def test_status_error_msg_format(ray_start_stop):\n \n\n config_file_name = os.path.join(\n os.path.dirname(__file__), \"test_config_files\", \"deployment_fail.yaml\"\n )\n\n subprocess.check_output([\"serve\", \"deploy\", config_file_name])\n\n status_response = subprocess.check_output(\n [\"serve\", \"status\", \"-a\", \"http://localhost:52365/\"]\n )\n serve_status = yaml.safe_load(status_response)\n print(\"serve_status\", serve_status)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 63, "n_words": 25, "vocab_size": 22, "complexity": 1, "nloc": 12, "token_counts": 79, "n_ast_nodes": 123, "n_identifiers": 15, "d_id": 27873, "documentation": { "docstring": "Deploys a faulty config file and checks its status.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 108151, "commit_id": "ec410abbb3a721e31f3aaa61e9e4f941467e35e1", "repo": "matplotlib", "path": "lib/matplotlib/backends/backend_svg.py", "file_name": "backend_svg.py", "fun_name": "_get_style_dict", "commit_message": "Deprecate functions in backends", "code": "def _get_style_dict(self, gc, rgbFace):\n \n attrib = {}\n\n forced_alpha = gc.get_forced_alpha()\n\n if gc.get_hatch() is not None:\n attrib['fill'] = \"url(#%s)\" % self._get_hatch(gc, rgbFace)\n if (rgbFace is not None and len(rgbFace) == 4 and rgbFace[3] != 1.0\n and not forced_alpha):\n attrib['fill-opacity'] = _short_float_fmt(rgbFace[3])\n else:\n if rgbFace is None:\n attrib['fill'] = 'none'\n else:\n if tuple(rgbFace[:3]) != (0, 0, 0):\n attrib['fill'] = rgb2hex(rgbFace)\n if (len(rgbFace) == 4 and rgbFace[3] != 1.0\n and not forced_alpha):\n attrib['fill-opacity'] = _short_float_fmt(rgbFace[3])\n\n if forced_alpha and gc.get_alpha() != 1.0:\n attrib['opacity'] = _short_float_fmt(gc.get_alpha())\n\n offset, seq = gc.get_dashes()\n if seq is not None:\n attrib['stroke-dasharray'] = ','.join(\n _short_float_fmt(val) for val in seq)\n attrib['stroke-dashoffset'] = _short_float_fmt(float(offset))\n\n linewidth = gc.get_linewidth()\n if linewidth:\n rgb = gc.get_rgb()\n attrib['stroke'] = rgb2hex(rgb)\n if not forced_alpha and rgb[3] != 1.0:\n attrib['stroke-opacity'] = _short_float_fmt(rgb[3])\n if linewidth != 1.0:\n attrib['stroke-width'] = _short_float_fmt(linewidth)\n if gc.get_joinstyle() != 'round':\n attrib['stroke-linejoin'] = gc.get_joinstyle()\n if gc.get_capstyle() != 'butt':\n attrib['stroke-linecap'] = _capstyle_d[gc.get_capstyle()]\n\n return attrib\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, 
"n_whitespaces": 580, "n_words": 145, "vocab_size": 76, "complexity": 21, "nloc": 37, "token_counts": 342, "n_ast_nodes": 558, "n_identifiers": 27, "d_id": 23079, "documentation": { "docstring": "Generate a style string from the GraphicsContext and rgbFace.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 270767, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/base_layer.py", "file_name": "base_layer.py", "fun_name": "_get_node_attribute_at_index", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _get_node_attribute_at_index(self, node_index, attr, attr_name):\n \n if not self._inbound_nodes:\n raise RuntimeError(\n f\"The layer {self.name} has never been called \"\n f\"and thus has no defined {attr_name}.\"\n )\n if not len(self._inbound_nodes) > node_index:\n raise ValueError(\n f\"Asked to get {attr_name} at node \"\n f\"{node_index}, but the layer has only \"\n f\"{len(self._inbound_nodes)} inbound nodes.\"\n )\n values = getattr(self._inbound_nodes[node_index], attr)\n if isinstance(values, list) and len(values) == 1:\n return values[0]\n else:\n return values\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 249, "n_words": 66, "vocab_size": 54, "complexity": 5, "nloc": 17, "token_counts": 84, "n_ast_nodes": 165, "n_identifiers": 14, "d_id": 80572, "documentation": { "docstring": "Private utility to retrieves an attribute (e.g. inputs) from a node.\n\n This is used to implement the methods:\n - get_input_shape_at\n - get_output_shape_at\n - get_input_at\n etc...\n\n Args:\n node_index: Integer index of the node from which\n to retrieve the attribute.\n attr: Exact node attribute name.\n attr_name: Human-readable attribute name, for error messages.\n\n Returns:\n The layer's attribute `attr` at the node of index `node_index`.\n\n Raises:\n RuntimeError: If the layer has no inbound nodes, or if called in Eager\n mode.\n ValueError: If the index provided does not match any node.\n ", "n_words": 86, "vocab_size": 66, "n_whitespaces": 257, "language": "en" } }, { "id": 63102, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py", "file_name": "__init__.py", "fun_name": "_is_current", "commit_message": "upd; format", "code": "def _is_current(self, file_path, zip_path):\n \n timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])\n if not os.path.isfile(file_path):\n return False\n stat = os.stat(file_path)\n if stat.st_size != size or stat.st_mtime != timestamp:\n return False\n # check that the contents match\n zip_contents = self.loader.get_data(zip_path)\n with open(file_path, 'rb') as f:\n file_contents = f.read()\n return zip_contents == file_contents\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 143, "n_words": 47, "vocab_size": 36, "complexity": 4, "nloc": 11, "token_counts": 92, "n_ast_nodes": 152, "n_identifiers": 21, "d_id": 13147, "documentation": { "docstring": "\n Return True if the file_path is current for this zip_path\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 47757, "commit_id": "197cff3194e855b9207c3c0da8ae093a0d5dda55", "repo": "airflow", "path": 
"airflow/models/taskmixin.py", "file_name": "taskmixin.py", "fun_name": "iter_mapped_dependants", "commit_message": "Ensure TaskMap only checks \"relevant\" dependencies (#23053)\n\nWhen looking for \"mapped dependants\" of a task, we only want a task if\r\nit not only is a direct downstream of the task, but also it actually\r\n\"uses\" the task's pushed XCom for task mapping. So we need to peek into\r\nthe mapped downstream task's expansion kwargs, and only count it as a\r\nmapped dependant if the upstream is referenced there.", "code": "def iter_mapped_dependants(self) -> Iterator[\"MappedOperator\"]:\n \n return (\n downstream\n for downstream in self._iter_all_mapped_downstreams()\n if any(p.node_id == self.node_id for p in downstream.iter_mapped_dependencies())\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 74, "n_words": 20, "vocab_size": 17, "complexity": 4, "nloc": 13, "token_counts": 42, "n_ast_nodes": 69, "n_identifiers": 9, "d_id": 9246, "documentation": { "docstring": "Return mapped nodes that depend on the current task the expansion.\n\n For now, this walks the entire DAG to find mapped nodes that has this\n current task as an upstream. We cannot use ``downstream_list`` since it\n only contains operators, not task groups. In the future, we should\n provide a way to record an DAG node's all downstream nodes instead.\n ", "n_words": 59, "vocab_size": 45, "n_whitespaces": 94, "language": "en" } }, { "id": 313498, "commit_id": "7a5fa8eb58f49282e73f454826472ba54cd37a30", "repo": "core", "path": "tests/components/nest/test_events.py", "file_name": "test_events.py", "fun_name": "device_traits", "commit_message": "Update more nest tests to use common fixtures (#73303)\n\nUpdate nest tests to use fixtures", "code": "def device_traits() -> list[str]:\n \n return [\"sdm.devices.traits.DoorbellChime\"]\n\n\n@pytest.fixture(autouse=True)", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture(autouse=True)", "n_ast_errors": 1, "ast_levels": 7, "n_whitespaces": 12, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 14, "n_ast_nodes": 42, "n_identifiers": 6, "d_id": 112116, "documentation": { "docstring": "Fixture for the present traits of the device under test.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 270587, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/dtensor/layout_map.py", "file_name": "layout_map.py", "fun_name": "__getitem__", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def __getitem__(self, key):\n \n if key in self._layout_map:\n return self._layout_map[key]\n\n for k in self._layout_map:\n if re.match(k, key):\n return self._layout_map[k]\n return None\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 85, "n_words": 20, "vocab_size": 14, "complexity": 4, "nloc": 7, "token_counts": 48, "n_ast_nodes": 74, "n_identifiers": 7, "d_id": 80488, "documentation": { "docstring": "Retrieve the corresponding layout by the string key.\n\n When there isn't an exact match, all the existing keys in the layout map\n will be treated as a regex and map against the input key again. The first\n match will be returned, based on the key insertion order. 
Return None if\n there isn't any match found.\n\n Args:\n key: the string key as the query for the layout.\n\n Returns:\n Corresponding layout based on the query.\n ", "n_words": 73, "vocab_size": 50, "n_whitespaces": 140, "language": "en" } }, { "id": 258643, "commit_id": "a793c1f0ad7dd63b2a896d2e84087089a11e7fca", "repo": "scikit-learn", "path": "sklearn/datasets/_base.py", "file_name": "_base.py", "fun_name": "load_diabetes", "commit_message": "DOC Ensures that sklearn.datasets._base.load_breast_cancer passes numpydoc validation (#22346)\n\nCo-authored-by: Guillaume Lemaitre \r\nCo-authored-by: Arturo Amor <86408019+ArturoAmorQ@users.noreply.github.com>", "code": "def load_diabetes(*, return_X_y=False, as_frame=False, scaled=True):\n \n data_filename = \"diabetes_data_raw.csv.gz\"\n target_filename = \"diabetes_target.csv.gz\"\n data = load_gzip_compressed_csv_data(data_filename)\n target = load_gzip_compressed_csv_data(target_filename)\n\n if scaled:\n data = scale(data, copy=False)\n data /= data.shape[0] ** 0.5\n\n fdescr = load_descr(\"diabetes.rst\")\n\n feature_names = [\"age\", \"sex\", \"bmi\", \"bp\", \"s1\", \"s2\", \"s3\", \"s4\", \"s5\", \"s6\"]\n\n frame = None\n target_columns = [\n \"target\",\n ]\n if as_frame:\n frame, data, target = _convert_data_dataframe(\n \"load_diabetes\", data, target, feature_names, target_columns\n )\n\n if return_X_y:\n return data, target\n\n return Bunch(\n data=data,\n target=target,\n frame=frame,\n DESCR=fdescr,\n feature_names=feature_names,\n data_filename=data_filename,\n target_filename=target_filename,\n data_module=DATA_MODULE,\n )\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 234, "n_words": 80, "vocab_size": 60, "complexity": 4, "nloc": 30, "token_counts": 164, "n_ast_nodes": 260, "n_identifiers": 22, "d_id": 75347, "documentation": { "docstring": "Load and return the diabetes dataset (regression).\n\n ============== ==================\n Samples total 442\n Dimensionality 10\n Features real, -.2 < x < .2\n Targets integer 25 - 346\n ============== ==================\n\n .. note::\n The meaning of each feature (i.e. `feature_names`) might be unclear\n (especially for `ltg`) as the documentation of the original dataset is\n not explicit. We provide information that seems correct in regard with\n the scientific literature in this field of research.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n return_X_y : bool, default=False\n If True, returns ``(data, target)`` instead of a Bunch object.\n See below for more information about the `data` and `target` object.\n\n .. versionadded:: 0.18\n\n as_frame : bool, default=False\n If True, the data is a pandas DataFrame including columns with\n appropriate dtypes (numeric). The target is\n a pandas DataFrame or Series depending on the number of target columns.\n If `return_X_y` is True, then (`data`, `target`) will be pandas\n DataFrames or Series as described below.\n\n .. versionadded:: 0.23\n\n scaled : bool, default=True\n If True, the feature variables are mean centered and scaled by the\n standard deviation times the square root of `n_samples`.\n If False, raw data is returned for the feature variables.\n\n .. versionadded:: 1.1\n\n Returns\n -------\n data : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n data : {ndarray, dataframe} of shape (442, 10)\n The data matrix. 
If `as_frame=True`, `data` will be a pandas\n DataFrame.\n target: {ndarray, Series} of shape (442,)\n The regression target. If `as_frame=True`, `target` will be\n a pandas Series.\n feature_names: list\n The names of the dataset columns.\n frame: DataFrame of shape (442, 11)\n Only present when `as_frame=True`. DataFrame with `data` and\n `target`.\n\n .. versionadded:: 0.23\n DESCR: str\n The full description of the dataset.\n data_filename: str\n The path to the location of the data.\n target_filename: str\n The path to the location of the target.\n\n (data, target) : tuple if ``return_X_y`` is True\n Returns a tuple of two ndarray of shape (n_samples, n_features)\n A 2D array with each row representing one sample and each column\n representing the features and/or target of a given sample.\n\n .. versionadded:: 0.18\n ", "n_words": 339, "vocab_size": 194, "n_whitespaces": 739, "language": "en" } }, { "id": 37487, "commit_id": "57e6464ac9a31156f1c93e59107323e6ec01309e", "repo": "transformers", "path": "src/transformers/testing_utils.py", "file_name": "testing_utils.py", "fun_name": "require_pyctcdecode", "commit_message": "Update all require decorators to use skipUnless when possible (#16999)", "code": "def require_pyctcdecode(test_case):\n \n return unittest.skipUnless(is_pyctcdecode_available(), \"test requires pyctcdecode\")(test_case)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 37, "n_identifiers": 5, "d_id": 6794, "documentation": { "docstring": "\n Decorator marking a test that requires pyctcdecode\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 14, "language": "en" } }, { "id": 250017, "commit_id": "acea4d7a2ff61b5beda420b54a8451088060a8cd", "repo": "synapse", "path": "tests/util/test_stream_change_cache.py", "file_name": "test_stream_change_cache.py", "fun_name": "test_get_entities_changed", "commit_message": "Add missing types to tests.util. (#14597)\n\nRemoves files under tests.util from the ignored by list, then\r\nfully types all tests/util/*.py files.", "code": "def test_get_entities_changed(self) -> None:\n \n cache = StreamChangeCache(\"#test\", 1)\n\n cache.entity_has_changed(\"user@foo.com\", 2)\n cache.entity_has_changed(\"bar@baz.net\", 3)\n cache.entity_has_changed(\"user@elsewhere.org\", 4)\n\n # Query all the entries, but mid-way through the stream. We should only\n # get the ones after that point.\n self.assertEqual(\n cache.get_entities_changed(\n [\"user@foo.com\", \"bar@baz.net\", \"user@elsewhere.org\"], stream_pos=2\n ),\n {\"bar@baz.net\", \"user@elsewhere.org\"},\n )\n\n # Query all the entries mid-way through the stream, but include one\n # that doesn't exist in it. We shouldn't get back the one that doesn't\n # exist.\n self.assertEqual(\n cache.get_entities_changed(\n [\n \"user@foo.com\",\n \"bar@baz.net\",\n \"user@elsewhere.org\",\n \"not@here.website\",\n ],\n stream_pos=2,\n ),\n {\"bar@baz.net\", \"user@elsewhere.org\"},\n )\n\n # Query all the entries, but before the first known point. 
We will get\n # all the entries we queried for, including ones that don't exist.\n self.assertEqual(\n cache.get_entities_changed(\n [\n \"user@foo.com\",\n \"bar@baz.net\",\n \"user@elsewhere.org\",\n \"not@here.website\",\n ],\n stream_pos=0,\n ),\n {\"user@foo.com\", \"bar@baz.net\", \"user@elsewhere.org\", \"not@here.website\"},\n )\n\n # Query a subset of the entries mid-way through the stream. We should\n # only get back the subset.\n self.assertEqual(\n cache.get_entities_changed([\"bar@baz.net\"], stream_pos=2),\n {\"bar@baz.net\"},\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 682, "n_words": 150, "vocab_size": 77, "complexity": 1, "nloc": 45, "token_counts": 158, "n_ast_nodes": 281, "n_identifiers": 8, "d_id": 73231, "documentation": { "docstring": "\n StreamChangeCache.get_entities_changed will return the entities in the\n given list that have changed since the provided stream ID. If the\n stream position is earlier than the earliest known position, it will\n return all of the entities queried for.\n ", "n_words": 37, "vocab_size": 28, "n_whitespaces": 74, "language": "en" } }, { "id": 60852, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/models/link.py", "file_name": "link.py", "fun_name": "is_hash_allowed", "commit_message": "upd; format", "code": "def is_hash_allowed(self, hashes):\n # type: (Optional[Hashes]) -> bool\n \n if hashes is None or not self.has_hash:\n return False\n # Assert non-None so mypy knows self.hash_name and self.hash are str.\n assert self.hash_name is not None\n assert self.hash is not None\n\n return hashes.is_hash_allowed(self.hash_name, hex_digest=self.hash)\n\n\n# TODO: Relax this comparison logic to ignore, for example, fragments.", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 111, "n_words": 52, "vocab_size": 40, "complexity": 3, "nloc": 6, "token_counts": 49, "n_ast_nodes": 79, "n_identifiers": 7, "d_id": 12300, "documentation": { "docstring": "\n Return True if the link has a hash and it is allowed.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 27, "language": "en" } }, { "id": 266497, "commit_id": "de5f60e374524de13fe079b52282cd7a9eeabd5f", "repo": "ansible", "path": "test/lib/ansible_test/_internal/provider/source/unsupported.py", "file_name": "unsupported.py", "fun_name": "get_paths", "commit_message": "ansible-test - Improve help for unsupported cwd. 
(#76866)\n\n* ansible-test - Improve help for unsupported cwd.\r\n\r\n* The `--help` option is now available when an unsupported cwd is in use.\r\n* The `--help` output now shows the same instructions about cwd as would be shown in error messages if the cwd is unsupported.\r\n* Add `--version` support to show the ansible-core version.\r\n* The explanation about cwd usage has been improved to explain more clearly what is required.\r\n\r\nResolves https://github.com/ansible/ansible/issues/64523\r\nResolves https://github.com/ansible/ansible/issues/67551", "code": "def get_paths(self, path): # type: (str) -> t.List[str]\n \n return []\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 25, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 21, "n_identifiers": 3, "d_id": 78438, "documentation": { "docstring": "Return the list of available content paths under the given path.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 60675, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/configuration.py", "file_name": "configuration.py", "fun_name": "_normalized_keys", "commit_message": "upd; format", "code": "def _normalized_keys(self, section, items):\n # type: (str, Iterable[Tuple[str, Any]]) -> Dict[str, Any]\n \n normalized = {}\n for name, val in items:\n key = section + \".\" + _normalize_name(name)\n normalized[key] = val\n return normalized\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 89, "n_words": 32, "vocab_size": 27, "complexity": 2, "nloc": 6, "token_counts": 39, "n_ast_nodes": 65, "n_identifiers": 9, "d_id": 12240, "documentation": { "docstring": "Normalizes items to construct a dictionary with normalized keys.\n\n This routine is where the names become keys and are made the same\n regardless of source - configuration files or environment.\n ", "n_words": 30, "vocab_size": 29, "n_whitespaces": 51, "language": "en" } }, { "id": 110184, "commit_id": "ca78e3d0eba4d948835c5499e0ff4084b998f28e", "repo": "matplotlib", "path": "lib/matplotlib/testing/decorators.py", "file_name": "decorators.py", "fun_name": "check_figures_equal", "commit_message": "[DOC] swapped params in fig_compare_error msg", "code": "def check_figures_equal(*, extensions=(\"png\", \"pdf\", \"svg\"), tol=0):\n ", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "\n \"\"\"\n Decorator for test cases that generate and compare two figures.\n\n The decorated function must take two keyword arguments, *fig_test*\n and *fig_ref*, and draw the test and reference images on them.\n After the function returns, the figures are saved and compared.\n\n This decorator should be preferred over `image_comparison` when possible in\n order to keep the size of the test suite from ballooning.\n\n Parameters\n ----------\n extensions : list, default: [\"png\", \"pdf\", \"svg\"]The extensions toThe RMS threshold above which thefailedIf any new figures are created (and not subsequently closed) inside\n the test function.\n\n Examples\n --------\n Check that calling `.Axes.plot` with a single argument plots it against\n ``[0, 1, 2, ...]``::and not subsequently closed) inside\n the testCheck that calling `.Axes.plot` with a single 
argument plots it against\n ``", "n_ast_errors": 8, "ast_levels": 9, "n_whitespaces": 9, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 39, "token_counts": 45, "n_ast_nodes": 130, "n_identifiers": 38, "d_id": 23964, "documentation": { "docstring": "\n Decorator for test cases that generate and compare two figures.\n\n The decorated function must take two keyword arguments, *fig_test*\n and *fig_ref*, and draw the test and reference images on them.\n After the function returns, the figures are saved and compared.\n\n This decorator should be preferred over `image_comparison` when possible in\n order to keep the size of the test suite from ballooning.\n\n Parameters\n ----------\n extensions : list, default: [\"png\", \"pdf\", \"svg\"]\n The extensions to test.\n tol : float\n The RMS threshold above which the test is considered failed.\n\n Raises\n ------\n RuntimeError\n If any new figures are created (and not subsequently closed) inside\n the test function.\n\n Examples\n --------\n Check that calling `.Axes.plot` with a single argument plots it against\n ``[0, 1, 2, ...]``::\n", "n_words": 121, "vocab_size": 97, "n_whitespaces": 200, "language": "en" } }, { "id": 176975, "commit_id": "abaa68779ccb4cce8d1a5ecade622ab96d01edeb", "repo": "networkx", "path": "networkx/algorithms/lowest_common_ancestors.py", "file_name": "lowest_common_ancestors.py", "fun_name": "lowest_common_ancestor", "commit_message": "Add examples to lowest common ancestors algorithms (#5531)\n\n* Add examples to lowest common ancestors documentation\r\n\r\n* Fix output style of examples\r\n\r\n* Fix output style of example\r\n\r\n* Update pre-commit\r\n\r\n* Update networkx/algorithms/lowest_common_ancestors.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Update networkx/algorithms/lowest_common_ancestors.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Indentation fix & pprint dictionary\r\n\r\n* Update networkx/algorithms/lowest_common_ancestors.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Update networkx/algorithms/lowest_common_ancestors.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Update networkx/algorithms/lowest_common_ancestors.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Move \"import pprint\" to the example\r\n\r\nCo-authored-by: dtuncturk \r\nCo-authored-by: Ross Barnowski ", "code": "def lowest_common_ancestor(G, node1, node2, default=None):\n \n ans = list(all_pairs_lowest_common_ancestor(G, pairs=[(node1, node2)]))\n if ans:\n assert len(ans) == 1\n return ans[0][1]\n else:\n return default\n\n\n@not_implemented_for(\"undirected\")\n@not_implemented_for(\"multigraph\")", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@not_implemented_for(\"undirected\")\n@not_implemented_for(\"multigraph\")", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 54, "n_words": 23, "vocab_size": 22, "complexity": 2, "nloc": 7, "token_counts": 55, "n_ast_nodes": 105, "n_identifiers": 11, "d_id": 42203, "documentation": { "docstring": "Compute the lowest common ancestor of the given pair of nodes.\n\n Parameters\n ----------\n G : NetworkX directed graph\n\n node1, node2 : nodes in the graph.\n\n default : object\n Returned if no common ancestor between `node1` and `node2`\n\n Returns\n -------\n The lowest common ancestor of node1 and node2,\n or default if they have no common ancestors.\n\n Examples\n --------\n >>> G = nx.DiGraph([(0, 1), (0, 2), (2, 3), (2, 4), (1, 6), (4, 5)])\n >>> nx.lowest_common_ancestor(G, 3, 5)\n 2\n\n We can also set 
`default` argument as below. The value of default is returned\n if there are no common ancestors of given two nodes.\n\n >>> G = nx.DiGraph([(4, 5), (12, 13)])\n >>> nx.lowest_common_ancestor(G, 12, 5, default=\"No common ancestors!\")\n 'No common ancestors!'\n\n Notes\n -----\n Only defined on non-null directed acyclic graphs.\n Takes n log(n) time in the size of the graph.\n See `all_pairs_lowest_common_ancestor` when you have\n more than one pair of nodes of interest.\n\n See Also\n --------\n tree_all_pairs_lowest_common_ancestor\n all_pairs_lowest_common_ancestor\n ", "n_words": 155, "vocab_size": 107, "n_whitespaces": 252, "language": "en" } }, { "id": 100313, "commit_id": "c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "lib/gui/analysis/stats.py", "file_name": "stats.py", "fun_name": "_get_per_session_stats", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def _get_per_session_stats(self):\n \n if self._per_session_stats is None:\n logger.debug(\"Collating per session stats\")\n compiled = []\n for session_id, ts_data in self._time_stats.items():\n logger.debug(\"Compiling session ID: %s\", session_id)\n if self._state is None:\n logger.debug(\"Session state dict doesn't exist. Most likely task has been \"\n \"terminated during compilation\")\n return\n compiled.append(self._collate_stats(session_id, ts_data))\n\n self._per_session_stats = list(sorted(compiled, key=lambda k: k[\"session\"]))\n\n elif self._session.is_training:\n logger.debug(\"Collating per session stats for latest training data\")\n session_id = self._session.session_ids[-1]\n ts_data = self._time_stats[session_id]\n\n if session_id > len(self._per_session_stats):\n self._per_session_stats.append(self._collate_stats(session_id, ts_data))\n\n stats = self._per_session_stats[-1]\n\n stats[\"start\"] = ts_data[\"start_time\"]\n stats[\"end\"] = ts_data[\"end_time\"]\n stats[\"elapsed\"] = int(stats[\"end\"] - stats[\"start\"])\n stats[\"iterations\"] = ts_data[\"iterations\"]\n stats[\"rate\"] = (((stats[\"batch\"] * 2) * stats[\"iterations\"])\n / stats[\"elapsed\"] if stats[\"elapsed\"] != 0 else 0)\n logger.debug(\"per_session_stats: %s\", self._per_session_stats)\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 447, "n_words": 107, "vocab_size": 79, "complexity": 7, "nloc": 26, "token_counts": 249, "n_ast_nodes": 425, "n_identifiers": 23, "d_id": 19810, "documentation": { "docstring": " Populate the attribute :attr:`_per_session_stats` with a sorted list by session ID\n of each ID in the training/loaded session. 
Stats contain the session ID, start, end and\n elapsed times, the training rate, batch size and number of iterations for each session.\n\n If a training session is running, then updates the training sessions stats only.\n ", "n_words": 53, "vocab_size": 39, "n_whitespaces": 82, "language": "en" } }, { "id": 281521, "commit_id": "82747072c511beb1b2672846ae2ee4aec53eb562", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/portfolio/portfolio_view.py", "file_name": "portfolio_view.py", "fun_name": "load_info", "commit_message": "Terminal Wide Rich (#1161)\n\n* My idea for how we handle Rich moving forward\r\n\r\n* remove independent consoles\r\n\r\n* FIxed pylint issues\r\n\r\n* add a few vars\r\n\r\n* Switched print to console\r\n\r\n* More transitions\r\n\r\n* Changed more prints\r\n\r\n* Replaced all prints\r\n\r\n* Fixing tabulate\r\n\r\n* Finished replace tabulate\r\n\r\n* Finished removing rich from Tabulate\r\n\r\n* add Panel around menu\r\n\r\n* add GST watermark under feature flag\r\n\r\n* Fixed 46 tests\r\n\r\n* Delete test_screener[False].yaml\r\n\r\n* Delete test_screener[True].yaml\r\n\r\n* Fixed the rest of the tests\r\n\r\n* add help and source color vars and use rgb\r\n\r\n* rich on stocks/options\r\n\r\n* update rich on disc, dps, sia\r\n\r\n* rich in gov, ins and scr menus\r\n\r\n* ba and ca menus with rich\r\n\r\n* Fixed import issue\r\n\r\n* Fixed some tests\r\n\r\n* removed termcolor\r\n\r\n* Removed prettytable\r\n\r\n* add rich to remaining stocks menus\r\n\r\n* FIxed linting issue\r\n\r\n* Added James' changes\r\n\r\n* Updated dependencies\r\n\r\n* Add rich to cryptocurrency menu\r\n\r\n* refactor economy and forex\r\n\r\n* refactor etf with rich\r\n\r\n* refactor mfunds\r\n\r\n* refactor rich rest\r\n\r\n* not specify style so default color works well on any background\r\n\r\n* Fixing mypy issues\r\n\r\n* Updated tests\r\n\r\n* More test fixes\r\n\r\n* James' test fixes\r\n\r\n* Updating tests : stocks/screener - fix cassettes using BR\r\n\r\n* Updating tests : crypto\r\n\r\n* Updating tests : disable DEBUG_MODE\r\n\r\n* Updating tests : stocks/fa/yfinance\r\n\r\n* minor fixes that escape\r\n\r\n* Improve the rich table function (that replaces tabulate :D )\r\n\r\n* Fixed bad code\r\n\r\n* delete rogue file + dcf fix + NoConsole\r\n\r\n* sia mypy\r\n\r\n* fuck you linter\r\n\r\n* fuck you linter pt 2\r\n\r\n* skip hehe\r\n\r\n* i hate the black linter\r\n\r\n* ubuntu mypy attempt\r\n\r\n* Update : rich_config + gtff\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : stocks\r\n\r\n* Update : rich_config\r\n\r\n* Updating : rich_config\r\n\r\n* make panel configurable for Theodore :b\r\n\r\n* colors update\r\n\r\n* Merged\r\n\r\n* Updating : rich_config + feature_flags\r\n\r\n* Updating : rich_config\r\n\r\n* Updating tests : stocks\r\n\r\n* Updating : feature_flags\r\n\r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: james \r\nCo-authored-by: jose-donato ", "code": "def load_info():\n \n text = \n console.print(text)\n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 15, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 16, "token_counts": 14, "n_ast_nodes": 40, "n_identifiers": 4, "d_id": 83824, "documentation": { "docstring": "Prints instructions to load a CSV\n\n Returns\n ----------\n text : str\n Information on how to load a csv\n \nIn order to load a CSV do the following:\n\n1. 
Add headers to the first row, below is data for each column:\\n\n\\t1. Identifier for the asset (such as a stock ticker)\n\\t2. Type of asset (stock, bond, option, crypto)\n\\t3. The volume of the asset transacted\n\\t4. The buy date in yyyy/mm/dd\n\\t5. The Price paid for the asset\n\\t6. Any fees paid during the transaction\n\\t7. A premium paid or received if this was an option\n\\t8. Whether the asset was bought (covered) or sold (shorted)\\n\n2. Place this file in gamestonk_terminal/portfolio/portfolios\\n\n ", "n_words": 112, "vocab_size": 82, "n_whitespaces": 128, "language": "en" } }, { "id": 196702, "commit_id": "9ad8ab9fe58051cf11626ba6654852fcfec60147", "repo": "sympy", "path": "sympy/stats/joint_rv_types.py", "file_name": "joint_rv_types.py", "fun_name": "MultivariateNormal", "commit_message": "Documentation cleanup 5", "code": "def MultivariateNormal(name, mu, sigma):\n r\n return multivariate_rv(MultivariateNormalDistribution, name, mu, sigma)\n\n#-------------------------------------------------------------------------------\n# Multivariate Laplace distribution --------------------------------------------\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 19, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 58, "token_counts": 22, "n_ast_nodes": 33, "n_identifiers": 6, "d_id": 48120, "documentation": { "docstring": "\n Creates a continuous random variable with Multivariate Normal\n Distribution.\n\n The density of the multivariate normal distribution can be found at [1].\n\n Parameters\n ==========\n\n mu : List representing the mean or the mean vector\n sigma : Positive semidefinite square matrix\n Represents covariance Matrix\n If `\\sigma` is noninvertible then only sampling is supported currently\n\n Returns\n =======\n\n RandomSymbol\n\n Examples\n ========\n\n >>> from sympy.stats import MultivariateNormal, density, marginal_distribution\n >>> from sympy import symbols, MatrixSymbol\n >>> X = MultivariateNormal('X', [3, 4], [[2, 1], [1, 2]])\n >>> y, z = symbols('y z')\n >>> density(X)(y, z)\n sqrt(3)*exp(-y**2/3 + y*z/3 + 2*y/3 - z**2/3 + 5*z/3 - 13/3)/(6*pi)\n >>> density(X)(1, 2)\n sqrt(3)*exp(-4/3)/(6*pi)\n >>> marginal_distribution(X, X[1])(y)\n exp(-(y - 4)**2/4)/(2*sqrt(pi))\n >>> marginal_distribution(X, X[0])(y)\n exp(-(y - 3)**2/4)/(2*sqrt(pi))\n\n The example below shows that it is also possible to use\n symbolic parameters to define the MultivariateNormal class.\n\n >>> n = symbols('n', integer=True, positive=True)\n >>> Sg = MatrixSymbol('Sg', n, n)\n >>> mu = MatrixSymbol('mu', n, 1)\n >>> obs = MatrixSymbol('obs', n, 1)\n >>> X = MultivariateNormal('X', mu, Sg)\n\n The density of a multivariate normal can be\n calculated using a matrix argument, as shown below.\n\n >>> density(X)(obs)\n (exp(((1/2)*mu.T - (1/2)*obs.T)*Sg**(-1)*(-mu + obs))/sqrt((2*pi)**n*Determinant(Sg)))[0, 0]\n\n References\n ==========\n\n .. 
[1] https://en.wikipedia.org/wiki/Multivariate_normal_distribution\n\n ", "n_words": 193, "vocab_size": 137, "n_whitespaces": 325, "language": "en" } }, { "id": 183434, "commit_id": "dd18ecbdbe744812509630935a877424202f2a70", "repo": "textual", "path": "src/textual/_text_backend.py", "file_name": "_text_backend.py", "fun_name": "cursor_text_end", "commit_message": "Docstring improvements", "code": "def cursor_text_end(self) -> bool:\n \n text_length = len(self.content)\n if self.cursor_index == text_length:\n return False\n\n self.cursor_index = text_length\n return True\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 64, "n_words": 18, "vocab_size": 14, "complexity": 2, "nloc": 11, "token_counts": 32, "n_ast_nodes": 54, "n_identifiers": 7, "d_id": 44186, "documentation": { "docstring": "Move the cursor to the end of the text\n\n Returns:\n bool: True if the cursor moved. False otherwise.\n ", "n_words": 18, "vocab_size": 14, "n_whitespaces": 43, "language": "en" } }, { "id": 30034, "commit_id": "d5ef58653803075849a6a13177e7a6e604aa2f60", "repo": "saleor", "path": "saleor/permission/models.py", "file_name": "models.py", "fun_name": "has_perms", "commit_message": "Move PermissionsMixin from django auth", "code": "def has_perms(self, perm_list, obj=None): # noqa: D205, D212, D400, D415\n \n return all(self.has_perm(perm, obj) for perm in perm_list)\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 32, "n_words": 17, "vocab_size": 17, "complexity": 2, "nloc": 2, "token_counts": 28, "n_ast_nodes": 44, "n_identifiers": 7, "d_id": 5285, "documentation": { "docstring": "\n Return True if the user has each of the specified permissions. 
If\n object is passed, check if the user has all required perms for it.\n ", "n_words": 25, "vocab_size": 20, "n_whitespaces": 47, "language": "en" } }, { "id": 114397, "commit_id": "44d7ef0e08e5144870ad2831ce6e221f9044c47c", "repo": "mindsdb", "path": "mindsdb/api/http/namespaces/util.py", "file_name": "util.py", "fun_name": "get", "commit_message": "'files' route", "code": "def get(self):\n \n response = {\n 'learn': False,\n 'predict': False,\n 'analyse': False\n }\n\n if os.name != 'posix':\n return response\n\n for process_type in response:\n processes_dir = Path(tempfile.gettempdir()).joinpath(f'mindsdb/processes/{process_type}/')\n if not processes_dir.is_dir():\n continue\n process_marks = [x.name for x in processes_dir.iterdir()]\n for p_mark in process_marks:\n pid = int(p_mark.split('-')[0])\n try:\n psutil.Process(pid)\n except Exception:\n processes_dir.joinpath(p_mark).unlink()\n else:\n response[process_type] = True\n\n return response\n\n\n@ns_conf.route('/telemetry')", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "@ns_conf.route('/telemetry')", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 316, "n_words": 55, "vocab_size": 42, "complexity": 7, "nloc": 22, "token_counts": 125, "n_ast_nodes": 230, "n_identifiers": 25, "d_id": 25182, "documentation": { "docstring": " Checks server use native for learn or analyse.\n Will return right result only on Linux.\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 34, "language": "en" } }, { "id": 155480, "commit_id": "b541b6c18e6fb4515e998b9b4f88528490cf69c6", "repo": "modin", "path": "modin/pandas/dataframe.py", "file_name": "dataframe.py", "fun_name": "add_suffix", "commit_message": "REFACTOR-#3948: Use `__constructor__` in `DataFrame` and `Series` classes (#5485)\n\nSigned-off-by: Anatoly Myachev ", "code": "def add_suffix(self, suffix): # noqa: PR01, RT01, D200\n \n return self.__constructor__(\n query_compiler=self._query_compiler.add_suffix(suffix)\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 45, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 24, "n_ast_nodes": 41, "n_identifiers": 6, "d_id": 36395, "documentation": { "docstring": "\n Suffix labels with string `suffix`.\n ", "n_words": 5, "vocab_size": 5, "n_whitespaces": 20, "language": "en" } }, { "id": 120043, "commit_id": "bc658e74567ffa941b31f4e89463dc713d2ecbf4", "repo": "jax", "path": "jax/_src/lax/linalg.py", "file_name": "linalg.py", "fun_name": "_broadcasting_select_mhlo", "commit_message": "[MHLO] Add direct MHLO lowerings for most linear algebra kernels.\n\nPiperOrigin-RevId: 439927594", "code": "def _broadcasting_select_mhlo(which, x, y):\n \n which_type, x_type, y_type = (\n ir.RankedTensorType(v.type) for v in (which, x, y))\n out_shape = list(lax_internal.broadcast_shapes(\n tuple(which_type.shape), tuple(x_type.shape), tuple(y_type.shape)))\n bcast_dims = lambda shape: mlir.dense_int_elements(\n range(len(out_shape) - len(shape), len(out_shape)))\n if which_type.shape != out_shape:\n which = mhlo.BroadcastInDimOp(\n ir.RankedTensorType.get(out_shape, which_type.element_type), which,\n bcast_dims(which_type.shape))\n if x_type.shape != out_shape:\n x = mhlo.BroadcastInDimOp(\n ir.RankedTensorType.get(out_shape, x_type.element_type), x,\n bcast_dims(x_type.shape))\n if y_type.shape != out_shape:\n y = mhlo.BroadcastInDimOp(\n ir.RankedTensorType.get(out_shape, 
y_type.element_type), y,\n bcast_dims(y_type.shape))\n return mhlo.SelectOp(which, x, y).result\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 140, "n_words": 68, "vocab_size": 50, "complexity": 5, "nloc": 20, "token_counts": 211, "n_ast_nodes": 316, "n_identifiers": 28, "d_id": 26747, "documentation": { "docstring": "Wrapper around XLA `Select` that broadcasts its arguments.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 278618, "commit_id": "3613c3defc39c236fb1592c4f7ba1a9cc887343a", "repo": "keras", "path": "keras/applications/regnet.py", "file_name": "regnet.py", "fun_name": "preprocess_input", "commit_message": "Remove pylint comments.\n\nPiperOrigin-RevId: 452353044", "code": "def preprocess_input(x, data_format=None):\n \n return x\n\n\n@keras_export(\"keras.applications.regnet.decode_predictions\")", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.applications.regnet.decode_predictions\")", "n_ast_errors": 1, "ast_levels": 7, "n_whitespaces": 11, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 32, "n_identifiers": 4, "d_id": 82631, "documentation": { "docstring": "A placeholder method for backward compatibility.\n\n The preprocessing logic has been included in the regnet model\n implementation. Users are no longer required to call this method to\n normalize the input data. This method does nothing and only kept as a\n placeholder to align the API surface between old and new version of model.\n\n Args:\n x: A floating point `numpy.array` or a `tf.Tensor`.\n data_format: Optional data format of the image tensor/array. 
Defaults to\n None, in which case the global setting\n `tf.keras.backend.image_data_format()` is used (unless you changed it,\n it defaults to \"channels_last\").{mode}\n\n Returns:\n Unchanged `numpy.array` or `tf.Tensor`.\n ", "n_words": 95, "vocab_size": 76, "n_whitespaces": 152, "language": "en" } }, { "id": 63461, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", "file_name": "pyparsing.py", "fun_name": "setDebugActions", "commit_message": "upd; format", "code": "def setDebugActions(self, startAction, successAction, exceptionAction):\n \n self.debugActions = (startAction or _defaultStartDebugAction,\n successAction or _defaultSuccessDebugAction,\n exceptionAction or _defaultExceptionDebugAction)\n self.debug = True\n return self\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 105, "n_words": 21, "vocab_size": 18, "complexity": 4, "nloc": 6, "token_counts": 36, "n_ast_nodes": 54, "n_identifiers": 10, "d_id": 13314, "documentation": { "docstring": "\n Enable display of debugging messages while doing pattern matching.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 136395, "commit_id": "326d84f1149319809191e7887155df7f04f6f46a", "repo": "ray", "path": "python/ray/train/predictor.py", "file_name": "predictor.py", "fun_name": "preferred_batch_format", "commit_message": "[AIR][Predictor] Enable numpy based predictor (#28917)\n\nCo-authored-by: Clark Zinzow \r\nCo-authored-by: Amog Kamsetty ", "code": "def preferred_batch_format(cls) -> BatchFormat:\n \n return BatchFormat.PANDAS\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 12, "token_counts": 12, "n_ast_nodes": 22, "n_identifiers": 4, "d_id": 30906, "documentation": { "docstring": "Batch format hint for upstream producers to try yielding best block format.\n\n The preferred batch format to use if both `_predict_pandas` and\n `_predict_numpy` are implemented. Defaults to Pandas.\n\n Can be overriden by predictor classes depending on the framework type,\n e.g. 
TorchPredictor prefers Numpy and XGBoostPredictor prefers Pandas as\n native batch format.\n\n ", "n_words": 51, "vocab_size": 44, "n_whitespaces": 93, "language": "en" } }, { "id": 30144, "commit_id": "fa2ad657482aca9dc628e6d7062b8badf2706bb6", "repo": "spotify-downloader", "path": "tests/types/test_song.py", "file_name": "test_song.py", "fun_name": "test_song_from_data_dump", "commit_message": "v4 init", "code": "def test_song_from_data_dump():\n \n\n # Loads from str\n song = Song.from_data_dump(\n \n )\n\n assert song.name == \"Ropes\"\n assert song.artists == [\"Dirty Palm\", \"Chandler Jewels\"]\n assert song.album_name == \"Ropes\"\n assert song.album_artist == \"Dirty Palm\"\n assert song.genres == [\"gaming edm\", \"melbourne bounce international\"]\n assert song.disc_number == 1\n assert song.duration == 188\n assert song.year == 2021\n assert song.date == \"2021-10-28\"\n assert song.track_number == 1\n assert song.tracks_count == 1\n assert song.isrc == \"GB2LD2110301\"\n assert song.song_id == \"1t2qKa8K72IBC8yQlhD9bU\"\n assert (\n song.cover_url\n == \"https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332\"\n )\n assert song.explicit == False\n assert song.download_url == None\n\n", "url": "https://github.com/spotDL/spotify-downloader.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 169, "n_words": 84, "vocab_size": 50, "complexity": 1, "nloc": 47, "token_counts": 119, "n_ast_nodes": 207, "n_identifiers": 20, "d_id": 5345, "documentation": { "docstring": "\n Tests if Song.from_data_dump() works correctly.\n \n {\n \"name\": \"Ropes\",\n \"artists\": [\"Dirty Palm\", \"Chandler Jewels\"],\n \"album_name\": \"Ropes\",\n \"album_artist\": \"Dirty Palm\",\n \"genres\": [\"gaming edm\", \"melbourne bounce international\"],\n \"disc_number\": 1,\n \"duration\": 188,\n \"year\": 2021,\n \"date\": \"2021-10-28\",\n \"track_number\": 1,\n \"tracks_count\": 1,\n \"isrc\": \"GB2LD2110301\",\n \"song_id\": \"1t2qKa8K72IBC8yQlhD9bU\",\n \"cover_url\": \"https://i.scdn.co/image/ab67616d0000b273fe2cb38e4d2412dbb0e54332\",\n \"explicit\": false,\n \"download_url\": null,\n \"artist\" : \"Dirty Palm\",\n \"disc_count\": 1,\n \"copyright\": \"\",\n \"publisher\": \"\",\n \"url\": \"https://open.spotify.com/track/1t2qKa8K72IBC8yQlhD9bU\"\n }\n ", "n_words": 59, "vocab_size": 51, "n_whitespaces": 319, "language": "en" } }, { "id": 200567, "commit_id": "1eee7b6ba5b4903ac889a73feab130572d232554", "repo": "sympy", "path": "sympy/tensor/tensor.py", "file_name": "tensor.py", "fun_name": "_dedupe_indices_in_rule", "commit_message": "Add TensMul._dedupe_indices_in_rule\n\nThis applies self._dedupe_indices on all values of `rule`.", "code": "def _dedupe_indices_in_rule(self, rule):\n \n index_rules = {k:v for k,v in rule.items() if isinstance(k, TensorIndex)}\n other_rules = {k:v for k,v in rule.items() if k not in index_rules.keys()}\n exclude = set(self.get_indices())\n\n newrule = {}\n newrule.update(index_rules)\n exclude.update(index_rules.keys())\n exclude.update(index_rules.values())\n for old, new in other_rules.items():\n new_renamed = self._dedupe_indices(new, exclude)\n if old == new or new_renamed is None:\n newrule[old] = new\n else:\n newrule[old] = new_renamed\n exclude.update(get_indices(new_renamed))\n return newrule\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 209, "n_words": 61, "vocab_size": 39, "complexity": 8, "nloc": 16, 
"token_counts": 153, "n_ast_nodes": 245, "n_identifiers": 21, "d_id": 49702, "documentation": { "docstring": "\n rule: dict\n\n This applies self._dedupe_indices on all values of rule.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 32, "language": "en" } }, { "id": 249114, "commit_id": "c97042f7eef3748e17c90e48a4122389a89c4735", "repo": "synapse", "path": "tests/rest/admin/test_media.py", "file_name": "test_media.py", "fun_name": "test_delete_media_never_accessed", "commit_message": "Use literals in place of `HTTPStatus` constants in tests (#13469)", "code": "def test_delete_media_never_accessed(self) -> None:\n \n\n # upload and do not access\n server_and_media_id = self._create_media()\n self.pump(1.0)\n\n # test that the file exists\n media_id = server_and_media_id.split(\"/\")[1]\n local_path = self.filepaths.local_media_filepath(media_id)\n self.assertTrue(os.path.exists(local_path))\n\n # timestamp after upload/create\n now_ms = self.clock.time_msec()\n channel = self.make_request(\n \"POST\",\n self.url + \"?before_ts=\" + str(now_ms),\n access_token=self.admin_user_tok,\n )\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(1, channel.json_body[\"total\"])\n self.assertEqual(\n media_id,\n channel.json_body[\"deleted_media\"][0],\n )\n\n self._access_media(server_and_media_id, False)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 230, "n_words": 56, "vocab_size": 48, "complexity": 1, "nloc": 23, "token_counts": 146, "n_ast_nodes": 236, "n_identifiers": 28, "d_id": 72621, "documentation": { "docstring": "\n Tests that media deleted if it is older than `before_ts` and never accessed\n `last_access_ts` is `NULL` and `created_ts` < `before_ts`\n ", "n_words": 20, "vocab_size": 17, "n_whitespaces": 42, "language": "en" } }, { "id": 221346, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/cmd.py", "file_name": "cmd.py", "fun_name": "parseline", "commit_message": "add python 3.10.4 for windows", "code": "def parseline(self, line):\n \n line = line.strip()\n if not line:\n return None, None, line\n elif line[0] == '?':\n line = 'help ' + line[1:]\n elif line[0] == '!':\n if hasattr(self, 'do_shell'):\n line = 'shell ' + line[1:]\n else:\n return None, None, line\n i, n = 0, len(line)\n while i < n and line[i] in self.identchars: i = i+1\n cmd, arg = line[:i], line[i:].strip()\n return cmd, arg, line\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 203, "n_words": 66, "vocab_size": 41, "complexity": 7, "nloc": 15, "token_counts": 129, "n_ast_nodes": 211, "n_identifiers": 11, "d_id": 56361, "documentation": { "docstring": "Parse the line into a command name and a string containing\n the arguments. 
Returns a tuple containing (command, args, line).\n 'command' and 'args' may be None if the line couldn't be parsed.\n ", "n_words": 32, "vocab_size": 24, "n_whitespaces": 54, "language": "en" } }, { "id": 72652, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/api/v2/filters.py", "file_name": "filters.py", "fun_name": "filter_queryset", "commit_message": "Reformat with black", "code": "def filter_queryset(self, request, queryset, view):\n \n fields = set(view.get_available_fields(queryset.model, db_fields_only=True))\n\n # Locale is a database field, but we provide a separate filter for it\n if \"locale\" in fields:\n fields.remove(\"locale\")\n\n for field_name, value in request.GET.items():\n if field_name in fields:\n try:\n field = queryset.model._meta.get_field(field_name)\n except LookupError:\n field = None\n\n # Convert value into python\n try:\n if isinstance(\n field, (models.BooleanField, models.NullBooleanField)\n ):\n value = parse_boolean(value)\n elif isinstance(field, (models.IntegerField, models.AutoField)):\n value = int(value)\n elif isinstance(field, models.ForeignKey):\n value = field.target_field.get_prep_value(value)\n except ValueError as e:\n raise BadRequestError(\n \"field filter error. '%s' is not a valid value for %s (%s)\"\n % (value, field_name, str(e))\n )\n\n if isinstance(field, TaggableManager):\n for tag in value.split(\",\"):\n queryset = queryset.filter(**{field_name + \"__name\": tag})\n\n # Stick a message on the queryset to indicate that tag filtering has been performed\n # This will let the do_search method know that it must raise an error as searching\n # and tag filtering at the same time is not supported\n queryset._filtered_by_tag = True\n else:\n queryset = queryset.filter(**{field_name: value})\n\n return queryset\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 758, "n_words": 162, "vocab_size": 108, "complexity": 11, "nloc": 31, "token_counts": 220, "n_ast_nodes": 359, "n_identifiers": 39, "d_id": 15909, "documentation": { "docstring": "\n This performs field level filtering on the result set\n Eg: ?title=James Joyce\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 34, "language": "en" } }, { "id": 105068, "commit_id": "5669b8c8d75b8c3106abd23f21d902d1f020e25d", "repo": "datasets", "path": "src/datasets/builder.py", "file_name": "builder.py", "fun_name": "_generate_examples", "commit_message": "Add missing kwargs to docstrings (#4446)", "code": "def _generate_examples(self, **kwargs):\n \n raise NotImplementedError()\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 24, "n_identifiers": 4, "d_id": 22061, "documentation": { "docstring": "Default function generating examples for each `SplitGenerator`.\n\n This function preprocess the examples from the raw data to the preprocessed\n dataset files.\n This function is called once for each `SplitGenerator` defined in\n `_split_generators`. 
The examples yielded here will be written on\n disk.\n\n Args:\n **kwargs (additional keyword arguments): Arguments forwarded from the SplitGenerator.gen_kwargs\n\n Yields:\n key: `str` or `int`, a unique deterministic example identification key.\n * Unique: An error will be raised if two examples are yield with the\n same key.\n * Deterministic: When generating the dataset twice, the same example\n should have the same key.\n Good keys can be the image id, or line number if examples are extracted\n from a text file.\n The key will be hashed and sorted to shuffle examples deterministically,\n such as generating the dataset multiple times keep examples in the\n same order.\n example: `dict`, a feature dictionary\n ready to be encoded and written to disk. The example will be\n encoded with `self.info.features.encode_example({...})`.\n ", "n_words": 157, "vocab_size": 98, "n_whitespaces": 419, "language": "en" } }, { "id": 81164, "commit_id": "452744b67e02823879e722fe574984a2d760ed60", "repo": "awx", "path": "awx/main/tasks/callback.py", "file_name": "callback.py", "fun_name": "get_delayed_update_fields", "commit_message": "Delay update of artifacts and error fields until final job save (#11832)\n\n* Delay update of artifacts until final job save\r\n\r\nSave tracebacks from receptor module to callback object\r\n\r\nMove receptor traceback check up to be more logical\r\n\r\nUse new mock_me fixture to avoid DB call with me method\r\n\r\nUpdate the special runner message to the delay_update pattern\r\n\r\n* Move special runner message into post-processing of callback fields", "code": "def get_delayed_update_fields(self):\n \n self.extra_update_fields['emitted_events'] = self.event_ct\n if 'got an unexpected keyword argument' in self.extra_update_fields.get('result_traceback', ''):\n self.delay_update(result_traceback=ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE)\n return self.extra_update_fields\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 56, "n_words": 17, "vocab_size": 17, "complexity": 2, "nloc": 5, "token_counts": 42, "n_ast_nodes": 76, "n_identifiers": 8, "d_id": 17165, "documentation": { "docstring": "Return finalized dict of all fields that should be saved along with the job status change", "n_words": 16, "vocab_size": 16, "n_whitespaces": 15, "language": "en" } }, { "id": 23815, "commit_id": "63484257442362057ab4ea4acd769d52d42da9f1", "repo": "PaddleOCR", "path": "ppocr/modeling/heads/rec_robustscanner_head.py", "file_name": "rec_robustscanner_head.py", "fun_name": "forward_train", "commit_message": "add robustscanner", "code": "def forward_train(self, feat, out_enc, targets, valid_ratios):\n \n\n tgt_embedding = self.embedding(targets)\n\n n, c_enc, h, w = out_enc.shape\n assert c_enc == self.dim_model\n _, c_feat, _, _ = feat.shape\n assert c_feat == self.dim_input\n _, len_q, c_q = tgt_embedding.shape\n assert c_q == self.dim_model\n assert len_q <= self.max_seq_len\n\n query, _ = self.sequence_layer(tgt_embedding)\n query = paddle.transpose(query, (0, 2, 1))\n key = paddle.reshape(out_enc, [n, c_enc, h * w])\n if self.encode_value:\n value = key\n else:\n value = paddle.reshape(feat, [n, c_feat, h * w])\n\n # mask = None\n # if valid_ratios is not None:\n # mask = paddle.zeros(shape=[n, len_q, h, w], dtype='bool')\n # for i, valid_ratio in enumerate(valid_ratios):\n # valid_width = min(w, math.ceil(w * valid_ratio))\n # if valid_width < w:\n # mask[i, :, :, valid_width:] = True\n # # mask = 
mask.view(n, h * w)\n # mask = paddle.reshape(mask, (n, len_q, h * w))\n\n attn_out = self.attention_layer(query, key, value, h, w, valid_ratios)\n # attn_out = attn_out.permute(0, 2, 1).contiguous()\n attn_out = paddle.transpose(attn_out, (0, 2, 1))\n\n if self.return_feature:\n return attn_out\n\n out = self.prediction(attn_out)\n\n return out\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 443, "n_words": 163, "vocab_size": 91, "complexity": 3, "nloc": 22, "token_counts": 200, "n_ast_nodes": 300, "n_identifiers": 33, "d_id": 4647, "documentation": { "docstring": "\n Args:\n feat (Tensor): Tensor of shape :math:`(N, D_i, H, W)`.\n out_enc (Tensor): Encoder output of shape\n :math:`(N, D_m, H, W)`.\n targets (Tensor): a tensor of shape :math:`(N, T)`. Each element is the index of a\n character.\n valid_ratios (Tensor): valid length ratio of img.\n Returns:\n Tensor: A raw logit tensor of shape :math:`(N, T, C-1)` if\n ``return_feature=False``. Otherwise it would be the hidden feature\n before the prediction projection layer, whose shape is\n :math:`(N, T, D_m)`.\n ", "n_words": 74, "vocab_size": 50, "n_whitespaces": 214, "language": "en" } }, { "id": 204719, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/management/utils.py", "file_name": "utils.py", "fun_name": "normalize_path_patterns", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def normalize_path_patterns(patterns):\n \n patterns = [os.path.normcase(p) for p in patterns]\n dir_suffixes = {\"%s*\" % path_sep for path_sep in {\"/\", os.sep}}\n norm_patterns = []\n for pattern in patterns:\n for dir_suffix in dir_suffixes:\n if pattern.endswith(dir_suffix):\n norm_patterns.append(pattern[: -len(dir_suffix)])\n break\n else:\n norm_patterns.append(pattern)\n return norm_patterns\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 123, "n_words": 39, "vocab_size": 29, "complexity": 6, "nloc": 12, "token_counts": 86, "n_ast_nodes": 141, "n_identifiers": 15, "d_id": 50851, "documentation": { "docstring": "Normalize an iterable of glob style patterns based on OS.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 12341, "commit_id": "072a47a4fa97aca68203882e1ef809681a523097", "repo": "jina", "path": "jina/excepts.py", "file_name": "excepts.py", "fun_name": "details", "commit_message": "feat: better error messages when gateway can't connect to other deployment (#4677)", "code": "def details(self):\n \n return self._details if self._details else self.og_exception.details()\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 22, "n_words": 8, "vocab_size": 7, "complexity": 2, "nloc": 2, "token_counts": 22, "n_ast_nodes": 37, "n_identifiers": 4, "d_id": 2262, "documentation": { "docstring": "\n :return: details of this exception\n ", "n_words": 5, "vocab_size": 5, "n_whitespaces": 20, "language": "en" } }, { "id": 146461, "commit_id": "f646d3fc312f63a6cf3e59a00ae1b3d6ab40393a", "repo": "ray", "path": "python/ray/serve/api.py", "file_name": "api.py", "fun_name": "bind", "commit_message": "[serve] Add unimplemented interfaces for Deployment DAG APIs (#23125)\n\nAdds the following interfaces (without implementation, for discussion / approval):\r\n- 
`serve.Application`\r\n- `serve.DeploymentNode`\r\n- `serve.DeploymentMethodNode`, `serve.DAGHandle`, and `serve.drivers.PipelineDriver`\r\n- `serve.run` & `serve.build`\r\n\r\nIn addition to these Python APIs, we will also support the following CLI commands:\r\n- `serve run [--blocking=true] my_file:my_node_or_app # Uses Ray client, blocking by default.`\r\n- `serve build my_file:my_node output_path.yaml`\r\n- `serve deploy [--blocking=false] # Uses REST API, non-blocking by default.`\r\n- `serve status [--watch=false] # Uses REST API, non-blocking by default.`", "code": "def bind(self, *args, **kwargs) -> DeploymentNode:\n \n raise NotImplementedError()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 7, "token_counts": 18, "n_ast_nodes": 31, "n_identifiers": 6, "d_id": 33689, "documentation": { "docstring": "Bind the provided arguments and return a DeploymentNode.\n\n The returned bound deployment can be deployed or bound to other\n deployments to create a multi-deployment application.\n ", "n_words": 25, "vocab_size": 22, "n_whitespaces": 46, "language": "en" } }, { "id": 198410, "commit_id": "e94a7b45d7b033ccbd57395dca28b654f875c54c", "repo": "sympy", "path": "sympy/integrals/meijerint.py", "file_name": "meijerint.py", "fun_name": "_inflate_g", "commit_message": "Improve loop performance", "code": "def _inflate_g(g, n):\n \n # TODO should this be a method of meijerg?\n # See: [L, page 150, equation (5)]", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 28, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 8, "token_counts": 118, "n_ast_nodes": 17, "n_identifiers": 3, "d_id": 48919, "documentation": { "docstring": " Return C, h such that h is a G function of argument z**n and\n g = C*h. 
", "n_words": 17, "vocab_size": 16, "n_whitespaces": 25, "language": "en" } }, { "id": 123469, "commit_id": "df4293473d2fb6e887e31522cab5aff95e201581", "repo": "sqlmap", "path": "lib/core/common.py", "file_name": "common.py", "fun_name": "paramToDict", "commit_message": "Fixing DeprecationWarning (logger.warn)", "code": "def paramToDict(place, parameters=None):\n \n\n testableParameters = OrderedDict()\n\n if place in conf.parameters and not parameters:\n parameters = conf.parameters[place]\n\n parameters = re.sub(r\"&(\\w{1,4});\", r\"%s\\g<1>%s\" % (PARAMETER_AMP_MARKER, PARAMETER_SEMICOLON_MARKER), parameters)\n if place == PLACE.COOKIE:\n splitParams = parameters.split(conf.cookieDel or DEFAULT_COOKIE_DELIMITER)\n else:\n splitParams = parameters.split(conf.paramDel or DEFAULT_GET_POST_DELIMITER)\n\n for element in splitParams:\n element = re.sub(r\"%s(.+?)%s\" % (PARAMETER_AMP_MARKER, PARAMETER_SEMICOLON_MARKER), r\"&\\g<1>;\", element)\n parts = element.split(\"=\")\n\n if len(parts) >= 2:\n parameter = urldecode(parts[0].replace(\" \", \"\"))\n\n if not parameter:\n continue\n\n if conf.paramDel and conf.paramDel == '\\n':\n parts[-1] = parts[-1].rstrip()\n\n condition = not conf.testParameter\n condition |= conf.testParameter is not None and parameter in conf.testParameter\n condition |= place == PLACE.COOKIE and len(intersect((PLACE.COOKIE,), conf.testParameter, True)) > 0\n\n if condition:\n value = \"=\".join(parts[1:])\n\n if parameter in (conf.base64Parameter or []):\n try:\n kb.base64Originals[parameter] = oldValue = value\n value = urldecode(value, convall=True)\n value = decodeBase64(value, binary=False, encoding=conf.encoding or UNICODE_ENCODING)\n parameters = re.sub(r\"\\b%s(\\b|\\Z)\" % re.escape(oldValue), value, parameters)\n except:\n errMsg = \"parameter '%s' does not contain \" % parameter\n errMsg += \"valid Base64 encoded value ('%s')\" % value\n raise SqlmapValueException(errMsg)\n\n testableParameters[parameter] = value\n\n if not conf.multipleTargets and not (conf.csrfToken and re.search(conf.csrfToken, parameter, re.I)):\n _ = urldecode(testableParameters[parameter], convall=True)\n if (_.endswith(\"'\") and _.count(\"'\") == 1 or re.search(r'\\A9{3,}', _) or re.search(r'\\A-\\d+\\Z', _) or re.search(DUMMY_USER_INJECTION, _)) and not parameter.upper().startswith(GOOGLE_ANALYTICS_COOKIE_PREFIX):\n warnMsg = \"it appears that you have provided tainted parameter values \"\n warnMsg += \"('%s') with most likely leftover \" % element\n warnMsg += \"chars/statements from manual SQL injection test(s). \"\n warnMsg += \"Please, always use only valid parameter values \"\n warnMsg += \"so sqlmap could be able to run properly\"\n logger.warning(warnMsg)\n\n message = \"are you really sure that you want to continue (sqlmap could have problems)? [y/N] \"\n\n if not readInput(message, default='N', boolean=True):\n raise SqlmapSilentQuitException\n elif not _:\n warnMsg = \"provided value for parameter '%s' is empty. 
\" % parameter\n warnMsg += \"Please, always use only valid parameter values \"\n warnMsg += \"so sqlmap could be able to run properly\"\n logger.warning(warnMsg)\n\n if place in (PLACE.POST, PLACE.GET):\n for regex in (r\"\\A((?:<[^>]+>)+\\w+)((?:<[^>]+>)+)\\Z\", r\"\\A([^\\w]+.*\\w+)([^\\w]+)\\Z\"):\n match = re.search(regex, testableParameters[parameter])\n if match:\n try:\n candidates = OrderedDict()\n", "url": "https://github.com/sqlmapproject/sqlmap.git", "language": "Python", "ast_errors": "def paramToDict(place, parameters=None):\n \"\"\"\n Split the parameters into names and values, check if these parameters\n are within the testable parameters and return in a dictionary.\n \"\"\"\n\n testableParameters = OrderedDict()\n\n if place in conf.parameters and not parameters:\n parameters = conf.parameters[place]\n\n parameters = re.sub(r\"&(\\w{1,4});\", r\"%s\\g<1>%s\" % (PARAMETER_AMP_MARKER, PARAMETER_SEMICOLON_MARKER), parameters)\n if place == PLACE.COOKIE:\n splitParams = parameters.split(conf.cookieDel or DEFAULT_COOKIE_DELIMITER)\n else:\n splitParams = parameters.split(conf.paramDel or DEFAULT_GET_POST_DELIMITER)\n\n for element in splitParams:\n element = re.sub(r\"%s(.+?)%s\" % (PARAMETER_AMP_MARKER, PARAMETER_SEMICOLON_MARKER), r\"&\\g<1>;\", element)\n parts = element.split(\"=\")\n\n if len(parts) >= 2:\n parameter = urldecode(parts[0].replace(\" \", \"\"))\n\n if not parameter:\n continue\n\n if conf.paramDel and conf.paramDel == '\\n':\n parts[-1] = parts[-1].rstrip()\n\n condition = not conf.testParameter\n condition |= conf.testParameter is not None and parameter in conf.testParameter\n condition |= place == PLACE.COOKIE and len(intersect((PLACE.COOKIE,), conf.testParameter, True)) > 0\n\n if condition:\n value = \"=\".join(parts[1:])\n\n if parameter in (conf.base64Parameter or []):\n try:\n kb.base64Originals[parameter] = oldValue = value\n value = urldecode(value, convall=True)\n value = decodeBase64(value, binary=False, encoding=conf.encoding or UNICODE_ENCODING)\n parameters = re.sub(r\"\\b%s(\\b|\\Z)\" % re.escape(oldValue), value, parameters)\n except:\n errMsg = \"parameter '%s' does not contain \" % parameter\n errMsg += \"valid Base64 encoded value ('%s')\" % value\n raise SqlmapValueException(errMsg)\n\n testableParameters[parameter] = value\n\n if not conf.multipleTargets and not (conf.csrfToken and re.search(conf.csrfToken, parameter, re.I)):\n _ = urldecode(testableParameters[parameter], convall=True)\n if (_.endswith(\"'\") and _.count(\"'\") == 1 or re.search(r'\\A9{3,}', _) or re.search(r'\\A-\\d+\\Z', _) or re.search(DUMMY_USER_INJECTION, _)) and not parameter.upper().startswith(GOOGLE_ANALYTICS_COOKIE_PREFIX):\n warnMsg = \"it appears that you have provided tainted parameter values \"\n warnMsg += \"('%s') with most likely leftover \" % element\n warnMsg += \"chars/statements from manual SQL injection test(s). \"\n warnMsg += \"Please, always use only valid parameter values \"\n warnMsg += \"so sqlmap could be able to run properly\"\n logger.warning(warnMsg)\n\n message = \"are you really sure that you want to continue (sqlmap could have problems)? [y/N] \"\n\n if not readInput(message, default='N', boolean=True):\n raise SqlmapSilentQuitException\n elif not _:\n warnMsg = \"provided value for parameter '%s' is empty. 
\" % parameter\n warnMsg += \"Please, always use only valid parameter values \"\n warnMsg += \"so sqlmap could be able to run properly\"\n logger.warning(warnMsg)\n\n if place in (PLACE.POST, PLACE.GET):\n for regex in (r\"\\A((?:<[^>]+>)+\\w+)((?:<[^>]+>)+)\\Z\", r\"\\A([^\\w]+.*\\w+)([^\\w]+)\\Z\"):\n match = re.search(regex, testableParameters[parameter])\n if match:\n try:\n candidates = OrderedDict()\n", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 1252, "n_words": 325, "vocab_size": 170, "complexity": 56, "nloc": 110, "token_counts": 979, "n_ast_nodes": 868, "n_identifiers": 66, "d_id": 27381, "documentation": { "docstring": "\n Split the parameters into names and values, check if these parameters\n are within the testable parameters and return in a dictionary.\n ", "n_words": 21, "vocab_size": 17, "n_whitespaces": 31, "language": "en" } }, { "id": 243502, "commit_id": "f9a3178bb34e6b28bc46d42ef88f5069ebabde32", "repo": "Pillow", "path": "src/PIL/ImagePalette.py", "file_name": "ImagePalette.py", "fun_name": "getcolor", "commit_message": "Fix #6652: Handle translucent color used in RGB ImagePallete", "code": "def getcolor(self, color, image=None):\n \n if self.rawmode:\n raise ValueError(\"palette contains raw palette data\")\n if isinstance(color, tuple):\n if self.mode == \"RGB\" and len(color) == 4:\n if color[3] == 255:\n color = color[:3]\n else:\n raise ValueError(\n \"RGB ImagePalette can't handle non-opaque RGBA colors\"\n )\n elif self.mode == \"RGBA\":\n if len(color) == 3:\n color += (255,)\n try:\n return self.colors[color]\n except KeyError as e:\n # allocate new color slot\n if not isinstance(self.palette, bytearray):\n self._palette = bytearray(self.palette)\n index = len(self.palette) // 3\n special_colors = ()\n if image:\n special_colors = (\n image.info.get(\"background\"),\n image.info.get(\"transparency\"),\n )\n while index in special_colors:\n index += 1\n if index >= 256:\n if image:\n # Search for an unused index\n for i, count in reversed(list(enumerate(image.histogram()))):\n if count == 0 and i not in special_colors:\n index = i\n break\n if index >= 256:\n raise ValueError(\"cannot allocate more than 256 colors\") from e\n self.colors[color] = index\n if index * 3 < len(self.palette):\n self._palette = (\n self.palette[: index * 3]\n + bytes(color)\n + self.palette[index * 3 + 3 :]\n )\n else:\n self._palette += bytes(color)\n self.dirty = 1\n return index\n else:\n raise ValueError(f\"unknown color specifier: {repr(color)}\")\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 1055, "n_words": 178, "vocab_size": 102, "complexity": 19, "nloc": 49, "token_counts": 299, "n_ast_nodes": 500, "n_identifiers": 29, "d_id": 70038, "documentation": { "docstring": "Given an rgb tuple, allocate palette entry.\n\n .. 
warning:: This method is experimental.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 27, "language": "en" } }, { "id": 243968, "commit_id": "301d4a2d4cfe1cdb62608e2892924be3e67e3098", "repo": "mmdetection", "path": "mmdet/core/visualization/image.py", "file_name": "image.py", "fun_name": "draw_masks", "commit_message": "[Feature] Support visualization for Panoptic Segmentation (#7041)\n\n* First commit of v2\r\n\r\n* split the functions\r\n\r\n* Support to show panoptic result\r\n\r\n* temp\r\n\r\n* Support to show gt\r\n\r\n* support show gt\r\n\r\n* fix lint\r\n\r\n* Support to browse datasets\r\n\r\n* Fix unit tests\r\n\r\n* Fix findContours\r\n\r\n* fix comments\r\n\r\n* Fix pre-commit\r\n\r\n* fix lint\r\n\r\n* Add the type of an argument", "code": "def draw_masks(ax, img, masks, color=None, with_edge=True, alpha=0.8):\n \n taken_colors = set([0, 0, 0])\n if color is None:\n random_colors = np.random.randint(0, 255, (masks.size(0), 3))\n color = [tuple(c) for c in random_colors]\n color = np.array(color, dtype=np.uint8)\n polygons = []\n for i, mask in enumerate(masks):\n if with_edge:\n contours, _ = bitmap_to_polygon(mask)\n polygons += [Polygon(c) for c in contours]\n\n color_mask = color[i]\n while tuple(color_mask) in taken_colors:\n color_mask = _get_bias_color(color_mask)\n taken_colors.add(tuple(color_mask))\n\n mask = mask.astype(bool)\n img[mask] = img[mask] * (1 - alpha) + color_mask * alpha\n\n p = PatchCollection(\n polygons, facecolor='none', edgecolors='w', linewidths=1, alpha=0.8)\n ax.add_collection(p)\n\n return ax, img\n\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 218, "n_words": 91, "vocab_size": 66, "complexity": 7, "nloc": 21, "token_counts": 217, "n_ast_nodes": 325, "n_identifiers": 38, "d_id": 70170, "documentation": { "docstring": "Draw masks on the image and their edges on the axes.\n\n Args:\n ax (matplotlib.Axes): The input axes.\n img (ndarray): The image with the shape of (3, h, w).\n masks (ndarray): The masks with the shape of (n, h, w).\n color (ndarray): The colors for each masks with the shape\n of (n, 3).\n with_edge (bool): Whether to draw edges. Default: True.\n alpha (float): Transparency of bounding boxes. Default: 0.8.\n\n Returns:\n matplotlib.Axes: The result axes.\n ndarray: The result image.\n ", "n_words": 77, "vocab_size": 47, "n_whitespaces": 153, "language": "en" } }, { "id": 132194, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tune/experiment.py", "file_name": "experiment.py", "fun_name": "convert_to_experiment_list", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def convert_to_experiment_list(experiments):\n \n exp_list = experiments\n\n # Transform list if necessary\n if experiments is None:\n exp_list = []\n elif isinstance(experiments, Experiment):\n exp_list = [experiments]\n elif type(experiments) is dict:\n exp_list = [\n Experiment.from_json(name, spec) for name, spec in experiments.items()\n ]\n\n # Validate exp_list\n if type(exp_list) is list and all(isinstance(exp, Experiment) for exp in exp_list):\n if len(exp_list) > 1:\n logger.info(\n \"Running with multiple concurrent experiments. 
\"\n \"All experiments will be using the same SearchAlgorithm.\"\n )\n else:\n raise TuneError(\"Invalid argument: {}\".format(experiments))\n\n return exp_list\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 214, "n_words": 79, "vocab_size": 59, "complexity": 9, "nloc": 19, "token_counts": 112, "n_ast_nodes": 188, "n_identifiers": 19, "d_id": 29694, "documentation": { "docstring": "Produces a list of Experiment objects.\n\n Converts input from dict, single experiment, or list of\n experiments to list of experiments. If input is None,\n will return an empty list.\n\n Arguments:\n experiments (Experiment | list | dict): Experiments to run.\n\n Returns:\n List of experiments.\n ", "n_words": 43, "vocab_size": 32, "n_whitespaces": 75, "language": "en" } }, { "id": 245072, "commit_id": "bb7239ff635c4d9afd9c37a6e432251029aafb51", "repo": "mmdetection", "path": "tests/test_core/test_bbox/test_assigners/test_approx_max_iou_assigner.py", "file_name": "test_approx_max_iou_assigner.py", "fun_name": "test_approx_iou_assigner_with_empty_boxes_and_gt", "commit_message": "Refactor SABL RetinaNet", "code": "def test_approx_iou_assigner_with_empty_boxes_and_gt(self):\n \n assigner = ApproxMaxIoUAssigner(\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n )\n bboxes = torch.empty((0, 4))\n gt_bboxes = torch.empty((0, 4))\n gt_labels = torch.LongTensor([])\n\n pred_instances = InstanceData()\n pred_instances.priors = bboxes\n pred_instances.approxs = bboxes[:, None, :]\n gt_instances = InstanceData()\n gt_instances.bboxes = gt_bboxes\n gt_instances.labels = gt_labels\n assign_result = assigner.assign(pred_instances, gt_instances)\n\n self.assertEqual(len(assign_result.gt_inds), 0)\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 165, "n_words": 45, "vocab_size": 29, "complexity": 1, "nloc": 16, "token_counts": 116, "n_ast_nodes": 178, "n_identifiers": 23, "d_id": 70644, "documentation": { "docstring": "Test corner case where an network might predict no boxes and no\n gt.", "n_words": 13, "vocab_size": 12, "n_whitespaces": 19, "language": "en" } }, { "id": 216118, "commit_id": "8ea5342cbde034383938e244cdb16a0bf8a777e8", "repo": "salt", "path": "tests/pytests/unit/modules/test_yumpkg.py", "file_name": "test_yumpkg.py", "fun_name": "test_remove_not_installed", "commit_message": "Fix exception in yumpkg.remove for not installed package", "code": "def test_remove_not_installed():\n \n name = \"foo\"\n list_pkgs_mock = MagicMock(return_value={})\n cmd_mock = MagicMock(\n return_value={\"pid\": 12345, \"retcode\": 0, \"stdout\": \"\", \"stderr\": \"\"}\n )\n salt_mock = {\n \"cmd.run_all\": cmd_mock,\n \"lowpkg.version_cmp\": rpm.version_cmp,\n \"pkg_resource.parse_targets\": MagicMock(\n return_value=({name: None}, \"repository\")\n ),\n }\n with patch.object(yumpkg, \"list_pkgs\", list_pkgs_mock), patch(\n \"salt.utils.systemd.has_scope\", MagicMock(return_value=False)\n ), patch.dict(yumpkg.__salt__, salt_mock):\n\n # Test yum\n with patch.dict(yumpkg.__context__, {\"yum_bin\": \"yum\"}), patch.dict(\n yumpkg.__grains__, {\"os\": \"CentOS\", \"osrelease\": 7}\n ):\n yumpkg.remove(name)\n cmd_mock.assert_not_called()\n\n # Test dnf\n yumpkg.__context__.pop(\"yum_bin\")\n cmd_mock.reset_mock()\n with patch.dict(yumpkg.__context__, {\"yum_bin\": \"dnf\"}), patch.dict(\n yumpkg.__grains__, {\"os\": \"Fedora\", \"osrelease\": 
27}\n ):\n yumpkg.remove(name)\n cmd_mock.assert_not_called()\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 280, "n_words": 78, "vocab_size": 60, "complexity": 1, "nloc": 28, "token_counts": 212, "n_ast_nodes": 379, "n_identifiers": 20, "d_id": 54409, "documentation": { "docstring": "\n Tests that no exception raised on removing not installed package\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 17, "language": "en" } }, { "id": 323000, "commit_id": "93cae49c0c572b5c1ac972759140fbe924b0374d", "repo": "PaddleNLP", "path": "examples/model_interpretation/task/transformer.py", "file_name": "transformer.py", "fun_name": "forward", "commit_message": "Add NLP model interpretation (#1752)\n\n* upload NLP interpretation\r\n\r\n* fix problems and relocate project\r\n\r\n* remove abandoned picture\r\n\r\n* remove abandoned picture\r\n\r\n* fix dead link in README\r\n\r\n* fix dead link in README\r\n\r\n* fix code style problems\r\n\r\n* fix CR round 1\r\n\r\n* remove .gitkeep files\r\n\r\n* fix code style\r\n\r\n* fix file encoding problem\r\n\r\n* fix code style\r\n\r\n* delete duplicated files due to directory rebuild\r\n\r\n* fix CR round 2\r\n\r\n* fix code style\r\n\r\n* fix ernie tokenizer\r\n\r\n* fix code style\r\n\r\n* fix problem from CR round 1\r\n\r\n* fix bugs\r\n\r\n* fix README\r\n\r\n* remove duplicated files\r\n\r\n* deal with diff of old and new tokenizer results\r\n\r\n* fix CR round 4\r\n\r\n* fix code style\r\n\r\n* add missing dependence\r\n\r\n* fix broken import path\r\n\r\n* move some data file to cloud\r\n\r\n* MRC upper case to lower case\r\n\r\nCo-authored-by: Zeyu Chen \r\nCo-authored-by: binlinquge \r\nCo-authored-by: Guo Sheng ", "code": "def forward(self, tgt, memory, tgt_mask=None, memory_mask=None, cache=None):\n r\n tgt_mask = _convert_attention_mask(tgt_mask, tgt.dtype)\n memory_mask = _convert_attention_mask(memory_mask, memory.dtype)\n\n residual = tgt\n if self.normalize_before:\n tgt = self.norm1(tgt)\n if cache is None:\n tgt = self.self_attn(tgt, tgt, tgt, tgt_mask, None)\n else:\n tgt, incremental_cache = self.self_attn(tgt, tgt, tgt, tgt_mask,\n cache[0])\n tgt = residual + self.dropout1(tgt)\n if not self.normalize_before:\n tgt = self.norm1(tgt)\n\n residual = tgt\n if self.normalize_before:\n tgt = self.norm2(tgt)\n if cache is None:\n tgt = self.cross_attn(tgt, memory, memory, memory_mask, None)\n else:\n tgt, static_cache = self.cross_attn(tgt, memory, memory,\n memory_mask, cache[1])\n tgt = residual + self.dropout2(tgt)\n if not self.normalize_before:\n tgt = self.norm2(tgt)\n\n residual = tgt\n if self.normalize_before:\n tgt = self.norm3(tgt)\n tgt = self.linear2(self.dropout(self.activation(self.linear1(tgt))))\n tgt = residual + self.dropout3(tgt)\n if not self.normalize_before:\n tgt = self.norm3(tgt)\n return tgt if cache is None else (tgt, (incremental_cache,\n static_cache))\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 527, "n_words": 126, "vocab_size": 47, "complexity": 10, "nloc": 80, "token_counts": 290, "n_ast_nodes": 437, "n_identifiers": 25, "d_id": 118328, "documentation": { "docstring": "\n Applies a Transformer decoder layer on the input.\n\n Parameters:\n tgt (Tensor): The input of Transformer decoder layer. It is a tensor\n with shape `[batch_size, target_length, d_model]`. 
The data type\n should be float32 or float64.\n memory (Tensor): The output of Transformer encoder. It is a tensor\n with shape `[batch_size, source_length, d_model]`. The data type\n should be float32 or float64.\n tgt_mask (Tensor, optional): A tensor used in self attention\n to prevents attention to some unwanted positions, usually the\n the subsequent positions. It is a tensor with shape broadcasted\n to `[batch_size, n_head, target_length, target_length]`.\n When the data type is bool, the unwanted positions have `False` \n values and the others have `True` values. When the data type is \n int, the unwanted positions have 0 values and the others have 1 \n values. When the data type is float, the unwanted positions have \n `-INF` values and the others have 0 values. It can be None when \n nothing wanted or needed to be prevented attention to. Default None.\n memory_mask (Tensor, optional): A tensor used in decoder-encoder\n cross attention to prevents attention to some unwanted positions,\n usually the paddings. It is a tensor with shape broadcasted to \n `[batch_size, n_head, target_length, source_length]`. When the \n data type is bool, the unwanted positions have `False` values \n and the others have `True` values. When the data type is int, \n the unwanted positions have 0 values and the others have 1 \n values. When the data type is float, the unwanted positions have \n `-INF` values and the others have 0 values. It can be None when \n nothing wanted or needed to be prevented attention to. Default None.\n cache (tuple, optional): It is a tuple( :code:`(incremental_cache, static_cache)` ),\n `incremental_cache` is an instance of `MultiHeadAttention.Cache`,\n `static_cache` is an instance of `MultiHeadAttention.StaticCache.\n See `TransformerDecoderLayer.gen_cache` for more details. It is\n only used for inference and should be None for training. Default\n None.\n\n Returns:\n Tensor|tuple: It is a tensor that has the same shape and data type \\\n as `tgt`, representing the output of Transformer decoder layer. \\\n Or a tuple if `cache` is not None, except for decoder layer output, \\\n the tuple includes the new cache which is same as input `cache` \\\n argument but `incremental_cache` in it has an incremental length. 
\\\n See `MultiHeadAttention.gen_cache` and `MultiHeadAttention.forward` \\\n for more details.\n ", "n_words": 374, "vocab_size": 130, "n_whitespaces": 976, "language": "en" } }, { "id": 300603, "commit_id": "4885331509eeffe50f42d76b234996467b06170f", "repo": "core", "path": "homeassistant/helpers/template.py", "file_name": "template.py", "fun_name": "forgiving_int", "commit_message": "Fail template functions when no default specified (#71687)", "code": "def forgiving_int(value, default=_SENTINEL, base=10):\n \n result = jinja2.filters.do_int(value, default=default, base=base)\n if result is _SENTINEL:\n raise_no_default(\"int\", value)\n return result\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 36, "n_words": 17, "vocab_size": 15, "complexity": 2, "nloc": 5, "token_counts": 45, "n_ast_nodes": 71, "n_identifiers": 10, "d_id": 99463, "documentation": { "docstring": "Try to convert value to an int, and raise if it fails.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 160203, "commit_id": "0d13f9f747887b290108a909dd92c3cb47239921", "repo": "numpy", "path": "numpy/linalg/linalg.py", "file_name": "linalg.py", "fun_name": "norm", "commit_message": "BUG: Consistent promotion for norm for all values of ord (#17709)\n\nPreviously, numpy.linalg.norm would return values with the same floating-point\r\ntype as input arrays for most values of the ``ord`` parameter, but not all.\r\nThis PR fixes this so that the output dtype matches the input for all (valid) values\r\nof ``ord``.\r\n\r\nCo-authored-by: Kenichi Maehashi \r\nCo-authored-by: Ross Barnowski ", "code": "def norm(x, ord=None, axis=None, keepdims=False):\n \n x = asarray(x)\n\n if not issubclass(x.dtype.type, (inexact, object_)):\n x = x.astype(float)\n\n # Immediately handle some default, simple, fast, and common cases.\n if axis is None:\n ndim = x.ndim\n if ((ord is None) or\n (ord in ('f', 'fro') and ndim == 2) or\n (ord == 2 and ndim == 1)):\n\n x = x.ravel(order='K')\n if isComplexType(x.dtype.type):\n sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)\n else:\n sqnorm = dot(x, x)\n ret = sqrt(sqnorm)\n if keepdims:\n ret = ret.reshape(ndim*[1])\n return ret\n\n # Normalize the `axis` argument to a tuple.\n nd = x.ndim\n if axis is None:\n axis = tuple(range(nd))\n elif not isinstance(axis, tuple):\n try:\n axis = int(axis)\n except Exception as e:\n raise TypeError(\"'axis' must be None, an integer or a tuple of integers\") from e\n axis = (axis,)\n\n if len(axis) == 1:\n if ord == Inf:\n return abs(x).max(axis=axis, keepdims=keepdims)\n elif ord == -Inf:\n return abs(x).min(axis=axis, keepdims=keepdims)\n elif ord == 0:\n # Zero norm\n return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims)\n elif ord == 1:\n # special case for speedup\n return add.reduce(abs(x), axis=axis, keepdims=keepdims)\n elif ord is None or ord == 2:\n # special case for speedup\n s = (x.conj() * x).real\n return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))\n # None of the str-type keywords for ord ('fro', 'nuc')\n # are valid for vectors\n elif isinstance(ord, str):\n raise ValueError(f\"Invalid norm order '{ord}' for vectors\")\n else:\n absx = abs(x)\n absx **= ord\n ret = add.reduce(absx, axis=axis, keepdims=keepdims)\n ret **= reciprocal(ord, dtype=ret.dtype)\n return ret\n elif len(axis) == 2:\n row_axis, col_axis = axis\n row_axis = normalize_axis_index(row_axis, nd)\n col_axis = 
normalize_axis_index(col_axis, nd)\n if row_axis == col_axis:\n raise ValueError('Duplicate axes given.')\n if ord == 2:\n ret = _multi_svd_norm(x, row_axis, col_axis, amax)\n elif ord == -2:\n ret = _multi_svd_norm(x, row_axis, col_axis, amin)\n elif ord == 1:\n if col_axis > row_axis:\n col_axis -= 1\n ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)\n elif ord == Inf:\n if row_axis > col_axis:\n row_axis -= 1\n ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)\n elif ord == -1:\n if col_axis > row_axis:\n col_axis -= 1\n ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)\n elif ord == -Inf:\n if row_axis > col_axis:\n row_axis -= 1\n ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)\n elif ord in [None, 'fro', 'f']:\n ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))\n elif ord == 'nuc':\n ret = _multi_svd_norm(x, row_axis, col_axis, sum)\n else:\n raise ValueError(\"Invalid norm order for matrices.\")\n if keepdims:\n ret_shape = list(x.shape)\n ret_shape[axis[0]] = 1\n ret_shape[axis[1]] = 1\n ret = ret.reshape(ret_shape)\n return ret\n else:\n raise ValueError(\"Improper number of dimensions to norm.\")\n\n\n# multi_dot\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 1233, "n_words": 395, "vocab_size": 178, "complexity": 36, "nloc": 87, "token_counts": 746, "n_ast_nodes": 1183, "n_identifiers": 55, "d_id": 38571, "documentation": { "docstring": "\n Matrix or vector norm.\n\n This function is able to return one of eight different matrix norms,\n or one of an infinite number of vector norms (described below), depending\n on the value of the ``ord`` parameter.\n\n Parameters\n ----------\n x : array_like\n Input array. If `axis` is None, `x` must be 1-D or 2-D, unless `ord`\n is None. If both `axis` and `ord` are None, the 2-norm of\n ``x.ravel`` will be returned.\n ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional\n Order of the norm (see table under ``Notes``). inf means numpy's\n `inf` object. The default is None.\n axis : {None, int, 2-tuple of ints}, optional.\n If `axis` is an integer, it specifies the axis of `x` along which to\n compute the vector norms. If `axis` is a 2-tuple, it specifies the\n axes that hold 2-D matrices, and the matrix norms of these matrices\n are computed. If `axis` is None then either a vector norm (when `x`\n is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default\n is None.\n\n .. versionadded:: 1.8.0\n\n keepdims : bool, optional\n If this is set to True, the axes which are normed over are left in the\n result as dimensions with size one. With this option the result will\n broadcast correctly against the original `x`.\n\n .. 
versionadded:: 1.10.0\n\n Returns\n -------\n n : float or ndarray\n Norm of the matrix or vector(s).\n\n See Also\n --------\n scipy.linalg.norm : Similar function in SciPy.\n\n Notes\n -----\n For values of ``ord < 1``, the result is, strictly speaking, not a\n mathematical 'norm', but it may still be useful for various numerical\n purposes.\n\n The following norms can be calculated:\n\n ===== ============================ ==========================\n ord norm for matrices norm for vectors\n ===== ============================ ==========================\n None Frobenius norm 2-norm\n 'fro' Frobenius norm --\n 'nuc' nuclear norm --\n inf max(sum(abs(x), axis=1)) max(abs(x))\n -inf min(sum(abs(x), axis=1)) min(abs(x))\n 0 -- sum(x != 0)\n 1 max(sum(abs(x), axis=0)) as below\n -1 min(sum(abs(x), axis=0)) as below\n 2 2-norm (largest sing. value) as below\n -2 smallest singular value as below\n other -- sum(abs(x)**ord)**(1./ord)\n ===== ============================ ==========================\n\n The Frobenius norm is given by [1]_:\n\n :math:`||A||_F = [\\\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`\n\n The nuclear norm is the sum of the singular values.\n\n Both the Frobenius and nuclear norm orders are only defined for\n matrices and raise a ValueError when ``x.ndim != 2``.\n\n References\n ----------\n .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,\n Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15\n\n Examples\n --------\n >>> from numpy import linalg as LA\n >>> a = np.arange(9) - 4\n >>> a\n array([-4, -3, -2, ..., 2, 3, 4])\n >>> b = a.reshape((3, 3))\n >>> b\n array([[-4, -3, -2],\n [-1, 0, 1],\n [ 2, 3, 4]])\n\n >>> LA.norm(a)\n 7.745966692414834\n >>> LA.norm(b)\n 7.745966692414834\n >>> LA.norm(b, 'fro')\n 7.745966692414834\n >>> LA.norm(a, np.inf)\n 4.0\n >>> LA.norm(b, np.inf)\n 9.0\n >>> LA.norm(a, -np.inf)\n 0.0\n >>> LA.norm(b, -np.inf)\n 2.0\n\n >>> LA.norm(a, 1)\n 20.0\n >>> LA.norm(b, 1)\n 7.0\n >>> LA.norm(a, -1)\n -4.6566128774142013e-010\n >>> LA.norm(b, -1)\n 6.0\n >>> LA.norm(a, 2)\n 7.745966692414834\n >>> LA.norm(b, 2)\n 7.3484692283495345\n\n >>> LA.norm(a, -2)\n 0.0\n >>> LA.norm(b, -2)\n 1.8570331885190563e-016 # may vary\n >>> LA.norm(a, 3)\n 5.8480354764257312 # may vary\n >>> LA.norm(a, -3)\n 0.0\n\n Using the `axis` argument to compute vector norms:\n\n >>> c = np.array([[ 1, 2, 3],\n ... [-1, 1, 4]])\n >>> LA.norm(c, axis=0)\n array([ 1.41421356, 2.23606798, 5. 
])\n >>> LA.norm(c, axis=1)\n array([ 3.74165739, 4.24264069])\n >>> LA.norm(c, ord=1, axis=1)\n array([ 6., 6.])\n\n Using the `axis` argument to compute matrix norms:\n\n >>> m = np.arange(8).reshape(2,2,2)\n >>> LA.norm(m, axis=(1,2))\n array([ 3.74165739, 11.22497216])\n >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])\n (3.7416573867739413, 11.224972160321824)\n\n ", "n_words": 581, "vocab_size": 322, "n_whitespaces": 1267, "language": "en" } }, { "id": 266200, "commit_id": "d4a231585ac9a25d9739552d8c9e433dbf9398af", "repo": "netbox", "path": "netbox/extras/tests/test_models.py", "file_name": "test_models.py", "fun_name": "test_multiple_tags_return_distinct_objects_with_seperate_config_contexts", "commit_message": "Clean up tests", "code": "def test_multiple_tags_return_distinct_objects_with_seperate_config_contexts(self):\n \n site = Site.objects.first()\n platform = Platform.objects.first()\n tenant = Tenant.objects.first()\n tag1, tag2 = list(Tag.objects.all())\n\n tag_context_1 = ConfigContext.objects.create(\n name=\"tag-1\",\n weight=100,\n data={\n \"tag\": 1\n }\n )\n tag_context_1.tags.add(tag1)\n\n tag_context_2 = ConfigContext.objects.create(\n name=\"tag-2\",\n weight=100,\n data={\n \"tag\": 1\n }\n )\n tag_context_2.tags.add(tag2)\n\n device = Device.objects.create(\n name=\"Device 3\",\n site=site,\n tenant=tenant,\n platform=platform,\n device_role=DeviceRole.objects.first(),\n device_type=DeviceType.objects.first()\n )\n device.tags.set([tag1, tag2])\n\n annotated_queryset = Device.objects.filter(name=device.name).annotate_config_context_data()\n self.assertEqual(ConfigContext.objects.get_for_object(device).count(), 2)\n self.assertEqual(device.get_config_context(), annotated_queryset[0].get_config_context())\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 360, "n_words": 57, "vocab_size": 42, "complexity": 1, "nloc": 33, "token_counts": 223, "n_ast_nodes": 358, "n_identifiers": 38, "d_id": 78334, "documentation": { "docstring": "\n Tagged items use a generic relationship, which results in duplicate rows being returned when queried.\n This is combatted by by appending distinct() to the config context querysets. 
This test creates a config\n context assigned to two tags and ensures objects related by those same two tags result in only a single\n config context record being returned.\n\n This test case is seperate from the above in that it deals with multiple config context objects in play.\n\n See https://github.com/netbox-community/netbox/issues/5387\n ", "n_words": 77, "vocab_size": 54, "n_whitespaces": 127, "language": "en" } }, { "id": 293727, "commit_id": "bc862e97ed68cce8c437327651f85892787e755e", "repo": "core", "path": "tests/components/recorder/test_pool.py", "file_name": "test_pool.py", "fun_name": "test_recorder_pool", "commit_message": "Use a dedicated executor pool for database operations (#68105)\n\nCo-authored-by: Erik Montnemery \r\nCo-authored-by: Franck Nijhof ", "code": "def test_recorder_pool(caplog):\n \n\n engine = create_engine(\"sqlite://\", poolclass=RecorderPool)\n get_session = sessionmaker(bind=engine)\n shutdown = False\n connections = []\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 15, "vocab_size": 12, "complexity": 1, "nloc": 34, "token_counts": 234, "n_ast_nodes": 55, "n_identifiers": 11, "d_id": 92782, "documentation": { "docstring": "Test RecorderPool gives the same connection in the creating thread.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 156008, "commit_id": "cccb9d8d8e33a891396b1275c2448c352ef40c27", "repo": "dask", "path": "dask/array/core.py", "file_name": "core.py", "fun_name": "apply_infer_dtype", "commit_message": "absolufy-imports - No relative - PEP8 (#8796)\n\nConversation in https://github.com/dask/distributed/issues/5889", "code": "def apply_infer_dtype(func, args, kwargs, funcname, suggest_dtype=\"dtype\", nout=None):\n \n from dask.array.utils import meta_from_array\n\n # make sure that every arg is an evaluated array\n args = [\n np.ones_like(meta_from_array(x), shape=((1,) * x.ndim), dtype=x.dtype)\n if is_arraylike(x)\n else x\n for x in args\n ]\n try:\n with np.errstate(all=\"ignore\"):\n o = func(*args, **kwargs)\n except Exception as e:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n tb = \"\".join(traceback.format_tb(exc_traceback))\n suggest = (\n (\n \"Please specify the dtype explicitly using the \"\n \"`{dtype}` kwarg.\\n\\n\".format(dtype=suggest_dtype)\n )\n if suggest_dtype\n else \"\"\n )\n msg = (\n f\"`dtype` inference failed in `{funcname}`.\\n\\n\"\n f\"{suggest}\"\n \"Original error is below:\\n\"\n \"------------------------\\n\"\n f\"{e!r}\\n\\n\"\n \"Traceback:\\n\"\n \"---------\\n\"\n f\"{tb}\"\n )\n else:\n msg = None\n if msg is not None:\n raise ValueError(msg)\n return o.dtype if nout is None else tuple(e.dtype for e in o)\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 413, "n_words": 119, "vocab_size": 92, "complexity": 8, "nloc": 37, "token_counts": 192, "n_ast_nodes": 341, "n_identifiers": 37, "d_id": 36503, "documentation": { "docstring": "\n Tries to infer output dtype of ``func`` for a small set of input arguments.\n\n Parameters\n ----------\n func: Callable\n Function for which output dtype is to be determined\n\n args: List of array like\n Arguments to the function, which would usually be used. 
Only attributes\n ``ndim`` and ``dtype`` are used.\n\n kwargs: dict\n Additional ``kwargs`` to the ``func``\n\n funcname: String\n Name of calling function to improve potential error messages\n\n suggest_dtype: None/False or String\n If not ``None`` adds suggestion to potential error message to specify a dtype\n via the specified kwarg. Defaults to ``'dtype'``.\n\n nout: None or Int\n ``None`` if function returns single output, integer if many.\n Deafults to ``None``.\n\n Returns\n -------\n : dtype or List of dtype\n One or many dtypes (depending on ``nout``)\n ", "n_words": 121, "vocab_size": 86, "n_whitespaces": 231, "language": "en" } }, { "id": 118556, "commit_id": "704eab3478cf69847825b23dabf15813a8ac9fa2", "repo": "streamlit", "path": "lib/streamlit/delta_generator.py", "file_name": "delta_generator.py", "fun_name": "_cursor", "commit_message": "Rename and refactor `Report` machinery (#4141)\n\nThis refactor renames (almost) everything related to the outdated \"report\" concept with more precise concepts that we use throughout our code, primarily \"script run\", \"session\", and \"app\".", "code": "def _cursor(self) -> Optional[Cursor]:\n \n if self._provided_cursor is None:\n return cursor.get_container_cursor(self._root_container)\n else:\n return self._provided_cursor\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 56, "n_words": 13, "vocab_size": 11, "complexity": 2, "nloc": 9, "token_counts": 33, "n_ast_nodes": 55, "n_identifiers": 8, "d_id": 26289, "documentation": { "docstring": "Return our Cursor. This will be None if we're not running in a\n ScriptThread - e.g., if we're running a \"bare\" script outside of\n Streamlit.\n ", "n_words": 25, "vocab_size": 21, "n_whitespaces": 46, "language": "en" } }, { "id": 43184, "commit_id": "677c42227c08f705142f298ab88915f133cd94e5", "repo": "airflow", "path": "airflow/migrations/versions/0111_2_3_3_add_indexes_for_cascade_deletes.py", "file_name": "0111_2_3_3_add_indexes_for_cascade_deletes.py", "fun_name": "_mysql_tables_where_indexes_already_present", "commit_message": "Add indexes for CASCADE deletes for task_instance (#24488)\n\nWhen we add foreign keys with ON DELETE CASCADE, and we delete rows in the foreign table, the database needs to join back to the referencing table. 
If there's no suitable index, then it can be slow to perform the deletes.", "code": "def _mysql_tables_where_indexes_already_present(conn):\n \n to_check = [\n ('xcom', 'idx_xcom_task_instance'),\n ('task_reschedule', 'idx_task_reschedule_dag_run'),\n ('task_fail', 'idx_task_fail_task_instance'),\n ]\n tables = set()\n for tbl, idx in to_check:\n if conn.execute(f\"show indexes from {tbl} where Key_name = '{idx}'\").first():\n tables.add(tbl)\n return tables\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 89, "n_words": 32, "vocab_size": 29, "complexity": 3, "nloc": 11, "token_counts": 61, "n_ast_nodes": 115, "n_identifiers": 10, "d_id": 7856, "documentation": { "docstring": "\n If user downgraded and is upgrading again, we have to check for existing\n indexes on mysql because we can't (and don't) drop them as part of the\n downgrade.\n ", "n_words": 28, "vocab_size": 27, "n_whitespaces": 41, "language": "en" } }, { "id": 260585, "commit_id": "7da7ba603d42398c6e7cf89ea5336b8aabac7bae", "repo": "scikit-learn", "path": "sklearn/decomposition/_truncated_svd.py", "file_name": "_truncated_svd.py", "fun_name": "fit", "commit_message": "MNT TrucatedSVD uses _validate_parameters (#23987)\n\nCo-authored-by: jeremiedbb ", "code": "def fit(self, X, y=None):\n \n # param validation is done in fit_transform\n self.fit_transform(X)\n return self\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 42, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 34, "n_identifiers": 5, "d_id": 76359, "documentation": { "docstring": "Fit model on training data X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training data.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n Returns the transformer object.\n ", "n_words": 40, "vocab_size": 37, "n_whitespaces": 129, "language": "en" } }, { "id": 268943, "commit_id": "373ad97c72ed1ac4b6898e85b2cfd7b016e4b469", "repo": "keras", "path": "keras/preprocessing/image.py", "file_name": "image.py", "fun_name": "apply_channel_shift", "commit_message": "Copy image utils from keras_preprocessing directly into core keras\n\nThis is not new code, we are just moving these utilities directly\ninto keras from keras-preprocessing.\n\nFor the library code, just fixed linting errors.\nFor the test code, had to do more major changes to port from pytest, but\nhopefully any errors have been caught by the tests themselves.\n\nPiperOrigin-RevId: 427274651", "code": "def apply_channel_shift(x, intensity, channel_axis=0):\n \n x = np.rollaxis(x, channel_axis, 0)\n min_x, max_x = np.min(x), np.max(x)\n channel_images = [\n np.clip(x_channel + intensity, min_x, max_x) for x_channel in x]\n x = np.stack(channel_images, axis=0)\n x = np.rollaxis(x, 0, channel_axis + 1)\n return x\n\n\n@keras_export('keras.preprocessing.image.random_channel_shift')", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export('keras.preprocessing.image.random_channel_shift')", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 51, "n_words": 40, "vocab_size": 29, "complexity": 2, "nloc": 8, "token_counts": 89, "n_ast_nodes": 144, "n_identifiers": 16, "d_id": 79775, "documentation": { "docstring": "Performs a channel 
shift.\n\n Args:\n x: Input tensor. Must be 3D.\n intensity: Transformation intensity.\n channel_axis: Index of axis for channels in the input tensor.\n\n Returns:\n Numpy image tensor.\n ", "n_words": 28, "vocab_size": 26, "n_whitespaces": 51, "language": "en" } }, { "id": 132862, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tune/trial_runner.py", "file_name": "trial_runner.py", "fun_name": "_requeue_trial", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _requeue_trial(self, trial):\n \n self._scheduler_alg.on_trial_error(self, trial)\n self.trial_executor.set_status(trial, Trial.PENDING)\n\n # TODO(rliaw): Right now, this pushes the trial to the end of queue\n # because restoration can be expensive. However, this is not\n # ideal since it just hides the issue - a better fix would\n # be to use an actor table to detect the IP of the Trainable\n # and rsync the files there.\n # See https://github.com/ray-project/ray/issues/5168\n self._trials.pop(self._trials.index(trial))\n self._trials.append(trial)\n self._live_trials.add(trial)\n\n with warn_if_slow(\"scheduler.on_trial_add\"):\n self._scheduler_alg.on_trial_add(\n TrialRunnerWrapper(self, runner_whitelist_attr={\"search_alg\"}), trial\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 204, "n_words": 76, "vocab_size": 60, "complexity": 1, "nloc": 10, "token_counts": 86, "n_ast_nodes": 148, "n_identifiers": 19, "d_id": 29841, "documentation": { "docstring": "Notification to TrialScheduler and requeue trial.\n\n This does not notify the SearchAlgorithm because the function\n evaluation is still in progress.\n\n ", "n_words": 20, "vocab_size": 19, "n_whitespaces": 41, "language": "en" } }, { "id": 246318, "commit_id": "c3db7a0b59d48b8872bc24096f9a2467ef35f703", "repo": "synapse", "path": "tests/federation/transport/test_knocking.py", "file_name": "test_knocking.py", "fun_name": "test_room_state_returned_when_knocking", "commit_message": "Tests: replace mocked Authenticator with the real thing (#11913)\n\nIf we prepopulate the test homeserver with a key for a remote homeserver, we\r\ncan make federation requests to it without having to stub out the\r\nauthenticator. This has two advantages:\r\n\r\n * means that what we are testing is closer to reality (ie, we now have\r\n complete tests for the incoming-request-authorisation flow)\r\n\r\n * some tests require that other objects be signed by the remote server (eg,\r\n the event in `/send_join`), and doing that would require a whole separate\r\n set of mocking out. 
It's much simpler just to use real keys.", "code": "def test_room_state_returned_when_knocking(self):\n \n user_id = self.register_user(\"u1\", \"you the one\")\n user_token = self.login(\"u1\", \"you the one\")\n\n fake_knocking_user_id = \"@user:other.example.com\"\n\n # Create a room with a room version that includes knocking\n room_id = self.helper.create_room_as(\n \"u1\",\n is_public=False,\n room_version=RoomVersions.V7.identifier,\n tok=user_token,\n )\n\n # Update the join rules and add additional state to the room to check for later\n expected_room_state = self.send_example_state_events_to_room(\n self.hs, room_id, user_id\n )\n\n channel = self.make_signed_federation_request(\n \"GET\",\n \"/_matrix/federation/v1/make_knock/%s/%s?ver=%s\"\n % (\n room_id,\n fake_knocking_user_id,\n # Inform the remote that we support the room version of the room we're\n # knocking on\n RoomVersions.V7.identifier,\n ),\n )\n self.assertEquals(200, channel.code, channel.result)\n\n # Note: We don't expect the knock membership event to be sent over federation as\n # part of the stripped room state, as the knocking homeserver already has that\n # event. It is only done for clients during /sync\n\n # Extract the generated knock event json\n knock_event = channel.json_body[\"event\"]\n\n # Check that the event has things we expect in it\n self.assertEquals(knock_event[\"room_id\"], room_id)\n self.assertEquals(knock_event[\"sender\"], fake_knocking_user_id)\n self.assertEquals(knock_event[\"state_key\"], fake_knocking_user_id)\n self.assertEquals(knock_event[\"type\"], EventTypes.Member)\n self.assertEquals(knock_event[\"content\"][\"membership\"], Membership.KNOCK)\n\n # Turn the event json dict into a proper event.\n # We won't sign it properly, but that's OK as we stub out event auth in `prepare`\n signed_knock_event = builder.create_local_event_from_event_dict(\n self.clock,\n self.hs.hostname,\n self.hs.signing_key,\n room_version=RoomVersions.V7,\n event_dict=knock_event,\n )\n\n # Convert our proper event back to json dict format\n signed_knock_event_json = signed_knock_event.get_pdu_json(\n self.clock.time_msec()\n )\n\n # Send the signed knock event into the room\n channel = self.make_signed_federation_request(\n \"PUT\",\n \"/_matrix/federation/v1/send_knock/%s/%s\"\n % (room_id, signed_knock_event.event_id),\n signed_knock_event_json,\n )\n self.assertEquals(200, channel.code, channel.result)\n\n # Check that we got the stripped room state in return\n room_state_events = channel.json_body[\"knock_state_events\"]\n\n # Validate the stripped room state events\n self.check_knock_room_state_against_room_state(\n room_state_events, expected_room_state\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 834, "n_words": 259, "vocab_size": 149, "complexity": 1, "nloc": 50, "token_counts": 276, "n_ast_nodes": 454, "n_identifiers": 43, "d_id": 71151, "documentation": { "docstring": "\n Tests that specific, stripped state events from a room are returned after\n a remote homeserver successfully knocks on a local room.\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 43, "language": "en" } }, { "id": 276251, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/saving_utils.py", "file_name": "saving_utils.py", "fun_name": "should_overwrite", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def 
should_overwrite(filepath, overwrite):\n \n # If file exists and should not be overwritten.\n if not overwrite and os.path.isfile(filepath):\n return ask_to_proceed_with_overwrite(filepath)\n return True\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 40, "n_words": 21, "vocab_size": 18, "complexity": 3, "nloc": 4, "token_counts": 28, "n_ast_nodes": 48, "n_identifiers": 7, "d_id": 81607, "documentation": { "docstring": "Returns whether the filepath should be overwritten.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 241795, "commit_id": "5628849933f1ba002f34b88b4d3af24f68008b39", "repo": "scipy", "path": "scipy/sparse/linalg/_isolve/utils.py", "file_name": "utils.py", "fun_name": "make_system", "commit_message": "MAINT: sparse.linalg: Remove unnecessary operations", "code": "def make_system(A, M, x0, b):\n \n A_ = A\n A = aslinearoperator(A)\n\n if A.shape[0] != A.shape[1]:\n raise ValueError(f'expected square matrix, but got shape={(A.shape,)}')\n\n N = A.shape[0]\n\n b = asanyarray(b)\n\n if not (b.shape == (N,1) or b.shape == (N,)):\n raise ValueError(f'shapes of A {A.shape} and b {b.shape} are '\n 'incompatible')\n\n if b.dtype.char not in 'fdFD':\n b = b.astype('d') # upcast non-FP types to double\n", "url": "https://github.com/scipy/scipy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 132, "n_words": 62, "vocab_size": 48, "complexity": 17, "nloc": 51, "token_counts": 379, "n_ast_nodes": 194, "n_identifiers": 14, "d_id": 69699, "documentation": { "docstring": "Make a linear system Ax=b\n\n Parameters\n ----------\n A : LinearOperator\n sparse or dense matrix (or any valid input to aslinearoperator)\n M : {LinearOperator, Nones}\n preconditioner\n sparse or dense matrix (or any valid input to aslinearoperator)\n x0 : {array_like, str, None}\n initial guess to iterative method.\n ``x0 = 'Mb'`` means using the nonzero initial guess ``M @ b``.\n Default is `None`, which means using the zero initial guess.\n b : array_like\n right hand side\n\n Returns\n -------\n (A, M, x, b, postprocess)\n A : LinearOperator\n matrix of the linear system\n M : LinearOperator\n preconditioner\n x : rank 1 ndarray\n initial guess\n b : rank 1 ndarray\n right hand side\n postprocess : function\n converts the solution vector to the appropriate\n type and dimensions (e.g. 
(N,1) matrix)\n\n ", "n_words": 123, "vocab_size": 77, "n_whitespaces": 303, "language": "en" } }, { "id": 22452, "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", "repo": "Python", "path": "Google_News.py", "file_name": "Google_News.py", "fun_name": "news", "commit_message": "refactor: clean code\n\nSigned-off-by: slowy07 ", "code": "def news(xml_news_url, counter):\n ", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "\"\"\"Print select details from a html response containing xmla html response containing", "n_ast_errors": 2, "ast_levels": 5, "n_whitespaces": 6, "n_words": 3, "vocab_size": 3, "complexity": 3, "nloc": 16, "token_counts": 95, "n_ast_nodes": 23, "n_identifiers": 11, "d_id": 4346, "documentation": { "docstring": "Print select details from a html response containing xml", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 261018, "commit_id": "2710a9e7eefd2088ce35fd2fb6651d5f97e5ef8b", "repo": "scikit-learn", "path": "sklearn/utils/_array_api.py", "file_name": "_array_api.py", "fun_name": "_estimator_with_converted_arrays", "commit_message": "ENH Adds Array API support to LinearDiscriminantAnalysis (#22554)\n\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Julien Jerphanion ", "code": "def _estimator_with_converted_arrays(estimator, converter):\n \n from sklearn.base import clone\n\n new_estimator = clone(estimator)\n for key, attribute in vars(estimator).items():\n if hasattr(attribute, \"__array_namespace__\") or isinstance(\n attribute, numpy.ndarray\n ):\n attribute = converter(attribute)\n setattr(new_estimator, key, attribute)\n return new_estimator\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 89, "n_words": 31, "vocab_size": 27, "complexity": 4, "nloc": 10, "token_counts": 67, "n_ast_nodes": 107, "n_identifiers": 16, "d_id": 76626, "documentation": { "docstring": "Create new estimator which converting all attributes that are arrays.\n\n Parameters\n ----------\n estimator : Estimator\n Estimator to convert\n\n converter : callable\n Callable that takes an array attribute and returns the converted array.\n\n Returns\n -------\n new_estimator : Estimator\n Convert estimator\n ", "n_words": 39, "vocab_size": 32, "n_whitespaces": 84, "language": "en" } }, { "id": 275487, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/optimizer_v2/optimizer_v2.py", "file_name": "optimizer_v2.py", "fun_name": "variables", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def variables(self):\n \n return self._weights\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 19, "n_identifiers": 3, "d_id": 81391, "documentation": { "docstring": "Returns variables of this Optimizer based on the order created.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 108704, "commit_id": "02c7ae22b4b1e7cc4fb70e18b208115f438f8f7b", "repo": "matplotlib", "path": "lib/matplotlib/backends/backend_pdf.py", "file_name": "backend_pdf.py", "fun_name": "_get_link_annotation", "commit_message": "Refactor URL handling", "code": "def _get_link_annotation(gc, x, y, width, height):\n \n link_annotation = 
{\n 'Type': Name('Annot'),\n 'Subtype': Name('Link'),\n 'Rect': (x, y, x + width, y + height),\n 'Border': [0, 0, 0],\n 'A': {\n 'S': Name('URI'),\n 'URI': gc.get_url(),\n },\n }\n return link_annotation\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 112, "n_words": 36, "vocab_size": 31, "complexity": 1, "nloc": 12, "token_counts": 80, "n_ast_nodes": 132, "n_identifiers": 9, "d_id": 23311, "documentation": { "docstring": "\n Create a link annotation object for embedding URLs.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 15, "language": "en" } }, { "id": 280005, "commit_id": "38b618ad90d669c85cccee521ad73cc0630cf750", "repo": "keras", "path": "keras/optimizers/optimizer_experimental/optimizer.py", "file_name": "optimizer.py", "fun_name": "exclude_from_weight_decay", "commit_message": "Add general `weight_decay` support in optimizer.\n\nWe still keep adamw optimizer in case people want an explicit adamw. We can delete it in a followup cl.\n\nPiperOrigin-RevId: 477043911", "code": "def exclude_from_weight_decay(self, var_list=None, var_names=None):\n \n if hasattr(self, \"_built\") and self._built:\n raise ValueError(\n \"`exclude_from_weight_decay()` can only be configued before \"\n \"the optimizer is built.\"\n )\n\n if var_list:\n self._exclude_from_weight_decay = [\n self._var_key(variable) for variable in var_list\n ]\n else:\n self._exclude_from_weight_decay = []\n self._exclude_from_weight_decay_names = var_names or []\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 178, "n_words": 43, "vocab_size": 38, "complexity": 6, "nloc": 13, "token_counts": 67, "n_ast_nodes": 113, "n_identifiers": 11, "d_id": 83214, "documentation": { "docstring": "Exclude variables from weight decay.\n\n This method must be called before the optimizer's `build` method is\n called. You can set specific variables to exclude out, or set a list of\n strings as the anchor words, if any of which appear in a variable's\n name, then the variable is excluded.\n\n Args:\n var_list: A list of `tf.Variable`s to exclude from weight decay.\n var_names: A list of strings. If any string in `var_names` appear\n in the model variable's name, then this model variable is\n excluded from weight decay. 
For example, `var_names=['bias']`\n excludes all bias variables from weight decay.\n ", "n_words": 95, "vocab_size": 59, "n_whitespaces": 204, "language": "en" } }, { "id": 171634, "commit_id": "e2df99823758210fb2b7c4aba39e23f3445f7cd3", "repo": "pandas", "path": "pandas/_version.py", "file_name": "_version.py", "fun_name": "render", "commit_message": "BLD: use nonvendor versioneer (#49924)\n\n* BLD: remove vendored versioneer\r\n\r\n* run vis\r\n\r\n* move config to pyproject.toml\r\n\r\n* add versioneer to deps\r\n\r\n* run pyupgrade\r\n\r\n* fix isort and pylint\r\n\r\n* fix ci\r\n\r\n* fix env", "code": "def render(pieces, style):\n \n if pieces[\"error\"]:\n return {\n \"version\": \"unknown\",\n \"full-revisionid\": pieces.get(\"long\"),\n \"dirty\": None,\n \"error\": pieces[\"error\"],\n \"date\": None,\n }\n\n if not style or style == \"default\":\n style = \"pep440\" # the default\n\n if style == \"pep440\":\n rendered = render_pep440(pieces)\n elif style == \"pep440-branch\":\n rendered = render_pep440_branch(pieces)\n elif style == \"pep440-pre\":\n rendered = render_pep440_pre(pieces)\n elif style == \"pep440-post\":\n rendered = render_pep440_post(pieces)\n elif style == \"pep440-post-branch\":\n rendered = render_pep440_post_branch(pieces)\n elif style == \"pep440-old\":\n rendered = render_pep440_old(pieces)\n elif style == \"git-describe\":\n rendered = render_git_describe(pieces)\n elif style == \"git-describe-long\":\n rendered = render_git_describe_long(pieces)\n else:\n raise ValueError(f\"unknown style '{style}'\")\n\n return {\n \"version\": rendered,\n \"full-revisionid\": pieces[\"long\"],\n \"dirty\": pieces[\"dirty\"],\n \"error\": None,\n \"date\": pieces.get(\"date\"),\n }\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 322, "n_words": 105, "vocab_size": 53, "complexity": 12, "nloc": 36, "token_counts": 186, "n_ast_nodes": 347, "n_identifiers": 14, "d_id": 40701, "documentation": { "docstring": "Render the given version pieces into the requested style.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 308771, "commit_id": "25fe213f222f8f49a8126130a8e507fa15e63c83", "repo": "core", "path": "homeassistant/components/google_assistant/helpers.py", "file_name": "helpers.py", "fun_name": "get_local_agent_user_id", "commit_message": "Enable local fulfillment google assistant (#63218)\n\nCo-authored-by: Paulus Schoutsen ", "code": "def get_local_agent_user_id(self, webhook_id):\n \n found_agent_user_id = None\n for agent_user_id, agent_user_data in self._store.agent_user_ids.items():\n if agent_user_data[STORE_GOOGLE_LOCAL_WEBHOOK_ID] == webhook_id:\n found_agent_user_id = agent_user_id\n break\n\n return found_agent_user_id\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 90, "n_words": 21, "vocab_size": 18, "complexity": 3, "nloc": 7, "token_counts": 40, "n_ast_nodes": 65, "n_identifiers": 10, "d_id": 107509, "documentation": { "docstring": "Return the user ID to be used for actions received via the local SDK.\n\n Return None is no agent user id is found.\n ", "n_words": 23, "vocab_size": 19, "n_whitespaces": 37, "language": "en" } }, { "id": 196233, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/core/exprtools.py", "file_name": "exprtools.py", "fun_name": "factor_nc", "commit_message": 
"Updated import locations", "code": "def factor_nc(expr):\n \n from sympy.simplify.simplify import powsimp\n from sympy.polys import gcd, factor\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 20, "n_words": 11, "vocab_size": 9, "complexity": 49, "nloc": 132, "token_counts": 983, "n_ast_nodes": 36, "n_identifiers": 8, "d_id": 47733, "documentation": { "docstring": "Return the factored form of ``expr`` while handling non-commutative\n expressions.\n\n Examples\n ========\n\n >>> from sympy import factor_nc, Symbol\n >>> from sympy.abc import x\n >>> A = Symbol('A', commutative=False)\n >>> B = Symbol('B', commutative=False)\n >>> factor_nc((x**2 + 2*A*x + A**2).expand())\n (x + A)**2\n >>> factor_nc(((x + A)*(x + B)).expand())\n (x + A)*(x + B)\n ", "n_words": 53, "vocab_size": 36, "n_whitespaces": 89, "language": "en" } }, { "id": 224448, "commit_id": "f79b34d174e41084391868e7b503f5c61b8b1bdf", "repo": "mkdocs", "path": "mkdocs/plugins.py", "file_name": "plugins.py", "fun_name": "on_template_context", "commit_message": "Move plugin events docs into source code + refactor\n\n* Create real (no-op) methods for each event in the base class.\n* Refactor event dispatcher to not check for methods' existence, instead just call them.\n* Move documentation from Markdown into docstrings of these methods.\n* Activate the 'mkdocstrings' plugin.\n* Use 'mkdocstrings' to insert documentation from those docstrings into the site.", "code": "def on_template_context(self, context, template_name, config):\n \n return context\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 14, "n_ast_nodes": 22, "n_identifiers": 5, "d_id": 57293, "documentation": { "docstring": "\n The `template_context` event is called immediately after the context is created\n for the subject template and can be used to alter the context for that specific\n template only.\n\n Parameters:\n context: dict of template context variables\n template_name: string filename of template\n config: global configuration object\n\n Returns:\n dict of template context variables\n ", "n_words": 50, "vocab_size": 35, "n_whitespaces": 137, "language": "en" } }, { "id": 8071, "commit_id": "e4fc06f986e03919d9aef3ab55c05fee5a6b9d3a", "repo": "ludwig", "path": "ludwig/datasets/loaders/dataset_loader.py", "file_name": "dataset_loader.py", "fun_name": "_download_and_process", "commit_message": "Config-first Datasets API (ludwig.datasets refactor) (#2479)\n\n* Adds README and stub for reading dataset configs.\r\n\r\n* Adds __init__.py for configs, moves circular import into function scope in ludwig/datasets/__init__.py\r\n\r\n* Print config files in datasets folder.\r\n\r\n* First pass at automatic archive extraction.\r\n\r\n* Implemented downloading and extract.\r\n\r\n* Refactor DatasetConfig into its own file.\r\n\r\n* Fixed bugs downloading kaggle dataset.\r\n\r\n* Makes registry store dataset instances, not classes. 
Also comments out import_submodules for testing.\r\n\r\n* Typo fix.\r\n\r\n* Only pass data files on to load_unprocessed_dataframe, symlink directories.\r\n\r\n* Downloading dataset files into existing directory if exists.\r\n\r\n* Refactor: make datasets fully config-first, lazy load dataset loaders.\r\n\r\n* Implemented agnews custom loader.\r\n\r\n* Implements train/validation/test split by files, and globbing support\r\n\r\n* Adds _glob_multiple\r\n\r\n* Adds adult_census_income, agnews, allstate_claims_severity.\r\n\r\n* Implements sha256 verification, adds more datasets up to creditcard_fraud.\r\n\r\n* Adds checksums, dbpedia, electricity\r\n\r\n* Fixes gzip file name returned as string not list, adds up to forest_cover dataset.\r\n\r\n* Adds datasets up to reuters_r8\r\n\r\n* Adds all datasets which don't require a custom class.\r\n\r\n* Restore dataset import behavior by implementing module __getattr__\r\n\r\n* Adds KDD datasets.\r\n\r\n* Adds ieee_fraud.\r\n\r\n* Adds imbalanced_insurance, insurance_lite.\r\n\r\n* Adds mnist.\r\n\r\n* Completes implementation of all of the built-in datasets.\r\n\r\n* Made cache_dir optional, read from environment variable if set.\r\n\r\n* Upgrades datasets tests.\r\n\r\n* Adds test for new dataset config API. Also adds scripts for dataset link checking.\r\n\r\n* Fixes loading allstate claims severity dataset.\r\n\r\n* Use @lru_cache(1), @cache not supported in python < 3.9\r\n\r\n* Deletes dataset registry, updates automl test utils\r\n\r\n* Fix imports of datasets API.\r\n\r\n* Adds more detail to sha256: docstring and basic README\r\n\r\n* Copy-paste link oops.\r\n\r\n* Fixes handling of nested archive types like .tar.bz Also adds a LUDWIG_CACHE and export to the README\r\n\r\n* Adds link for twitter bots.\r\n\r\n* Fix order of splits in README.md\r\n\r\n* typo\r\n\r\n* Adds verify as a phase in doc string.\r\n\r\n* Support .pqt, .pq extensions for parquet.\r\n\r\n* Handle nested archives with longer file extensions like .csv.zip\r\n\r\n* Handle nested .gz types properly too. 
Check all extensions with .endswith\r\n\r\n* Handle all archive types with .endswith\r\n\r\n* Update ludwig/datasets/loaders/split_loaders.py\r\n\r\nCo-authored-by: Joppe Geluykens \r\n\r\n* Adds explanation for export, fixes preserve_paths (should be relative to processed_dataset_dir)\r\n\r\n* Resolve preserved paths relative to raw dataset dir before move.\r\n\r\n* Catch runtime exception from extracting sub-archives.\r\n\r\nCo-authored-by: Daniel Treiman \r\nCo-authored-by: Joppe Geluykens ", "code": "def _download_and_process(self, kaggle_username=None, kaggle_key=None):\n \n if self.state == DatasetState.NOT_LOADED:\n try:\n self.download(kaggle_username=kaggle_username, kaggle_key=kaggle_key)\n except Exception:\n logger.exception(\"Failed to download dataset\")\n self.verify()\n if self.state == DatasetState.DOWNLOADED:\n # Extract dataset\n try:\n self.extract()\n except Exception:\n logger.exception(\"Failed to extract dataset\")\n if self.state == DatasetState.EXTRACTED:\n # Transform dataset\n try:\n self.transform()\n except Exception:\n logger.exception(\"Failed to transform dataset\")\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 261, "n_words": 48, "vocab_size": 28, "complexity": 7, "nloc": 17, "token_counts": 101, "n_ast_nodes": 175, "n_identifiers": 16, "d_id": 1324, "documentation": { "docstring": "Loads the dataset, downloaded and processing it if needed.\n\n If dataset is already processed, does nothing.\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 30, "language": "en" } }, { "id": 61896, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/compat.py", "file_name": "compat.py", "fun_name": "fromkeys", "commit_message": "upd; format", "code": "def fromkeys(cls, iterable, value=None):\n \n d = cls()\n for key in iterable:\n d[key] = value\n return d\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 75, "n_words": 16, "vocab_size": 14, "complexity": 2, "nloc": 5, "token_counts": 30, "n_ast_nodes": 48, "n_identifiers": 6, "d_id": 12743, "documentation": { "docstring": "OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S\n and values equal to v (which defaults to None).\n\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 41, "language": "en" } }, { "id": 73922, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/permission_policies/base.py", "file_name": "base.py", "fun_name": "_get_permission_name", "commit_message": "Reformat with black", "code": "def _get_permission_name(self, action):\n \n return \"%s.%s_%s\" % (self.app_label, action, self.model_name)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 22, "n_ast_nodes": 36, "n_identifiers": 5, "d_id": 16177, "documentation": { "docstring": "\n Get the full app-label-qualified permission name (as required by\n user.has_perm(...) 
) for the given action on this model\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 40, "language": "en" } }, { "id": 265243, "commit_id": "c5770392e32aeeaed9bd8dcf907a11c7df352b6c", "repo": "netbox", "path": "netbox/netbox/views/generic/object_views.py", "file_name": "object_views.py", "fun_name": "get_children", "commit_message": "Refactor ObjectChildrenView", "code": "def get_children(self, request, parent):\n \n raise NotImplementedError(f'{self.__class__.__name__} must implement get_children()')\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 37, "n_identifiers": 7, "d_id": 78045, "documentation": { "docstring": "\n Return a QuerySet of child objects.\n\n Args:\n request: The current request\n parent: The parent object\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 59, "language": "en" } }, { "id": 181860, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tpot/builtins/feature_transformers.py", "file_name": "feature_transformers.py", "fun_name": "fit", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def fit(self, X, y=None):\n \n X = check_array(X, accept_sparse='csr')\n return self\n\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 40, "n_identifiers": 6, "d_id": 43629, "documentation": { "docstring": "Do nothing and return the estimator unchanged\n This method is just there to implement the usual API and hence\n work in pipelines.\n Parameters\n ----------\n X : array-like\n ", "n_words": 27, "vocab_size": 25, "n_whitespaces": 69, "language": "en" } }, { "id": 77527, "commit_id": "0f7a365bf8bf72a4894c1ca447cf52ba67f40b0c", "repo": "wagtail", "path": "wagtail/admin/widgets/chooser.py", "file_name": "chooser.py", "fun_name": "get_hidden_input_context", "commit_message": "Avoid calling super().render() in BaseChooser\n\nThis frees us up to redefine template_name and get_context in subclasses without it interfering with the rendering of the hidden input.", "code": "def get_hidden_input_context(self, name, value, attrs):\n \n return super().get_context(name, value, attrs)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 23, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 25, "n_ast_nodes": 38, "n_identifiers": 7, "d_id": 16669, "documentation": { "docstring": "\n Return the context variables required to render the underlying hidden input element\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 27, "language": "en" } }, { "id": 73489, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/settings/tests/test_admin.py", "file_name": "test_admin.py", "fun_name": "test_with_no_current_site", "commit_message": "Reformat with black", "code": "def test_with_no_current_site(self):\n \n self.default_site.is_default_site = False\n self.default_site.save()\n\n start_url = reverse(\"wagtailsettings:edit\", args=[\"tests\", \"testsetting\"])\n response = self.client.get(\n start_url, 
follow=True, HTTP_HOST=\"noneoftheabove.example.com\"\n )\n self.assertEqual(302, response.redirect_chain[0][1])\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 80, "n_words": 20, "vocab_size": 18, "complexity": 1, "nloc": 8, "token_counts": 68, "n_ast_nodes": 113, "n_identifiers": 15, "d_id": 16026, "documentation": { "docstring": "\n Redirection should not break if the current request does not correspond to a site\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 29, "language": "en" } }, { "id": 65238, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/financial_statements.py", "file_name": "financial_statements.py", "fun_name": "get_fiscal_year_data", "commit_message": "style: format code with black", "code": "def get_fiscal_year_data(from_fiscal_year, to_fiscal_year):\n\tfiscal_year = frappe.db.sql(\n\t\t,\n\t\t{\"from_fiscal_year\": from_fiscal_year, \"to_fiscal_year\": to_fiscal_year},\n\t\tas_dict=1,\n\t)\n\n\treturn fiscal_year[0] if fiscal_year else {}\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 12, "n_words": 19, "vocab_size": 18, "complexity": 2, "nloc": 9, "token_counts": 42, "n_ast_nodes": 66, "n_identifiers": 8, "d_id": 13830, "documentation": { "docstring": "select min(year_start_date) as year_start_date,\n\t\tmax(year_end_date) as year_end_date from `tabFiscal Year` where\n\t\tname between %(from_fiscal_year)s and %(to_fiscal_year)s", "n_words": 16, "vocab_size": 15, "n_whitespaces": 13, "language": "en" } }, { "id": 262817, "commit_id": "947a96a8d2fc80bb76a4492fc9b631d642cf5065", "repo": "pyinstaller", "path": "tests/functional/test_qt.py", "file_name": "test_qt.py", "fun_name": "_test_Qt_QtWebEngineQuick", "commit_message": "tests: fixup QtWebEngine Qml/Quick test", "code": "def _test_Qt_QtWebEngineQuick(pyi_builder, qt_flavor):\n if is_darwin:\n # QtWebEngine on Mac OS only works with a onedir build -- onefile builds do not work.\n # Skip the test execution for onefile builds.\n if pyi_builder._mode != 'onedir':\n pytest.skip('QtWebEngine on macOS is supported only in onedir mode.')\n\n source = \n import QtQuick 2.0\n import QtQuick.Window 2.0\n import QtWebEngine 1.0\n\n Window {{\n visible: true\n WebEngineView {{\n id: view\n anchors.fill: parent\n Component.onCompleted: loadHtml('\n \n \n \n \n Test web page\n \n \n

<p>This is a test web page.</p>

\n \n \n ')\n }}\n Connections {{\n target: view\n function onLoadingChanged(loadRequest) {{\n if (loadRequest.status !== WebEngineView.LoadStartedStatus) {{\n Qt.quit()\n }}\n }}\n }}\n }}\n .format(qt_flavor)\n\n pyi_builder.test_source(source, **USE_WINDOWED_KWARG)\n\n\n@requires('PyQt5')\n@requires('PyQtWebEngine')", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "@requires('PyQt5')\n@requires('PyQtWebEngine')", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 762, "n_words": 110, "vocab_size": 86, "complexity": 3, "nloc": 66, "token_counts": 40, "n_ast_nodes": 93, "n_identifiers": 12, "d_id": 77384, "documentation": { "docstring": "\n import sys\n\n from {0}.QtGui import QGuiApplication\n from {0}.QtQml import QQmlApplicationEngine\n\n is_qt6 = '{0}' in {{'PyQt6', 'PySide6'}}\n\n if is_qt6:\n from {0}.QtWebEngineQuick import QtWebEngineQuick\n else:\n from {0}.QtWebEngine import QtWebEngine as QtWebEngineQuick\n QtWebEngineQuick.initialize()\n\n app = QGuiApplication([])\n engine = QQmlApplicationEngine()\n engine.loadData(b)\n\n if not engine.rootObjects():\n sys.exit(-1)\n\n if is_qt6:\n # Qt6: exec_() is deprecated in PySide6 and removed from PyQt6 in favor of exec()\n res = app.exec()\n else:\n res = app.exec_()\n del engine\n sys.exit(res)\n ", "n_words": 68, "vocab_size": 47, "n_whitespaces": 247, "language": "en" } }, { "id": 110667, "commit_id": "2a1a1a6e47e41b8992d462c48491d2ce347694cd", "repo": "matplotlib", "path": "lib/matplotlib/legend.py", "file_name": "legend.py", "fun_name": "get_patches", "commit_message": "API/DOC: Document legend_handles and legend_handlers\n\n- deprecate legendHandles", "code": "def get_patches(self):\n r\n return silent_list('Patch',\n [h for h in self.legend_handles\n if isinstance(h, Patch)])\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 79, "n_words": 13, "vocab_size": 13, "complexity": 3, "nloc": 5, "token_counts": 29, "n_ast_nodes": 46, "n_identifiers": 7, "d_id": 24247, "documentation": { "docstring": "Return the list of `~.patches.Patch`\\s in the legend.", "n_words": 8, "vocab_size": 7, "n_whitespaces": 7, "language": "en" } }, { "id": 179286, "commit_id": "cc0cff893f9d7d472788adc2510c123967b384fe", "repo": "gradio", "path": "gradio/queueing.py", "file_name": "queueing.py", "fun_name": "pop", "commit_message": "Format The Codebase\n- black formatting\n- isort formatting", "code": "def pop():\n conn = sqlite3.connect(DB_FILE)\n c = conn.cursor()\n c.execute(\"BEGIN EXCLUSIVE\")\n c.execute(\n \n )\n result = c.fetchone()\n if result is None:\n conn.commit()\n return None\n queue_index = result[0]\n c.execute(\n ,\n (queue_index,),\n )\n conn.commit()\n return result[0], result[1], json.loads(result[2]), result[3]\n\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 106, "n_words": 35, "vocab_size": 27, "complexity": 2, "nloc": 23, "token_counts": 98, "n_ast_nodes": 160, "n_identifiers": 14, "d_id": 42936, "documentation": { "docstring": "\n SELECT queue_index, hash, input_data, action FROM queue\n WHERE popped = 0 ORDER BY queue_index ASC LIMIT 1;\n \n UPDATE queue SET popped = 1, input_data = '' WHERE queue_index = ?;\n ", "n_words": 30, "vocab_size": 23, "n_whitespaces": 59, "language": "en" } }, { "id": 223754, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", 
"path": "python3.10.4/Lib/email/headerregistry.py", "file_name": "headerregistry.py", "fun_name": "__call__", "commit_message": "add python 3.10.4 for windows", "code": "def __call__(self, name, value):\n \n return self[name](name, value)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 31, "n_identifiers": 4, "d_id": 57050, "documentation": { "docstring": "Create a header instance for header 'name' from 'value'.\n\n Creates a header instance by creating a specialized class for parsing\n and representing the specified header by combining the factory\n base_class with a specialized class from the registry or the\n default_class, and passing the name and value to the constructed\n class's constructor.\n\n ", "n_words": 51, "vocab_size": 32, "n_whitespaces": 93, "language": "en" } }, { "id": 115431, "commit_id": "fc9776d9b342f873cbb3f36fd39955b9e1ea6f76", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/sqlite_handler/sqlite_handler.py", "file_name": "sqlite_handler.py", "fun_name": "disconnect", "commit_message": "added connection_args and connection_args_example dicts", "code": "def disconnect(self):\r\n \r\n\r\n if self.is_connected is False:\r\n return\r\n\r\n self.connection.close()\r\n self.is_connected = False\r\n return self.is_connected\r\n\r", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 59, "n_words": 13, "vocab_size": 10, "complexity": 2, "nloc": 6, "token_counts": 30, "n_ast_nodes": 52, "n_identifiers": 5, "d_id": 25459, "documentation": { "docstring": "\r\n Close any existing connections.\r\n ", "n_words": 4, "vocab_size": 4, "n_whitespaces": 19, "language": "en" } }, { "id": 286516, "commit_id": "0ae89d6cc20be84bf49c31e437fda38a845ebc68", "repo": "OpenBBTerminal", "path": "openbb_terminal/cryptocurrency/due_diligence/dd_controller.py", "file_name": "dd_controller.py", "fun_name": "call_mkt", "commit_message": "Style fixing: removing --ascend/--descend (#3395)\n\n* stocks candle to use reverse\r\n\r\n* qa raw to use reverse\r\n\r\n* etf candle to use reverse\r\n\r\n* oss rossix to use reverse\r\n\r\n* crypto/defi to use reverse\r\n\r\n* crypto/disc to use reverse\r\n\r\n* added test\r\n\r\n* crypto/dd to use reverse\r\n\r\n* crypto/onchain to use reverse\r\n\r\n* crypto/ov to use revert\r\n\r\n* forex candle to use revert\r\n\r\n* conibase controller to use revert\r\n\r\n* tests to use reverse\r\n\r\n* covid to use reverse\r\n\r\n* removing ascend\r\n\r\n* removing ascend from econ\r\n\r\n* more removing ascend\r\n\r\n* more removing ascend\r\n\r\n* more removing ascend\r\n\r\n* fixing stuff on .md files\r\n\r\n* fixed economy controller tests\r\n\r\n* fixed screener tests\r\n\r\n* fa controller to use comma separated when multiple inputs", "code": "def call_mkt(self, other_args):\n \n parser = argparse.ArgumentParser(\n prog=\"mkt\",\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=,\n )\n parser.add_argument(\n \"--vs\",\n help=\"Quoted currency. 
Default USD\",\n dest=\"vs\",\n default=\"USD\",\n type=str,\n choices=coinpaprika_view.CURRENCIES,\n )\n parser.add_argument(\n \"-l\",\n \"--limit\",\n default=20,\n dest=\"limit\",\n help=\"Limit of records\",\n type=check_positive,\n )\n parser.add_argument(\n \"-s\",\n \"--sort\",\n dest=\"sortby\",\n type=str,\n help=\"Sort by given column. Default: pct_volume_share\",\n default=\"pct_volume_share\",\n choices=coinpaprika_view.MARKET_FILTERS,\n )\n parser.add_argument(\n \"-r\",\n \"--reverse\",\n action=\"store_true\",\n dest=\"reverse\",\n default=False,\n help=(\n \"Data is sorted in descending order by default. \"\n \"Reverse flag will sort it in an ascending way. \"\n \"Only works when raw data is displayed.\"\n ),\n )\n parser.add_argument(\n \"-u\",\n \"--urls\",\n dest=\"urls\",\n action=\"store_true\",\n help=,\n default=False,\n )\n ns_parser = self.parse_known_args_and_warn(\n parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED\n )\n if ns_parser:\n if self.symbol:\n coinpaprika_view.display_markets(\n from_symbol=self.symbol,\n to_symbol=ns_parser.vs,\n limit=ns_parser.limit,\n sortby=ns_parser.sortby,\n ascend=ns_parser.reverse,\n links=ns_parser.urls,\n export=ns_parser.export,\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 847, "n_words": 109, "vocab_size": 89, "complexity": 3, "nloc": 73, "token_counts": 239, "n_ast_nodes": 384, "n_identifiers": 38, "d_id": 85839, "documentation": { "docstring": "Process mkt commandGet all markets found for given coin.\n You can display only N number of markets with --limt parameter.\n You can sort data by pct_volume_share, exchange, pair, trust_score, volume, price --sort parameter\n and also with --reverse flag to sort ascending.\n You can use additional flag --urls to see urls for each market\n Displays:\n exchange, pair, trust_score, volume, price, pct_volume_share,Flag to show urls. If you will use that flag you will see only:\n exchange, pair, trust_score, market_url columns", "n_words": 78, "vocab_size": 55, "n_whitespaces": 186, "language": "en" } }, { "id": 195878, "commit_id": "46ba104ee0f9cb35b54c2f5f5591cfabb26d0301", "repo": "sympy", "path": "sympy/core/expr.py", "file_name": "expr.py", "fun_name": "nseries", "commit_message": "Fixed failing doctest", "code": "def nseries(self, x=None, x0=0, n=6, dir='+', logx=None, cdir=0):\n \n if x and x not in self.free_symbols:\n return self\n if x is None or x0 or dir != '+': # {see XPOS above} or (x.is_positive == x.is_negative == None):\n return self.series(x, x0, n, dir, cdir=cdir)\n else:\n return self._eval_nseries(x, n=n, logx=logx, cdir=cdir)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 111, "n_words": 49, "vocab_size": 40, "complexity": 6, "nloc": 7, "token_counts": 91, "n_ast_nodes": 135, "n_identifiers": 11, "d_id": 47462, "documentation": { "docstring": "\n Wrapper to _eval_nseries if assumptions allow, else to series.\n\n If x is given, x0 is 0, dir='+', and self has x, then _eval_nseries is\n called. This calculates \"n\" terms in the innermost expressions and\n then builds up the final series just by \"cross-multiplying\" everything\n out.\n\n The optional ``logx`` parameter can be used to replace any log(x) in the\n returned series with a symbolic value to avoid evaluating log(x) at 0. 
A\n symbol to use in place of log(x) should be provided.\n\n Advantage -- it's fast, because we do not have to determine how many\n terms we need to calculate in advance.\n\n Disadvantage -- you may end up with less terms than you may have\n expected, but the O(x**n) term appended will always be correct and\n so the result, though perhaps shorter, will also be correct.\n\n If any of those assumptions is not met, this is treated like a\n wrapper to series which will try harder to return the correct\n number of terms.\n\n See also lseries().\n\n Examples\n ========\n\n >>> from sympy import sin, log, Symbol\n >>> from sympy.abc import x, y\n >>> sin(x).nseries(x, 0, 6)\n x - x**3/6 + x**5/120 + O(x**6)\n >>> log(x+1).nseries(x, 0, 5)\n x - x**2/2 + x**3/3 - x**4/4 + O(x**5)\n\n Handling of the ``logx`` parameter --- in the following example the\n expansion fails since ``sin`` does not have an asymptotic expansion\n at -oo (the limit of log(x) as x approaches 0):\n\n >>> e = sin(log(x))\n >>> e.nseries(x, 0, 6)\n Traceback (most recent call last):\n ...\n PoleError: ...\n ...\n >>> logx = Symbol('logx')\n >>> e.nseries(x, 0, 6, logx=logx)\n sin(logx)\n\n In the following example, the expansion works but only returns self\n unless the ``logx`` parameter is used:\n\n >>> e = x**y\n >>> e.nseries(x, 0, 2)\n x**y\n >>> e.nseries(x, 0, 2, logx=logx)\n exp(logx*y)\n\n ", "n_words": 294, "vocab_size": 182, "n_whitespaces": 610, "language": "en" } }, { "id": 101555, "commit_id": "7da2cc3dd266aabebf41a31384cc2e0e7e5af6e5", "repo": "faceswap", "path": "lib/training/preview_tk.py", "file_name": "preview_tk.py", "fun_name": "_process_triggers", "commit_message": "Training - Use custom preview pop-out", "code": "def _process_triggers(self) -> None:\n \n if self._triggers is None: # Don't need triggers for GUI\n return\n logger.debug(\"Processing triggers\")\n root = self._canvas.winfo_toplevel()\n for key in self._keymaps:\n bindkey = \"Return\" if key == \"enter\" else key\n logger.debug(\"Adding trigger for key: '%s'\", bindkey)\n\n root.bind(f\"<{bindkey}>\", self._on_keypress)\n logger.debug(\"Processed triggers\")\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 130, "n_words": 43, "vocab_size": 35, "complexity": 4, "nloc": 17, "token_counts": 72, "n_ast_nodes": 131, "n_identifiers": 13, "d_id": 20965, "documentation": { "docstring": " Process the standard faceswap key press triggers:\n\n m = toggle_mask\n r = refresh\n s = save\n enter = quit\n ", "n_words": 19, "vocab_size": 16, "n_whitespaces": 55, "language": "en" } }, { "id": 136382, "commit_id": "326d84f1149319809191e7887155df7f04f6f46a", "repo": "ray", "path": "python/ray/data/dataset.py", "file_name": "dataset.py", "fun_name": "to_arrow_refs", "commit_message": "[AIR][Predictor] Enable numpy based predictor (#28917)\n\nCo-authored-by: Clark Zinzow \r\nCo-authored-by: Amog Kamsetty ", "code": "def to_arrow_refs(self) -> List[ObjectRef[\"pyarrow.Table\"]]:\n \n blocks: List[ObjectRef[Block]] = self.get_internal_block_refs()\n\n if self.dataset_format() == BlockFormat.ARROW:\n # Zero-copy path.\n return blocks\n\n block_to_arrow = cached_remote_fn(_block_to_arrow)\n return [block_to_arrow.remote(block) for block in blocks]\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 83, "n_words": 26, "vocab_size": 24, "complexity": 3, "nloc": 17, "token_counts": 
61, "n_ast_nodes": 100, "n_identifiers": 15, "d_id": 30902, "documentation": { "docstring": "Convert this dataset into a distributed set of Arrow tables.\n\n This is only supported for datasets convertible to Arrow records.\n This function is zero-copy if the existing data is already in Arrow\n format. Otherwise, the data will be converted to Arrow format.\n\n Time complexity: O(1) unless conversion is required.\n\n Returns:\n A list of remote Arrow tables created from this dataset.\n ", "n_words": 60, "vocab_size": 46, "n_whitespaces": 113, "language": "en" } }, { "id": 100878, "commit_id": "04337e0c5efd442c1ce3e2da193dd8749f1e30d8", "repo": "faceswap", "path": "lib/model/losses_tf.py", "file_name": "losses_tf.py", "fun_name": "_get_kernel", "commit_message": "SSIM Updates\n - Standardize DSSIM Function\n - Implement MSSIM function for AMD", "code": "def _get_kernel(self) -> tf.Tensor:\n \n coords = np.arange(self._filter_size, dtype=\"float32\")\n coords -= (self._filter_size - 1) / 2.\n\n kernel = np.square(coords)\n kernel *= -0.5 / np.square(self._filter_sigma)\n kernel = np.reshape(kernel, (1, -1)) + np.reshape(kernel, (-1, 1))\n kernel = K.constant(np.reshape(kernel, (1, -1)))\n kernel = K.softmax(kernel)\n kernel = K.reshape(kernel, (self._filter_size, self._filter_size, 1, 1))\n return kernel\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 119, "n_words": 49, "vocab_size": 33, "complexity": 1, "nloc": 17, "token_counts": 141, "n_ast_nodes": 211, "n_identifiers": 16, "d_id": 20328, "documentation": { "docstring": " Obtain the base kernel for performing depthwise convolution.\n\n Returns\n -------\n :class:`tf.Tensor`\n The gaussian kernel based on selected size and sigma\n ", "n_words": 20, "vocab_size": 19, "n_whitespaces": 60, "language": "en" } }, { "id": 26493, "commit_id": "aca6418d6c36956bc1ab530e6ef7e146ec9df90c", "repo": "saleor", "path": "saleor/plugins/webhook/tests/subscription_webhooks/test_create_deliveries_for_subscription.py", "file_name": "test_create_deliveries_for_subscription.py", "fun_name": "test_validate_subscription_query_invalid", "commit_message": "Add Webhook payload via graphql subscriptions (#9394)\n\n* Add PoC of webhook subscriptions\r\n\r\n* add async webhooks subscription payloads feature\r\n\r\n* remove unneeded file\r\n\r\n* add translations subscription handling, fixes after review\r\n\r\n* remove todo\r\n\r\n* add descriptions\r\n\r\n* add descriptions, move subsrciption_payloads.py\r\n\r\n* refactor\r\n\r\n* fix imports, add changelog\r\n\r\n* check_document_is_single_subscription refactor\r\n\r\nCo-authored-by: Maciej Korycinski \r\nCo-authored-by: Marcin Gębala <5421321+maarcingebala@users.noreply.github.com>", "code": "def test_validate_subscription_query_invalid():\n\n result = validate_subscription_query(\"invalid_query\")\n assert result is False\n\n\nTEST_VALID_SUBSCRIPTION_QUERY_WITH_FRAGMENT = \n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 16, "n_words": 11, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 14, "n_ast_nodes": 33, "n_identifiers": 4, "d_id": 5022, "documentation": { "docstring": "\nfragment productFragment on Product{\n name\n}\nsubscription{\n event{\n ...on ProductUpdated{\n product{\n id\n ...productFragment\n }\n }\n }\n}\n", "n_words": 17, "vocab_size": 13, "n_whitespaces": 46, "language": "en" } }, { "id": 
208027, "commit_id": "59263b0409e3f02dc16ca8a3bd1e42b5a3eba36d", "repo": "celery", "path": "celery/utils/imports.py", "file_name": "imports.py", "fun_name": "find_module", "commit_message": "Minor refactors, found by static analysis (#7587)\n\n* Remove deprecated methods in `celery.local.Proxy`\r\n\r\n* Collapse conditionals for readability\r\n\r\n* Remove unused parameter `uuid`\r\n\r\n* Remove unused import `ClusterOptions`\r\n\r\n* Remove dangerous mutable default argument\r\n\r\nContinues work from #5478\r\n\r\n* Remove always `None` and unused global variable\r\n\r\n* Remove unreachable `elif` block\r\n\r\n* Consolidate import statements\r\n\r\n* Add missing parameter to `os._exit()`\r\n\r\n* Add missing assert statement\r\n\r\n* Remove unused global `WindowsError`\r\n\r\n* Use `mkstemp` instead of deprecated `mktemp`\r\n\r\n* No need for `for..else` constructs in loops that don't break\r\n\r\nIn these cases where the loop returns or raises instead of breaking, it\r\nis simpler to just put the code that runs after the loop completes right\r\nafter the loop instead.\r\n\r\n* Use the previously unused parameter `compat_modules`\r\n\r\nPreviously this parameter was always overwritten by the value of\r\n`COMPAT_MODULES.get(name, ())`, which was very likely unintentional.\r\n\r\n* Remove unused local variable `tz`\r\n\r\n* Make `assert_received` actually check for `is_received`\r\n\r\nPreviously, it called `is_accepted`, which was likely a copy-paste\r\nmistake from the `assert_accepted` method.\r\n\r\n* Use previously unused `args` and `kwargs` params\r\n\r\nUnlike other backends' `__reduce__` methods, the one from `RedisBackend`\r\nsimply overwrites `args` and `kwargs` instead of adding to them. This\r\nchange makes it more in line with other backends.\r\n\r\n* Update celery/backends/filesystem.py\r\n\r\nCo-authored-by: Gabriel Soldani <1268700+gabrielsoldani@users.noreply.github.com>\r\n\r\nCo-authored-by: Asif Saif Uddin ", "code": "def find_module(module, path=None, imp=None):\n \n if imp is None:\n imp = import_module\n with cwd_in_path():\n try:\n return imp(module)\n except ImportError:\n # Raise a more specific error if the problem is that one of the\n # dot-separated segments of the module name is not a package.\n if '.' 
in module:\n parts = module.split('.')\n for i, part in enumerate(parts[:-1]):\n package = '.'.join(parts[:i + 1])\n try:\n mpart = imp(package)\n except ImportError:\n # Break out and re-raise the original ImportError\n # instead.\n break\n try:\n mpart.__path__\n except AttributeError:\n raise NotAPackage(package)\n raise\n\n", "url": "https://github.com/celery/celery.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 432, "n_words": 84, "vocab_size": 61, "complexity": 7, "nloc": 20, "token_counts": 105, "n_ast_nodes": 185, "n_identifiers": 18, "d_id": 52179, "documentation": { "docstring": "Version of :func:`imp.find_module` supporting dots.", "n_words": 5, "vocab_size": 5, "n_whitespaces": 4, "language": "en" } }, { "id": 249309, "commit_id": "2281427175e4c93a30c39607fb4ac23c2a1f399f", "repo": "synapse", "path": "tests/rest/admin/test_event_reports.py", "file_name": "test_event_reports.py", "fun_name": "test_from_is_negative", "commit_message": "Use literals in place of `HTTPStatus` constants in tests (#13488)\n\n* Use literals in place of `HTTPStatus` constants in tests\r\n\r\n* newsfile\r\n\r\n* code style\r\n\r\n* code style", "code": "def test_from_is_negative(self) -> None:\n \n\n channel = self.make_request(\n \"GET\",\n self.url + \"?from=-5\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.INVALID_PARAM, channel.json_body[\"errcode\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 86, "n_words": 18, "vocab_size": 18, "complexity": 1, "nloc": 11, "token_counts": 60, "n_ast_nodes": 97, "n_identifiers": 13, "d_id": 72812, "documentation": { "docstring": "\n Testing that a negative from parameter returns a 400\n ", "n_words": 9, "vocab_size": 8, "n_whitespaces": 24, "language": "en" } }, { "id": 216122, "commit_id": "7e1c2baa659ee2a975cbe4ed0f6d85e34ec91e50", "repo": "salt", "path": "tests/pytests/unit/output/test_highstate.py", "file_name": "test_highstate.py", "fun_name": "test__compress_ids_not_dict", "commit_message": "fixes saltstack/salt#61549 allow roll-up of duplicate IDs with different names", "code": "def test__compress_ids_not_dict():\n \n data = [\"malformed\"]\n actual_output = highstate._compress_ids(data)\n assert actual_output == data\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 24, "n_words": 12, "vocab_size": 9, "complexity": 1, "nloc": 4, "token_counts": 22, "n_ast_nodes": 41, "n_identifiers": 5, "d_id": 54413, "documentation": { "docstring": "\n Simple test for returning original malformed data\n to let the outputter figure it out.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 24, "language": "en" } }, { "id": 205736, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/options.py", "file_name": "options.py", "fun_name": "_populate_directed_relation_graph", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _populate_directed_relation_graph(self):\n \n related_objects_graph = defaultdict(list)\n\n all_models = self.apps.get_models(include_auto_created=True)\n for model in all_models:\n opts = model._meta\n # Abstract model's fields are copied to child models, hence we will\n # see the fields from the child models.\n if opts.abstract:\n continue\n 
fields_with_relations = (\n f\n for f in opts._get_fields(reverse=False, include_parents=False)\n if f.is_relation and f.related_model is not None\n )\n for f in fields_with_relations:\n if not isinstance(f.remote_field.model, str):\n remote_label = f.remote_field.model._meta.concrete_model._meta.label\n related_objects_graph[remote_label].append(f)\n\n for model in all_models:\n # Set the relation_tree using the internal __dict__. In this way\n # we avoid calling the cached property. In attribute lookup,\n # __dict__ takes precedence over a data descriptor (such as\n # @cached_property). This means that the _meta._relation_tree is\n # only called if related_objects is not in __dict__.\n related_objects = related_objects_graph[\n model._meta.concrete_model._meta.label\n ]\n model._meta.__dict__[\"_relation_tree\"] = related_objects\n # It seems it is possible that self is not in all_models, so guard\n # against that with default for get().\n return self.__dict__.get(\"_relation_tree\", EMPTY_RELATION_TREE)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 500, "n_words": 151, "vocab_size": 100, "complexity": 9, "nloc": 22, "token_counts": 153, "n_ast_nodes": 248, "n_identifiers": 31, "d_id": 51182, "documentation": { "docstring": "\n This method is used by each model to find its reverse objects. As this\n method is very expensive and is accessed frequently (it looks up every\n field in a model, in every app), it is computed on first access and then\n is set as a property on every model.\n ", "n_words": 49, "vocab_size": 38, "n_whitespaces": 85, "language": "en" } }, { "id": 45487, "commit_id": "69f6f9e01b6df76c3c8fa266d460324163957887", "repo": "airflow", "path": "airflow/migrations/versions/a66efa278eea_add_precision_to_execution_date_in_mysql.py", "file_name": "a66efa278eea_add_precision_to_execution_date_in_mysql.py", "fun_name": "downgrade", "commit_message": "Autogenerate migration reference doc (#21601)\n\n* document airflow version in each alembic migration module and use this to autogen the doc\r\n* update each migration module to have the same description used in migration ref (so it can be used in autogen)", "code": "def downgrade():\n \n conn = op.get_bind()\n if conn.dialect.name == \"mysql\":\n op.alter_column(\n table_name=TABLE_NAME, column_name=COLUMN_NAME, type_=mysql.TIMESTAMP(), nullable=False\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 49, "n_words": 15, "vocab_size": 15, "complexity": 2, "nloc": 6, "token_counts": 45, "n_ast_nodes": 75, "n_identifiers": 15, "d_id": 8614, "documentation": { "docstring": "Unapply Add Precision to ``execution_date`` in ``RenderedTaskInstanceFields`` table", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 229885, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/funnelarea/_marker.py", "file_name": "_marker.py", "fun_name": "colors", "commit_message": "switch to black .22", "code": "def colors(self):\n \n return self[\"colors\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 61558, "documentation": { 
"docstring": "\n Sets the color of each sector. If not specified, the default\n trace color set is used to pick the sector colors.\n\n The 'colors' property is an array that may be specified as a tuple,\n list, numpy array, or pandas Series\n\n Returns\n -------\n numpy.ndarray\n ", "n_words": 43, "vocab_size": 39, "n_whitespaces": 100, "language": "en" } }, { "id": 215409, "commit_id": "ab4803984bce4a4de7cc10910e7310c4babf557e", "repo": "salt", "path": "salt/transport/rabbitmq.py", "file_name": "rabbitmq.py", "fun_name": "post_fork", "commit_message": "Start to add base class defs", "code": "def post_fork(self, payload_handler, io_loop):\n \n\n if not io_loop:\n raise ValueError(\"io_loop must be set\")\n self.payload_handler = payload_handler\n self.io_loop = io_loop\n self._rmq_nonblocking_connection_wrapper = RMQNonBlockingConnectionWrapper(\n self.opts, io_loop=io_loop\n )\n self._rmq_nonblocking_connection_wrapper.register_message_callback(\n self.handle_message\n )\n self._rmq_nonblocking_connection_wrapper.connect()\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 124, "n_words": 28, "vocab_size": 25, "complexity": 2, "nloc": 12, "token_counts": 60, "n_ast_nodes": 99, "n_identifiers": 11, "d_id": 53954, "documentation": { "docstring": "\n After forking we need to set up handlers to listen to the\n router\n\n :param func payload_handler: A function to called to handle incoming payloads as\n they are picked up off the wire\n :param IOLoop io_loop: An instance of a Tornado IOLoop, to handle event scheduling\n ", "n_words": 45, "vocab_size": 36, "n_whitespaces": 117, "language": "en" } }, { "id": 337105, "commit_id": "008b608f1551dbcf521284ed0e7a6722cd02ef07", "repo": "diffusers", "path": "examples/text_to_image/train_text_to_image.py", "file_name": "train_text_to_image.py", "fun_name": "to", "commit_message": "[train_text2image] Fix EMA and make it compatible with deepspeed. 
(#813)\n\n* fix ema\r\n\r\n* style\r\n\r\n* add comment about copy\r\n\r\n* style\r\n\r\n* quality", "code": "def to(self, device=None, dtype=None) -> None:\n r\n # .to() on the tensors handles None correctly\n self.shadow_params = [\n p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)\n for p in self.shadow_params\n ]\n\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 78, "n_words": 29, "vocab_size": 28, "complexity": 3, "nloc": 10, "token_counts": 56, "n_ast_nodes": 85, "n_identifiers": 7, "d_id": 120959, "documentation": { "docstring": "Move internal buffers of the ExponentialMovingAverage to `device`.\n\n Args:\n device: like `device` argument to `torch.Tensor.to`\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 40, "language": "en" } }, { "id": 124713, "commit_id": "365ffe21e592589880e3116302705b5e08a5b81f", "repo": "ray", "path": "python/ray/tests/test_state_api.py", "file_name": "test_state_api.py", "fun_name": "_try_state_query_expect_rate_limit", "commit_message": "[Core | State Observability] Implement API Server (Dashboard) HTTP Requests Throttling (#26257)\n\nThis is to limit the max number of HTTP requests the dashboard (API server) will accept before rejecting more requests.\r\nThis will make sure the observability requests do not overload the downstream systems (raylet/gcs) when delegating too many concurrent state observability requests to the cluster.", "code": "def _try_state_query_expect_rate_limit(api_func, res_q, start_q=None):\n \n try:\n # Indicate start of the process\n if start_q is not None:\n start_q.put(1)\n api_func()\n except RayStateApiException as e:\n # Other exceptions will be thrown\n if \"Max number of in-progress requests\" in str(e):\n res_q.put(1)\n else:\n res_q.put(e)\n except Exception as e:\n res_q.put(e)\n else:\n res_q.put(0)\n\n\n@pytest.mark.skipif(\n sys.platform == \"win32\",\n reason=\"Lambda test functions could not be pickled on Windows\",\n)", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.skipif(\n sys.platform == \"win32\",\n reason=\"Lambda test functions could not be pickled on Windows\",\n)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 168, "n_words": 60, "vocab_size": 50, "complexity": 5, "nloc": 14, "token_counts": 75, "n_ast_nodes": 163, "n_identifiers": 15, "d_id": 27666, "documentation": { "docstring": "Utility functions for rate limit related e2e tests below", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 292141, "commit_id": "44befe5f11390365e2ff0a7ce03133c1edd838a9", "repo": "core", "path": "homeassistant/components/twilio/__init__.py", "file_name": "__init__.py", "fun_name": "handle_webhook", "commit_message": "Fix Twilio webhook content type (#66561)", "code": "async def handle_webhook(hass, webhook_id, request):\n \n data = dict(await request.post())\n data[\"webhook_id\"] = webhook_id\n hass.bus.async_fire(RECEIVED_DATA, dict(data))\n\n return web.Response(text=\"\")\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 31, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 5, "token_counts": 49, "n_ast_nodes": 84, "n_identifiers": 13, "d_id": 91243, "documentation": { "docstring": "Handle incoming webhook from Twilio for inbound messages and calls.", "n_words": 10, "vocab_size": 
10, "n_whitespaces": 9, "language": "en" } }, { "id": 261366, "commit_id": "7cf938c78ff0e38a231a7cb3a2a7fa412bb47966", "repo": "scikit-learn", "path": "sklearn/metrics/pairwise.py", "file_name": "pairwise.py", "fun_name": "manhattan_distances", "commit_message": "API Remove `sklearn.metrics.manhattan_distances` option `sum_over_features` (#24630)", "code": "def manhattan_distances(X, Y=None, *, sum_over_features=\"deprecated\"):\n \n # TODO(1.4): remove sum_over_features\n if sum_over_features != \"deprecated\":\n warnings.warn(\n \"`sum_over_features` is deprecated in version 1.2 and will be\"\n \" removed in version 1.4.\",\n FutureWarning,\n )\n else:\n sum_over_features = True\n\n X, Y = check_pairwise_arrays(X, Y)\n\n if issparse(X) or issparse(Y):\n if not sum_over_features:\n raise TypeError(\n \"sum_over_features=%r not supported for sparse matrices\"\n % sum_over_features\n )\n\n X = csr_matrix(X, copy=False)\n Y = csr_matrix(Y, copy=False)\n X.sum_duplicates() # this also sorts indices in-place\n Y.sum_duplicates()\n D = np.zeros((X.shape[0], Y.shape[0]))\n _sparse_manhattan(X.data, X.indices, X.indptr, Y.data, Y.indices, Y.indptr, D)\n return D\n\n if sum_over_features:\n return distance.cdist(X, Y, \"cityblock\")\n\n D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]\n D = np.abs(D, D)\n return D.reshape((-1, X.shape[1]))\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 308, "n_words": 108, "vocab_size": 81, "complexity": 6, "nloc": 28, "token_counts": 214, "n_ast_nodes": 339, "n_identifiers": 26, "d_id": 76778, "documentation": { "docstring": "Compute the L1 distances between the vectors in X and Y.\n\n With sum_over_features equal to False it returns the componentwise\n distances.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array-like of shape (n_samples_X, n_features)\n An array where each row is a sample and each column is a feature.\n\n Y : array-like of shape (n_samples_Y, n_features), default=None\n An array where each row is a sample and each column is a feature.\n If `None`, method uses `Y=X`.\n\n sum_over_features : bool, default=True\n If True the function returns the pairwise distance matrix\n else it returns the componentwise L1 pairwise-distances.\n Not supported for sparse matrix inputs.\n\n .. deprecated:: 1.2\n ``sum_over_features`` was deprecated in version 1.2 and will be removed in\n 1.4.\n\n Returns\n -------\n D : ndarray of shape (n_samples_X * n_samples_Y, n_features) or \\\n (n_samples_X, n_samples_Y)\n If sum_over_features is False shape is\n (n_samples_X * n_samples_Y, n_features) and D contains the\n componentwise L1 pairwise-distances (ie. 
absolute difference),\n else shape is (n_samples_X, n_samples_Y) and D contains\n the pairwise L1 distances.\n\n Notes\n -----\n When X and/or Y are CSR sparse matrices and they are not already\n in canonical format, this function modifies them in-place to\n make them canonical.\n\n Examples\n --------\n >>> from sklearn.metrics.pairwise import manhattan_distances\n >>> manhattan_distances([[3]], [[3]])\n array([[0.]])\n >>> manhattan_distances([[3]], [[2]])\n array([[1.]])\n >>> manhattan_distances([[2]], [[3]])\n array([[1.]])\n >>> manhattan_distances([[1, 2], [3, 4]],\\\n [[1, 2], [0, 3]])\n array([[0., 2.],\n [4., 4.]])\n ", "n_words": 225, "vocab_size": 133, "n_whitespaces": 444, "language": "en" } }, { "id": 109904, "commit_id": "df6f95703b60348e01603f98a439b133da2938a0", "repo": "matplotlib", "path": "lib/mpl_toolkits/axisartist/axis_artist.py", "file_name": "axis_artist.py", "fun_name": "set_axis_direction", "commit_message": "Improve mpl_toolkit documentation", "code": "def set_axis_direction(self, label_direction):\n \n self.set_default_alignment(label_direction)\n self.set_default_angle(label_direction)\n self._axis_direction = label_direction\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 36, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 25, "n_ast_nodes": 43, "n_identifiers": 6, "d_id": 23812, "documentation": { "docstring": "\n Adjust the text angle and text alignment of ticklabels\n according to the Matplotlib convention.\n\n The *label_direction* must be one of [left, right, bottom, top].\n\n ===================== ========== ========= ========== ==========\n Property left bottom right top\n ===================== ========== ========= ========== ==========\n ticklabel angle 90 0 -90 180\n ticklabel va center baseline center baseline\n ticklabel ha right center right center\n ===================== ========== ========= ========== ==========\n\n Note that the text angles are actually relative to (90 + angle\n of the direction to the ticklabel), which gives 0 for bottom\n axis.\n\n Parameters\n ----------\n label_direction : {\"left\", \"bottom\", \"right\", \"top\"}\n\n ", "n_words": 94, "vocab_size": 60, "n_whitespaces": 331, "language": "en" } }, { "id": 266053, "commit_id": "93e7457e0d84ad24cba22cc5c0811777ddebf94e", "repo": "netbox", "path": "netbox/netbox/views/generic/bulk_views.py", "file_name": "bulk_views.py", "fun_name": "prep_related_object_data", "commit_message": "4347 Add JSON/YAML import support for all objects (#10367)\n\n* 4347 initial code for json import\r\n\r\n* 4347 initial code for json import\r\n\r\n* Clean up form processing logic\r\n\r\n* Consolidate import forms\r\n\r\n* Consolidate object import/update logic\r\n\r\n* Clean up bulk import view\r\n\r\nCo-authored-by: jeremystretch ", "code": "def prep_related_object_data(self, parent, data):\n \n return data\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 20, "n_identifiers": 4, "d_id": 78283, "documentation": { "docstring": "\n Hook to modify the data for related objects before it's passed to the related object form (for example, to\n assign a parent object).\n ", "n_words": 23, "vocab_size": 19, "n_whitespaces": 45, "language": "en" } }, { "id": 199650, "commit_id": 
"93e4d381d35cd4c21a3a8d713c157f8fb21f725b", "repo": "sympy", "path": "sympy/polys/appellseqs.py", "file_name": "appellseqs.py", "fun_name": "euler_poly", "commit_message": "Custom Appell sequence functions and a doctest", "code": "def euler_poly(n, x=None, polys=False):\n \n if n < 0:\n raise ValueError(\"Cannot generate Euler polynomial of degree %s\" % n)\n poly = DMP(dup_euler(int(n), QQ), QQ)\n if x is not None:\n poly = Poly.new(poly, x)\n else:\n poly = PurePoly.new(poly, Dummy('x'))\n return poly if polys else poly.as_expr()\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 82, "n_words": 43, "vocab_size": 36, "complexity": 4, "nloc": 9, "token_counts": 83, "n_ast_nodes": 133, "n_identifiers": 15, "d_id": 49316, "documentation": { "docstring": "Generates the Euler polynomial `\\operatorname{E}_n(x)`.\n\n Parameters\n ==========\n\n n : int\n Degree of the polynomial.\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n ", "n_words": 31, "vocab_size": 26, "n_whitespaces": 63, "language": "en" } }, { "id": 218574, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/json/encoder.py", "file_name": "encoder.py", "fun_name": "iterencode", "commit_message": "add python 3.10.4 for windows", "code": "def iterencode(self, o, _one_shot=False):\n \n if self.check_circular:\n markers = {}\n else:\n markers = None\n if self.ensure_ascii:\n _encoder = encode_basestring_ascii\n else:\n _encoder = encode_basestring\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 101, "n_words": 22, "vocab_size": 15, "complexity": 6, "nloc": 22, "token_counts": 138, "n_ast_nodes": 66, "n_identifiers": 10, "d_id": 55396, "documentation": { "docstring": "Encode the given object and yield each string\n representation as available.\n\n For example::\n\n for chunk in JSONEncoder().iterencode(bigobject):\n mysocket.write(chunk)\n\n ", "n_words": 18, "vocab_size": 18, "n_whitespaces": 65, "language": "en" } }, { "id": 185757, "commit_id": "b524fa08eecadc83b0b694278db1c79d90feb9d8", "repo": "textual", "path": "src/textual/widgets/_data_table.py", "file_name": "_data_table.py", "fun_name": "clear", "commit_message": "ffixed table refresh on add row", "code": "def clear(self) -> None:\n \n self.row_count = 0\n self._clear_caches()\n self._y_offsets.clear()\n self.data.clear()\n self.rows.clear()\n self._line_no = 0\n self._require_update_dimensions = True\n self.refresh()\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 81, "n_words": 18, "vocab_size": 15, "complexity": 1, "nloc": 14, "token_counts": 54, "n_ast_nodes": 94, "n_identifiers": 10, "d_id": 45161, "documentation": { "docstring": "Clear the table.\n\n Args:\n columns (bool, optional): Also clear the columns. 
Defaults to False.\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 39, "language": "en" } }, { "id": 259484, "commit_id": "d400723a2112f15c5d5b4d40dfac2ed8a19cca5c", "repo": "scikit-learn", "path": "sklearn/inspection/_plot/tests/test_boundary_decision_display.py", "file_name": "test_boundary_decision_display.py", "fun_name": "test_string_target", "commit_message": "FEA Add DecisionBoundaryDisplay (#16061)\n\nCo-authored-by: Guillaume Lemaitre \r\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Loïc Estève ", "code": "def test_string_target(pyplot):\n \n iris = load_iris()\n X = iris.data[:, [0, 1]]\n\n # Use strings as target\n y = iris.target_names[iris.target]\n log_reg = LogisticRegression().fit(X, y)\n\n # Does not raise\n DecisionBoundaryDisplay.from_estimator(\n log_reg,\n X,\n grid_resolution=5,\n response_method=\"predict\",\n )\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 87, "n_words": 32, "vocab_size": 28, "complexity": 1, "nloc": 11, "token_counts": 64, "n_ast_nodes": 103, "n_identifiers": 16, "d_id": 75797, "documentation": { "docstring": "Check that decision boundary works with classifiers trained on string labels.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 44105, "commit_id": "602abe8394fafe7de54df7e73af56de848cdf617", "repo": "airflow", "path": "airflow/models/variable.py", "file_name": "variable.py", "fun_name": "setdefault", "commit_message": "Remove `:type` lines now sphinx-autoapi supports typehints (#20951)\n\n* Remove `:type` lines now sphinx-autoapi supports typehints\r\n\r\nSince we have no updated sphinx-autoapi to a more recent version it\r\nsupports showing type hints in the documentation, so we don't need to\r\nhave the type hints _and_ the `:type` lines -- which is good, as the\r\nones in the doc strings are easy to get out of date!\r\n\r\nThe following settings have been set:\r\n\r\n`autodoc_typehints = 'description'` -- show types in description (where\r\nprevious `:type` used to show up)\r\n\r\n`autodoc_typehints_description_target = 'documented'` -- only link to\r\ntypes that are documented. (Without this we have some missing return\r\ntypes that aren't documented, and aren't linked to in our current python\r\nAPI docs, so this caused a build failure)\r\n\r\n`autodoc_typehints_format = 'short'` -- Shorten type hints where\r\npossible, i.e. 
`StringIO` instead of `io.StringIO`\r\n\r\n* Add argument type names to local spelling dictionary\r\n\r\nNow that we are using the type hints in the docs, sphinxcontrib-spelling\r\npicks them up as words to be checked, so we have to ignore them.\r\n\r\nI've chosen to add the provider specific ones to local dictionary files\r\nrather than the global, as for example, `mgmt` is an error in most\r\nplaces, but not in some of the Azure provider.", "code": "def setdefault(cls, key, default, description=None, deserialize_json=False):\n \n obj = Variable.get(key, default_var=None, deserialize_json=deserialize_json)\n if obj is None:\n if default is not None:\n Variable.set(key, default, description=description, serialize_json=deserialize_json)\n return default\n else:\n raise ValueError('Default Value must be set')\n else:\n return obj\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 142, "n_words": 36, "vocab_size": 27, "complexity": 3, "nloc": 10, "token_counts": 74, "n_ast_nodes": 113, "n_identifiers": 13, "d_id": 8155, "documentation": { "docstring": "\n Like a Python builtin dict object, setdefault returns the current value\n for a key, and if it isn't there, stores the default value and returns it.\n\n :param key: Dict key for this Variable\n :param default: Default value to set and return if the variable\n isn't already in the DB\n :param deserialize_json: Store this as a JSON encoded value in the DB\n and un-encode it when retrieving a value\n :return: Mixed\n ", "n_words": 70, "vocab_size": 46, "n_whitespaces": 142, "language": "en" } }, { "id": 279734, "commit_id": "00524152437b957ca4e850a5db014e223d3c6826", "repo": "keras", "path": "keras/engine/base_layer.py", "file_name": "base_layer.py", "fun_name": "_update_trackables", "commit_message": "isort, black and flake8 checked", "code": "def _update_trackables(self):\n \n for trackable_obj in self._self_tracked_trackables:\n if isinstance(\n trackable_obj, tf.__internal__.tracking.TrackableDataStructure\n ):\n self._track_variables(trackable_obj)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 78, "n_words": 12, "vocab_size": 12, "complexity": 3, "nloc": 6, "token_counts": 33, "n_ast_nodes": 54, "n_identifiers": 10, "d_id": 83115, "documentation": { "docstring": "Track variables added to lists/dicts after creation", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 203511, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admin/tests.py", "file_name": "tests.py", "fun_name": "assertCountSeleniumElements", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def assertCountSeleniumElements(self, selector, count, root_element=None):\n \n from selenium.webdriver.common.by import By\n\n root_element = root_element or self.selenium\n self.assertEqual(\n len(root_element.find_elements(By.CSS_SELECTOR, selector)), count\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 65, "n_words": 19, "vocab_size": 18, "complexity": 2, "nloc": 6, "token_counts": 51, "n_ast_nodes": 76, "n_identifiers": 14, "d_id": 50416, "documentation": { "docstring": "\n Assert number of matches for a CSS selector.\n\n `root_element` allow restriction to a pre-selected node.\n ", 
"n_words": 15, "vocab_size": 14, "n_whitespaces": 37, "language": "en" } }, { "id": 185316, "commit_id": "cf14b812ed47982463062e5b51bce506ad6ede1f", "repo": "textual", "path": "src/textual/app.py", "file_name": "app.py", "fun_name": "action_pop_screen", "commit_message": "words", "code": "async def action_pop_screen(self) -> None:\n \n self.pop_screen()\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 13, "n_ast_nodes": 26, "n_identifiers": 3, "d_id": 44967, "documentation": { "docstring": "Removes the topmost screen and makes the new topmost screen active.", "n_words": 11, "vocab_size": 8, "n_whitespaces": 10, "language": "en" } }, { "id": 83031, "commit_id": "4f482c234c3ab72d264e7bff7835dad5207b9d07", "repo": "zulip", "path": "zerver/tests/test_message_send.py", "file_name": "test_message_send.py", "fun_name": "test_empty_string_topic", "commit_message": "string_validation: Standardize missing topic with missing stream name.\n\nCo-authored-by: Shlok Patel ", "code": "def test_empty_string_topic(self) -> None:\n \n self.login(\"hamlet\")\n result = self.client_post(\n \"/json/messages\",\n {\n \"type\": \"stream\",\n \"to\": \"Verona\",\n \"client\": \"test suite\",\n \"content\": \"Test message\",\n \"topic\": \"\",\n },\n )\n self.assert_json_error(result, \"Topic can't be empty!\")\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 172, "n_words": 29, "vocab_size": 29, "complexity": 1, "nloc": 16, "token_counts": 54, "n_ast_nodes": 107, "n_identifiers": 6, "d_id": 17583, "documentation": { "docstring": "\n Sending a message that has empty string topic should fail\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 96875, "commit_id": "3e8115c4a681e9c4adeafb1f15eb669a9342b93c", "repo": "sentry", "path": "src/sentry/snuba/metrics/fields/base.py", "file_name": "base.py", "fun_name": "generate_metric_ids", "commit_message": "feat(metrics): Add initial framework for derived metrics [INGEST-924] (#32451)\n\n* feat(metrics): Add initial framework for derived metrics\r\n\r\nAdds support for derived metrics composed of\r\nconstituent metrics that span one entity\r\n\r\n* Adds logic/test for when metric does not exist\r\n\r\n* Fix failing test + incorporate PR feedback\r\n\r\n* Rename snql functions to their snuba name", "code": "def generate_metric_ids(self) -> Set[Any]:\n \n raise NotImplementedError\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 6, "token_counts": 13, "n_ast_nodes": 23, "n_identifiers": 5, "d_id": 19347, "documentation": { "docstring": "\n Method that generates all the metric ids required to query an instance of\n MetricsFieldBase\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 36, "language": "en" } }, { "id": 231197, "commit_id": "d5a345d01507f8b6792c51507d1d8f35d7386d29", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/scatter/_marker.py", "file_name": "_marker.py", "fun_name": "angleref", "commit_message": "update to plotly.js 2.16.1", "code": "def angleref(self):\n \n return self[\"angleref\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": 
"", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 62773, "documentation": { "docstring": "\n Sets the reference for marker angle. With \"previous\", angle 0\n points along the line from the previous point to this one. With\n \"up\", angle 0 points toward the top of the screen.\n\n The 'angleref' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['previous', 'up']\n\n Returns\n -------\n Any\n ", "n_words": 55, "vocab_size": 44, "n_whitespaces": 136, "language": "en" } }, { "id": 321259, "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", "repo": "qutebrowser", "path": "qutebrowser/mainwindow/prompt.py", "file_name": "prompt.py", "fun_name": "ask_question", "commit_message": "Run scripts/dev/rewrite_enums.py", "code": "def ask_question(self, question, blocking):\n \n log.prompt.debug(\"Asking question {}, blocking {}, loops {}, queue \"\n \"{}\".format(question, blocking, self._loops,\n self._queue))\n\n if self._shutting_down:\n # If we're currently shutting down we have to ignore this question\n # to avoid segfaults - see\n # https://github.com/qutebrowser/qutebrowser/issues/95\n log.prompt.debug(\"Ignoring question because we're shutting down.\")\n question.abort()\n return None\n\n if self._question is not None and not blocking:\n # We got an async question, but we're already busy with one, so we\n # just queue it up for later.\n log.prompt.debug(\"Adding {} to queue.\".format(question))\n self._queue.append(question)\n return None\n\n if blocking:\n # If we're blocking we save the old question on the stack, so we\n # can restore it after exec, if exec gets called multiple times.\n log.prompt.debug(\"New question is blocking, saving {}\".format(\n self._question))\n old_question = self._question\n if old_question is not None:\n old_question.interrupted = True\n\n self._question = question\n self.show_prompts.emit(question)\n\n if blocking:\n loop = qtutils.EventLoop()\n self._loops.append(loop)\n loop.destroyed.connect(lambda: self._loops.remove(loop))\n question.completed.connect(loop.quit)\n question.completed.connect(loop.deleteLater)\n log.prompt.debug(\"Starting loop.exec() for {}\".format(question))\n flags = cast(QEventLoop.ProcessEventsFlags,\n QEventLoop.ProcessEventsFlag.ExcludeSocketNotifiers)\n loop.exec(flags)\n log.prompt.debug(\"Ending loop.exec() for {}\".format(question))\n\n log.prompt.debug(\"Restoring old question {}\".format(old_question))\n self._question = old_question\n self.show_prompts.emit(old_question)\n if old_question is None:\n # Nothing left to restore, so we can go back to popping async\n # questions.\n if self._queue:\n self._pop_later()\n\n return question.answer\n else:\n question.completed.connect(self._pop_later)\n return None\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 786, "n_words": 193, "vocab_size": 117, "complexity": 9, "nloc": 41, "token_counts": 295, "n_ast_nodes": 497, "n_identifiers": 36, "d_id": 117614, "documentation": { "docstring": "Display a prompt for a given question.\n\n Args:\n question: The Question object to ask.\n blocking: If True, this function blocks and returns the result.\n\n Return:\n The answer of the user when blocking=True.\n None if blocking=False.\n ", "n_words": 35, "vocab_size": 32, "n_whitespaces": 100, "language": 
"en" } }, { "id": 19700, "commit_id": "9a3b3ce70621af6f9adaa9eeac9cf83fa149319c", "repo": "pipenv", "path": "pipenv/exceptions.py", "file_name": "exceptions.py", "fun_name": "prettify_exc", "commit_message": "Issue 4993 Add standard pre commit hooks and apply linting. (#4994)\n\n* Add .pre-commit-config.yaml to the project and exclude tests (for now). This does not include the MyPy linting that pip does but does include everything else.", "code": "def prettify_exc(error):\n \n errors = []\n for exc in KNOWN_EXCEPTIONS:\n search_string = exc.match_string if exc.match_string else exc.exception_name\n split_string = (\n exc.show_from_string if exc.show_from_string else exc.exception_name\n )\n if search_string in error:\n # for known exceptions with no display rules and no prefix\n # we should simply show nothing\n if not exc.show_from_string and not exc.prefix:\n errors.append(\"\")\n continue\n elif exc.prefix and exc.prefix in error:\n _, error, info = error.rpartition(exc.prefix)\n else:\n _, error, info = error.rpartition(split_string)\n errors.append(f\"{error} {info}\")\n if not errors:\n return f\"{vistir.misc.decode_for_output(error)}\"\n\n return \"\\n\".join(errors)\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 267, "n_words": 80, "vocab_size": 51, "complexity": 10, "nloc": 19, "token_counts": 126, "n_ast_nodes": 231, "n_identifiers": 19, "d_id": 3069, "documentation": { "docstring": "Catch known errors and prettify them instead of showing the\n entire traceback, for better UX", "n_words": 15, "vocab_size": 15, "n_whitespaces": 17, "language": "en" } }, { "id": 73308, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/modeladmin/views.py", "file_name": "views.py", "fun_name": "get_filename", "commit_message": "Reformat with black", "code": "def get_filename(self):\n \n return getattr(self.model_admin, \"export_filename\", super().get_filename())\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 23, "n_ast_nodes": 41, "n_identifiers": 5, "d_id": 16012, "documentation": { "docstring": "Get filename for exported spreadsheet, without extension", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 220829, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/tasks.py", "file_name": "tasks.py", "fun_name": "ensure_future", "commit_message": "add python 3.10.4 for windows", "code": "def ensure_future(coro_or_future, *, loop=None):\n \n return _ensure_future(coro_or_future, loop=loop)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 21, "n_ast_nodes": 34, "n_identifiers": 4, "d_id": 56137, "documentation": { "docstring": "Wrap a coroutine or an awaitable in a future.\n\n If the argument is a Future, it is returned directly.\n ", "n_words": 19, "vocab_size": 16, "n_whitespaces": 25, "language": "en" } }, { "id": 203604, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/auth/backends.py", "file_name": "backends.py", "fun_name": "_get_permissions", "commit_message": "Refs 
#33476 -- Reformatted code with Black.", "code": "def _get_permissions(self, user_obj, obj, from_name):\n \n if not user_obj.is_active or user_obj.is_anonymous or obj is not None:\n return set()\n\n perm_cache_name = \"_%s_perm_cache\" % from_name\n if not hasattr(user_obj, perm_cache_name):\n if user_obj.is_superuser:\n perms = Permission.objects.all()\n else:\n perms = getattr(self, \"_get_%s_permissions\" % from_name)(user_obj)\n perms = perms.values_list(\"content_type__app_label\", \"codename\").order_by()\n setattr(\n user_obj, perm_cache_name, {\"%s.%s\" % (ct, name) for ct, name in perms}\n )\n return getattr(user_obj, perm_cache_name)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 204, "n_words": 58, "vocab_size": 44, "complexity": 7, "nloc": 14, "token_counts": 117, "n_ast_nodes": 190, "n_identifiers": 21, "d_id": 50471, "documentation": { "docstring": "\n Return the permissions of `user_obj` from `from_name`. `from_name` can\n be either \"group\" or \"user\" to return permissions from\n `_get_group_permissions` or `_get_user_permissions` respectively.\n ", "n_words": 22, "vocab_size": 19, "n_whitespaces": 51, "language": "en" } }, { "id": 62876, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/packaging/specifiers.py", "file_name": "specifiers.py", "fun_name": "__hash__", "commit_message": "upd; format", "code": "def __hash__(self):\n # type: () -> int\n \n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 1, "token_counts": 6, "n_ast_nodes": 14, "n_identifiers": 2, "d_id": 13057, "documentation": { "docstring": "\n Returns a hash value for this Specifier like object.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 65547, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/buying/doctype/supplier_scorecard_variable/supplier_scorecard_variable.py", "file_name": "supplier_scorecard_variable.py", "fun_name": "get_cost_of_delayed_shipments", "commit_message": "style: format code with black", "code": "def get_cost_of_delayed_shipments(scorecard):\n\t\n\treturn get_total_cost_of_shipments(scorecard) - get_cost_of_on_time_shipments(scorecard)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 4, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 29, "n_identifiers": 4, "d_id": 13924, "documentation": { "docstring": "Gets the total cost of all delayed shipments in the period (based on Purchase Receipts - POs)", "n_words": 17, "vocab_size": 16, "n_whitespaces": 16, "language": "en" } }, { "id": 20474, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pygments/scanner.py", "file_name": "scanner.py", "fun_name": "check", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety 
restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def check(self, pattern):\n \n if self.eos:\n raise EndOfText()\n if pattern not in self._re_cache:\n self._re_cache[pattern] = re.compile(pattern, self.flags)\n return self._re_cache[pattern].match(self.data, self.pos)\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 69, "n_words": 19, "vocab_size": 18, "complexity": 3, "nloc": 6, "token_counts": 60, "n_ast_nodes": 93, "n_identifiers": 12, "d_id": 3391, "documentation": { "docstring": "\n Apply `pattern` on the current position and return\n the match object. (Doesn't touch pos). Use this for\n lookahead.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 47, "language": "en" } }, { "id": 47898, "commit_id": "98d52af7074e9a82457515588bdf9cdd6de70f35", "repo": "airflow", "path": "tests/executors/test_kubernetes_executor.py", "file_name": "test_kubernetes_executor.py", "fun_name": "test_clear_not_launched_queued_tasks_mapped_task", "commit_message": "Use map_index when clearing not launched tasks in k8s (#23224)", "code": "def test_clear_not_launched_queued_tasks_mapped_task(self, dag_maker, session):\n \n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 11, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 41, "token_counts": 238, "n_ast_nodes": 17, "n_identifiers": 4, "d_id": 9291, "documentation": { "docstring": "One mapped task has a launched pod - other does not.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 227437, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_mesh3d.py", "file_name": "_mesh3d.py", "fun_name": "vertexcolorsrc", "commit_message": "switch to black .22", "code": "def vertexcolorsrc(self):\n \n return self[\"vertexcolorsrc\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 59110, "documentation": { "docstring": "\n Sets the source reference on Chart Studio Cloud for\n `vertexcolor`.\n\n The 'vertexcolorsrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n ", "n_words": 27, "vocab_size": 25, "n_whitespaces": 84, "language": "en" } }, { "id": 226181, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_bar.py", "file_name": "_bar.py", "fun_name": "widthsrc", "commit_message": "switch to black .22", "code": "def widthsrc(self):\n \n return self[\"widthsrc\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 57854, "documentation": { "docstring": "\n Sets the source reference on Chart Studio Cloud for `width`.\n\n The 'widthsrc' property must be 
specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n ", "n_words": 27, "vocab_size": 25, "n_whitespaces": 77, "language": "en" } }, { "id": 196808, "commit_id": "f757f3daae6e11ea0cfb7dadc133274d8d74315f", "repo": "sympy", "path": "sympy/matrices/utilities.py", "file_name": "utilities.py", "fun_name": "_dotprodsimp", "commit_message": "Reordered imports 2", "code": "def _dotprodsimp(expr, withsimp=False):\n \n from sympy.simplify.simplify import dotprodsimp as dps\n return dps(expr, withsimp=withsimp)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 3, "token_counts": 29, "n_ast_nodes": 45, "n_identifiers": 7, "d_id": 48190, "documentation": { "docstring": "Wrapper for simplify.dotprodsimp to avoid circular imports.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 19980, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_internal/utils/glibc.py", "file_name": "glibc.py", "fun_name": "libc_ver", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def libc_ver() -> Tuple[str, str]:\n \n glibc_version = glibc_version_string()\n if glibc_version is None:\n return (\"\", \"\")\n else:\n return (\"glibc\", glibc_version)\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 45, "n_words": 19, "vocab_size": 17, "complexity": 2, "nloc": 11, "token_counts": 36, "n_ast_nodes": 64, "n_identifiers": 5, "d_id": 3162, "documentation": { "docstring": "Try to determine the glibc version\n\n Returns a tuple of strings (lib, version) which default to empty strings\n in case the lookup fails.\n ", "n_words": 23, "vocab_size": 20, "n_whitespaces": 32, "language": "en" } }, { "id": 114370, "commit_id": "0fd3b436c38f38bcae6fed9e14dc4d2a12e90793", "repo": "mindsdb", "path": "mindsdb/integrations/libs/base_handler.py", "file_name": "base_handler.py", "fun_name": "select_query", "commit_message": "fix tests and reformat", "code": "def select_query(self, targets, from_stmt, where_stmt) -> pd.DataFrame:\n # noqa\n raise NotImplementedError()\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 26, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 7, "token_counts": 20, "n_ast_nodes": 33, "n_identifiers": 8, "d_id": 25169, "documentation": { "docstring": "\n Select data from some entity in the handler and return in dataframe format.\n \n This method assumes a raw query has been parsed beforehand with mindsdb_sql using some dialect compatible with the handler, and only targets, from, and where clauses are fed into it.\n ", "n_words": 43, "vocab_size": 37, "n_whitespaces": 73, "language": 
"en" } }, { "id": 124658, "commit_id": "8bb67427c18887f43721cf9726d6836c3b40cafb", "repo": "ray", "path": "python/ray/train/tests/test_data_parallel_trainer.py", "file_name": "test_data_parallel_trainer.py", "fun_name": "test_bad_return_in_train_loop", "commit_message": "[AIR] Discard returns of train loops in Trainers (#26448)\n\nDiscards returns of user defined train loop functions to prevent deser issues with eg. torch models. Those returns are not used anywhere in AIR, so there is no loss of functionality.", "code": "def test_bad_return_in_train_loop(ray_start_4_cpus):\n \n\n # Simulates what happens with eg. torch models", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 16, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 8, "token_counts": 30, "n_ast_nodes": 14, "n_identifiers": 2, "d_id": 27647, "documentation": { "docstring": "Test to check if returns from train loop are discarded.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 232549, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/template/_data.py", "file_name": "_data.py", "fun_name": "cone", "commit_message": "switch to black .22", "code": "def cone(self):\n \n return self[\"cone\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 63993, "documentation": { "docstring": "\n The 'cone' property is a tuple of instances of\n Cone that may be specified as:\n - A list or tuple of instances of plotly.graph_objs.layout.template.data.Cone\n - A list or tuple of dicts of string/value properties that\n will be passed to the Cone constructor\n\n Supported dict properties:\n\n Returns\n -------\n tuple[plotly.graph_objs.layout.template.data.Cone]\n ", "n_words": 48, "vocab_size": 33, "n_whitespaces": 131, "language": "en" } }, { "id": 67037, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/projects/report/project_wise_stock_tracking/project_wise_stock_tracking.py", "file_name": "project_wise_stock_tracking.py", "fun_name": "get_purchased_items_cost", "commit_message": "style: format code with black", "code": "def get_purchased_items_cost():\n\tpr_items = frappe.db.sql(\n\t\t,\n\t\tas_dict=1,\n\t)\n\n\tpr_item_map = {}\n\tfor item in pr_items:\n\t\tpr_item_map.setdefault(item.project, item.amount)\n\n\treturn pr_item_map\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 10, "n_words": 19, "vocab_size": 17, "complexity": 2, "nloc": 11, "token_counts": 42, "n_ast_nodes": 67, "n_identifiers": 11, "d_id": 14414, "documentation": { "docstring": "select project, sum(base_net_amount) as amount\n\t\tfrom `tabPurchase Receipt Item` where ifnull(project, '') != ''\n\t\tand docstatus = 1 group by project", "n_words": 21, "vocab_size": 21, "n_whitespaces": 18, "language": "en" } }, { "id": 176206, "commit_id": "5dfd57af2a141a013ae3753e160180b82bec9469", "repo": "networkx", "path": "networkx/tests/test_convert_scipy.py", "file_name": "test_convert_scipy.py", "fun_name": "test_identity_weighted_graph_matrix", "commit_message": "Use scipy.sparse array datastructure 
(#5139)\n\n* Step 1: use sparse arrays in nx.to_scipy_sparse_matrix.\r\n\r\nSeems like a reasonable place to start.\r\nnx.to_scipy_sparse_matrix is one of the primary interfaces to\r\nscipy.sparse from within NetworkX.\r\n\r\n* 1: Use np.outer instead of mult col/row vectors\r\n\r\nFix two instances in modularitymatrix where a new 2D array was being\r\ncreated via an outer product of two \\\"vectors\\\".\r\n\r\nIn the matrix case, this was a row vector \\* a column vector. In the\r\narray case this can be disambiguated by being explicit with np.outer.\r\n\r\n* Update _transition_matrix in laplacianmatrix module\r\n\r\n - A few instances of matrix multiplication operator\r\n - Add np.newaxis + transpose to get shape right for broadcasting\r\n - Explicitly convert e.g. sp.sparse.spdiags to a csr_array.\r\n\r\n* Update directed_combinitorial_laplacian w/ sparse array.\r\n\r\n - Wrap spdiags in csr_array and update matmul operators.\r\n\r\n* Rm matrix-specific code from lgc and hmn modules\r\n\r\n - Replace .A call with appropriate array semantics\r\n - wrap sparse.diags in csr_array.\r\n\r\n* Change hits to use sparse array semantics.\r\n\r\n - Replace * with @\r\n - Remove superfluous calls to flatten.\r\n\r\n* Update sparse matrix usage in layout module.\r\n - Simplify lil.getrowview call\r\n - Wrap spdiags in csr_array.\r\n\r\n* lil_matrix -> lil_array in graphmatrix.py.\r\n\r\n* WIP: Start working on algebraic connectivity module.\r\n\r\n* Incorporate auth mat varname feedback.\r\n\r\n* Revert 1D slice and comment for 1D sparse future.\r\n\r\n* Add TODOs: rm csr_array wrapper around spdiags etc.\r\n\r\n* WIP: cleanup algebraicconn: tracemin_fiedler.\r\n\r\n* Typo.\r\n\r\n* Finish reviewing algebraicconnectivity.\r\n\r\n* Convert bethe_hessian matrix to use sparse arrays.\r\n\r\n* WIP: update laplacian.\r\n\r\nUpdate undirected laplacian functions.\r\n\r\n* WIP: laplacian - add comment about _transition_matrix return types.\r\n\r\n* Finish laplacianmatrix review.\r\n\r\n* Update attrmatrix.\r\n\r\n* Switch to official laplacian function.\r\n\r\n* Update pagerank to use sparse array.\r\n\r\n* Switch bipartite matrix to sparse arrays.\r\n\r\n* Check from_scipy_sparse_matrix works with arrays.\r\n\r\nModifies test suite.\r\n\r\n* Apply changes from review.\r\n\r\n* Fix failing docstring tests.\r\n\r\n* Fix missing axis for in-place multiplication.\r\n\r\n* Use scipy==1.8rc2\r\n\r\n* Use matrix multiplication\r\n\r\n* Fix PyPy CI\r\n\r\n* [MRG] Create plot_subgraphs.py example (#5165)\r\n\r\n* Create plot_subgraphs.py\r\n\r\nhttps://github.com/networkx/networkx/issues/4220\r\n\r\n* Update plot_subgraphs.py\r\n\r\nblack\r\n\r\n* Update plot_subgraphs.py\r\n\r\nlint plus font_size\r\n\r\n* Update plot_subgraphs.py\r\n\r\nadded more plots\r\n\r\n* Update plot_subgraphs.py\r\n\r\nremoved plots from the unit test and added comments\r\n\r\n* Update plot_subgraphs.py\r\n\r\nlint\r\n\r\n* Update plot_subgraphs.py\r\n\r\ntypos fixed\r\n\r\n* Update plot_subgraphs.py\r\n\r\nadded nodes to the plot of the edges removed that was commented out for whatever reason\r\n\r\n* Update plot_subgraphs.py\r\n\r\nrevert the latest commit - the line was commented out for a reason - it's broken\r\n\r\n* Update plot_subgraphs.py\r\n\r\nfixed node color issue\r\n\r\n* Update plot_subgraphs.py\r\n\r\nformat fix\r\n\r\n* Update plot_subgraphs.py\r\n\r\nforgot to draw the nodes... 
now fixed\r\n\r\n* Fix sphinx warnings about heading length.\r\n\r\n* Update examples/algorithms/plot_subgraphs.py\r\n\r\n* Update examples/algorithms/plot_subgraphs.py\r\n\r\nCo-authored-by: Ross Barnowski \r\nCo-authored-by: Dan Schult \r\n\r\n* Add traveling salesman problem to example gallery (#4874)\r\n\r\nAdds an example of the using Christofides to solve the TSP problem to the example galery.\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Fixed inconsistent documentation for nbunch parameter in DiGraph.edges() (#5037)\r\n\r\n* Fixed inconsistent documentation for nbunch parameter in DiGraph.edges()\r\n\r\n* Resolved Requested Changes\r\n\r\n* Revert changes to degree docstrings.\r\n\r\n* Update comments in example.\r\n\r\n* Apply wording to edges method in all graph classes.\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Compatibility updates from testing with numpy/scipy/pytest rc's (#5226)\r\n\r\n* Rm deprecated scipy subpkg access.\r\n\r\n* Use recwarn fixture in place of deprecated pytest pattern.\r\n\r\n* Rm unnecessary try/except from tests.\r\n\r\n* Replace internal `close` fn with `math.isclose`. (#5224)\r\n\r\n* Replace internal close fn with math.isclose.\r\n\r\n* Fix lines in docstring examples.\r\n\r\n* Fix Python 3.10 deprecation warning w/ int div. (#5231)\r\n\r\n* Touchups and suggestions for subgraph gallery example (#5225)\r\n\r\n* Simplify construction of G with edges rm'd\r\n\r\n* Rm unused graph attribute.\r\n\r\n* Shorten categorization by node type.\r\n\r\n* Simplify node coloring.\r\n\r\n* Simplify isomorphism check.\r\n\r\n* Rm unit test.\r\n\r\n* Rm redundant plotting of each subgraph.\r\n\r\n* Use new package name (#5234)\r\n\r\n* Allowing None edges in weight function of bidirectional Dijkstra (#5232)\r\n\r\n* added following feature also to bidirectional dijkstra: The weight function can be used to hide edges by returning None.\r\n\r\n* changed syntax for better readability and code duplicate avoidance\r\n\r\nCo-authored-by: Hohmann, Nikolas \r\n\r\n* Add an FAQ about assigning issues. (#5182)\r\n\r\n* Add FAQ about assigning issues.\r\n\r\n* Add note about linking issues from new PRs.\r\n\r\n* Update dev deps (#5243)\r\n\r\n* Update minor doc issues with tex notation (#5244)\r\n\r\n* Add FutureWarnings to fns that return sparse matrices\r\n\r\n - biadjacency_matrix.\r\n - bethe_hessian_matrix.\r\n - incidence_matrix.\r\n - laplacian functions.\r\n - modularity_matrix functions.\r\n - adjacency_matrix.\r\n\r\n* Add to_scipy_sparse_array and use it everywhere.\r\n\r\nAdd a new conversion function to preserve array semantics internally\r\nwhile not altering behavior for users.\r\n\r\nAlso adds FutureWarning to to_scipy_sparse_matrix.\r\n\r\n* Add from_scipy_sparse_array. 
Supercedes from_scipy_sparse_matrix.\r\n\r\n* Handle deprecations in separate PR.\r\n\r\n* Fix docstring examples.\r\n\r\nCo-authored-by: Mridul Seth \r\n\r\nCo-authored-by: Jarrod Millman \r\nCo-authored-by: Andrew Knyazev \r\nCo-authored-by: Dan Schult \r\nCo-authored-by: eskountis <56514439+eskountis@users.noreply.github.com>\r\nCo-authored-by: Anutosh Bhat <87052487+anutosh491@users.noreply.github.com>\r\nCo-authored-by: NikHoh \r\nCo-authored-by: Hohmann, Nikolas \r\nCo-authored-by: Sultan Orazbayev \r\nCo-authored-by: Mridul Seth ", "code": "def test_identity_weighted_graph_matrix(self):\n \n A = nx.to_scipy_sparse_array(self.G3)\n self.identity_conversion(self.G3, A, nx.Graph())\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 32, "n_ast_nodes": 53, "n_identifiers": 8, "d_id": 41766, "documentation": { "docstring": "Conversion from weighted graph to sparse matrix to weighted graph.", "n_words": 10, "vocab_size": 8, "n_whitespaces": 9, "language": "en" } }, { "id": 8572, "commit_id": "abfdc05018cc4dec5a2fed20ad09e94f1749fca9", "repo": "ludwig", "path": "ludwig/datasets/loaders/hm_fashion_recommendations.py", "file_name": "hm_fashion_recommendations.py", "fun_name": "_merge_dataframes", "commit_message": "Add H&M fashion recommendation dataset (#2708)\n\n* allow individual file downloads from kaggle\r\n\r\n* pipe download_filenames to kaggle download fn\r\n\r\n* add dataset config for H&M Fashion Recommendations\r\n\r\n* add custom loader\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* use local backend instead of mock\r\n\r\n* add docstring for sample\r\n\r\n* fix titanic test\r\n\r\n* move negative_sample to ludwig.data\r\n\r\n* do not negative sample in loader\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def _merge_dataframes(transactions_df, articles_df, customers_df):\n \n # Merge the transactions and articles dataframes\n transactions_df = pd.merge(\n transactions_df,\n articles_df,\n how=\"left\",\n left_on=\"article_id\",\n right_on=\"article_id\",\n )\n\n # Merge the transactions and customers dataframes\n transactions_df = pd.merge(\n transactions_df,\n customers_df,\n how=\"left\",\n left_on=\"customer_id\",\n right_on=\"customer_id\",\n )\n\n return transactions_df\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 132, "n_words": 38, "vocab_size": 24, "complexity": 1, "nloc": 16, "token_counts": 58, "n_ast_nodes": 96, "n_identifiers": 9, "d_id": 1463, "documentation": { "docstring": "Merge the transactions, articles, and customers dataframes into a single dataframe.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 270669, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/base_layer.py", "file_name": "base_layer.py", "fun_name": "_get_trainable_state", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _get_trainable_state(self):\n \n trainable_state = weakref.WeakKeyDictionary()\n for layer in self._flatten_layers():\n trainable_state[layer] = layer.trainable\n return trainable_state\n", "url": 
"https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 53, "n_words": 14, "vocab_size": 12, "complexity": 2, "nloc": 5, "token_counts": 32, "n_ast_nodes": 54, "n_identifiers": 8, "d_id": 80516, "documentation": { "docstring": "Get the `trainable` state of each sublayer.\n\n Returns:\n A dict mapping all sublayers to their `trainable` value.\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 40, "language": "en" } }, { "id": 204637, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/management/commands/inspectdb.py", "file_name": "inspectdb.py", "fun_name": "get_field_type", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_field_type(self, connection, table_name, row):\n \n field_params = {}\n field_notes = []\n\n try:\n field_type = connection.introspection.get_field_type(row.type_code, row)\n except KeyError:\n field_type = \"TextField\"\n field_notes.append(\"This field type is a guess.\")\n\n # Add max_length for all CharFields.\n if field_type == \"CharField\" and row.internal_size:\n field_params[\"max_length\"] = int(row.internal_size)\n\n if field_type in {\"CharField\", \"TextField\"} and row.collation:\n field_params[\"db_collation\"] = row.collation\n\n if field_type == \"DecimalField\":\n if row.precision is None or row.scale is None:\n field_notes.append(\n \"max_digits and decimal_places have been guessed, as this \"\n \"database handles decimal fields as float\"\n )\n field_params[\"max_digits\"] = (\n row.precision if row.precision is not None else 10\n )\n field_params[\"decimal_places\"] = (\n row.scale if row.scale is not None else 5\n )\n else:\n field_params[\"max_digits\"] = row.precision\n field_params[\"decimal_places\"] = row.scale\n\n return field_type, field_params, field_notes\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 459, "n_words": 116, "vocab_size": 74, "complexity": 11, "nloc": 28, "token_counts": 176, "n_ast_nodes": 299, "n_identifiers": 17, "d_id": 50819, "documentation": { "docstring": "\n Given the database connection, the table name, and the cursor row\n description, this routine will return the given field type name, as\n well as any additional keyword parameters and notes for the field.\n ", "n_words": 33, "vocab_size": 26, "n_whitespaces": 62, "language": "en" } }, { "id": 199434, "commit_id": "2163f938f26e75e10f2d25b92321511988eff502", "repo": "sympy", "path": "sympy/solvers/solvers.py", "file_name": "solvers.py", "fun_name": "solve_undetermined_coeffs", "commit_message": "mv solve_undetermined_coeffs and legacy behavior", "code": "def solve_undetermined_coeffs(equ, coeffs, *syms, **flags):\n r\n if not (coeffs and all(i.is_Symbol for i in coeffs)):\n raise ValueError('must provide symbols for coeffs')\n\n if isinstance(equ, Eq):\n eq = equ.lhs - equ.rhs\n else:\n eq = equ\n\n ceq = cancel(eq)\n xeq = _mexpand(ceq.as_numer_denom()[0], recursive=True)\n\n free = xeq.free_symbols\n coeffs = free & set(coeffs)\n if not coeffs:\n return ([], {}) if flags.get('set', None) else [] # solve(0, x) -> []\n\n if not syms:\n # e.g. 
A*exp(x) + B - (exp(x) + y) separated into parts that\n # don't/do depend on coeffs gives\n # -(exp(x) + y), A*exp(x) + B\n # then see what symbols are common to both\n # {x} = {x, A, B} - {x, y}\n ind, dep = xeq.as_independent(*coeffs, as_Add=True)\n dfree = dep.free_symbols\n syms = dfree & ind.free_symbols\n if not syms:\n # but if the system looks like (a + b)*x + b - c\n # then {} = {a, b, x} - c\n # so calculate {x} = {a, b, x} - {a, b}\n syms = dfree - set(coeffs)\n if not syms:\n syms = [Dummy()]\n else:\n if len(syms) == 1 and iterable(syms[0]):\n syms = syms[0]\n e, s, _ = recast_to_symbols([xeq], syms)\n xeq = e[0]\n syms = s\n\n # find the functional forms in which symbols appear\n\n gens = set(xeq.as_coefficients_dict(*syms).keys()) - {1}\n cset = set(coeffs)\n if any(g.has_xfree(cset) for g in gens):\n return # a generator contained a coefficient symbol\n\n # make sure we are working with symbols for generators\n\n e, gens, _ = recast_to_symbols([xeq], list(gens))\n xeq = e[0]\n\n # collect coefficients in front of generators\n\n system = list(collect(xeq, gens, evaluate=False).values())\n\n # get a solution\n\n soln = solve(system, coeffs, **flags)\n\n # unpack unless told otherwise if length is 1\n\n settings = flags.get('dict', None) or flags.get('set', None)\n if type(soln) is dict or settings or len(soln) != 1:\n return soln\n return soln[0]\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 580, "n_words": 295, "vocab_size": 168, "complexity": 18, "nloc": 103, "token_counts": 358, "n_ast_nodes": 580, "n_identifiers": 53, "d_id": 49266, "documentation": { "docstring": "\n Solve a system of equations in $k$ parameters that is formed by\n matching coefficients in variables ``coeffs`` that are on\n factors dependent on the remaining variables (or those given\n explicitly by ``syms``.\n\n Explanation\n ===========\n\n The result of this function is a dictionary with symbolic values of those\n parameters with respect to coefficients in $q$ -- empty if there\n is no solution or coefficients do not appear in the equation -- else\n None (if the system was not recognized). If there is more than one\n solution, the solutions are passed as a list. The output can be modified using\n the same semantics as for `solve` since the flags that are passed are sent\n directly to `solve` so, for example the flag ``dict=True`` will always return a list\n of solutions as dictionaries.\n\n This function accepts both Equality and Expr class instances.\n The solving process is most efficient when symbols are specified\n in addition to parameters to be determined, but an attempt to\n determine them (if absent) will be made. 
If an expected solution is not\n obtained (and symbols were not specified) try specifying them.\n\n Examples\n ========\n\n >>> from sympy import Eq, solve_undetermined_coeffs\n >>> from sympy.abc import a, b, c, h, p, k, x, y\n\n >>> solve_undetermined_coeffs(Eq(a*x + a + b, x/2), [a, b], x)\n {a: 1/2, b: -1/2}\n >>> solve_undetermined_coeffs(a - 2, [a])\n {a: 2}\n\n The equation can be nonlinear in the symbols:\n\n >>> X, Y, Z = y, x**y, y*x**y\n >>> eq = a*X + b*Y + c*Z - X - 2*Y - 3*Z\n >>> coeffs = a, b, c\n >>> syms = x, y\n >>> solve_undetermined_coeffs(eq, coeffs, syms)\n {a: 1, b: 2, c: 3}\n\n And the system can be nonlinear in coefficients, too, but if\n there is only a single solution, it will be returned as a\n dictionary:\n\n >>> eq = a*x**2 + b*x + c - ((x - h)**2 + 4*p*k)/4/p\n >>> solve_undetermined_coeffs(eq, (h, p, k), x)\n {h: -b/(2*a), k: (4*a*c - b**2)/(4*a), p: 1/(4*a)}\n\n Multiple solutions are always returned in a list:\n\n >>> solve_undetermined_coeffs(a**2*x + b - x, [a, b], x)\n [{a: -1, b: 0}, {a: 1, b: 0}]\n\n Using flag ``dict=True`` (in keeping with semantics in :func:`~.solve`)\n will force the result to always be a list with any solutions\n as elements in that list.\n\n >>> solve_undetermined_coeffs(a*x - 2*x, [a], dict=True)\n [{a: 2}]\n ", "n_words": 385, "vocab_size": 218, "n_whitespaces": 534, "language": "en" } }, { "id": 149765, "commit_id": "fc837c4daa27a18ff0e86128f4d52089b88fa5fb", "repo": "freqtrade", "path": "freqtrade/templates/FreqaiExampleStrategy.py", "file_name": "FreqaiExampleStrategy.py", "fun_name": "populate_any_indicators", "commit_message": "add freqao backend machinery, user interface, documentation", "code": "def populate_any_indicators(self, pair, df, tf, informative=None,coin=''):\n \n if informative is None:\n informative = self.dp.get_pair_dataframe(pair, tf)\n\n informative[coin+'rsi'] = ta.RSI(informative, timeperiod=14)\n informative[coin+'mfi'] = ta.MFI(informative, timeperiod=25)\n informative[coin+'adx'] = ta.ADX(informative, window=20)\n\n informative[coin+'20sma'] = ta.SMA(informative,timeperiod=20)\n informative[coin+'21ema'] = ta.EMA(informative,timeperiod=21)\n informative[coin+'bmsb'] = np.where(informative[coin+'20sma'].lt(informative[coin+'21ema']),1,0)\n informative[coin+'close_over_20sma'] = informative['close']/informative[coin+'20sma']\n\n informative[coin+'mfi'] = ta.MFI(informative, timeperiod=25)\n\n informative[coin+'ema21'] = ta.EMA(informative, timeperiod=21)\n informative[coin+'sma20'] = ta.SMA(informative, timeperiod=20)\n stoch = ta.STOCHRSI(informative, 15, 20, 2, 2)\n informative[coin+'srsi-fk'] = stoch['fastk']\n informative[coin+'srsi-fd'] = stoch['fastd']\n\n bollinger = qtpylib.bollinger_bands(qtpylib.typical_price(informative), window=14, stds=2.2)\n informative[coin+'bb_lowerband'] = bollinger['lower']\n informative[coin+'bb_middleband'] = bollinger['mid']\n informative[coin+'bb_upperband'] = bollinger['upper']\n informative[coin+'bb_width'] = ((informative[coin+\"bb_upperband\"] - informative[coin+\"bb_lowerband\"]) / informative[coin+\"bb_middleband\"])\n informative[coin+'close-bb_lower'] = informative['close'] / informative[coin+'bb_lowerband']\n\n informative[coin+'roc'] = ta.ROC(informative, timeperiod=3)\n informative[coin+'adx'] = ta.ADX(informative, window=14)\n\n macd = ta.MACD(informative)\n informative[coin+'macd'] = macd['macd']\n informative[coin+'pct-change'] = informative['close'].pct_change()\n informative[coin+'relative_volume'] = informative['volume'] / 
informative['volume'].rolling(10).mean()\n\n informative[coin+'pct-change'] = informative['close'].pct_change()\n\n indicators = [col for col in informative if col.startswith(coin)]\n\n for n in range(self.freqai_info['feature_parameters']['shift']+1):\n if n==0: continue\n informative_shift = informative[indicators].shift(n)\n informative_shift = informative_shift.add_suffix('_shift-'+str(n))\n informative = pd.concat((informative,informative_shift),axis=1)\n\n df = merge_informative_pair(df, informative, self.config['timeframe'], tf, ffill=True)\n skip_columns = [(s + '_'+tf) for s in\n ['date', 'open', 'high', 'low', 'close', 'volume']]\n df = df.drop(columns=skip_columns)\n\n return df\n\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 481, "n_words": 165, "vocab_size": 109, "complexity": 7, "nloc": 40, "token_counts": 614, "n_ast_nodes": 1004, "n_identifiers": 53, "d_id": 34522, "documentation": { "docstring": "\n Function designed to automatically generate, name and merge features\n from user indicated timeframes in the configuration file. User can add\n additional features here, but must follow the naming convention.\n :params:\n :pair: pair to be used as informative\n :df: strategy dataframe which will receive merges from informatives\n :tf: timeframe of the dataframe which will modify the feature names\n :informative: the dataframe associated with the informative pair\n :coin: the name of the coin which will modify the feature names.\n ", "n_words": 77, "vocab_size": 54, "n_whitespaces": 148, "language": "en" } }, { "id": 260817, "commit_id": "49279c3267c0c54cdba80a571820c46f25fbe883", "repo": "scikit-learn", "path": "sklearn/utils/__init__.py", "file_name": "__init__.py", "fun_name": "shuffle", "commit_message": "DOC ensures sklearn.utils.shuffle passes numpydoc validation (#24367)\n\nCo-authored-by: Guillaume Lemaitre ", "code": "def shuffle(*arrays, random_state=None, n_samples=None):\n \n return resample(\n *arrays, replace=False, n_samples=n_samples, random_state=random_state\n )\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 27, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 33, "n_ast_nodes": 50, "n_identifiers": 6, "d_id": 76516, "documentation": { "docstring": "Shuffle arrays or sparse matrices in a consistent way.\n\n This is a convenience alias to ``resample(*arrays, replace=False)`` to do\n random permutations of the collections.\n\n Parameters\n ----------\n *arrays : sequence of indexable data-structures\n Indexable data-structures can be arrays, lists, dataframes or scipy\n sparse matrices with consistent first dimension.\n\n random_state : int, RandomState instance or None, default=None\n Determines random number generation for shuffling\n the data.\n Pass an int for reproducible results across multiple function calls.\n See :term:`Glossary `.\n\n n_samples : int, default=None\n Number of samples to generate. If left to None this is\n automatically set to the first dimension of the arrays. It should\n not be larger than the length of arrays.\n\n Returns\n -------\n shuffled_arrays : sequence of indexable data-structures\n Sequence of shuffled copies of the collections. 
The original arrays\n are not impacted.\n\n See Also\n --------\n resample : Resample arrays or sparse matrices in a consistent way.\n\n Examples\n --------\n It is possible to mix sparse and dense arrays in the same run::\n\n >>> import numpy as np\n >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])\n >>> y = np.array([0, 1, 2])\n\n >>> from scipy.sparse import coo_matrix\n >>> X_sparse = coo_matrix(X)\n\n >>> from sklearn.utils import shuffle\n >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)\n >>> X\n array([[0., 0.],\n [2., 1.],\n [1., 0.]])\n\n >>> X_sparse\n <3x2 sparse matrix of type '<... 'numpy.float64'>'\n with 3 stored elements in Compressed Sparse Row format>\n\n >>> X_sparse.toarray()\n array([[0., 0.],\n [2., 1.],\n [1., 0.]])\n\n >>> y\n array([2, 1, 0])\n\n >>> shuffle(y, n_samples=2, random_state=0)\n array([0, 1])\n ", "n_words": 248, "vocab_size": 152, "n_whitespaces": 519, "language": "en" } }, { "id": 261771, "commit_id": "b0bf2315a771ed10b10d1f6a24a48ebdba34cf16", "repo": "scikit-learn", "path": "sklearn/utils/fixes.py", "file_name": "fixes.py", "fun_name": "_eigh", "commit_message": "MAINT fix deprecation raised in scipy-dev build (#25175)\n\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Loïc Estève ", "code": "def _eigh(*args, **kwargs):\n \n eigvals = kwargs.pop(\"subset_by_index\", None)\n return scipy.linalg.eigh(*args, eigvals=eigvals, **kwargs)\n\n\n# remove when https://github.com/joblib/joblib/issues/1071 is fixed", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 37, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 3, "token_counts": 37, "n_ast_nodes": 62, "n_identifiers": 8, "d_id": 76983, "documentation": { "docstring": "Wrapper for `scipy.linalg.eigh` that handles the deprecation of `eigvals`.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 131053, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/serve/utils.py", "file_name": "utils.py", "fun_name": "compute_dict_delta", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def compute_dict_delta(old_dict, new_dict) -> Tuple[dict, dict, dict]:\n \n added_keys, removed_keys, updated_keys = compute_iterable_delta(\n old_dict.keys(), new_dict.keys()\n )\n return (\n {k: new_dict[k] for k in added_keys},\n {k: old_dict[k] for k in removed_keys},\n {k: new_dict[k] for k in updated_keys},\n )\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 79, "n_words": 36, "vocab_size": 26, "complexity": 4, "nloc": 17, "token_counts": 79, "n_ast_nodes": 113, "n_identifiers": 11, "d_id": 29455, "documentation": { "docstring": "Given two dicts, return the entries that's (added, removed, updated).\n\n Usage:\n >>> old = {\"a\": 1, \"b\": 2}\n >>> new = {\"a\": 3, \"d\": 4}\n >>> compute_dict_delta(old, new)\n ({\"d\": 4}, {\"b\": 2}, {\"a\": 3})\n ", "n_words": 34, "vocab_size": 29, "n_whitespaces": 68, "language": "en" } }, { "id": 277095, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/tf_utils.py", "file_name": "tf_utils.py", "fun_name": "convert_shapes", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def convert_shapes(input_shape, 
to_tuples=True):\n \n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 6, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 7, "token_counts": 25, "n_ast_nodes": 18, "n_identifiers": 3, "d_id": 81867, "documentation": { "docstring": "Converts nested shape representations to desired format.\n\n Performs:\n\n TensorShapes -> tuples if `to_tuples=True`.\n tuples of int or None -> TensorShapes if `to_tuples=False`.\n\n Valid objects to be converted are:\n - TensorShapes\n - tuples with elements of type int or None.\n - ints\n - None\n\n Args:\n input_shape: A nested structure of objects to be converted to TensorShapes.\n to_tuples: If `True`, converts all TensorShape to tuples. Otherwise converts\n all tuples representing shapes to TensorShapes.\n\n Returns:\n Nested structure of shapes in desired format.\n\n Raises:\n ValueError: when the input tensor shape can't be converted to tuples, eg\n unknown tensor shape.\n ", "n_words": 95, "vocab_size": 58, "n_whitespaces": 165, "language": "en" } }, { "id": 88271, "commit_id": "0711b240a4efe79f06629914d5836cd6acbfcf78", "repo": "sentry", "path": "src/sentry/integrations/mixins/issues.py", "file_name": "issues.py", "fun_name": "get_performance_issue_description_data", "commit_message": "feat(github): Add span evidence to performance issues (#41041)\n\nAdd span evidence to the description of a GitHub issue created from a\r\nperformance issue. Currently the GitHub issue is fairly empty as for an\r\nerror issue it shows the stacktrace, but for a performance issue it's\r\njust a link back to the Sentry issue.\r\n\r\n\"Screen", "code": "def get_performance_issue_description_data(self, event):\n \n spans, matched_problem = get_span_and_problem(event)\n if not matched_problem:\n return \"\"\n\n parent_span, repeating_spans = get_parent_and_repeating_spans(spans, matched_problem)\n transaction_name = get_span_evidence_value_problem(matched_problem)\n parent_span = get_span_evidence_value(parent_span)\n repeating_spans = get_span_evidence_value(repeating_spans)\n num_repeating_spans = (\n str(len(matched_problem.offender_span_ids)) if matched_problem.offender_span_ids else \"\"\n )\n return (transaction_name, parent_span, num_repeating_spans, repeating_spans)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 132, "n_words": 40, "vocab_size": 30, "complexity": 3, "nloc": 12, "token_counts": 79, "n_ast_nodes": 128, "n_identifiers": 16, "d_id": 18365, "documentation": { "docstring": "Generate the span evidence data from a performance issue to populate\n an integration's ticket description. 
Each integration will need to take\n this data and format it appropriately.\n ", "n_words": 27, "vocab_size": 25, "n_whitespaces": 48, "language": "en" } }, { "id": 267136, "commit_id": "b439e41a915ccec0ccbabecc966919ea406db74e", "repo": "ansible", "path": "lib/ansible/galaxy/collection/__init__.py", "file_name": "__init__.py", "fun_name": "install_src", "commit_message": "expand ansible-doc coverage (#74963)\n\n* Expand ansible-doc to tests/filters and fix existing issues\r\n\r\n enable filter/test docs if in single file or companion yaml\r\n add docs for several filters/tests plugins\r\n allow .yml companion for docs for other plugins, must be colocated\r\n verify plugins are valid (not modules, cannot)\r\n fix 'per collection' filtering\r\n limit old style deprecation (_ prefix) to builtin/legacy\r\n start move to pathlib for saner path handling\r\n moved some funcitons, kept backwards compat shims with deprecation notice\r\n\r\n Co-authored-by: Abhijeet Kasurde \r\n Co-authored-by: Felix Fontein \r\n Co-authored-by: Sandra McCann ", "code": "def install_src(collection, b_collection_path, b_collection_output_path, artifacts_manager):\n r\n collection_meta = artifacts_manager.get_direct_collection_meta(collection)\n\n if 'build_ignore' not in collection_meta: # installed collection, not src\n # FIXME: optimize this? use a different process? copy instead of build?\n collection_meta['build_ignore'] = []\n collection_manifest = _build_manifest(**collection_meta)\n file_manifest = _build_files_manifest(\n b_collection_path,\n collection_meta['namespace'], collection_meta['name'],\n collection_meta['build_ignore'],\n )\n\n collection_output_path = _build_collection_dir(\n b_collection_path, b_collection_output_path,\n collection_manifest, file_manifest,\n )\n\n display.display(\n 'Created collection for {coll!s} at {path!s}'.\n format(coll=collection, path=collection_output_path)\n )\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 156, "n_words": 63, "vocab_size": 52, "complexity": 2, "nloc": 33, "token_counts": 93, "n_ast_nodes": 150, "n_identifiers": 17, "d_id": 78754, "documentation": { "docstring": "Install the collection from source control into given dir.\n\n Generates the Ansible collection artifact data from a galaxy.yml and\n installs the artifact to a directory.\n This should follow the same pattern as build_collection, but instead\n of creating an artifact, install it.\n\n :param collection: Collection to be installed.\n :param b_collection_path: Collection dirs layout path.\n :param b_collection_output_path: The installation directory for the \\\n collection artifact.\n :param artifacts_manager: Artifacts manager.\n\n :raises AnsibleError: If no collection metadata found.\n ", "n_words": 74, "vocab_size": 59, "n_whitespaces": 140, "language": "en" } }, { "id": 160657, "commit_id": "d7e2582cd33b22a767286e8a3d95b336dfe51a34", "repo": "numpy", "path": "numpy/lib/arraysetops.py", "file_name": "arraysetops.py", "fun_name": "in1d", "commit_message": "MAINT: bool instead of np.bool_ dtype", "code": "def in1d(ar1, ar2, assume_unique=False, invert=False, method='auto'):\n \n # Ravel both arrays, behavior for the first array could be different\n ar1 = np.asarray(ar1).ravel()\n ar2 = np.asarray(ar2).ravel()\n\n # Ensure that iteration through object arrays yields size-1 arrays\n if ar2.dtype == object:\n ar2 = ar2.reshape(-1, 1)\n # Convert booleans to uint8 so we can use the fast integer 
algorithm\n if ar1.dtype == bool:\n ar1 = ar1.view(np.uint8)\n if ar2.dtype == bool:\n ar2 = ar2.view(np.uint8)\n\n # Check if we can use a fast integer algorithm:\n integer_arrays = (np.issubdtype(ar1.dtype, np.integer) and\n np.issubdtype(ar2.dtype, np.integer))\n\n if method not in {'auto', 'sort', 'dictionary'}:\n raise ValueError(\n \"Invalid method: {0}. \".format(method)\n + \"Please use 'auto', 'sort' or 'dictionary'.\")\n\n if integer_arrays and method in {'auto', 'dictionary'}:\n ar2_min = np.min(ar2)\n ar2_max = np.max(ar2)\n ar2_size = ar2.size\n\n # Check for integer overflow\n with np.errstate(over='raise'):\n try:\n ar2_range = ar2_max - ar2_min\n\n # Optimal performance is for approximately\n # log10(size) > (log10(range) - 2.27) / 0.927.\n # See discussion on\n # https://github.com/numpy/numpy/pull/12065\n optimal_parameters = (\n np.log10(ar2_size) >\n ((np.log10(ar2_range + 1.0) - 2.27) / 0.927)\n )\n except FloatingPointError:\n optimal_parameters = False\n\n # Use the fast integer algorithm\n if optimal_parameters or method == 'dictionary':\n\n if invert:\n outgoing_array = np.ones_like(ar1, dtype=bool)\n else:\n outgoing_array = np.zeros_like(ar1, dtype=bool)\n\n # Make elements 1 where the integer exists in ar2\n if invert:\n isin_helper_ar = np.ones(ar2_range + 1, dtype=bool)\n isin_helper_ar[ar2 - ar2_min] = 0\n else:\n isin_helper_ar = np.zeros(ar2_range + 1, dtype=bool)\n isin_helper_ar[ar2 - ar2_min] = 1\n\n # Mask out elements we know won't work\n basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min)\n outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] -\n ar2_min]\n\n return outgoing_array\n elif method == 'dictionary':\n raise ValueError(\n \"'dictionary' method is only \"\n \"supported for boolean or integer arrays. \"\n \"Please select 'sort' or 'auto' for the method.\"\n )\n\n\n # Check if one of the arrays may contain arbitrary objects\n contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject\n\n # This code is run when\n # a) the first condition is true, making the code significantly faster\n # b) the second condition is true (i.e. `ar1` or `ar2` may contain\n # arbitrary objects), since then sorting is not guaranteed to work\n if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object:\n if invert:\n mask = np.ones(len(ar1), dtype=bool)\n for a in ar2:\n mask &= (ar1 != a)\n else:\n mask = np.zeros(len(ar1), dtype=bool)\n for a in ar2:\n mask |= (ar1 == a)\n return mask\n\n # Otherwise use sorting\n if not assume_unique:\n ar1, rev_idx = np.unique(ar1, return_inverse=True)\n ar2 = np.unique(ar2)\n\n ar = np.concatenate((ar1, ar2))\n # We need this to be a stable sort, so always use 'mergesort'\n # here. 
The values from the first array should always come before\n # the values from the second array.\n order = ar.argsort(kind='mergesort')\n sar = ar[order]\n if invert:\n bool_ar = (sar[1:] != sar[:-1])\n else:\n bool_ar = (sar[1:] == sar[:-1])\n flag = np.concatenate((bool_ar, [invert]))\n ret = np.empty(ar.shape, dtype=bool)\n ret[order] = flag\n\n if assume_unique:\n return ret[:len(ar1)]\n else:\n return ret[rev_idx]\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 1320, "n_words": 469, "vocab_size": 245, "complexity": 23, "nloc": 77, "token_counts": 600, "n_ast_nodes": 971, "n_identifiers": 58, "d_id": 38687, "documentation": { "docstring": "\n Test whether each element of a 1-D array is also present in a second array.\n\n Returns a boolean array the same length as `ar1` that is True\n where an element of `ar1` is in `ar2` and False otherwise.\n\n We recommend using :func:`isin` instead of `in1d` for new code.\n\n Parameters\n ----------\n ar1 : (M,) array_like\n Input array.\n ar2 : array_like\n The values against which to test each value of `ar1`.\n assume_unique : bool, optional\n If True, the input arrays are both assumed to be unique, which\n can speed up the calculation. Default is False.\n invert : bool, optional\n If True, the values in the returned array are inverted (that is,\n False where an element of `ar1` is in `ar2` and True otherwise).\n Default is False. ``np.in1d(a, b, invert=True)`` is equivalent\n to (but is faster than) ``np.invert(in1d(a, b))``.\n method : {'auto', 'sort', 'dictionary'}, optional\n The algorithm to use. This will not affect the final result,\n but will affect the speed. Default is 'auto'.\n\n - If 'sort', will use a sort-based approach.\n - If 'dictionary', will use a key-dictionary approach similar\n to a counting sort. This is only available for boolean and\n integer arrays.\n - If 'auto', will automatically choose the method which is\n expected to perform the fastest, which depends\n on the size and range of `ar2`. For larger sizes,\n 'dictionary' is chosen. For larger range or smaller\n sizes, 'sort' is chosen.\n\n .. versionadded:: 1.8.0\n\n Returns\n -------\n in1d : (M,) ndarray, bool\n The values `ar1[in1d]` are in `ar2`.\n\n See Also\n --------\n isin : Version of this function that preserves the\n shape of ar1.\n numpy.lib.arraysetops : Module with a number of other functions for\n performing set operations on arrays.\n\n Notes\n -----\n `in1d` can be considered as an element-wise function version of the\n python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly\n equivalent to ``np.array([item in b for item in a])``.\n However, this idea fails if `ar2` is a set, or similar (non-sequence)\n container: As ``ar2`` is converted to an array, in those cases\n ``asarray(ar2)`` is an object array rather than the expected array of\n contained values.\n\n .. 
versionadded:: 1.4.0\n\n Examples\n --------\n >>> test = np.array([0, 1, 2, 5, 0])\n >>> states = [0, 2]\n >>> mask = np.in1d(test, states)\n >>> mask\n array([ True, False, True, False, True])\n >>> test[mask]\n array([0, 2, 0])\n >>> mask = np.in1d(test, states, invert=True)\n >>> mask\n array([False, True, False, True, False])\n >>> test[mask]\n array([1, 5])\n ", "n_words": 397, "vocab_size": 223, "n_whitespaces": 763, "language": "en" } }, { "id": 35787, "commit_id": "2eb7bb15e771f13192968cd4657c78f76b0799fe", "repo": "transformers", "path": "src/transformers/training_args.py", "file_name": "training_args.py", "fun_name": "world_size", "commit_message": "Updates in Trainer to support new features in SM Model Parallel library (#15877)\n\n* Create optimizer after model creation for SMP\r\n\r\n* update dp_rank to rdp_rank for opt_state_dict\r\n\r\n* update world_size and process_index for smp\r\n\r\n* Address comments\r\n\r\n* Lint fix\r\n\r\nCo-authored-by: Cavdar ", "code": "def world_size(self):\n \n if is_torch_tpu_available():\n return xm.xrt_world_size()\n elif is_sagemaker_mp_enabled():\n return smp.dp_size() if not smp.state.cfg.prescaled_batch else smp.rdp_size()\n elif is_sagemaker_dp_enabled():\n return sm_dist.get_world_size()\n elif self.local_rank != -1:\n return torch.distributed.get_world_size()\n return 1\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 113, "n_words": 27, "vocab_size": 20, "complexity": 6, "nloc": 10, "token_counts": 72, "n_ast_nodes": 122, "n_identifiers": 18, "d_id": 6535, "documentation": { "docstring": "\n The number of processes used in parallel.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 160281, "commit_id": "a0c2e826738daa0cbd83aba85852405b73878f5b", "repo": "numpy", "path": "numpy/core/_dtype.py", "file_name": "_dtype.py", "fun_name": "_is_packed", "commit_message": "API: Fix structured dtype cast-safety, promotion, and comparison\n\nThis PR replaces the old gh-15509 implementing proper type promotion\nfor structured voids. 
It further fixes the casting safety to consider\ncasts with equivalent field number and matching order as \"safe\"\nand if the names, titles, and offsets match as \"equiv\".\n\nThe change perculates into the void comparison, and since it fixes\nthe order, it removes the current FutureWarning there as well.\n\nThis addresses https://github.com/liberfa/pyerfa/issues/77\nand replaces gh-15509 (the implementation has changed too much).\n\nFixes gh-15494 (and probably a few more)\n\nCo-authored-by: Allan Haldane ", "code": "def _is_packed(dtype):\n \n align = dtype.isalignedstruct\n max_alignment = 1\n total_offset = 0\n for name in dtype.names:\n fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])\n\n if align:\n total_offset = _aligned_offset(total_offset, fld_dtype.alignment)\n max_alignment = max(max_alignment, fld_dtype.alignment)\n\n if fld_offset != total_offset:\n return False\n total_offset += fld_dtype.itemsize\n\n if align:\n total_offset = _aligned_offset(total_offset, max_alignment)\n\n if total_offset != dtype.itemsize:\n return False\n return True\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 152, "n_words": 53, "vocab_size": 32, "complexity": 6, "nloc": 17, "token_counts": 97, "n_ast_nodes": 153, "n_identifiers": 17, "d_id": 38590, "documentation": { "docstring": "\n Checks whether the structured data type in 'dtype'\n has a simple layout, where all the fields are in order,\n and follow each other with no alignment padding.\n\n When this returns true, the dtype can be reconstructed\n from a list of the field names and dtypes with no additional\n dtype parameters.\n\n Duplicates the C `is_dtype_struct_simple_unaligned_layout` function.\n ", "n_words": 55, "vocab_size": 45, "n_whitespaces": 80, "language": "en" } }, { "id": 304515, "commit_id": "ced8278e3222501dde7d769ea4b57aae75f62438", "repo": "core", "path": "homeassistant/components/bluetooth/scanner.py", "file_name": "scanner.py", "fun_name": "_async_stop", "commit_message": "Auto recover when the Bluetooth adapter stops responding (#77043)", "code": "async def _async_stop(self) -> None:\n \n if self._cancel_watchdog:\n self._cancel_watchdog()\n self._cancel_watchdog = None\n await self._async_stop_scanner()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 56, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 6, "token_counts": 29, "n_ast_nodes": 53, "n_identifiers": 4, "d_id": 103322, "documentation": { "docstring": "Cancel watchdog and bluetooth discovery under the lock.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 215934, "commit_id": "fe48a85e8204f3840264f16235ea3bde3e664c65", "repo": "salt", "path": "salt/modules/status.py", "file_name": "status.py", "fun_name": "netdev", "commit_message": "Allow for Python 3 using view objects for a dictionary keys() function", "code": "def netdev():\n \n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 14, "token_counts": 56, "n_ast_nodes": 12, "n_identifiers": 1, "d_id": 54260, "documentation": { "docstring": "\n .. versionchanged:: 2016.3.2\n Return the network device stats for this minion\n\n .. versionchanged:: 2016.11.4\n Added support for AIX\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' status.netdev\n ", "n_words": 26, "vocab_size": 22, "n_whitespaces": 63, "language": "en" } }, { "id": 190450, "commit_id": "4fc3616712edb19179b17dd270ad6cf63abf99c2", "repo": "DeOldify", "path": "fastai/torch_core.py", "file_name": "torch_core.py", "fun_name": "remove_module_load", "commit_message": "Upgrading to support latest Pytorch version", "code": "def remove_module_load(state_dict):\n \n new_state_dict = OrderedDict()\n for k, v in state_dict.items(): new_state_dict[k[7:]] = v\n return new_state_dict\n", "url": "https://github.com/jantic/DeOldify.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 27, "n_words": 15, "vocab_size": 12, "complexity": 2, "nloc": 4, "token_counts": 34, "n_ast_nodes": 57, "n_identifiers": 7, "d_id": 46351, "documentation": { "docstring": "create new OrderedDict that does not contain `module.`", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 278255, "commit_id": "f0fc6f798937a7a5fdab469c0f16bdde7cfc4ccd", "repo": "keras", "path": "keras/models/cloning.py", "file_name": "cloning.py", "fun_name": "in_place_subclassed_model_state_restoration", "commit_message": "resolve line-too-long in models", "code": "def in_place_subclassed_model_state_restoration(model):\n \n assert not model._is_graph_network\n # Restore layers and build attributes\n if (\n hasattr(model, \"_original_attributes_cache\")\n and model._original_attributes_cache is not None\n ):\n # Models have sticky attribute assignment, so we want to be careful to\n # add back the previous attributes and track Layers by their original\n # names without adding dependencies on \"utility\" attributes which Models\n # exempt when they're constructed.\n setattr_tracking = model._setattr_tracking\n model._setattr_tracking = False\n model._self_tracked_trackables = []\n for name, value in model._original_attributes_cache.items():\n setattr(model, name, value)\n if isinstance(value, Layer):\n model._self_tracked_trackables.append(value)\n model._original_attributes_cache = None\n model._setattr_tracking = setattr_tracking\n else:\n # Restore to the state of a never-called model.\n _reset_build_compile_trackers(model)\n\n\n@keras_export(\"keras.__internal__.models.clone_and_build_model\", v1=[])", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.__internal__.models.clone_and_build_model\", v1=[])", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 253, "n_words": 101, "vocab_size": 75, "complexity": 5, "nloc": 17, "token_counts": 97, "n_ast_nodes": 181, "n_identifiers": 18, "d_id": 82432, "documentation": { "docstring": "Restores the original state of a model after it was \"reset\".\n\n This undoes this action of `_in_place_subclassed_model_reset`, which is\n called in `clone_and_build_model` if `in_place_reset` is set to True.\n\n Args:\n model: Instance of a Keras model created via subclassing, on which\n `_in_place_subclassed_model_reset` was previously called.\n ", "n_words": 44, "vocab_size": 37, "n_whitespaces": 68, "language": "en" } }, { "id": 130791, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/node.py", "file_name": "node.py", "fun_name": "_make_inc_temp", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _make_inc_temp(self, suffix=\"\", prefix=\"\", directory_name=None):\n \n if 
directory_name is None:\n directory_name = ray._private.utils.get_ray_temp_dir()\n directory_name = os.path.expanduser(directory_name)\n index = self._incremental_dict[suffix, prefix, directory_name]\n # `tempfile.TMP_MAX` could be extremely large,\n # so using `range` in Python2.x should be avoided.\n while index < tempfile.TMP_MAX:\n if index == 0:\n filename = os.path.join(directory_name, prefix + suffix)\n else:\n filename = os.path.join(\n directory_name, prefix + \".\" + str(index) + suffix\n )\n index += 1\n if not os.path.exists(filename):\n # Save the index.\n self._incremental_dict[suffix, prefix, directory_name] = index\n return filename\n\n raise FileExistsError(errno.EEXIST, \"No usable temporary filename found\")\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 306, "n_words": 86, "vocab_size": 60, "complexity": 5, "nloc": 17, "token_counts": 142, "n_ast_nodes": 228, "n_identifiers": 23, "d_id": 29365, "documentation": { "docstring": "Return a incremental temporary file name. The file is not created.\n\n Args:\n suffix (str): The suffix of the temp file.\n prefix (str): The prefix of the temp file.\n directory_name (str) : The base directory of the temp file.\n\n Returns:\n A string of file name. If there existing a file having\n the same name, the returned name will look like\n \"{directory_name}/{prefix}.{unique_index}{suffix}\"\n ", "n_words": 60, "vocab_size": 38, "n_whitespaces": 155, "language": "en" } }, { "id": 19989, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_internal/utils/misc.py", "file_name": "misc.py", "fun_name": "captured_stdout", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def captured_stdout() -> ContextManager[StreamWrapper]:\n \n return captured_output(\"stdout\")\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 10, "token_counts": 15, "n_ast_nodes": 30, "n_identifiers": 4, "d_id": 3166, "documentation": { "docstring": "Capture the output of sys.stdout:\n\n with captured_stdout() as stdout:\n print('hello')\n self.assertEqual(stdout.getvalue(), 'hello\\n')\n\n Taken from Lib/support/__init__.py in the CPython repo.\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 47, "language": "en" } }, { "id": 133631, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/agents/a3c/tests/test_a3c.py", "file_name": "test_a3c.py", "fun_name": "test_a3c_compilation", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_a3c_compilation(self):\n \n config = a3c.DEFAULT_CONFIG.copy()\n config[\"num_workers\"] = 2\n config[\"num_envs_per_worker\"] = 2\n\n 
num_iterations = 1\n\n # Test against all frameworks.\n for _ in framework_iterator(config, with_eager_tracing=True):\n for env in [\"CartPole-v1\", \"Pendulum-v1\", \"PongDeterministic-v0\"]:\n print(\"env={}\".format(env))\n config[\"model\"][\"use_lstm\"] = env == \"CartPole-v1\"\n trainer = a3c.A3CTrainer(config=config, env=env)\n for i in range(num_iterations):\n results = trainer.train()\n check_train_results(results)\n print(results)\n check_compute_single_action(\n trainer, include_state=config[\"model\"][\"use_lstm\"]\n )\n trainer.stop()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 295, "n_words": 54, "vocab_size": 42, "complexity": 4, "nloc": 18, "token_counts": 129, "n_ast_nodes": 224, "n_identifiers": 23, "d_id": 30064, "documentation": { "docstring": "Test whether an A3CTrainer can be built with both frameworks.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 73270, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/modeladmin/tests/test_simple_modeladmin.py", "file_name": "test_simple_modeladmin.py", "fun_name": "test_author_name_present", "commit_message": "Reformat with black", "code": "def test_author_name_present(self):\n \n response = self.get_for_author(1)\n self.assertContains(response, \"J. R. R. Tolkien\", 2)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 32, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 42, "n_identifiers": 5, "d_id": 16001, "documentation": { "docstring": "\n The author name should appear twice. Once in the header, and once\n more in the field listing\n ", "n_words": 17, "vocab_size": 15, "n_whitespaces": 39, "language": "en" } }, { "id": 106974, "commit_id": "cfabe79945743dd375db4fe8bcdbaab00330dfe8", "repo": "matplotlib", "path": "lib/matplotlib/axes/_base.py", "file_name": "_base.py", "fun_name": "_update_title_position", "commit_message": "FIX: Autoposition title when yaxis has offset\n\nMove any title above the y axis offset text it would overlap with the\noffset. 
If multiple titles are present, they are vertically aligned to\nthe highest one.", "code": "def _update_title_position(self, renderer):\n \n if self._autotitlepos is not None and not self._autotitlepos:\n _log.debug('title position was updated manually, not adjusting')\n return\n\n titles = (self.title, self._left_title, self._right_title)\n\n for title in titles:\n x, _ = title.get_position()\n # need to start again in case of window resizing\n title.set_position((x, 1.0))\n # need to check all our twins too...\n axs = self._twinned_axes.get_siblings(self)\n # and all the children\n for ax in self.child_axes:\n if ax is not None:\n locator = ax.get_axes_locator()\n if locator:\n pos = locator(self, renderer)\n ax.apply_aspect(pos)\n else:\n ax.apply_aspect()\n axs = axs + [ax]\n top = -np.Inf\n for ax in axs:\n bb = None\n if (ax.xaxis.get_ticks_position() in ['top', 'unknown']\n or ax.xaxis.get_label_position() == 'top'):\n bb = ax.xaxis.get_tightbbox(renderer)\n if bb is None:\n bb = ax.get_window_extent(renderer)\n top = max(top, bb.ymax)\n if title.get_text():\n ax.yaxis.get_tightbbox(renderer) # update offsetText\n if ax.yaxis.offsetText.get_text():\n bb = ax.yaxis.offsetText.get_tightbbox(renderer)\n if bb.intersection(title.get_tightbbox(renderer), bb):\n top = bb.ymax\n if top < 0:\n # the top of Axes is not even on the figure, so don't try and\n # automatically place it.\n _log.debug('top of Axes not in the figure, so title not moved')\n return\n if title.get_window_extent(renderer).ymin < top:\n _, y = self.transAxes.inverted().transform((0, top))\n title.set_position((x, y))\n # empirically, this doesn't always get the min to top,\n # so we need to adjust again.\n if title.get_window_extent(renderer).ymin < top:\n _, y = self.transAxes.inverted().transform(\n (0., 2 * top - title.get_window_extent(renderer).ymin))\n title.set_position((x, y))\n\n ymax = max(title.get_position()[1] for title in titles)\n for title in titles:\n # now line up all the titles at the highest baseline.\n x, _ = title.get_position()\n title.set_position((x, ymax))\n\n # Drawing", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 1070, "n_words": 245, "vocab_size": 135, "complexity": 19, "nloc": 47, "token_counts": 411, "n_ast_nodes": 662, "n_identifiers": 43, "d_id": 22531, "documentation": { "docstring": "\n Update the title position based on the bounding box enclosing\n all the ticklabels and x-axis spine and xlabel...\n ", "n_words": 18, "vocab_size": 15, "n_whitespaces": 40, "language": "en" } }, { "id": 217715, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/http/client.py", "file_name": "client.py", "fun_name": "geturl", "commit_message": "add python 3.10.4 for windows", "code": "def geturl(self):\n \n return self.url\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 19, "n_identifiers": 3, "d_id": 54897, "documentation": { "docstring": "Return the real URL of the page.\n\n In some cases, the HTTP server redirects a client to another\n URL. The urlopen() function handles this transparently, but in\n some cases the caller needs to know which URL the client was\n redirected to. 
The geturl() method can be used to get at this\n redirected URL.\n\n ", "n_words": 53, "vocab_size": 40, "n_whitespaces": 95, "language": "en" } }, { "id": 5513, "commit_id": "f9348b22517556e1af5d1831db7187b912ee0126", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-s3/source_s3/source_files_abstract/stream.py", "file_name": "stream.py", "fun_name": "get_updated_history", "commit_message": "🐛 Source Amazon S3: solve possible case of files being missed during incremental syncs (#12568)\n\n* Added history to state\r\n\r\n* Deleted unused import\r\n\r\n* Rollback abnormal state file\r\n\r\n* Rollback abnormal state file\r\n\r\n* Fixed type error issue\r\n\r\n* Fix state issue\r\n\r\n* Updated after review\r\n\r\n* Bumped version", "code": "def get_updated_history(self, current_stream_state, latest_record_datetime, latest_record, current_parsed_datetime, state_date):\n \n\n history = current_stream_state.get(\"history\", {})\n\n file_modification_date = latest_record_datetime.strftime(\"%Y-%m-%d\")\n\n # add record to history if record modified date in range delta start from state\n if latest_record_datetime.date() + timedelta(days=self.buffer_days) >= state_date:\n history_item = set(history.setdefault(file_modification_date, set()))\n history_item.add(latest_record[self.ab_file_name_col])\n history[file_modification_date] = history_item\n\n # reset history to new date state\n if current_parsed_datetime.date() != state_date:\n history = {\n date: history[date]\n for date in history\n if datetime.strptime(date, \"%Y-%m-%d\").date() + timedelta(days=self.buffer_days) >= state_date\n }\n\n return history\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 229, "n_words": 73, "vocab_size": 49, "complexity": 5, "nloc": 14, "token_counts": 134, "n_ast_nodes": 215, "n_identifiers": 22, "d_id": 784, "documentation": { "docstring": "\n History is dict which basically groups files by their modified_at date.\n After reading each record we add its file to the history set if it wasn't already there.\n Then we drop from the history set any entries whose key is less than now - buffer_days\n ", "n_words": 45, "vocab_size": 40, "n_whitespaces": 74, "language": "en" } }, { "id": 207745, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_inheritance", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_inheritance(self):\n \n should_contain = [\n '
<li>Villain: <a href=\"%s\">Bob</a>'\n % reverse(\"admin:admin_views_villain_change\", args=(self.sv1.pk,)),\n '
<li>Super villain: <a href=\"%s\">Bob</a>'\n % reverse(\"admin:admin_views_supervillain_change\", args=(self.sv1.pk,)),\n \"
<li>Secret hideout: floating castle\",\n \"
  • Super secret hideout: super floating castle!\",\n ]\n response = self.client.get(\n reverse(\"admin:admin_views_villain_delete\", args=(self.sv1.pk,))\n )\n for should in should_contain:\n self.assertContains(response, should, 1)\n response = self.client.get(\n reverse(\"admin:admin_views_supervillain_delete\", args=(self.sv1.pk,))\n )\n for should in should_contain:\n self.assertContains(response, should, 1)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 228, "n_words": 55, "vocab_size": 36, "complexity": 3, "nloc": 19, "token_counts": 128, "n_ast_nodes": 204, "n_identifiers": 12, "d_id": 52081, "documentation": { "docstring": "\n In the case of an inherited model, if either the child or\n parent-model instance is deleted, both instances are listed\n for deletion, as well as any relationships they have.\n ", "n_words": 29, "vocab_size": 27, "n_whitespaces": 58, "language": "en" } }, { "id": 208683, "commit_id": "75b3d1cc6d5e1e629705d8a7233a374f1e4235e7", "repo": "ipython", "path": "IPython/terminal/magics.py", "file_name": "magics.py", "fun_name": "store_or_execute", "commit_message": "Get history from sql.\n\nFixes #13585\n\nBy getting history from sql we can get the transformed history.\nThis also skip storing history if `%paste` is used and `%paste` itself\nwill insert the pasted value in history which is more conveninent.", "code": "def store_or_execute(self, block, name):\n \n if name:\n # If storing it for further editing\n self.shell.user_ns[name] = SList(block.splitlines())\n print(\"Block assigned to '%s'\" % name)\n else:\n b = self.preclean_input(block)\n self.shell.user_ns['pasted_block'] = b\n self.shell.using_paste_magics = True\n try:\n self.shell.run_cell(b, store_history=True)\n finally:\n self.shell.using_paste_magics = False\n", "url": "https://github.com/ipython/ipython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 178, "n_words": 39, "vocab_size": 33, "complexity": 3, "nloc": 12, "token_counts": 86, "n_ast_nodes": 144, "n_identifiers": 14, "d_id": 52454, "documentation": { "docstring": " Execute a block, or store it in a variable, per the user's request.\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 21, "language": "en" } }, { "id": 218498, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ipaddress.py", "file_name": "ipaddress.py", "fun_name": "_count_righthand_zero_bits", "commit_message": "add python 3.10.4 for windows", "code": "def _count_righthand_zero_bits(number, bits):\n \n if number == 0:\n return bits\n return min(bits, (~number & (number-1)).bit_length())\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 30, "n_words": 14, "vocab_size": 13, "complexity": 2, "nloc": 4, "token_counts": 35, "n_ast_nodes": 57, "n_identifiers": 5, "d_id": 55351, "documentation": { "docstring": "Count the number of zero bits on the right hand side.\n\n Args:\n number: an integer.\n bits: maximum number of bits to count.\n\n Returns:\n The number of zero bits on the right hand side of the number.\n\n ", "n_words": 36, "vocab_size": 22, "n_whitespaces": 66, "language": "en" } }, { "id": 276242, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/saving_utils.py", "file_name": "saving_utils.py", "fun_name": "_deserialize_metric", 
"commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _deserialize_metric(metric_config):\n \n from keras import (\n metrics as metrics_module,\n ) # pylint:disable=g-import-not-at-top\n\n if metric_config in [\"accuracy\", \"acc\", \"crossentropy\", \"ce\"]:\n # Do not deserialize accuracy and cross-entropy strings as we have special\n # case handling for these in compile, based on model output shape.\n return metric_config\n return metrics_module.deserialize(metric_config)\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 91, "n_words": 47, "vocab_size": 41, "complexity": 2, "nloc": 7, "token_counts": 37, "n_ast_nodes": 68, "n_identifiers": 6, "d_id": 81600, "documentation": { "docstring": "Deserialize metrics, leaving special strings untouched.", "n_words": 6, "vocab_size": 6, "n_whitespaces": 5, "language": "en" } }, { "id": 270120, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/constraints.py", "file_name": "constraints.py", "fun_name": "__call__", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def __call__(self, w):\n \n return w\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 18, "n_identifiers": 3, "d_id": 80394, "documentation": { "docstring": "Applies the constraint to the input weight variable.\n\n By default, the inputs weight variable is not modified.\n Users should override this method to implement their own projection\n function.\n\n Args:\n w: Input weight variable.\n\n Returns:\n Projected variable (by default, returns unmodified inputs).\n ", "n_words": 41, "vocab_size": 33, "n_whitespaces": 101, "language": "en" } }, { "id": 185863, "commit_id": "4b5fd43423a327e4cd6d477a66bebc9588fd1488", "repo": "textual", "path": "src/textual/widgets/_placeholder.py", "file_name": "_placeholder.py", "fun_name": "on_click", "commit_message": "Add scaffolding for the Placeholder widget.", "code": "def on_click(self) -> None:\n \n self.cycle_variant()\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 3, "token_counts": 13, "n_ast_nodes": 25, "n_identifiers": 3, "d_id": 45212, "documentation": { "docstring": "Clicking on the placeholder cycles through the placeholder variants.", "n_words": 9, "vocab_size": 7, "n_whitespaces": 8, "language": "en" } }, { "id": 217711, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/http/client.py", "file_name": "client.py", "fun_name": "set_tunnel", "commit_message": "add python 3.10.4 for windows", "code": "def set_tunnel(self, host, port=None, headers=None):\n \n\n if self.sock:\n raise RuntimeError(\"Can't set up tunnel for established connection\")\n\n self._tunnel_host, self._tunnel_port = self._get_hostport(host, port)\n if headers:\n self._tunnel_headers = headers\n else:\n self._tunnel_headers.clear()\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 95, "n_words": 27, "vocab_size": 25, "complexity": 3, "nloc": 8, 
"token_counts": 59, "n_ast_nodes": 96, "n_identifiers": 12, "d_id": 54894, "documentation": { "docstring": "Set up host and port for HTTP CONNECT tunnelling.\n\n In a connection that uses HTTP CONNECT tunneling, the host passed to the\n constructor is used as a proxy server that relays all communication to\n the endpoint passed to `set_tunnel`. This done by sending an HTTP\n CONNECT request to the proxy server when the connection is established.\n\n This method must be called before the HTTP connection has been\n established.\n\n The headers argument should be a mapping of extra HTTP headers to send\n with the CONNECT request.\n ", "n_words": 85, "vocab_size": 54, "n_whitespaces": 148, "language": "en" } }, { "id": 243999, "commit_id": "1516986a616fee8bb741d0ab2be40683045efccd", "repo": "mmdetection", "path": "mmdet/datasets/openimages.py", "file_name": "openimages.py", "fun_name": "get_ann_info", "commit_message": "[Feature] Support OpenImages Dataset (#6331)\n\n* [Feature] support openimage group of eval\r\n\r\n* [Feature] support openimage group of eval\r\n\r\n* support openimage dataset\r\n\r\n* support openimage challenge dataset\r\n\r\n* fully support OpenImages-V6 and OpenImages Challenge 2019\r\n\r\n* Fix some logic error\r\n\r\n* update config file\r\n\r\n* fix get data_infos error\r\n\r\n* fully support OpenImages evaluation\r\n\r\n* update OpenImages config files\r\n\r\n* [Feature] support OpenImages datasets\r\n\r\n* fix bug\r\n\r\n* support load image metas from pipeline\r\n\r\n* fix bug\r\n\r\n* fix get classes logic error\r\n\r\n* update code\r\n\r\n* support get image metas\r\n\r\n* support openimags\r\n\r\n* support collect image metas\r\n\r\n* support Open Images\r\n\r\n* fix openimages logic\r\n\r\n* minor fix\r\n\r\n* add a new function to compute openimages tpfp\r\n\r\n* minor fix\r\n\r\n* fix ci error\r\n\r\n* minor fix\r\n\r\n* fix indication\r\n\r\n* minor fix\r\n\r\n* fix returns\r\n\r\n* fix returns\r\n\r\n* fix returns\r\n\r\n* fix returns\r\n\r\n* fix returns\r\n\r\n* minor fix\r\n\r\n* update readme\r\n\r\n* support loading image level labels and fix some logic\r\n\r\n* minor fix\r\n\r\n* minor fix\r\n\r\n* add class names\r\n\r\n* minor fix\r\n\r\n* minor fix\r\n\r\n* minor fix\r\n\r\n* add openimages test unit\r\n\r\n* minor fix\r\n\r\n* minor fix\r\n\r\n* fix test unit\r\n\r\n* minor fix\r\n\r\n* fix logic error\r\n\r\n* minor fix\r\n\r\n* fully support openimages\r\n\r\n* minor fix\r\n\r\n* fix docstring\r\n\r\n* fix docstrings in readthedocs\r\n\r\n* update get image metas script\r\n\r\n* label_description_file -> label_file\r\n\r\n* update openimages readme\r\n\r\n* fix test unit\r\n\r\n* fix test unit\r\n\r\n* minor fix\r\n\r\n* update readme file\r\n\r\n* Update get_image_metas.py", "code": "def get_ann_info(self, idx):\n \n img_id = self.data_infos[idx]['img_id']\n bboxes = []\n labels = []\n bboxes_ignore = []\n labels_ignore = []\n is_occludeds = []\n is_truncateds = []\n is_group_ofs = []\n is_depictions = []\n is_insides = []\n for obj in self.ann_infos[img_id]:\n label = int(obj['label'])\n bbox = [\n float(obj['bbox'][0]),\n float(obj['bbox'][1]),\n float(obj['bbox'][2]),\n float(obj['bbox'][3])\n ]\n bboxes.append(bbox)\n labels.append(label)\n\n # Other parameters\n is_occludeds.append(obj['is_occluded'])\n is_truncateds.append(obj['is_truncated'])\n is_group_ofs.append(obj['is_group_of'])\n is_depictions.append(obj['is_depiction'])\n is_insides.append(obj['is_inside'])\n if not bboxes:\n bboxes = np.zeros((0, 4))\n labels = np.zeros((0, ))\n 
else:\n bboxes = np.array(bboxes)\n labels = np.array(labels)\n if not bboxes_ignore:\n bboxes_ignore = np.zeros((0, 4))\n labels_ignore = np.zeros((0, ))\n else:\n bboxes_ignore = np.array(bboxes_ignore)\n labels_ignore = np.array(labels_ignore)\n\n assert len(is_group_ofs) == len(labels) == len(bboxes)\n gt_is_group_ofs = np.array(is_group_ofs, dtype=np.bool)\n\n # These parameters is not used yet.\n is_occludeds = np.array(is_occludeds, dtype=np.bool)\n is_truncateds = np.array(is_truncateds, dtype=np.bool)\n is_depictions = np.array(is_depictions, dtype=np.bool)\n is_insides = np.array(is_insides, dtype=np.bool)\n\n ann = dict(\n bboxes=bboxes.astype(np.float32),\n labels=labels.astype(np.int64),\n bboxes_ignore=bboxes_ignore.astype(np.float32),\n labels_ignore=labels_ignore.astype(np.int64),\n gt_is_group_ofs=gt_is_group_ofs,\n is_occludeds=is_occludeds,\n is_truncateds=is_truncateds,\n is_depictions=is_depictions,\n is_insides=is_insides)\n\n return ann\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 684, "n_words": 141, "vocab_size": 79, "complexity": 4, "nloc": 55, "token_counts": 423, "n_ast_nodes": 674, "n_identifiers": 33, "d_id": 70189, "documentation": { "docstring": "Get OpenImages annotation by index.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n dict: Annotation info of specified index.\n ", "n_words": 18, "vocab_size": 16, "n_whitespaces": 61, "language": "en" } }, { "id": 208148, "commit_id": "c3c6594b4cdea898abba218f576a669700dba98d", "repo": "celery", "path": "t/unit/tasks/test_canvas.py", "file_name": "test_canvas.py", "fun_name": "test_chord_clone_kwargs", "commit_message": "BLM-2: Adding unit tests to chord clone (#7668)\n\n* Added .python-version and .vscode to .gitignore\r\n\r\n* Added test_chord_clone_kwargs() to verify chord cloning treats kwargs correctly\r\n\r\n* Happify linter", "code": "def test_chord_clone_kwargs(self, subtests):\n \n\n with subtests.test(msg='Verify chord cloning clones kwargs correctly'):\n c = chord([signature('g'), signature('h')], signature('i'), kwargs={'U': 6})\n c2 = c.clone()\n assert c2.kwargs == c.kwargs\n\n with subtests.test(msg='Cloning the chord with overridden kwargs'):\n override_kw = {'X': 2}\n c3 = c.clone(args=(1,), kwargs=override_kw)\n\n with subtests.test(msg='Verify the overridden kwargs were cloned correctly'):\n new_kw = c.kwargs.copy()\n new_kw.update(override_kw)\n assert c3.kwargs == new_kw\n\n", "url": "https://github.com/celery/celery.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 171, "n_words": 55, "vocab_size": 39, "complexity": 1, "nloc": 12, "token_counts": 127, "n_ast_nodes": 222, "n_identifiers": 17, "d_id": 52217, "documentation": { "docstring": " Test that chord clone ensures the kwargs are the same ", "n_words": 10, "vocab_size": 9, "n_whitespaces": 11, "language": "en" } }, { "id": 100940, "commit_id": "bad5025aea1adb9126580e14e064e6c99089243d", "repo": "faceswap", "path": "lib/serializer.py", "file_name": "serializer.py", "fun_name": "save", "commit_message": "Core updates\n - Change loss loading mechanism\n - Autosize tooltips based on content size\n - Random linting + code modernisation", "code": "def save(self, filename, data):\n \n logger.debug(\"filename: %s, data type: %s\", filename, type(data))\n filename = self._check_extension(filename)\n try:\n with open(filename, self._write_option) as s_file:\n 
s_file.write(self.marshal(data))\n except IOError as err:\n msg = f\"Error writing to '{filename}': {err.strerror}\"\n raise FaceswapError(msg) from err\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 119, "n_words": 36, "vocab_size": 33, "complexity": 2, "nloc": 9, "token_counts": 72, "n_ast_nodes": 131, "n_identifiers": 18, "d_id": 20387, "documentation": { "docstring": " Serialize data and save to a file\n\n Parameters\n ----------\n filename: str\n The path to where the serialized file should be saved\n data: varies\n The data that is to be serialized to file\n\n Example\n ------\n >>> serializer = get_serializer('json')\n >>> data ['foo', 'bar']\n >>> json_file = '/path/to/json/file.json'\n >>> serializer.save(json_file, data)\n ", "n_words": 49, "vocab_size": 35, "n_whitespaces": 149, "language": "en" } }, { "id": 299284, "commit_id": "66551e6fcbd063e53c13adc8a6462b8e00ce1450", "repo": "core", "path": "homeassistant/components/cast/media_player.py", "file_name": "media_player.py", "fun_name": "_media_status", "commit_message": "Add state buffering to media_player and use it in cast (#70802)", "code": "def _media_status(self):\n \n media_status = self.media_status\n media_status_received = self.media_status_received\n\n if (\n media_status is None\n or media_status.player_state == MEDIA_PLAYER_STATE_UNKNOWN\n ):\n groups = self.mz_media_status\n for k, val in groups.items():\n if val and val.player_state != MEDIA_PLAYER_STATE_UNKNOWN:\n media_status = val\n media_status_received = self.mz_media_status_received[k]\n break\n\n return (media_status, media_status_received)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 200, "n_words": 42, "vocab_size": 32, "complexity": 6, "nloc": 14, "token_counts": 72, "n_ast_nodes": 115, "n_identifiers": 12, "d_id": 98218, "documentation": { "docstring": "\n Return media status.\n\n First try from our own cast, then groups which our cast is a member in.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 40, "language": "en" } }, { "id": 268958, "commit_id": "0c959a0670a2bcb12dc7a1717ce7416ff1f7cc27", "repo": "keras", "path": "keras/feature_column/dense_features_v2.py", "file_name": "dense_features_v2.py", "fun_name": "no_manual_dependency_tracking_scope", "commit_message": "Remove deprecated TF1 Layer APIs `apply()`, `get_updates_for()`, `get_losses_for()`, and remove the `inputs` argument in the `add_loss()` method.\n\nPiperOrigin-RevId: 428134172", "code": "def no_manual_dependency_tracking_scope(obj):\n ", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "\"\"\"A context that disables manual dependency tracking for the given `obj`.\n\n Sometimes library methods might track objects on their own and we might want\n to disable that and do the tracking on our own. One can then use this context\n manager to disable the tracking the library method does and do your own\n tracking.\n\n For example:the given `obj`.\n\n Sometimes library methods might track objects on their own and we might want\n to disable that and do the tracking on our own. 
One can then use this context\n manager to disable the tracking the library method does and do your own", "n_ast_errors": 2, "ast_levels": 8, "n_whitespaces": 3, "n_words": 2, "vocab_size": 2, "complexity": 2, "nloc": 7, "token_counts": 31, "n_ast_nodes": 92, "n_identifiers": 42, "d_id": 79789, "documentation": { "docstring": "A context that disables manual dependency tracking for the given `obj`.\n\n Sometimes library methods might track objects on their own and we might want\n to disable that and do the tracking on our own. One can then use this context\n manager to disable the tracking the library method does and do your own\n tracking.\n\n For example:\n\n class TestLayer(tf.keras.Layer):", "n_words": 58, "vocab_size": 42, "n_whitespaces": 63, "language": "en" } }, { "id": 101397, "commit_id": "1022651eb8a7741014f5d2ec7cbfe882120dfa5f", "repo": "faceswap", "path": "scripts/fsmedia.py", "file_name": "fsmedia.py", "fun_name": "load", "commit_message": "Bugfix: convert - Gif Writer\n - Fix non-launch error on Gif Writer\n - convert plugins - linting\n - convert/fs_media/preview/queue_manager - typing\n - Change convert items from dict to Dataclass", "code": "def load(self) -> Generator[Tuple[str, np.ndarray], None, None]:\n \n iterator = self._load_video_frames if self._is_video else self._load_disk_frames\n for filename, image in iterator():\n yield filename, image\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 54, "n_words": 22, "vocab_size": 20, "complexity": 3, "nloc": 13, "token_counts": 48, "n_ast_nodes": 73, "n_identifiers": 13, "d_id": 20812, "documentation": { "docstring": " Generator to load frames from a folder of images or from a video file.\n\n Yields\n ------\n filename: str\n The filename of the current frame\n image: :class:`numpy.ndarray`\n A single frame\n ", "n_words": 29, "vocab_size": 25, "n_whitespaces": 87, "language": "en" } }, { "id": 43918, "commit_id": "d48a3a357fd89ec805d086d5b6c1f1d4daf77b9a", "repo": "airflow", "path": "airflow/models/taskinstance.py", "file_name": "taskinstance.py", "fun_name": "_execute_task", "commit_message": "Add TaskMap and TaskInstance.map_id (#20286)\n\nCo-authored-by: Ash Berlin-Taylor ", "code": "def _execute_task(self, context, task_copy):\n \n # If the task has been deferred and is being executed due to a trigger,\n # then we need to pick the right method to come back to, otherwise\n # we go for the default execute\n execute_callable = task_copy.execute\n if self.next_method:\n # __fail__ is a special signal value for next_method that indicates\n # this task was scheduled specifically to fail.\n if self.next_method == \"__fail__\":\n next_kwargs = self.next_kwargs or {}\n raise TaskDeferralError(next_kwargs.get(\"error\", \"Unknown\"))\n # Grab the callable off the Operator/Task and add in any kwargs\n execute_callable = getattr(task_copy, self.next_method)\n if self.next_kwargs:\n execute_callable = partial(execute_callable, **self.next_kwargs)\n # If a timeout is specified for the task, make it fail\n # if it goes beyond\n if task_copy.execution_timeout:\n # If we are coming in with a next_method (i.e. 
from a deferral),\n # calculate the timeout from our start_date.\n if self.next_method:\n timeout_seconds = (\n task_copy.execution_timeout - (timezone.utcnow() - self.start_date)\n ).total_seconds()\n else:\n timeout_seconds = task_copy.execution_timeout.total_seconds()\n try:\n # It's possible we're already timed out, so fast-fail if true\n if timeout_seconds <= 0:\n raise AirflowTaskTimeout()\n # Run task in timeout wrapper\n with timeout(timeout_seconds):\n result = execute_callable(context=context)\n except AirflowTaskTimeout:\n task_copy.on_kill()\n raise\n else:\n result = execute_callable(context=context)\n # If the task returns a result, push an XCom containing it\n if task_copy.do_xcom_push and result is not None:\n with create_session() as session:\n self.xcom_push(key=XCOM_RETURN_KEY, value=result, session=session)\n self._record_task_map_for_downstreams(result, session=session)\n return result\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 731, "n_words": 219, "vocab_size": 138, "complexity": 11, "nloc": 31, "token_counts": 206, "n_ast_nodes": 354, "n_identifiers": 30, "d_id": 8094, "documentation": { "docstring": "Executes Task (optionally with a Timeout) and pushes Xcom results", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 320711, "commit_id": "f6a365172afe127a4ba770e14569f2d3cd7569b4", "repo": "qutebrowser", "path": "scripts/dev/update_3rdparty.py", "file_name": "update_3rdparty.py", "fun_name": "update_pdfjs", "commit_message": "Use legacy PDF.js build for macOS/Windows releases\n\nFixes #7108", "code": "def update_pdfjs(target_version=None, legacy=False, gh_token=None):\n \n if target_version is None:\n version, url = get_latest_pdfjs_url(gh_token, legacy=legacy)\n else:\n # We need target_version as x.y.z, without the 'v' prefix, though the\n # user might give it on the command line\n if target_version.startswith('v'):\n target_version = target_version[1:]\n # version should have the prefix to be consistent with the return value\n # of get_latest_pdfjs_url()\n version = 'v' + target_version\n suffix = \"-legacy\" if legacy else \"\"\n url = ('https://github.com/mozilla/pdf.js/releases/download/'\n f'{version}/pdfjs-{target_version}{suffix}-dist.zip')\n\n os.chdir(os.path.join(os.path.dirname(os.path.abspath(__file__)),\n '..', '..'))\n target_path = os.path.join('qutebrowser', '3rdparty', 'pdfjs')\n print(f\"=> Downloading pdf.js {version}{' (legacy)' if legacy else ''}\")\n try:\n (archive_path, _headers) = urllib.request.urlretrieve(url)\n except urllib.error.HTTPError as error:\n print(\"Could not retrieve pdfjs {}: {}\".format(version, error))\n return\n if os.path.isdir(target_path):\n print(\"Removing old version in {}\".format(target_path))\n shutil.rmtree(target_path)\n os.makedirs(target_path)\n print(\"Extracting new version\")\n shutil.unpack_archive(archive_path, target_path, 'zip')\n urllib.request.urlcleanup()\n\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 309, "n_words": 122, "vocab_size": 94, "complexity": 6, "nloc": 26, "token_counts": 208, "n_ast_nodes": 390, "n_identifiers": 32, "d_id": 117302, "documentation": { "docstring": "Download and extract the latest pdf.js version.\n\n If target_version is not None, download the given version instead.\n\n Args:\n target_version: None or version string ('x.y.z')\n 
legacy: Whether to download the legacy build for 83-based.\n gh_token: GitHub token to use for the API. Optional except on CI.\n ", "n_words": 45, "vocab_size": 38, "n_whitespaces": 75, "language": "en" } }, { "id": 247637, "commit_id": "5dd949bee6158a8b651db9f2ae417a62c8184bfd", "repo": "synapse", "path": "tests/handlers/test_oidc.py", "file_name": "test_oidc.py", "fun_name": "test_callback_error", "commit_message": "Add type hints to some tests/handlers files. (#12224)", "code": "def test_callback_error(self) -> None:\n \n request = Mock(args={})\n request.args[b\"error\"] = [b\"invalid_client\"]\n self.get_success(self.handler.handle_oidc_callback(request))\n self.assertRenderedError(\"invalid_client\", \"\")\n\n request.args[b\"error_description\"] = [b\"some description\"]\n self.get_success(self.handler.handle_oidc_callback(request))\n self.assertRenderedError(\"invalid_client\", \"some description\")\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 77, "n_words": 21, "vocab_size": 17, "complexity": 1, "nloc": 9, "token_counts": 83, "n_ast_nodes": 143, "n_identifiers": 9, "d_id": 71801, "documentation": { "docstring": "Errors from the provider returned in the callback are displayed.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 60414, "commit_id": "cc4d0564756ca067516f71718a3d135996525909", "repo": "transferlearning", "path": "code/deep/BJMMD/caffe/scripts/cpp_lint.py", "file_name": "cpp_lint.py", "fun_name": "_Filters", "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", "code": "def _Filters():\n \n return _cpplint_state.filters\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 6, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 9, "n_ast_nodes": 18, "n_identifiers": 3, "d_id": 12142, "documentation": { "docstring": "Returns the module's list of output filters, as a list.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 226282, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_box.py", "file_name": "_box.py", "fun_name": "notchspansrc", "commit_message": "switch to black .22", "code": "def notchspansrc(self):\n \n return self[\"notchspansrc\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 57955, "documentation": { "docstring": "\n Sets the source reference on Chart Studio Cloud for\n `notchspan`.\n\n The 'notchspansrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n ", "n_words": 27, "vocab_size": 25, "n_whitespaces": 84, "language": "en" } }, { "id": 275708, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/preprocessing/image.py", "file_name": "image.py", "fun_name": "get_random_transform", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def get_random_transform(self, img_shape, seed=None):\n \n img_row_axis = self.row_axis - 1\n img_col_axis = self.col_axis - 1\n\n if seed is not 
None:\n np.random.seed(seed)\n\n if self.rotation_range:\n theta = np.random.uniform(-self.rotation_range, self.rotation_range)\n else:\n theta = 0\n\n if self.height_shift_range:\n try: # 1-D array-like or int\n tx = np.random.choice(self.height_shift_range)\n tx *= np.random.choice([-1, 1])\n except ValueError: # floating point\n tx = np.random.uniform(\n -self.height_shift_range, self.height_shift_range\n )\n if np.max(self.height_shift_range) < 1:\n tx *= img_shape[img_row_axis]\n else:\n tx = 0\n\n if self.width_shift_range:\n try: # 1-D array-like or int\n ty = np.random.choice(self.width_shift_range)\n ty *= np.random.choice([-1, 1])\n except ValueError: # floating point\n ty = np.random.uniform(\n -self.width_shift_range, self.width_shift_range\n )\n if np.max(self.width_shift_range) < 1:\n ty *= img_shape[img_col_axis]\n else:\n ty = 0\n\n if self.shear_range:\n shear = np.random.uniform(-self.shear_range, self.shear_range)\n else:\n shear = 0\n\n if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:\n zx, zy = 1, 1\n else:\n zx, zy = np.random.uniform(\n self.zoom_range[0], self.zoom_range[1], 2\n )\n\n flip_horizontal = (np.random.random() < 0.5) * self.horizontal_flip\n flip_vertical = (np.random.random() < 0.5) * self.vertical_flip\n\n channel_shift_intensity = None\n if self.channel_shift_range != 0:\n channel_shift_intensity = np.random.uniform(\n -self.channel_shift_range, self.channel_shift_range\n )\n\n brightness = None\n if self.brightness_range is not None:\n brightness = np.random.uniform(\n self.brightness_range[0], self.brightness_range[1]\n )\n\n transform_parameters = {\n \"theta\": theta,\n \"tx\": tx,\n \"ty\": ty,\n \"shear\": shear,\n \"zx\": zx,\n \"zy\": zy,\n \"flip_horizontal\": flip_horizontal,\n \"flip_vertical\": flip_vertical,\n \"channel_shift_intensity\": channel_shift_intensity,\n \"brightness\": brightness,\n }\n\n return transform_parameters\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 931, "n_words": 203, "vocab_size": 108, "complexity": 14, "nloc": 68, "token_counts": 450, "n_ast_nodes": 703, "n_identifiers": 34, "d_id": 81446, "documentation": { "docstring": "Generates random parameters for a transformation.\n\n Args:\n img_shape: Tuple of integers.\n Shape of the image that is transformed.\n seed: Random seed.\n\n Returns:\n A dictionary containing randomly chosen parameters describing the\n transformation.\n ", "n_words": 31, "vocab_size": 27, "n_whitespaces": 111, "language": "en" } }, { "id": 126132, "commit_id": "659d25a3a9c4794db9dbe8f428ec587470b261b0", "repo": "ray", "path": "python/ray/workflow/tests/test_http_events.py", "file_name": "test_http_events.py", "fun_name": "test_dynamic_event_by_http", "commit_message": "[workflow] http_event_provider and accompanied listener (#26010)\n\n### Why are these changes needed?\r\nThis PR enhances workflow functionality to receive external events from a Serve based HTTP endpoint. A workflow can then consume events asynchronously as they arrive. \r\n\r\n### Design Logic\r\nA `workflow.wait_for_event` node subscribes to the endpoint instantiated by a Ray Serve deployment of class `http_event_provider.HTTPEventProvider`. The subscription is made through a helper class `http_event_provider.HTTPListener`. 
`HTTPListener` implements the methods of `EventListener` to poll from and confirm event checkpointing to `HTTPEventProvider`, before `HTTPEventProvider`acknowledges success or error to the event submitter. \r\n\r\n### Architecture Improvement\r\nThe logic of this enhancement conforms with existing workflow runtime design.", "code": "def test_dynamic_event_by_http(workflow_start_regular_shared_serve):\n \n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 4, "nloc": 20, "token_counts": 91, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 28063, "documentation": { "docstring": "If a workflow has dynamically generated event arguments, it should\n return the event as if the event was declared statically.\n ", "n_words": 20, "vocab_size": 17, "n_whitespaces": 26, "language": "en" } }, { "id": 114589, "commit_id": "32edb0b1468a705d89af89ed2b3dca2a459dc23f", "repo": "mindsdb", "path": "mindsdb/integrations/postgres_handler/postgres_handler.py", "file_name": "postgres_handler.py", "fun_name": "select_query", "commit_message": "Select query", "code": "def select_query(self, targets, from_stmt, where_stmt):\n \n query = f\"SELECT {','.join([t.__str__() for t in targets])} FROM {from_stmt.parts[-1]}\"\n if where_stmt:\n query += f\" WHERE {str(where_stmt)}\"\n\n result = self.run_native_query(query)\n return result\n\n #TODO: JOIN, SELECT INTO", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 80, "n_words": 31, "vocab_size": 28, "complexity": 2, "nloc": 6, "token_counts": 33, "n_ast_nodes": 106, "n_identifiers": 13, "d_id": 25224, "documentation": { "docstring": "\n Retrieve the data from the SQL statement with eliminated rows that dont satisfy the WHERE condition\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 31, "language": "en" } }, { "id": 22051, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/api.py", "file_name": "api.py", "fun_name": "patch", "commit_message": "Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def patch(url, data=None, **kwargs):\n r\n\n return request(\"patch\", url, data=data, **kwargs)\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 15, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 12, "token_counts": 28, "n_ast_nodes": 43, "n_identifiers": 5, "d_id": 4138, "documentation": { "docstring": "Sends a PATCH request.\n\n :param url: URL for the new :class:`Request` object.\n :param data: (optional) Dictionary, list of tuples, bytes, or file-like\n object to send in the body of the :class:`Request`.\n :param json: (optional) json data to send in the body of the :class:`Request`.\n :param \\*\\*kwargs: Optional arguments that ``request`` takes.\n :return: :class:`Response ` object\n :rtype: requests.Response\n ", "n_words": 57, "vocab_size": 41, "n_whitespaces": 85, "language": "en" } }, { "id": 19833, "commit_id": "949ee95d6748e8777bed589f0d990aa4792b28f8", "repo": "pipenv", "path": "tests/integration/test_install_basic.py", "file_name": "test_install_basic.py", "fun_name": "test_install_venv_project_directory", "commit_message": "More granular control over PIPENV_VENV_IN_PROJECT variable. 
(#5026)\n\n* Allow PIPENV_VENV_IN_PROJECT to be read in as None, and ensure if it is set to False that it does not use .venv directory.\r\n\r\n* refactor based on PR feedback and add news fragment.\r\n\r\n* Review unit test coverage and add new tests. Remove unneccesary bits from other tests.", "code": "def test_install_venv_project_directory(PipenvInstance):\n \n with PipenvInstance(chdir=True) as p:\n with temp_environ(), TemporaryDirectory(\n prefix=\"pipenv-\", suffix=\"temp_workon_home\"\n ) as workon_home:\n os.environ[\"WORKON_HOME\"] = workon_home\n\n c = p.pipenv(\"install six\")\n assert c.returncode == 0\n\n venv_loc = None\n for line in c.stderr.splitlines():\n if line.startswith(\"Virtualenv location:\"):\n venv_loc = Path(line.split(\":\", 1)[-1].strip())\n assert venv_loc is not None\n assert venv_loc.joinpath(\".project\").exists()\n\n\n@pytest.mark.cli\n@pytest.mark.deploy\n@pytest.mark.system", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "@pytest.mark.cli\n@pytest.mark.deploy\n@pytest.mark.system", "n_ast_errors": 1, "ast_levels": 22, "n_whitespaces": 188, "n_words": 49, "vocab_size": 39, "complexity": 4, "nloc": 16, "token_counts": 129, "n_ast_nodes": 232, "n_identifiers": 29, "d_id": 3106, "documentation": { "docstring": "Test the project functionality during virtualenv creation.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 10, "language": "en" } }, { "id": 242236, "commit_id": "f8e4e9c2dd94c6f4789639dd891b8a6d5fb16e14", "repo": "Pillow", "path": "src/PIL/Image.py", "file_name": "Image.py", "fun_name": "quantize", "commit_message": "Added enums", "code": "def quantize(self, colors=256, method=None, kmeans=0, palette=None, dither=1):\n \n\n self.load()\n\n if method is None:\n # defaults:\n method = Quantize.MEDIANCUT\n if self.mode == \"RGBA\":\n method = Quantize.FASTOCTREE\n\n if self.mode == \"RGBA\" and method not in (\n Quantize.FASTOCTREE,\n Quantize.LIBIMAGEQUANT,\n ):\n # Caller specified an invalid mode.\n raise ValueError(\n \"Fast Octree (method == 2) and libimagequant (method == 3) \"\n \"are the only valid methods for quantizing RGBA images\"\n )\n\n if palette:\n # use palette from reference image\n palette.load()\n if palette.mode != \"P\":\n raise ValueError(\"bad mode for palette image\")\n if self.mode != \"RGB\" and self.mode != \"L\":\n raise ValueError(\n \"only RGB or L mode images can be quantized to a palette\"\n )\n im = self.im.convert(\"P\", dither, palette.im)\n new_im = self._new(im)\n new_im.palette = palette.palette.copy()\n return new_im\n\n im = self._new(self.im.quantize(colors, method, kmeans))\n\n from . 
import ImagePalette\n\n mode = im.im.getpalettemode()\n palette = im.im.getpalette(mode, mode)[: colors * len(mode)]\n im.palette = ImagePalette.ImagePalette(mode, palette)\n\n return im\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 514, "n_words": 145, "vocab_size": 102, "complexity": 9, "nloc": 32, "token_counts": 222, "n_ast_nodes": 367, "n_identifiers": 23, "d_id": 69798, "documentation": { "docstring": "\n Convert the image to 'P' mode with the specified number\n of colors.\n\n :param colors: The desired number of colors, <= 256\n :param method: :data:`Quantize.MEDIANCUT` (median cut),\n :data:`Quantize.MAXCOVERAGE` (maximum coverage),\n :data:`Quantize.FASTOCTREE` (fast octree),\n :data:`Quantize.LIBIMAGEQUANT` (libimagequant; check support\n using :py:func:`PIL.features.check_feature` with\n ``feature=\"libimagequant\"``).\n\n By default, :data:`Quantize.MEDIANCUT` will be used.\n\n The exception to this is RGBA images. :data:`Quantize.MEDIANCUT`\n and :data:`Quantize.MAXCOVERAGE` do not support RGBA images, so\n :data:`Quantize.FASTOCTREE` is used by default instead.\n :param kmeans: Integer\n :param palette: Quantize to the palette of given\n :py:class:`PIL.Image.Image`.\n :param dither: Dithering method, used when converting from\n mode \"RGB\" to \"P\" or from \"RGB\" or \"L\" to \"1\".\n Available methods are :data:`Dither.NONE` or :data:`Dither.FLOYDSTEINBERG`\n (default).\n Default: 1 (legacy setting)\n :returns: A new image\n\n ", "n_words": 114, "vocab_size": 85, "n_whitespaces": 439, "language": "en" } }, { "id": 32050, "commit_id": "77ea5130a1cd7de36796cc4d1bae6f21094d8863", "repo": "transformers", "path": "src/transformers/models/resnet/modeling_tf_resnet.py", "file_name": "modeling_tf_resnet.py", "fun_name": "dummy_inputs", "commit_message": "Add TF ResNet model (#17427)\n\n* Rought TF conversion outline\r\n\r\n* Tidy up\r\n\r\n* Fix padding differences between layers\r\n\r\n* Add back embedder - whoops\r\n\r\n* Match test file to main\r\n\r\n* Match upstream test file\r\n\r\n* Correctly pass and assign image_size parameter\r\n\r\nCo-authored-by: Sayak Paul \r\n\r\n* Add in MainLayer\r\n\r\n* Correctly name layer\r\n\r\n* Tidy up AdaptivePooler\r\n\r\n* Small tidy-up\r\n\r\nMore accurate type hints and remove whitespaces\r\n\r\n* Change AdaptiveAvgPool\r\n\r\nUse the AdaptiveAvgPool implementation by @Rocketknight1, which correctly pools if the output shape does not evenly divide by input shape c.f. 
https://github.com/huggingface/transformers/pull/17554/files/9e26607e22aa8d069c86b50196656012ff0ce62a#r900109509\r\n\r\nCo-authored-by: From: matt \r\nCo-authored-by: Sayak Paul \r\n\r\n* Use updated AdaptiveAvgPool\r\n\r\nCo-authored-by: matt \r\n\r\n* Make AdaptiveAvgPool compatible with CPU\r\n\r\n* Remove image_size from configuration\r\n\r\n* Fixup\r\n\r\n* Tensorflow -> TensorFlow\r\n\r\n* Fix pt references in tests\r\n\r\n* Apply suggestions from code review - grammar and wording\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* Add TFResNet to doc tests\r\n\r\n* PR comments - GlobalAveragePooling and clearer comments\r\n\r\n* Remove unused import\r\n\r\n* Add in keepdims argument\r\n\r\n* Add num_channels check\r\n\r\n* grammar fix: by -> of\r\n\r\nCo-authored-by: matt \r\n\r\nCo-authored-by: Matt \r\n\r\n* Remove transposes - keep NHWC throughout forward pass\r\n\r\n* Fixup look sharp\r\n\r\n* Add missing layer names\r\n\r\n* Final tidy up - remove from_pt now weights on hub\r\n\r\nCo-authored-by: Sayak Paul \r\nCo-authored-by: matt \r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\nCo-authored-by: Matt ", "code": "def dummy_inputs(self) -> Dict[str, tf.Tensor]:\n \n VISION_DUMMY_INPUTS = tf.random.uniform(shape=(3, self.config.num_channels, 224, 224), dtype=tf.float32)\n return {\"pixel_values\": tf.constant(VISION_DUMMY_INPUTS)}\n\n\nRESNET_START_DOCSTRING = r\n\n\nRESNET_INPUTS_DOCSTRING = r\n\n\n@keras_serializable", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@keras_serializable", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 40, "n_words": 22, "vocab_size": 19, "complexity": 1, "nloc": 7, "token_counts": 56, "n_ast_nodes": 104, "n_identifiers": 18, "d_id": 5843, "documentation": { "docstring": "\n Dummy inputs to build the network. Returns:\n `Dict[str, tf.Tensor]`: The dummy inputs.\n \n This model is a TensorFlow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular TensorFlow Module and refer to the TensorFlow documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoFeatureExtractor`]. See\n [`AutoFeatureExtractor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n", "n_words": 140, "vocab_size": 98, "n_whitespaces": 289, "language": "en" } }, { "id": 159676, "commit_id": "4cdceaab5271a5b51463ec562c8eb55f96b771c5", "repo": "rasa", "path": "rasa/utils/tensorflow/model_data.py", "file_name": "model_data.py", "fun_name": "__reduce__", "commit_message": "Bump numpy from 1.19.5 to 1.21.6 (#11078)\n\n* Bump numpy from 1.19.5 to 1.21.6\r\n\r\nBumps [numpy](https://github.com/numpy/numpy) from 1.19.5 to 1.21.6.\r\n- [Release notes](https://github.com/numpy/numpy/releases)\r\n- [Changelog](https://github.com/numpy/numpy/blob/main/doc/HOWTO_RELEASE.rst.txt)\r\n- [Commits](https://github.com/numpy/numpy/compare/v1.19.5...v1.21.6)\r\n\r\n---\r\nupdated-dependencies:\r\n- dependency-name: numpy\r\n dependency-type: direct:production\r\n update-type: version-update:semver-minor\r\n...\r\n\r\nSigned-off-by: dependabot[bot] \r\n\r\n* fixed mypy errors for numpy 1.21.6 upgrade\r\n\r\n* removed duplicate np.array call\r\n\r\nSigned-off-by: dependabot[bot] \r\nCo-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>\r\nCo-authored-by: Thomas Werkmeister \r\nCo-authored-by: melindaloubser1 ", "code": "def __reduce__(self) -> Tuple[Any, Any, Any]:\n \n pickled_state = super(FeatureArray, self).__reduce__()\n if isinstance(pickled_state, str):\n raise TypeError(\"np array __reduce__ returned string instead of tuple.\")\n new_state = pickled_state[2] + (\n self.number_of_dimensions,\n self.is_sparse,\n self.units,\n )\n return pickled_state[0], pickled_state[1], new_state\n", "url": "https://github.com/RasaHQ/rasa.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 121, "n_words": 35, "vocab_size": 33, "complexity": 2, "nloc": 15, "token_counts": 73, "n_ast_nodes": 110, "n_identifiers": 14, "d_id": 38392, "documentation": { "docstring": "Needed in order to pickle this object.\n\n Returns:\n A tuple.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 35, "language": "en" } }, { "id": 249472, "commit_id": "d3d9ca156e323fe194b1bcb1af1628f65a2f3c1c", "repo": "synapse", "path": "tests/rest/client/test_keys.py", "file_name": "test_keys.py", "fun_name": "test_key_query_cancellation", "commit_message": "Cancel the processing of key query requests when they time out. 
(#13680)", "code": "def test_key_query_cancellation(self) -> None:\n \n self.register_user(\"alice\", \"wonderland\")\n alice_token = self.login(\"alice\", \"wonderland\")\n\n bob = self.register_user(\"bob\", \"uncle\")\n\n channel = make_request_with_cancellation_test(\n \"test_key_query_cancellation\",\n self.reactor,\n self.site,\n \"POST\",\n \"/_matrix/client/r0/keys/query\",\n {\n \"device_keys\": {\n # Empty list means we request keys for all bob's devices\n bob: [],\n },\n },\n token=alice_token,\n )\n\n self.assertEqual(200, channel.code, msg=channel.result[\"body\"])\n self.assertIn(bob, channel.json_body[\"device_keys\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 259, "n_words": 47, "vocab_size": 42, "complexity": 1, "nloc": 23, "token_counts": 104, "n_ast_nodes": 177, "n_identifiers": 17, "d_id": 72939, "documentation": { "docstring": "\n Tests that /keys/query is cancellable and does not swallow the\n CancelledError.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 33, "language": "en" } }, { "id": 135946, "commit_id": "700618d0dbd27282ce72441d22a0000075b7a54f", "repo": "ray", "path": "python/ray/serve/tests/test_standalone2.py", "file_name": "test_standalone2.py", "fun_name": "test_normal_operation", "commit_message": "[Serve] Add the `SERVE_REQUEST_PROCESSING_TIMEOUT_S` environment variable (#29534)", "code": "def test_normal_operation(self, ray_instance):\n \n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 10, "n_words": 3, "vocab_size": 3, "complexity": 2, "nloc": 7, "token_counts": 52, "n_ast_nodes": 15, "n_identifiers": 3, "d_id": 30778, "documentation": { "docstring": "Checks that a moderate timeout doesn't affect normal operation.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 314918, "commit_id": "e64336cb91d1ce97ac82c57e98477acedfcbcf71", "repo": "core", "path": "tests/components/generic/test_config_flow.py", "file_name": "test_config_flow.py", "fun_name": "test_slug", "commit_message": "Allow configuring username and password in generic camera config flow (#73804)\n\n* Add ability to use user & pw not in stream url\r\n\r\n* Increase test coverage to 100%\r\n\r\n* Increase test coverage\r\n\r\n* Verify that stream source includes user:pass\r\n\r\n* Code review: refactor test to use MockConfigEntry\r\n\r\n* Code review: Improve test docstring\r\n\r\n* Edit comment; retrigger CI.\r\n\r\nCo-authored-by: Dave T ", "code": "async def test_slug(hass, caplog):\n \n result = slug(hass, \"http://127.0.0.2/testurl/{{1/0}}\")\n assert result is None\n assert \"Syntax error in\" in caplog.text\n\n\n@respx.mock", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@respx.mock", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 30, "n_words": 19, "vocab_size": 17, "complexity": 1, "nloc": 4, "token_counts": 26, "n_ast_nodes": 54, "n_identifiers": 8, "d_id": 113519, "documentation": { "docstring": "\n Test that the slug function generates an error in case of invalid template.\n\n Other paths in the slug function are already tested by other tests.\n ", "n_words": 25, "vocab_size": 21, "n_whitespaces": 35, "language": "en" } }, { "id": 6396, "commit_id": "d77aaf8da39f04a353a3a08fb699ae8a96ffea3a", "repo": "ludwig", "path": "ludwig/automl/auto_tune_config.py", "file_name": "auto_tune_config.py", 
"fun_name": "_get_text_feature_max_length", "commit_message": "Improve AutoML heuristics for text classification (#1815)\n\n* Improve AutoML heuristics for text classification\r\n\r\nCo-authored-by: Anne Holler ", "code": "def _get_text_feature_max_length(config, training_set_metadata) -> int:\n \n max_length = 0\n for feature in config[\"input_features\"]:\n if feature[\"type\"] == TEXT:\n feature_max_len = training_set_metadata[feature[\"name\"]][\"word_max_sequence_length\"]\n if feature_max_len > max_length:\n max_length = feature_max_len\n if (\n (\"preprocessing\" in config)\n and (TEXT in config[\"preprocessing\"])\n and (\"word_sequence_length_limit\" in config[\"preprocessing\"][TEXT])\n ):\n limit = config[\"preprocessing\"][TEXT][\"word_sequence_length_limit\"]\n else:\n limit = 256 # Preprocessing default word_sequence_length_limit = 256\n if max_length > limit + 2: # For start and stop symbols.\n max_length = limit + 2\n return max_length\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 184, "n_words": 72, "vocab_size": 45, "complexity": 8, "nloc": 19, "token_counts": 110, "n_ast_nodes": 190, "n_identifiers": 9, "d_id": 970, "documentation": { "docstring": "Returns max sequence length over text features, subject to preprocessing limit.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 60126, "commit_id": "a368874d1b145c1ec5201e5efd3c26ce7c1e8611", "repo": "prefect", "path": "src/prefect/_internal/concurrency/primitives.py", "file_name": "primitives.py", "fun_name": "wait", "commit_message": "Add thread-safe async primitives `Event` and `Future` (#7865)\n\nCo-authored-by: Serina Grill <42048900+serinamarie@users.noreply.github.com>", "code": "async def wait(self) -> None:\n \n if self._is_set:\n return\n\n if not self._loop:\n self._loop = get_running_loop()\n self._event = asyncio.Event()\n\n await self._event.wait()\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 80, "n_words": 19, "vocab_size": 17, "complexity": 3, "nloc": 12, "token_counts": 44, "n_ast_nodes": 78, "n_identifiers": 8, "d_id": 11991, "documentation": { "docstring": "\n Wait until the flag has been set.\n\n If the flag has already been set when this method is called, it returns immediately.\n ", "n_words": 22, "vocab_size": 18, "n_whitespaces": 44, "language": "en" } }, { "id": 168932, "commit_id": "6787b8b73f4c54a0cf742a90433e6fb6c7edb231", "repo": "pandas", "path": "pandas/io/date_converters.py", "file_name": "date_converters.py", "fun_name": "generic_parser", "commit_message": "TST: Address/catch more test warnings (#48358)", "code": "def generic_parser(parse_func, *cols) -> np.ndarray:\n \n\n warnings.warn(\n \"Use pd.to_datetime instead.\",\n FutureWarning,\n stacklevel=find_stack_level(inspect.currentframe()),\n )\n\n N = _check_columns(cols)\n results = np.empty(N, dtype=object)\n\n for i in range(N):\n args = [c[i] for c in cols]\n results[i] = parse_func(*args)\n\n return results\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 91, "n_words": 35, "vocab_size": 29, "complexity": 3, "nloc": 17, "token_counts": 83, "n_ast_nodes": 131, "n_identifiers": 22, "d_id": 40344, "documentation": { "docstring": "\n Use dateparser to parse columns with data information into a 
single datetime column.\n\n .. deprecated:: 1.2\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 26, "language": "en" } }, { "id": 227377, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_layout.py", "file_name": "_layout.py", "fun_name": "computed", "commit_message": "switch to black .22", "code": "def computed(self):\n \n return self[\"computed\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 59050, "documentation": { "docstring": "\n Placeholder for exporting automargin-impacting values namely\n `margin.t`, `margin.b`, `margin.l` and `margin.r` in \"full-\n json\" mode.\n\n The 'computed' property accepts values of any type\n\n Returns\n -------\n Any\n ", "n_words": 26, "vocab_size": 25, "n_whitespaces": 83, "language": "en" } }, { "id": 130162, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/_private/profiling.py", "file_name": "profiling.py", "fun_name": "profile", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def profile(event_type, extra_data=None):\n \n if not PROFILING_ENABLED:\n return NULL_LOG_SPAN\n worker = ray.worker.global_worker\n if worker.mode == ray.worker.LOCAL_MODE:\n return NULL_LOG_SPAN\n return worker.core_worker.profile_event(event_type.encode(\"ascii\"), extra_data)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 49, "n_words": 20, "vocab_size": 16, "complexity": 3, "nloc": 7, "token_counts": 52, "n_ast_nodes": 85, "n_identifiers": 13, "d_id": 29129, "documentation": { "docstring": "Profile a span of time so that it appears in the timeline visualization.\n\n Note that this only works in the raylet code path.\n\n This function can be used as follows (both on the driver or within a task).\n\n .. code-block:: python\n import ray._private.profiling as profiling\n\n with profiling.profile(\"custom event\", extra_data={'key': 'val'}):\n # Do some computation here.\n\n Optionally, a dictionary can be passed as the \"extra_data\" argument, and\n it can have keys \"name\" and \"cname\" if you want to override the default\n timeline display text and box color. Other values will appear at the bottom\n of the chrome tracing GUI when you click on the box corresponding to this\n profile span.\n\n Args:\n event_type: A string describing the type of the event.\n extra_data: This must be a dictionary mapping strings to strings. 
This\n data will be added to the json objects that are used to populate\n the timeline, so if you want to set a particular color, you can\n simply set the \"cname\" attribute to an appropriate color.\n Similarly, if you set the \"name\" attribute, then that will set the\n text displayed on the box in the timeline.\n\n Returns:\n An object that can profile a span of time via a \"with\" statement.\n ", "n_words": 199, "vocab_size": 120, "n_whitespaces": 333, "language": "en" } }, { "id": 241808, "commit_id": "4871f3d1c61bdb296ae03e3480f5f584f5c67256", "repo": "scipy", "path": "scipy/special/_basic.py", "file_name": "_basic.py", "fun_name": "mathieu_even_coef", "commit_message": "MAINT: optimize, special, signal: Use custom warnings instead of print statements (#15259)\n\nCo-authored-by: Pamphile Roy \r\nCo-authored-by: Tirth Patel ", "code": "def mathieu_even_coef(m, q):\n r\n if not (isscalar(m) and isscalar(q)):\n raise ValueError(\"m and q must be scalars.\")\n if (q < 0):\n raise ValueError(\"q >=0\")\n if (m != floor(m)) or (m < 0):\n raise ValueError(\"m must be an integer >=0.\")\n\n if (q <= 1):\n qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q\n else:\n qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q\n km = int(qm + 0.5*m)\n if km > 251:\n warnings.warn(\"Too many predicted coefficients.\", RuntimeWarning, 2)\n kd = 1\n m = int(floor(m))\n if m % 2:\n kd = 2\n\n a = mathieu_a(m, q)\n fc = _specfun.fcoef(kd, m, q, a)\n return fc[:km]\n\n", "url": "https://github.com/scipy/scipy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 191, "n_words": 101, "vocab_size": 70, "complexity": 9, "nloc": 55, "token_counts": 205, "n_ast_nodes": 304, "n_identifiers": 19, "d_id": 69704, "documentation": { "docstring": "Fourier coefficients for even Mathieu and modified Mathieu functions.\n\n The Fourier series of the even solutions of the Mathieu differential\n equation are of the form\n\n .. math:: \\mathrm{ce}_{2n}(z, q) = \\sum_{k=0}^{\\infty} A_{(2n)}^{(2k)} \\cos 2kz\n\n .. math:: \\mathrm{ce}_{2n+1}(z, q) = \\sum_{k=0}^{\\infty} A_{(2n+1)}^{(2k+1)} \\cos (2k+1)z\n\n This function returns the coefficients :math:`A_{(2n)}^{(2k)}` for even\n input m=2n, and the coefficients :math:`A_{(2n+1)}^{(2k+1)}` for odd input\n m=2n+1.\n\n Parameters\n ----------\n m : int\n Order of Mathieu functions. Must be non-negative.\n q : float (>=0)\n Parameter of Mathieu functions. Must be non-negative.\n\n Returns\n -------\n Ak : ndarray\n Even or odd Fourier coefficients, corresponding to even or odd m.\n\n References\n ----------\n .. [1] Zhang, Shanjie and Jin, Jianming. \"Computation of Special\n Functions\", John Wiley and Sons, 1996.\n https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html\n .. [2] NIST Digital Library of Mathematical Functions\n https://dlmf.nist.gov/28.4#i\n\n ", "n_words": 128, "vocab_size": 82, "n_whitespaces": 238, "language": "en" } }, { "id": 77627, "commit_id": "52ace9eae7311fa708dd19a7d6b6cabfb36a8fee", "repo": "wagtail", "path": "wagtail/images/models.py", "file_name": "models.py", "fun_name": "prefetch_renditions", "commit_message": "Add prefetch_renditions method on Image queryset manager\n\nUpdate logic when creating and looking for a rendtion", "code": "def prefetch_renditions(self, *filters):\n \n # Get a list of filter spec strings. 
The given value could contain Filter objects\n filter_specs = [\n filter.spec if isinstance(filter, Filter) else filter for filter in filters\n ]\n\n rendition_model = self.model.get_rendition_model()\n\n return self.prefetch_related(\n models.Prefetch(\n \"renditions\",\n queryset=rendition_model.objects.filter(filter_spec__in=filter_specs),\n to_attr=\"prefetched_renditions\",\n )\n )\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 170, "n_words": 43, "vocab_size": 39, "complexity": 3, "nloc": 12, "token_counts": 68, "n_ast_nodes": 109, "n_identifiers": 18, "d_id": 16680, "documentation": { "docstring": "\n Prefetches generated renditions for the given filters.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 125008, "commit_id": "569fe0109629048d08e1d9e023f7769f10bd2244", "repo": "ray", "path": "rllib/offline/tests/test_dataset_reader.py", "file_name": "test_dataset_reader.py", "fun_name": "test_dataset_shard_with_loader_fn", "commit_message": "[RLlib] improved unittests for dataset_reader and fixed bugs (#26458)", "code": "def test_dataset_shard_with_loader_fn(self):\n \n dset = ray.data.range(100)\n config = {\"input\": \"dataset\", \"input_config\": {\"loader_fn\": lambda: dset}}\n\n ret_dataset, _ = get_dataset_and_shards(config)\n assert ret_dataset.count() == dset.count()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 56, "n_words": 21, "vocab_size": 19, "complexity": 1, "nloc": 5, "token_counts": 53, "n_ast_nodes": 95, "n_identifiers": 11, "d_id": 27747, "documentation": { "docstring": "Tests whether the dataset_shard function works correctly with loader_fn.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 269509, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "_preprocess_conv3d_input", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _preprocess_conv3d_input(x, data_format):\n \n tf_data_format = \"NDHWC\"\n if data_format == \"channels_first\":\n if not _has_nchw_support():\n x = tf.compat.v1.transpose(x, (0, 2, 3, 4, 1))\n else:\n tf_data_format = \"NCDHW\"\n return x, tf_data_format\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 76, "n_words": 28, "vocab_size": 23, "complexity": 3, "nloc": 8, "token_counts": 55, "n_ast_nodes": 92, "n_identifiers": 9, "d_id": 80140, "documentation": { "docstring": "Transpose and cast the input before the conv3d.\n\n Args:\n x: input tensor.\n data_format: string, `\"channels_last\"` or `\"channels_first\"`.\n\n Returns:\n A tensor.\n ", "n_words": 20, "vocab_size": 17, "n_whitespaces": 50, "language": "en" } }, { "id": 299612, "commit_id": "ae01ec02e28d4b83ef64636e36de2baf59c19874", "repo": "core", "path": "tests/test_loader.py", "file_name": "test_loader.py", "fun_name": "test_get_application_credentials", "commit_message": "Allow custom integrations to support application_credentials platform (#71129)", "code": "async def test_get_application_credentials(hass):\n \n test_1_integration = _get_test_integration(hass, \"test_1\", True)\n test_2_integration = _get_test_integration_with_application_credentials(\n hass, \"test_2\"\n 
)\n\n with patch(\"homeassistant.loader.async_get_custom_components\") as mock_get:\n mock_get.return_value = {\n \"test_1\": test_1_integration,\n \"test_2\": test_2_integration,\n }\n application_credentials = await loader.async_get_application_credentials(hass)\n assert \"test_2\" in application_credentials\n assert \"test_1\" not in application_credentials\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 118, "n_words": 39, "vocab_size": 31, "complexity": 1, "nloc": 13, "token_counts": 64, "n_ast_nodes": 118, "n_identifiers": 12, "d_id": 98529, "documentation": { "docstring": "Verify that custom components with application_credentials are found.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 130804, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/node.py", "file_name": "node.py", "fun_name": "start_reaper_process", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def start_reaper_process(self):\n \n assert (\n not self.kernel_fate_share\n ), \"a reaper should not be used with kernel fate-sharing\"\n process_info = ray._private.services.start_reaper(fate_share=False)\n assert ray_constants.PROCESS_TYPE_REAPER not in self.all_processes\n if process_info is not None:\n self.all_processes[ray_constants.PROCESS_TYPE_REAPER] = [\n process_info,\n ]\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 124, "n_words": 34, "vocab_size": 28, "complexity": 2, "nloc": 10, "token_counts": 57, "n_ast_nodes": 91, "n_identifiers": 12, "d_id": 29376, "documentation": { "docstring": "\n Start the reaper process.\n\n This must be the first process spawned and should only be called when\n ray processes should be cleaned up if this process dies.\n ", "n_words": 27, "vocab_size": 22, "n_whitespaces": 56, "language": "en" } }, { "id": 113578, "commit_id": "d68c786ff81bad19c04619d6a999ff34aaa724e7", "repo": "nni", "path": "nni/compression/pytorch/base/scheduler.py", "file_name": "scheduler.py", "fun_name": "get_best_result", "commit_message": "[Compression] remove pruning v1 & refactor directory (#5228)", "code": "def get_best_result(self) -> Tuple[int, Module, Dict[str, Dict[str, Tensor]], float, List[Dict]]:\n \n raise NotImplementedError()\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 26, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 9, "token_counts": 36, "n_ast_nodes": 51, "n_identifiers": 11, "d_id": 24963, "documentation": { "docstring": "\n Returns\n -------\n Tuple[int, Module, Dict[str, Dict[str, Tensor]], float, List[Dict]]\n Return the task result that has the best performance,\n inculde task id, the compact model, the masks on the compact model, score and config list used in this task.\n ", "n_words": 38, "vocab_size": 30, "n_whitespaces": 89, "language": "en" } }, { "id": 154457, "commit_id": "a6f47c8e1c27d85fc09926bb35c2f1a65a6d3e79", "repo": "modin", "path": "modin/core/dataframe/algebra/map.py", "file_name": "map.py", "fun_name": "register", "commit_message": "REFACTOR-#4942: remove call method in favor of register due to duplication (#4943)\n\nSigned-off-by: Myachev ", "code": "def register(cls, function, *call_args, 
**call_kwds):\n \n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 12, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 3, "token_counts": 18, "n_ast_nodes": 23, "n_identifiers": 5, "d_id": 35990, "documentation": { "docstring": "\n Build Map operator that will be performed across each partition.\n\n Parameters\n ----------\n function : callable(pandas.DataFrame) -> pandas.DataFrame\n Function that will be applied to the each partition.\n Function takes `pandas.DataFrame` and returns `pandas.DataFrame`\n of the same shape.\n *call_args : args\n Args that will be passed to the returned function.\n **call_kwds : kwargs\n Kwargs that will be passed to the returned function.\n\n Returns\n -------\n callable\n Function that takes query compiler and executes map function.\n ", "n_words": 72, "vocab_size": 44, "n_whitespaces": 209, "language": "en" } }, { "id": 29299, "commit_id": "d90be220d6b687d08153934a51354011a3cb5ca1", "repo": "saleor", "path": "saleor/graphql/product/tests/queries/test_product_types_query.py", "file_name": "test_product_types_query.py", "fun_name": "test_product_types_query_ids_not_exists", "commit_message": "Split test_product.py and test_variant.py into multiple files (#11173)\n\n* Split test_product.py into multiple files\r\n\r\n* Split test_variant.py into multiple files", "code": "def test_product_types_query_ids_not_exists(user_api_client, category):\n query = NOT_EXISTS_IDS_COLLECTIONS_QUERY\n variables = {\"filter\": {\"ids\": [\"fTEJRuFHU6fd2RU=\", \"2XwnQNNhwCdEjhP=\"]}}\n response = user_api_client.post_graphql(query, variables)\n content = get_graphql_content(response, ignore_errors=True)\n message_error = '{\"ids\": [{\"message\": \"Invalid ID specified.\", \"code\": \"\"}]}'\n\n assert len(content[\"errors\"]) == 1\n assert content[\"errors\"][0][\"message\"] == message_error\n assert content[\"data\"][\"productTypes\"] is None\n\n\nQUERY_FILTER_PRODUCT_TYPES = \n\n\n@pytest.mark.parametrize(\n \"search, expected_names\",\n (\n (\"\", [\"The best juices\", \"The best beers\", \"The worst beers\"]),\n (\"best\", [\"The best juices\", \"The best beers\"]),\n (\"worst\", [\"The worst beers\"]),\n (\"average\", []),\n ),\n)", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"search, expected_names\",\n (\n (\"\", [\"The best juices\", \"The best beers\", \"The worst beers\"]),\n (\"best\", [\"The best juices\", \"The best beers\"]),\n (\"worst\", [\"The worst beers\"]),\n (\"average\", []),\n ),\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 130, "n_words": 72, "vocab_size": 52, "complexity": 1, "nloc": 9, "token_counts": 81, "n_ast_nodes": 234, "n_identifiers": 17, "d_id": 5214, "documentation": { "docstring": "\n query($filters: ProductTypeFilterInput) {\n productTypes(first: 10, filter: $filters) {\n edges {\n node {\n name\n }\n }\n }\n }\n", "n_words": 17, "vocab_size": 11, "n_whitespaces": 76, "language": "en" } }, { "id": 208424, "commit_id": "ce62a7a4b2c97bf8a30e8074e8fc18103a0718a0", "repo": "ipython", "path": "IPython/core/magics/script.py", "file_name": "script.py", "fun_name": "script_args", "commit_message": "avoid deprecated get_event_loop\n\nuse our own `async_helpers.get_asyncio_loop` to track the global event loop\n\nscript magics use dedicated background asyncio loop\ninstead of trying to work on the main loop, which may or may not exist\n\n_AsyncIOProxy wraps 
background script objects to transfer awaitables across loops\n\nonly works for coroutine methods, which might be good enough? Works for read, etc.", "code": "def script_args(f):\n \n args = [\n magic_arguments.argument(\n '--out', type=str,\n help=\n ),\n magic_arguments.argument(\n '--err', type=str,\n help=\n ),\n magic_arguments.argument(\n '--bg', action=\"store_true\",\n help=\n ),\n magic_arguments.argument(\n '--proc', type=str,\n help=\n ),\n magic_arguments.argument(\n '--no-raise-error', action=\"store_false\", dest='raise_error',\n help=\n )\n ]\n for arg in args:\n f = arg(f)\n return f\n\n\n@magics_class", "url": "https://github.com/ipython/ipython.git", "language": "Python", "ast_errors": "@magics_class", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 243, "n_words": 42, "vocab_size": 27, "complexity": 2, "nloc": 39, "token_counts": 101, "n_ast_nodes": 174, "n_identifiers": 12, "d_id": 52322, "documentation": { "docstring": "single decorator for adding script argsThe variable in which to store stdout from the script.\n If the script is backgrounded, this will be the stdout *pipe*,\n instead of the stderr text itself and will not be auto closed.\n The variable in which to store stderr from the script.\n If the script is backgrounded, this will be the stderr *pipe*,\n instead of the stderr text itself and will not be autoclosed.\n Whether to run the script in the background.\n If given, the only way to see the output of the command is\n with --out/err.\n The variable in which to store Popen instance.\n This is used only when --bg option is given.\n Whether you should raise an error message in addition to\n a stream on stderr if you get a nonzero exit code.\n ", "n_words": 131, "vocab_size": 67, "n_whitespaces": 274, "language": "en" } }, { "id": 73937, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/permission_policies/collections.py", "file_name": "collections.py", "fun_name": "_descendants_with_perm", "commit_message": "Reformat with black", "code": "def _descendants_with_perm(self, user, action):\n \n # Get the permission object corresponding to this action\n permission = self._get_permission_objects_for_actions([action]).first()\n\n # Get the collections that have a GroupCollectionPermission record\n # for this permission and any of the user's groups;\n # create a list of their paths\n collection_roots = Collection.objects.filter(\n group_permissions__group__in=user.groups.all(),\n group_permissions__permission=permission,\n ).values(\"path\", \"depth\")\n\n if collection_roots:\n # build a filter expression that will filter our model to just those\n # instances in collections with a path that starts with one of the above\n # but excluding the collection on which permission was granted\n collection_path_filter = Q(\n path__startswith=collection_roots[0][\"path\"]\n ) & Q(depth__gt=collection_roots[0][\"depth\"])\n for collection in collection_roots[1:]:\n collection_path_filter = collection_path_filter | (\n Q(path__startswith=collection[\"path\"])\n & Q(depth__gt=collection[\"depth\"])\n )\n return Collection.objects.all().filter(collection_path_filter)\n else:\n # no matching collections\n return Collection.objects.none()\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 391, "n_words": 117, "vocab_size": 78, "complexity": 3, "nloc": 18, "token_counts": 141, "n_ast_nodes": 239, "n_identifiers": 22, "d_id": 16183, 
"documentation": { "docstring": "\n Return a queryset of collections descended from a collection on which this user has\n a GroupCollectionPermission record for this action. Used for actions, like edit and\n delete where the user cannot modify the collection where they are granted permission.\n ", "n_words": 39, "vocab_size": 31, "n_whitespaces": 68, "language": "en" } }, { "id": 169378, "commit_id": "050b3b815604652bc445d2487f6e1fc83eaa8d1f", "repo": "pandas", "path": "pandas/io/pytables.py", "file_name": "pytables.py", "fun_name": "queryables", "commit_message": "TYP: Upgrade mypy to 0.981 (#48871)\n\nCo-authored-by: Matthew Roeschke <10647082+mroeschke@users.noreply.github.com>", "code": "def queryables(self) -> dict[str, Any]:\n \n # mypy doesn't recognize DataFrame._AXIS_NAMES, so we re-write it here\n axis_names = {0: \"index\", 1: \"columns\"}\n\n # compute the values_axes queryables\n d1 = [(a.cname, a) for a in self.index_axes]\n d2 = [(axis_names[axis], None) for axis, values in self.non_index_axes]\n d3 = [\n (v.cname, v) for v in self.values_axes if v.name in set(self.data_columns)\n ]\n\n return dict(d1 + d2 + d3)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 137, "n_words": 63, "vocab_size": 52, "complexity": 5, "nloc": 9, "token_counts": 98, "n_ast_nodes": 151, "n_identifiers": 20, "d_id": 40429, "documentation": { "docstring": "return a dict of the kinds allowable columns for this object", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 115044, "commit_id": "f105dbf028004044995817384413b4cdffd7afe2", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/postgres_handler/postgres_handler.py", "file_name": "postgres_handler.py", "fun_name": "get_tables", "commit_message": "handlers", "code": "def get_tables(self) -> HandlerResponse:\n \n query = \n return self.native_query(query)\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 30, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 16, "token_counts": 18, "n_ast_nodes": 34, "n_identifiers": 5, "d_id": 25335, "documentation": { "docstring": "\n List all tabels in PostgreSQL without the system tables information_schema and pg_catalog\n \n SELECT\n table_schema,\n table_name,\n table_type\n FROM\n information_schema.tables\n WHERE\n table_schema NOT IN ('information_schema', 'pg_catalog')\n and table_type in ('BASE TABLE', 'VIEW')\n ", "n_words": 30, "vocab_size": 27, "n_whitespaces": 176, "language": "en" } }, { "id": 209976, "commit_id": "664f5985c24c2eb7645bf76327bd333fab5f92b4", "repo": "scapy", "path": "scapy/utils.py", "file_name": "utils.py", "fun_name": "decode_locale_str", "commit_message": "Automata: improve memory management (#3743)\n\n* Automata memory improvements (cleanup..)\r\n\r\n* Add docstrings", "code": "def decode_locale_str(x):\n # type: (bytes) -> str\n \n return x.decode(encoding=locale.getlocale()[1] or \"utf-8\", errors=\"replace\")\n\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 21, "n_words": 12, "vocab_size": 12, "complexity": 2, "nloc": 2, "token_counts": 28, "n_ast_nodes": 51, "n_identifiers": 7, "d_id": 52840, "documentation": { "docstring": "\n Decode bytes into a string using the system locale.\n Useful on Windows where it can be unusual (e.g. 
cp1252)\n ", "n_words": 19, "vocab_size": 19, "n_whitespaces": 29, "language": "en" } }, { "id": 226852, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_funnelarea.py", "file_name": "_funnelarea.py", "fun_name": "label0", "commit_message": "switch to black .22", "code": "def label0(self):\n \n return self[\"label0\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 58525, "documentation": { "docstring": "\n Alternate to `labels`. Builds a numeric set of labels. Use with\n `dlabel` where `label0` is the starting label and `dlabel` the\n step.\n\n The 'label0' property is a number and may be specified as:\n - An int or float\n\n Returns\n -------\n int|float\n ", "n_words": 41, "vocab_size": 36, "n_whitespaces": 107, "language": "en" } }, { "id": 5081, "commit_id": "feb0d2f37803929a1ad0c723eea430f8cd6c201f", "repo": "airbyte", "path": "airbyte-integrations/connectors/destination-google-sheets/destination_google_sheets/spreadsheet.py", "file_name": "spreadsheet.py", "fun_name": "spreadsheet", "commit_message": "🎉 New Destination: Implement `Destination Google Sheets` using CDK (#12135)", "code": "def spreadsheet(self) -> Spreadsheet:\n \n return self.client.open_by_key(self.spreadsheet_id)\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 5, "token_counts": 19, "n_ast_nodes": 33, "n_identifiers": 6, "d_id": 720, "documentation": { "docstring": "\n Returns pygsheets.Spreadsheet with opened target spreadsheet by key.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 37507, "commit_id": "57e6464ac9a31156f1c93e59107323e6ec01309e", "repo": "transformers", "path": "src/transformers/testing_utils.py", "file_name": "testing_utils.py", "fun_name": "require_sentencepiece", "commit_message": "Update all require decorators to use skipUnless when possible (#16999)", "code": "def require_sentencepiece(test_case):\n \n return unittest.skipUnless(is_sentencepiece_available(), \"test requires SentencePiece\")(test_case)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 37, "n_identifiers": 5, "d_id": 6812, "documentation": { "docstring": "\n Decorator marking a test that requires SentencePiece. 
These tests are skipped when SentencePiece isn't installed.\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 22, "language": "en" } }, { "id": 94417, "commit_id": "f5e5a3b1ed97383e0699aff9eb0363e9eb5db479", "repo": "sentry", "path": "tests/sentry/integrations/jira/test_integration.py", "file_name": "test_integration.py", "fun_name": "test_migrate_plugin", "commit_message": "feat(Jira): Plugin issue migration endpoint (#37577)\n\n* feat(jira): Plugin issue migration endpoint", "code": "def test_migrate_plugin(self):\n \n project2 = self.create_project(\n name=\"hellbar\", organization=self.organization, teams=[self.team]\n )\n plugin2 = JiraPlugin()\n plugin2.set_option(\"enabled\", True, project2)\n plugin2.set_option(\"default_project\", \"BAR\", project2)\n plugin2.set_option(\"instance_url\", \"https://example.atlassian.net\", project2)\n\n group = self.create_group(message=\"Hello world\", culprit=\"foo.bar\")\n plugin_issue = GroupMeta.objects.create(\n key=f\"{self.plugin.slug}:tid\", group_id=group.id, value=\"SEN-1\"\n )\n group2 = self.create_group(message=\"Hello world\", culprit=\"foo.bar\")\n plugin2_issue = GroupMeta.objects.create(\n key=f\"{self.plugin.slug}:tid\", group_id=group2.id, value=\"BAR-1\"\n )\n org_integration = OrganizationIntegration.objects.get(integration_id=self.integration.id)\n org_integration.config.update({\"issues_ignored_fields\": [\"reporter\", \"test\"]})\n org_integration.save()\n\n with self.tasks():\n self.installation.migrate_issues()\n\n assert ExternalIssue.objects.filter(\n organization_id=self.organization.id,\n integration_id=self.integration.id,\n key=plugin_issue.value,\n ).exists()\n assert ExternalIssue.objects.filter(\n organization_id=self.organization.id,\n integration_id=self.integration.id,\n key=plugin2_issue.value,\n ).exists()\n assert not GroupMeta.objects.filter(\n key=f\"{self.plugin.slug}:tid\", group_id=group.id, value=\"SEN-1\"\n ).exists()\n assert not GroupMeta.objects.filter(\n key=f\"{self.plugin.slug}:tid\", group_id=group.id, value=\"BAR-1\"\n ).exists()\n\n oi = OrganizationIntegration.objects.get(integration_id=self.integration.id)\n assert len(oi.config[\"issues_ignored_fields\"]) == 4\n\n assert self.plugin.get_option(\"enabled\", self.project) is False\n assert plugin2.get_option(\"enabled\", project2) is False\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 433, "n_words": 98, "vocab_size": 58, "complexity": 1, "nloc": 41, "token_counts": 366, "n_ast_nodes": 636, "n_identifiers": 46, "d_id": 19084, "documentation": { "docstring": "Test that 2 projects with the Jira plugin enabled that each have an issue created\n from the plugin are migrated along with the ignored fields\n ", "n_words": 25, "vocab_size": 20, "n_whitespaces": 39, "language": "en" } }, { "id": 53448, "commit_id": "cb7814344ff2e34bafbd3a0c78e1c1ff41bb74c8", "repo": "prefect", "path": "src/prefect/cli/profile.py", "file_name": "profile.py", "fun_name": "inspect", "commit_message": "Add `prefect profile set/unset/inspect/ls`", "code": "def inspect():\n \n profile = prefect.context.get_profile_context()\n name, env = profile.name, profile.env\n console.out(toml.dumps({name: env}).strip())\n\n\n@profile_app.command()", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "@profile_app.command()", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 24, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 4, 
"token_counts": 44, "n_ast_nodes": 86, "n_identifiers": 14, "d_id": 10811, "documentation": { "docstring": "\n View settings in the current profile.\n\n Use `prefect --profile profile inspect` to get settings for another profile.\n ", "n_words": 18, "vocab_size": 16, "n_whitespaces": 28, "language": "en" } }, { "id": 46542, "commit_id": "2f5a567977e1219cab16c2548825a1b9eba07ab3", "repo": "airflow", "path": "airflow/migrations/versions/0106_909884dea523_update_migration_for_fab_tables_to_add_missing_constraints.py", "file_name": "0106_909884dea523_update_migration_for_fab_tables_to_add_missing_constraints.py", "fun_name": "downgrade", "commit_message": "Use Airflow.Base.metadata in FAB models (#22353)\n\nSince FAB models are now in airflow, it makes sense to monitor changes\r\nin them. Therefore we use Airflow.models.base.Base.metadata for FAB models", "code": "def downgrade():\n \n conn = op.get_bind()\n if conn.dialect.name == 'sqlite':\n op.execute('PRAGMA foreign_keys=OFF')\n with op.batch_alter_table('ab_view_menu', schema=None) as batch_op:\n batch_op.drop_constraint('ab_view_menu_name_uq', type_='unique')\n op.execute('PRAGMA foreign_keys=ON')\n elif conn.dialect.name == 'mysql':\n with op.batch_alter_table('ab_user', schema=None) as batch_op:\n batch_op.alter_column('email', existing_type=sa.String(256), nullable=True)\n batch_op.alter_column('username', existing_type=sa.String(256), nullable=True, unique=True)\n with op.batch_alter_table('ab_register_user', schema=None) as batch_op:\n batch_op.alter_column('email', existing_type=sa.String(256), nullable=True)\n batch_op.alter_column('username', existing_type=sa.String(256), nullable=True, unique=True)\n elif conn.dialect.name == 'mssql':\n with op.batch_alter_table('ab_register_user') as batch_op:\n # Drop the unique constraint on username and email\n constraints = get_mssql_table_constraints(conn, 'ab_register_user')\n for k, _ in constraints.get('UNIQUE').items():\n batch_op.drop_constraint(k, type_='unique')\n batch_op.alter_column('username', existing_type=sa.String(256), nullable=False, unique=True)\n batch_op.create_unique_constraint(None, ['username'])\n batch_op.alter_column('email', existing_type=sa.String(256), nullable=False, unique=True)\n with op.batch_alter_table('ab_user') as batch_op:\n # Drop the unique constraint on username and email\n constraints = get_mssql_table_constraints(conn, 'ab_user')\n for k, _ in constraints.get('UNIQUE').items():\n batch_op.drop_constraint(k, type_='unique')\n batch_op.alter_column('username', existing_type=sa.String(256), nullable=True)\n batch_op.create_unique_constraint(None, ['username'])\n batch_op.alter_column('email', existing_type=sa.String(256), nullable=True, unique=True)\n batch_op.create_unique_constraint(None, ['email'])\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 413, "n_words": 121, "vocab_size": 55, "complexity": 6, "nloc": 30, "token_counts": 393, "n_ast_nodes": 669, "n_identifiers": 25, "d_id": 8920, "documentation": { "docstring": "Unapply Update migration for FAB tables to add missing constraints", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 185720, "commit_id": "39a764f49fff7ec3363b8ea25fce3fbf1b67ca58", "repo": "textual", "path": "src/textual/app.py", "file_name": "app.py", "fun_name": "post_display_hook", "commit_message": "call later", "code": "def post_display_hook(self) -> None:\n \n", "url": 
"https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 11, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 8, "n_ast_nodes": 16, "n_identifiers": 2, "d_id": 45129, "documentation": { "docstring": "Called immediately after a display is done. Used in tests.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 49801, "commit_id": "f4d6e64cdc132ae868699a0ba442f4ab1d304a14", "repo": "PaddleHub", "path": "modules/image/text_to_image/disco_diffusion_cnclip_vitb16/reverse_diffusion/model/losses.py", "file_name": "losses.py", "fun_name": "approx_standard_normal_cdf", "commit_message": "add disco_diffusion_cnclip_vitb16 module", "code": "def approx_standard_normal_cdf(x):\n \n return 0.5 * (1.0 + paddle.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * paddle.pow(x, 3))))\n\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 23, "n_words": 17, "vocab_size": 14, "complexity": 1, "nloc": 2, "token_counts": 51, "n_ast_nodes": 69, "n_identifiers": 8, "d_id": 9921, "documentation": { "docstring": "\n A fast approximation of the cumulative distribution function of the\n standard normal.\n ", "n_words": 12, "vocab_size": 10, "n_whitespaces": 22, "language": "en" } }, { "id": 284494, "commit_id": "54a1b6f545a0016c576e9e00eef5c003d229dacf", "repo": "OpenBBTerminal", "path": "openbb_terminal/stocks/options/hedge/hedge_controller.py", "file_name": "hedge_controller.py", "fun_name": "print_help", "commit_message": "Feature/hedge (#1768)\n\n* [Bug] Incorrect log for reddit keys. #1733 fix\r\n\r\n* Create new feature-hedge\r\n\r\n* Significantly improve code of hedge menu\r\n\r\n* More robust\r\n\r\n* Robustness\r\n\r\n* Fix tests\r\n\r\n* Fix can't multiply sequence by non-int of type 'numpy.float64' error\r\n\r\n* Temporary fix of singular matrix error. 
Return first feasible solution\r\n\r\n* Update Hugo Documentation\r\n\r\n* Combining menus and cleaning up code\r\n\r\n* Tidy up call_exp\r\n\r\n* Update tests Round 1\r\n\r\n* Update tests Round 2\r\n\r\n* Fix linting error\r\n\r\n* Fix linting?\r\n\r\n* Fixed glitch\r\n\r\nCo-authored-by: JerBouma \r\nCo-authored-by: James Maslek \r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: colin99d \r\nCo-authored-by: didierlopes.eth ", "code": "def print_help(self):\n \n has_portfolio_start = \"\" if \"Delta\" in self.greeks[\"Portfolio\"] else \"[unvl]\"\n has_portfolio_end = \"\" if \"Delta\" in self.greeks[\"Portfolio\"] else \"[/unvl]\"\n has_option_start = (\n \"\"\n if \"Delta\" in self.greeks[\"Option A\"] or \"Delta\" in self.greeks[\"Option B\"]\n else \"[unvl]\"\n )\n has_option_end = (\n \"\"\n if \"Delta\" in self.greeks[\"Option A\"] or \"Delta\" in self.greeks[\"Option B\"]\n else \"[/unvl]\"\n )\n help_text = f\n console.print(text=help_text, menu=\"Stocks - Options - Hedge\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 192, "n_words": 63, "vocab_size": 28, "complexity": 7, "nloc": 28, "token_counts": 100, "n_ast_nodes": 235, "n_identifiers": 15, "d_id": 84760, "documentation": { "docstring": "Print help\n[param]Ticker: [/param]{self.ticker or None}\n[param]Expiry: [/param]{self.expiration or None}\n[cmds]\n pick pick the underlying asset position\n[/cmds][param]\nUnderlying Asset Position: [/param]{self.underlying_asset_position}\n[cmds]\n list show the available strike prices for calls and puts{has_portfolio_start}\n add add an option to the list of options{has_portfolio_end}{has_option_start}\n rmv remove an option from the list of options\n sop show selected options and neutral portfolio weights\n plot show the option payoff diagram[/cmds]{has_option_end}\n ", "n_words": 65, "vocab_size": 46, "n_whitespaces": 141, "language": "en" } }, { "id": 20532, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pyparsing/actions.py", "file_name": "actions.py", "fun_name": "with_attribute", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def with_attribute(*args, **attr_dict):\n \n
 if args:\n attrs = args[:]\n else:\n attrs = attr_dict.items()\n attrs = [(k, v) for k, v in attrs]\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 139, "n_words": 39, "vocab_size": 34, "complexity": 3, "nloc": 8, "token_counts": 47, "n_ast_nodes": 71, "n_identifiers": 7, "d_id": 3414, "documentation": { "docstring": "\n Helper to create a validating parse action to be used with start\n tags created with :class:`make_xml_tags` or\n :class:`make_html_tags`. Use ``with_attribute`` to qualify\n a starting tag with a required attribute value, to avoid false\n matches on common tags such as ``<TD>`` or ``<DIV>``.\n\n Call ``with_attribute`` with a series of attribute names and\n values. Specify the list of filter attributes names and values as:\n\n - keyword arguments, as in ``(align=\"right\")``, or\n - as an explicit dict with ``**`` operator, when an attribute\n name is also a Python reserved word, as in ``**{\"class\":\"Customer\", \"align\":\"right\"}``\n - a list of name-value tuples, as in ``((\"ns1:class\", \"Customer\"), (\"ns2:align\", \"right\"))``\n\n For attribute names with a namespace prefix, you must use the second\n form. Attribute names are matched insensitive to upper/lower case.\n\n If just testing for ``class`` (with or without a namespace), use\n :class:`with_class`.\n\n To verify that the attribute exists, but without specifying a value,\n pass ``with_attribute.ANY_VALUE`` as the value.\n\n Example::\n\n html = '''\n <div>\n Some text\n <div type=\"grid\">1 4 0 1 0</div>\n <div type=\"graph\">1,3 2,3 1,1</div>\n <div>this has no type</div>\n </div>\n '''\n div,div_end = make_html_tags(\"div\")\n\n # only match div tag having a type attribute with value \"grid\"\n div_grid = div().set_parse_action(with_attribute(type=\"grid\"))\n grid_expr = div_grid + SkipTo(div | div_end)(\"body\")\n for grid_header in grid_expr.search_string(html):\n print(grid_header.body)\n\n # construct a match with any div tag having a type attribute, regardless of the value\n div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE))\n div_expr = div_any_type + SkipTo(div | div_end)(\"body\")\n for div_header in div_expr.search_string(html):\n print(div_header.body)\n\n prints::\n\n 1 4 0 1 0\n\n 1 4 0 1 0\n 1,3 2,3 1,1\n ", "n_words": 230, "vocab_size": 143, "n_whitespaces": 408, "language": "en" } }, { "id": 111758, "commit_id": "8b2eb425274cdb4537fbce4a315aec12a378d6db", "repo": "nni", "path": "nni/retiarii/oneshot/pytorch/base_lightning.py", "file_name": "base_lightning.py", "fun_name": "architecture_optimizers", "commit_message": "Lightning implementation for retiarii oneshot nas (#4479)", "code": "def architecture_optimizers(self):\n \n opts = self.optimizers()\n if isinstance(opts,list):\n # pylint: disable=unsubscriptable-object\n arc_opts = opts[:self.arc_optim_count]\n if len(arc_opts) == 1:\n arc_opts = arc_opts[0]\n return arc_opts\n # If there is only 1 optimizer and it is the architecture optimizer\n if self.arc_optim_count == 1:\n return opts\n return None\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 155, "n_words": 43, "vocab_size": 29, "complexity": 4, "nloc": 10, "token_counts": 57, "n_ast_nodes": 95, "n_identifiers": 9, "d_id": 24481, "documentation": { "docstring": "\n Get architecture optimizers from all optimizers. Use this to get your architecture optimizers in ``training_step``.\n\n Returns\n ----------\n opts : List[Optimizer], Optimizer, None\n Architecture optimizers defined in ``configure_architecture_optimizers``. 
This will be None if there is no\n architecture optimizers.\n ", "n_words": 37, "vocab_size": 30, "n_whitespaces": 95, "language": "en" } }, { "id": 246132, "commit_id": "bf60da1a60096fac5fb778b732ff2214862ac808", "repo": "synapse", "path": "tests/rest/client/test_profile.py", "file_name": "test_profile.py", "fun_name": "test_avatar_allowed_mime_type_global", "commit_message": "Configurable limits on avatars (#11846)\n\nOnly allow files which file size and content types match configured\r\nlimits to be set as avatar.\r\n\r\nMost of the inspiration from the non-test code comes from matrix-org/synapse-dinsic#19", "code": "def test_avatar_allowed_mime_type_global(self):\n \n self._setup_local_files(\n {\n \"good\": {\"mimetype\": \"image/png\"},\n \"bad\": {\"mimetype\": \"application/octet-stream\"},\n }\n )\n\n channel = self.make_request(\n \"PUT\",\n f\"/profile/{self.owner}/avatar_url\",\n content={\"avatar_url\": \"mxc://test/bad\"},\n access_token=self.owner_tok,\n )\n self.assertEqual(channel.code, 403, channel.result)\n self.assertEqual(\n channel.json_body[\"errcode\"], Codes.FORBIDDEN, channel.json_body\n )\n\n channel = self.make_request(\n \"PUT\",\n f\"/profile/{self.owner}/avatar_url\",\n content={\"avatar_url\": \"mxc://test/good\"},\n access_token=self.owner_tok,\n )\n self.assertEqual(channel.code, 200, channel.result)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 269, "n_words": 41, "vocab_size": 28, "complexity": 1, "nloc": 24, "token_counts": 128, "n_ast_nodes": 228, "n_identifiers": 15, "d_id": 71033, "documentation": { "docstring": "Tests that the MIME type whitelist for avatars is enforced when updating a\n global profile.\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 29, "language": "en" } }, { "id": 275531, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/optimizer_v2/optimizer_v2.py", "file_name": "optimizer_v2.py", "fun_name": "_distribution_strategy_scope", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _distribution_strategy_scope(self):\n \n if self._distribution_strategy and not tf.distribute.has_strategy():\n with self._distribution_strategy.scope():\n yield self._distribution_strategy.scope()\n else:\n yield\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 71, "n_words": 13, "vocab_size": 12, "complexity": 3, "nloc": 6, "token_counts": 40, "n_ast_nodes": 74, "n_identifiers": 7, "d_id": 81420, "documentation": { "docstring": "Returns the `tf.distribute.Strategy` this optimizer was created under.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 197362, "commit_id": "65be461082dda54c8748922f9c29a19af1279fe1", "repo": "sympy", "path": "sympy/solvers/ode/ode.py", "file_name": "ode.py", "fun_name": "classify_sysode", "commit_message": "Remove abbreviations in documentation", "code": "def classify_sysode(eq, funcs=None, **kwargs):\n r\n\n # Sympify equations and convert iterables of equations into\n # a list of equations", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 27, "n_words": 19, "vocab_size": 15, "complexity": 31, "nloc": 154, "token_counts": 559, "n_ast_nodes": 24, "n_identifiers": 4, "d_id": 48505, "documentation": { 
"docstring": "\n Returns a dictionary of parameter names and values that define the system\n of ordinary differential equations in ``eq``.\n The parameters are further used in\n :py:meth:`~sympy.solvers.ode.dsolve` for solving that system.\n\n Some parameter names and values are:\n\n 'is_linear' (boolean), which tells whether the given system is linear.\n Note that \"linear\" here refers to the operator: terms such as ``x*diff(x,t)`` are\n nonlinear, whereas terms like ``sin(t)*diff(x,t)`` are still linear operators.\n\n 'func' (list) contains the :py:class:`~sympy.core.function.Function`s that\n appear with a derivative in the ODE, i.e. those that we are trying to solve\n the ODE for.\n\n 'order' (dict) with the maximum derivative for each element of the 'func'\n parameter.\n\n 'func_coeff' (dict or Matrix) with the coefficient for each triple ``(equation number,\n function, order)```. The coefficients are those subexpressions that do not\n appear in 'func', and hence can be considered constant for purposes of ODE\n solving. The value of this parameter can also be a Matrix if the system of ODEs are\n linear first order of the form X' = AX where X is the vector of dependent variables.\n Here, this function returns the coefficient matrix A.\n\n 'eq' (list) with the equations from ``eq``, sympified and transformed into\n expressions (we are solving for these expressions to be zero).\n\n 'no_of_equations' (int) is the number of equations (same as ``len(eq)``).\n\n 'type_of_equation' (string) is an internal classification of the type of\n ODE.\n\n 'is_constant' (boolean), which tells if the system of ODEs is constant coefficient\n or not. This key is temporary addition for now and is in the match dict only when\n the system of ODEs is linear first order constant coefficient homogeneous. So, this\n key's value is True for now if it is available else it does not exist.\n\n 'is_homogeneous' (boolean), which tells if the system of ODEs is homogeneous. Like the\n key 'is_constant', this key is a temporary addition and it is True since this key value\n is available only when the system is linear first order constant coefficient homogeneous.\n\n References\n ==========\n -http://eqworld.ipmnet.ru/en/solutions/sysode/sode-toc1.htm\n -A. D. Polyanin and A. V. 
Manzhirov, Handbook of Mathematics for Engineers and Scientists\n\n Examples\n ========\n\n >>> from sympy import Function, Eq, symbols, diff\n >>> from sympy.solvers.ode.ode import classify_sysode\n >>> from sympy.abc import t\n >>> f, x, y = symbols('f, x, y', cls=Function)\n >>> k, l, m, n = symbols('k, l, m, n', Integer=True)\n >>> x1 = diff(x(t), t) ; y1 = diff(y(t), t)\n >>> x2 = diff(x(t), t, t) ; y2 = diff(y(t), t, t)\n >>> eq = (Eq(x1, 12*x(t) - 6*y(t)), Eq(y1, 11*x(t) + 3*y(t)))\n >>> classify_sysode(eq)\n {'eq': [-12*x(t) + 6*y(t) + Derivative(x(t), t), -11*x(t) - 3*y(t) + Derivative(y(t), t)], 'func': [x(t), y(t)],\n 'func_coeff': {(0, x(t), 0): -12, (0, x(t), 1): 1, (0, y(t), 0): 6, (0, y(t), 1): 0, (1, x(t), 0): -11, (1, x(t), 1): 0, (1, y(t), 0): -3, (1, y(t), 1): 1}, 'is_linear': True, 'no_of_equation': 2, 'order': {x(t): 1, y(t): 1}, 'type_of_equation': None}\n >>> eq = (Eq(diff(x(t),t), 5*t*x(t) + t**2*y(t) + 2), Eq(diff(y(t),t), -t**2*x(t) + 5*t*y(t)))\n >>> classify_sysode(eq)\n {'eq': [-t**2*y(t) - 5*t*x(t) + Derivative(x(t), t) - 2, t**2*x(t) - 5*t*y(t) + Derivative(y(t), t)],\n 'func': [x(t), y(t)], 'func_coeff': {(0, x(t), 0): -5*t, (0, x(t), 1): 1, (0, y(t), 0): -t**2, (0, y(t), 1): 0,\n (1, x(t), 0): t**2, (1, x(t), 1): 0, (1, y(t), 0): -5*t, (1, y(t), 1): 1}, 'is_linear': True, 'no_of_equation': 2,\n 'order': {x(t): 1, y(t): 1}, 'type_of_equation': None}\n\n ", "n_words": 551, "vocab_size": 270, "n_whitespaces": 723, "language": "en" } }, { "id": 223640, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/_policybase.py", "file_name": "_policybase.py", "fun_name": "header_store_parse", "commit_message": "add python 3.10.4 for windows", "code": "def header_store_parse(self, name, value):\n \n raise NotImplementedError\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 20, "n_identifiers": 5, "d_id": 57024, "documentation": { "docstring": "Given the header name and the value provided by the application\n program, return the (name, value) that should be stored in the model.\n ", "n_words": 23, "vocab_size": 19, "n_whitespaces": 37, "language": "en" } }, { "id": 297540, "commit_id": "d9903c4cf985381002de8b923815b05dd24e0651", "repo": "core", "path": "homeassistant/components/snmp/sensor.py", "file_name": "sensor.py", "fun_name": "async_update", "commit_message": "Bump `brother` and `pysnmplib` (#84107)\n\n* Bump brother version\r\n\r\n* Bump pysnmplib version\r\n\r\n* Update sensor platform\r\n\r\n* Update switch platform\r\n\r\n* Update tests\r\n\r\n* Bump brother\r\n\r\nCo-authored-by: J. 
Nick Koston ", "code": "async def async_update(self):\n \n\n get_result = await getCmd(\n *self._request_args, ObjectType(ObjectIdentity(self._baseoid))\n )\n errindication, errstatus, errindex, restable = await get_result\n\n if errindication and not self._accept_errors:\n _LOGGER.error(\"SNMP error: %s\", errindication)\n elif errstatus and not self._accept_errors:\n _LOGGER.error(\n \"SNMP error: %s at %s\",\n errstatus.prettyPrint(),\n errindex and restable[-1][int(errindex) - 1] or \"?\",\n )\n elif (errindication or errstatus) and self._accept_errors:\n self.value = self._default_value\n else:\n for resrow in restable:\n self.value = resrow[-1].prettyPrint()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 245, "n_words": 63, "vocab_size": 46, "complexity": 11, "nloc": 18, "token_counts": 129, "n_ast_nodes": 210, "n_identifiers": 20, "d_id": 96508, "documentation": { "docstring": "Get the latest data from the remote SNMP capable host.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 153774, "commit_id": "3d4404e9d9a9b2a3327f8aee664a8e71ac1f18b8", "repo": "modin", "path": "modin/experimental/batch/pipeline.py", "file_name": "pipeline.py", "fun_name": "update_df", "commit_message": "FEAT-#4412: Add Batch Pipeline API to Modin (#4452)\n\nCo-authored-by: Yaroslav Igoshev \r\nCo-authored-by: Mahesh Vashishtha \r\nSigned-off-by: Rehan Durrani ", "code": "def update_df(self, df):\n \n if get_current_execution() != \"PandasOnRay\" or (\n not isinstance(df._query_compiler._modin_frame, PandasOnRayDataframe)\n ): # pragma: no cover\n ErrorMessage.not_implemented(\n \"Batch Pipeline API is only implemented for `PandasOnRay` execution.\"\n )\n self.df = df\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 108, "n_words": 31, "vocab_size": 31, "complexity": 3, "nloc": 8, "token_counts": 40, "n_ast_nodes": 71, "n_identifiers": 10, "d_id": 35598, "documentation": { "docstring": "\n Update the dataframe to perform this pipeline on.\n\n Parameters\n ----------\n df : modin.pandas.DataFrame\n The new dataframe to perform this pipeline on.\n ", "n_words": 21, "vocab_size": 15, "n_whitespaces": 68, "language": "en" } }, { "id": 211430, "commit_id": "d4e34fe165c09db65fd00113708be1b711ac957c", "repo": "PaddleDetection", "path": "ppdet/modeling/losses/pose3d_loss.py", "file_name": "pose3d_loss.py", "fun_name": "forward", "commit_message": "pose3d metro modeling (#6612)\n\n* pose3d metro modeling\r\n\r\n* delete extra comments", "code": "def forward(self, pred3d, pred2d, inputs):\n \n gt_3d_joints = inputs['joints_3d']\n gt_2d_joints = inputs['joints_2d']\n has_3d_joints = inputs['has_3d_joints']\n has_2d_joints = inputs['has_2d_joints']\n\n loss_3d = mpjpe(pred3d, gt_3d_joints, has_3d_joints)\n loss_2d = keypoint_2d_loss(self.criterion_2dpose, pred2d, gt_2d_joints,\n has_2d_joints)\n return self.weight_3d * loss_3d + self.weight_2d * loss_2d\n\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 126, "n_words": 36, "vocab_size": 27, "complexity": 1, "nloc": 9, "token_counts": 72, "n_ast_nodes": 114, "n_identifiers": 16, "d_id": 53094, "documentation": { "docstring": "\n mpjpe: mpjpe loss between 3d joints\n keypoint_2d_loss: 2d joints loss compute by criterion_2dpose\n 
", "n_words": 13, "vocab_size": 11, "n_whitespaces": 35, "language": "en" } }, { "id": 153562, "commit_id": "605efa618e7994681f57b11d04d417f353ef8d50", "repo": "modin", "path": "modin/pandas/base.py", "file_name": "base.py", "fun_name": "swapaxes", "commit_message": "DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333)\n\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: Alexander Myskov ", "code": "def swapaxes(self, axis1, axis2, copy=True): # noqa: PR01, RT01, D200\n \n axis1 = self._get_axis_number(axis1)\n axis2 = self._get_axis_number(axis2)\n if axis1 != axis2:\n return self.transpose()\n if copy:\n return self.copy()\n return self\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 93, "n_words": 28, "vocab_size": 23, "complexity": 3, "nloc": 8, "token_counts": 52, "n_ast_nodes": 85, "n_identifiers": 7, "d_id": 35443, "documentation": { "docstring": "\n Interchange axes and swap values axes appropriately.\n ", "n_words": 7, "vocab_size": 6, "n_whitespaces": 22, "language": "en" } }, { "id": 160145, "commit_id": "729ad4f92420231e2a7009b3223c6c7620b8b808", "repo": "numpy", "path": "numpy/f2py/tests/test_f2py2e.py", "file_name": "test_f2py2e.py", "fun_name": "test_shortlatex", "commit_message": "TST: Initialize f2py2e tests of the F2PY CLI (#20668)\n\nIncreases F2PY coverage by around 15 percent. For the CLI itself it covers the major features (around 70 percent), with the exception of mostly numpy.distutils stuff.\r\n\r\nMore importantly, sets the groundwork for #20056, in that passing the same testsuite should indicate feature parity.", "code": "def test_shortlatex(capfd, hello_world_f90, monkeypatch):\n \n ipath = Path(hello_world_f90)\n mname = \"blah\"\n monkeypatch.setattr(\n sys,\n \"argv\",\n f'f2py -m {mname} {ipath} --latex-doc --short-latex'.split(),\n )\n\n with util.switchdir(ipath.parent):\n f2pycli()\n out, _ = capfd.readouterr()\n assert \"Documentation is saved to file\" in out\n with Path(f\"./{mname}module.tex\").open() as otex:\n assert \"\\\\documentclass\" not in otex.read()\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 122, "n_words": 44, "vocab_size": 39, "complexity": 1, "nloc": 14, "token_counts": 83, "n_ast_nodes": 161, "n_identifiers": 20, "d_id": 38517, "documentation": { "docstring": "Ensures that truncated documentation is written out\n\n TODO: Test to ensure this has no effect without --latex-doc\n CLI :: --latex-doc --short-latex\n ", "n_words": 21, "vocab_size": 20, "n_whitespaces": 30, "language": "en" } }, { "id": 108111, "commit_id": "80e672e0700fa8a268aed1bdefedbd0e493e91a7", "repo": "matplotlib", "path": "lib/matplotlib/axes/_axes.py", "file_name": "_axes.py", "fun_name": "inset_axes", "commit_message": "enable Axes subclass creation by Axes.inset_axes", "code": "def inset_axes(self, bounds, *, transform=None, zorder=5, **kwargs):\n \n if transform is None:\n transform = self.transAxes\n kwargs.setdefault('label', 'inset_axes')\n\n # This puts the rectangle into figure-relative coordinates.\n inset_locator = _TransformedBoundsLocator(bounds, transform)\n bounds = inset_locator(self, None).bounds\n projection_class, pkw = self.figure._process_projection_requirements(\n bounds, **kwargs)\n inset_ax = projection_class(self.figure, bounds, zorder=zorder, **pkw)\n\n # this locator lets the axes move if in data coordinates.\n # it gets called in 
`ax.apply_aspect() (of all places)\n inset_ax.set_axes_locator(inset_locator)\n\n self.add_child_axes(inset_ax)\n\n return inset_ax\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 185, "n_words": 68, "vocab_size": 54, "complexity": 2, "nloc": 12, "token_counts": 103, "n_ast_nodes": 165, "n_identifiers": 17, "d_id": 23066, "documentation": { "docstring": "\n Add a child inset Axes to this existing Axes.\n\n Warnings\n --------\n This method is experimental as of 3.0, and the API may change.\n\n Parameters\n ----------\n bounds : [x0, y0, width, height]\n Lower-left corner of inset Axes, and its width and height.\n\n transform : `.Transform`\n Defaults to `ax.transAxes`, i.e. the units of *rect* are in\n Axes-relative coordinates.\n\n projection : {None, 'aitoff', 'hammer', 'lambert', 'mollweide', \\\n'polar', 'rectilinear', str}, optional\n The projection type of the inset `~.axes.Axes`. *str* is the name\n of a custom projection, see `~matplotlib.projections`. The default\n None results in a 'rectilinear' projection.\n\n polar : bool, default: False\n If True, equivalent to projection='polar'.\n\n axes_class : subclass type of `~.axes.Axes`, optional\n The `.axes.Axes` subclass that is instantiated. This parameter\n is incompatible with *projection* and *polar*. See\n :ref:`axisartist_users-guide-index` for examples.\n\n zorder : number\n Defaults to 5 (same as `.Axes.legend`). Adjust higher or lower\n to change whether it is above or below data plotted on the\n parent Axes.\n\n **kwargs\n Other keyword arguments are passed on to the inset Axes class.\n\n Returns\n -------\n ax\n The created `~.axes.Axes` instance.\n\n Examples\n --------\n This example makes two inset Axes, the first is in Axes-relative\n coordinates, and the second in data-coordinates::\n\n fig, ax = plt.subplots()\n ax.plot(range(10))\n axin1 = ax.inset_axes([0.8, 0.1, 0.15, 0.15])\n axin2 = ax.inset_axes(\n [5, 7, 2.3, 2.3], transform=ax.transData)\n\n ", "n_words": 212, "vocab_size": 150, "n_whitespaces": 590, "language": "en" } }, { "id": 95411, "commit_id": "5efa5eeb57ae6ddf740256e08ce3b9ff4ec98eaa", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_codeowners_associations.py", "file_name": "test_organization_codeowners_associations.py", "fun_name": "test_no_access", "commit_message": "feat(codeowners): Add endpoint to view code owner associations per organization (#31030)\n\nSee API-2186\r\n\r\nSo the earlier version of this PR just had the endpoint return the entire serialized ProjectCodeOwners for an organization. While that works, the intention behind this feature is to read and use the associations, so sending the raw codeowners file, and timestamps are unnecessary and increase the latency with such large payloads, especially for larger orgs.\r\n\r\n@NisanthanNanthakumar suggested limiting what the endpoint returns to just what the feature will need on the frontend, and making the endpoint name a bit more specific. 
OrganizationCodeOwners -> OrganizationCodeOwnersAssocations.\r\n\r\nAlong with this refactor, tests have been updated.", "code": "def test_no_access(self):\n \n member = self.create_user(\"hernando@life.com\")\n self.create_member(user=member, organization=self.organization, role=\"member\")\n self.login_as(member)\n self.get_error_response(self.organization.slug, status=status.HTTP_403_FORBIDDEN)\n\n admin = self.create_user(\"sean@life.com\")\n self.create_member(user=admin, organization=self.organization, role=\"admin\")\n self.login_as(admin)\n self.get_success_response(self.organization.slug, status=status.HTTP_200_OK)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 83, "n_words": 20, "vocab_size": 18, "complexity": 1, "nloc": 9, "token_counts": 102, "n_ast_nodes": 167, "n_identifiers": 16, "d_id": 19206, "documentation": { "docstring": "\n Tests that users without the 'org:integrations' scope (i.e. Members) cannot access this endpoint.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 28, "language": "en" } }, { "id": 260917, "commit_id": "c18460f78441f11b3e6c15c12238695fcfe3c872", "repo": "scikit-learn", "path": "sklearn/ensemble/_stacking.py", "file_name": "_stacking.py", "fun_name": "predict_proba", "commit_message": "EHN Add multilabel classification support for `StackingClassifier` (#24146)\n\n* Add stacking multilabel functionality\n\n* Add underscore to a class attr\n\n* Remove model from base estimator in test_stacking\n\n* Remove scale in train/test split in test_stacking_classifier_multilabel\n\n* Add stack_method as a test parameter, change RandomForestClassifier to KNeighborsClassifier in test\n\n* Update Changelog\n\n* fix doc typos\n\n* predict_proba output will be concatenate this list in an array of shape n_samples, n_outputs * n_classes - 1. 
Update test.\n\n* Update sklearn/ensemble/_stacking.py\n\nCo-authored-by: Guillaume Lemaitre \n\n* Update doc/whats_new/v1.0.rst\n\nCo-authored-by: Guillaume Lemaitre \n\n* update whats_new\n\n* add passthrough test\n\n* update whats_new with current PR\n\n* Apply suggestions from code review\n\nCo-authored-by: Julien Jerphanion \n\n* update tests\n\n* Apply suggestion to update comments on `concatenate`\n\nCo-authored-by: Julien Jerphanion \n\n* parametrized the two tests into one\n\n* parametrized the two tests into one\n\n* strip the mysterious trailing _r\n\n* fix multilabel list scenario\n\n* add Guillaume's recommendations\n\n* add test for\n\n* some fix\n\n* split tests\n\n* fix flake8\n\n* add suggestions\n\n* Trigger CI\n\n* remove multiclass-multioutput from comments and docstrings\n\nCo-authored-by: Nicolas \nCo-authored-by: Nestor Navarro \nCo-authored-by: Nestor Navarro \nCo-authored-by: Guillaume Lemaitre \nCo-authored-by: Julien Jerphanion ", "code": "def predict_proba(self, X):\n \n check_is_fitted(self)\n y_pred = self.final_estimator_.predict_proba(self.transform(X))\n\n if isinstance(self._label_encoder, list):\n # Handle the multilabel-indicator cases\n y_pred = np.array([preds[:, 0] for preds in y_pred]).T\n return y_pred\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 82, "n_words": 25, "vocab_size": 22, "complexity": 3, "nloc": 6, "token_counts": 60, "n_ast_nodes": 97, "n_identifiers": 14, "d_id": 76564, "documentation": { "docstring": "Predict class probabilities for `X` using the final estimator.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vectors, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n Returns\n -------\n probabilities : ndarray of shape (n_samples, n_classes) or \\\n list of ndarray of shape (n_output,)\n The class probabilities of the input samples.\n ", "n_words": 60, "vocab_size": 41, "n_whitespaces": 153, "language": "en" } }, { "id": 129473, "commit_id": "2da2ac52ce3103ddb5192e7a161fec312dcdad53", "repo": "ray", "path": "python/ray/tests/test_output.py", "file_name": "test_output.py", "fun_name": "test_worker_stdout", "commit_message": "Unskipped test_worker_stdout (#21708)", "code": "def test_worker_stdout():\n script = ", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "script = \"\"\"@ray.remote", "n_ast_errors": 2, "ast_levels": 6, "n_whitespaces": 7, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 22, "token_counts": 96, "n_ast_nodes": 40, "n_identifiers": 7, "d_id": 28963, "documentation": { "docstring": "\nimport ray\nimport sys\n\nray.init(num_cpus=2)\n\n@ray.remote", "n_words": 6, "vocab_size": 5, "n_whitespaces": 2, "language": "en" } }, { "id": 177330, "commit_id": "8a325d26aa7fdd3a72580c4720fa97f971bbefcb", "repo": "networkx", "path": "networkx/linalg/graphmatrix.py", "file_name": "graphmatrix.py", "fun_name": "incidence_matrix", "commit_message": "Use scipy.sparse array datastructure (#6037)\n\n* Use scipy.sparse array datastructure\r\n\r\n* Add reminder to rm wrapper when scipy adds creation fns.\r\n\r\n* Rm mention of np matrix from code comment.\r\n\r\n* Update networkx/algorithms/bipartite/matrix.py\r\n\r\nCo-authored-by: Stefan van der Walt \r\n\r\nCo-authored-by: Ross Barnowski \r\nCo-authored-by: Stefan van der Walt ", "code": "def incidence_matrix(G, nodelist=None, 
edgelist=None, oriented=False, weight=None):\n \n import scipy as sp\n import scipy.sparse # call as sp.sparse\n\n if nodelist is None:\n nodelist = list(G)\n if edgelist is None:\n if G.is_multigraph():\n edgelist = list(G.edges(keys=True))\n else:\n edgelist = list(G.edges())\n A = sp.sparse.lil_array((len(nodelist), len(edgelist)))\n node_index = {node: i for i, node in enumerate(nodelist)}\n for ei, e in enumerate(edgelist):\n (u, v) = e[:2]\n if u == v:\n continue # self loops give zero column\n try:\n ui = node_index[u]\n vi = node_index[v]\n except KeyError as err:\n raise nx.NetworkXError(\n f\"node {u} or {v} in edgelist but not in nodelist\"\n ) from err\n if weight is None:\n wt = 1\n else:\n if G.is_multigraph():\n ekey = e[2]\n wt = G[u][v][ekey].get(weight, 1)\n else:\n wt = G[u][v].get(weight, 1)\n if oriented:\n A[ui, ei] = -wt\n A[vi, ei] = wt\n else:\n A[ui, ei] = wt\n A[vi, ei] = wt\n return A.asformat(\"csc\")\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 458, "n_words": 138, "vocab_size": 86, "complexity": 11, "nloc": 38, "token_counts": 275, "n_ast_nodes": 438, "n_identifiers": 34, "d_id": 42349, "documentation": { "docstring": "Returns incidence matrix of G.\n\n The incidence matrix assigns each row to a node and each column to an edge.\n For a standard incidence matrix a 1 appears wherever a row's node is\n incident on the column's edge. For an oriented incidence matrix each\n edge is assigned an orientation (arbitrarily for undirected and aligning to\n direction for directed). A -1 appears for the source (tail) of an edge and\n 1 for the destination (head) of the edge. The elements are zero otherwise.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n nodelist : list, optional (default= all nodes in G)\n The rows are ordered according to the nodes in nodelist.\n If nodelist is None, then the ordering is produced by G.nodes().\n\n edgelist : list, optional (default= all edges in G)\n The columns are ordered according to the edges in edgelist.\n If edgelist is None, then the ordering is produced by G.edges().\n\n oriented: bool, optional (default=False)\n If True, matrix elements are +1 or -1 for the head or tail node\n respectively of each edge. If False, +1 occurs at both nodes.\n\n weight : string or None, optional (default=None)\n The edge data key used to provide each value in the matrix.\n If None, then each edge has weight 1. Edge weights, if used,\n should be positive so that the orientation can provide the sign.\n\n Returns\n -------\n A : SciPy sparse array\n The incidence matrix of G.\n\n Notes\n -----\n For MultiGraph/MultiDiGraph, the edges in edgelist should be\n (u,v,key) 3-tuples.\n\n \"Networks are the best discrete model for so many problems in\n applied mathematics\" [1]_.\n\n References\n ----------\n .. 
[1] Gil Strang, Network applications: A = incidence matrix,\n http://videolectures.net/mit18085f07_strang_lec03/\n ", "n_words": 272, "vocab_size": 141, "n_whitespaces": 428, "language": "en" } }, { "id": 86876, "commit_id": "941184cd24186324fd9f7f304b7f713041834726", "repo": "sentry", "path": "src/sentry/models/auditlogentry.py", "file_name": "auditlogentry.py", "fun_name": "save_or_write_to_kafka", "commit_message": "chore(hybrid-cloud): AuditLogEntry is a control silo model now (#39890)\n\nIn the control silo, creating an audit log entry writes to the db\r\ndirectly, whilst in region silo mode creating an audit log entry will\r\ninstead push to a new kafka producer that consumes into the control silo\r\nasynchronously.", "code": "def save_or_write_to_kafka(self):\n \n from sentry.region_to_control.producer import produce_audit_log_entry\n\n produce_audit_log_entry(self)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 28, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 18, "n_ast_nodes": 31, "n_identifiers": 6, "d_id": 18179, "documentation": { "docstring": "\n Region Silos do not have access to the AuditLogEntry table which is specific to the control silo.\n For those silos, this method publishes the attempted audit log write to a durable kafka queue synchronously\n that will eventually be consumed by the control silo. For the control silo, this method ultimately results\n in a save() call.\n\n This method is most ideal for shared code paths that may be invoked from either control or region silos,\n but is not recommended on code paths that should always be invoked from the control silo and depend on the\n synchronous database access.\n ", "n_words": 97, "vocab_size": 66, "n_whitespaces": 155, "language": "en" } }, { "id": 173483, "commit_id": "26be5ee2372b08c2f906661283a12e84d6c181f8", "repo": "calibre-web", "path": "cps/tasks/metadata_backup.py", "file_name": "metadata_backup.py", "fun_name": "open_metadata", "commit_message": "Backup metadata 3rd step", "code": "def open_metadata(self, book, custom_columns):\n if config.config_use_google_drive:\n if not gdriveutils.is_gdrive_ready():\n raise Exception('Google Drive is configured but not ready')\n\n web_content_link = gdriveutils.get_metadata_backup_via_gdrive(book.path)\n if not web_content_link:\n raise Exception('Google Drive cover url not found')\n\n stream = None\n try:\n stream = urlopen(web_content_link)\n except Exception as ex:\n # Bubble exception to calling function\n self.log.debug('Error reading metadata.opf: ' + str(ex)) # ToDo Check whats going on\n raise ex\n finally:\n if stream is not None:\n stream.close()\n else:\n # ToDo: Handle book folder not found or not readable\n book_metadata_filepath = os.path.join(config.config_calibre_dir, book.path, 'metadata.opf')\n #if not os.path.isfile(book_metadata_filepath):\n self.create_new_metadata_backup(book, custom_columns, book_metadata_filepath)\n # else:\n \n", "url": "https://github.com/janeczku/calibre-web.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 380, "n_words": 92, "vocab_size": 68, "complexity": 7, "nloc": 37, "token_counts": 121, "n_ast_nodes": 209, "n_identifiers": 24, "d_id": 40848, "documentation": { "docstring": "namespaces = {'dc': PURL_NAMESPACE, 'opf': OPF_NAMESPACE}\n test = etree.parse(book_metadata_filepath)\n root = test.getroot()\n for i in root.iter():\n self.log.info(i)\n 
title = root.find(\"dc:metadata\", namespaces)\n pass\n with open(book_metadata_filepath, \"rb\") as f:\n xml = f.read()\n\n root = objectify.fromstring(xml)\n # root.metadata['{http://purl.org/dc/elements/1.1/}title']\n # root.metadata[PURL + 'title']\n # getattr(root.metadata, PURL +'title')\n # test = objectify.parse()\n pass\n # backup not found has to be created\n #raise Exception('Book cover file not found')", "n_words": 62, "vocab_size": 48, "n_whitespaces": 245, "language": "en" } }, { "id": 217908, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/imaplib.py", "file_name": "imaplib.py", "fun_name": "proxyauth", "commit_message": "add python 3.10.4 for windows", "code": "def proxyauth(self, user):\n \n\n name = 'PROXYAUTH'\n return self._simple_command('PROXYAUTH', user)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 37, "n_identifiers": 5, "d_id": 55006, "documentation": { "docstring": "Assume authentication as \"user\".\n\n Allows an authorised administrator to proxy into any user's\n mailbox.\n\n (typ, [data]) = .proxyauth(user)\n ", "n_words": 18, "vocab_size": 18, "n_whitespaces": 46, "language": "en" } }, { "id": 81766, "commit_id": "33c0fb79d66f56374d7c042ba79887faa85e2885", "repo": "awx", "path": "awx/main/utils/common.py", "file_name": "common.py", "fun_name": "copy_m2m_relationships", "commit_message": "JT param everything (#12646)\n\n* Making almost all fields promptable on job templates and config models\r\n* Adding EE, IG and label access checks\r\n* Changing jobs preferred instance group function to handle the new IG cache field\r\n* Adding new ask fields to job template modules\r\n* Address unit/functional tests\r\n* Adding migration file", "code": "def copy_m2m_relationships(obj1, obj2, fields, kwargs=None):\n \n for field_name in fields:\n if hasattr(obj1, field_name):\n try:\n field_obj = obj1._meta.get_field(field_name)\n except FieldDoesNotExist:\n continue\n if isinstance(field_obj, ManyToManyField):\n # Many to Many can be specified as field_name\n src_field_value = getattr(obj1, field_name)\n if kwargs and field_name in kwargs:\n override_field_val = kwargs[field_name]\n # TODO: Should we spike this our or just put the for loop inside the next if and make everything respect order?\n if field_name == 'instance_groups':\n # instance_groups are a list but we need to preserve the order\n for ig_id in override_field_val:\n getattr(obj2, field_name).add(ig_id)\n continue\n if isinstance(override_field_val, (set, list, QuerySet)):\n getattr(obj2, field_name).add(*override_field_val)\n continue\n if override_field_val.__class__.__name__ == 'ManyRelatedManager':\n src_field_value = override_field_val\n dest_field = getattr(obj2, field_name)\n dest_field.add(*list(src_field_value.all().values_list('id', flat=True)))\n\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 521, "n_words": 110, "vocab_size": 77, "complexity": 11, "nloc": 22, "token_counts": 164, "n_ast_nodes": 263, "n_identifiers": 27, "d_id": 17256, "documentation": { "docstring": "\n In-place operation.\n Given two saved objects, copies related objects from obj1\n to obj2 to field of same name, if field occurs in `fields`\n ", "n_words": 23, "vocab_size": 21, 
"n_whitespaces": 36, "language": "en" } }, { "id": 225637, "commit_id": "557b7b44b393d0701413ed8012a920a0691e06cb", "repo": "albumentations", "path": "albumentations/augmentations/geometric/functional.py", "file_name": "functional.py", "fun_name": "keypoint_rotate", "commit_message": "Fix Affine wrong rotation angle (#1091)\n\n* Fix Affine wrong rotation angle\r\n\r\n* Link to issue\r\n\r\n* Fix Perspective rot. angle for keypoints, fix Affine\r\n\r\n* Change angle sign, do not change it manually after all changes\r\n\r\n* Tests\r\n\r\n* Fix tests and image center\r\n\r\n* Fix shift_rotate tests\r\n\r\nCo-authored-by: Eugene Khvedchenya \r\nCo-authored-by: Vladimir Iglovikov ", "code": "def keypoint_rotate(keypoint, angle, rows, cols, **params):\n \n center = (cols - 1) * 0.5, (rows - 1) * 0.5\n matrix = cv2.getRotationMatrix2D(center, angle, 1.0)\n x, y, a, s = keypoint[:4]\n x, y = cv2.transform(np.array([[[x, y]]]), matrix).squeeze()\n return x, y, a + math.radians(angle), s\n\n\n@preserve_channel_dim", "url": "https://github.com/albumentations-team/albumentations.git", "language": "Python", "ast_errors": "@preserve_channel_dim", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 60, "n_words": 43, "vocab_size": 32, "complexity": 1, "nloc": 6, "token_counts": 107, "n_ast_nodes": 153, "n_identifiers": 21, "d_id": 57482, "documentation": { "docstring": "Rotate a keypoint by angle.\n\n Args:\n keypoint (tuple): A keypoint `(x, y, angle, scale)`.\n angle (float): Rotation angle.\n rows (int): Image height.\n cols (int): Image width.\n\n Returns:\n tuple: A keypoint `(x, y, angle, scale)`.\n\n ", "n_words": 34, "vocab_size": 23, "n_whitespaces": 78, "language": "en" } }, { "id": 20579, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pyparsing/core.py", "file_name": "core.py", "fun_name": "disable_memoization", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def disable_memoization() -> None:\n \n ParserElement.reset_cache()\n ParserElement._left_recursion_enabled = False\n ParserElement._packratEnabled = False\n ParserElement._parse = ParserElement._parseNoCache\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 49, "n_words": 14, "vocab_size": 11, "complexity": 1, "nloc": 12, "token_counts": 29, "n_ast_nodes": 51, "n_identifiers": 7, "d_id": 3434, "documentation": { "docstring": "\n Disables active Packrat or Left Recursion parsing and their memoization\n\n This method also works if neither Packrat nor Left Recursion are enabled.\n This makes it safe to call before activating Packrat nor Left Recursion\n to clear any previous settings.\n ", "n_words": 39, "vocab_size": 30, "n_whitespaces": 75, "language": "en" } }, { "id": 242247, "commit_id": "f8e4e9c2dd94c6f4789639dd891b8a6d5fb16e14", "repo": "Pillow", "path": "src/PIL/ImageOps.py", "file_name": "ImageOps.py", 
"fun_name": "scale", "commit_message": "Added enums", "code": "def scale(image, factor, resample=Image.Resampling.BICUBIC):\n \n if factor == 1:\n return image.copy()\n elif factor <= 0:\n raise ValueError(\"the factor must be greater than 0\")\n else:\n size = (round(factor * image.width), round(factor * image.height))\n return image.resize(size, resample)\n\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 74, "n_words": 34, "vocab_size": 30, "complexity": 3, "nloc": 8, "token_counts": 69, "n_ast_nodes": 111, "n_identifiers": 14, "d_id": 69806, "documentation": { "docstring": "\n Returns a rescaled image by a specific factor given in parameter.\n A factor greater than 1 expands the image, between 0 and 1 contracts the\n image.\n\n :param image: The image to rescale.\n :param factor: The expansion factor, as a float.\n :param resample: Resampling method to use. Default is\n :py:attr:`PIL.Image.BICUBIC`. See :ref:`concept-filters`.\n :returns: An :py:class:`~PIL.Image.Image` object.\n ", "n_words": 55, "vocab_size": 45, "n_whitespaces": 100, "language": "en" } }, { "id": 223791, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/message.py", "file_name": "message.py", "fun_name": "get_all", "commit_message": "add python 3.10.4 for windows", "code": "def get_all(self, name, failobj=None):\n \n values = []\n name = name.lower()\n for k, v in self._headers:\n if k.lower() == name:\n values.append(self.policy.header_fetch_parse(k, v))\n if not values:\n return failobj\n return values\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 107, "n_words": 28, "vocab_size": 24, "complexity": 4, "nloc": 9, "token_counts": 64, "n_ast_nodes": 103, "n_identifiers": 12, "d_id": 57068, "documentation": { "docstring": "Return a list of all the values for the named field.\n\n These will be sorted in the order they appeared in the original\n message, and may contain duplicates. 
Any fields deleted and\n re-inserted are always appended to the header list.\n\n If no such fields exist, failobj is returned (defaults to None).\n ", "n_words": 51, "vocab_size": 43, "n_whitespaces": 87, "language": "en" } }, { "id": 146747, "commit_id": "391901f86bc0bec6d3199ac05f316a05bcc4b910", "repo": "ray", "path": "python/ray/_private/test_utils.py", "file_name": "test_utils.py", "fun_name": "get_error_message", "commit_message": "[Remove Redis Pubsub 2/n] clean up remaining Redis references in gcs_utils.py (#23233)\n\nContinue to clean up Redis and other related Redis references, for\r\n- gcs_utils.py\r\n- log_monitor.py\r\n- `publish_error_to_driver()`", "code": "def get_error_message(subscriber, num=1e6, error_type=None, timeout=20):\n \n deadline = time.time() + timeout\n msgs = []\n while time.time() < deadline and len(msgs) < num:\n _, error_data = subscriber.poll(timeout=deadline - time.time())\n if not error_data:\n # Timed out before any data is received.\n break\n if error_type is None or error_type == error_data.type:\n msgs.append(error_data)\n else:\n time.sleep(0.01)\n\n return msgs\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 139, "n_words": 52, "vocab_size": 43, "complexity": 6, "nloc": 12, "token_counts": 99, "n_ast_nodes": 159, "n_identifiers": 15, "d_id": 33771, "documentation": { "docstring": "Gets errors from GCS subscriber.\n\n Returns maximum `num` error strings within `timeout`.\n Only returns errors of `error_type` if specified.\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 28, "language": "en" } }, { "id": 217955, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/imaplib.py", "file_name": "imaplib.py", "fun_name": "socket", "commit_message": "add python 3.10.4 for windows", "code": "def socket(self):\n \n return self.sock\n\n\n\n # Utility methods\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 30, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 20, "n_identifiers": 3, "d_id": 55035, "documentation": { "docstring": "Return socket instance used to connect to IMAP4 server.\n\n socket = .socket()\n ", "n_words": 12, "vocab_size": 10, "n_whitespaces": 26, "language": "en" } }, { "id": 276920, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/io_utils.py", "file_name": "io_utils.py", "fun_name": "path_to_string", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def path_to_string(path):\n \n if isinstance(path, os.PathLike):\n return os.fspath(path)\n return path\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 25, "n_words": 9, "vocab_size": 8, "complexity": 2, "nloc": 4, "token_counts": 25, "n_ast_nodes": 42, "n_identifiers": 6, "d_id": 81776, "documentation": { "docstring": "Convert `PathLike` objects to their string representation.\n\n If given a non-string typed path object, converts it to its string\n representation.\n\n If the object passed to `path` is not among the above, then it is\n returned unchanged. This allows e.g. 
passthrough of file objects\n through this function.\n\n Args:\n path: `PathLike` object that represents a path\n\n Returns:\n A string representation of the path argument, if Python support exists.\n ", "n_words": 66, "vocab_size": 49, "n_whitespaces": 100, "language": "en" } }, { "id": 73787, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/models/__init__.py", "file_name": "__init__.py", "fun_name": "creatable_subpage_models", "commit_message": "Reformat with black", "code": "def creatable_subpage_models(cls):\n \n return [\n page_model\n for page_model in cls.allowed_subpage_models()\n if page_model.is_creatable\n ]\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 66, "n_words": 12, "vocab_size": 11, "complexity": 3, "nloc": 6, "token_counts": 22, "n_ast_nodes": 37, "n_identifiers": 5, "d_id": 16110, "documentation": { "docstring": "\n Returns the list of page types that may be created under this page type,\n as a list of model classes\n ", "n_words": 20, "vocab_size": 17, "n_whitespaces": 42, "language": "en" } }, { "id": 151540, "commit_id": "5ee3b8cbbb89c8a57cb42cc3253001e47720991b", "repo": "freqtrade", "path": "freqtrade/freqai/freqai_interface.py", "file_name": "freqai_interface.py", "fun_name": "set_full_path", "commit_message": "update config recording to use all configs, fix tests", "code": "def set_full_path(self) -> None:\n \n self.full_path = Path(\n self.config[\"user_data_dir\"] / \"models\" / f\"{self.freqai_info['identifier']}\"\n )\n self.full_path.mkdir(parents=True, exist_ok=True)\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 54, "n_words": 15, "vocab_size": 14, "complexity": 1, "nloc": 8, "token_counts": 40, "n_ast_nodes": 82, "n_identifiers": 9, "d_id": 35040, "documentation": { "docstring": "\n Creates and sets the full path for the identifier\n ", "n_words": 9, "vocab_size": 8, "n_whitespaces": 24, "language": "en" } }, { "id": 206272, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/template/engine.py", "file_name": "engine.py", "fun_name": "get_template", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_template(self, template_name):\n \n template, origin = self.find_template(template_name)\n if not hasattr(template, \"render\"):\n # template needs to be compiled\n template = Template(template, origin, template_name, engine=self)\n return template\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 75, "n_words": 25, "vocab_size": 22, "complexity": 2, "nloc": 5, "token_counts": 43, "n_ast_nodes": 70, "n_identifiers": 9, "d_id": 51459, "documentation": { "docstring": "\n Return a compiled Template object for the given template name,\n handling template inheritance recursively.\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 36, "language": "en" } }, { "id": 244149, "commit_id": "7d1ce22e3328ba89c11b6cdaafff6c96d9da3f4f", "repo": "mmdetection", "path": "mmdet/models/roi_heads/mask_heads/mask_point_head.py", "file_name": "mask_point_head.py", "fun_name": "get_roi_rel_points_test", "commit_message": "Fix `pointrend` missing `get_uncertainty` function bug (#7550)\n\n* [Fix] Adjust the order of get_classes and FileClient. 
(#7276)\r\n\r\n* delete -sv (#7277)\r\n\r\nCo-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com>\r\n\r\n* [Docs] Add Chinese version of finetune (#7178)\r\n\r\n* [Fix] Fix wrong img name in onnx2tensorrt.py (#7157)\r\n\r\n* [Docs] fix albumentations installed way (#7143)\r\n\r\n* Update finetune.md\r\n\r\nTranslate the finetune.md doc to Chinese\r\n\r\n* Update finetune.md\r\n\r\n* Update finetune.md\r\n\r\n* Update finetune.md\r\n\r\n* fix lint\r\n\r\n* fx lint\r\n\r\n* fix pr\r\n\r\nCo-authored-by: Jamie \r\nCo-authored-by: BigDong \r\n\r\n* set unmap_results=True in ssd_head (#7328)\r\n\r\n* Update YOLOX log for non square input (#7235)\r\n\r\n* [Enhance] add cpu_num in cocopanoptic for pq computing (#7315)\r\n\r\n* add cpu_num in cocopanoptic for pq computing\r\n\r\n* cpu_num -> nproc\r\n\r\n* move nproc to evaluate\r\n\r\n* [Enhancement] Allow to set channel_order in LoadImageFromFile (#7258)\r\n\r\n* allow to set channel_order when loading images\r\n\r\n* fix lint\r\n\r\n* fix unit test\r\n\r\n* fix lint\r\n\r\n* [Fix] Force the inputs of `get_bboxes` in yolox_head to float32. (#7324)\r\n\r\n* Fix softnms bug\r\n\r\n* Add force_fp32 in corner_head and centripetal_head\r\n\r\n* [Fix] Fix typo in FPN neck (#7347)\r\n\r\n* update readme and pretrained related (#7301)\r\n\r\n* [Docs] Add Chinese version of onnx2tensorrt.md (#7219)\r\n\r\n* Fix bug of docs\r\n\r\n* translate onnx2tensorrt.md\r\n\r\n* fix\r\n\r\n* fix end-of-file-fixer\r\n\r\n* fix some bugs\r\n\r\n* 修复链接跳转\r\n\r\n* 修复链接跳转\r\n\r\n* 修复链接跳转-测试1\r\n\r\n* 修复链接跳转-测试2\r\n\r\n* 修复链接跳转-测试2\r\n\r\n* 修复链接跳转-测试3\r\n\r\n* 修复链接跳转-测试5\r\n\r\n* Fix\r\n\r\nCo-authored-by: jbwang1997 \r\n\r\n* Update useful_tools.md (#7180)\r\n\r\n* [Enhancement]: Update colab tutorials (#7310)\r\n\r\n* update colab tutorials\r\n\r\n* update\r\n\r\n* fix\r\n\r\n* fix wrong CUDA explaination\r\n\r\n* resolve comments\r\n\r\n* resolve comments\r\n\r\n* fix typo\r\n\r\nCo-authored-by: Cedric Luo \r\nCo-authored-by: tripleMu <92794867+q3394101@users.noreply.github.com>\r\nCo-authored-by: jbwang1997 \r\nCo-authored-by: kira <39787375+yangrisheng@users.noreply.github.com>\r\nCo-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com>\r\n\r\n* Fix pointrend missing get_uncertainty function bug\r\n\r\nCo-authored-by: Wencheng Wu <41542251+274869388@users.noreply.github.com>\r\nCo-authored-by: Yue Zhou <592267829@qq.com>\r\nCo-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com>\r\nCo-authored-by: MingJian.L <45811724+matrixgame2018@users.noreply.github.com>\r\nCo-authored-by: Jamie \r\nCo-authored-by: BigDong \r\nCo-authored-by: Cedric Luo <26483343+chhluo@users.noreply.github.com>\r\nCo-authored-by: Yosuke Shinya <42844407+shinya7y@users.noreply.github.com>\r\nCo-authored-by: Cedric Luo \r\nCo-authored-by: Jingwei Zhang \r\nCo-authored-by: jbwang1997 \r\nCo-authored-by: Xiangxu-0103 \r\nCo-authored-by: tripleMu <92794867+q3394101@users.noreply.github.com>\r\nCo-authored-by: kira <39787375+yangrisheng@users.noreply.github.com>", "code": "def get_roi_rel_points_test(self, mask_pred, pred_label, cfg):\n \n num_points = cfg.subdivision_num_points\n uncertainty_map = get_uncertainty(mask_pred, pred_label)\n num_rois, _, mask_height, mask_width = uncertainty_map.shape\n\n # During ONNX exporting, the type of each elements of 'shape' is\n # `Tensor(float)`, while it is `float` during PyTorch inference.\n if isinstance(mask_height, torch.Tensor):\n h_step = 1.0 / mask_height.float()\n w_step = 1.0 / mask_width.float()\n 
else:\n h_step = 1.0 / mask_height\n w_step = 1.0 / mask_width\n # cast to int to avoid dynamic K for TopK op in ONNX\n mask_size = int(mask_height * mask_width)\n uncertainty_map = uncertainty_map.view(num_rois, mask_size)\n num_points = min(mask_size, num_points)\n point_indices = uncertainty_map.topk(num_points, dim=1)[1]\n xs = w_step / 2.0 + (point_indices % mask_width).float() * w_step\n ys = h_step / 2.0 + (point_indices // mask_width).float() * h_step\n point_coords = torch.stack([xs, ys], dim=2)\n return point_indices, point_coords\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 286, "n_words": 123, "vocab_size": 80, "complexity": 2, "nloc": 18, "token_counts": 175, "n_ast_nodes": 267, "n_identifiers": 31, "d_id": 70261, "documentation": { "docstring": "Get ``num_points`` most uncertain points during test.\n\n Args:\n mask_pred (Tensor): A tensor of shape (num_rois, num_classes,\n mask_height, mask_width) for class-specific or class-agnostic\n prediction.\n pred_label (list): The predication class for each instance.\n cfg (dict): Testing config of point head.\n\n Returns:\n point_indices (Tensor): A tensor of shape (num_rois, num_points)\n that contains indices from [0, mask_height x mask_width) of the\n most uncertain points.\n point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)\n that contains [0, 1] x [0, 1] normalized coordinates of the\n most uncertain points from the [mask_height, mask_width] grid .\n ", "n_words": 89, "vocab_size": 58, "n_whitespaces": 255, "language": "en" } }, { "id": 53100, "commit_id": "b110baccdbfde300f410b069c873e8b2a2c98e00", "repo": "prefect", "path": "tests/test_logging.py", "file_name": "test_logging.py", "fun_name": "logger_test_deployment", "commit_message": "Add test", "code": "async def logger_test_deployment(orion_client):\n \n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 6, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 11, "token_counts": 53, "n_ast_nodes": 14, "n_identifiers": 2, "d_id": 10717, "documentation": { "docstring": "\n A deployment with a flow that returns information about the given loggers\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 19, "language": "en" } }, { "id": 232547, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/template/_data.py", "file_name": "_data.py", "fun_name": "pointcloud", "commit_message": "switch to black .22", "code": "def pointcloud(self):\n \n return self[\"pointcloud\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 63991, "documentation": { "docstring": "\n The 'pointcloud' property is a tuple of instances of\n Pointcloud that may be specified as:\n - A list or tuple of instances of plotly.graph_objs.layout.template.data.Pointcloud\n - A list or tuple of dicts of string/value properties that\n will be passed to the Pointcloud constructor\n\n Supported dict properties:\n\n Returns\n -------\n tuple[plotly.graph_objs.layout.template.data.Pointcloud]\n ", "n_words": 48, "vocab_size": 33, "n_whitespaces": 131, "language": "en" } }, { "id": 181912, 
"commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tpot/gp_deap.py", "file_name": "gp_deap.py", "fun_name": "varOr", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def varOr(population, toolbox, lambda_, cxpb, mutpb):\n \n offspring = []\n\n for _ in range(lambda_):\n op_choice = np.random.random()\n if op_choice < cxpb: # Apply crossover\n ind1, ind2 = pick_two_individuals_eligible_for_crossover(population)\n if ind1 is not None:\n ind1, _ = toolbox.mate(ind1, ind2)\n del ind1.fitness.values\n else:\n # If there is no pair eligible for crossover, we still want to\n # create diversity in the population, and do so by mutation instead.\n ind1 = mutate_random_individual(population, toolbox)\n offspring.append(ind1)\n elif op_choice < cxpb + mutpb: # Apply mutation\n ind = mutate_random_individual(population, toolbox)\n offspring.append(ind)\n else: # Apply reproduction\n idx = np.random.randint(0, len(population))\n offspring.append(toolbox.clone(population[idx]))\n\n return offspring\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 301, "n_words": 95, "vocab_size": 68, "complexity": 5, "nloc": 19, "token_counts": 142, "n_ast_nodes": 228, "n_identifiers": 25, "d_id": 43664, "documentation": { "docstring": "Part of an evolutionary algorithm applying only the variation part\n (crossover, mutation **or** reproduction). The modified individuals have\n their fitness invalidated. The individuals are cloned so returned\n population is independent of the input population.\n :param population: A list of individuals to vary.\n :param toolbox: A :class:`~deap.base.Toolbox` that contains the evolution\n operators.\n :param lambda\\_: The number of children to produce\n :param cxpb: The probability of mating two individuals.\n :param mutpb: The probability of mutating an individual.\n :returns: The final population\n :returns: A class:`~deap.tools.Logbook` with the statistics of the\n evolution\n The variation goes as follow. On each of the *lambda_* iteration, it\n selects one of the three operations; crossover, mutation or reproduction.\n In the case of a crossover, two individuals are selected at random from\n the parental population :math:`P_\\mathrm{p}`, those individuals are cloned\n using the :meth:`toolbox.clone` method and then mated using the\n :meth:`toolbox.mate` method. Only the first child is appended to the\n offspring population :math:`P_\\mathrm{o}`, the second child is discarded.\n In the case of a mutation, one individual is selected at random from\n :math:`P_\\mathrm{p}`, it is cloned and then mutated using using the\n :meth:`toolbox.mutate` method. The resulting mutant is appended to\n :math:`P_\\mathrm{o}`. In the case of a reproduction, one individual is\n selected at random from :math:`P_\\mathrm{p}`, cloned and appended to\n :math:`P_\\mathrm{o}`.\n This variation is named *Or* beceause an offspring will never result from\n both operations crossover and mutation. 
The sum of both probabilities\n shall be in :math:`[0, 1]`, the reproduction probability is\n 1 - *cxpb* - *mutpb*.\n ", "n_words": 245, "vocab_size": 131, "n_whitespaces": 361, "language": "en" } }, { "id": 204039, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/gis/geos/geometry.py", "file_name": "geometry.py", "fun_name": "make_valid", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def make_valid(self):\n \n if geos_version_tuple() < (3, 8):\n raise GEOSException(\"GEOSGeometry.make_valid() requires GEOS >= 3.8.0.\")\n return GEOSGeometry(capi.geos_makevalid(self.ptr), srid=self.srid)\n\n # #### Unary predicates ####", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 56, "n_words": 21, "vocab_size": 20, "complexity": 2, "nloc": 4, "token_counts": 40, "n_ast_nodes": 68, "n_identifiers": 9, "d_id": 50621, "documentation": { "docstring": "\n Attempt to create a valid representation of a given invalid geometry\n without losing any of the input vertices.\n ", "n_words": 18, "vocab_size": 16, "n_whitespaces": 40, "language": "en" } }, { "id": 71919, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/test_contentstate.py", "file_name": "test_contentstate.py", "fun_name": "test_image_inside_paragraph", "commit_message": "Reformat with black", "code": "def test_image_inside_paragraph(self):\n # In Draftail's data model, images are block-level elements and therefore\n # split up preceding / following text into their own paragraphs\n converter = ContentstateConverter(features=[\"image\"])\n result = json.loads(\n converter.from_database_format(\n \n )\n )\n self.assertContentStateEqual(\n result,\n {\n \"blocks\": [\n {\n \"key\": \"00000\",\n \"inlineStyleRanges\": [],\n \"entityRanges\": [],\n \"depth\": 0,\n \"text\": \"before\",\n \"type\": \"unstyled\",\n },\n {\n \"key\": \"00000\",\n \"inlineStyleRanges\": [],\n \"entityRanges\": [{\"key\": 0, \"offset\": 0, \"length\": 1}],\n \"depth\": 0,\n \"text\": \" \",\n \"type\": \"atomic\",\n },\n {\n \"key\": \"00000\",\n \"inlineStyleRanges\": [],\n \"entityRanges\": [],\n \"depth\": 0,\n \"text\": \"after\",\n \"type\": \"unstyled\",\n },\n ],\n \"entityMap\": {\n \"0\": {\n \"data\": {\n \"format\": \"left\",\n \"alt\": \"an image\",\n \"id\": \"1\",\n \"src\": \"/media/not-found\",\n },\n \"mutability\": \"IMMUTABLE\",\n \"type\": \"IMAGE\",\n }\n },\n },\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 1056, "n_words": 111, "vocab_size": 72, "complexity": 1, "nloc": 52, "token_counts": 181, "n_ast_nodes": 347, "n_identifiers": 10, "d_id": 15780, "documentation": { "docstring": "\n

    <p>before <embed embedtype=\"image\" alt=\"an image\" id=\"1\" format=\"left\" /> after</p>

    \n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 32, "language": "en" } }, { "id": 270970, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/base_layer_v1.py", "file_name": "base_layer_v1.py", "fun_name": "get_losses_for", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def get_losses_for(self, inputs):\n \n if inputs is None:\n # Requesting unconditional losses.\n return [l for l in self.losses if l._unconditional_loss]\n\n # Requesting input-conditional losses.\n losses = [l for l in self.losses if not l._unconditional_loss]\n inputs = tf.nest.flatten(inputs)\n reachable = tf_utils.get_reachable_from_inputs(inputs, losses)\n return [l for l in losses if l in reachable]\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 121, "n_words": 50, "vocab_size": 27, "complexity": 8, "nloc": 7, "token_counts": 75, "n_ast_nodes": 117, "n_identifiers": 12, "d_id": 80627, "documentation": { "docstring": "Retrieves losses relevant to a specific set of inputs.\n\n Args:\n inputs: Input tensor or list/tuple of input tensors.\n\n Returns:\n List of loss tensors of the layer that depend on `inputs`.\n ", "n_words": 30, "vocab_size": 27, "n_whitespaces": 69, "language": "en" } }, { "id": 22954, "commit_id": "a323fce66dd68a881cf599526185b52ab5df356b", "repo": "PaddleOCR", "path": "tools/program.py", "file_name": "program.py", "fun_name": "load_config", "commit_message": "vqa code integrated into ppocr training system", "code": "def load_config(file_path):\n \n _, ext = os.path.splitext(file_path)\n assert ext in ['.yml', '.yaml'], \"only support yaml files for now\"\n config = yaml.load(open(file_path, 'rb'), Loader=yaml.Loader)\n return config\n\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 39, "n_words": 24, "vocab_size": 21, "complexity": 1, "nloc": 5, "token_counts": 49, "n_ast_nodes": 84, "n_identifiers": 12, "d_id": 4491, "documentation": { "docstring": "\n Load config from yml/yaml file.\n Args:\n file_path (str): Path of the config file to be loaded.\n Returns: global config\n ", "n_words": 19, "vocab_size": 17, "n_whitespaces": 39, "language": "en" } }, { "id": 85561, "commit_id": "d62c4935f02238a8f3991da5ef280a4bf249d771", "repo": "sentry", "path": "tests/sentry/sentry_metrics/test_parallel_indexer.py", "file_name": "test_parallel_indexer.py", "fun_name": "test_basic", "commit_message": "fix(metrics): Fix startup crash in parallel indexer [sns-1490] (#38455)\n\nSince https://github.com/getsentry/sentry/pull/38225 the parallel\r\nindexer fails to serialize the processing function here:\r\nhttps://github.com/getsentry/sentry/blob/9bf499ad95030ed1112f117c5c1be59b2e036509/src/sentry/sentry_metrics/consumers/indexer/parallel.py#L115\r\n\r\nWe need to make sure the message processor is pickleable. 
So the config\r\nalso needs to be pickleable.\r\n\r\nThe old code worked because it imported the config and indexer from\r\nsettings instead of attempting to pickle them.", "code": "def test_basic(request):\n \n processing_factory = MetricsConsumerStrategyFactory(\n max_msg_batch_size=1,\n max_msg_batch_time=1,\n max_parallel_batch_size=1,\n max_parallel_batch_time=1,\n max_batch_size=1,\n max_batch_time=1,\n processes=1,\n input_block_size=1024,\n output_block_size=1024,\n config=MetricsIngestConfiguration(\n db_backend=IndexerStorage.MOCK,\n db_backend_options={},\n input_topic=\"ingest-metrics\",\n output_topic=\"snuba-metrics\",\n use_case_id=UseCaseKey.RELEASE_HEALTH,\n internal_metrics_tag=\"test\",\n writes_limiter_cluster_options={},\n writes_limiter_namespace=\"test\",\n ),\n )\n\n strategy = processing_factory.create_with_partitions(\n lambda _: None,\n {Partition(topic=Topic(name=\"ingest-bogus-metrics\"), index=1): 1},\n )\n\n message = Message(\n Partition(Topic(\"topic\"), 0),\n 0,\n KafkaPayload(None, json.dumps(counter_payload).encode(\"utf-8\"), []),\n datetime.now(),\n )\n\n # Just assert that the strategy does not crash. Further assertions, such as\n # on the produced messages, would slow down the test significantly.\n strategy.submit(message=message)\n strategy.close()\n strategy.join()\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 316, "n_words": 73, "vocab_size": 65, "complexity": 1, "nloc": 35, "token_counts": 184, "n_ast_nodes": 284, "n_identifiers": 47, "d_id": 18005, "documentation": { "docstring": "\n Integration test to verify that the parallel indexer can spawn subprocesses\n properly. The main purpose is to verify that there are no\n pickling/unpickling errors when passing the strategy into the\n ParallelTransformStep, as that is easy to break.\n ", "n_words": 37, "vocab_size": 29, "n_whitespaces": 53, "language": "en" } }, { "id": 196786, "commit_id": "f757f3daae6e11ea0cfb7dadc133274d8d74315f", "repo": "sympy", "path": "sympy/integrals/meijerint.py", "file_name": "meijerint.py", "fun_name": "_get_coeff_exp", "commit_message": "Reordered imports 2", "code": "def _get_coeff_exp(expr, x):\n \n from sympy.simplify import powsimp\n (c, m) = expand_power_base(powsimp(expr)).as_coeff_mul(x)\n if not m:\n return c, S.Zero\n [m] = m\n if m.is_Pow:\n if m.base != x:\n raise _CoeffExpValueError('expr not of form a*x**b')\n return c, m.exp\n elif m == x:\n return c, S.One\n else:\n raise _CoeffExpValueError('expr not of form a*x**b: %s' % expr)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 122, "n_words": 52, "vocab_size": 37, "complexity": 5, "nloc": 14, "token_counts": 90, "n_ast_nodes": 148, "n_identifiers": 17, "d_id": 48173, "documentation": { "docstring": "\n When expr is known to be of the form c*x**b, with c and/or b possibly 1,\n return c, b.\n\n Examples\n ========\n\n >>> from sympy.abc import x, a, b\n >>> from sympy.integrals.meijerint import _get_coeff_exp\n >>> _get_coeff_exp(a*x**b, x)\n (a, b)\n >>> _get_coeff_exp(x, x)\n (1, 1)\n >>> _get_coeff_exp(2*x, x)\n (2, 1)\n >>> _get_coeff_exp(x**3, x)\n (1, 3)\n ", "n_words": 53, "vocab_size": 40, "n_whitespaces": 99, "language": "en" } }, { "id": 56999, "commit_id": "8f3ffd09dc47bfd2af6a635cc04c640febffd519", "repo": "prefect", "path": "src/prefect/blocks/kubernetes.py", "file_name": "kubernetes.py", "fun_name": 
"activate", "commit_message": "add test coerage for get_api_client and activate", "code": "def activate(self) -> str:\n \n load_kube_config_from_dict(\n config_dict=self.config,\n context=self.context,\n )\n\n return self.current_context()\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 60, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 11, "token_counts": 29, "n_ast_nodes": 48, "n_identifiers": 8, "d_id": 11603, "documentation": { "docstring": "\n Convenience method for activating the k8s config stored in an instance of this block\n\n Returns current_context for sanity check\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 41, "language": "en" } }, { "id": 267750, "commit_id": "f2abfc4b3d03a2baa078477d0ad2241263a00668", "repo": "ansible", "path": "test/lib/ansible_test/_internal/content_config.py", "file_name": "content_config.py", "fun_name": "parse_python_requires", "commit_message": "ansible-test - Parse content config only once. (#78418)", "code": "def parse_python_requires(value): # type: (t.Any) -> tuple[str, ...]\n \n if not isinstance(value, str):\n raise ValueError('python_requires must must be of type `str` not type `%s`' % type(value))\n\n versions: tuple[str, ...]\n\n if value == 'default':\n versions = SUPPORTED_PYTHON_VERSIONS\n elif value == 'controller':\n versions = CONTROLLER_PYTHON_VERSIONS\n else:\n specifier_set = SpecifierSet(value)\n versions = tuple(version for version in SUPPORTED_PYTHON_VERSIONS if specifier_set.contains(Version(version)))\n\n return versions\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 114, "n_words": 57, "vocab_size": 41, "complexity": 6, "nloc": 12, "token_counts": 79, "n_ast_nodes": 136, "n_identifiers": 15, "d_id": 79034, "documentation": { "docstring": "Parse the given 'python_requires' version specifier and return the matching Python versions.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 268567, "commit_id": "ff6e4da36addccb06001f7b05b1a9c04ae1d7984", "repo": "ansible", "path": "lib/ansible/playbook/base.py", "file_name": "base.py", "fun_name": "set_to_context", "commit_message": "fixes to FA inheritance (#78990)\n\nfinalized applies to all field attributes\r\nfix getting parent value\r\nalso remove unused/needed extend/prepend signature\r\nmoar testing", "code": "def set_to_context(self, name):\n \n\n attribute = self.fattributes[name]\n if isinstance(attribute, NonInheritableFieldAttribute):\n # setting to sentinel will trigger 'default/default()' on getter\n setattr(self, name, Sentinel)\n else:\n try:\n setattr(self, name, self._get_parent_attribute(name, omit=True))\n except AttributeError:\n # mostly playcontext as only tasks/handlers/blocks really resolve parent\n setattr(self, name, Sentinel)\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 158, "n_words": 41, "vocab_size": 35, "complexity": 3, "nloc": 9, "token_counts": 64, "n_ast_nodes": 100, "n_identifiers": 12, "d_id": 79551, "documentation": { "docstring": " set to parent inherited value or Sentinel as appropriate", "n_words": 9, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 19514, "commit_id": "3387881a6d4fc2d8bdc0f05c484cb2f7222acfb8", "repo": "pipenv", "path": "pipenv/utils/internet.py", "file_name": 
"internet.py", "fun_name": "is_url_equal", "commit_message": "Code reorg utils into utils module reduces complexity (#4990)\n\n* Split apart the massive utils.py into a utils module", "code": "def is_url_equal(url, other_url):\n # type: (str, str) -> bool\n \n if not isinstance(url, str):\n raise TypeError(f\"Expected string for url, received {url!r}\")\n if not isinstance(other_url, str):\n raise TypeError(f\"Expected string for url, received {other_url!r}\")\n parsed_url = urllib3_util.parse_url(url)\n parsed_other_url = urllib3_util.parse_url(other_url)\n unparsed = parsed_url._replace(auth=None, query=None, fragment=None).url\n unparsed_other = parsed_other_url._replace(auth=None, query=None, fragment=None).url\n return unparsed == unparsed_other\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 92, "n_words": 51, "vocab_size": 35, "complexity": 3, "nloc": 10, "token_counts": 98, "n_ast_nodes": 164, "n_identifiers": 16, "d_id": 3008, "documentation": { "docstring": "\n Compare two urls by scheme, host, and path, ignoring auth\n\n :param str url: The initial URL to compare\n :param str url: Second url to compare to the first\n :return: Whether the URLs are equal without **auth**, **query**, and **fragment**\n :rtype: bool\n\n >>> is_url_equal(\"https://user:pass@mydomain.com/some/path?some_query\",\n \"https://user2:pass2@mydomain.com/some/path\")\n True\n\n >>> is_url_equal(\"https://user:pass@mydomain.com/some/path?some_query\",\n \"https://mydomain.com/some?some_query\")\n False\n ", "n_words": 49, "vocab_size": 39, "n_whitespaces": 116, "language": "en" } }, { "id": 168154, "commit_id": "e7afa4b641b146874d17c36caa8a050bfde31283", "repo": "pandas", "path": "pandas/core/indexing.py", "file_name": "indexing.py", "fun_name": "iloc", "commit_message": "DOC: Add tuple description to allowed inputs for iloc #47799 (#47989)\n\nDOC: Add tuple description to allowed inputs for iloc", "code": "def iloc(self) -> _iLocIndexer:\n \n return _iLocIndexer(\"iloc\", self)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 137, "token_counts": 15, "n_ast_nodes": 28, "n_identifiers": 3, "d_id": 40218, "documentation": { "docstring": "\n Purely integer-location based indexing for selection by position.\n\n ``.iloc[]`` is primarily integer position based (from ``0`` to\n ``length-1`` of the axis), but may also be used with a boolean\n array.\n\n Allowed inputs are:\n\n - An integer, e.g. ``5``.\n - A list or array of integers, e.g. ``[4, 3, 0]``.\n - A slice object with ints, e.g. ``1:7``.\n - A boolean array.\n - A ``callable`` function with one argument (the calling Series or\n DataFrame) and that returns valid output for indexing (one of the above).\n This is useful in method chains, when you don't have a reference to the\n calling object, but would like to base your selection on some value.\n - A tuple of row and column indexes. The tuple elements consist of one of the\n above inputs, e.g. 
``(0, 1)``.\n\n ``.iloc`` will raise ``IndexError`` if a requested indexer is\n out-of-bounds, except *slice* indexers which allow out-of-bounds\n indexing (this conforms with python/numpy *slice* semantics).\n\n See more at :ref:`Selection by Position `.\n\n See Also\n --------\n DataFrame.iat : Fast integer location scalar accessor.\n DataFrame.loc : Purely label-location based indexer for selection by label.\n Series.iloc : Purely integer-location based indexing for\n selection by position.\n\n Examples\n --------\n >>> mydict = [{'a': 1, 'b': 2, 'c': 3, 'd': 4},\n ... {'a': 100, 'b': 200, 'c': 300, 'd': 400},\n ... {'a': 1000, 'b': 2000, 'c': 3000, 'd': 4000 }]\n >>> df = pd.DataFrame(mydict)\n >>> df\n a b c d\n 0 1 2 3 4\n 1 100 200 300 400\n 2 1000 2000 3000 4000\n\n **Indexing just the rows**\n\n With a scalar integer.\n\n >>> type(df.iloc[0])\n \n >>> df.iloc[0]\n a 1\n b 2\n c 3\n d 4\n Name: 0, dtype: int64\n\n With a list of integers.\n\n >>> df.iloc[[0]]\n a b c d\n 0 1 2 3 4\n >>> type(df.iloc[[0]])\n \n\n >>> df.iloc[[0, 1]]\n a b c d\n 0 1 2 3 4\n 1 100 200 300 400\n\n With a `slice` object.\n\n >>> df.iloc[:3]\n a b c d\n 0 1 2 3 4\n 1 100 200 300 400\n 2 1000 2000 3000 4000\n\n With a boolean mask the same length as the index.\n\n >>> df.iloc[[True, False, True]]\n a b c d\n 0 1 2 3 4\n 2 1000 2000 3000 4000\n\n With a callable, useful in method chains. The `x` passed\n to the ``lambda`` is the DataFrame being sliced. This selects\n the rows whose index label even.\n\n >>> df.iloc[lambda x: x.index % 2 == 0]\n a b c d\n 0 1 2 3 4\n 2 1000 2000 3000 4000\n\n **Indexing both axes**\n\n You can mix the indexer types for the index and columns. Use ``:`` to\n select the entire axis.\n\n With scalar integers.\n\n >>> df.iloc[0, 1]\n 2\n\n With lists of integers.\n\n >>> df.iloc[[0, 2], [1, 3]]\n b d\n 0 2 4\n 2 2000 4000\n\n With `slice` objects.\n\n >>> df.iloc[1:3, 0:3]\n a b c\n 1 100 200 300\n 2 1000 2000 3000\n\n With a boolean array whose length matches the columns.\n\n >>> df.iloc[:, [True, False, True, False]]\n a c\n 0 1 3\n 1 100 300\n 2 1000 3000\n\n With a callable function that expects the Series or DataFrame.\n\n >>> df.iloc[:, lambda df: [0, 2]]\n a c\n 0 1 3\n 1 100 300\n 2 1000 3000\n ", "n_words": 527, "vocab_size": 251, "n_whitespaces": 1603, "language": "en" } }, { "id": 220322, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/base_events.py", "file_name": "base_events.py", "fun_name": "set_task_factory", "commit_message": "add python 3.10.4 for windows", "code": "def set_task_factory(self, factory):\n \n if factory is not None and not callable(factory):\n raise TypeError('task factory must be a callable or None')\n self._task_factory = factory\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 55, "n_words": 23, "vocab_size": 20, "complexity": 3, "nloc": 4, "token_counts": 30, "n_ast_nodes": 52, "n_identifiers": 6, "d_id": 55965, "documentation": { "docstring": "Set a task factory that will be used by loop.create_task().\n\n If factory is None the default task factory will be set.\n\n If factory is a callable, it should have a signature matching\n '(loop, coro)', where 'loop' will be a reference to the active\n event loop, 'coro' will be a coroutine object. 
The callable\n must return a Future.\n ", "n_words": 57, "vocab_size": 39, "n_whitespaces": 100, "language": "en" } }, { "id": 13573, "commit_id": "e4b930e6369f1ec69b07af6190d61aa3cb3d9cec", "repo": "jina", "path": "jina/serve/gateway.py", "file_name": "gateway.py", "fun_name": "ports", "commit_message": "refactor: add properties to gateway (#5417)", "code": "def ports(self):\n \n return self.runtime_args.port\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 22, "n_identifiers": 4, "d_id": 2687, "documentation": { "docstring": "Gets all the list of ports from the runtime_args as a list.\n :return: The lists of ports to be exposed\n ", "n_words": 20, "vocab_size": 17, "n_whitespaces": 34, "language": "en" } }, { "id": 204172, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/messages/storage/base.py", "file_name": "base.py", "fun_name": "_set_level", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _set_level(self, value=None):\n \n if value is None and hasattr(self, \"_level\"):\n del self._level\n else:\n self._level = int(value)\n\n level = property(_get_level, _set_level, _set_level)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 67, "n_words": 21, "vocab_size": 19, "complexity": 3, "nloc": 5, "token_counts": 36, "n_ast_nodes": 75, "n_identifiers": 9, "d_id": 50669, "documentation": { "docstring": "\n Set a custom minimum recorded level.\n\n If set to ``None``, the default level will be used (see the\n ``_get_level`` method).\n ", "n_words": 20, "vocab_size": 19, "n_whitespaces": 49, "language": "en" } }, { "id": 269626, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "_as_graph_element", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _as_graph_element(obj):\n \n conv_fn = getattr(obj, \"_as_graph_element\", None)\n if conv_fn and callable(conv_fn):\n return conv_fn()\n return None\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 34, "n_words": 15, "vocab_size": 13, "complexity": 3, "nloc": 5, "token_counts": 30, "n_ast_nodes": 52, "n_identifiers": 5, "d_id": 80244, "documentation": { "docstring": "Convert `obj` to a graph element if possible, otherwise return `None`.\n\n Args:\n obj: Object to convert.\n\n Returns:\n The result of `obj._as_graph_element()` if that method is available;\n otherwise `None`.\n ", "n_words": 28, "vocab_size": 24, "n_whitespaces": 56, "language": "en" } }, { "id": 128999, "commit_id": "70db5c5592d94b611fee0a334414f1f4f5cc151a", "repo": "ray", "path": "python/ray/_private/services.py", "file_name": "services.py", "fun_name": "_find_gcs_address_or_die", "commit_message": "[GCS][Bootstrap n/n] Do not start Redis in GCS bootstrapping mode (#21232)\n\nAfter this change in GCS bootstrapping mode, Redis no longer starts and `address` is treated as the GCS address of the Ray cluster.\r\n\r\nCo-authored-by: Yi Cheng \r\nCo-authored-by: Yi Cheng <74173148+iycheng@users.noreply.github.com>", "code": "def _find_gcs_address_or_die():\n \n gcs_addresses = 
_find_address_from_flag(\"--gcs-address\")\n if len(gcs_addresses) > 1:\n raise ConnectionError(\n f\"Found multiple active Ray instances: {gcs_addresses}. \"\n \"Please specify the one to connect to by setting `--address` flag \"\n \"or `RAY_ADDRESS` environment variable.\")\n sys.exit(1)\n elif not gcs_addresses:\n raise ConnectionError(\n \"Could not find any running Ray instance. \"\n \"Please specify the one to connect to by setting `--address` flag \"\n \"or `RAY_ADDRESS` environment variable.\")\n return gcs_addresses.pop()\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 168, "n_words": 66, "vocab_size": 43, "complexity": 3, "nloc": 14, "token_counts": 50, "n_ast_nodes": 102, "n_identifiers": 8, "d_id": 28869, "documentation": { "docstring": "Find one GCS address unambiguously, or raise an error.\n\n Callers outside of this module should use get_ray_address_to_use_or_die()\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 23, "language": "en" } }, { "id": 68528, "commit_id": "05dd1d6d15c6c8c66165e9f267078c3cf9aec10e", "repo": "erpnext", "path": "erpnext/accounts/doctype/tax_rule/tax_rule.py", "file_name": "tax_rule.py", "fun_name": "get_tax_template", "commit_message": "refactor: tax rule validity query (#30934)", "code": "def get_tax_template(posting_date, args):\n\t\n\targs = frappe._dict(args)\n\tconditions = []\n\n\tif posting_date:\n\t\tconditions.append(\n\t\t\tf\n\t\t)\n\telse:\n\t\tconditions.append(\"(from_date is null) and (to_date is null)\")\n\n\tconditions.append(\n\t\t\"ifnull(tax_category, '') = {0}\".format(frappe.db.escape(cstr(args.get(\"tax_category\"))))\n\t)\n\tif \"tax_category\" in args.keys():\n\t\tdel args[\"tax_category\"]\n\n\tfor key, value in args.items():\n\t\tif key == \"use_for_shopping_cart\":\n\t\t\tconditions.append(\"use_for_shopping_cart = {0}\".format(1 if value else 0))\n\t\telif key == \"customer_group\":\n\t\t\tif not value:\n\t\t\t\tvalue = get_root_of(\"Customer Group\")\n\t\t\tcustomer_group_condition = get_customer_group_condition(value)\n\t\t\tconditions.append(\"ifnull({0}, '') in ('', {1})\".format(key, customer_group_condition))\n\t\telse:\n\t\t\tconditions.append(\"ifnull({0}, '') in ('', {1})\".format(key, frappe.db.escape(cstr(value))))\n\n\ttax_rule = frappe.db.sql(\n\t\t.format(\n\t\t\t\" and \".join(conditions)\n\t\t),\n\t\tas_dict=True,\n\t)\n\n\tif not tax_rule:\n\t\treturn None\n\n\tfor rule in tax_rule:\n\t\trule.no_of_keys_matched = 0\n\t\tfor key in args:\n\t\t\tif rule.get(key):\n\t\t\t\trule.no_of_keys_matched += 1\n\n\tdef cmp(a, b):\n\t\t# refernce: https://docs.python.org/3.0/whatsnew/3.0.html#ordering-comparisons\n\t\treturn int(a > b) - int(a < b)\n\n\trule = sorted(\n\t\ttax_rule,\n\t\tkey=functools.cmp_to_key(\n\t\t\tlambda b, a: cmp(a.no_of_keys_matched, b.no_of_keys_matched) or cmp(a.priority, b.priority)\n\t\t),\n\t)[0]\n\n\ttax_template = rule.sales_tax_template or rule.purchase_tax_template\n\tdoctype = \"{0} Taxes and Charges Template\".format(rule.tax_type)\n\n\tif frappe.db.get_value(doctype, tax_template, \"disabled\") == 1:\n\t\treturn None\n\n\treturn tax_template\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 108, "n_words": 159, "vocab_size": 103, "complexity": 15, "nloc": 51, "token_counts": 312, "n_ast_nodes": 559, "n_identifiers": 39, "d_id": 14812, "documentation": { "docstring": "Get matching 
tax rule(from_date is null or from_date <= '{posting_date}')\n\t\t\tand (to_date is null or to_date >= '{posting_date}')select * from `tabTax Rule`\n\t\twhere {0}", "n_words": 24, "vocab_size": 21, "n_whitespaces": 21, "language": "en" } }, { "id": 196303, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/geometry/polygon.py", "file_name": "polygon.py", "fun_name": "apothem", "commit_message": "Updated import locations", "code": "def apothem(self):\n \n return self.radius * cos(S.Pi/self._n)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 21, "n_ast_nodes": 36, "n_identifiers": 7, "d_id": 47803, "documentation": { "docstring": "The inradius of the RegularPolygon.\n\n The apothem/inradius is the radius of the inscribed circle.\n\n Returns\n =======\n\n apothem : number or instance of Basic\n\n See Also\n ========\n\n sympy.geometry.line.Segment.length, sympy.geometry.ellipse.Circle.radius\n\n Examples\n ========\n\n >>> from sympy import Symbol\n >>> from sympy import RegularPolygon, Point\n >>> radius = Symbol('r')\n >>> rp = RegularPolygon(Point(0, 0), radius, 4)\n >>> rp.apothem\n sqrt(2)*r/2\n\n ", "n_words": 55, "vocab_size": 40, "n_whitespaces": 167, "language": "en" } }, { "id": 60894, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/network/lazy_wheel.py", "file_name": "lazy_wheel.py", "fun_name": "seek", "commit_message": "upd; format", "code": "def seek(self, offset, whence=0):\n # type: (int, int) -> int\n \n return self._file.seek(offset, whence)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 34, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 2, "token_counts": 23, "n_ast_nodes": 37, "n_identifiers": 5, "d_id": 12326, "documentation": { "docstring": "Change stream position and return the new absolute position.\n\n Seek to offset relative position indicated by whence:\n * 0: Start of stream (the default). 
pos should be >= 0;\n * 1: Current position - pos may be negative;\n * 2: End of stream - pos usually negative.\n ", "n_words": 47, "vocab_size": 36, "n_whitespaces": 83, "language": "en" } }, { "id": 161336, "commit_id": "2cf35b18a8716c963c0f9252544a3a8b9881cd6c", "repo": "rich", "path": "tests/test_syntax.py", "file_name": "test_syntax.py", "fun_name": "test_python_render_indent_guides", "commit_message": "Try using default theme in test to avoid ubuntu/macos variance", "code": "def test_python_render_indent_guides():\n syntax = Panel.fit(\n Syntax(\n CODE,\n lexer=\"python\",\n line_numbers=True,\n line_range=(2, 10),\n theme=\"default\",\n code_width=60,\n word_wrap=True,\n indent_guides=True,\n ),\n padding=0,\n )\n rendered_syntax = render(syntax)\n print(repr(rendered_syntax))\n expected = '╭────────────────────────────────────────────────────────────────╮\\n│\\x1b[1;38;2;24;24;24;48;2;248;248;248m \\x1b[0m\\x1b[38;2;173;173;173;48;2;248;248;248m 2 \\x1b[0m\\x1b[2;3;38;2;64;128;128;48;2;248;248;248m│ \\x1b[0m\\x1b[3;38;2;186;33;33;48;2;248;248;248m\\x1b[0m\\x1b[48;2;248;248;248m \\x1b[0m│\\n│\\x1b[1;38;2;24;24;24;48;2;248;248;248m \\x1b[0m\\x1b[38;2;173;173;173;48;2;248;248;248m 3 \\x1b[0m\\x1b[2;3;38;2;64;128;128;48;2;248;248;248m│ \\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248miter_values\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248m \\x1b[0m\\x1b[38;2;102;102;102;48;2;248;248;248m=\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248m \\x1b[0m\\x1b[38;2;0;128;0;48;2;248;248;248miter\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248m(\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248mvalues\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248m)\\x1b[0m\\x1b[48;2;248;248;248m \\x1b[0m│\\n│\\x1b[1;38;2;24;24;24;48;2;248;248;248m \\x1b[0m\\x1b[38;2;173;173;173;48;2;248;248;248m 4 \\x1b[0m\\x1b[2;3;38;2;64;128;128;48;2;248;248;248m│ \\x1b[0m\\x1b[1;38;2;0;128;0;48;2;248;248;248mtry\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248m:\\x1b[0m\\x1b[48;2;248;248;248m \\x1b[0m│\\n│\\x1b[1;38;2;24;24;24;48;2;248;248;248m \\x1b[0m\\x1b[38;2;173;173;173;48;2;248;248;248m 5 \\x1b[0m\\x1b[2;3;38;2;64;128;128;48;2;248;248;248m│ │ \\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248mprevious_value\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248m \\x1b[0m\\x1b[38;2;102;102;102;48;2;248;248;248m=\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248m \\x1b[0m\\x1b[38;2;0;128;0;48;2;248;248;248mnext\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248m(\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248miter_values\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248m)\\x1b[0m\\x1b[48;2;248;248;248m \\x1b[0m│\\n│\\x1b[1;38;2;24;24;24;48;2;248;248;248m \\x1b[0m\\x1b[38;2;173;173;173;48;2;248;248;248m 6 \\x1b[0m\\x1b[2;3;38;2;64;128;128;48;2;248;248;248m│ \\x1b[0m\\x1b[1;38;2;0;128;0;48;2;248;248;248mexcept\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248m \\x1b[0m\\x1b[1;38;2;210;65;58;48;2;248;248;248mStopIteration\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248m:\\x1b[0m\\x1b[48;2;248;248;248m \\x1b[0m│\\n│\\x1b[1;38;2;24;24;24;48;2;248;248;248m \\x1b[0m\\x1b[38;2;173;173;173;48;2;248;248;248m 7 \\x1b[0m\\x1b[2;3;38;2;64;128;128;48;2;248;248;248m│ │ \\x1b[0m\\x1b[1;38;2;0;128;0;48;2;248;248;248mreturn\\x1b[0m\\x1b[48;2;248;248;248m \\x1b[0m│\\n│\\x1b[1;38;2;24;24;24;48;2;248;248;248m \\x1b[0m\\x1b[38;2;173;173;173;48;2;248;248;248m 8 \\x1b[0m\\x1b[2;3;38;2;64;128;128;48;2;248;248;248m│ \\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248mfirst\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248m \\x1b[0m\\x1b[38;2;102;102;102;48;2;248;248;248m=\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248m 
\\x1b[0m\\x1b[1;38;2;0;128;0;48;2;248;248;248mTrue\\x1b[0m\\x1b[48;2;248;248;248m \\x1b[0m│\\n│\\x1b[1;38;2;24;24;24;48;2;248;248;248m \\x1b[0m\\x1b[38;2;173;173;173;48;2;248;248;248m 9 \\x1b[0m\\x1b[2;3;38;2;64;128;128;48;2;248;248;248m│ \\x1b[0m\\x1b[1;38;2;0;128;0;48;2;248;248;248mfor\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248m \\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248mvalue\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248m \\x1b[0m\\x1b[1;38;2;170;34;255;48;2;248;248;248min\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248m \\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248miter_values\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248m:\\x1b[0m\\x1b[48;2;248;248;248m \\x1b[0m│\\n│\\x1b[1;38;2;24;24;24;48;2;248;248;248m \\x1b[0m\\x1b[38;2;173;173;173;48;2;248;248;248m10 \\x1b[0m\\x1b[2;3;38;2;64;128;128;48;2;248;248;248m│ │ \\x1b[0m\\x1b[1;38;2;0;128;0;48;2;248;248;248myield\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248m \\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248mfirst\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248m,\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248m \\x1b[0m\\x1b[1;38;2;0;128;0;48;2;248;248;248mFalse\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248m,\\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248m \\x1b[0m\\x1b[38;2;0;0;0;48;2;248;248;248mprevious_value\\x1b[0m\\x1b[48;2;248;248;248m \\x1b[0m│\\n╰────────────────────────────────────────────────────────────────╯\\n'\n assert rendered_syntax == expected\n\n", "url": "https://github.com/Textualize/rich.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 546, "n_words": 89, "vocab_size": 59, "complexity": 1, "nloc": 18, "token_counts": 73, "n_ast_nodes": 294, "n_identifiers": 19, "d_id": 38965, "documentation": { "docstring": "Iterate and generate a tuple with a flag for first \\x1b[0m\\x1b[48;2;248;248;248m \\x1b[0m│\\n│\\x1b[48;2;248;248;248m \\x1b[0m\\x1b[3;38;2;186;33;33;48;2;248;248;248mand last value.", "n_words": 15, "vocab_size": 14, "n_whitespaces": 18, "language": "en" } }, { "id": 56175, "commit_id": "6a1cec80715c2b633362403a6be9470fc70c31e8", "repo": "prefect", "path": "src/prefect/blocks/core.py", "file_name": "core.py", "fun_name": "install", "commit_message": "Makes block installation recursive", "code": "async def install(cls):\n \n for field in cls.__fields__.values():\n if Block.is_block_class(field.type_):\n await field.type_.install()\n if get_origin(field.type_) is Union:\n for type in get_args(field.type_):\n if Block.is_block_class(type):\n await type.install()\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 131, "n_words": 23, "vocab_size": 18, "complexity": 8, "nloc": 25, "token_counts": 160, "n_ast_nodes": 116, "n_identifiers": 12, "d_id": 11450, "documentation": { "docstring": "\n Makes block available for configuration with current Orion server.\n Recursively installs all nested blocks. 
Installation is idempotent.\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 39, "language": "en" } }, { "id": 111418, "commit_id": "8387ce4c01db48d92ac5638e18316c0f1fc8861e", "repo": "spaCy", "path": "spacy/tests/doc/test_json_doc_conversion.py", "file_name": "test_json_doc_conversion.py", "fun_name": "test_json_to_doc_validation_error", "commit_message": "Add Doc.from_json() (#10688)\n\n* Implement Doc.from_json: rough draft.\r\n\r\n* Implement Doc.from_json: first draft with tests.\r\n\r\n* Implement Doc.from_json: added documentation on website for Doc.to_json(), Doc.from_json().\r\n\r\n* Implement Doc.from_json: formatting changes.\r\n\r\n* Implement Doc.to_json(): reverting unrelated formatting changes.\r\n\r\n* Implement Doc.to_json(): fixing entity and span conversion. Moving fixture and doc <-> json conversion tests into single file.\r\n\r\n* Implement Doc.from_json(): replaced entity/span converters with doc.char_span() calls.\r\n\r\n* Implement Doc.from_json(): handling sentence boundaries in spans.\r\n\r\n* Implementing Doc.from_json(): added parser-free sentence boundaries transfer.\r\n\r\n* Implementing Doc.from_json(): added parser-free sentence boundaries transfer.\r\n\r\n* Implementing Doc.from_json(): incorporated various PR feedback.\r\n\r\n* Renaming fixture for document without dependencies.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implementing Doc.from_json(): using two sent_starts instead of one.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implementing Doc.from_json(): doc_without_dependency_parser() -> doc_without_deps.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implementing Doc.from_json(): incorporating various PR feedback. Rebased on latest master.\r\n\r\n* Implementing Doc.from_json(): refactored Doc.from_json() to work with annotation IDs instead of their string representations.\r\n\r\n* Implement Doc.from_json(): reverting unwanted formatting/rebasing changes.\r\n\r\n* Implement Doc.from_json(): added check for char_span() calculation for entities.\r\n\r\n* Update spacy/tokens/doc.pyx\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): minor refactoring, additional check for token attribute consistency with corresponding test.\r\n\r\n* Implement Doc.from_json(): removed redundancy in annotation type key naming.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): Simplifying setting annotation values.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement doc.from_json(): renaming annot_types to token_attrs.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): adjustments for renaming of annot_types to token_attrs.\r\n\r\n* Implement Doc.from_json(): removing default categories.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): simplifying lexeme initialization.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): simplifying lexeme initialization.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): refactoring to only have keys for present annotations.\r\n\r\n* Implement Doc.from_json(): fix check for tokens' HEAD attributes.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): refactoring Doc.from_json().\r\n\r\n* Implement Doc.from_json(): fixing span_group retrieval.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): fixing span retrieval.\r\n\r\n* Implement Doc.from_json(): added schema for Doc JSON format. 
Minor refactoring in Doc.from_json().\r\n\r\n* Implement Doc.from_json(): added comment regarding Token and Span extension support.\r\n\r\n* Implement Doc.from_json(): renaming inconsistent_props to partial_attrs..\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): adjusting error message.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): extending E1038 message.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): added params to E1038 raises.\r\n\r\n* Implement Doc.from_json(): combined attribute collection with partial attributes check.\r\n\r\n* Implement Doc.from_json(): added optional schema validation.\r\n\r\n* Implement Doc.from_json(): fixed optional fields in schema, tests.\r\n\r\n* Implement Doc.from_json(): removed redundant None check for DEP.\r\n\r\n* Implement Doc.from_json(): added passing of schema validatoin message to E1037..\r\n\r\n* Implement Doc.from_json(): removing redundant error E1040.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): changing message for E1037.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): adjusted website docs and docstring of Doc.from_json().\r\n\r\n* Update spacy/tests/doc/test_json_doc_conversion.py\r\n\r\n* Implement Doc.from_json(): docstring update.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): docstring update.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): website docs update.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): docstring formatting.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): docstring formatting.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): fixing Doc reference in website docs.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): reformatted website/docs/api/doc.md.\r\n\r\n* Implement Doc.from_json(): bumped IDs of new errors to avoid merge conflicts.\r\n\r\n* Implement Doc.from_json(): fixing bug in tests.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Implement Doc.from_json(): fix setting of sentence starts for docs without DEP.\r\n\r\n* Implement Doc.from_json(): add check for valid char spans when manually setting sentence boundaries. Refactor sentence boundary setting slightly. 
Move error message for lack of support for partial token annotations to errors.py.\r\n\r\n* Implement Doc.from_json(): simplify token sentence start manipulation.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Combine related error messages\r\n\r\n* Update spacy/tests/doc/test_json_doc_conversion.py\r\n\r\nCo-authored-by: Adriane Boyd ", "code": "def test_json_to_doc_validation_error(doc):\n \n doc_json = doc.to_json()\n doc_json.pop(\"tokens\")\n with pytest.raises(ValueError):\n Doc(doc.vocab).from_json(doc_json, validate=True)\n", "url": "https://github.com/explosion/spaCy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 29, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 5, "token_counts": 42, "n_ast_nodes": 75, "n_identifiers": 12, "d_id": 24401, "documentation": { "docstring": "Test that Doc.from_json() raises an exception when validating invalid input.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 268968, "commit_id": "01c906c4178db5ae03b7eb2d298a052c952a0667", "repo": "keras", "path": "keras/layers/rnn/gru_lstm_utils.py", "file_name": "gru_lstm_utils.py", "fun_name": "is_sequence_right_padded", "commit_message": "Reorganize RNN layers, cells and wrappers into smaller logically organized files hosted under an `rnn` directory.\n\nPiperOrigin-RevId: 428841673", "code": "def is_sequence_right_padded(mask):\n \n max_seq_length = tf.shape(mask)[1]\n count_of_true = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1)\n right_padded_mask = tf.sequence_mask(\n count_of_true, maxlen=max_seq_length)\n return tf.reduce_all(tf.equal(mask, right_padded_mask))\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 28, "n_words": 18, "vocab_size": 16, "complexity": 1, "nloc": 6, "token_counts": 64, "n_ast_nodes": 100, "n_identifiers": 15, "d_id": 79793, "documentation": { "docstring": "Check the mask tensor and see if it right padded.\n\n For cuDNN kernel, it uses the sequence length param to skip the tailing\n timestep. 
If the data is left padded, or not a strict right padding (has\n masked value in the middle of the sequence), then cuDNN kernel won't be work\n properly in those cases.\n\n Left padded data: [[False, False, True, True, True]].\n Right padded data: [[True, True, True, False, False]].\n Mixture of mask/unmasked data: [[True, False, True, False, False]].\n\n Note that for the mixed data example above, the actually data RNN should see\n are those 2 Trues (index 0 and 2), the index 1 False should be ignored and not\n pollute the internal states.\n\n Args:\n mask: the Boolean tensor with shape [batch, timestep]\n\n Returns:\n boolean scalar tensor, whether the mask is strictly right padded.\n ", "n_words": 135, "vocab_size": 93, "n_whitespaces": 154, "language": "en" } }, { "id": 183346, "commit_id": "9e25752c859d25c172697236b94997a38c0799bf", "repo": "textual", "path": "src/textual/widgets/text_input.py", "file_name": "text_input.py", "fun_name": "query_cursor_left", "commit_message": "Scrolling within text input", "code": "def query_cursor_left(self) -> bool:\n \n previous_index = self.cursor_index\n new_index = max(0, previous_index - 1)\n return previous_index != new_index\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 45, "n_words": 17, "vocab_size": 13, "complexity": 1, "nloc": 5, "token_counts": 27, "n_ast_nodes": 45, "n_identifiers": 7, "d_id": 44157, "documentation": { "docstring": "Check if the cursor can move 1 character left in the text", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 260482, "commit_id": "82cd3d74f252e7d4c5e733b530897d499d5d640b", "repo": "scikit-learn", "path": "sklearn/utils/__init__.py", "file_name": "__init__.py", "fun_name": "resample", "commit_message": "DOC numpydoc validation for `resample` function (#23916)", "code": "def resample(*arrays, replace=True, n_samples=None, random_state=None, stratify=None):\n \n max_n_samples = n_samples\n random_state = check_random_state(random_state)\n\n if len(arrays) == 0:\n return None\n\n first = arrays[0]\n n_samples = first.shape[0] if hasattr(first, \"shape\") else len(first)\n\n if max_n_samples is None:\n max_n_samples = n_samples\n elif (max_n_samples > n_samples) and (not replace):\n raise ValueError(\n \"Cannot sample %d out of arrays with dim %d when replace is False\"\n % (max_n_samples, n_samples)\n )\n\n check_consistent_length(*arrays)\n\n if stratify is None:\n if replace:\n indices = random_state.randint(0, n_samples, size=(max_n_samples,))\n else:\n indices = np.arange(n_samples)\n random_state.shuffle(indices)\n indices = indices[:max_n_samples]\n else:\n # Code adapted from StratifiedShuffleSplit()\n y = check_array(stratify, ensure_2d=False, dtype=None)\n if y.ndim == 2:\n # for multi-label y, map each distinct row to a string repr\n # using join because str(row) uses an ellipsis if len(row) > 1000\n y = np.array([\" \".join(row.astype(\"str\")) for row in y])\n\n classes, y_indices = np.unique(y, return_inverse=True)\n n_classes = classes.shape[0]\n\n class_counts = np.bincount(y_indices)\n\n # Find the sorted list of instances for each class:\n # (np.unique above performs a sort, so code is O(n logn) already)\n class_indices = np.split(\n np.argsort(y_indices, kind=\"mergesort\"), np.cumsum(class_counts)[:-1]\n )\n\n n_i = _approximate_mode(class_counts, max_n_samples, random_state)\n\n indices = []\n\n for i in range(n_classes):\n indices_i = 
random_state.choice(class_indices[i], n_i[i], replace=replace)\n indices.extend(indices_i)\n\n indices = random_state.permutation(indices)\n\n # convert sparse matrices to CSR for row-based indexing\n arrays = [a.tocsr() if issparse(a) else a for a in arrays]\n resampled_arrays = [_safe_indexing(a, indices) for a in arrays]\n if len(resampled_arrays) == 1:\n # syntactic sugar for the unit argument case\n return resampled_arrays[0]\n else:\n return resampled_arrays\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 576, "n_words": 235, "vocab_size": 156, "complexity": 15, "nloc": 44, "token_counts": 365, "n_ast_nodes": 579, "n_identifiers": 54, "d_id": 76278, "documentation": { "docstring": "Resample arrays or sparse matrices in a consistent way.\n\n The default strategy implements one step of the bootstrapping\n procedure.\n\n Parameters\n ----------\n *arrays : sequence of array-like of shape (n_samples,) or \\\n (n_samples, n_outputs)\n Indexable data-structures can be arrays, lists, dataframes or scipy\n sparse matrices with consistent first dimension.\n\n replace : bool, default=True\n Implements resampling with replacement. If False, this will implement\n (sliced) random permutations.\n\n n_samples : int, default=None\n Number of samples to generate. If left to None this is\n automatically set to the first dimension of the arrays.\n If replace is False it should not be larger than the length of\n arrays.\n\n random_state : int, RandomState instance or None, default=None\n Determines random number generation for shuffling\n the data.\n Pass an int for reproducible results across multiple function calls.\n See :term:`Glossary `.\n\n stratify : array-like of shape (n_samples,) or (n_samples, n_outputs), \\\n default=None\n If not None, data is split in a stratified fashion, using this as\n the class labels.\n\n Returns\n -------\n resampled_arrays : sequence of array-like of shape (n_samples,) or \\\n (n_samples, n_outputs)\n Sequence of resampled copies of the collections. The original arrays\n are not impacted.\n\n See Also\n --------\n shuffle : Shuffle arrays or sparse matrices in a consistent way.\n\n Examples\n --------\n It is possible to mix sparse and dense arrays in the same run::\n\n >>> import numpy as np\n >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])\n >>> y = np.array([0, 1, 2])\n\n >>> from scipy.sparse import coo_matrix\n >>> X_sparse = coo_matrix(X)\n\n >>> from sklearn.utils import resample\n >>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)\n >>> X\n array([[1., 0.],\n [2., 1.],\n [1., 0.]])\n\n >>> X_sparse\n <3x2 sparse matrix of type '<... 'numpy.float64'>'\n with 4 stored elements in Compressed Sparse Row format>\n\n >>> X_sparse.toarray()\n array([[1., 0.],\n [2., 1.],\n [1., 0.]])\n\n >>> y\n array([0, 1, 0])\n\n >>> resample(y, n_samples=2, random_state=0)\n array([0, 1])\n\n Example using stratification::\n\n >>> y = [0, 0, 1, 1, 1, 1, 1, 1, 1]\n >>> resample(y, n_samples=5, replace=False, stratify=y,\n ... 
random_state=0)\n [1, 1, 1, 0, 1]\n ", "n_words": 329, "vocab_size": 188, "n_whitespaces": 705, "language": "en" } }, { "id": 74693, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/utils.py", "file_name": "utils.py", "fun_name": "resolve_model_string", "commit_message": "Reformat with black", "code": "def resolve_model_string(model_string, default_app=None):\n \n if isinstance(model_string, str):\n try:\n app_label, model_name = model_string.split(\".\")\n except ValueError:\n if default_app is not None:\n # If we can't split, assume a model in current app\n app_label = default_app\n model_name = model_string\n else:\n raise ValueError(\n \"Can not resolve {0!r} into a model. Model names \"\n \"should be in the form app_label.model_name\".format(model_string),\n model_string,\n )\n\n return apps.get_model(app_label, model_name)\n\n elif isinstance(model_string, type) and issubclass(model_string, Model):\n return model_string\n\n else:\n raise ValueError(\n \"Can not resolve {0!r} into a model\".format(model_string), model_string\n )\n\n\nSCRIPT_RE = re.compile(r\"<(-*)/script>\")\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 311, "n_words": 82, "vocab_size": 59, "complexity": 6, "nloc": 21, "token_counts": 101, "n_ast_nodes": 183, "n_identifiers": 18, "d_id": 16297, "documentation": { "docstring": "\n Resolve an 'app_label.model_name' string into an actual model class.\n If a model class is passed in, just return that.\n\n Raises a LookupError if a model can not be found, or ValueError if passed\n something that is neither a model or a string.\n ", "n_words": 42, "vocab_size": 30, "n_whitespaces": 58, "language": "en" } }, { "id": 109085, "commit_id": "d86a5050b57fc2f3f95d23d94f6c64f86dac2cd3", "repo": "matplotlib", "path": "lib/matplotlib/backend_tools.py", "file_name": "backend_tools.py", "fun_name": "trigger", "commit_message": "Fix method subclassing inconsistencies", "code": "def trigger(self, sender, event, data=None):\n \n if not self.figure.canvas.widgetlock.available(sender):\n return\n if data is not None:\n self.draw_rubberband(*data)\n else:\n self.remove_rubberband()\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 78, "n_words": 17, "vocab_size": 15, "complexity": 3, "nloc": 7, "token_counts": 50, "n_ast_nodes": 82, "n_identifiers": 11, "d_id": 23431, "documentation": { "docstring": "Call `draw_rubberband` or `remove_rubberband` based on data.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 213600, "commit_id": "d743336b1f3654cd0315f380f43eed4116997c1d", "repo": "ivy", "path": "ivy/core/device.py", "file_name": "device.py", "fun_name": "default_device", "commit_message": "renamed dev_str arg to dev for all methods.", "code": "def default_device(dev=None):\n \n if ivy.exists(dev):\n _assert_dev_correct_formatting(dev)\n return dev\n global default_device_stack\n if not default_device_stack:\n default_device_stack = ['gpu:0'] if ivy.gpu_is_available() else ['cpu']\n return default_device_stack[-1]\n\n\n# noinspection PyShadowingNames", "url": "https://github.com/unifyai/ivy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 59, "n_words": 24, "vocab_size": 20, "complexity": 4, "nloc": 8, "token_counts": 49, "n_ast_nodes": 86, "n_identifiers": 
7, "d_id": 53669, "documentation": { "docstring": "\n Return the input dev if provided, otherwise return the global default device.\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 19, "language": "en" } }, { "id": 275861, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/hdf5_format.py", "file_name": "hdf5_format.py", "fun_name": "save_optimizer_weights_to_hdf5_group", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def save_optimizer_weights_to_hdf5_group(hdf5_group, optimizer):\n \n\n symbolic_weights = getattr(optimizer, \"weights\")\n if symbolic_weights:\n weights_group = hdf5_group.create_group(\"optimizer_weights\")\n weight_names = [str(w.name).encode(\"utf8\") for w in symbolic_weights]\n save_attributes_to_hdf5_group(\n weights_group, \"weight_names\", weight_names\n )\n weight_values = backend.batch_get_value(symbolic_weights)\n for name, val in zip(weight_names, weight_values):\n param_dset = weights_group.create_dataset(\n name, val.shape, dtype=val.dtype\n )\n if not val.shape:\n # scalar\n param_dset[()] = val\n else:\n param_dset[:] = val\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 218, "n_words": 52, "vocab_size": 38, "complexity": 5, "nloc": 17, "token_counts": 113, "n_ast_nodes": 185, "n_identifiers": 22, "d_id": 81486, "documentation": { "docstring": "Saves optimizer weights of a optimizer to a HDF5 group.\n\n Args:\n hdf5_group: HDF5 group.\n optimizer: optimizer instance.\n ", "n_words": 17, "vocab_size": 12, "n_whitespaces": 37, "language": "en" } }, { "id": 198305, "commit_id": "2a1afca9477eb781f16d5d6b63fa37abed7740a3", "repo": "sympy", "path": "sympy/simplify/powsimp.py", "file_name": "powsimp.py", "fun_name": "powdenest", "commit_message": "Use sympify less", "code": "def powdenest(eq, force=False, polar=False):\n r\n from sympy.simplify.simplify import posify\n\n if force:", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 19, "n_words": 11, "vocab_size": 11, "complexity": 8, "nloc": 110, "token_counts": 182, "n_ast_nodes": 39, "n_identifiers": 7, "d_id": 48865, "documentation": { "docstring": "\n Collect exponents on powers as assumptions allow.\n\n Explanation\n ===========\n\n Given ``(bb**be)**e``, this can be simplified as follows:\n * if ``bb`` is positive, or\n * ``e`` is an integer, or\n * ``|be| < 1`` then this simplifies to ``bb**(be*e)``\n\n Given a product of powers raised to a power, ``(bb1**be1 *\n bb2**be2...)**e``, simplification can be done as follows:\n\n - if e is positive, the gcd of all bei can be joined with e;\n - all non-negative bb can be separated from those that are negative\n and their gcd can be joined with e; autosimplification already\n handles this separation.\n - integer factors from powers that have integers in the denominator\n of the exponent can be removed from any term and the gcd of such\n integers can be joined with e\n\n Setting ``force`` to ``True`` will make symbols that are not explicitly\n negative behave as though they are positive, resulting in more\n denesting.\n\n Setting ``polar`` to ``True`` will do simplifications on the Riemann surface of\n the logarithm, also resulting in more denestings.\n\n When there are sums of logs in exp() then a product of powers may be\n obtained e.g. 
``exp(3*(log(a) + 2*log(b)))`` - > ``a**3*b**6``.\n\n Examples\n ========\n\n >>> from sympy.abc import a, b, x, y, z\n >>> from sympy import Symbol, exp, log, sqrt, symbols, powdenest\n\n >>> powdenest((x**(2*a/3))**(3*x))\n (x**(2*a/3))**(3*x)\n >>> powdenest(exp(3*x*log(2)))\n 2**(3*x)\n\n Assumptions may prevent expansion:\n\n >>> powdenest(sqrt(x**2))\n sqrt(x**2)\n\n >>> p = symbols('p', positive=True)\n >>> powdenest(sqrt(p**2))\n p\n\n No other expansion is done.\n\n >>> i, j = symbols('i,j', integer=True)\n >>> powdenest((x**x)**(i + j)) # -X-> (x**x)**i*(x**x)**j\n x**(x*(i + j))\n\n But exp() will be denested by moving all non-log terms outside of\n the function; this may result in the collapsing of the exp to a power\n with a different base:\n\n >>> powdenest(exp(3*y*log(x)))\n x**(3*y)\n >>> powdenest(exp(y*(log(a) + log(b))))\n (a*b)**y\n >>> powdenest(exp(3*(log(a) + log(b))))\n a**3*b**3\n\n If assumptions allow, symbols can also be moved to the outermost exponent:\n\n >>> i = Symbol('i', integer=True)\n >>> powdenest(((x**(2*i))**(3*y))**x)\n ((x**(2*i))**(3*y))**x\n >>> powdenest(((x**(2*i))**(3*y))**x, force=True)\n x**(6*i*x*y)\n\n >>> powdenest(((x**(2*a/3))**(3*y/i))**x)\n ((x**(2*a/3))**(3*y/i))**x\n >>> powdenest((x**(2*i)*y**(4*i))**z, force=True)\n (x*y**2)**(2*i*z)\n\n >>> n = Symbol('n', negative=True)\n\n >>> powdenest((x**i)**y, force=True)\n x**(i*y)\n >>> powdenest((n**i)**x, force=True)\n (n**i)**x\n\n ", "n_words": 341, "vocab_size": 199, "n_whitespaces": 560, "language": "en" } }, { "id": 109157, "commit_id": "e94dfed864a8bbeb215bab5705a490325ac07819", "repo": "matplotlib", "path": "lib/mpl_toolkits/axes_grid1/axes_divider.py", "file_name": "axes_divider.py", "fun_name": "set_anchor", "commit_message": "Improve argument checking", "code": "def set_anchor(self, anchor):\n \n if isinstance(anchor, str):\n _api.check_in_list(mtransforms.Bbox.coefs, anchor=anchor)\n elif not isinstance(anchor, (tuple, list)) or len(anchor) != 2:\n raise TypeError(\"anchor must be str or 2-tuple\")\n self._anchor = anchor\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 77, "n_words": 27, "vocab_size": 25, "complexity": 4, "nloc": 6, "token_counts": 60, "n_ast_nodes": 97, "n_identifiers": 15, "d_id": 23457, "documentation": { "docstring": "\n Parameters\n ----------\n anchor : (float, float) or {'C', 'SW', 'S', 'SE', 'E', 'NE', 'N', \\\n'NW', 'W'}\n Either an (*x*, *y*) pair of relative coordinates (0 is left or\n bottom, 1 is right or top), 'C' (center), or a cardinal direction\n ('SW', southwest, is bottom left, etc.).\n\n See Also\n --------\n .Axes.set_anchor\n ", "n_words": 51, "vocab_size": 46, "n_whitespaces": 133, "language": "en" } }, { "id": 223619, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/_parseaddr.py", "file_name": "_parseaddr.py", "fun_name": "parsedate_tz", "commit_message": "add python 3.10.4 for windows", "code": "def parsedate_tz(data):\n \n res = _parsedate_tz(data)\n if not res:\n return\n if res[9] is None:\n res[9] = 0\n return tuple(res)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 47, "n_words": 18, "vocab_size": 14, "complexity": 3, "nloc": 7, "token_counts": 36, "n_ast_nodes": 61, "n_identifiers": 5, "d_id": 57010, "documentation": { "docstring": "Convert a date string to a time 
tuple.\n\n Accounts for military timezones.\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 18, "language": "en" } }, { "id": 19200, "commit_id": "1ddb2c9b5ace0fa605195a4b14c595e274a8c384", "repo": "mlflow", "path": "mlflow/sklearn/utils.py", "file_name": "utils.py", "fun_name": "_get_class_labels_from_estimator", "commit_message": "Use `len(classes_)` instead of `len(set(y_true))` (#5275)\n\n* Use n_classes instead of len(set(y_true))\r\n\r\nSigned-off-by: harupy <17039389+harupy@users.noreply.github.com>\r\n\r\n* fix attribute\r\n\r\nSigned-off-by: harupy <17039389+harupy@users.noreply.github.com>\r\n\r\n* use classes_\r\n\r\nSigned-off-by: harupy <17039389+harupy@users.noreply.github.com>\r\n\r\n* handle meta estimator\r\n\r\nSigned-off-by: harupy <17039389+harupy@users.noreply.github.com>\r\n\r\n* address comment\r\n\r\nSigned-off-by: harupy <17039389+harupy@users.noreply.github.com>\r\n\r\n* fix\r\n\r\nSigned-off-by: harupy <17039389+harupy@users.noreply.github.com>", "code": "def _get_class_labels_from_estimator(estimator):\n \n return estimator.classes_ if hasattr(estimator, \"classes_\") else None\n\n", "url": "https://github.com/mlflow/mlflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 15, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 2, "token_counts": 19, "n_ast_nodes": 33, "n_identifiers": 4, "d_id": 2911, "documentation": { "docstring": "\n Extracts class labels from `estimator` if `estimator.classes` is available.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 16, "language": "en" } }, { "id": 265845, "commit_id": "ffce5d968d8a77c97852999b6ef916e80c1de55f", "repo": "netbox", "path": "netbox/netbox/search/backends.py", "file_name": "backends.py", "fun_name": "get_search_choices", "commit_message": "8927 plugin search (#10489)\n\n* #7016 base search classes\r\n\r\n* 7016 add search indexes\r\n\r\n* 7016 add search indexes\r\n\r\n* 7016 add search indexes\r\n\r\n* 7016 add search indexes\r\n\r\n* 7016 add search indexes\r\n\r\n* 7016 add search indexes\r\n\r\n* 8927 refactor search\r\n\r\n* 8927 refactor search\r\n\r\n* 8927 refactor search\r\n\r\n* 8927 refactor search\r\n\r\n* 8927 get search choices working\r\n\r\n* 8927 cleanup - optimize\r\n\r\n* 8927 use backend search function\r\n\r\n* 8927 fix for plugin search\r\n\r\n* 8927 add docs\r\n\r\n* Move search app to a module under netbox/\r\n\r\n* Utilize global registry to register model search classes\r\n\r\n* Build search form options from registry\r\n\r\n* Determine search categories from model app by default\r\n\r\n* Enable dynamic search registration for plugins\r\n\r\n* Update docs & improve plugin support\r\n\r\n* Clean up search backend class\r\n\r\n* Docs for #8927\r\n\r\nCo-authored-by: jeremystretch ", "code": "def get_search_choices(self):\n \n if not self._search_choice_options:\n\n # Organize choices by category\n categories = defaultdict(dict)\n for app_label, models in registry['search'].items():\n for name, cls in models.items():\n title = cls.model._meta.verbose_name.title()\n categories[cls.get_category()][name] = title\n\n # Compile a nested tuple of choices for form rendering\n results = (\n ('', 'All Objects'),\n *[(category, choices.items()) for category, choices in categories.items()]\n )\n\n self._search_choice_options = results\n\n return self._search_choice_options\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 239, 
"n_words": 58, "vocab_size": 43, "complexity": 5, "nloc": 13, "token_counts": 110, "n_ast_nodes": 181, "n_identifiers": 20, "d_id": 78214, "documentation": { "docstring": "Return the set of choices for individual object types, organized by category.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 50754, "commit_id": "a6790a651a12eb391060e533868bf0ba197f6f7e", "repo": "PaddleHub", "path": "modules/image/text_to_image/stable_diffusion/diffusers/schedulers/scheduling_ddim.py", "file_name": "scheduling_ddim.py", "fun_name": "betas_for_alpha_bar", "commit_message": "Add stable diffusion module", "code": "def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):\n \n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 6, "n_words": 3, "vocab_size": 3, "complexity": 2, "nloc": 8, "token_counts": 74, "n_ast_nodes": 18, "n_identifiers": 3, "d_id": 10206, "documentation": { "docstring": "\n Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of\n (1-beta) over time from t = [0,1].\n\n :param num_diffusion_timesteps: the number of betas to produce. :param alpha_bar: a lambda that takes an argument t\n from 0 to 1 and\n produces the cumulative product of (1-beta) up to that part of the diffusion process.\n :param max_beta: the maximum beta to use; use values lower than 1 to\n prevent singularities.\n ", "n_words": 74, "vocab_size": 50, "n_whitespaces": 134, "language": "en" } }, { "id": 198386, "commit_id": "7d773eb18daaef3c54f34d1ac6cbc5b83a5bb16c", "repo": "sympy", "path": "sympy/integrals/intpoly.py", "file_name": "intpoly.py", "fun_name": "left_integral3D", "commit_message": "Cleanup loops and ranges", "code": "def left_integral3D(facets, index, expr, vertices, hp_param, degree):\n \n value = S.Zero\n facet = facets[index]\n x0 = vertices[facet[0]]\n facet_len = len(facet)\n for i, fac in enumerate(facet):\n side = (vertices[fac], vertices[facet[(i + 1) % facet_len]])\n value += distance_to_side(x0, side, hp_param[0]) * \\\n lineseg_integrate(facet, i, side, expr, degree)\n return value\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 92, "n_words": 46, "vocab_size": 37, "complexity": 2, "nloc": 10, "token_counts": 103, "n_ast_nodes": 149, "n_identifiers": 20, "d_id": 48898, "documentation": { "docstring": "Computes the left integral of Eq 10 in Chin et al.\n\n Explanation\n ===========\n\n For the 3D case, this is the sum of the integral values over constituting\n line segments of the face (which is accessed by facets[index]) multiplied\n by the distance between the first point of facet and that line segment.\n\n Parameters\n ==========\n\n facets :\n List of faces of the 3-Polytope.\n index :\n Index of face over which integral is to be calculated.\n expr :\n Input polynomial.\n vertices :\n List of vertices that constitute the 3-Polytope.\n hp_param :\n The hyperplane parameters of the face.\n degree :\n Degree of the ``expr``.\n\n Examples\n ========\n\n >>> from sympy.integrals.intpoly import left_integral3D\n >>> cube = [[(0, 0, 0), (0, 0, 5), (0, 5, 0), (0, 5, 5), (5, 0, 0),\\\n (5, 0, 5), (5, 5, 0), (5, 5, 5)],\\\n [2, 6, 7, 3], [3, 7, 5, 1], [7, 6, 4, 5], [1, 5, 4, 0],\\\n [3, 1, 0, 2], [0, 4, 6, 2]]\n >>> facets = cube[1:]\n >>> vertices = cube[0]\n >>> left_integral3D(facets, 3, 1, vertices, 
([0, -1, 0], -5), 0)\n -50\n ", "n_words": 177, "vocab_size": 108, "n_whitespaces": 333, "language": "en" } }, { "id": 161060, "commit_id": "b617a87ee40ab384767a27335313c2c65ee094ec", "repo": "MockingBird", "path": "ppg2mel/utils/nets_utils.py", "file_name": "nets_utils.py", "fun_name": "pad_list", "commit_message": "Init ppg extractor and ppg2mel (#375)\n\n* Init ppg extractor and ppg2mel\r\n\r\n* add preprocess and training\r\n\r\n* FIx known issues\r\n\r\n* Update __init__.py\r\n\r\nAllow to gen audio\r\n\r\n* Fix length issue\r\n\r\n* Fix bug of preparing fid\r\n\r\n* Fix sample issues\r\n\r\n* Add UI usage of PPG-vc", "code": "def pad_list(xs, pad_value):\n \n n_batch = len(xs)\n max_len = max(x.size(0) for x in xs)\n pad = xs[0].new(n_batch, max_len, *xs[0].size()[1:]).fill_(pad_value)\n\n for i in range(n_batch):\n pad[i, :xs[i].size(0)] = xs[i]\n\n return pad\n\n", "url": "https://github.com/babysor/MockingBird.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 53, "n_words": 28, "vocab_size": 22, "complexity": 3, "nloc": 7, "token_counts": 91, "n_ast_nodes": 140, "n_identifiers": 14, "d_id": 38876, "documentation": { "docstring": "Perform padding for the list of tensors.\n\n Args:\n xs (List): List of Tensors [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)].\n pad_value (float): Value for padding.\n\n Returns:\n Tensor: Padded tensor (B, Tmax, `*`).\n\n Examples:\n >>> x = [torch.ones(4), torch.ones(2), torch.ones(1)]\n >>> x\n [tensor([1., 1., 1., 1.]), tensor([1., 1.]), tensor([1.])]\n >>> pad_list(x, 0)\n tensor([[1., 1., 1., 1.],\n [1., 1., 0., 0.],\n [1., 0., 0., 0.]])\n\n ", "n_words": 63, "vocab_size": 49, "n_whitespaces": 161, "language": "en" } }, { "id": 154395, "commit_id": "1c0935c1bc0856d43f69c1e32498636ee24ebc85", "repo": "modin", "path": "modin/experimental/core/execution/native/implementations/omnisci_on_native/base_worker.py", "file_name": "base_worker.py", "fun_name": "_genName", "commit_message": "FEAT-#4913: Enabling pyhdk (#4900)\n\nCo-authored-by: ienkovich \r\nSigned-off-by: izamyati ", "code": "def _genName(cls, name):\n \n if not name:\n name = \"frame_\" + str(uuid.uuid4()).replace(\"-\", \"\")\n # TODO: reword name in case of caller's mistake\n return name\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 62, "n_words": 23, "vocab_size": 21, "complexity": 2, "nloc": 4, "token_counts": 33, "n_ast_nodes": 62, "n_identifiers": 7, "d_id": 35956, "documentation": { "docstring": "\n Generate or mangle a table name.\n\n Parameters\n ----------\n name : str or None\n Table name to mangle or None to generate a unique\n table name.\n\n Returns\n -------\n str\n Table name.\n ", "n_words": 30, "vocab_size": 18, "n_whitespaces": 120, "language": "en" } }, { "id": 111629, "commit_id": "3f6a8274a97bf003b5eadc05faa324162b7f4123", "repo": "nni", "path": "nni/experiment/config/base.py", "file_name": "base.py", "fun_name": "_canonicalize", "commit_message": "Some string changes around experiment module (#4442)", "code": "def _canonicalize(self, parents):\n \n for field in dataclasses.fields(self):\n value = getattr(self, field.name)\n if isinstance(value, (Path, str)) and utils.is_path_like(field.type):\n setattr(self, field.name, utils.resolve_path(value, self._base_path))\n else:\n _recursive_canonicalize_child(value, [self] + parents)\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 103, "n_words": 26, "vocab_size": 26, "complexity": 4, "nloc": 7, "token_counts": 80, "n_ast_nodes": 122, "n_identifiers": 19, "d_id": 24461, "documentation": { "docstring": "\n To be overrided by subclass.\n\n Convert the config object to canonical format.\n\n The default implementation will:\n\n 1. Resolve all ``PathLike`` fields to absolute path\n 2. Call ``_canonicalize([self] + parents)`` on all children config objects, including those inside list and dict\n\n If the subclass has nested config fields, be careful about where to call ``super()._canonicalize()``.\n\n Parameters\n ----------\n parents : list[ConfigBase]\n The upper level config objects.\n For example local training service's ``trialGpuNumber`` will be copied from top level when not set,\n in this case it will be invoked like ``localConfig._canonicalize([experimentConfig])``.\n ", "n_words": 88, "vocab_size": 75, "n_whitespaces": 192, "language": "en" } }, { "id": 259992, "commit_id": "6ca1f5e4d0d16bc9a7f28582079a15e14f012719", "repo": "scikit-learn", "path": "sklearn/ensemble/tests/test_iforest.py", "file_name": "test_iforest.py", "fun_name": "test_iforest_sparse", "commit_message": "TST use global_random_seed in sklearn/ensemble/tests/test_iforest.py (#22901)\n\n\r\n\r\nCo-authored-by: jeremie du boisberranger \r\nCo-authored-by: Guillaume Lemaitre \r\nCo-authored-by: Olivier Grisel ", "code": "def test_iforest_sparse(global_random_seed):\n \n rng = check_random_state(global_random_seed)\n X_train, X_test = train_test_split(diabetes.data[:50], random_state=rng)\n grid = ParameterGrid({\"max_samples\": [0.5, 1.0], \"bootstrap\": [True, False]})\n\n for sparse_format in [csc_matrix, csr_matrix]:\n X_train_sparse = sparse_format(X_train)\n X_test_sparse = sparse_format(X_test)\n\n for params in grid:\n # Trained on sparse format\n sparse_classifier = IsolationForest(\n n_estimators=10, random_state=global_random_seed, **params\n ).fit(X_train_sparse)\n sparse_results = sparse_classifier.predict(X_test_sparse)\n\n # Trained on dense format\n dense_classifier = IsolationForest(\n n_estimators=10, random_state=global_random_seed, **params\n ).fit(X_train)\n dense_results = dense_classifier.predict(X_test)\n\n assert_array_equal(sparse_results, dense_results)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 230, "n_words": 65, "vocab_size": 47, "complexity": 3, "nloc": 17, "token_counts": 144, "n_ast_nodes": 221, "n_identifiers": 27, "d_id": 76025, "documentation": { "docstring": "Check IForest for various parameter settings on sparse input.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 66060, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/hr/doctype/department_approver/department_approver.py", "file_name": "department_approver.py", "fun_name": "get_approvers", "commit_message": "style: format code with black", "code": "def get_approvers(doctype, txt, searchfield, start, page_len, filters):\n\n\tif not filters.get(\"employee\"):\n\t\tfrappe.throw(_(\"Please select Employee first.\"))\n\n\tapprovers = []\n\tdepartment_details = {}\n\tdepartment_list = []\n\temployee = frappe.get_value(\n\t\t\"Employee\",\n\t\tfilters.get(\"employee\"),\n\t\t[\"employee_name\", \"department\", \"leave_approver\", \"expense_approver\", 
\"shift_request_approver\"],\n\t\tas_dict=True,\n\t)\n\n\temployee_department = filters.get(\"department\") or employee.department\n\tif employee_department:\n\t\tdepartment_details = frappe.db.get_value(\n\t\t\t\"Department\", {\"name\": employee_department}, [\"lft\", \"rgt\"], as_dict=True\n\t\t)\n\tif department_details:\n\t\tdepartment_list = frappe.db.sql(\n\t\t\t,\n\t\t\t(department_details.lft, department_details.rgt),\n\t\t\tas_list=True,\n\t\t)\n\n\tif filters.get(\"doctype\") == \"Leave Application\" and employee.leave_approver:\n\t\tapprovers.append(\n\t\t\tfrappe.db.get_value(\"User\", employee.leave_approver, [\"name\", \"first_name\", \"last_name\"])\n\t\t)\n\n\tif filters.get(\"doctype\") == \"Expense Claim\" and employee.expense_approver:\n\t\tapprovers.append(\n\t\t\tfrappe.db.get_value(\"User\", employee.expense_approver, [\"name\", \"first_name\", \"last_name\"])\n\t\t)\n\n\tif filters.get(\"doctype\") == \"Shift Request\" and employee.shift_request_approver:\n\t\tapprovers.append(\n\t\t\tfrappe.db.get_value(\n\t\t\t\t\"User\", employee.shift_request_approver, [\"name\", \"first_name\", \"last_name\"]\n\t\t\t)\n\t\t)\n\n\tif filters.get(\"doctype\") == \"Leave Application\":\n\t\tparentfield = \"leave_approvers\"\n\t\tfield_name = \"Leave Approver\"\n\telif filters.get(\"doctype\") == \"Expense Claim\":\n\t\tparentfield = \"expense_approvers\"\n\t\tfield_name = \"Expense Approver\"\n\telif filters.get(\"doctype\") == \"Shift Request\":\n\t\tparentfield = \"shift_request_approver\"\n\t\tfield_name = \"Shift Request Approver\"\n\tif department_list:\n\t\tfor d in department_list:\n\t\t\tapprovers += frappe.db.sql(\n\t\t\t\t,\n\t\t\t\t(d, \"%\" + txt + \"%\", parentfield),\n\t\t\t\tas_list=True,\n\t\t\t)\n\n\tif len(approvers) == 0:\n\t\terror_msg = _(\"Please set {0} for the Employee: {1}\").format(\n\t\t\tfield_name, frappe.bold(employee.employee_name)\n\t\t)\n\t\tif department_list:\n\t\t\terror_msg += _(\" or for Department: {0}\").format(frappe.bold(employee_department))\n\t\tfrappe.throw(error_msg, title=_(field_name + \" Missing\"))\n\n\treturn set(tuple(approver) for approver in approvers)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 137, "n_words": 198, "vocab_size": 115, "complexity": 19, "nloc": 69, "token_counts": 420, "n_ast_nodes": 728, "n_identifiers": 40, "d_id": 14097, "documentation": { "docstring": "select name from `tabDepartment` where lft <= %s\n\t\t\tand rgt >= %s\n\t\t\tand disabled=0\n\t\t\torder by lft descselect user.name, user.first_name, user.last_name from\n\t\t\t\ttabUser user, `tabDepartment Approver` approver where\n\t\t\t\tapprover.parent = %s\n\t\t\t\tand user.name like %s\n\t\t\t\tand approver.parentfield = %s\n\t\t\t\tand approver.approver=user.name", "n_words": 41, "vocab_size": 29, "n_whitespaces": 32, "language": "en" } }, { "id": 186364, "commit_id": "eeca208c8f57304590ac1af80b496e61021aaa45", "repo": "certbot", "path": "certbot-apache/certbot_apache/_internal/configurator.py", "file_name": "configurator.py", "fun_name": "_add_name_vhost_if_necessary", "commit_message": "Various clean-ups in certbot-apache. Use f-strings. (#9132)\n\n* Various clean-ups in certbot-apache. 
Use f-strings.\r\n\r\n* Smaller tweaks", "code": "def _add_name_vhost_if_necessary(self, vhost):\n \n need_to_save = False\n\n # See if the exact address appears in any other vhost\n # Remember 1.1.1.1:* == 1.1.1.1 -> hence any()\n for addr in vhost.addrs:\n # In Apache 2.2, when a NameVirtualHost directive is not\n # set, \"*\" and \"_default_\" will conflict when sharing a port\n addrs = {addr,}\n if addr.get_addr() in (\"*\", \"_default_\"):\n addrs.update(obj.Addr((a, addr.get_port(),))\n for a in (\"*\", \"_default_\"))\n\n for test_vh in self.vhosts:\n if (vhost.filep != test_vh.filep and\n any(test_addr in addrs for\n test_addr in test_vh.addrs) and not self.is_name_vhost(addr)):\n self.add_name_vhost(addr)\n logger.info(\"Enabling NameVirtualHosts on %s\", addr)\n need_to_save = True\n break\n\n if need_to_save:\n self.save()\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 381, "n_words": 97, "vocab_size": 71, "complexity": 10, "nloc": 17, "token_counts": 130, "n_ast_nodes": 216, "n_identifiers": 22, "d_id": 45461, "documentation": { "docstring": "Add NameVirtualHost Directives if necessary for new vhost.\n\n NameVirtualHosts was a directive in Apache < 2.4\n https://httpd.apache.org/docs/2.2/mod/core.html#namevirtualhost\n\n :param vhost: New virtual host that was recently created.\n :type vhost: :class:`~certbot_apache._internal.obj.VirtualHost`\n\n ", "n_words": 29, "vocab_size": 27, "n_whitespaces": 64, "language": "en" } }, { "id": 261382, "commit_id": "ff9344f3d8d11d38fa3a2497199113e5bac9537c", "repo": "scikit-learn", "path": "sklearn/linear_model/_glm/_newton_solver.py", "file_name": "_newton_solver.py", "fun_name": "line_search", "commit_message": "FEA add (single) Cholesky Newton solver to GLMs (#24637)\n\n* FEA add NewtonSolver, CholeskyNewtonSolver and QRCholeskyNewtonSolver\n\n* ENH better singular hessian special solve\n\n* CLN fix some typos found by reviewer\n\n* TST assert ConvergenceWarning is raised\n\n* MNT add BaseCholeskyNewtonSolver\n\n* WIP colinear design in GLMs\n\n* FIX _solve_singular\n\n* FIX false unpacking in\n\n* TST add tests for unpenalized GLMs\n\n* TST fix solutions of glm_dataset\n\n* ENH add SVDFallbackSolver\n\n* CLN remove SVDFallbackSolver\n\n* ENH use gradient step for singular hessians\n\n* ENH print iteration number in warnings\n\n* TST improve test_linalg_warning_with_newton_solver\n\n* CLN LinAlgWarning fron scipy.linalg\n\n* ENH more robust hessian\n\n* ENH increase maxls for lbfgs to make it more robust\n\n* ENH add hessian_warning for too many negative hessian values\n\n* CLN some warning messages\n\n* ENH add lbfgs_step\n\n* ENH use lbfgs_step for hessian_warning\n\n* TST make them pass\n\n* TST tweek rtol for lbfgs\n\n* TST add rigoros test for GLMs\n\n* TST improve test_warm_start\n\n* ENH improve lbfgs options for better convergence\n\n* CLN fix test_warm_start\n\n* TST fix assert singular values in datasets\n\n* CLN address most review comments\n\n* ENH enable more vebosity levels for lbfgs\n\n* DOC add whatsnew\n\n* CLN remove xfail and clean a bit\n\n* CLN docstring about minimum norm\n\n* More informative repr for the glm_dataset fixture cases\n\n* Forgot to run black\n\n* CLN remove unnecessary filterwarnings\n\n* CLN address review comments\n\n* Trigger [all random seeds] on the following 
tests:\ntest_glm_regression\ntest_glm_regression_hstacked_X\ntest_glm_regression_vstacked_X\ntest_glm_regression_unpenalized\ntest_glm_regression_unpenalized_hstacked_X\ntest_glm_regression_unpenalized_vstacked_X\ntest_warm_start\n\n* CLN add comment for lbfgs ftol=64 * machine precision\n\n* CLN XXX code comment\n\n* Trigger [all random seeds] on the following tests:\n\ntest_glm_regression\ntest_glm_regression_hstacked_X\ntest_glm_regression_vstacked_X\ntest_glm_regression_unpenalized\ntest_glm_regression_unpenalized_hstacked_X\ntest_glm_regression_unpenalized_vstacked_X\ntest_warm_start\n\n* CLN link issue and remove code snippet in comment\n\n* Trigger [all random seeds] on the following tests:\n\ntest_glm_regression\ntest_glm_regression_hstacked_X\ntest_glm_regression_vstacked_X\ntest_glm_regression_unpenalized\ntest_glm_regression_unpenalized_hstacked_X\ntest_glm_regression_unpenalized_vstacked_X\ntest_warm_start\n\n* CLN add catch_warnings\n\n* Trigger [all random seeds] on the following tests:\n\ntest_glm_regression\ntest_glm_regression_hstacked_X\ntest_glm_regression_vstacked_X\ntest_glm_regression_unpenalized\ntest_glm_regression_unpenalized_hstacked_X\ntest_glm_regression_unpenalized_vstacked_X\ntest_warm_start\n\n* Trigger [all random seeds] on the following tests:\n\ntest_glm_regression\ntest_glm_regression_hstacked_X\ntest_glm_regression_vstacked_X\ntest_glm_regression_unpenalized\ntest_glm_regression_unpenalized_hstacked_X\ntest_glm_regression_unpenalized_vstacked_X\ntest_warm_start\n\n* [all random seeds]\n\ntest_glm_regression\ntest_glm_regression_hstacked_X\ntest_glm_regression_vstacked_X\ntest_glm_regression_unpenalized\ntest_glm_regression_unpenalized_hstacked_X\ntest_glm_regression_unpenalized_vstacked_X\ntest_warm_start\n\n* Trigger with -Werror [all random seeds]\n\ntest_glm_regression\ntest_glm_regression_hstacked_X\ntest_glm_regression_vstacked_X\ntest_glm_regression_unpenalized\ntest_glm_regression_unpenalized_hstacked_X\ntest_glm_regression_unpenalized_vstacked_X\ntest_warm_start\n\n* ENH increase maxls to 50\n\n* [all random seeds]\n\ntest_glm_regression\ntest_glm_regression_hstacked_X\ntest_glm_regression_vstacked_X\ntest_glm_regression_unpenalized\ntest_glm_regression_unpenalized_hstacked_X\ntest_glm_regression_unpenalized_vstacked_X\ntest_warm_start\n\n* Revert \"Trigger with -Werror [all random seeds]\"\n\nThis reverts commit 99f4cf99ca41b4ad2bdad537ad60f936970e3a88.\n\n* TST add catch_warnings to filterwarnings\n\n* TST adapt tests for newton solvers\n\n* CLN cleaner gradient step with gradient_times_newton\n\n* DOC add whatsnew\n\n* ENH always use lbfgs as fallback\n\n* TST adapt rtol\n\n* TST fix test_linalg_warning_with_newton_solver\n\n* CLN address some review comments\n\n* Improve tests related to convergence warning on collinear data\n\n* overfit -> fit\n\n* Typo in comment\n\n* Apply suggestions from code review\n\n* ENH fallback_lbfgs_solve\n- Do not use lbfgs steps, fall back complete to lbfgs\n\n* ENH adapt rtol\n\n* Improve test_linalg_warning_with_newton_solver\n\n* Better comments\n\n* Fixed Hessian casing and improved warning messages\n\n* [all random seeds]\n\ntest_linalg_warning_with_newton_solver\n\n* Ignore ConvergenceWarnings for now if convergence is good\n\n* CLN remove counting of warnings\n\n* ENH fall back to lbfgs if line search did not converge\n\n* DOC better comment on performance bottleneck\n\n* Update GLM related examples to use the new solver\n\n* CLN address reviewer comments\n\n* EXA improve some wordings\n\n* CLN do not pop 
\"solver in parameter constraints\n\n* CLN fix typos\n\n* DOC fix docstring\n\n* CLN remove solver newton-qr-cholesky\n\n* DOC update PR number in whatsnew\n\n* CLN address review comments\n\n* CLN remove unnecessary catch_warnings\n\n* CLN address some review comments\n\n* DOC more precise whatsnew\n\n* CLN use init_zero_coef\n\n* CLN use and test init_zero_coef\n\n* CLN address some review comments\n\n* CLN mark NewtonSolver as private by leading underscore\n\n* CLN exact comments for inner_solve\n\n* TST add test_newton_solver_verbosity\n\n* TST extend test_newton_solver_verbosity\n\n* TST logic in test_glm_regression_unpenalized\n\n* TST use count_nonzero\n\n* CLN remove super rare line search checks\n\n* MNT move Newton solver to new file _newton_solver.py\n\nCo-authored-by: Olivier Grisel \nCo-authored-by: Julien Jerphanion ", "code": "def line_search(self, X, y, sample_weight):\n \n # line search parameters\n beta, sigma = 0.5, 0.00048828125 # 1/2, 1/2**11\n eps = 16 * np.finfo(self.loss_value.dtype).eps\n t = 1 # step size\n\n # gradient_times_newton = self.gradient @ self.coef_newton\n # was computed in inner_solve.\n armijo_term = sigma * self.gradient_times_newton\n _, _, raw_prediction_newton = self.linear_loss.weight_intercept_raw(\n self.coef_newton, X\n )\n\n self.coef_old = self.coef\n self.loss_value_old = self.loss_value\n self.gradient_old = self.gradient\n\n # np.sum(np.abs(self.gradient_old))\n sum_abs_grad_old = -1\n\n is_verbose = self.verbose >= 2\n if is_verbose:\n print(\" Backtracking Line Search\")\n print(f\" eps=10 * finfo.eps={eps}\")\n\n for i in range(21): # until and including t = beta**20 ~ 1e-6\n self.coef = self.coef_old + t * self.coef_newton\n raw = self.raw_prediction + t * raw_prediction_newton\n self.loss_value, self.gradient = self.linear_loss.loss_gradient(\n coef=self.coef,\n X=X,\n y=y,\n sample_weight=sample_weight,\n l2_reg_strength=self.l2_reg_strength,\n n_threads=self.n_threads,\n raw_prediction=raw,\n )\n # Note: If coef_newton is too large, loss_gradient may produce inf values,\n # potentially accompanied by a RuntimeWarning.\n # This case will be captured by the Armijo condition.\n\n # 1. Check Armijo / sufficient decrease condition.\n # The smaller (more negative) the better.\n loss_improvement = self.loss_value - self.loss_value_old\n check = loss_improvement <= t * armijo_term\n if is_verbose:\n print(\n f\" line search iteration={i+1}, step size={t}\\n\"\n f\" check loss improvement <= armijo term: {loss_improvement} \"\n f\"<= {t * armijo_term} {check}\"\n )\n if check:\n break\n # 2. Deal with relative loss differences around machine precision.\n tiny_loss = np.abs(self.loss_value_old * eps)\n check = np.abs(loss_improvement) <= tiny_loss\n if is_verbose:\n print(\n \" check loss |improvement| <= eps * |loss_old|:\"\n f\" {np.abs(loss_improvement)} <= {tiny_loss} {check}\"\n )\n if check:\n if sum_abs_grad_old < 0:\n sum_abs_grad_old = scipy.linalg.norm(self.gradient_old, ord=1)\n # 2.1 Check sum of absolute gradients as alternative condition.\n sum_abs_grad = scipy.linalg.norm(self.gradient, ord=1)\n check = sum_abs_grad < sum_abs_grad_old\n if is_verbose:\n print(\n \" check sum(|gradient|) < sum(|gradient_old|): \"\n f\"{sum_abs_grad} < {sum_abs_grad_old} {check}\"\n )\n if check:\n break\n\n t *= beta\n else:\n warnings.warn(\n f\"Line search of Newton solver {self.__class__.__name__} at iteration \"\n f\"#{self.iteration} did no converge after 21 line search refinement \"\n \"iterations. 
It will now resort to lbfgs instead.\",\n ConvergenceWarning,\n )\n if self.verbose:\n print(\" Line search did not converge and resorts to lbfgs instead.\")\n self.use_fallback_lbfgs_solve = True\n return\n\n self.raw_prediction = raw\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 1364, "n_words": 339, "vocab_size": 201, "complexity": 11, "nloc": 70, "token_counts": 350, "n_ast_nodes": 645, "n_identifiers": 52, "d_id": 76792, "documentation": { "docstring": "Backtracking line search.\n\n Sets:\n - self.coef_old\n - self.coef\n - self.loss_value_old\n - self.loss_value\n - self.gradient_old\n - self.gradient\n - self.raw_prediction\n ", "n_words": 18, "vocab_size": 12, "n_whitespaces": 109, "language": "en" } }, { "id": 21982, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/distlib/database.py", "file_name": "database.py", "fun_name": "_yield_distributions", "commit_message": "Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def _yield_distributions(self):\n \n # We need to check if we've seen some resources already, because on\n # some Linux systems (e.g. some Debian/Ubuntu variants) there are\n # symlinks which alias other files in the environment.\n seen = set()\n for path in self.path:\n finder = resources.finder_for_path(path)\n if finder is None:\n continue\n r = finder.find('')\n if not r or not r.is_container:\n continue\n rset = sorted(r.resources)\n for entry in rset:\n r = finder.find(entry)\n if not r or r.path in seen:\n continue\n try:\n if self._include_dist and entry.endswith(DISTINFO_EXT):\n possible_filenames = [METADATA_FILENAME,\n WHEEL_METADATA_FILENAME,\n LEGACY_METADATA_FILENAME]\n for metadata_filename in possible_filenames:\n metadata_path = posixpath.join(entry, metadata_filename)\n pydist = finder.find(metadata_path)\n if pydist:\n break\n else:\n continue\n\n with contextlib.closing(pydist.as_stream()) as stream:\n metadata = Metadata(fileobj=stream, scheme='legacy')\n logger.debug('Found %s', r.path)\n seen.add(r.path)\n yield new_dist_class(r.path, metadata=metadata,\n env=self)\n elif self._include_egg and entry.endswith(('.egg-info',\n '.egg')):\n logger.debug('Found %s', r.path)\n seen.add(r.path)\n yield old_dist_class(r.path, self)\n except Exception as e:\n msg = 'Unable to read distribution at %s, perhaps due to bad metadata: %s'\n logger.warning(msg, r.path, e)\n import warnings\n warnings.warn(msg % (r.path, e), stacklevel=2)\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 1081, "n_words": 159, "vocab_size": 115, "complexity": 15, "nloc": 42, "token_counts": 277, "n_ast_nodes": 457, "n_identifiers": 48, "d_id": 4098, "documentation": { "docstring": "\n Yield .dist-info and/or .egg(-info) distributions.\n ", "n_words": 5, "vocab_size": 5, "n_whitespaces": 20, "language": "en" } }, { "id": 181804, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tests/tpot_tests.py", "file_name": "tpot_tests.py", "fun_name": "test_mutNodeReplacement", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def test_mutNodeReplacement():\n \n\n pipeline_string = (\n 'LogisticRegression(PolynomialFeatures'\n '(input_matrix, 
PolynomialFeatures__degree=2, PolynomialFeatures__include_bias=False, '\n 'PolynomialFeatures__interaction_only=False), LogisticRegression__C=10.0, '\n 'LogisticRegression__dual=False, LogisticRegression__penalty=l2)'\n )\n\n pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)\n pipeline[0].ret = Output_Array\n old_ret_type_list = [node.ret for node in pipeline]\n old_prims_list = [node for node in pipeline if node.arity != 0]\n\n # test 10 times\n for _ in range(10):\n mut_ind = mutNodeReplacement(tpot_obj._toolbox.clone(pipeline), pset=tpot_obj._pset)\n new_ret_type_list = [node.ret for node in mut_ind[0]]\n new_prims_list = [node for node in mut_ind[0] if node.arity != 0]\n\n if new_prims_list == old_prims_list: # Terminal mutated\n assert new_ret_type_list == old_ret_type_list\n else: # Primitive mutated\n diff_prims = [x for x in new_prims_list if x not in old_prims_list]\n diff_prims += [x for x in old_prims_list if x not in new_prims_list]\n if len(diff_prims) > 1: # Sometimes mutation randomly replaces an operator that already in the pipelines\n assert diff_prims[0].ret == diff_prims[1].ret\n assert mut_ind[0][0].ret == Output_Array\n\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 292, "n_words": 134, "vocab_size": 75, "complexity": 14, "nloc": 23, "token_counts": 193, "n_ast_nodes": 302, "n_identifiers": 26, "d_id": 43590, "documentation": { "docstring": "Assert that mutNodeReplacement() returns the correct type of mutation node in a fixed pipeline.", "n_words": 14, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 298235, "commit_id": "df2d0cd3e3ade2339a18415f92c85810308a9926", "repo": "core", "path": "homeassistant/components/mysensors/device_tracker.py", "file_name": "device_tracker.py", "fun_name": "_async_update", "commit_message": "Refactor mysensors device tracker (#84747)", "code": "def _async_update(self) -> None:\n \n super()._async_update()\n node = self.gateway.sensors[self.node_id]\n child = node.children[self.child_id]\n position: str = child.values[self.value_type]\n latitude, longitude, _ = position.split(\",\")\n self._latitude = float(latitude)\n self._longitude = float(longitude)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 82, "n_words": 26, "vocab_size": 21, "complexity": 1, "nloc": 9, "token_counts": 77, "n_ast_nodes": 126, "n_identifiers": 21, "d_id": 97180, "documentation": { "docstring": "Update the controller with the latest value from a device.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 218367, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/inspect.py", "file_name": "inspect.py", "fun_name": "from_builtin", "commit_message": "add python 3.10.4 for windows", "code": "def from_builtin(cls, func):\n \n\n warnings.warn(\"inspect.Signature.from_builtin() is deprecated since \"\n \"Python 3.5, use Signature.from_callable()\",\n DeprecationWarning, stacklevel=2)\n return _signature_from_builtin(cls, func)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 80, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 5, "token_counts": 28, "n_ast_nodes": 48, "n_identifiers": 8, "d_id": 55261, "documentation": { "docstring": 
"Constructs Signature for the given builtin function.\n\n Deprecated since Python 3.5, use `Signature.from_callable()`.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 27, "language": "en" } }, { "id": 275240, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/optimizer_experimental/adadelta.py", "file_name": "adadelta.py", "fun_name": "update_step", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def update_step(self, grad, variable):\n \n lr = tf.cast(self.learning_rate, variable.dtype)\n\n var_key = self._var_key(variable)\n rho = self.rho\n accumulated_grad = self._accumulated_grads[self._index_dict[var_key]]\n accumulated_delta_var = self._accumulated_delta_vars[\n self._index_dict[var_key]\n ]\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 82, "n_words": 22, "vocab_size": 18, "complexity": 2, "nloc": 33, "token_counts": 211, "n_ast_nodes": 97, "n_identifiers": 17, "d_id": 81342, "documentation": { "docstring": "Update step given gradient and the associated model variable.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 130175, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/_private/prometheus_exporter.py", "file_name": "prometheus_exporter.py", "fun_name": "export", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def export(self, view_data):\n \n if view_data is not None: # pragma: NO COVER\n self.transport.export(view_data)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 39, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 3, "token_counts": 22, "n_ast_nodes": 38, "n_identifiers": 4, "d_id": 29142, "documentation": { "docstring": "export send the data to the transport class\n in order to be sent to Prometheus in a sync or async way.\n ", "n_words": 21, "vocab_size": 17, "n_whitespaces": 35, "language": "en" } }, { "id": 140506, "commit_id": "517f78e2b810e506d61884c79d768a37a34f0f9c", "repo": "ray", "path": "python/ray/_private/test_utils.py", "file_name": "test_utils.py", "fun_name": "job_hook", "commit_message": "[minor] Add a job submission hook by env var (#25343)", "code": "def job_hook(**kwargs):\n \n cmd = \" \".join(kwargs[\"entrypoint\"])\n print(f\"hook intercepted: {cmd}\")\n sys.exit(0)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 22, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 29, "n_ast_nodes": 59, "n_identifiers": 7, "d_id": 31979, "documentation": { "docstring": "Function called by reflection by test_cli_integration.", "n_words": 6, "vocab_size": 5, "n_whitespaces": 5, "language": "en" } }, { "id": 194780, "commit_id": "81f722d29045a7a5841d0931a082ded1d1f13863", "repo": "ParlAI", "path": "parlai/scripts/generate_model_card.py", "file_name": "generate_model_card.py", "fun_name": "get_dataset_info", "commit_message": "autoformat (#4378)", "code": "def get_dataset_info(tasks):\n \n curr_task_info = []\n for task in tasks:\n # adding the name + attempted link\n tname = taskname(task)\n tsite = task_site + to_sublink(tname)\n curr_task_info.append(f\"- 
[{tname}]({tsite})\")\n # adding link\n links = make_task_links(task)\n curr_task_info[-1] += f\" ({links})\" if links else ''\n # adding description\n if all_tasks.get(task) and all_tasks[task].get('description'):\n curr_task_info[-1] += f\": {all_tasks[task]['description']}\"\n return curr_task_info\n\n\n#################################\n# Table-Related Functions\n#################################", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 141, "n_words": 58, "vocab_size": 42, "complexity": 5, "nloc": 11, "token_counts": 82, "n_ast_nodes": 170, "n_identifiers": 14, "d_id": 47073, "documentation": { "docstring": "\n dataset info comes from guessing where it would be at the tasks site and the\n task_list.py + anything else from the user.\n ", "n_words": 22, "vocab_size": 19, "n_whitespaces": 32, "language": "en" } }, { "id": 212818, "commit_id": "9b814f003b0685757d76ce56ee9c98eae114d346", "repo": "PySimpleGUI", "path": "PySimpleGUI.py", "file_name": "PySimpleGUI.py", "fun_name": "widget_to_element", "commit_message": "Added key and widget Element properties, new focus methods Element.get_next_focus, Element.get_previous_focus. New Window method Window.widget_to_element", "code": "def widget_to_element(self, widget):\n \n if self.AllKeysDict is None or len(self.AllKeysDict) == 0:\n return None\n for key, element in self.AllKeysDict.items():\n if element.Widget == widget:\n return element\n return None\n\n", "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 91, "n_words": 26, "vocab_size": 19, "complexity": 5, "nloc": 7, "token_counts": 50, "n_ast_nodes": 80, "n_identifiers": 9, "d_id": 53428, "documentation": { "docstring": "\n Returns the element that matches a supplied tkinter widget.\n If no matching element is found, then None is returned.\n\n\n :return: Element that uses the specified widget\n :rtype: Element | None\n ", "n_words": 30, "vocab_size": 24, "n_whitespaces": 73, "language": "en" } }, { "id": 100857, "commit_id": "98a65277d8c55cfcbdbfa629f790a8f8731621a8", "repo": "faceswap", "path": "lib/gpu_stats/amd.py", "file_name": "amd.py", "fun_name": "_get_fallback_devices", "commit_message": "Fix AMD Tests + docs", "code": "def _get_fallback_devices(self) -> List[plaidml._DeviceConfig]:\n \n # Try get a supported device\n experimental_setting = plaidml.settings.experimental\n plaidml.settings.experimental = False\n devices = plaidml.devices(self._ctx, limit=100, return_all=True)[0]\n\n # Try get any device\n if not devices:\n plaidml.settings.experimental = True\n devices = plaidml.devices(self._ctx, limit=100, return_all=True)[0]\n\n plaidml.settings.experimental = experimental_setting\n\n if not devices:\n raise RuntimeError(\"No valid devices could be found for plaidML.\")\n\n self._log(\"warning\", f\"PlaidML could not find a GPU. Falling back to: \"\n f\"{[d.id.decode('utf-8') for d in devices]}\")\n return devices\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 197, "n_words": 70, "vocab_size": 44, "complexity": 3, "nloc": 20, "token_counts": 109, "n_ast_nodes": 201, "n_identifiers": 17, "d_id": 20308, "documentation": { "docstring": " Called if a GPU has not been discovered. 
Return any devices we can run on.\n\n Returns\n -------\n list:\n The :class:`pladml._DeviceConfig` fallaback objects that PlaidML has discovered.\n ", "n_words": 26, "vocab_size": 24, "n_whitespaces": 66, "language": "en" } }, { "id": 110159, "commit_id": "973e475ef85524c5e9cef0638c90ca9a159935e4", "repo": "matplotlib", "path": "lib/matplotlib/_api/__init__.py", "file_name": "__init__.py", "fun_name": "nargs_error", "commit_message": "Factor out error generation for function calls with wrong nargs.\n\n... matching the wording for standard functions.\n\nNote that nargs_error returns the exception without raising it itself to\nmake the control flow clearer on the caller side.", "code": "def nargs_error(name, takes, given):\n \n return TypeError(f\"{name}() takes {takes} positional arguments but \"\n f\"{given} were given\")\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 41, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 3, "token_counts": 18, "n_ast_nodes": 43, "n_identifiers": 5, "d_id": 23954, "documentation": { "docstring": "Generate a TypeError to be raised by function calls with wrong arity.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 207170, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_inlines/tests.py", "file_name": "tests.py", "fun_name": "test_tabular_model_form_meta_readonly_field", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_tabular_model_form_meta_readonly_field(self):\n \n response = self.client.get(reverse(\"admin:admin_inlines_someparentmodel_add\"))\n self.assertContains(\n response,\n '',\n )\n self.assertContains(response, \"Label from ModelForm.Meta\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 119, "n_words": 29, "vocab_size": 24, "complexity": 1, "nloc": 10, "token_counts": 39, "n_ast_nodes": 75, "n_identifiers": 7, "d_id": 51888, "documentation": { "docstring": "\n Tabular inlines use ModelForm.Meta.help_texts and labels for read-only\n fields.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 31, "language": "en" } }, { "id": 227197, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_icicle.py", "file_name": "_icicle.py", "fun_name": "tiling", "commit_message": "switch to black .22", "code": "def tiling(self):\n \n return self[\"tiling\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 58870, "documentation": { "docstring": "\n The 'tiling' property is an instance of Tiling\n that may be specified as:\n - An instance of :class:`plotly.graph_objs.icicle.Tiling`\n - A dict of string/value properties that will be passed\n to the Tiling constructor\n\n Supported dict properties:\n\n flip\n Determines if the positions obtained from\n solver are flipped on each axis.\n orientation\n When set in conjunction with `tiling.flip`,\n determines on which side the root nodes are\n drawn in the chart. If `tiling.orientation` is\n \"v\" and `tiling.flip` is \"\", the root nodes\n appear at the top. 
If `tiling.orientation` is\n \"v\" and `tiling.flip` is \"y\", the root nodes\n appear at the bottom. If `tiling.orientation`\n is \"h\" and `tiling.flip` is \"\", the root nodes\n appear at the left. If `tiling.orientation` is\n \"h\" and `tiling.flip` is \"x\", the root nodes\n appear at the right.\n pad\n Sets the inner padding (in px).\n\n Returns\n -------\n plotly.graph_objs.icicle.Tiling\n ", "n_words": 137, "vocab_size": 77, "n_whitespaces": 531, "language": "en" } }, { "id": 138734, "commit_id": "b4d9fcdbf8be4c0f4985c29b251d2585cf269f76", "repo": "ray", "path": "python/ray/serve/pipeline/deployment_node.py", "file_name": "deployment_node.py", "fun_name": "_execute_impl", "commit_message": "[Serve] Fix surprious `__call__` invocation in Deployment DAG's exec_impl (#24199)", "code": "def _execute_impl(self, *args, **kwargs):\n \n return self._deployment_handle\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 27, "n_identifiers": 5, "d_id": 31508, "documentation": { "docstring": "Executor of DeploymentNode getting called each time on dag.execute.\n\n The execute implementation is recursive, that is, the method nodes will receive\n whatever this method returns. We return a handle here so method node can\n directly call upon.\n ", "n_words": 37, "vocab_size": 35, "n_whitespaces": 65, "language": "en" } }, { "id": 100903, "commit_id": "d9c84a5f9f6ff22d6f91594f218bea15764de96b", "repo": "faceswap", "path": "plugins/extract/pipeline.py", "file_name": "pipeline.py", "fun_name": "get_image_copy", "commit_message": "Add Laplacian Pyramid Loss", "code": "def get_image_copy(self, color_format):\n \n logger.trace(\"Requested color format '%s' for frame '%s'\", color_format, self._filename)\n image = getattr(self, f\"_image_as_{color_format.lower()}\")()\n return image\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 46, "n_words": 18, "vocab_size": 17, "complexity": 1, "nloc": 4, "token_counts": 33, "n_ast_nodes": 66, "n_identifiers": 9, "d_id": 20352, "documentation": { "docstring": " Get a copy of the image in the requested color format.\n\n Parameters\n ----------\n color_format: ['BGR', 'RGB', 'GRAY']\n The requested color format of :attr:`image`\n\n Returns\n -------\n :class:`numpy.ndarray`:\n A copy of :attr:`image` in the requested :attr:`color_format`\n ", "n_words": 34, "vocab_size": 24, "n_whitespaces": 106, "language": "en" } }, { "id": 223399, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/util.py", "file_name": "util.py", "fun_name": "subst_vars", "commit_message": "add python 3.10.4 for windows", "code": "def subst_vars (s, local_vars):\n \n check_environ()", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 11, "n_words": 5, "vocab_size": 5, "complexity": 2, "nloc": 7, "token_counts": 39, "n_ast_nodes": 21, "n_identifiers": 4, "d_id": 56886, "documentation": { "docstring": "Perform shell/Perl-style variable substitution on 'string'. 
Every\n occurrence of '$' followed by a name is considered a variable, and\n variable is substituted by the value found in the 'local_vars'\n dictionary, or in 'os.environ' if it's not in 'local_vars'.\n 'os.environ' is first checked/augmented to guarantee that it contains\n certain values: see 'check_environ()'. Raise ValueError for any\n variables not found in either 'local_vars' or 'os.environ'.\n ", "n_words": 63, "vocab_size": 49, "n_whitespaces": 86, "language": "en" } }, { "id": 241632, "commit_id": "dbf1acd5a553ffc1546734be164cc89cef2b741d", "repo": "lightning", "path": "tests/plugins/environments/test_lsf_environment.py", "file_name": "test_lsf_environment.py", "fun_name": "test_empty_lsb_djob_rankfile", "commit_message": "Modify LSFEnvironment to use more reliable environment variable (#10825)\n\n\r\nCo-authored-by: thomas chaton \r\nCo-authored-by: Carlos Mocholí \r\nCo-authored-by: Adrian Wälchli \r\nCo-authored-by: Jirka Borovec ", "code": "def test_empty_lsb_djob_rankfile():\n \n with pytest.raises(ValueError, match=\"The environment variable `LSB_DJOB_RANKFILE` is empty\"):\n LSFEnvironment()\n\n", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 24, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 40, "n_identifiers": 6, "d_id": 69633, "documentation": { "docstring": "Test an error when the LSB_DJOB_RANKFILE is not populated.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 285666, "commit_id": "f18c44b0668ef8e40d14d79780558521b2c02304", "repo": "OpenBBTerminal", "path": "openbb_terminal/helper_funcs.py", "file_name": "helper_funcs.py", "fun_name": "get_user_timezone", "commit_message": "New path for styles and add timezone as environment variable (#2509)\n\n* add log path\r\n\r\n* add test to check if log file is in correct dir\r\n\r\n* env path\r\n\r\n* black\r\n\r\n* mypy fix\r\n\r\n* add styles folder and styles from repo\r\n\r\n* add timezone as env variable\r\n\r\n* fix changes with main\r\n\r\n* fix test\r\n\r\n* flake8\r\n\r\n* fix linting\r\n\r\n* fix linting\r\n\r\n* fix issue with light mpl stylesheet\r\n\r\n* change timezone variable name\r\n\r\n* change names\r\n\r\n* change names\r\n\r\n* names\r\n\r\n* simplify paths.py\r\n\r\n* change some names\r\n\r\n* fix error in logic\r\n\r\n* remove 3.11 from testing for now", "code": "def get_user_timezone() -> str:\n \n dotenv.load_dotenv(USER_ENV_FILE)\n user_tz = os.getenv(\"OPENBB_TIMEZONE\")\n if user_tz:\n return user_tz\n return \"\"\n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 36, "n_words": 14, "vocab_size": 12, "complexity": 2, "nloc": 13, "token_counts": 28, "n_ast_nodes": 53, "n_identifiers": 8, "d_id": 85370, "documentation": { "docstring": "Get user timezone if it is a valid one\n\n Returns\n -------\n str\n user timezone based on .env file\n ", "n_words": 18, "vocab_size": 16, "n_whitespaces": 37, "language": "en" } }, { "id": 187103, "commit_id": "120c10302381600abb4044083ce0a106b31df8f0", "repo": "streamlink", "path": "src/streamlink/plugin/api/validate/_validators.py", "file_name": "_validators.py", "fun_name": "validator_xml_findtext", "commit_message": "plugin.api.validate: turn module into package\n\nTurn module into package with multiple logical sub-modules:\n- Define a public interface in the package's 
`__init__` module\n- Split validation schemas, validators and validate logic\n - schemas: classes which register attributes used by their\n respective `validate` implementations\n - validators: functions which can internally call `validate`\n and which return something that can be validated\n - validate: singledispatch functions which implement the validation\n logic for schemas and various other types\n- Rename validation schemas for better internal references\n- Rename singledispatch methods\n\nOther clean-up work:\n- Update comments and fix grammar\n- Add type annotations\n- Use f-strings\n- Use `str` instead of the `text` alias\n- Simplify some code blocks\n- Rearrange classes and functions\n- Rephrase certain error messages\n- Add a few more tests for better code coverage", "code": "def validator_xml_findtext(xpath) -> AllSchema:\n \n\n return AllSchema(\n validator_xml_find(xpath),\n validator_getattr(\"text\"),\n )\n\n", "url": "https://github.com/streamlink/streamlink.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 32, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 8, "token_counts": 22, "n_ast_nodes": 39, "n_identifiers": 5, "d_id": 45687, "documentation": { "docstring": "\n Find an XML element via xpath and extract its text.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 17, "language": "en" } }, { "id": 53408, "commit_id": "1d4218a287ef343f32f1e32482592b471be5df1d", "repo": "prefect", "path": "src/prefect/orion/schemas/data.py", "file_name": "data.py", "fun_name": "get_instance_data_location", "commit_message": "Move `prefect.settings` to `prefect.settings.from_env()`", "code": "def get_instance_data_location() -> DataLocation:\n \n return DataLocation(\n name=prefect.settings.from_env().orion.data.name,\n base_path=prefect.settings.from_env().orion.data.base_path,\n scheme=prefect.settings.from_env().orion.data.scheme.lower(),\n )\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 40, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 9, "token_counts": 63, "n_ast_nodes": 101, "n_identifiers": 11, "d_id": 10792, "documentation": { "docstring": "\n Return the current data location configured for this Orion instance\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 17, "language": "en" } }, { "id": 74800, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/documents/tests/test_admin_views.py", "file_name": "test_admin_views.py", "fun_name": "test_reupload_different_file_size_and_file_hash", "commit_message": "Reformat with black", "code": "def test_reupload_different_file_size_and_file_hash(self):\n \n # Build a fake file, and create it through the admin view\n # since self.document doesn't have a file_size set.\n fake_file = SimpleUploadedFile(\"some_file.txt\", b\"this is the content\")\n post_data = {\n \"title\": \"My doc\",\n \"file\": fake_file,\n }\n self.client.post(reverse(\"wagtaildocs:add\"), post_data)\n\n document = models.Document.objects.get(title=\"My doc\")\n old_file_size, old_file_hash = document.file_size, document.file_hash\n\n new_file = SimpleUploadedFile(document.filename, b\"less content\")\n\n self.client.post(\n reverse(\"wagtaildocs:edit\", args=(document.pk,)),\n {\n \"title\": document.title,\n \"file\": new_file,\n },\n )\n\n document.refresh_from_db()\n\n self.assertNotEqual(document.file_size, old_file_size)\n 
self.assertNotEqual(document.file_hash, old_file_hash)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 259, "n_words": 69, "vocab_size": 58, "complexity": 1, "nloc": 20, "token_counts": 135, "n_ast_nodes": 227, "n_identifiers": 24, "d_id": 16322, "documentation": { "docstring": "\n Checks that reuploading the document file with a different file\n changes the file size and file hash (see #5704).\n ", "n_words": 19, "vocab_size": 15, "n_whitespaces": 41, "language": "en" } }, { "id": 25205, "commit_id": "1f9400dd7374ce9cc47981372e324ff412e53ba3", "repo": "PaddleOCR", "path": "ppocr/modeling/heads/local_graph.py", "file_name": "local_graph.py", "fun_name": "__call__", "commit_message": "add drrg", "code": "def __call__(self, feat_maps, comp_attribs):\n \n\n assert isinstance(feat_maps, paddle.Tensor)\n assert comp_attribs.ndim == 3\n assert comp_attribs.shape[2] == 8\n\n sorted_dist_inds_batch = []\n local_graph_batch = []\n knn_batch = []\n node_feat_batch = []\n node_label_batch = []\n\n for batch_ind in range(comp_attribs.shape[0]):\n num_comps = int(comp_attribs[batch_ind, 0, 0])\n comp_geo_attribs = comp_attribs[batch_ind, :num_comps, 1:7]\n node_labels = comp_attribs[batch_ind, :num_comps, 7].astype(\n np.int32)\n\n comp_centers = comp_geo_attribs[:, 0:2]\n distance_matrix = euclidean_distance_matrix(comp_centers,\n comp_centers)\n\n batch_id = np.zeros(\n (comp_geo_attribs.shape[0], 1), dtype=np.float32) * batch_ind\n comp_geo_attribs[:, -2] = np.clip(comp_geo_attribs[:, -2], -1, 1)\n angle = np.arccos(comp_geo_attribs[:, -2]) * np.sign(\n comp_geo_attribs[:, -1])\n angle = angle.reshape((-1, 1))\n rotated_rois = np.hstack(\n [batch_id, comp_geo_attribs[:, :-2], angle])\n rois = paddle.to_tensor(rotated_rois)\n content_feats = self.pooling(feat_maps[batch_ind].unsqueeze(0),\n rois)\n\n content_feats = content_feats.reshape([content_feats.shape[0], -1])\n geo_feats = feature_embedding(comp_geo_attribs,\n self.node_geo_feat_dim)\n geo_feats = paddle.to_tensor(geo_feats)\n node_feats = paddle.concat([content_feats, geo_feats], axis=-1)\n\n sorted_dist_inds = np.argsort(distance_matrix, axis=1)\n pivot_local_graphs, pivot_knns = self.generate_local_graphs(\n sorted_dist_inds, node_labels)\n\n node_feat_batch.append(node_feats)\n node_label_batch.append(node_labels)\n local_graph_batch.append(pivot_local_graphs)\n knn_batch.append(pivot_knns)\n sorted_dist_inds_batch.append(sorted_dist_inds)\n\n (node_feats, adjacent_matrices, knn_inds, gt_linkage) = \\\n self.generate_gcn_input(node_feat_batch,\n node_label_batch,\n local_graph_batch,\n knn_batch,\n sorted_dist_inds_batch)\n\n return node_feats, adjacent_matrices, knn_inds, gt_linkage\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 845, "n_words": 146, "vocab_size": 103, "complexity": 2, "nloc": 48, "token_counts": 406, "n_ast_nodes": 607, "n_identifiers": 58, "d_id": 4867, "documentation": { "docstring": "Generate local graphs as GCN input.\n\n Args:\n feat_maps (Tensor): The feature maps to extract the content\n features of text components.\n comp_attribs (ndarray): The text component attributes.\n\n Returns:\n local_graphs_node_feat (Tensor): The node features of graph.\n adjacent_matrices (Tensor): The adjacent matrices of local graphs.\n pivots_knn_inds (Tensor): The k-nearest neighbor indices in local\n graph.\n gt_linkage (Tensor): 
The surpervision signal of GCN for linkage\n prediction.\n ", "n_words": 61, "vocab_size": 43, "n_whitespaces": 193, "language": "en" } }, { "id": 5030, "commit_id": "73c7fad7fce952a8c3ba827ca858e4280bd846f3", "repo": "airbyte", "path": "airbyte-cdk/python/airbyte_cdk/utils/traced_exception.py", "file_name": "traced_exception.py", "fun_name": "emit_message", "commit_message": "CDK: emit `AirbyteTraceMessage` with exception trace information (#12593)", "code": "def emit_message(self):\n \n message = self.as_airbyte_message().json(exclude_unset=True)\n filtered_message = filter_secrets(message)\n print(filtered_message)\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 37, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 30, "n_ast_nodes": 53, "n_identifiers": 9, "d_id": 709, "documentation": { "docstring": "\n Prints the exception as an AirbyteTraceMessage.\n Note that this will be called automatically on uncaught exceptions when using the airbyte_cdk entrypoint.\n ", "n_words": 21, "vocab_size": 20, "n_whitespaces": 43, "language": "en" } }, { "id": 249156, "commit_id": "c97042f7eef3748e17c90e48a4122389a89c4735", "repo": "synapse", "path": "tests/rest/admin/test_room.py", "file_name": "test_room.py", "fun_name": "test_purge_room_and_block", "commit_message": "Use literals in place of `HTTPStatus` constants in tests (#13469)", "code": "def test_purge_room_and_block(self) -> None:\n \n # Test that room is not purged\n with self.assertRaises(AssertionError):\n self._is_purged(self.room_id)\n\n # Test that room is not blocked\n self._is_blocked(self.room_id, expect=False)\n\n # Assert one user in room\n self._is_member(room_id=self.room_id, user_id=self.other_user)\n\n channel = self.make_request(\n \"DELETE\",\n self.url.encode(\"ascii\"),\n content={\"block\": True, \"purge\": True},\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(None, channel.json_body[\"new_room_id\"])\n self.assertEqual(self.other_user, channel.json_body[\"kicked_users\"][0])\n self.assertIn(\"failed_to_kick_users\", channel.json_body)\n self.assertIn(\"local_aliases\", channel.json_body)\n\n self._is_purged(self.room_id)\n self._is_blocked(self.room_id, expect=True)\n self._has_no_members(self.room_id)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 231, "n_words": 57, "vocab_size": 46, "complexity": 1, "nloc": 22, "token_counts": 183, "n_ast_nodes": 298, "n_identifiers": 24, "d_id": 72663, "documentation": { "docstring": "Test to purge a room and block it.\n Members will not be moved to a new room and will not receive a message.\n ", "n_words": 23, "vocab_size": 16, "n_whitespaces": 37, "language": "en" } }, { "id": 162151, "commit_id": "6f32a0b5b70fe0f8b14c2946b40840b795044662", "repo": "yt-dlp", "path": "yt_dlp/utils.py", "file_name": "utils.py", "fun_name": "get_element_html_by_id", "commit_message": "[utils] Improve parsing for nested HTML elements (#2129)\n\nand add functions to return the HTML of elements\r\n\r\nAuthored by: zmousm", "code": "def get_element_html_by_id(id, html):\n \n return get_element_html_by_attribute('id', id, html)\n\n", "url": "https://github.com/yt-dlp/yt-dlp.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, 
"complexity": 1, "nloc": 2, "token_counts": 17, "n_ast_nodes": 29, "n_identifiers": 4, "d_id": 39167, "documentation": { "docstring": "Return the html of the tag with the specified ID in the passed HTML document", "n_words": 15, "vocab_size": 12, "n_whitespaces": 14, "language": "en" } }, { "id": 62884, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/packaging/specifiers.py", "file_name": "specifiers.py", "fun_name": "prereleases", "commit_message": "upd; format", "code": "def prereleases(self, value):\n # type: (bool) -> None\n \n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 1, "token_counts": 8, "n_ast_nodes": 16, "n_identifiers": 3, "d_id": 13060, "documentation": { "docstring": "\n Sets whether or not pre-releases as a whole are allowed by this\n specifier.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 35, "language": "en" } }, { "id": 113798, "commit_id": "7f1495c8b338c547005770cb83f2f7f4b88798f3", "repo": "nni", "path": "nni/runtime/trial_command_channel/base.py", "file_name": "base.py", "fun_name": "receive_parameter", "commit_message": "Trial command channel (#5254)", "code": "def receive_parameter(self) -> ParameterRecord | None:\n \n raise NotImplementedError()\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 10, "token_counts": 14, "n_ast_nodes": 26, "n_identifiers": 4, "d_id": 25031, "documentation": { "docstring": "Get the next parameter record from NNI manager.\n\n Returns\n -------\n :class:`~nni.typehint.ParameterRecord`\n The next parameter record.\n Could be ``None`` if no more parameter is available.\n ", "n_words": 24, "vocab_size": 21, "n_whitespaces": 74, "language": "en" } }, { "id": 264829, "commit_id": "5667a9c456e0514a2d00d6475e7013748b4a7c1e", "repo": "netbox", "path": "netbox/circuits/signals.py", "file_name": "signals.py", "fun_name": "rebuild_cablepaths", "commit_message": "Refactor CablePath.from_origin()", "code": "def rebuild_cablepaths(instance, raw=False, **kwargs):\n \n if not raw:\n peer_termination = instance.get_peer_termination()\n # if peer_termination:\n # rebuild_paths(peer_termination)\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 46, "n_words": 15, "vocab_size": 13, "complexity": 3, "nloc": 5, "token_counts": 31, "n_ast_nodes": 43, "n_identifiers": 6, "d_id": 77847, "documentation": { "docstring": "\n Rebuild any CablePaths which traverse the peer CircuitTermination.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 15, "language": "en" } }, { "id": 196755, "commit_id": "ad766d1c02943e86f50559abfd0c72e582c9ca6a", "repo": "sympy", "path": "sympy/assumptions/ask.py", "file_name": "ask.py", "fun_name": "register_handler", "commit_message": "Update the AskHandler deprecation warnings\n\nn.b., the issue number in the original warning message was wrong. It should\nhave been #20837.", "code": "def register_handler(key, handler):\n ", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "\"\"\"\n Register a handler in the ask system. key must be a string and handler athe ask system. 
key must be a", "n_ast_errors": 2, "ast_levels": 7, "n_whitespaces": 6, "n_words": 3, "vocab_size": 3, "complexity": 3, "nloc": 16, "token_counts": 77, "n_ast_nodes": 32, "n_identifiers": 11, "d_id": 48151, "documentation": { "docstring": "\n Register a handler in the ask system. key must be a string and handler a", "n_words": 15, "vocab_size": 12, "n_whitespaces": 18, "language": "en" } }, { "id": 259765, "commit_id": "0822851f5cb17827939a7d7b4f8c84f43184ae89", "repo": "scikit-learn", "path": "sklearn/cluster/tests/test_bisect_k_means.py", "file_name": "test_bisect_k_means.py", "fun_name": "test_float32_float64_equivalence", "commit_message": "FEA Bisecting K-Means (#20031)\n\nCo-authored-by: Gael Varoquaux \r\nCo-authored-by: Tom Dupré la Tour \r\nCo-authored-by: Julien Jerphanion \r\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>", "code": "def test_float32_float64_equivalence(is_sparse):\n \n rng = np.random.RandomState(0)\n X = rng.rand(10, 2)\n\n if is_sparse:\n X[X < 0.8] = 0\n X = sp.csr_matrix(X)\n\n km64 = BisectingKMeans(n_clusters=3, random_state=0).fit(X)\n km32 = BisectingKMeans(n_clusters=3, random_state=0).fit(X.astype(np.float32))\n\n assert_allclose(km32.cluster_centers_, km64.cluster_centers_)\n assert_array_equal(km32.labels_, km64.labels_)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 69, "n_words": 31, "vocab_size": 24, "complexity": 2, "nloc": 10, "token_counts": 108, "n_ast_nodes": 167, "n_identifiers": 22, "d_id": 75910, "documentation": { "docstring": "Check that the results are the same between float32 and float64.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 177572, "commit_id": "85152f2c8c7f8b301b28fcd771f13b5c166c59eb", "repo": "label-studio", "path": "label_studio/data_manager/actions/basic.py", "file_name": "basic.py", "fun_name": "delete_tasks_annotations", "commit_message": "fix: DEV-1486: fix dm action when deleting all annotations, finished state is not updated (#1923)\n\nCo-authored-by: Max Tkachenko ", "code": "def delete_tasks_annotations(project, queryset, **kwargs):\n \n task_ids = queryset.values_list('id', flat=True)\n annotations = Annotation.objects.filter(task__id__in=task_ids)\n count = annotations.count()\n annotations_ids = list(annotations.values('id'))\n annotations.delete()\n emit_webhooks_for_instance(project.organization, project, WebhookAction.ANNOTATIONS_DELETED, annotations_ids)\n bulk_update_stats_project_tasks(queryset)\n return {'processed_items': count,\n 'detail': 'Deleted ' + str(count) + ' annotations'}\n\n", "url": "https://github.com/heartexlabs/label-studio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 72, "n_words": 34, "vocab_size": 29, "complexity": 1, "nloc": 10, "token_counts": 93, "n_ast_nodes": 158, "n_identifiers": 23, "d_id": 42445, "documentation": { "docstring": " Delete all annotations by tasks ids\n\n :param project: project instance\n :param queryset: filtered tasks db queryset\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 26, "language": "en" } }, { "id": 132945, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/client/server/proxier.py", "file_name": "proxier.py", "fun_name": "_check_processes", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind 
these changes.", "code": "def _check_processes(self):\n \n while True:\n with self.server_lock:\n for client_id, specific_server in list(self.servers.items()):\n if specific_server.poll() is not None:\n logger.info(\n f\"Specific server {client_id} is no longer running\"\n f\", freeing its port {specific_server.port}\"\n )\n del self.servers[client_id]\n # Port is available to use again.\n self._free_ports.append(specific_server.port)\n\n time.sleep(CHECK_PROCESS_INTERVAL_S)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 280, "n_words": 41, "vocab_size": 39, "complexity": 4, "nloc": 12, "token_counts": 72, "n_ast_nodes": 133, "n_identifiers": 17, "d_id": 29875, "documentation": { "docstring": "\n Keeps the internal servers dictionary up-to-date with running servers.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 101252, "commit_id": "5e73437be47f2410439a3c6716de96354e6a0c94", "repo": "faceswap", "path": "tools/manual/detected_faces.py", "file_name": "detected_faces.py", "fun_name": "bounding_box", "commit_message": "lib.align updates:\n - alignments.py\n - Add typed dicts for imported alignments\n - Explicitly check for presence of thumb value in alignments dict\n - linting\n - detected_face.py\n - Typing\n - Linting\n - Legacy support for pre-aligned face\n - Update dependencies to new property names", "code": "def bounding_box(self, frame_index, face_index, pnt_x, width, pnt_y, height, aligner=\"FAN\"):\n \n logger.trace(\"frame_index: %s, face_index %s, pnt_x %s, width %s, pnt_y %s, height %s, \"\n \"aligner: %s\", frame_index, face_index, pnt_x, width, pnt_y, height, aligner)\n face = self._faces_at_frame_index(frame_index)[face_index]\n face.left = pnt_x\n face.width = width\n face.top = pnt_y\n face.height = height\n face._landmarks_xy = self._extractor.get_landmarks(frame_index, face_index, aligner)\n self._globals.tk_update.set(True)\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 135, "n_words": 52, "vocab_size": 30, "complexity": 1, "nloc": 10, "token_counts": 100, "n_ast_nodes": 150, "n_identifiers": 21, "d_id": 20672, "documentation": { "docstring": " Update the bounding box for the :class:`~lib.align.DetectedFace` object at the\n given frame and face indices, with the given dimensions and update the 68 point landmarks\n from the :class:`~tools.manual.manual.Aligner` for the updated bounding box.\n\n Parameters\n ----------\n frame_index: int\n The frame that the face is being set for\n face_index: int\n The face index within the frame\n pnt_x: int\n The left point of the bounding box\n width: int\n The width of the bounding box\n pnt_y: int\n The top point of the bounding box\n height: int\n The height of the bounding box\n aligner: [\"cv2-dnn\", \"FAN\"], optional\n The aligner to use to generate the landmarks. 
Default: \"FAN\"\n ", "n_words": 102, "vocab_size": 55, "n_whitespaces": 264, "language": "en" } }, { "id": 222841, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/cygwinccompiler.py", "file_name": "cygwinccompiler.py", "fun_name": "_compile", "commit_message": "add python 3.10.4 for windows", "code": "def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):\n \n if ext == '.rc' or ext == '.res':\n # gcc needs '.res' and '.rc' compiled to object files !!!\n try:\n self.spawn([\"windres\", \"-i\", src, \"-o\", obj])\n except DistutilsExecError as msg:\n raise CompileError(msg)\n else: # for other files use the C-compiler\n try:\n self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +\n extra_postargs)\n except DistutilsExecError as msg:\n raise CompileError(msg)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 225, "n_words": 63, "vocab_size": 48, "complexity": 5, "nloc": 12, "token_counts": 89, "n_ast_nodes": 149, "n_identifiers": 13, "d_id": 56773, "documentation": { "docstring": "Compiles the source by spawning GCC and windres if needed.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 221746, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/copyreg.py", "file_name": "copyreg.py", "fun_name": "__newobj_ex__", "commit_message": "add python 3.10.4 for windows", "code": "def __newobj_ex__(cls, args, kwargs):\n \n return cls.__new__(cls, *args, **kwargs)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 14, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 23, "n_ast_nodes": 36, "n_identifiers": 5, "d_id": 56500, "documentation": { "docstring": "Used by pickle protocol 4, instead of __newobj__ to allow classes with\n keyword-only arguments to be pickled correctly.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 24, "language": "en" } }, { "id": 196338, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/integrals/transforms.py", "file_name": "transforms.py", "fun_name": "mellin_transform", "commit_message": "Updated import locations", "code": "def mellin_transform(f, x, s, **hints):\n r\n return MellinTransform(f, x, s).doit(**hints)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 15, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 42, "token_counts": 29, "n_ast_nodes": 43, "n_identifiers": 7, "d_id": 47838, "documentation": { "docstring": "\n Compute the Mellin transform `F(s)` of `f(x)`,\n\n .. 
math :: F(s) = \\int_0^\\infty x^{s-1} f(x) \\mathrm{d}x.\n\n For all \"sensible\" functions, this converges absolutely in a strip\n `a < \\operatorname{Re}(s) < b`.\n\n Explanation\n ===========\n\n The Mellin transform is related via change of variables to the Fourier\n transform, and also to the (bilateral) Laplace transform.\n\n This function returns ``(F, (a, b), cond)``\n where ``F`` is the Mellin transform of ``f``, ``(a, b)`` is the fundamental strip\n (as above), and ``cond`` are auxiliary convergence conditions.\n\n If the integral cannot be computed in closed form, this function returns\n an unevaluated :class:`MellinTransform` object.\n\n For a description of possible hints, refer to the docstring of\n :func:`sympy.integrals.transforms.IntegralTransform.doit`. If ``noconds=False``,\n then only `F` will be returned (i.e. not ``cond``, and also not the strip\n ``(a, b)``).\n\n Examples\n ========\n\n >>> from sympy import mellin_transform, exp\n >>> from sympy.abc import x, s\n >>> mellin_transform(exp(-x), x, s)\n (gamma(s), (0, oo), True)\n\n See Also\n ========\n\n inverse_mellin_transform, laplace_transform, fourier_transform\n hankel_transform, inverse_hankel_transform\n ", "n_words": 158, "vocab_size": 117, "n_whitespaces": 245, "language": "en" } }, { "id": 110301, "commit_id": "03a0b5ea238014ba87f74ef766928287726aa00a", "repo": "matplotlib", "path": "lib/matplotlib/patches.py", "file_name": "patches.py", "fun_name": "get_verts", "commit_message": "Doc: Fix grammar and spelling", "code": "def get_verts(self):\n \n trans = self.get_transform()\n path = self.get_path()\n polygons = path.to_polygons(trans)\n if len(polygons):\n return polygons[0]\n return []\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 70, "n_words": 17, "vocab_size": 14, "complexity": 2, "nloc": 7, "token_counts": 42, "n_ast_nodes": 72, "n_identifiers": 9, "d_id": 24041, "documentation": { "docstring": "\n Return a copy of the vertices used in this patch.\n\n If the patch contains Bézier curves, the curves will be interpolated by\n line segments. 
To access the curves as curves, use `get_path`.\n ", "n_words": 32, "vocab_size": 27, "n_whitespaces": 62, "language": "en" } }, { "id": 102162, "commit_id": "bb5b4cceb6f737448eaaa6817cd773b6f4b0e77d", "repo": "pytorch", "path": "tools/test/test_gen_backend_stubs.py", "file_name": "test_gen_backend_stubs.py", "fun_name": "test_missing_cpp_namespace", "commit_message": "Revert \"Revert D32498569: allow external backend codegen to toggle whether to generate out= and inplace kernels\" (#69950)\n\nSummary:\nPull Request resolved: https://github.com/pytorch/pytorch/pull/69950\n\nThis reverts commit f6cad53443704dfe5a20cc62bee14d91e3bffcaa.\n\nTest Plan: Imported from OSS\n\nReviewed By: albanD\n\nDifferential Revision: D33113545\n\nPulled By: bdhirsh\n\nfbshipit-source-id: d6590294662588d36c09662dea65919ad4e1e288", "code": "def test_missing_cpp_namespace(self) -> None:\n yaml_str = \n output_error = self.get_errors_from_gen_backend_stubs(yaml_str)\n self.assertExpectedInline(output_error, )\n", "url": "https://github.com/pytorch/pytorch.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 32, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 7, "token_counts": 26, "n_ast_nodes": 47, "n_identifiers": 6, "d_id": 21477, "documentation": { "docstring": "\\\nbackend: XLA\nsupported:\n- absYou must provide a value for \"cpp_namespace\"", "n_words": 12, "vocab_size": 12, "n_whitespaces": 8, "language": "en" } }, { "id": 227541, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_parcoords.py", "file_name": "_parcoords.py", "fun_name": "labelside", "commit_message": "switch to black .22", "code": "def labelside(self):\n \n return self[\"labelside\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 59214, "documentation": { "docstring": "\n Specifies the location of the `label`. 
\"top\" positions labels\n above, next to the title \"bottom\" positions labels below the\n graph Tilted labels with \"labelangle\" may be positioned better\n inside margins when `labelposition` is set to \"bottom\".\n\n The 'labelside' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['top', 'bottom']\n\n Returns\n -------\n Any\n ", "n_words": 59, "vocab_size": 46, "n_whitespaces": 147, "language": "en" } }, { "id": 302086, "commit_id": "52561ce0769ddcf1e8688c8909692b66495e524b", "repo": "core", "path": "tests/components/mqtt/test_cover.py", "file_name": "test_cover.py", "fun_name": "test_find_in_range_altered_inverted", "commit_message": "Update MQTT tests to use the config entry setup (#72373)\n\n* New testframework and tests for fan platform\r\n\r\n* Merge test_common_new to test_common\r\n\r\n* Add alarm_control_panel\r\n\r\n* Add binary_sensor\r\n\r\n* Add button\r\n\r\n* Add camera\r\n\r\n* Add climate\r\n\r\n* Add config_flow\r\n\r\n* Add cover\r\n\r\n* Add device_tracker_disovery\r\n\r\n* Add device_trigger\r\n\r\n* Add diagnostics\r\n\r\n* Add discovery\r\n\r\n* Add humidifier\r\n\r\n* Add init\r\n\r\n* Add lecacy_vacuum\r\n\r\n* Add light_json\r\n\r\n* Add light_template\r\n\r\n* Add light\r\n\r\n* Add lock\r\n\r\n* Add number\r\n\r\n* Add scene\r\n\r\n* Add select\r\n\r\n* Add sensor\r\n\r\n* Add siren\r\n\r\n* Add state_vacuum\r\n\r\n* Add subscription\r\n\r\n* Add switch\r\n\r\n* Add tag\r\n\r\n* Add trigger\r\n\r\n* Add missed tests\r\n\r\n* Add another missed test\r\n\r\n* Add device_tracker\r\n\r\n* Remove commented out code\r\n\r\n* Correct tests according comments\r\n\r\n* Improve mqtt_mock_entry and recover tests\r\n\r\n* Split fixtures with and without yaml setup\r\n\r\n* Update fixtures manual_mqtt\r\n\r\n* Update fixtures mqtt_json\r\n\r\n* Fix test tasmota\r\n\r\n* Update fixture mqtt_room\r\n\r\n* Revert fixture changes, improve test\r\n\r\n* re-add test", "code": "async def test_find_in_range_altered_inverted(hass):\n \n mqtt_cover = MqttCover(\n hass,\n {\n \"name\": \"cover.test\",\n \"state_topic\": \"state-topic\",\n \"get_position_topic\": None,\n \"command_topic\": \"command-topic\",\n \"availability_topic\": None,\n \"tilt_command_topic\": \"tilt-command-topic\",\n \"tilt_status_topic\": \"tilt-status-topic\",\n \"qos\": 0,\n \"retain\": False,\n \"state_open\": \"OPEN\",\n \"state_closed\": \"CLOSE\",\n \"position_open\": 80,\n \"position_closed\": 180,\n \"payload_open\": \"OPEN\",\n \"payload_close\": \"CLOSE\",\n \"payload_stop\": \"STOP\",\n \"payload_available\": None,\n \"payload_not_available\": None,\n \"optimistic\": False,\n \"value_template\": None,\n \"tilt_open_position\": 180,\n \"tilt_closed_position\": 80,\n \"tilt_min\": 180,\n \"tilt_max\": 80,\n \"tilt_optimistic\": False,\n \"set_position_topic\": None,\n \"set_position_template\": None,\n \"unique_id\": None,\n \"device_config\": None,\n },\n None,\n None,\n )\n\n assert mqtt_cover.find_in_range_from_percent(60) == 120\n assert mqtt_cover.find_in_range_from_percent(60, \"cover\") == 120\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 448, "n_words": 79, "vocab_size": 58, "complexity": 1, "nloc": 39, "token_counts": 156, "n_ast_nodes": 288, "n_identifiers": 5, "d_id": 100923, "documentation": { "docstring": "Test find in range with altered range and inverted.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, 
{ "id": 247069, "commit_id": "1901cb1d4a8b7d9af64493fbd336e9aa2561c20c", "repo": "synapse", "path": "tests/rest/client/test_shadow_banned.py", "file_name": "test_shadow_banned.py", "fun_name": "test_message", "commit_message": "Add type hints to `tests/rest/client` (#12084)", "code": "def test_message(self) -> None:\n \n\n room_id = self.helper.create_room_as(\n self.other_user_id, tok=self.other_access_token\n )\n\n # The user should be in the room.\n self.helper.join(room_id, self.banned_user_id, tok=self.banned_access_token)\n\n # Sending a message should complete successfully.\n result = self.helper.send_event(\n room_id=room_id,\n type=EventTypes.Message,\n content={\"msgtype\": \"m.text\", \"body\": \"with right label\"},\n tok=self.banned_access_token,\n )\n self.assertIn(\"event_id\", result)\n event_id = result[\"event_id\"]\n\n latest_events = self.get_success(\n self.store.get_latest_event_ids_in_room(room_id)\n )\n self.assertNotIn(event_id, latest_events)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 210, "n_words": 53, "vocab_size": 46, "complexity": 1, "nloc": 18, "token_counts": 118, "n_ast_nodes": 192, "n_identifiers": 24, "d_id": 71479, "documentation": { "docstring": "Messages from shadow-banned users don't actually get sent.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 177240, "commit_id": "bffcd74649fb95a57fb834846eb3c7d9693c55b8", "repo": "networkx", "path": "networkx/algorithms/isomorphism/vf2pp.py", "file_name": "vf2pp.py", "fun_name": "vf2pp_isomorphism", "commit_message": "Preliminary VF2++ Implementation (#5788)\n\n* Preliminary implementation of the candidate node pair ordering of VF2++\r\n\r\n* Removed unused lines of code\r\n\r\n* Added todos\r\n\r\n* Added demo and pseudocode for VF2++\r\n\r\n* Pointed out a problem with the pseudocode\r\n\r\n* Initialisation of the VF2++ basis structure\r\n\r\n* Initialise the GraphMatcher\r\n\r\n* Remove useless changes\r\n\r\n* Check labels for the node ordering + demo\r\n\r\n* Code to verify the ordering\r\n\r\n* Implement the ISO feasibility check\r\n\r\n* Implement the IND feasibility\r\n\r\n* Create State class\r\n\r\n* Fixed Dan's code for the ordering\r\n\r\n* Preliminary form of the node ordering\r\n\r\n* Add visualisation\r\n\r\n* Use list comprehension for the Ti computation\r\n\r\n* Remove function\r\n\r\n* Create Unit Tests\r\n\r\n* Add labels check + update unit tests\r\n\r\n* Add pre-computation of G-labels\r\n\r\n* Remove todo\r\n\r\n* First implementation of the candidate selection\r\n\r\n* Initial version of candidate selection\r\n\r\n* Remove unnecessary files\r\n\r\n* Merge candidate selection cases into one\r\n\r\n* Create a function to incrementally update Ti and Ti_out\r\n\r\n* Unit Test for the Ti updating\r\n\r\n* Implement the Ti/Ti_out restoring\r\n\r\n* Finish the restoring of Ti and create unit test\r\n\r\n* Update test file names\r\n\r\n* Uncommented test section\r\n\r\n* Replace redundant loop with for-any\r\n\r\n* Create unit test for candidate selection using the same label for all nodes\r\n\r\n* Create unit test for candidate selection using different labels for the nodes\r\n\r\n* Update feasibility tests without the use of the state class\r\n\r\n* Create more unit tests for the feasibility checking\r\n\r\n* Provide explanation for the unit tests\r\n\r\n* First successful test of the complete ISO VF2++ algorithm (except from the buggy 
ordering)\r\n\r\n* Fix bug: when popping a node to climb up the DFS tree we need the previous node ordering (containing the node that we just popped)\r\n\r\n* Create a separate file for the VF2++ ISO algorithm\r\n\r\n* Delete file\r\n\r\n* Remove redundant iteration and memory use\r\n\r\n* Demo for different labels\r\n\r\n* Add benchmark for the incremental Ti updating\r\n\r\n* Remove unnecessary class\r\n\r\n* Fix bug with the ordering WOOOHOOOOO\r\n\r\n* Unit tests for the node ordering\r\n\r\n* Add unit tests for the VF2++ ISO\r\n\r\n* Fix ordering\r\n\r\n* Probablly fix logic error in ordering\r\n\r\n* Reformatted with black\r\n\r\n* Test precommit\r\n\r\n* Test precommit\r\n\r\n* Test pre commit\r\n\r\n* Testing pre commit\r\n\r\n* Update networkx/algorithms/isomorphism/tests/VF2++/test_vf2pp.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Add unit tests for vf2++\r\n\r\n* Added vf2++ unit test\r\n\r\n* Added precheck for VF2++\r\n\r\n* Add unit tests for the precheck\r\n\r\n* Updated the benchmarking\r\n\r\n* Updated the benchmark\r\n\r\n* Apply hooks\r\n\r\n* Add documentation for the ordering\r\n\r\n* Add documentation for the candidate selection\r\n\r\n* Added documentation for the feasibility\r\n\r\n* Added documentation for vf2++\r\n\r\n* Separate functions for ISO feasibility\r\n\r\n* Refine unit tests\r\n\r\n* Apply hooks\r\n\r\n* Force reformat all files\r\n\r\n* Remove redundant return statements from VF2__\r\n\r\n* Apply hooks\r\n\r\n* Apply hooks\r\n\r\n* Format\r\n\r\n* Minor changes\r\n\r\n* Add unit tests\r\n\r\n* Adjusted benchmark\r\n\r\n* Fix benchmark\r\n\r\n* Isort\r\n\r\n* Isort benchmark\r\n\r\n* Apply optimization in the candidate selection\r\n\r\n* Track matched node with pointer\r\n\r\n* Adjust benchmark\r\n\r\n* Restructure in VF2 function\r\n\r\n* Make VF2++ EXTREMELY PRETTY\r\n\r\n* Removed sorting in feasibility rules\r\n\r\n* Get rid of visited set, check mapping instead\r\n\r\n* Update networkx/algorithms/isomorphism/tests/VF2++/test_vf2pp.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Made color assignement deterministic in VF2++ unit tests\r\n\r\n* Add keyword argument in unit tests\r\n\r\n* Hoepfully fix pipeline errors\r\n\r\n* Add vf2++ unit tests for multigraphs\r\n\r\n* Add Unit tests for Feasibility\r\n\r\n* Add unit tests for feasibility on multi graphs\r\n\r\n* Finalize feasibility tests for multigraph settings\r\n\r\n* Update documentation\r\n\r\n* Remove list comprehension and boost performance\r\n\r\n* Add unit tests for both graphs and multi graphs, using same labels\r\n\r\n* Isort\r\n\r\n* Optimized precheck\r\n\r\n* Replace loop with any\r\n\r\n* Optimize multigraph chceck\r\n\r\n* Transfer except statement\r\n\r\n* Check order consistency\r\n\r\n* Cache degrees and labels from the beginning\r\n\r\n* Delete benchmark to create new\r\n\r\n* Fix precheck bug\r\n\r\n* Adjust unit tests\r\n\r\n* Add benchmark for perofmance comparison between VF2 and VF2++\r\n\r\n* Fix Ti computing tests\r\n\r\n* Hopefully fix isort\r\n\r\n* Add benchmark for the candidate selection methods\r\n\r\n* Rename modules: lower case, remove +\r\n\r\n* Refactor VF2++ arguments\r\n\r\n* Adjust VF2++ to work with multiple node labels\r\n\r\n* Add unit tests for multiple labels\r\n\r\n* Adjust for different number of labels per node\r\n\r\n* Finish arguments of VF2++\r\n\r\n* Add user functions\r\n\r\n* Exported the two vf2++ functions\r\n\r\n* Added underscore prefix to private functions and fixed tests\r\n\r\n* Update 
networkx/algorithms/isomorphism/vf2pp.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Update networkx/algorithms/isomorphism/demo.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Apply suggested changes\r\n\r\n* Refactor rst files\r\n\r\n* Rm unnecessary toctree from isomorphism page.\r\n\r\n* Autodoc vf2pp module + public functions.\r\n\r\n* Rm dedicated vf2pp reference article.\r\n\r\n* Rm extra vf2pp listing from autosummaries.\r\n\r\n* Add summary of three functions to module docstring.\r\n\r\n* Make sure docstrings match their functions.\r\n\r\n* Refactor everything\r\n\r\n* Format code\r\n\r\n* Add unit test\r\n\r\n* Inline process level function in node ordering\r\n\r\n* Perform intersection first rather than last\r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp_helpers/candidates.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Replace return statement with multiple operations and make it more readable\r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp_helpers/feasibility.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Fix multigraph bug in update_Tinout\r\n\r\n* Abstract the argmax function\r\n\r\n* Add unit test for first case of candidate selection\r\n\r\n* Create unit test for all candidate selection cases\r\n\r\n* Remove re-definition of namedtuple parameters\r\n\r\n* Update doc/reference/algorithms/isomorphism.rst\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Update networkx/algorithms/__init__.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Delete benchmark file\r\n\r\n* Add demo file\r\n\r\n* Create util file containing the helper functions, common across all unit tests\r\n\r\n* Fix CI/CD\r\n\r\n* Make unit tests for Ti updating specific\r\n\r\n* Remove util functions from vf2pp tests\r\n\r\n* Remove utils functions from multivf2pp tests\r\n\r\n* Remove utils functions from candidate tests\r\n\r\n* Remove utils functions from ordering checks\r\n\r\n* Remove utils functions from Ti tests\r\n\r\n* Add example in docstring\r\n\r\n* Remove unused utils functions\r\n\r\n* Separate initialization of vf2pp\r\n\r\n* Inline functions and add new abstract function for pushing to stack\r\n\r\n* Inline push to stack\r\n\r\n* Add commentsa\r\n\r\n* Separate precheck functions\r\n\r\n* Replace method with existing networkx function\r\n\r\n* Include label initialization inside parameter initializer function\r\n\r\n* Rename Tiout to Titilde\r\n\r\n* Update networkx/algorithms/isomorphism/tests/vf2pp/test_Ti_computing.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Use canonical setitem for dictionary insertions\r\n\r\n* Update networkx/algorithms/isomorphism/tests/vf2pp/test_precheck.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Remove variable assignement\r\n\r\n* Merge unit tests of vf2pp for graphs and multigraphs into the same file\r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Change variable name\r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Re-write ordering unit tests\r\n\r\n* Rename vf2pp solver\r\n\r\n* Update networkx/algorithms/isomorphism/vf2pp_helpers/feasibility.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Replace abstractified argmax function with two loops for 
readability\r\n\r\n* Apply final changes\r\n\r\n* Fix mistake\r\n\r\n* Update ref guide to reflect new fn names.\r\n\r\n* Update docstrings\r\n * Fix line length in module docstring\r\n * Copy updated parameter section to all 3 public fns.\r\n * Add Yields section to all_isomorphisms fn.\r\n\r\nCo-authored-by: Ross Barnowski \r\nCo-authored-by: Dan Schult ", "code": "def vf2pp_isomorphism(G1, G2, node_label=None, default_label=None):\n \n try:\n mapping = next(vf2pp_all_isomorphisms(G1, G2, node_label, default_label))\n return mapping\n except StopIteration:\n return None\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 48, "n_words": 18, "vocab_size": 15, "complexity": 2, "nloc": 6, "token_counts": 40, "n_ast_nodes": 61, "n_identifiers": 9, "d_id": 42307, "documentation": { "docstring": "Return an isomorphic mapping between `G1` and `G2` if it exists.\n\n Parameters\n ----------\n G1, G2 : NetworkX Graph or MultiGraph instances.\n The two graphs to check for isomorphism.\n\n node_label : str, optional\n The name of the node attribute to be used when comparing nodes.\n The default is `None`, meaning node attributes are not considered\n in the comparison. Any node that doesn't have the `node_labels`\n attribute uses `default_label` instead.\n\n default_label : scalar\n Default value to use when a node doesn't have an attribute\n named `node_label`. Default is `None`.\n\n Returns\n -------\n dict or None\n Node mapping if the two graphs are isomorphic. None otherwise.\n ", "n_words": 102, "vocab_size": 75, "n_whitespaces": 185, "language": "en" } }, { "id": 203569, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admin/widgets.py", "file_name": "widgets.py", "fun_name": "url_params_from_lookup_dict", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def url_params_from_lookup_dict(lookups):\n \n params = {}\n if lookups and hasattr(lookups, \"items\"):\n for k, v in lookups.items():\n if callable(v):\n v = v()\n if isinstance(v, (tuple, list)):\n v = \",\".join(str(x) for x in v)\n elif isinstance(v, bool):\n v = (\"0\", \"1\")[v]\n else:\n v = str(v)\n params[k] = v\n return params\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 181, "n_words": 47, "vocab_size": 31, "complexity": 8, "nloc": 14, "token_counts": 103, "n_ast_nodes": 171, "n_identifiers": 15, "d_id": 50449, "documentation": { "docstring": "\n Convert the type of lookups specified in a ForeignKey limit_choices_to\n attribute to a dictionary of query parameters\n ", "n_words": 17, "vocab_size": 15, "n_whitespaces": 27, "language": "en" } }, { "id": 249828, "commit_id": "115f0eb2334b13665e5c112bd87f95ea393c9047", "repo": "synapse", "path": "tests/storage/test_id_generators.py", "file_name": "test_id_generators.py", "fun_name": "test_single_gen_next", "commit_message": "Reintroduce #14376, with bugfix for monoliths (#14468)\n\n* Add tests for StreamIdGenerator\n\n* Drive-by: annotate all defs\n\n* Revert \"Revert \"Remove slaved id tracker (#14376)\" (#14463)\"\n\nThis reverts commit d63814fd736fed5d3d45ff3af5e6d3bfae50c439, which in\nturn reverted 36097e88c4da51fce6556a58c49bd675f4cf20ab. 
This restores\nthe latter.\n\n* Fix StreamIdGenerator not handling unpersisted IDs\n\nSpotted by @erikjohnston.\n\nCloses #14456.\n\n* Changelog\n\nCo-authored-by: Nick Mills-Barrett \nCo-authored-by: Erik Johnston ", "code": "def test_single_gen_next(self) -> None:\n \n id_gen = self._create_id_generator()\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 5, "token_counts": 26, "n_ast_nodes": 28, "n_identifiers": 4, "d_id": 73158, "documentation": { "docstring": "Check that we correctly increment the current token from the DB.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 265020, "commit_id": "bab6fb0de24d568371c8a55bcb22768b2d60f515", "repo": "netbox", "path": "netbox/dcim/svg/cables.py", "file_name": "cables.py", "fun_name": "render", "commit_message": "Update SVG trace rendering to support multiple terminations per cable end", "code": "def render(self):\n \n from dcim.models import Cable\n from wireless.models import WirelessLink\n\n traced_path = self.origin.trace()\n\n # Iterate through each (terms, cable, terms) segment in the path\n for i, segment in enumerate(traced_path):\n near_ends, connector, far_ends = segment\n\n # Near end parent\n if i == 0:\n # If this is the first segment, draw the originating termination's parent object\n parent_object = self._draw_box(\n x=0,\n width=self.width,\n color=self._get_color(near_ends[0].parent_object),\n url=near_ends[0].parent_object.get_absolute_url(),\n labels=self._get_labels(near_ends[0].parent_object)\n )\n self.parent_objects.append(parent_object)\n\n # Near end termination\n self.draw_terminations(near_ends)\n\n # Connector (a Cable or WirelessLink)\n connector = connector[0] # Remove Cable from list\n if connector is not None:\n\n # Cable\n if type(connector) is Cable:\n connector_labels = [\n f'Cable {connector}',\n connector.get_status_display()\n ]\n if connector.type:\n connector_labels.append(connector.get_type_display())\n if connector.length and connector.length_unit:\n connector_labels.append(f'{connector.length} {connector.get_length_unit_display()}')\n cable = self.draw_cable(\n color=connector.color or '000000',\n url=connector.get_absolute_url(),\n labels=connector_labels\n )\n self.connectors.append(cable)\n\n # WirelessLink\n elif type(connector) is WirelessLink:\n connector_labels = [\n f'Wireless link {connector}',\n connector.get_status_display()\n ]\n if connector.ssid:\n connector_labels.append(connector.ssid)\n wirelesslink = self.draw_wirelesslink(\n url=connector.get_absolute_url(),\n labels=connector_labels\n )\n self.connectors.append(wirelesslink)\n\n # Far end termination\n self.draw_terminations(far_ends)\n\n # Far end parent\n parent_object = self._draw_box(\n x=0,\n width=self.width,\n color=self._get_color(far_ends[0].parent_object),\n url=far_ends[0].parent_object.get_absolute_url(),\n labels=self._get_labels(far_ends[0].parent_object),\n )\n self.parent_objects.append(parent_object)\n\n elif far_ends:\n\n # Attachment\n attachment = self.draw_attachment()\n self.connectors.append(attachment)\n\n # ProviderNetwork\n parent_object = self._draw_box(\n x=0,\n width=self.width,\n color=self._get_color(far_ends[0]),\n url=far_ends[0].get_absolute_url(),\n labels=self._get_labels(far_ends[0])\n )\n self.parent_objects.append(parent_object)\n\n # Determine drawing size\n self.drawing = svgwrite.Drawing(\n 
size=(self.width, self.cursor + 2)\n )\n\n # Attach CSS stylesheet\n with open(f'{settings.STATIC_ROOT}/cable_trace.css') as css_file:\n self.drawing.defs.add(self.drawing.style(css_file.read()))\n\n # Add elements to the drawing in order of depth (Z axis)\n for element in self.connectors + self.parent_objects + self.terminations:\n self.drawing.add(element)\n\n return self.drawing\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 1582, "n_words": 228, "vocab_size": 141, "complexity": 13, "nloc": 73, "token_counts": 504, "n_ast_nodes": 832, "n_identifiers": 59, "d_id": 77946, "documentation": { "docstring": "\n Return an SVG document representing a cable trace.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 223868, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/quoprimime.py", "file_name": "quoprimime.py", "fun_name": "_unquote_match", "commit_message": "add python 3.10.4 for windows", "code": "def _unquote_match(match):\n \n s = match.group(0)\n return unquote(s)\n\n\n# Header decoding is done a bit differently", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 23, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 3, "token_counts": 19, "n_ast_nodes": 35, "n_identifiers": 5, "d_id": 57119, "documentation": { "docstring": "Turn a match in the form =AB to the ASCII character with value 0xab", "n_words": 14, "vocab_size": 13, "n_whitespaces": 13, "language": "en" } }, { "id": 231182, "commit_id": "d5a345d01507f8b6792c51507d1d8f35d7386d29", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/scatter/_line.py", "file_name": "_line.py", "fun_name": "backoff", "commit_message": "update to plotly.js 2.16.1", "code": "def backoff(self):\n \n return self[\"backoff\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 62764, "documentation": { "docstring": "\n Sets the line back off from the end point of the nth line\n segment (in px). This option is useful e.g. to avoid overlap\n with arrowhead markers. 
With \"auto\" the lines would trim before\n markers if `marker.angleref` is set to \"previous\".\n\n The 'backoff' property is a number and may be specified as:\n - An int or float in the interval [0, inf]\n - A tuple, list, or one-dimensional numpy array of the above\n\n Returns\n -------\n int|float|numpy.ndarray\n ", "n_words": 76, "vocab_size": 64, "n_whitespaces": 158, "language": "en" } }, { "id": 100663, "commit_id": "a9908b46f77dc66ac7efe7100ea0eed4b1f2b460", "repo": "faceswap", "path": "tools/alignments/jobs.py", "file_name": "jobs.py", "fun_name": "_update_png_headers", "commit_message": "Alignments tool - Replace 'extract-large' with 'min-size'", "code": "def _update_png_headers(self):\n \n to_update = [ # Items whose face index has changed\n x for x in self._items.file_list_sorted\n if x[\"face_index\"] != self._items.items[x[\"source_filename\"]].index(x[\"face_index\"])]\n\n for file_info in tqdm(to_update, desc=\"Updating PNG Headers\", leave=False):\n frame = file_info[\"source_filename\"]\n face_index = file_info[\"face_index\"]\n new_index = self._items.items[frame].index(face_index)\n\n fullpath = os.path.join(self._items.folder, file_info[\"current_filename\"])\n logger.debug(\"Updating png header for '%s': face index from %s to %s\",\n fullpath, face_index, new_index)\n\n # Update file_list_sorted for rename task\n orig_filename = f\"{os.path.splitext(frame)[0]}_{new_index}.png\"\n file_info[\"face_index\"] = new_index\n file_info[\"original_filename\"] = orig_filename\n\n face = DetectedFace()\n face.from_alignment(self._alignments.get_faces_in_frame(frame)[new_index])\n meta = dict(alignments=face.to_png_meta(),\n source=dict(alignments_version=file_info[\"alignments_version\"],\n original_filename=orig_filename,\n face_index=new_index,\n source_filename=frame,\n source_is_video=file_info[\"source_is_video\"],\n source_frame_dims=file_info.get(\"source_frame_dims\")))\n update_existing_metadata(fullpath, meta)\n\n logger.info(\"%s Extracted face(s) had their header information updated\", len(to_update))\n\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 511, "n_words": 95, "vocab_size": 73, "complexity": 4, "nloc": 25, "token_counts": 224, "n_ast_nodes": 387, "n_identifiers": 43, "d_id": 20122, "documentation": { "docstring": " Update the EXIF iTXt field of any face PNGs that have had their face index changed.\n\n Notes\n -----\n This could be quicker if parellizing in threads, however, Windows (at least) does not seem\n to like this and has a tendency to throw permission errors, so this remains single threaded\n for now.\n ", "n_words": 51, "vocab_size": 48, "n_whitespaces": 94, "language": "en" } }, { "id": 153569, "commit_id": "605efa618e7994681f57b11d04d417f353ef8d50", "repo": "modin", "path": "modin/pandas/base.py", "file_name": "base.py", "fun_name": "copy", "commit_message": "DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333)\n\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: Alexander Myskov ", "code": "def copy(self, deep=True): # noqa: PR01, RT01, D200\n \n if deep:\n return self.__constructor__(query_compiler=self._query_compiler.copy())\n new_obj = self.__constructor__(query_compiler=self._query_compiler)\n self._add_sibling(new_obj)\n return new_obj\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 65, "n_words": 18, "vocab_size": 16, 
"complexity": 2, "nloc": 6, "token_counts": 48, "n_ast_nodes": 80, "n_identifiers": 8, "d_id": 35450, "documentation": { "docstring": "\n Make a copy of the object's metadata.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 226452, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_choropleth.py", "file_name": "_choropleth.py", "fun_name": "z", "commit_message": "switch to black .22", "code": "def z(self):\n \n return self[\"z\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 58125, "documentation": { "docstring": "\n Sets the color values.\n\n The 'z' property is an array that may be specified as a tuple,\n list, numpy array, or pandas Series\n\n Returns\n -------\n numpy.ndarray\n ", "n_words": 26, "vocab_size": 26, "n_whitespaces": 76, "language": "en" } }, { "id": 98629, "commit_id": "2d33f7cba85abb192111733892f0e7ac49812054", "repo": "sentry", "path": "tests/sentry/middleware/test_ratelimit_middleware.py", "file_name": "test_ratelimit_middleware.py", "fun_name": "test_self_hosted_rate_limit_check", "commit_message": "ref(rate limits): Move settings out of sentry (#33806)\n\n* ref(rate limits): Move settings out of sentry", "code": "def test_self_hosted_rate_limit_check(self, default_rate_limit_mock):\n \n request = self.factory.get(\"/\")\n default_rate_limit_mock.return_value = RateLimit(10, 100)\n self.middleware.process_view(request, self._test_endpoint, [], {})\n assert not request.will_be_rate_limited\n\n default_rate_limit_mock.return_value = RateLimit(1, 1)\n with freeze_time(\"2000-01-01\") as frozen_time:\n self.middleware.process_view(request, self._test_endpoint, [], {})\n assert not request.will_be_rate_limited\n frozen_time.tick(1)\n self.middleware.process_view(request, self._test_endpoint, [], {})\n assert not request.will_be_rate_limited\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 144, "n_words": 40, "vocab_size": 23, "complexity": 1, "nloc": 12, "token_counts": 121, "n_ast_nodes": 193, "n_identifiers": 15, "d_id": 19592, "documentation": { "docstring": "Check that for self hosted installs we don't rate limit", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 265983, "commit_id": "484efdaf75f267a43f9321b938fda1bc967b9e53", "repo": "netbox", "path": "netbox/extras/views.py", "file_name": "views.py", "fun_name": "get_queryset", "commit_message": "Closes #9623: Implement saved filters (#10801)\n\n* Initial work on saved filters\r\n\r\n* Return only enabled/shared filters\r\n\r\n* Add tests\r\n\r\n* Clean up filtering of usable SavedFilters", "code": "def get_queryset(self, request):\n \n queryset = SavedFilter.objects.all()\n user = request.user\n if user.is_superuser:\n return queryset\n if user.is_anonymous:\n return queryset.filter(shared=True)\n return queryset.filter(\n Q(shared=True) | Q(user=user)\n )\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 105, "n_words": 23, "vocab_size": 18, "complexity": 3, "nloc": 10, "token_counts": 62, "n_ast_nodes": 101, "n_identifiers": 13, "d_id": 
78254, "documentation": { "docstring": "\n Return only shared SavedFilters, or those owned by the current user, unless\n this is a superuser.\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 38, "language": "en" } }, { "id": 226464, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_choropleth.py", "file_name": "_choropleth.py", "fun_name": "locations", "commit_message": "switch to black .22", "code": "def locations(self):\n \n return self[\"locations\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 58137, "documentation": { "docstring": "\n Sets the coordinates via location IDs or names. See\n `locationmode` for more info.\n\n The 'locations' property is an array that may be specified as a tuple,\n list, numpy array, or pandas Series\n\n Returns\n -------\n numpy.ndarray\n ", "n_words": 35, "vocab_size": 34, "n_whitespaces": 92, "language": "en" } }, { "id": 259120, "commit_id": "279388d9ed2ea83194dd45a2d78161be30b43aa7", "repo": "scikit-learn", "path": "sklearn/preprocessing/_polynomial.py", "file_name": "_polynomial.py", "fun_name": "get_feature_names_out", "commit_message": "DOC Improve get_feature_names_out docstrings (#22718)\n\nCo-authored-by: Thomas J. Fan ", "code": "def get_feature_names_out(self, input_features=None):\n \n powers = self.powers_\n input_features = _check_feature_names_in(self, input_features)\n feature_names = []\n for row in powers:\n inds = np.where(row)[0]\n if len(inds):\n name = \" \".join(\n \"%s^%d\" % (input_features[ind], exp)\n if exp != 1\n else input_features[ind]\n for ind, exp in zip(inds, row[inds])\n )\n else:\n name = \"1\"\n feature_names.append(name)\n return np.asarray(feature_names, dtype=object)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 258, "n_words": 51, "vocab_size": 41, "complexity": 5, "nloc": 17, "token_counts": 111, "n_ast_nodes": 176, "n_identifiers": 21, "d_id": 75579, "documentation": { "docstring": "Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Input features.\n\n - If `input_features is None`, then `feature_names_in_` is\n used as feature names in. 
If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n - If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names.\n ", "n_words": 76, "vocab_size": 54, "n_whitespaces": 221, "language": "en" } }, { "id": 274560, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/losses.py", "file_name": "losses.py", "fun_name": "squared_hinge", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def squared_hinge(y_true, y_pred):\n \n y_pred = tf.convert_to_tensor(y_pred)\n y_true = tf.cast(y_true, y_pred.dtype)\n y_true = _maybe_convert_labels(y_true)\n return backend.mean(\n tf.square(tf.maximum(1.0 - y_true * y_pred, 0.0)), axis=-1\n )\n\n\n@keras_export(\"keras.metrics.hinge\", \"keras.losses.hinge\")\n@tf.__internal__.dispatch.add_dispatch_support", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.metrics.hinge\", \"keras.losses.hinge\")\n@tf.__internal__.dispatch.add_dispatch_support", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 49, "n_words": 26, "vocab_size": 22, "complexity": 1, "nloc": 7, "token_counts": 66, "n_ast_nodes": 125, "n_identifiers": 17, "d_id": 81237, "documentation": { "docstring": "Computes the squared hinge loss between `y_true` and `y_pred`.\n\n `loss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)`\n\n Standalone usage:\n\n >>> y_true = np.random.choice([-1, 1], size=(2, 3))\n >>> y_pred = np.random.random(size=(2, 3))\n >>> loss = tf.keras.losses.squared_hinge(y_true, y_pred)\n >>> assert loss.shape == (2,)\n >>> assert np.array_equal(\n ... loss.numpy(),\n ... np.mean(np.square(np.maximum(1. - y_true * y_pred, 0.)), axis=-1))\n\n Args:\n y_true: The ground truth values. `y_true` values are expected to be -1 or 1.\n If binary (0 or 1) labels are provided we will convert them to -1 or 1.\n shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n\n Returns:\n Squared hinge loss values. shape = `[batch_size, d0, .. dN-1]`.\n ", "n_words": 113, "vocab_size": 73, "n_whitespaces": 187, "language": "en" } }, { "id": 56186, "commit_id": "ab322ef9b1bb65887984854dc39b316f98da3b97", "repo": "prefect", "path": "tests/flow_runners/test_kubernetes.py", "file_name": "test_kubernetes.py", "fun_name": "results_directory", "commit_message": "Allow Kubernetes users to customize or replace the Job manifest for flow runs\n\nAdding support for either replacing the base `job=` for a KubernetesFlowRunner,\napplying a list of RFC 6902 JSON patches provided by `customizations=`, or both.\nThis implements the core changes, while preserving backwards compatiblity with\nthe current API. Users can still provide `image=`, `namepace=` and other\ntop-level parameters, which are now considered \"shortcuts\" for generating JSON\npatches.\n\nThis is most of the work for PrefectHQ/orion#1900, but does not include the planned CLI updates\nto allow users to preview their jobs. 
Those will come in a separate change.\n\nAlso updating the Kubernetes integration tests to be more reliable, and adding\ndocs about how to get set up for running them.", "code": "async def results_directory(self) -> Path:\n \n directory = Path(os.getcwd()) / \".prefect-results\"\n os.makedirs(directory, exist_ok=True)\n for filename in os.listdir(directory):\n os.unlink(directory / filename)\n return directory\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 67, "n_words": 21, "vocab_size": 19, "complexity": 2, "nloc": 9, "token_counts": 50, "n_ast_nodes": 85, "n_identifiers": 11, "d_id": 11458, "documentation": { "docstring": "In order to share results reliably with the Kubernetes cluster, we need to be\n somehwere in the user's directory tree for the most cross-platform\n compatibilty. It's challenging to coordinate /tmp/ directories across systems", "n_words": 33, "vocab_size": 29, "n_whitespaces": 46, "language": "en" } }, { "id": 226416, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_carpet.py", "file_name": "_carpet.py", "fun_name": "b", "commit_message": "switch to black .22", "code": "def b(self):\n \n return self[\"b\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 58089, "documentation": { "docstring": "\n A two dimensional array of y coordinates at each carpet point.\n\n The 'b' property is an array that may be specified as a tuple,\n list, numpy array, or pandas Series\n\n Returns\n -------\n numpy.ndarray\n ", "n_words": 33, "vocab_size": 32, "n_whitespaces": 83, "language": "en" } }, { "id": 233420, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/scatter/_line.py", "file_name": "_line.py", "fun_name": "simplify", "commit_message": "switch to black .22", "code": "def simplify(self):\n \n return self[\"simplify\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 64864, "documentation": { "docstring": "\n Simplifies lines by removing nearly-collinear points. When\n transitioning lines, it may be desirable to disable this so\n that the number of points along the resulting SVG path is\n unaffected.\n\n The 'simplify' property must be specified as a bool\n (either True, or False)\n\n Returns\n -------\n bool\n ", "n_words": 45, "vocab_size": 42, "n_whitespaces": 116, "language": "en" } }, { "id": 110191, "commit_id": "723cd86d7d7bdc14a4d3fc0e08c3a01e72d310b6", "repo": "matplotlib", "path": "lib/matplotlib/widgets.py", "file_name": "widgets.py", "fun_name": "set_active", "commit_message": "Use scatter for check boxes instead of Rectangle\n\nWith the current implementation, the boxes get stretched into rectangles\nif the aspect ratio is not maintained. 
To overcome this, the boxes are\nnow created using scatter instead to maintain their shapes.", "code": "def set_active(self, index):\n \n if index not in range(len(self.labels)):\n raise ValueError(f'Invalid CheckButton index: {index}')\n\n if colors.same_color(\n self._crosses.get_facecolor()[index], colors.to_rgba(\"none\")\n ):\n self._crosses.get_facecolor()[index] = colors.to_rgba(\"k\")\n else:\n self._crosses.get_facecolor()[index] = colors.to_rgba(\"none\")\n\n if hasattr(self, \"_rectangles\"):\n for i, p in enumerate(self._rectangles):\n p.set_facecolor(\"k\" if colors.same_color(\n p.get_facecolor(), colors.to_rgba(\"none\"))\n else \"none\")\n\n if self.drawon:\n self.ax.figure.canvas.draw()\n\n if self.eventson:\n self._observers.process('clicked', self.labels[index].get_text())\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 249, "n_words": 47, "vocab_size": 37, "complexity": 8, "nloc": 18, "token_counts": 174, "n_ast_nodes": 295, "n_identifiers": 27, "d_id": 23965, "documentation": { "docstring": "\n Toggle (activate or deactivate) a check button by index.\n\n Callbacks will be triggered if :attr:`eventson` is True.\n\n Parameters\n ----------\n index : int\n Index of the check button to toggle.\n\n Raises\n ------\n ValueError\n If *index* is invalid.\n ", "n_words": 36, "vocab_size": 33, "n_whitespaces": 122, "language": "en" } }, { "id": 153178, "commit_id": "8d1004fdbdaa05700613c8e6287641a732acf606", "repo": "modin", "path": "modin/core/dataframe/pandas/partitioning/partition_manager.py", "file_name": "partition_manager.py", "fun_name": "column_partitions", "commit_message": "FIX-#3675: Expand virtual partitioning utility (#3886)\n\nCo-authored-by: mvashishtha \r\nCo-authored-by: jeffreykennethli \r\nCo-authored-by: Anatoly Myachev \r\nCo-authored-by: Vasily Litvinov \r\nCo-authored-by: Alexey Prutskov \r\nCo-authored-by: Mahesh Vashishtha \r\nCo-authored-by: Naren Krishna <92325366+naren-ponder@users.noreply.github.com>\r\nCo-authored-by: Yaroslav Igoshev \r\nCo-authored-by: Dmitry Chigarev <62142979+dchigarev@users.noreply.github.com>\r\nCo-authored-by: Yaroslav Igoshev \r\nCo-authored-by: Doris Lee \r\nCo-authored-by: Aditya Parameswaran \r\nCo-authored-by: Rehan Sohail Durrani \r\nCo-authored-by: Susmit Vengurlekar \r\nSigned-off-by: Devin Petersohn ", "code": "def column_partitions(cls, partitions, full_axis=True):\n \n if not isinstance(partitions, list):\n partitions = [partitions]\n return [\n cls._column_partitions_class(col, full_axis=full_axis)\n for frame in partitions\n for col in frame.T\n ]\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 96, "n_words": 24, "vocab_size": 21, "complexity": 4, "nloc": 8, "token_counts": 49, "n_ast_nodes": 74, "n_identifiers": 10, "d_id": 35281, "documentation": { "docstring": "\n Get the list of `BaseDataframeAxisPartition` objects representing column-wise paritions.\n\n Parameters\n ----------\n partitions : list-like\n List of (smaller) partitions to be combined to column-wise partitions.\n full_axis : bool, default: True\n Whether or not this partition contains the entire column axis.\n\n Returns\n -------\n list\n A list of `BaseDataframeAxisPartition` objects.\n\n Notes\n -----\n Each value in this list will be an `BaseDataframeAxisPartition` object.\n `BaseDataframeAxisPartition` is located in `axis_partition.py`.\n 
", "n_words": 64, "vocab_size": 48, "n_whitespaces": 189, "language": "en" } }, { "id": 206187, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/template/base.py", "file_name": "base.py", "fun_name": "token_kwargs", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def token_kwargs(bits, parser, support_legacy=False):\n \n if not bits:\n return {}\n match = kwarg_re.match(bits[0])\n kwarg_format = match and match[1]\n if not kwarg_format:\n if not support_legacy:\n return {}\n if len(bits) < 3 or bits[1] != \"as\":\n return {}\n\n kwargs = {}\n while bits:\n if kwarg_format:\n match = kwarg_re.match(bits[0])\n if not match or not match[1]:\n return kwargs\n key, value = match.groups()\n del bits[:1]\n else:\n if len(bits) < 3 or bits[1] != \"as\":\n return kwargs\n key, value = bits[2], bits[0]\n del bits[:3]\n kwargs[key] = parser.compile_filter(value)\n if bits and not kwarg_format:\n if bits[0] != \"and\":\n return kwargs\n del bits[:1]\n return kwargs\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 334, "n_words": 95, "vocab_size": 40, "complexity": 16, "nloc": 29, "token_counts": 188, "n_ast_nodes": 303, "n_identifiers": 13, "d_id": 51399, "documentation": { "docstring": "\n Parse token keyword arguments and return a dictionary of the arguments\n retrieved from the ``bits`` token list.\n\n `bits` is a list containing the remainder of the token (split by spaces)\n that is to be checked for arguments. Valid arguments are removed from this\n list.\n\n `support_legacy` - if True, the legacy format ``1 as foo`` is accepted.\n Otherwise, only the standard ``foo=1`` format is allowed.\n\n There is no requirement for all remaining token ``bits`` to be keyword\n arguments, so return the dictionary as soon as an invalid argument format\n is reached.\n ", "n_words": 90, "vocab_size": 59, "n_whitespaces": 124, "language": "en" } }, { "id": 228737, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/bar/marker/_colorbar.py", "file_name": "_colorbar.py", "fun_name": "xpad", "commit_message": "switch to black .22", "code": "def xpad(self):\n \n return self[\"xpad\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 60410, "documentation": { "docstring": "\n Sets the amount of padding (in px) along the x direction.\n\n The 'xpad' property is a number and may be specified as:\n - An int or float in the interval [0, inf]\n\n Returns\n -------\n int|float\n ", "n_words": 35, "vocab_size": 33, "n_whitespaces": 87, "language": "en" } }, { "id": 58295, "commit_id": "f107fb0dcffae284cbefd7590274087b147c8483", "repo": "prefect", "path": "src/prefect/deployments.py", "file_name": "deployments.py", "fun_name": "load", "commit_message": "Implement load and update methods on deployment objects", "code": "async def load(self) -> bool:\n \n if not self.name and self.flow_name:\n raise ValueError(\"Both a deployment name and flow name must be provided.\")", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 46, "n_words": 21, "vocab_size": 19, "complexity": 6, 
"nloc": 34, "token_counts": 159, "n_ast_nodes": 43, "n_identifiers": 6, "d_id": 11750, "documentation": { "docstring": "\n Queries the API for a deployment with this name for this flow, and if found, prepopulates\n settings. Returns a boolean specifying whether a load was successful or not.\n ", "n_words": 28, "vocab_size": 24, "n_whitespaces": 51, "language": "en" } }, { "id": 288865, "commit_id": "f23b1750e85f07091eb896a0b12b8f95e5646338", "repo": "core", "path": "tests/components/homekit_controller/specific_devices/test_homeassistant_bridge.py", "file_name": "test_homeassistant_bridge.py", "fun_name": "test_homeassistant_bridge_fan_setup", "commit_message": "Migrate HomeKit Controller to use stable identifiers (#80064)", "code": "async def test_homeassistant_bridge_fan_setup(hass):\n \n accessories = await setup_accessories_from_file(\n hass, \"home_assistant_bridge_fan.json\"\n )\n await setup_test_accessories(hass, accessories)\n\n await assert_devices_and_entities_created(\n hass,\n DeviceTestInfo(\n unique_id=HUB_TEST_ACCESSORY_ID,\n name=\"Home Assistant Bridge\",\n model=\"Bridge\",\n manufacturer=\"Home Assistant\",\n sw_version=\"0.104.0.dev0\",\n hw_version=\"\",\n serial_number=\"homekit.bridge\",\n devices=[\n DeviceTestInfo(\n name=\"Living Room Fan\",\n model=\"Fan\",\n manufacturer=\"Home Assistant\",\n sw_version=\"0.104.0.dev0\",\n hw_version=\"\",\n serial_number=\"fan.living_room_fan\",\n unique_id=\"00:00:00:00:00:00:aid:1256851357\",\n devices=[],\n entities=[\n EntityTestInfo(\n entity_id=\"fan.living_room_fan\",\n friendly_name=\"Living Room Fan\",\n unique_id=\"00:00:00:00:00:00_1256851357_8\",\n supported_features=(\n FanEntityFeature.DIRECTION\n | FanEntityFeature.SET_SPEED\n | FanEntityFeature.OSCILLATE\n ),\n capabilities={\n \"preset_modes\": None,\n },\n state=\"off\",\n )\n ],\n ),\n ],\n entities=[],\n ),\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 23, "n_whitespaces": 828, "n_words": 66, "vocab_size": 50, "complexity": 1, "nloc": 46, "token_counts": 156, "n_ast_nodes": 257, "n_identifiers": 27, "d_id": 88014, "documentation": { "docstring": "Test that a SIMPLEconnect fan can be correctly setup in HA.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 30502, "commit_id": "1950acfbda3a659ca70658c848f900306ab2e35e", "repo": "OCRmyPDF", "path": "src/ocrmypdf/pluginspec.py", "file_name": "pluginspec.py", "fun_name": "get_progressbar_class", "commit_message": "docs: proofread plugins", "code": "def get_progressbar_class():\n \n\n\n@hookspec", "url": "https://github.com/ocrmypdf/OCRmyPDF.git", "language": "Python", "ast_errors": "@hookspec", "n_ast_errors": 1, "ast_levels": 6, "n_whitespaces": 5, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 1, "token_counts": 5, "n_ast_nodes": 16, "n_identifiers": 2, "d_id": 5624, "documentation": { "docstring": "Called to obtain a class that can be used to monitor progress.\n\n A progress bar is assumed, but this could be used for any type of monitoring.\n\n The class should follow a tqdm-like protocol. Calling the class should return\n a new progress bar object, which is activated with ``__enter__`` and terminated\n ``__exit__``. An update method is called whenever the progress bar is updated.\n Progress bar objects will not be reused; a new one will be created for each\n group of tasks.\n\n The progress bar is held in the main process/thread and not updated by child\n process/threads. 
When a child notifies the parent of completed work, the\n parent updates the progress bar.\n\n The arguments are the same as `tqdm `_ accepts.\n\n Progress bars should never write to ``sys.stdout``, or they will corrupt the\n output if OCRmyPDF writes a PDF to standard output.\n\n The type of events that OCRmyPDF reports to a progress bar may change in\n minor releases.\n\n Here is how OCRmyPDF will use the progress bar:\n\n Example:\n\n pbar_class = pm.hook.get_progressbar_class()\n with pbar_class(**tqdm_kwargs) as pbar:\n ...\n pbar.update(1)\n ", "n_words": 176, "vocab_size": 111, "n_whitespaces": 263, "language": "en" } }, { "id": 139754, "commit_id": "68d4dd3a8b2defa5549cfa70e59aa26f2d4825a3", "repo": "ray", "path": "python/ray/data/tests/test_context_propagation.py", "file_name": "test_context_propagation.py", "fun_name": "test_context_placement_group", "commit_message": "[Datasets] Add explicit resource allocation option via a top-level scheduling strategy (#24438)\n\nInstead of letting Datasets implicitly use cluster resources in the margins of explicit allocations of other libraries, such as Tune, Datasets should provide an option for explicitly allocating resources for a Datasets workload for users that want to box Datasets in. This PR adds such an explicit resource allocation option, via exposing a top-level scheduling strategy on the DatasetContext with which a placement group can be given.", "code": "def test_context_placement_group():\n driver_code = \n proc = run_string_as_driver_nonblocking(driver_code)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 13, "n_words": 7, "vocab_size": 6, "complexity": 1, "nloc": 30, "token_counts": 23, "n_ast_nodes": 27, "n_identifiers": 4, "d_id": 31769, "documentation": { "docstring": "\nimport ray\nfrom ray.data.context import DatasetContext\nfrom ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy\nfrom ray._private.test_utils import placement_group_assert_no_leak\n\nray.init(num_cpus=1)\n\ncontext = DatasetContext.get_current()\n# This placement group will take up all cores of the local cluster.\nplacement_group = ray.util.placement_group(\n name=\"core_hog\",\n strategy=\"SPREAD\",\n bundles=[\n {\"CPU\": 1},\n ],\n)\nray.get(placement_group.ready())\ncontext.scheduling_strategy = PlacementGroupSchedulingStrategy(placement_group)\npipe = ray.data.range(100, parallelism=2) \\\n .window(blocks_per_window=1) \\\n .map(lambda x: x + 1)\nassert pipe.take_all() == list(range(1, 101))\nplacement_group_assert_no_leak([placement_group])\nray.shutdown()\n ", "n_words": 64, "vocab_size": 55, "n_whitespaces": 78, "language": "en" } }, { "id": 78837, "commit_id": "5521e3b59f45af830ebac3c5686e092616eb82e4", "repo": "wagtail", "path": "wagtail/admin/panels.py", "file_name": "panels.py", "fun_name": "child_identifiers", "commit_message": "Update panel templates for new designs (EditHandler rewrite)\n\nCo-authored-by: Thibaud Colas ", "code": "def child_identifiers(self):\n \n used_names = set()\n result = []\n for panel in self.children:\n base_name = panel.clean_name or \"panel\"\n candidate_name = base_name\n suffix = 0\n while candidate_name in used_names:\n suffix += 1\n candidate_name = \"%s%d\" % (base_name, suffix)\n\n result.append(candidate_name)\n used_names.add(candidate_name)\n\n return result\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, 
"n_whitespaces": 171, "n_words": 40, "vocab_size": 29, "complexity": 4, "nloc": 13, "token_counts": 66, "n_ast_nodes": 113, "n_identifiers": 13, "d_id": 16826, "documentation": { "docstring": "\n A list of identifiers corresponding to child panels in ``self.children``, formed from the clean_name property\n but validated to be unique and non-empty.\n ", "n_words": 22, "vocab_size": 21, "n_whitespaces": 44, "language": "en" } }, { "id": 221074, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/base64.py", "file_name": "base64.py", "fun_name": "standard_b64decode", "commit_message": "add python 3.10.4 for windows", "code": "def standard_b64decode(s):\n \n return b64decode(s)\n\n\n_urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_')\n_urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/')\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 16, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 59, "n_identifiers": 7, "d_id": 56186, "documentation": { "docstring": "Decode bytes encoded with the standard Base64 alphabet.\n\n Argument s is a bytes-like object or ASCII string to decode. The result\n is returned as a bytes object. A binascii.Error is raised if the input\n is incorrectly padded. Characters that are not in the standard alphabet\n are discarded prior to the padding check.\n ", "n_words": 52, "vocab_size": 41, "n_whitespaces": 70, "language": "en" } }, { "id": 201610, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/auth_tests/test_views.py", "file_name": "test_views.py", "fun_name": "test_logout_doesnt_cache", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_logout_doesnt_cache(self):\n \n response = self.client.get(\"/logout/\")\n self.assertIn(\"no-store\", response.headers[\"Cache-Control\"])\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 28, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 29, "n_ast_nodes": 54, "n_identifiers": 7, "d_id": 49975, "documentation": { "docstring": "\n The logout() view should send \"no-cache\" headers for reasons described\n in #25490.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 34, "language": "en" } }, { "id": 71107, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/edit_handlers.py", "file_name": "edit_handlers.py", "fun_name": "reset_page_edit_handler_cache", "commit_message": "Reformat with black", "code": "def reset_page_edit_handler_cache(**kwargs):\n \n if kwargs[\"setting\"] == \"WAGTAILADMIN_COMMENTS_ENABLED\":\n set_default_page_edit_handlers(Page)\n for model in apps.get_models():\n if issubclass(model, Page):\n model.get_edit_handler.cache_clear()\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 61, "n_words": 15, "vocab_size": 14, "complexity": 4, "nloc": 6, "token_counts": 43, "n_ast_nodes": 76, "n_identifiers": 10, "d_id": 15624, "documentation": { "docstring": "\n Clear page edit handler cache when global WAGTAILADMIN_COMMENTS_ENABLED settings are changed\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 18, "language": "en" } }, { "id": 198556, "commit_id": 
"44b65804ef1e39f99a890cac82b6d5143251173b", "repo": "sympy", "path": "sympy/solvers/solvers.py", "file_name": "solvers.py", "fun_name": "coefficient_system", "commit_message": "update free symbol idiom", "code": "def coefficient_system(f, params):\n r\n if isinstance(f, Eq):\n # got equation, so move all the\n # terms to the left hand side\n f = f.lhs - f.rhs\n syms = set(params)\n # e.g. A*exp(x) + B - (exp(x) + y)\n fex = _mexpand(f, recursive=True)\n # -(exp(x) + y), A*exp(x) + B\n _, dep = fex.as_independent(*syms)\n # {x} = {x, A, B} - {A, B}\n ex = dep.free_symbols - syms\n # {exp(x): A - 1, 1: B - y}\n gen_co = fex.as_coefficients_dict(*ex)\n # ignore those that are 0 and return None\n # if any are coefficients are numbers\n eqs = []\n for k, v in gen_co.items():\n if v.is_zero:\n continue\n elif v.is_number:\n return\n eqs.append(v)\n return set(eqs)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 223, "n_words": 112, "vocab_size": 78, "complexity": 5, "nloc": 45, "token_counts": 107, "n_ast_nodes": 180, "n_identifiers": 26, "d_id": 48999, "documentation": { "docstring": "Return a set of equations which can be solved to determine\n values for undetermined coefficients in an equation like\n $p(x; a_1, \\ldots, a_k) = q(x)$ where both\n $p$ and $q$ are univariate expressions (polynomial in generators of $x$\n but not necessarily in powers of $x$) that depend on $k$ parameters. If\n such a system cannot be determined or has no solution, return None.\n Return of a system does not imply that there is a solution to the\n system. No simplification of coefficients is done and there may be\n expressions which share a common factor.\n\n >>> from sympy import Eq\n >>> from sympy.solvers.solvers import coefficient_system\n >>> from sympy.abc import x, a, b, c\n >>> coefficient_system(Eq(3*a*x + b - 12*x, c), [a, b])\n {3*a - 12, b - c}\n >>> coefficient_system(a*x - x + b + c, [a, b])\n {a - 1, b + c}\n\n If a system is over-determined, it will still be returned. 
In the\n following, there are not 3 independent relationships for the\n 3 symbols:\n\n >>> coefficient_system(a*x + b + c, [a, b, c])\n {a, b + c}\n\n See Also\n ========\n solve_undetermined_coeffs\n ", "n_words": 183, "vocab_size": 116, "n_whitespaces": 255, "language": "en" } }, { "id": 61120, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py", "file_name": "found_candidates.py", "fun_name": "_iter_built_with_prepended", "commit_message": "upd; format", "code": "def _iter_built_with_prepended(installed, infos):\n # type: (Candidate, Iterator[IndexCandidateInfo]) -> Iterator[Candidate]\n \n yield installed\n versions_found = {installed.version} # type: Set[_BaseVersion]\n for version, func in infos:\n if version in versions_found:\n continue\n candidate = func()\n if candidate is None:\n continue\n yield candidate\n versions_found.add(version)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 111, "n_words": 38, "vocab_size": 29, "complexity": 4, "nloc": 11, "token_counts": 49, "n_ast_nodes": 85, "n_identifiers": 8, "d_id": 12415, "documentation": { "docstring": "Iterator for ``FoundCandidates``.\n\n This iterator is used when the resolver prefers the already-installed\n candidate and NOT to upgrade. The installed candidate is therefore\n always yielded first, and candidates from index come later in their\n normal ordering, except skipped when the version is already installed.\n ", "n_words": 44, "vocab_size": 37, "n_whitespaces": 59, "language": "en" } }, { "id": 206576, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/utils/cache.py", "file_name": "cache.py", "fun_name": "patch_response_headers", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def patch_response_headers(response, cache_timeout=None):\n \n if cache_timeout is None:\n cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS\n if cache_timeout < 0:\n cache_timeout = 0 # Can't have max-age negative\n if not response.has_header(\"Expires\"):\n response.headers[\"Expires\"] = http_date(time.time() + cache_timeout)\n patch_cache_control(response, max_age=cache_timeout)\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 69, "n_words": 32, "vocab_size": 25, "complexity": 4, "nloc": 8, "token_counts": 62, "n_ast_nodes": 106, "n_identifiers": 11, "d_id": 51574, "documentation": { "docstring": "\n Add HTTP caching headers to the given HttpResponse: Expires and\n Cache-Control.\n\n Each header is only added if it isn't already set.\n\n cache_timeout is in seconds. 
The CACHE_MIDDLEWARE_SECONDS setting is used\n by default.\n ", "n_words": 32, "vocab_size": 30, "n_whitespaces": 51, "language": "en" } }, { "id": 9959, "commit_id": "933415bfa1f9eb89f935037014dfed816eb9815d", "repo": "jina", "path": "jina/types/request/data.py", "file_name": "data.py", "fun_name": "data", "commit_message": "feat: star routing (#3900)\n\n* feat(proto): adjust proto for star routing (#3844)\r\n\r\n* feat(proto): adjust proto for star routing\r\n\r\n* feat(proto): generate proto files\r\n\r\n* feat(grpc): refactor grpclet interface (#3846)\r\n\r\n* feat: refactor connection pool for star routing (#3872)\r\n\r\n* feat(k8s): add more labels to k8s deployments\r\n\r\n* feat(network): refactor connection pool\r\n\r\n* feat(network): refactor k8s pool\r\n\r\n* feat: star routing graph gateway (#3877)\r\n\r\n* feat: star routing - refactor grpc data runtime (#3887)\r\n\r\n* feat(runtimes): refactor grpc dataruntime\r\n\r\n* fix(tests): adapt worker runtime tests\r\n\r\n* fix(import): fix import\r\n\r\n* feat(proto): enable sending multiple lists (#3891)\r\n\r\n* feat: star routing gateway (#3893)\r\n\r\n* feat: star routing gateway all protocols (#3897)\r\n\r\n* test: add streaming and prefetch tests (#3901)\r\n\r\n* feat(head): new head runtime for star routing (#3899)\r\n\r\n* feat(head): new head runtime\r\n\r\n* feat(head): new head runtime\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(network): improve proto comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(worker): merge docs in worker runtime (#3905)\r\n\r\n* feat(worker): merge docs in worker runtime\r\n\r\n* feat(tests): assert after clean up\r\n\r\n* feat(tests): star routing runtime integration tests (#3908)\r\n\r\n* fix(tests): fix integration tests\r\n\r\n* test: test runtimes fast slow request (#3910)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table (#3915)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(zmq): adapt comment in dependency list\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix type tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: add test gateway to worker connection (#3921)\r\n\r\n* feat(pea): adapt peas for star routing (#3918)\r\n\r\n* feat(pea): adapt peas for star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(pea): add tests\r\n\r\n* feat(tests): add failing head pea test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(tests): integration tests for peas (#3923)\r\n\r\n* feat(tests): integration tests for peas\r\n\r\n* feat(pea): remove _inner_pea function\r\n\r\n* feat: star routing container pea (#3922)\r\n\r\n* test: rescue tests (#3942)\r\n\r\n* fix: fix streaming tests (#3945)\r\n\r\n* refactor: move docker run to run (#3948)\r\n\r\n* feat: star routing pods (#3940)\r\n\r\n* feat(pod): adapt pods for star routing\r\n\r\n* feat(pods): adapt basepod to star routing\r\n\r\n* feat(pod): merge pod and compound pod\r\n\r\n* feat(tests): fix tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(test): add container pea int test\r\n\r\n* feat(ci): remove more unnecessary tests\r\n\r\n* fix(tests): remove jinad runtime\r\n\r\n* feat(ci): remove latency tracking\r\n\r\n* fix(ci): fix ci def\r\n\r\n* fix(runtime): enable runtime to be exited\r\n\r\n* fix(tests): wrap runtime test in process\r\n\r\n* fix(runtimes): remove unused runtimes\r\n\r\n* feat(runtimes): improve cancel wait\r\n\r\n* fix(ci): build test pip again in 
ci\r\n\r\n* fix(tests): fix a test\r\n\r\n* fix(test): run async in its own process\r\n\r\n* feat(pod): include shard in activate msg\r\n\r\n* fix(pea): dont join\r\n\r\n* feat(pod): more debug out\r\n\r\n* feat(grpc): manage channels properly\r\n\r\n* feat(pods): remove exitfifo\r\n\r\n* feat(network): add simple send retry mechanism\r\n\r\n* fix(network): await pool close\r\n\r\n* fix(test): always close grpc server in worker\r\n\r\n* fix(tests): remove container pea from tests\r\n\r\n* fix(tests): reorder tests\r\n\r\n* fix(ci): split tests\r\n\r\n* fix(ci): allow alias setting\r\n\r\n* fix(test): skip a test\r\n\r\n* feat(pods): address comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: unblock skipped test (#3957)\r\n\r\n* feat: jinad pea (#3949)\r\n\r\n* feat: jinad pea\r\n\r\n* feat: jinad pea\r\n\r\n* test: remote peas\r\n\r\n* test: toplogy tests with jinad\r\n\r\n* ci: parallel jobs\r\n\r\n* feat(tests): add pod integration tests (#3958)\r\n\r\n* feat(tests): add pod integration tests\r\n\r\n* fix(tests): make tests less flaky\r\n\r\n* fix(test): fix test\r\n\r\n* test(pea): remote pea topologies (#3961)\r\n\r\n* test(pea): remote pea simple topology\r\n\r\n* test: remote pea topologies\r\n\r\n* refactor: refactor streamer result handling (#3960)\r\n\r\n* feat(k8s): adapt K8s Pod for StarRouting (#3964)\r\n\r\n* test: optimize k8s test\r\n\r\n* test: increase timeout and use different namespace\r\n\r\n* test: optimize k8s test\r\n\r\n* test: build and load image when needed\r\n\r\n* test: refactor k8s test\r\n\r\n* test: fix image name error\r\n\r\n* test: fix k8s image load\r\n\r\n* test: fix typoe port expose\r\n\r\n* test: update tests in connection pool and handling\r\n\r\n* test: remove unused fixture\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* feat(k8s): adapt k8s pod for star routing\r\n\r\n* fix(k8s): dont overwrite add/remove function in pool\r\n\r\n* fix(k8s): some fixes\r\n\r\n* fix(k8s): some more fixes\r\n\r\n* fix(k8s): linting\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix k8s unit tests\r\n\r\n* feat(k8s): complete k8s integration test\r\n\r\n* feat(k8s): finish k8s tests\r\n\r\n* feat(k8s): fix test\r\n\r\n* fix(tests): fix test with no name\r\n\r\n* feat(k8s): unify create/replace interface\r\n\r\n* feat(k8s): extract k8s port constants\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): wait for runtime being ready in tests\r\n\r\n* feat(k8s): address comments\r\n\r\nCo-authored-by: bwanglzu \r\n\r\n* feat(flow): adapt Flow for StarRouting (#3986)\r\n\r\n* feat(flow): add routes\r\n\r\n* feat(flow): adapt flow to star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(flow): handle empty topologies\r\n\r\n* feat(k8s): allow k8s pool disabling\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(test): fix test with mock\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(flow): clean up tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat: add plot function (#3994)\r\n\r\n* fix(tests): avoid hanging tests\r\n\r\n* feat(flow): add type hinting\r\n\r\n* fix(test): fix duplicate exec name in test\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): enable jinad test again\r\n\r\n* fix(tests): random port fixture\r\n\r\n* fix(style): replace quotes\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(ci): bring back ci (#3997)\r\n\r\n* 
feat(ci): enable ci again\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(ci): add latency tracking\r\n\r\n* feat(ci): bring back some tests\r\n\r\n* fix(tests): remove invalid port test\r\n\r\n* feat(ci): disable daemon and distributed tests\r\n\r\n* fix(tests): fix entrypoint in hub test\r\n\r\n* fix(tests): wait for gateway to be ready\r\n\r\n* fix(test): fix more tests\r\n\r\n* feat(flow): do rolling update and scale sequentially\r\n\r\n* fix(tests): fix more tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat: star routing hanging pods (#4011)\r\n\r\n* fix: try to handle hanging pods better\r\n\r\n* test: hanging pods test work\r\n\r\n* fix: fix topology graph problem\r\n\r\n* test: add unit test to graph\r\n\r\n* fix(tests): fix k8s tests\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s pool test\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s connection pool setting\r\n\r\n* fix(tests): make runtime test more reliable\r\n\r\n* fix(test): fix routes test\r\n\r\n* fix(tests): make rolling update test less flaky\r\n\r\n* feat(network): gurantee unique ports\r\n\r\n* feat(network): do round robin for shards\r\n\r\n* fix(ci): increase pytest timeout to 10 min\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix(ci): fix ci file\r\n\r\n* feat(daemon): jinad pod for star routing\r\n\r\n* Revert \"feat(daemon): jinad pod for star routing\"\r\n\r\nThis reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92.\r\n\r\n* feat(daemon): remote jinad pod support (#4042)\r\n\r\n* feat(daemon): add pod tests for star routing\r\n\r\n* feat(daemon): add remote pod test\r\n\r\n* test(daemon): add remote pod arguments test\r\n\r\n* test(daemon): add async scale test\r\n\r\n* test(daemon): add rolling update test\r\n\r\n* test(daemon): fix host\r\n\r\n* feat(proto): remove message proto (#4051)\r\n\r\n* feat(proto): remove message proto\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix some more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(proto): put docs back in data\r\n\r\n* fix(proto): clean up\r\n\r\n* feat(proto): clean up\r\n\r\n* fix(tests): skip latency tracking\r\n\r\n* fix(test): fix hub test\r\n\r\n* fix(tests): fix k8s test\r\n\r\n* fix(test): some test clean up\r\n\r\n* fix(style): clean up style issues\r\n\r\n* feat(proto): adjust for rebase\r\n\r\n* fix(tests): bring back latency tracking\r\n\r\n* fix(tests): fix merge accident\r\n\r\n* feat(proto): skip request serialization (#4074)\r\n\r\n* feat: add reduce to star routing (#4070)\r\n\r\n* feat: add reduce on shards to head runtime\r\n\r\n* test: add reduce integration tests with fixed order\r\n\r\n* feat: add reduce on needs\r\n\r\n* chore: get_docs_matrix_from_request becomes public\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* docs: remove undeterministic results warning\r\n\r\n* fix: fix uses_after\r\n\r\n* test: assert correct num docs after reducing in test_external_pod\r\n\r\n* test: correct asserts after reduce in test_rolling_update\r\n\r\n* fix: no reduce if uses_after_address is set\r\n\r\n* fix: get_docs_from_request only if needed\r\n\r\n* fix: fix tests after merge\r\n\r\n* refactor: move reduce from data_request_handler to head\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* chore: apply suggestions\r\n\r\n* fix: fix asserts\r\n\r\n* chore: minor test fix\r\n\r\n* chore: apply 
suggestions\r\n\r\n* test: remove flow tests with external executor (pea)\r\n\r\n* fix: fix test_expected_messages_routing\r\n\r\n* fix: fix test_func_joiner\r\n\r\n* test: adapt k8s test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix(k8s): fix static pool config\r\n\r\n* fix: use custom protoc doc generator image (#4088)\r\n\r\n* fix: use custom protoc doc generator image\r\n\r\n* fix(docs): minor doc improvement\r\n\r\n* fix(docs): use custom image\r\n\r\n* fix(docs): copy docarray\r\n\r\n* fix: doc building local only\r\n\r\n* fix: timeout doc building\r\n\r\n* fix: use updated args when building ContainerPea\r\n\r\n* test: add container PeaFactory test\r\n\r\n* fix: force pea close on windows (#4098)\r\n\r\n* fix: dont reduce if uses exist (#4099)\r\n\r\n* fix: dont use reduce if uses exist\r\n\r\n* fix: adjust reduce tests\r\n\r\n* fix: adjust more reduce tests\r\n\r\n* fix: fix more tests\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: ignore non jina resources (#4101)\r\n\r\n* feat(executor): enable async executors (#4102)\r\n\r\n* feat(daemon): daemon flow on star routing (#4096)\r\n\r\n* test(daemon): add remote flow test\r\n\r\n* feat(daemon): call scale in daemon\r\n\r\n* feat(daemon): remove tail args and identity\r\n\r\n* test(daemon): rename scalable executor\r\n\r\n* test(daemon): add a small delay in async test\r\n\r\n* feat(daemon): scale partial flow only\r\n\r\n* feat(daemon): call scale directly in partial flow store\r\n\r\n* test(daemon): use asyncio sleep\r\n\r\n* feat(daemon): enable flow level distributed tests\r\n\r\n* test(daemon): fix jinad env workspace config\r\n\r\n* test(daemon): fix pod test use new port rolling update\r\n\r\n* feat(daemon): enable distribuetd tests\r\n\r\n* test(daemon): remove duplicate tests and zed runtime test\r\n\r\n* test(daemon): fix stores unit test\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* test: correct test paths\r\n\r\n* test(daemon): add client test for remote flows\r\n\r\n* test(daemon): send a request with jina client\r\n\r\n* test(daemon): assert async generator\r\n\r\n* test(daemon): small interval between tests\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): use async client fetch result\r\n\r\n* test(daemon): finish container flow test\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): correct test paths\r\n\r\n* test(daemon): add small delay for async tests\r\n\r\n* fix: star routing fixes (#4100)\r\n\r\n* docs: update docs\r\n\r\n* fix: fix Request.__repr__\r\n\r\n* docs: update flow remarks\r\n\r\n* docs: fix typo\r\n\r\n* test: add non_empty_fields test\r\n\r\n* chore: remove non_empty_fields test\r\n\r\n* feat: polling per endpoint (#4111)\r\n\r\n* feat(polling): polling per endpoint configurable\r\n\r\n* fix: adjust tests\r\n\r\n* feat(polling): extend documentation\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: clean up\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: remove repeat from flaky test\r\n\r\n* fix: k8s test\r\n\r\n* feat(polling): address pr feedback\r\n\r\n* feat: improve docs\r\n\r\nCo-authored-by: Jina 
Dev Bot \r\n\r\n* feat(grpc): support connect grpc server via ssl tunnel (#4092)\r\n\r\n* feat(grpc): support ssl grpc connect if port is 443\r\n\r\n* fix(grpc): use https option instead of detect port automatically\r\n\r\n* chore: fix typo\r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* test(networking): add test for peapods networking\r\n\r\n* fix: address comments\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(polling): unify polling args (#4113)\r\n\r\n* fix: several issues for jinad pods (#4119)\r\n\r\n* fix: activate for jinad pods\r\n\r\n* fix: dont expose worker pod in partial daemon\r\n\r\n* fix: workspace setting\r\n\r\n* fix: containerized flows\r\n\r\n* fix: hub test\r\n\r\n* feat(daemon): remote peas on star routing (#4112)\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix sync async client test\r\n\r\n* test(daemon): enable remote peas test\r\n\r\n* test(daemon): replace send message to send request\r\n\r\n* test(daemon): declare pea tests in ci\r\n\r\n* test(daemon): use pea args fixture\r\n\r\n* test(daemon): head pea use default host\r\n\r\n* test(daemon): fix peas topologies\r\n\r\n* test(daemon): fix pseudo naming\r\n\r\n* test(daemon): use default host as host\r\n\r\n* test(daemon): fix executor path\r\n\r\n* test(daemon): add remote worker back\r\n\r\n* test(daemon): skip local remote remote topology\r\n\r\n* fix: jinad pea test setup\r\n\r\n* fix: jinad pea tests\r\n\r\n* fix: remove invalid assertion\r\n\r\nCo-authored-by: jacobowitz \r\n\r\n* feat: enable daemon tests again (#4132)\r\n\r\n* feat: enable daemon tests again\r\n\r\n* fix: remove bogy empty script file\r\n\r\n* fix: more jinad test fixes\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: scale and ru in jinad\r\n\r\n* fix: fix more jinad tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix: fix flow test\r\n\r\n* fix: improve pea tests reliability (#4136)\r\n\r\nCo-authored-by: Joan Fontanals \r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Deepankar Mahapatro \r\nCo-authored-by: bwanglzu \r\nCo-authored-by: AlaeddineAbdessalem \r\nCo-authored-by: Zhaofeng Miao <522856232@qq.com>", "code": "def data(self) -> 'DataRequest._DataContent':\n \n return DataRequest._DataContent(self.proto.data)\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 6, "token_counts": 19, "n_ast_nodes": 35, "n_identifiers": 5, "d_id": 1807, "documentation": { "docstring": "Get the data contaned in this data request\n\n :return: the data content as an instance of _DataContent wrapping docs and groundtruths\n ", "n_words": 21, "vocab_size": 18, "n_whitespaces": 35, "language": "en" } }, { "id": 100174, "commit_id": "096b5511e244eecd8799b2a0324655207ce8985e", "repo": "sentry", "path": "tests/sentry/data_export/endpoints/test_data_export.py", "file_name": "test_data_export.py", "fun_name": "test_export_too_many_fields", "commit_message": "ref(tests): Remove `get_valid_response()` (#34822)", "code": "def test_export_too_many_fields(self):\n \n payload = self.make_payload(\"discover\", {\"field\": [\"id\"] * (MAX_FIELDS + 1)})\n with self.feature(\"organizations:discover-query\"):\n 
response = self.get_error_response(self.org.slug, status_code=400, **payload)\n assert response.data == {\n \"non_field_errors\": [\n \"You can export up to 20 fields at a time. Please delete some and try again.\"\n ]\n }\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 125, "n_words": 42, "vocab_size": 41, "complexity": 1, "nloc": 9, "token_counts": 67, "n_ast_nodes": 119, "n_identifiers": 12, "d_id": 19768, "documentation": { "docstring": "\n Ensures that if too many fields are requested, returns a 400 status code with the\n corresponding error message.\n ", "n_words": 18, "vocab_size": 18, "n_whitespaces": 40, "language": "en" } }, { "id": 250286, "commit_id": "652d1669c5a103b1c20478770c4aaf18849c09a3", "repo": "synapse", "path": "tests/handlers/test_e2e_room_keys.py", "file_name": "test_e2e_room_keys.py", "fun_name": "test_delete_missing_current_version", "commit_message": "Add missing type hints to tests.handlers. (#14680)\n\nAnd do not allow untyped defs in tests.handlers.", "code": "def test_delete_missing_current_version(self) -> None:\n \n e = self.get_failure(self.handler.delete_version(self.local_user), SynapseError)\n res = e.value.code\n self.assertEqual(res, 404)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 41, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 5, "token_counts": 42, "n_ast_nodes": 68, "n_identifiers": 12, "d_id": 73364, "documentation": { "docstring": "Check that we get a 404 on deleting nonexistent current version", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 242282, "commit_id": "e05b8d74819fa18a908ea201a86138ea3168aba9", "repo": "Pillow", "path": "Tests/test_file_webp_animated.py", "file_name": "test_file_webp_animated.py", "fun_name": "test_write_animation_L", "commit_message": "libwebp 1.2.2 fixed endian bugs", "code": "def test_write_animation_L(tmp_path):\n \n\n with Image.open(\"Tests/images/iss634.gif\") as orig:\n assert orig.n_frames > 1\n\n temp_file = str(tmp_path / \"temp.webp\")\n orig.save(temp_file, save_all=True)\n with Image.open(temp_file) as im:\n assert im.n_frames == orig.n_frames\n\n # Compare first and last frames to the original animated GIF\n orig.load()\n im.load()\n assert_image_similar(im, orig.convert(\"RGBA\"), 32.9)\n\n if is_big_endian():\n webp = parse_version(features.version_module(\"webp\"))\n if webp < parse_version(\"1.2.2\"):\n return\n orig.seek(orig.n_frames - 1)\n im.seek(im.n_frames - 1)\n orig.load()\n im.load()\n assert_image_similar(im, orig.convert(\"RGBA\"), 32.9)\n\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 266, "n_words": 62, "vocab_size": 48, "complexity": 3, "nloc": 19, "token_counts": 153, "n_ast_nodes": 261, "n_identifiers": 20, "d_id": 69817, "documentation": { "docstring": "\n Convert an animated GIF to animated WebP, then compare the frame count, and first\n and last frames to ensure they're visually similar.\n ", "n_words": 22, "vocab_size": 19, "n_whitespaces": 32, "language": "en" } }, { "id": 58977, "commit_id": "1a3a3adf0bf4d83206f0367b98905a9db15cfec4", "repo": "prefect", "path": "src/prefect/orion/api/block_types.py", "file_name": "block_types.py", "fun_name": "install_protected_system_blocks", "commit_message": "Adds ability to delete 
block types via the CLI (#6849)\n\n* Ensure system blocks are protected on destructive API calls\r\n\r\n* Enable deleting block types\r\n\r\n* Ensure Block Types are protected against destructive API actions\r\n\r\n* Ensure updating protected Block Types on update doesn't impact saving\r\n\r\n* ⚫\r\n\r\n* isort\r\n\r\n* Suppress status errors\r\n\r\n* ⚫", "code": "async def install_protected_system_blocks(session):\n \n for block in [\n prefect.blocks.system.JSON,\n prefect.blocks.system.DateTime,\n prefect.blocks.system.Secret,\n prefect.filesystems.LocalFileSystem,\n prefect.infrastructure.Process,\n ]:\n block_type = block._to_block_type()\n block_type.is_protected = True\n\n block_type = await models.block_types.create_block_type(\n session=session, block_type=block_type, override=True\n )\n block_schema = await models.block_schemas.create_block_schema(\n session=session,\n block_schema=block._to_block_schema(block_type_id=block_type.id),\n override=True,\n )\n\n\n@router.post(\"/install_system_block_types\")", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "@router.post(\"/install_system_block_types\")", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 165, "n_words": 36, "vocab_size": 29, "complexity": 2, "nloc": 18, "token_counts": 112, "n_ast_nodes": 183, "n_identifiers": 28, "d_id": 11846, "documentation": { "docstring": "Install block types that the system expects to be present", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 296854, "commit_id": "73a368c24246b081cdb98923ca3180937d436c3b", "repo": "core", "path": "homeassistant/components/history_stats/helpers.py", "file_name": "helpers.py", "fun_name": "pretty_duration", "commit_message": "Refactor history_stats to minimize database access (part 2) (#70255)", "code": "def pretty_duration(hours):\n \n seconds = int(3600 * hours)\n days, seconds = divmod(seconds, 86400)\n hours, seconds = divmod(seconds, 3600)\n minutes, seconds = divmod(seconds, 60)\n if days > 0:\n return \"%dd %dh %dm\" % (days, hours, minutes)\n if hours > 0:\n return \"%dh %dm\" % (hours, minutes)\n return \"%dm\" % minutes\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 126, "n_words": 48, "vocab_size": 30, "complexity": 3, "nloc": 10, "token_counts": 76, "n_ast_nodes": 123, "n_identifiers": 7, "d_id": 95828, "documentation": { "docstring": "Format a duration in days, hours, minutes, seconds.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 274546, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/losses.py", "file_name": "losses.py", "fun_name": "deserialize", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def deserialize(name, custom_objects=None):\n \n return deserialize_keras_object(\n name,\n module_objects=globals(),\n custom_objects=custom_objects,\n printable_module_name=\"loss function\",\n )\n\n\n@keras_export(\"keras.losses.get\")", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.losses.get\")", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 48, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 7, "token_counts": 30, "n_ast_nodes": 59, "n_identifiers": 8, "d_id": 81224, "documentation": { "docstring": "Deserializes a serialized loss class/function instance.\n\n Args:\n 
name: Loss configuration.\n custom_objects: Optional dictionary mapping names (strings) to custom\n objects (classes and functions) to be considered during deserialization.\n\n Returns:\n A Keras `Loss` instance or a loss function.\n ", "n_words": 36, "vocab_size": 33, "n_whitespaces": 75, "language": "en" } }, { "id": 318780, "commit_id": "fc695896dd8b0169001c438054a79e347053fac6", "repo": "paperless-ngx", "path": "src/paperless/checks.py", "file_name": "checks.py", "fun_name": "paths_check", "commit_message": "Format Python code with black", "code": "def paths_check(app_configs, **kwargs):\n \n\n return (\n path_check(\"PAPERLESS_DATA_DIR\", settings.DATA_DIR)\n + path_check(\"PAPERLESS_TRASH_DIR\", settings.TRASH_DIR)\n + path_check(\"PAPERLESS_MEDIA_ROOT\", settings.MEDIA_ROOT)\n + path_check(\"PAPERLESS_CONSUMPTION_DIR\", settings.CONSUMPTION_DIR)\n )\n\n\n@register()", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "@register()", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 54, "n_words": 18, "vocab_size": 16, "complexity": 1, "nloc": 7, "token_counts": 47, "n_ast_nodes": 88, "n_identifiers": 10, "d_id": 116915, "documentation": { "docstring": "\n Check the various paths for existence, readability and writeability\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 16, "language": "en" } }, { "id": 195437, "commit_id": "0f129e9c38b6b10d80982ecc412785db62842938", "repo": "ParlAI", "path": "parlai/tasks/reasoning/reason_types/step_by_step.py", "file_name": "step_by_step.py", "fun_name": "extract_operations", "commit_message": "ROSCOE suite of metrics (#4839)\n\n* ROSCOE suite of metrics\r\n\r\n* updating tests\r\n\r\n* lint\r\n\r\n* fixing protobuf version to stop cleaninstall failures\r\n\r\n* updating requirements\r\n\r\n* convert to absolute path\r\n\r\n* moving tests because of the dependency issues\r\n\r\n* adding new dependencies in tests\r\n\r\n* add test dependencies\r\n\r\n* fixing deps\r\n\r\n* updating task list\r\n\r\n* checklist deps can't be installed on circleci\r\n\r\n* actually fix protobuf version\r\n\r\n* protobuf range\r\n\r\n* protobuf conflict with google-api-core\r\n\r\n* return tests\r\n\r\n* convert imports to absolute path\r\n\r\n* trying checklist again\r\n\r\n* trying to avoid checklist failures\r\n\r\n* checklist to teacher tests\r\n\r\n* add user option to avoid installation failure\r\n\r\n* jupiter as well\r\n\r\n* typo\r\n\r\n* moving into virtual env setup\r\n\r\n* user param not allowed in virtual env\r\n\r\n* move spacy to circleCI because it's big\r\n\r\n* replace local model with HF\r\n\r\n* fixes based on comments\r\n\r\n* remove unused nli scores, fix tests\r\n\r\n* Added path to BART model\r\n\r\nCo-authored-by: Spencer Poff ", "code": "def extract_operations(self) -> List[str]:\n \n if not self.step:\n return []\n try:\n operations = re.findall(r'[-+*^/]', self.step)\n except TypeError as e:\n print(f\"TYPE: {type(self.step)}\")\n print(f\"STEP: {self.step}\")\n raise e\n return operations\n", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 116, "n_words": 26, "vocab_size": 24, "complexity": 3, "nloc": 13, "token_counts": 54, "n_ast_nodes": 111, "n_identifiers": 12, "d_id": 47260, "documentation": { "docstring": "\n Finds all instances of the math operations: -, +, *, ^, / in the step.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 30, "language": "en" } }, { "id": 101620, 
"commit_id": "98d01760e469fd2108eed8d0b0a1ba6297c3177c", "repo": "faceswap", "path": "tools/sort/sort_methods.py", "file_name": "sort_methods.py", "fun_name": "_metadata_reader", "commit_message": "Overhaul sort:\n - Standardize image data reading and writing\n - Optimize loading (just one pass required)\n - Make all sort groups binnable (to greater or lesser results)\n - Add sort by pitch\n - Deprecate multiple options\n - linting, docs + locales", "code": "def _metadata_reader(self) -> ImgMetaType:\n \n for filename, metadata in tqdm(read_image_meta_batch(self._loader.file_list),\n total=self._loader.count,\n desc=self._description,\n leave=False):\n alignments = self._get_alignments(metadata.get(\"itxt\", {}))\n yield filename, None, alignments\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 170, "n_words": 20, "vocab_size": 18, "complexity": 2, "nloc": 18, "token_counts": 65, "n_ast_nodes": 102, "n_identifiers": 17, "d_id": 21028, "documentation": { "docstring": " Load metadata from saved aligned faces\n\n Yields\n ------\n filename: str\n The filename that has been read\n image: None\n This will always be ``None`` with the metadata reader\n alignments: dict or ``None``\n The alignment data for the given face or ``None`` if no alignments found\n ", "n_words": 44, "vocab_size": 38, "n_whitespaces": 120, "language": "en" } }, { "id": 242745, "commit_id": "ee85e387bab535e2339b9d3cd1ab87c61d23af15", "repo": "Pillow", "path": "src/PIL/Jpeg2KImagePlugin.py", "file_name": "Jpeg2KImagePlugin.py", "fun_name": "_parse_jp2_header", "commit_message": "Remove redundant parentheses", "code": "def _parse_jp2_header(fp):\n \n\n # Find the JP2 header box\n reader = BoxReader(fp)\n header = None\n mimetype = None\n while reader.has_next_box():\n tbox = reader.next_box_type()\n\n if tbox == b\"jp2h\":\n header = reader.read_boxes()\n break\n elif tbox == b\"ftyp\":\n if reader.read_fields(\">4s\")[0] == b\"jpx \":\n mimetype = \"image/jpx\"\n\n size = None\n mode = None\n bpc = None\n nc = None\n dpi = None # 2-tuple of DPI info, or None\n\n while header.has_next_box():\n tbox = header.next_box_type()\n\n if tbox == b\"ihdr\":\n height, width, nc, bpc = header.read_fields(\">IIHB\")\n size = (width, height)\n if nc == 1 and (bpc & 0x7F) > 8:\n mode = \"I;16\"\n elif nc == 1:\n mode = \"L\"\n elif nc == 2:\n mode = \"LA\"\n elif nc == 3:\n mode = \"RGB\"\n elif nc == 4:\n mode = \"RGBA\"\n elif tbox == b\"res \":\n res = header.read_boxes()\n while res.has_next_box():\n tres = res.next_box_type()\n if tres == b\"resc\":\n vrcn, vrcd, hrcn, hrcd, vrce, hrce = res.read_fields(\">HHHHBB\")\n hres = _res_to_dpi(hrcn, hrcd, hrce)\n vres = _res_to_dpi(vrcn, vrcd, vrce)\n if hres is not None and vres is not None:\n dpi = (hres, vres)\n break\n\n if size is None or mode is None:\n raise SyntaxError(\"Malformed JP2 header\")\n\n return size, mode, mimetype, dpi\n\n\n##\n# Image plugin for JPEG2000 images.\n\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 658, "n_words": 198, "vocab_size": 101, "complexity": 20, "nloc": 46, "token_counts": 285, "n_ast_nodes": 476, "n_identifiers": 30, "d_id": 69908, "documentation": { "docstring": "Parse the JP2 header box to extract size, component count,\n color space information, and optionally DPI information,\n returning a (size, mode, mimetype, dpi) tuple.", "n_words": 24, 
"vocab_size": 23, "n_whitespaces": 29, "language": "en" } }, { "id": 6299, "commit_id": "20a8a6fdb516e543d4598c852063ba0fb407f3ba", "repo": "ludwig", "path": "ludwig/utils/entmax/root_finding.py", "file_name": "root_finding.py", "fun_name": "sparsemax_bisect", "commit_message": "Removes dependency on entmax from PyPI, adds entmax source to utils (#1778)\n\n* Removes dependency on entmax from PyPi, add entmax source code into utils instead.\r\n\r\n* Removes build status and image from README\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix python formatting in docs for pre-commit.\r\n\r\n* Removes __main__ from test_losses.py\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Update entmax imports.\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\nCo-authored-by: Daniel Treiman \r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def sparsemax_bisect(X, dim=-1, n_iter=50, ensure_sum_one=True):\n \n return SparsemaxBisectFunction.apply(X, dim, n_iter, ensure_sum_one)\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 16, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 2, "token_counts": 32, "n_ast_nodes": 47, "n_identifiers": 7, "d_id": 957, "documentation": { "docstring": "sparsemax: normalizing sparse transform (a la softmax), via bisection.\n\n Solves the projection:\n\n min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.\n\n Parameters\n ----------\n X : torch.Tensor\n The input tensor.\n\n dim : int\n The dimension along which to apply sparsemax.\n\n n_iter : int\n Number of bisection iterations. For float32, 24 iterations should\n suffice for machine precision.\n\n ensure_sum_one : bool,\n Whether to divide the result by its sum. If false, the result might\n sum to close but not exactly 1, which might cause downstream problems.\n\n Note: This function does not yet support normalizing along anything except\n the last dimension. 
Please use transposing and views to achieve more\n general behavior.\n\n Returns\n -------\n P : torch tensor, same shape as X\n The projection result, such that P.sum(dim=dim) == 1 elementwise.\n ", "n_words": 128, "vocab_size": 107, "n_whitespaces": 231, "language": "en" } }, { "id": 316852, "commit_id": "16900dcef15bdb9016feabd12bfec94d61ed4df6", "repo": "core", "path": "homeassistant/helpers/storage.py", "file_name": "storage.py", "fun_name": "async_load", "commit_message": "Make Store a generic class (#74617)", "code": "async def async_load(self) -> _T | None:\n \n if self._load_task is None:\n self._load_task = self.hass.async_create_task(self._async_load())\n\n return await self._load_task\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 49, "n_words": 17, "vocab_size": 14, "complexity": 2, "nloc": 12, "token_counts": 38, "n_ast_nodes": 65, "n_identifiers": 7, "d_id": 115428, "documentation": { "docstring": "Load data.\n\n If the expected version and minor version do not match the given versions, the\n migrate function will be invoked with migrate_func(version, minor_version, config).\n\n Will ensure that when a call comes in while another one is in progress,\n the second call will wait and return the result of the first call.\n ", "n_words": 52, "vocab_size": 42, "n_whitespaces": 87, "language": "en" } }, { "id": 221795, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ctypes/_aix.py", "file_name": "_aix.py", "fun_name": "get_ld_headers", "commit_message": "add python 3.10.4 for windows", "code": "def get_ld_headers(file):\n \n # get_ld_headers parsing:\n # 1. Find a line that starts with /, ./, or ../ - set as ld_header\n # 2. If \"INDEX\" in occurs in a following line - return ld_header\n # 3. 
get info (lines starting with [0-9])\n ldr_headers = []\n p = Popen([\"/usr/bin/dump\", f\"-X{AIX_ABI}\", \"-H\", file],\n universal_newlines=True, stdout=PIPE, stderr=DEVNULL)\n # be sure to read to the end-of-file - getting all entries\n while True:\n ld_header = get_ld_header(p)\n if ld_header:\n ldr_headers.append((ld_header, get_ld_header_info(p)))\n else:\n break\n p.stdout.close()\n p.wait()\n return ldr_headers\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 167, "n_words": 81, "vocab_size": 64, "complexity": 3, "nloc": 13, "token_counts": 79, "n_ast_nodes": 139, "n_identifiers": 17, "d_id": 56514, "documentation": { "docstring": "\n Parse the header of the loader section of executable and archives\n This function calls /usr/bin/dump -H as a subprocess\n and returns a list of (ld_header, ld_header_info) tuples.\n ", "n_words": 27, "vocab_size": 22, "n_whitespaces": 40, "language": "en" } }, { "id": 65630, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/controllers/item_variant.py", "file_name": "item_variant.py", "fun_name": "make_variant_item_code", "commit_message": "style: format code with black", "code": "def make_variant_item_code(template_item_code, template_item_name, variant):\n\t\n\tif variant.item_code:\n\t\treturn\n\n\tabbreviations = []\n\tfor attr in variant.attributes:\n\t\titem_attribute = frappe.db.sql(\n\t\t\t,\n\t\t\t{\"attribute\": attr.attribute, \"attribute_value\": attr.attribute_value},\n\t\t\tas_dict=True,\n\t\t)\n\n\t\tif not item_attribute:\n\t\t\tcontinue\n\t\t\t# frappe.throw(_('Invalid attribute {0} {1}').format(frappe.bold(attr.attribute),\n\t\t\t# \tfrappe.bold(attr.attribute_value)), title=_('Invalid Attribute'),\n\t\t\t# \texc=InvalidItemAttributeValueError)\n\n\t\tabbr_or_value = (\n\t\t\tcstr(attr.attribute_value) if item_attribute[0].numeric_values else item_attribute[0].abbr\n\t\t)\n\t\tabbreviations.append(abbr_or_value)\n\n\tif abbreviations:\n\t\tvariant.item_code = \"{0}-{1}\".format(template_item_code, \"-\".join(abbreviations))\n\t\tvariant.item_name = \"{0}-{1}\".format(template_item_name, \"-\".join(abbreviations))\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 37, "n_words": 60, "vocab_size": 49, "complexity": 6, "nloc": 22, "token_counts": 128, "n_ast_nodes": 224, "n_identifiers": 24, "d_id": 13962, "documentation": { "docstring": "Uses template's item code and abbreviations to make variant's item codeselect i.numeric_values, v.abbr\n\t\t\tfrom `tabItem Attribute` i left join `tabItem Attribute Value` v\n\t\t\t\ton (i.name=v.parent)\n\t\t\twhere i.name=%(attribute)s and (v.attribute_value=%(attribute_value)s or i.numeric_values = 1)", "n_words": 33, "vocab_size": 30, "n_whitespaces": 29, "language": "en" } }, { "id": 338490, "commit_id": "295831a481241d3d06b49f646a40f27b1297fab5", "repo": "text-generation-inference", "path": "server/bloom_inference/pb/generate_pb2_grpc.py", "file_name": "generate_pb2_grpc.py", "fun_name": "Generate", "commit_message": "Init", "code": "def Generate(self, request, context):\n \n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n", "url": "https://github.com/huggingface/text-generation-inference.git", "language": "Python", 
"ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 40, "n_words": 12, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 31, "n_ast_nodes": 55, "n_identifiers": 10, "d_id": 121221, "documentation": { "docstring": "/ Generate tokens for a batch without cache\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 15, "language": "en" } }, { "id": 156894, "commit_id": "a9ee6c2fdf0a3093747e675997143e0dbe584bad", "repo": "dask", "path": "dask/compatibility.py", "file_name": "compatibility.py", "fun_name": "entry_points", "commit_message": "Add `entry_points` compatibility utility (#9388)", "code": "def entry_points(group=None):\n \n eps = importlib.metadata.entry_points()\n if group:\n try:\n return eps.select(group=group)\n except AttributeError:\n return eps.get(group, [])\n return eps\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 65, "n_words": 17, "vocab_size": 14, "complexity": 3, "nloc": 8, "token_counts": 46, "n_ast_nodes": 77, "n_identifiers": 8, "d_id": 36798, "documentation": { "docstring": "Returns an iterable of entrypoints.\n\n For compatibility with Python 3.8/3.9.\n In 3.10 the return type changed from a dict to an ``importlib.metadata.EntryPoints``.\n This compatibility utility can be removed once Python 3.10 is the minimum.\n ", "n_words": 34, "vocab_size": 29, "n_whitespaces": 46, "language": "en" } }, { "id": 197934, "commit_id": "df873af365fff5b89164ed8eb3a1b62b6180f1bb", "repo": "sympy", "path": "sympy/concrete/summations.py", "file_name": "summations.py", "fun_name": "eval_sum_residue", "commit_message": "replaced i with a dummy symbol with no assumptions for get_residue_factor", "code": "def eval_sum_residue(f, i_a_b):\n r\n i, a, b = i_a_b\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 14, "n_words": 9, "vocab_size": 9, "complexity": 32, "nloc": 149, "token_counts": 629, "n_ast_nodes": 25, "n_identifiers": 6, "d_id": 48746, "documentation": { "docstring": "Compute the infinite summation with residues\n\n Notes\n =====\n\n If $f(n), g(n)$ are polynomials with $\\deg(g(n)) - \\deg(f(n)) \\ge 2$,\n some infinite summations can be computed by the following residue\n evaluations.\n\n .. math::\n \\sum_{n=-\\infty, g(n) \\ne 0}^{\\infty} \\frac{f(n)}{g(n)} =\n -\\pi \\sum_{\\alpha|g(\\alpha)=0}\n \\text{Res}(\\cot(\\pi x) \\frac{f(x)}{g(x)}, \\alpha)\n\n .. 
math::\n \\sum_{n=-\\infty, g(n) \\ne 0}^{\\infty} (-1)^n \\frac{f(n)}{g(n)} =\n -\\pi \\sum_{\\alpha|g(\\alpha)=0}\n \\text{Res}(\\csc(\\pi x) \\frac{f(x)}{g(x)}, \\alpha)\n\n Examples\n ========\n\n >>> from sympy import Sum, oo, Symbol\n >>> x = Symbol('x')\n\n Doubly infinite series of rational functions.\n\n >>> Sum(1 / (x**2 + 1), (x, -oo, oo)).doit()\n pi/tanh(pi)\n\n Doubly infinite alternating series of rational functions.\n\n >>> Sum((-1)**x / (x**2 + 1), (x, -oo, oo)).doit()\n pi/sinh(pi)\n\n Infinite series of even rational functions.\n\n >>> Sum(1 / (x**2 + 1), (x, 0, oo)).doit()\n 1/2 + pi/(2*tanh(pi))\n\n Infinite series of alternating even rational functions.\n\n >>> Sum((-1)**x / (x**2 + 1), (x, 0, oo)).doit()\n pi/(2*sinh(pi)) + 1/2\n\n This also have heuristics to transform arbitrarily shifted summand or\n arbitrarily shifted summation range to the canonical problem the\n formula can handle.\n\n >>> Sum(1 / (x**2 + 2*x + 2), (x, -1, oo)).doit()\n 1/2 + pi/(2*tanh(pi))\n >>> Sum(1 / (x**2 + 4*x + 5), (x, -2, oo)).doit()\n 1/2 + pi/(2*tanh(pi))\n >>> Sum(1 / (x**2 + 1), (x, 1, oo)).doit()\n -1/2 + pi/(2*tanh(pi))\n >>> Sum(1 / (x**2 + 1), (x, 2, oo)).doit()\n -1 + pi/(2*tanh(pi))\n\n References\n ==========\n\n .. [#] http://www.supermath.info/InfiniteSeriesandtheResidueTheorem.pdf\n\n .. [#] Asmar N.H., Grafakos L. (2018) Residue Theory.\n In: Complex Analysis with Applications.\n Undergraduate Texts in Mathematics. Springer, Cham.\n https://doi.org/10.1007/978-3-319-94063-2_5\n ", "n_words": 242, "vocab_size": 124, "n_whitespaces": 431, "language": "en" } }, { "id": 133739, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/agents/impala/tests/test_vtrace.py", "file_name": "test_vtrace.py", "fun_name": "test_vtrace", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_vtrace(self):\n \n seq_len = 5\n batch_size = 10\n\n # Create log_rhos such that rho will span from near-zero to above the\n # clipping thresholds. 
In particular, calculate log_rhos in\n # [-2.5, 2.5),\n # so that rho is in approx [0.08, 12.2).\n space_w_time = Box(-1.0, 1.0, (seq_len, batch_size), np.float32)\n space_only_batch = Box(-1.0, 1.0, (batch_size,), np.float32)\n log_rhos = space_w_time.sample() / (batch_size * seq_len)\n log_rhos = 5 * (log_rhos - 0.5) # [0.0, 1.0) -> [-2.5, 2.5).\n values = {\n \"log_rhos\": log_rhos,\n # T, B where B_i: [0.9 / (i+1)] * T\n \"discounts\": np.array(\n [[0.9 / (b + 1) for b in range(batch_size)] for _ in range(seq_len)]\n ),\n \"rewards\": space_w_time.sample(),\n \"values\": space_w_time.sample() / batch_size,\n \"bootstrap_value\": space_only_batch.sample() + 1.0,\n \"clip_rho_threshold\": 3.7,\n \"clip_pg_rho_threshold\": 2.2,\n }\n\n for fw, sess in framework_iterator(frameworks=(\"torch\", \"tf\"), session=True):\n vtrace = vtrace_tf if fw != \"torch\" else vtrace_torch\n output = vtrace.from_importance_weights(**values)\n if sess:\n output = sess.run(output)\n\n ground_truth_v = _ground_truth_calculation(vtrace, **values)\n check(output, ground_truth_v)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 433, "n_words": 150, "vocab_size": 109, "complexity": 6, "nloc": 25, "token_counts": 230, "n_ast_nodes": 344, "n_identifiers": 30, "d_id": 30090, "documentation": { "docstring": "Tests V-trace against ground truth data calculated in python.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 111524, "commit_id": "1f23c615d7a7326ca5a38a7d768b8b70caaa0e17", "repo": "spaCy", "path": "spacy/tests/pipeline/test_entity_linker.py", "file_name": "test_entity_linker.py", "fun_name": "test_kb_valid_entities", "commit_message": "Refactor KB for easier customization (#11268)\n\n* Add implementation of batching + backwards compatibility fixes. Tests indicate issue with batch disambiguation for custom singular entity lookups.\r\n\r\n* Fix tests. Add distinction w.r.t. batch size.\r\n\r\n* Remove redundant and add new comments.\r\n\r\n* Adjust comments. Fix variable naming in EL prediction.\r\n\r\n* Fix mypy errors.\r\n\r\n* Remove KB entity type config option. Change return types of candidate retrieval functions to Iterable from Iterator. Fix various other issues.\r\n\r\n* Update spacy/pipeline/entity_linker.py\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\n\r\n* Update spacy/pipeline/entity_linker.py\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\n\r\n* Update spacy/kb_base.pyx\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\n\r\n* Update spacy/kb_base.pyx\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\n\r\n* Update spacy/pipeline/entity_linker.py\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\n\r\n* Add error messages to NotImplementedErrors. Remove redundant comment.\r\n\r\n* Fix imports.\r\n\r\n* Remove redundant comments.\r\n\r\n* Rename KnowledgeBase to InMemoryLookupKB and BaseKnowledgeBase to KnowledgeBase.\r\n\r\n* Fix tests.\r\n\r\n* Update spacy/errors.py\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* Move KB into subdirectory.\r\n\r\n* Adjust imports after KB move to dedicated subdirectory.\r\n\r\n* Fix config imports.\r\n\r\n* Move Candidate + retrieval functions to separate module. Fix other, small issues.\r\n\r\n* Fix docstrings and error message w.r.t. class names. 
Fix typing for candidate retrieval functions.\r\n\r\n* Update spacy/kb/kb_in_memory.pyx\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* Update spacy/ml/models/entity_linker.py\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* Fix typing.\r\n\r\n* Change typing of mentions to be Span instead of Union[Span, str].\r\n\r\n* Update docs.\r\n\r\n* Update EntityLinker and _architecture docs.\r\n\r\n* Update website/docs/api/entitylinker.md\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\n\r\n* Adjust message for E1046.\r\n\r\n* Re-add section for Candidate in kb.md, add reference to dedicated page.\r\n\r\n* Update docs and docstrings.\r\n\r\n* Re-add section + reference for KnowledgeBase.get_alias_candidates() in docs.\r\n\r\n* Update spacy/kb/candidate.pyx\r\n\r\n* Update spacy/kb/kb_in_memory.pyx\r\n\r\n* Update spacy/pipeline/legacy/entity_linker.py\r\n\r\n* Remove canididate.md. Remove mistakenly added config snippet in entity_linker.py.\r\n\r\nCo-authored-by: Paul O'Leary McCann \r\nCo-authored-by: Sofie Van Landeghem ", "code": "def test_kb_valid_entities(nlp):\n \n mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)\n\n # adding entities\n mykb.add_entity(entity=\"Q1\", freq=19, entity_vector=[8, 4, 3])\n mykb.add_entity(entity=\"Q2\", freq=5, entity_vector=[2, 1, 0])\n mykb.add_entity(entity=\"Q3\", freq=25, entity_vector=[-1, -6, 5])\n\n # adding aliases\n mykb.add_alias(alias=\"douglas\", entities=[\"Q2\", \"Q3\"], probabilities=[0.8, 0.2])\n mykb.add_alias(alias=\"adam\", entities=[\"Q2\"], probabilities=[0.9])\n\n # test the size of the corresponding KB\n assert mykb.get_size_entities() == 3\n assert mykb.get_size_aliases() == 2\n\n # test retrieval of the entity vectors\n assert mykb.get_vector(\"Q1\") == [8, 4, 3]\n assert mykb.get_vector(\"Q2\") == [2, 1, 0]\n assert mykb.get_vector(\"Q3\") == [-1, -6, 5]\n\n # test retrieval of prior probabilities\n assert_almost_equal(mykb.get_prior_prob(entity=\"Q2\", alias=\"douglas\"), 0.8)\n assert_almost_equal(mykb.get_prior_prob(entity=\"Q3\", alias=\"douglas\"), 0.2)\n assert_almost_equal(mykb.get_prior_prob(entity=\"Q342\", alias=\"douglas\"), 0.0)\n assert_almost_equal(mykb.get_prior_prob(entity=\"Q3\", alias=\"douglassssss\"), 0.0)\n\n", "url": "https://github.com/explosion/spaCy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 157, "n_words": 94, "vocab_size": 67, "complexity": 1, "nloc": 16, "token_counts": 275, "n_ast_nodes": 423, "n_identifiers": 19, "d_id": 24423, "documentation": { "docstring": "Test the valid construction of a KB with 3 entities and two aliases", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 204484, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/files/storage.py", "file_name": "storage.py", "fun_name": "delete", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def delete(self, name):\n \n raise NotImplementedError(\n \"subclasses of Storage must provide a delete() method\"\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 46, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 4, "token_counts": 13, "n_ast_nodes": 25, "n_identifiers": 4, "d_id": 50745, "documentation": { "docstring": "\n Delete the specified file from the storage system.\n ", "n_words": 8, "vocab_size": 7, "n_whitespaces": 23, "language": "en" } }, { "id": 207658, 
"commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_sort_indicators_admin_order", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_sort_indicators_admin_order(self):\n \n models = [\n (AdminOrderedField, \"adminorderedfield\"),\n (AdminOrderedModelMethod, \"adminorderedmodelmethod\"),\n (AdminOrderedAdminMethod, \"adminorderedadminmethod\"),\n (AdminOrderedCallable, \"adminorderedcallable\"),\n ]\n for model, url in models:\n model.objects.create(stuff=\"The Last Item\", order=3)\n model.objects.create(stuff=\"The First Item\", order=1)\n model.objects.create(stuff=\"The Middle Item\", order=2)\n response = self.client.get(\n reverse(\"admin:admin_views_%s_changelist\" % url), {}\n )\n # Should have 3 columns including action checkbox col.\n self.assertContains(response, '", "code": "def __setattr__(self, name, value):\n \n if name in (\"power_status\", \"status\"):\n self._values[name] = value\n self._update()\n else:\n super().__setattr__(name, value)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 70, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 6, "token_counts": 44, "n_ast_nodes": 75, "n_identifiers": 7, "d_id": 102387, "documentation": { "docstring": "Set attributes in `_values` if not one of the known attributes.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 220945, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/unix_events.py", "file_name": "unix_events.py", "fun_name": "remove_child_handler", "commit_message": "add python 3.10.4 for windows", "code": "def remove_child_handler(self, pid):\n \n\n raise NotImplementedError()\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 22, "n_identifiers": 4, "d_id": 56172, "documentation": { "docstring": "Removes the handler for process 'pid'.\n\n The function returns True if the handler was successfully removed,\n False if there was nothing to remove.", "n_words": 23, "vocab_size": 19, "n_whitespaces": 36, "language": "en" } }, { "id": 299023, "commit_id": "24b4690e5d855be362613583a3ba6fd6f60e9929", "repo": "core", "path": "homeassistant/components/zwave_js/climate.py", "file_name": "climate.py", "fun_name": "_set_modes_and_presets", "commit_message": "Use climate enums in zwave_js (#70757)", "code": "def _set_modes_and_presets(self) -> None:\n \n all_modes: dict[HVACMode, int | None] = {}\n all_presets: dict[str, int | None] = {PRESET_NONE: None}\n\n # Z-Wave uses one list for both modes and presets.\n # Iterate over all Z-Wave ThermostatModes and extract the hvac modes and presets.\n if self._current_mode is None:\n self._hvac_modes = {\n ZW_HVAC_MODE_MAP[ThermostatMode.HEAT]: ThermostatMode.HEAT\n }\n return\n for mode_id, mode_name in self._current_mode.metadata.states.items():\n mode_id = int(mode_id)\n if mode_id in THERMOSTAT_MODES:\n # treat value as hvac mode\n if hass_mode := ZW_HVAC_MODE_MAP.get(mode_id):\n all_modes[hass_mode] = mode_id\n else:\n # treat value as hvac preset\n all_presets[mode_name] = mode_id\n self._hvac_modes = all_modes\n self._hvac_presets = all_presets\n", "url": 
"https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 317, "n_words": 94, "vocab_size": 62, "complexity": 5, "nloc": 18, "token_counts": 123, "n_ast_nodes": 197, "n_identifiers": 23, "d_id": 97961, "documentation": { "docstring": "Convert Z-Wave Thermostat modes into Home Assistant modes and presets.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 112922, "commit_id": "39ec21ca1118e7a8df533aa06bcb5e515a93aa02", "repo": "nni", "path": "nni/retiarii/oneshot/pytorch/dataloader.py", "file_name": "dataloader.py", "fun_name": "__next__", "commit_message": "Multi-GPU support of one-shot NAS (#4603)", "code": "def __next__(self) -> Any:\n \n if not len(self.loader_iters) == len(self.loaders):\n raise RuntimeError('loader_iters must have the same length as loaders.')\n for i, (loader_name, iterator) in enumerate(self.loader_iters.items()):\n try:\n return (self.request_next_batch(iterator), loader_name)\n except StopIteration:\n if i + 1 == len(self.loader_iters):\n raise\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 140, "n_words": 37, "vocab_size": 34, "complexity": 5, "nloc": 12, "token_counts": 78, "n_ast_nodes": 128, "n_identifiers": 14, "d_id": 24789, "documentation": { "docstring": "Fetches the next batch from multiple data loaders,\n by looking for the first iterator that isn't exhausted yet.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 32, "language": "en" } }, { "id": 107934, "commit_id": "917c7c8b2cecf3ff85ed7527bb2aca13779fac13", "repo": "matplotlib", "path": "lib/matplotlib/cm.py", "file_name": "cm.py", "fun_name": "register", "commit_message": "Add missing space before : for parameters", "code": "def register(self, cmap, *, name=None, force=False):\n \n name = name or cmap.name\n if name in self and not force:\n raise ValueError(\n f'A colormap named \"{name}\" is already registered.')\n register_cmap(name, cmap.copy())\n\n\n_cmap_registry = _gen_cmap_registry()\nglobals().update(_cmap_registry)\n# This is no longer considered public API\ncmap_d = _DeprecatedCmapDictWrapper(_cmap_registry)\n__builtin_cmaps = tuple(_cmap_registry)\n\n# public access to the colormaps should be via `matplotlib.colormaps`. For now,\n# we still create the registry here, but that should stay an implementation\n# detail.\n_colormaps = ColormapRegistry(_cmap_registry)\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 122, "n_words": 77, "vocab_size": 64, "complexity": 4, "nloc": 6, "token_counts": 49, "n_ast_nodes": 139, "n_identifiers": 18, "d_id": 22976, "documentation": { "docstring": "\n Register a new colormap.\n\n The colormap name can then be used as a string argument to any ``cmap``\n parameter in Matplotlib. It is also available in ``pyplot.get_cmap``.\n\n The colormap registry stores a copy of the given colormap, so that\n future changes to the original colormap instance do not affect the\n registered colormap. Think of this as the registry taking a snapshot\n of the colormap at registration.\n\n Parameters\n ----------\n cmap : matplotlib.colors.Colormap\n The colormap to register.\n\n name : str, optional\n The name for the colormap. 
If not given, ``cmap.name`` is used.\n\n force : bool, default: False\n If False, a ValueError is raised if trying to overwrite an already\n registered name. True supports overwriting registered colormaps\n other than the builtin colormaps.\n ", "n_words": 119, "vocab_size": 82, "n_whitespaces": 266, "language": "en" } }, { "id": 186661, "commit_id": "7d9e9a49005de7961e84d2a7c608db57dbab3046", "repo": "certbot", "path": "certbot-apache/certbot_apache/_internal/override_centos.py", "file_name": "override_centos.py", "fun_name": "config_test", "commit_message": "Add typing to certbot.apache (#9071)\n\n* Add typing to certbot.apache\r\n\r\nCo-authored-by: Adrien Ferrand ", "code": "def config_test(self) -> None:\n \n\n os_info = util.get_os_info()\n fedora = os_info[0].lower() == \"fedora\"\n\n try:\n super().config_test()\n except errors.MisconfigurationError:\n if fedora:\n self._try_restart_fedora()\n else:\n raise\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 119, "n_words": 21, "vocab_size": 20, "complexity": 3, "nloc": 16, "token_counts": 52, "n_ast_nodes": 94, "n_identifiers": 11, "d_id": 45569, "documentation": { "docstring": "\n Override config_test to mitigate configtest error in vanilla installation\n of mod_ssl in Fedora. The error is caused by non-existent self-signed\n certificates referenced by the configuration, that would be autogenerated\n during the first (re)start of httpd.\n ", "n_words": 35, "vocab_size": 30, "n_whitespaces": 71, "language": "en" } }, { "id": 204340, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/staticfiles/finders.py", "file_name": "finders.py", "fun_name": "find_location", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def find_location(self, root, path, prefix=None):\n \n if prefix:\n prefix = \"%s%s\" % (prefix, os.sep)\n if not path.startswith(prefix):\n return None\n path = path[len(prefix) :]\n path = safe_join(root, path)\n if os.path.exists(path):\n return path\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 117, "n_words": 30, "vocab_size": 23, "complexity": 4, "nloc": 9, "token_counts": 69, "n_ast_nodes": 110, "n_identifiers": 11, "d_id": 50703, "documentation": { "docstring": "\n Find a requested static file in a location and return the found\n absolute path (or ``None`` if no match).\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 41, "language": "en" } }, { "id": 215895, "commit_id": "a8d2d1e1397cdc79b2c5f1ad7f6e3b729dcf8857", "repo": "salt", "path": "salt/states/win_certutil.py", "file_name": "win_certutil.py", "fun_name": "del_store", "commit_message": "Add tests, fix state module", "code": "def del_store(name, store, saltenv=\"base\"):\n \n ret = {\"name\": name, \"result\": True, \"comment\": \"\", \"changes\": {}}\n\n cert_file = __salt__[\"cp.cache_file\"](name, saltenv)\n if cert_file is False:\n ret[\"comment\"] = \"Certificate file not found: {}\".format(name)\n ret[\"result\"] = False\n return ret\n\n cert_serial = __salt__[\"certutil.get_cert_serial\"](name)\n if cert_serial is None:\n ret[\"comment\"] = \"Invalid certificate file: {}\".format(name)\n ret[\"result\"] = False\n return ret\n\n old_serials = __salt__[\"certutil.get_stored_cert_serials\"](store=store)\n if cert_serial not in old_serials:\n ret[\"comment\"] = \"Certificate already absent: 
{}\".format(name)\n return ret\n\n if __opts__[\"test\"]:\n ret[\"comment\"] = \"Certificate will be removed: {}\".format(name)\n ret[\"result\"] = None\n return ret\n\n retcode = __salt__[\"certutil.del_store\"](name, store, retcode=True)\n if retcode != 0:\n ret[\"comment\"] = \"Error removing certificate: {}\".format(name)\n ret[\"result\"] = False\n return ret\n\n new_serials = __salt__[\"certutil.get_stored_cert_serials\"](store=store)\n if cert_serial not in new_serials:\n ret[\"changes\"][\"removed\"] = name\n ret[\"comment\"] = \"Removed certificate: {}\".format(name)\n else:\n ret[\"comment\"] = \"Failed to remove certificate: {}\".format(name)\n ret[\"result\"] = False\n\n return ret\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 302, "n_words": 131, "vocab_size": 63, "complexity": 7, "nloc": 33, "token_counts": 252, "n_ast_nodes": 449, "n_identifiers": 13, "d_id": 54230, "documentation": { "docstring": "\n Remove a certificate from the given certificate store\n\n Args:\n\n name (str):\n The path to the certificate to remove from the store. This is either\n the path to a local file or a file from the file server in the form\n of ``salt://path/to/file``\n\n store (str):\n The certificate store to remove the certificate from\n\n saltenv (str):\n The salt environment to use. This is ignored if the path is local\n\n Returns:\n dict: A dictionary containing the results\n\n CLI Example:\n\n .. code-block:: yaml\n\n remove_certificate:\n certutil.del_store:\n name: salt://web_cert.cer\n store: TrustedPublisher\n ", "n_words": 85, "vocab_size": 49, "n_whitespaces": 225, "language": "en" } }, { "id": 194838, "commit_id": "74e12d10bdf6e8f8abc82056e00d6e2360b871af", "repo": "ParlAI", "path": "parlai/utils/conversations.py", "file_name": "conversations.py", "fun_name": "_load_raw", "commit_message": "[teachers] Speed up conversations teacher (#4404)\n\n* Speed up conversations teacher.\r\n\r\n* Whoops.\r\n\r\n* Okay let's try bringing back train mode.\r\n\r\n* Update test_conversations.py\r\n\r\n* Update conversations.py\r\n\r\n* Update test.py", "code": "def _load_raw(self, datapath):\n \n if not PathManager.exists(datapath):\n raise RuntimeError(\n f'Conversations at path {datapath} not found. 
'\n 'Double check your path.'\n )\n\n with PathManager.open(datapath, 'r') as f:\n lines = f.read().splitlines()\n for line in lines:\n yield line\n", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 144, "n_words": 34, "vocab_size": 32, "complexity": 3, "nloc": 10, "token_counts": 54, "n_ast_nodes": 101, "n_identifiers": 12, "d_id": 47108, "documentation": { "docstring": "\n Load the data as a raw, unparsed file.\n\n Useful for fast IO stuff like random indexing.\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 38, "language": "en" } }, { "id": 60411, "commit_id": "cc4d0564756ca067516f71718a3d135996525909", "repo": "transferlearning", "path": "code/deep/BJMMD/caffe/scripts/cpp_lint.py", "file_name": "cpp_lint.py", "fun_name": "Error", "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", "code": "def Error(filename, linenum, category, confidence, message):\n \n if _ShouldPrintError(category, confidence, linenum):\n _cpplint_state.IncrementErrorCount(category)\n if _cpplint_state.output_format == 'vs7':\n sys.stderr.write('%s(%s): %s [%s] [%d]\\n' % (\n filename, linenum, message, category, confidence))\n elif _cpplint_state.output_format == 'eclipse':\n sys.stderr.write('%s:%s: warning: %s [%s] [%d]\\n' % (\n filename, linenum, message, category, confidence))\n else:\n sys.stderr.write('%s:%s: %s [%s] [%d]\\n' % (\n filename, linenum, message, category, confidence))\n\n\n# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.\n_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(\n r'\\\\([abfnrtv?\"\\\\\\']|\\d+|x[0-9a-fA-F]+)')\n# Matches strings. Escape codes should already be removed by ESCAPES.\n_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'\"[^\"]*\"')\n# Matches characters. Escape codes should already be removed by ESCAPES.\n_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r\"'.'\")\n# Matches multi-line C++ comments.\n# This RE is a little bit more complicated than one might expect, because we\n# have to take care of space removals tools so we can handle comments inside\n# statements better.\n# The current rule is: We only clear spaces from both sides when we're at the\n# end of the line. Otherwise, we try to remove spaces from the right side,\n# if this doesn't work we try on left side but only if there's a non-character\n# on the right.\n_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(\n r, re.VERBOSE)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 246, "n_words": 192, "vocab_size": 120, "complexity": 4, "nloc": 12, "token_counts": 106, "n_ast_nodes": 241, "n_identifiers": 20, "d_id": 12139, "documentation": { "docstring": "Logs the fact we've found a lint error.\n\n We log where the error was found, and also our confidence in the error,\n that is, how certain we are this is a legitimate style regression, and\n not a misidentification or a use that's sometimes justified.\n\n False positives can be suppressed by the use of\n \"cpplint(category)\" comments on the offending line. These are\n parsed into _error_suppressions.\n\n Args:\n filename: The name of the file containing the error.\n linenum: The number of the line containing the error.\n category: A string used to describe the \"category\" this bug\n falls under: \"whitespace\", say, or \"runtime\". 
Categories\n may have a hierarchy separated by slashes: \"whitespace/indent\".\n confidence: A number from 1-5 representing a confidence score for\n the error, with 5 meaning that we are certain of the problem,\n and 1 meaning that it could be a legitimate construct.\n message: The error message.\n (\\s*/\\*.*\\*/\\s*$|\n /\\*.*\\*/\\s+|\n \\s+/\\*.*\\*/(?=\\W)|\n /\\*.*\\*/)", "n_words": 148, "vocab_size": 103, "n_whitespaces": 223, "language": "en" } }, { "id": 124015, "commit_id": "52bb8e47d483082e528fc8595005e0813a46efb8", "repo": "ray", "path": "rllib/evaluation/env_runner_v2.py", "file_name": "env_runner_v2.py", "fun_name": "_new_batch_builder", "commit_message": "[RLlib] EnvRunnerV2 and EpisodeV2 that support Connectors. (#25922)", "code": "def _new_batch_builder(self, _) -> _PolicyCollectorGroup:\n \n return _PolicyCollectorGroup(self._worker.policy_map)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 7, "token_counts": 19, "n_ast_nodes": 32, "n_identifiers": 6, "d_id": 27496, "documentation": { "docstring": "Create a new batch builder.\n\n We create a _PolicyCollectorGroup based on the full policy_map\n as the batch builder.\n ", "n_words": 18, "vocab_size": 14, "n_whitespaces": 39, "language": "en" } }, { "id": 191455, "commit_id": "b9f61390e9cf7f4b8b809cba56392d1f7b3ef6e6", "repo": "langchain", "path": "tests/integration_tests/llms/test_huggingface_hub.py", "file_name": "test_huggingface_hub.py", "fun_name": "test_huggingface_text_generation", "commit_message": "add text2text generation (#93)\n\nfixes issue #90", "code": "def test_huggingface_text_generation() -> None:\n \n llm = HuggingFaceHub(repo_id=\"gpt2\", model_kwargs={\"max_new_tokens\": 10})\n output = llm(\"Say foo:\")\n assert isinstance(output, str)\n\n", "url": "https://github.com/hwchase17/langchain.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 28, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 5, "token_counts": 36, "n_ast_nodes": 65, "n_identifiers": 8, "d_id": 46587, "documentation": { "docstring": "Test valid call to HuggingFace text generation model.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 209498, "commit_id": "dd7a5c97d68c00d1d03ecf8ac27c6c7038525065", "repo": "scapy", "path": "scapy/layers/netbios.py", "file_name": "netbios.py", "fun_name": "parse_options", "commit_message": "Answering machines improvements (NBNS/DNS/LLMNR) (#3699)\n\n* Minor NBNS improvements\r\n\r\n* Improve Netbios/LLMNR/DNS answering machines\r\n\r\n* DNS_am: support IPv6\r\n\r\n* More customization of some answering machines", "code": "def parse_options(self, server_name=None, from_ip=None, ip=None):\n \n self.ServerName = server_name\n self.ip = ip\n if isinstance(from_ip, str):\n self.from_ip = Net(from_ip)\n else:\n self.from_ip = from_ip\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 78, "n_words": 21, "vocab_size": 17, "complexity": 2, "nloc": 7, "token_counts": 51, "n_ast_nodes": 81, "n_identifiers": 9, "d_id": 52692, "documentation": { "docstring": "\n NBNS answering machine\n\n :param server_name: the netbios server name to match\n :param from_ip: an IP (can have a netmask) to filter on\n :param ip: the IP to answer with\n ", "n_words": 29, "vocab_size": 23, 
"n_whitespaces": 65, "language": "en" } }, { "id": 259173, "commit_id": "f9d74236e26f6169b32e23887f30879c32ac76c7", "repo": "scikit-learn", "path": "sklearn/utils/multiclass.py", "file_name": "multiclass.py", "fun_name": "check_classification_targets", "commit_message": "fix docstring of dict_learning.sparse_encode and multiclass.check_classification_targets #21350 #pariswimlds (#22793)\n\n* fix docstring\r\n\r\n* fixed linting in multiclass\r\n\r\n* fixed linting in dict learning\r\n\r\n* fixed linting in dict learning\r\n\r\n* fixed linting in dict learning\r\n\r\n* fixed linting in dict learning\r\n\r\n* fixed linting in dict learning\r\n\r\nCo-authored-by: Sakina ", "code": "def check_classification_targets(y):\n \n y_type = type_of_target(y, input_name=\"y\")\n if y_type not in [\n \"binary\",\n \"multiclass\",\n \"multiclass-multioutput\",\n \"multilabel-indicator\",\n \"multilabel-sequences\",\n ]:\n raise ValueError(\"Unknown label type: %r\" % y_type)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 78, "n_words": 24, "vocab_size": 23, "complexity": 2, "nloc": 10, "token_counts": 40, "n_ast_nodes": 74, "n_identifiers": 6, "d_id": 75624, "documentation": { "docstring": "Ensure that target y is of a non-regression type.\n\n Only the following target types (as defined in type_of_target) are allowed:\n 'binary', 'multiclass', 'multiclass-multioutput',\n 'multilabel-indicator', 'multilabel-sequences'\n\n Parameters\n ----------\n y : array-like\n Target values.\n ", "n_words": 32, "vocab_size": 30, "n_whitespaces": 68, "language": "en" } }, { "id": 281206, "commit_id": "f40ba0d256a78ab2b8461f0df3a9a52ca7dc5704", "repo": "OpenBBTerminal", "path": "discordbot/stocks/technical_analysis/wma.py", "file_name": "wma.py", "fun_name": "wma_command", "commit_message": "Bot logging fix (#1105)\n\n* Write bot logs to stdout instead of a file\r\nHeroku's logging uses the stdout and has problems with files\r\n\r\n* Send \"you snooze you lose\" only if debug flag is enabled\r\n\r\n* Replace print statements with logger entries in the economy menu\r\n\r\n* Add logging to bot menu command calls\r\n\r\n* Silence bandit warnings about the REPLACE_ME token\r\n\r\n* Organize imports and update logging in economy menu\r\n\r\n* Organize imports and update logging in dps menu\r\n\r\n* Organize imports and update logging in dd menu\r\n\r\n* Organize imports and update logging in gov menu\r\n\r\n* Organize imports and update logging in options menu\r\n\r\n* Organize imports and update logging in screener menu\r\n\r\n* Organize imports and update logging in ta menu\r\n\r\n* Revert automatic import sorting\r\n\r\n* Add logging to the options reaction helper", "code": "async def wma_command(ctx, ticker=\"\", window=\"\", offset=\"\", start=\"\", end=\"\"):\n \n\n try:\n # Debug\n if cfg.DEBUG:\n logger.debug(\n \"!stocks.ta.wma %s %s %s %s %s\",\n ticker,\n window,\n offset,\n start,\n end,\n )\n\n # Check for argument\n if ticker == \"\":\n raise Exception(\"Stock ticker is required\")\n\n if start == \"\":\n start = datetime.now() - timedelta(days=365)\n else:\n start = datetime.strptime(start, cfg.DATE_FORMAT)\n\n if end == \"\":\n end = datetime.now()\n else:\n end = datetime.strptime(end, cfg.DATE_FORMAT)\n\n l_legend = [ticker]\n\n if window == \"\":\n window = [20, 50]\n else:\n window_temp = list()\n for wind in window.split(\",\"):\n try:\n window_temp.append(float(wind))\n except Exception as 
e:\n raise Exception(\"Window needs to be a float\") from e\n window = window_temp\n\n if offset == \"\":\n offset = 0\n else:\n if not offset.lstrip(\"-\").isnumeric():\n raise Exception(\"Number has to be an integer\")\n offset = float(offset)\n\n ticker = ticker.upper()\n stock = discordbot.helpers.load(ticker, start)\n if stock.empty:\n raise Exception(\"Stock ticker is invalid\")\n\n # Retrieve Data\n price_df = pd.DataFrame(\n stock[\"Adj Close\"].values, columns=[\"Price\"], index=stock.index\n )\n i = 1\n for win in window:\n wma_data = overlap_model.wma(\n s_interval=\"1440min\", df_stock=stock, length=win, offset=offset\n )\n price_df = price_df.join(wma_data)\n l_legend.append(f\"WMA {win}\")\n i += 1\n\n # Output Data\n start = start.strftime(\"%Y-%m-%d\")\n end = end.strftime(\"%Y-%m-%d\")\n price_df = price_df.loc[(price_df.index >= start) & (price_df.index < end)]\n\n fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)\n ax.set_title(f\"{ticker} WMA\")\n\n ax.plot(price_df.index, price_df[\"Price\"], lw=3, c=\"k\")\n\n ax.set_xlabel(\"Time\")\n ax.set_xlim([price_df.index[0], price_df.index[-1]])\n ax.set_ylabel(f\"{ticker} Price\")\n\n for idx in range(1, price_df.shape[1]):\n ax.plot(price_df.iloc[:, idx])\n\n ax.legend(l_legend)\n ax.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n\n plt.gcf().autofmt_xdate()\n fig.tight_layout(pad=1)\n\n plt.savefig(\"ta_wma.png\")\n uploaded_image = gst_imgur.upload_image(\"ta_wma.png\", title=\"something\")\n image_link = uploaded_image.link\n if cfg.DEBUG:\n logger.debug(\"Image URL: %s\", image_link)\n title = \"Stocks: Weighted-Moving-Average \" + ticker\n embed = discord.Embed(title=title, colour=cfg.COLOR)\n embed.set_author(\n name=cfg.AUTHOR_NAME,\n icon_url=cfg.AUTHOR_ICON_URL,\n )\n embed.set_image(url=image_link)\n os.remove(\"ta_wma.png\")\n\n await ctx.send(embed=embed)\n\n except Exception as e:\n embed = discord.Embed(\n title=\"ERROR Stocks: Weighted-Moving-Average\",\n colour=cfg.COLOR,\n description=e,\n )\n embed.set_author(\n name=cfg.AUTHOR_NAME,\n icon_url=cfg.AUTHOR_ICON_URL,\n )\n\n await ctx.send(embed=embed)\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 1162, "n_words": 271, "vocab_size": 169, "complexity": 15, "nloc": 93, "token_counts": 653, "n_ast_nodes": 1097, "n_identifiers": 103, "d_id": 83612, "documentation": { "docstring": "Displays chart with weighted moving average [Yahoo Finance]", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 136642, "commit_id": "c976799dfd96806ec9972a287835f7a034ec3d2c", "repo": "ray", "path": "python/ray/tests/kuberay/test_kuberay_node_provider.py", "file_name": "test_kuberay_node_provider.py", "fun_name": "test_worker_group_replicas", "commit_message": "KubeRay node provider refactor (#30281)\n\nImplements KubeRay node provider as a \"BatchingNodeProvider\".\r\nBuilds on #29933.\r\n\r\nSummary of design\r\nAn autoscaler update now works like this:\r\n\r\nlist pod data from k8s\r\ncheck if it's safe to proceed with update. 
Abort the update if not.\r\ndo some internal calculation to determine desired scale\r\nsubmit a single patch to the RayCluster CR if a scale change is required\r\nEverything is single-threaded and there are O(1) K8s API calls per autoscaler update.\r\n\r\nSigned-off-by: Dmitri Gekhtman ", "code": "def test_worker_group_replicas(group_index, expected_max_replicas, expected_replicas):\n \n raycluster = get_basic_ray_cr()\n\n # Add a worker group without maxReplicas to confirm behavior\n # when maxReplicas is not specified.\n no_max_replicas_group = copy.deepcopy(raycluster[\"spec\"][\"workerGroupSpecs\"][0])\n no_max_replicas_group[\"groupName\"] = \"no-max-replicas\"\n del no_max_replicas_group[\"maxReplicas\"]\n # Also, replicas field, just for the sake of testing.\n no_max_replicas_group[\"replicas\"] = 0\n raycluster[\"spec\"][\"workerGroupSpecs\"].append(no_max_replicas_group)\n\n assert _worker_group_max_replicas(raycluster, group_index) == expected_max_replicas\n assert _worker_group_replicas(raycluster, group_index) == expected_replicas\n\n\n@pytest.mark.skipif(sys.platform.startswith(\"win\"), reason=\"Not relevant on Windows.\")\n@pytest.mark.parametrize(\n \"attempted_target_replica_count,expected_target_replica_count\",\n [(200, 200), (250, 250), (300, 300), (400, 300), (1000, 300)],\n)", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.skipif(sys.platform.startswith(\"win\"), reason=\"Not relevant on Windows.\")\n@pytest.mark.parametrize(\n \"attempted_target_replica_count,expected_target_replica_count\",\n [(200, 200), (250, 250), (300, 300), (400, 300), (1000, 300)],\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 112, "n_words": 73, "vocab_size": 63, "complexity": 1, "nloc": 9, "token_counts": 79, "n_ast_nodes": 229, "n_identifiers": 20, "d_id": 30964, "documentation": { "docstring": "Basic unit test for _worker_group_max_replicas and _worker_group_replicas\n\n Uses a RayCluster CR with worker groups with 300 maxReplicas, 200 maxReplicas,\n and unspecified maxReplicas, in that order.\n ", "n_words": 25, "vocab_size": 21, "n_whitespaces": 34, "language": "en" } }, { "id": 14420, "commit_id": "594effa279668bd955e98f1cd5c036b37d3bbd40", "repo": "pydantic", "path": "tests/test_forward_ref.py", "file_name": "test_forward_ref.py", "fun_name": "test_resolve_forward_ref_dataclass", "commit_message": "Switching to `pydantic_core` (#4516)\n\n* working on core schema generation\r\n\r\n* adapting main.py\r\n\r\n* getting tests to run\r\n\r\n* fix tests\r\n\r\n* disable pyright, fix mypy\r\n\r\n* moving to class-based model generation\r\n\r\n* working on validators\r\n\r\n* change how models are created\r\n\r\n* start fixing test_main.py\r\n\r\n* fixing mypy\r\n\r\n* SelfType\r\n\r\n* recursive models working, more tests fixed\r\n\r\n* fix tests on <3.10\r\n\r\n* get docs build to pass\r\n\r\n* starting to cleanup types.py\r\n\r\n* starting works on custom types\r\n\r\n* working on using annotated-types\r\n\r\n* using annoated types for constraints\r\n\r\n* lots of cleanup, fixing network tests\r\n\r\n* network tests passing :tada:\r\n\r\n* working on types\r\n\r\n* working on types and cleanup\r\n\r\n* fixing UUID type, restructing again\r\n\r\n* more types and newer pydantic-core\r\n\r\n* working on Iterable\r\n\r\n* more test_types tests\r\n\r\n* support newer pydantic-core, fixing more test_types.py\r\n\r\n* working through more test_types.py\r\n\r\n* test_types.py at last passing locally :tada:\r\n\r\n* fixing more tests in 
test_types.py\r\n\r\n* fix datetime_parse tests and linting\r\n\r\n* get tests running again, rename to test_datetime.py\r\n\r\n* renaming internal modules\r\n\r\n* working through mypy errors\r\n\r\n* fixing mypy\r\n\r\n* refactoring _generate_schema.py\r\n\r\n* test_main.py passing\r\n\r\n* uprev deps\r\n\r\n* fix conftest and linting?\r\n\r\n* importing Annotated\r\n\r\n* ltining\r\n\r\n* import Annotated from typing_extensions\r\n\r\n* fixing 3.7 compatibility\r\n\r\n* fixing tests on 3.9\r\n\r\n* fix linting\r\n\r\n* fixing SecretField and 3.9 tests\r\n\r\n* customising get_type_hints\r\n\r\n* ignore warnings on 3.11\r\n\r\n* spliting repr out of utils\r\n\r\n* removing unused bits of _repr, fix tests for 3.7\r\n\r\n* more cleanup, removing many type aliases\r\n\r\n* clean up repr\r\n\r\n* support namedtuples and typeddicts\r\n\r\n* test is_union\r\n\r\n* removing errors, uprev pydantic-core\r\n\r\n* fix tests on 3.8\r\n\r\n* fixing private attributes and model_post_init\r\n\r\n* renaming and cleanup\r\n\r\n* remove unnecessary PydanticMetadata inheritance\r\n\r\n* fixing forward refs and mypy tests\r\n\r\n* fix signatures, change how xfail works\r\n\r\n* revert mypy tests to 3.7 syntax\r\n\r\n* correct model title\r\n\r\n* try to fix tests\r\n\r\n* fixing ClassVar forward refs\r\n\r\n* uprev pydantic-core, new error format\r\n\r\n* add \"force\" argument to model_rebuild\r\n\r\n* Apply suggestions from code review\r\n\r\nSuggestions from @tiangolo and @hramezani :pray:\r\n\r\nCo-authored-by: Hasan Ramezani \r\nCo-authored-by: Sebastián Ramírez \r\n\r\n* more suggestions from @tiangolo\r\n\r\n* extra -> json_schema_extra on Field\r\n\r\nCo-authored-by: Hasan Ramezani \r\nCo-authored-by: Sebastián Ramírez ", "code": "def test_resolve_forward_ref_dataclass(create_module):\n module = create_module(\n # language=Python\n ", "url": "https://github.com/pydantic/pydantic.git", "language": "Python", "ast_errors": "module = create_module(\n # language=Python\n \"\"\"@dataclass", "n_ast_errors": 2, "ast_levels": 4, "n_whitespaces": 24, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 20, "token_counts": 36, "n_ast_nodes": 47, "n_identifiers": 10, "d_id": 2848, "documentation": { "docstring": "\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\n\nfrom pydantic import BaseModel\nfrom typing_extensions import Literal\n\n@dataclass", "n_words": 17, "vocab_size": 11, "n_whitespaces": 12, "language": "en" } }, { "id": 107945, "commit_id": "23338c7eb4b315cd4af0c57b61afc80f8c2086f9", "repo": "matplotlib", "path": "lib/matplotlib/pyplot.py", "file_name": "pyplot.py", "fun_name": "subplot2grid", "commit_message": "Deprecate auto-removal of overlapping Axes by plt.subplot{,2grid}.\n\nIn particular, note that the OO add_subplot does not have this behavior.", "code": "def subplot2grid(shape, loc, rowspan=1, colspan=1, fig=None, **kwargs):\n \n\n if fig is None:\n fig = gcf()\n\n rows, cols = shape\n gs = GridSpec._check_gridspec_exists(fig, rows, cols)\n\n subplotspec = gs.new_subplotspec(loc, rowspan=rowspan, colspan=colspan)\n ax = fig.add_subplot(subplotspec, **kwargs)\n\n axes_to_delete = [other for other in fig.axes\n if other != ax and ax.bbox.fully_overlaps(other.bbox)]\n if axes_to_delete:\n _api.warn_deprecated(\n \"3.6\", message=\"Auto-removal of overlapping axes is deprecated \"\n \"since %(since)s and will be removed %(removal)s; explicitly call \"\n \"ax.remove() as needed.\")\n for ax_to_del in axes_to_delete:\n delaxes(ax_to_del)\n\n return ax\n\n", "url": 
"https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 181, "n_words": 76, "vocab_size": 58, "complexity": 7, "nloc": 17, "token_counts": 129, "n_ast_nodes": 203, "n_identifiers": 27, "d_id": 22983, "documentation": { "docstring": "\n Create a subplot at a specific location inside a regular grid.\n\n Parameters\n ----------\n shape : (int, int)\n Number of rows and of columns of the grid in which to place axis.\n loc : (int, int)\n Row number and column number of the axis location within the grid.\n rowspan : int, default: 1\n Number of rows for the axis to span downwards.\n colspan : int, default: 1\n Number of columns for the axis to span to the right.\n fig : `.Figure`, optional\n Figure to place the subplot in. Defaults to the current figure.\n **kwargs\n Additional keyword arguments are handed to `~.Figure.add_subplot`.\n\n Returns\n -------\n `.axes.SubplotBase`, or another subclass of `~.axes.Axes`\n\n The axes of the subplot. The returned axes base class depends on the\n projection used. It is `~.axes.Axes` if rectilinear projection is used\n and `.projections.polar.PolarAxes` if polar projection is used. The\n returned axes is then a subplot subclass of the base class.\n\n Notes\n -----\n The following call ::\n\n ax = subplot2grid((nrows, ncols), (row, col), rowspan, colspan)\n\n is identical to ::\n\n fig = gcf()\n gs = fig.add_gridspec(nrows, ncols)\n ax = fig.add_subplot(gs[row:row+rowspan, col:col+colspan])\n ", "n_words": 179, "vocab_size": 103, "n_whitespaces": 332, "language": "en" } }, { "id": 29532, "commit_id": "67df28935c555fdd673f17e8c9183e24dde7c51f", "repo": "saleor", "path": "saleor/tax/migrations/0004_migrate_tax_classes.py", "file_name": "0004_migrate_tax_classes.py", "fun_name": "migrate_product_tax_codes", "commit_message": "Simple (flat rate) taxes API (#9784)\n\n* Add empty tax module\r\n\r\n* Add tax models (#9839)\r\n\r\n* Add tax API queries (#9856)\r\n\r\n* Add MANAGE_TAXES permission\r\n\r\n* Add tax configuration queries\r\n\r\n* Create tax configuration when channel is created\r\n\r\n* Drop sorters for now\r\n\r\n* Add TaxConfigurationPerCountry type\r\n\r\n* Update migration\r\n\r\n* Add metadata to TaxConfiguration type\r\n\r\n* Add tests for tax configuration queries\r\n\r\n* Add TaxClass types\r\n\r\n* Improve tests\r\n\r\n* Add queries for tax configuration per country\r\n\r\n* Fix query in tests\r\n\r\n* Update query cost map\r\n\r\n* Add tax API mutations (#9934)\r\n\r\n* Add taxConfigurationUpdate mutation\r\n\r\n* Update schema\r\n\r\n* Add tax class CRUD mutations\r\n\r\n* Add mutations to update/delete tax class rates per country\r\n\r\n* Review fixes\r\n\r\n* Add taxClass field to ProductType type (#9999)\r\n\r\n* Add taxClass field to ProductType type\r\n\r\n* Add taxClass field to Product type\r\n\r\n* Add taxClass field to shipping method type\r\n\r\n* Add displayGrossPrices to ProductPricingInfo (#10008)\r\n\r\n* Add displayGrossPrices to ProductPricingInfo\r\n\r\n* Add displayGrossPrices to Checkout\r\n\r\n* Add displayGrossPrices to Order\r\n\r\n* Add tests\r\n\r\n* Add ADDED_IN_35 label to new fields' descriptions\r\n\r\n* Use new display_gross_prices flag (#10121)\r\n\r\n* Use new display_gross_prices flag\r\n\r\n* Update tests\r\n\r\n* Add tests\r\n\r\n* Review fixes\r\n\r\n* Drop Vatlayer (#10335)\r\n\r\n* Add migration from Vatlayer to simple taxes\r\n\r\n* Review fixes\r\n\r\n* Review fixes\r\n\r\n* Drop usages of global 
include_taxes_in_prices flag (#10406)\r\n\r\n* Drop `include_taxes_in_prices` function from site settings\r\n\r\n* Adjust tests\r\n\r\n* Review fixes\r\n\r\n* Drop the `charge_taxes_on_shipping` flag from site settings. (#10466)\r\n\r\n* Include migrating Avatax tax codes in tax class migration\r\n\r\n* Drop `charge_taxes_on_shipping` function\r\n\r\n* Add tax_class to ShippingMethodData\r\n\r\n* Review fixes\r\n\r\n* Always calculate shipping tax with Avalara\r\n\r\n* Add default country rate (#10497)\r\n\r\n* Allow setting default tax rate for a country (without providing a tax class)\r\n\r\n* Add validation to allow settings only one default rate at once\r\n\r\n* Code review fixes\r\n\r\n* Add taxCalculationStrategy field\r\n\r\n* Add tests\r\n\r\n* CR fixes\r\n\r\n* Adjust resolver to use new tax configuration (#10533)\r\n\r\n* CR fixes\r\n\r\n* Add database router to fix false positives on relation mismatch. (#10524)\r\n\r\n* Add database router to fix false positives on relation mismatch.\r\n\r\n* The db router should have only 'allow_relation' implemented.\r\n\r\n* The 'db_for_write' part should stay.\r\n\r\n* Subscription for sync tax webooks (#10433)\r\n\r\n* Add proposed changes to schema\r\n\r\n* Add base implementation for sync tax subscription\r\n\r\n* Add test for empty order\r\n\r\n* Add clean up and missing part for tests\r\n\r\n* Use subscription for tax webhooks. Add more tests\r\n\r\n* Improve descriptions for tax objects\r\n\r\n* Adjust resolver to use new tax configuration (#10533)\r\n\r\n* Add taxCalculationStrategy field (#10532)\r\n\r\n* Add taxCalculationStrategy field\r\n\r\n* Add tests\r\n\r\n* CR fixes\r\n\r\n* CR fixes\r\n\r\n* Add datamigration to populate taxCalculationStrategy\r\n\r\n* Migrate Product.charge_taxes to new tax configuration (#10585)\r\n\r\n* Migrate Product.charge_taxes field to new tax configuration\r\n\r\n* Rename function\r\n\r\n* Fix tests\r\n\r\n* Change assign_tax_code_to_object_meta function to support tax classes\r\n\r\n* Update tax class fixtures\r\n\r\n* Improve dataloader\r\n\r\n* CR fixes\r\n\r\n* CR fixes\r\n\r\n* Add deprecation notice to dataloader\r\n\r\n* Allow removing country rates in the `taxCountryConfigurationUpdate` mutation. 
(#10647)\r\n\r\n* Allow deleting rates in taxCountryConfigurationUpdate mutation\r\n\r\n* Change tax rates ordering to keep default rates first (with null tax classes)\r\n\r\n* Update existing migration\r\n\r\n* Remove TaxClass.is_default field (#10660)\r\n\r\n* Change tax rates ordering to keep default rates first (with null tax classes)\r\n\r\n* Update existing migration\r\n\r\n* Drop is_default field from TaxClass model\r\n\r\n* Drop extra Avalara config (#10673)\r\n\r\n* Drop extra Avatax config options\r\n\r\n* Adjust tests\r\n\r\n* Use flat rates in tax calculations (#10747)\r\n\r\n* WIP Use new tax configuration in tax calculations\r\n\r\n* Use new tax calculations for checkout\r\n\r\n* Adjust tests\r\n\r\n* Add flat rates calculations for checkout and order\r\n\r\n* Calculate flat rates in product pricing objects\r\n\r\n* Adjust tests\r\n\r\n* Add tests for order calculations\r\n\r\n* Add tests for product queries tax calculations\r\n\r\n* Add tests for order calculations\r\n\r\n* Use base calculations to get default checkout shipping price\r\n\r\n* Add tests for using tax_class from product_type\r\n\r\n* Add tests for get_order_country\r\n\r\n* Adjust tests\r\n\r\n* Code review fixes\r\n\r\n* Drop update_taxes_for_order_lines (#11000)\r\n\r\n* Fix calls to Avalara not validating order (#11012)\r\n\r\n* Add validation to disallow creating negative rates (#11010)\r\n\r\n* Add missing recalculation of order.undiscounted_total (#11039)\r\n\r\n* Optimize getting tax class country rates (#11040)\r\n\r\n* Tax API adjustments for dashboard (#11042)\r\n\r\n* Ignore null rates in taxCountryConfigurationUpdate mutation\r\n\r\n* Allow to pass null rates in taxClassUpdate mutation\r\n\r\n* Improve tests\r\n\r\n* Update saleor/graphql/tax/mutations/tax_class_update.py\r\n\r\nCo-authored-by: Krzysztof Waliczek \r\n\r\n* Update schema\r\n\r\nCo-authored-by: Krzysztof Waliczek \r\n\r\n* Cleanup before release (#11049)\r\n\r\n* Update ADDED_IN labels\r\n\r\n* Fix skippeded test\r\n\r\n* Regenerate migrations\r\n\r\n* Deprecate CountryDisplay.vat field\r\n\r\n* Add changelog\r\n\r\n* Update order.undiscounted_total calculation to not include taxes (#11068)\r\n\r\n* Fix assigning rates to tax classes (#11105)\r\n\r\n* Allow all staff users and apps to query tax-related data (#11113)\r\n\r\n* Bump dependencies for origin/SALEOR-6391-simple-taxes (#11127)\r\n\r\nBumps:\r\n- cryptography to 38.0.3\r\n- pillow to 9.3.0\r\n\r\n* Fix using tax code from product and product type's tax class (#11111)\r\n\r\n* Fix using tax code from product and product type's tax class\r\n\r\n* Extract function\r\n\r\n* Replace synchronous load_site with promise (#11165)\r\n\r\n* Denormalize tax class for order lines and orders (#11172)\r\n\r\n* WIP Denormalize tax class for order lines and orders\r\n\r\n* Add denormalized fields in GraphQL types\r\n\r\n* Add tests for denormalized API fields\r\n\r\n* Return 0 rate in API when rate is null\r\n\r\n* Add preview/version notes in new field descriptions\r\n\r\n* Update changelog\r\n\r\nCo-authored-by: Dominik Kozaczko \r\nCo-authored-by: Maciej Korycinski \r\nCo-authored-by: Krzysztof Waliczek \r\nCo-authored-by: Mika <6186720+NyanKiyoshi@users.noreply.github.com>\r\nCo-authored-by: Krzysztof Kwaśniak ", "code": "def migrate_product_tax_codes(apps, _schema_editor):\n \n\n Product = apps.get_model(\"product\", \"Product\")\n ProductType = apps.get_model(\"product\", \"ProductType\")\n TaxClass = apps.get_model(\"tax\", \"TaxClass\")\n\n query = 
Q(metadata__has_key=VATLAYER_CODE_META_KEY) | Q(\n metadata__has_key=AVATAX_CODE_META_KEY\n )\n\n tax_class_metadata = {}\n\n product_types = (\n ProductType.objects.filter(query).values(\"id\", \"metadata\").order_by(\"pk\")\n )\n for batch_pks in queryset_in_batches(product_types):\n tax_classes_from_product_types = defaultdict(list)\n product_types = ProductType.objects.filter(pk__in=batch_pks)\n for product_type in product_types:\n tax_class_name, metadata = _populate_tax_class_name_and_metadata(\n product_type\n )\n if tax_class_name:\n tax_classes_from_product_types[tax_class_name].append(product_type.pk)\n tax_class_metadata[tax_class_name] = metadata\n\n for name, ids in tax_classes_from_product_types.items():\n tax_class, _ = TaxClass.objects.get_or_create(\n name=name, metadata=tax_class_metadata.get(name, {})\n )\n ProductType.objects.filter(id__in=ids).update(tax_class=tax_class)\n\n products = Product.objects.filter(query).values(\"id\", \"metadata\").order_by(\"pk\")\n tax_classes_from_products = defaultdict(list)\n for batch_pks in queryset_in_batches(products):\n products = Product.objects.filter(pk__in=batch_pks)\n for product in products:\n tax_class_name, metadata = _populate_tax_class_name_and_metadata(product)\n if tax_class_name:\n tax_classes_from_products[tax_class_name].append(product.pk)\n tax_class_metadata[tax_class_name] = metadata\n\n for name, ids in tax_classes_from_products.items():\n tax_class, _ = TaxClass.objects.get_or_create(\n name=name, metadata=tax_class_metadata.get(name, {})\n )\n Product.objects.filter(id__in=ids).update(tax_class=tax_class)\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 441, "n_words": 113, "vocab_size": 61, "complexity": 9, "nloc": 40, "token_counts": 329, "n_ast_nodes": 536, "n_identifiers": 42, "d_id": 5234, "documentation": { "docstring": "Create tax classes by migrating currently used tax codes.\n\n Tax codes are stored in metadata of products and product types. 
For each found code\n we get or create a TaxClass instance and assign the object to the tax class.\n If object has both Avalara and Vatlayer codes, keep only the Avalara code.\n ", "n_words": 52, "vocab_size": 44, "n_whitespaces": 64, "language": "en" } }, { "id": 244324, "commit_id": "2631e2879acf0bd20a64dfdd7039f37a8e6afbf6", "repo": "mmdetection", "path": "mmdet/apis/inference.py", "file_name": "inference.py", "fun_name": "async_inference_detector", "commit_message": "Support Datasampler", "code": "async def async_inference_detector(model, imgs):\n \n if not isinstance(imgs, (list, tuple)):\n imgs = [imgs]\n\n cfg = model.cfg\n\n if isinstance(imgs[0], np.ndarray):\n cfg = cfg.copy()\n # set loading pipeline type\n cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'\n\n cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)\n test_pipeline = Compose(cfg.data.test.pipeline)\n\n datas = []\n for img in imgs:\n # prepare data\n if isinstance(img, np.ndarray):\n # directly add img\n data = dict(img=img)\n else:\n # add information into dict\n data = dict(img_info=dict(filename=img), img_prefix=None)\n # build the data pipeline\n data = test_pipeline(data)\n datas.append(data)\n\n for m in model.modules():\n assert not isinstance(\n m,\n RoIPool), 'CPU inference with RoIPool is not supported currently.'\n\n # We don't restore `torch.is_grad_enabled()` value during concurrent\n # inference since execution can overlap\n torch.set_grad_enabled(False)\n results = await model.aforward_test(data, rescale=True)\n return results\n\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 298, "n_words": 113, "vocab_size": 80, "complexity": 6, "nloc": 24, "token_counts": 193, "n_ast_nodes": 314, "n_identifiers": 32, "d_id": 70325, "documentation": { "docstring": "Async inference image(s) with the detector.\n\n Args:\n model (nn.Module): The loaded detector.\n img (str | ndarray): Either image files or loaded images.\n\n Returns:\n Awaitable detection results.\n ", "n_words": 26, "vocab_size": 24, "n_whitespaces": 56, "language": "en" } }, { "id": 22411, "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", "repo": "Python", "path": "Eight_Puzzle_Solver/eight_puzzle.py", "file_name": "eight_puzzle.py", "fun_name": "isGoalState", "commit_message": "refactor: clean code\n\nSigned-off-by: slowy07 ", "code": "def isGoalState(self):\n \n for i in range(self.size):\n for j in range(self.size):\n if i == j and j == self.size - 1:\n continue\n if self.state[i][j] != (i) * self.size + (j + 1):\n return False\n return True\n", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 135, "n_words": 35, "vocab_size": 24, "complexity": 6, "nloc": 8, "token_counts": 69, "n_ast_nodes": 108, "n_identifiers": 7, "d_id": 4321, "documentation": { "docstring": "\n Parameters: State\n Returns: True if Goal State, otherwise False\n Restrictions: State is self.size x self.size Array\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 45, "language": "en" } }, { "id": 153568, "commit_id": "605efa618e7994681f57b11d04d417f353ef8d50", "repo": "modin", "path": "modin/pandas/base.py", "file_name": "base.py", "fun_name": "combine_first", "commit_message": "DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333)\n\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: Alexander Myskov ", "code": "def 
combine_first(self, other): # noqa: PR01, RT01, D200\n \n return self._binary_op(\"combine_first\", other, _axis=0)\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 27, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 2, "token_counts": 21, "n_ast_nodes": 36, "n_identifiers": 5, "d_id": 35449, "documentation": { "docstring": "\n Update null elements with value in the same location in `other`.\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 26, "language": "en" } }, { "id": 132416, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tune/tests/test_checkpoint_manager.py", "file_name": "test_checkpoint_manager.py", "fun_name": "testOnCheckpointOrdered", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def testOnCheckpointOrdered(self):\n \n keep_checkpoints_num = 2\n checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num)\n checkpoints = [\n Checkpoint(Checkpoint.PERSISTENT, {i}, self.mock_result(i))\n for i in range(3)\n ]\n\n with patch.object(checkpoint_manager, \"delete\") as delete_mock:\n for j in range(3):\n checkpoint_manager.on_checkpoint(checkpoints[j])\n expected_deletes = 0 if j != 2 else 1\n self.assertEqual(delete_mock.call_count, expected_deletes, j)\n self.assertEqual(\n checkpoint_manager.newest_persistent_checkpoint, checkpoints[j]\n )\n\n best_checkpoints = checkpoint_manager.best_checkpoints()\n self.assertEqual(len(best_checkpoints), keep_checkpoints_num)\n self.assertIn(checkpoints[1], best_checkpoints)\n self.assertIn(checkpoints[2], best_checkpoints)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 251, "n_words": 54, "vocab_size": 45, "complexity": 4, "nloc": 19, "token_counts": 148, "n_ast_nodes": 231, "n_identifiers": 22, "d_id": 29752, "documentation": { "docstring": "\n Tests increasing priorities. Also tests that that the worst checkpoints\n are deleted when necessary.\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 36, "language": "en" } }, { "id": 260502, "commit_id": "01fcf8a0acc7e6517faa4fc6887eb45f5d2ea77b", "repo": "scikit-learn", "path": "sklearn/decomposition/_sparse_pca.py", "file_name": "_sparse_pca.py", "fun_name": "inverse_transform", "commit_message": "ENH add inverse_transform in *SparsePCA (#23905)", "code": "def inverse_transform(self, X):\n \n check_is_fitted(self)\n X = check_array(X)\n\n return (X @ self.components_) + self.mean_\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 41, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 4, "token_counts": 30, "n_ast_nodes": 50, "n_identifiers": 7, "d_id": 76297, "documentation": { "docstring": "Transform data from the latent space to the original space.\n\n This inversion is an approximation due to the loss of information\n induced by the forward decomposition.\n\n .. 
versionadded:: 1.2\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_components)\n Data in the latent space.\n\n Returns\n -------\n X_original : ndarray of shape (n_samples, n_features)\n Reconstructed data in the original space.\n ", "n_words": 58, "vocab_size": 40, "n_whitespaces": 150, "language": "en" } }, { "id": 65944, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/education/report/student_batch_wise_attendance/student_batch_wise_attendance.py", "file_name": "student_batch_wise_attendance.py", "fun_name": "get_student_group_strength", "commit_message": "style: format code with black", "code": "def get_student_group_strength(student_group):\n\tstudent_group_strength = frappe.db.sql(\n\t\t,\n\t\tstudent_group,\n\t)[0][0]\n\treturn student_group_strength\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 4, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 7, "token_counts": 26, "n_ast_nodes": 41, "n_identifiers": 6, "d_id": 14064, "documentation": { "docstring": "select count(*) from `tabStudent Group Student`\n\t\twhere parent = %s and active=1", "n_words": 12, "vocab_size": 12, "n_whitespaces": 10, "language": "en" } }, { "id": 40215, "commit_id": "c3c84b9ecf16bcc61ed80ec39d511af92fe07f2c", "repo": "dash", "path": "dash/testing/browser.py", "file_name": "browser.py", "fun_name": "wait_for_text_to_equal", "commit_message": "f-strings everywhere! fffff", "code": "def wait_for_text_to_equal(self, selector, text, timeout=None):\n \n return self._wait_for(\n method=text_to_equal,\n args=(selector, text),\n timeout=timeout,\n msg=f\"text -> {text} not found within {timeout or self._wait_timeout}s\",\n )\n", "url": "https://github.com/plotly/dash.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 86, "n_words": 21, "vocab_size": 21, "complexity": 1, "nloc": 7, "token_counts": 41, "n_ast_nodes": 73, "n_identifiers": 11, "d_id": 7354, "documentation": { "docstring": "Explicit wait until the element's text equals the expected `text`.\n\n timeout if not set, equals to the fixture's `wait_timeout`\n shortcut to `WebDriverWait` with customized `text_to_equal`\n condition.\n ", "n_words": 26, "vocab_size": 22, "n_whitespaces": 54, "language": "en" } }, { "id": 251443, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "mitmproxy/proxy/layer.py", "file_name": "layer.py", "fun_name": "stack_pos", "commit_message": "make it black!", "code": "def stack_pos(self) -> str:\n \n try:\n idx = self.context.layers.index(self)\n except ValueError:\n return repr(self)\n else:\n return \" >> \".join(repr(x) for x in self.context.layers[: idx + 1])\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 85, "n_words": 24, "vocab_size": 22, "complexity": 3, "nloc": 8, "token_counts": 56, "n_ast_nodes": 94, "n_identifiers": 11, "d_id": 73741, "documentation": { "docstring": "repr() for this layer and all its parent layers, only useful for debugging.", "n_words": 13, "vocab_size": 12, "n_whitespaces": 12, "language": "en" } }, { "id": 131089, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tests/aws/utils/helpers.py", "file_name": "helpers.py", "fun_name": "apply_node_provider_config_updates", "commit_message": "[CI] Format Python 
code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def apply_node_provider_config_updates(config, node_cfg, node_type_name, max_count):\n \n tags = node_provider_tags(config, node_type_name)\n tags[TAG_RAY_CLUSTER_NAME] = DEFAULT_CLUSTER_NAME\n user_tag_specs = node_cfg.get(\"TagSpecifications\", [])\n tag_specs = [\n {\n \"ResourceType\": \"instance\",\n \"Tags\": [{\"Key\": k, \"Value\": v} for k, v in sorted(tags.items())],\n }\n ]\n node_provider_cfg_updates = {\n \"MinCount\": 1,\n \"MaxCount\": max_count,\n \"TagSpecifications\": tag_specs,\n }\n tags.pop(TAG_RAY_CLUSTER_NAME)\n node_cfg.update(node_provider_cfg_updates)\n # merge node provider tag specs with user overrides\n AWSNodeProvider._merge_tag_specs(tag_specs, user_tag_specs)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 150, "n_words": 57, "vocab_size": 50, "complexity": 2, "nloc": 18, "token_counts": 110, "n_ast_nodes": 184, "n_identifiers": 21, "d_id": 29475, "documentation": { "docstring": "\n Applies default updates made by AWSNodeProvider to node_cfg during node\n creation. This should only be used for testing purposes.\n\n Args:\n config: autoscaler config\n node_cfg: node config\n node_type_name: node type name\n max_count: max nodes of the given type to launch\n ", "n_words": 39, "vocab_size": 34, "n_whitespaces": 80, "language": "en" } }, { "id": 215828, "commit_id": "a35b29b2651bf33c5d5b45e64bc7765ffde4aff4", "repo": "salt", "path": "tests/pytests/functional/states/file/test_rename.py", "file_name": "test_rename.py", "fun_name": "test_relative_name", "commit_message": "Add some funtional tests\n\nAdd functional tests for the following:\n- file.readlink\n- file.replace\n- file.symlink\n\nRemove unit tests for file.replace as they are duplicated in the added\nfunctional test", "code": "def test_relative_name(file):\n \n result = file.rename(name=\"..\\\\rel\\\\path\\\\test\", source=str(source))\n assert \"is not an absolute path\" in result.filtered[\"comment\"]\n assert result.filtered[\"result\"] is False\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 30, "n_words": 18, "vocab_size": 17, "complexity": 1, "nloc": 4, "token_counts": 41, "n_ast_nodes": 76, "n_identifiers": 8, "d_id": 54194, "documentation": { "docstring": "\n Test file.rename when name is a relative path\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 15, "language": "en" } }, { "id": 227054, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_histogram2d.py", "file_name": "_histogram2d.py", "fun_name": "ybingroup", "commit_message": "switch to black .22", "code": "def ybingroup(self):\n \n return self[\"ybingroup\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 58727, "documentation": { "docstring": "\n Set a group of histogram traces which will have compatible\n y-bin settings. Using `ybingroup`, histogram2d and\n histogram2dcontour traces (on axes of the same axis type) can\n have compatible y-bin settings. 
Note that the same `ybingroup`\n value can be used to set (1D) histogram `bingroup`\n\n The 'ybingroup' property is a string and must be specified as:\n - A string\n - A number that will be converted to a string\n\n Returns\n -------\n str\n ", "n_words": 71, "vocab_size": 49, "n_whitespaces": 161, "language": "en" } }, { "id": 111857, "commit_id": "5b7dac5c4054115854b3684ba86b9a79fb18d5eb", "repo": "nni", "path": "nni/retiarii/experiment/pytorch.py", "file_name": "pytorch.py", "fun_name": "debug_mutated_model", "commit_message": "Wrap one-shot algorithms as strategies (#4571)", "code": "def debug_mutated_model(base_model, evaluator, applied_mutators):\n \n base_model_ir, applied_mutators = preprocess_model(base_model, evaluator, applied_mutators)\n from ..strategy import _LocalDebugStrategy\n strategy = _LocalDebugStrategy()\n strategy.run(base_model_ir, applied_mutators)\n _logger.info('local debug completed!')\n\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 40, "n_words": 22, "vocab_size": 19, "complexity": 1, "nloc": 6, "token_counts": 47, "n_ast_nodes": 78, "n_identifiers": 11, "d_id": 24492, "documentation": { "docstring": "\n Locally run only one trial without launching an experiment for debug purpose, then exit.\n For example, it can be used to quickly check shape mismatch.\n\n Specifically, it applies mutators (default to choose the first candidate for the choices)\n to generate a new model, then run this model locally.\n\n The model will be parsed with graph execution engine.\n\n Parameters\n ----------\n base_model : nni.retiarii.nn.pytorch.nn.Module\n the base model\n evaluator : nni.retiarii.graph.Evaluator\n the training class of the generated models\n applied_mutators : list\n a list of mutators that will be applied on the base model for generating a new model\n ", "n_words": 95, "vocab_size": 67, "n_whitespaces": 150, "language": "en" } }, { "id": 176278, "commit_id": "17fa9942568bfca34d4a68f8d93c538014f69389", "repo": "networkx", "path": "networkx/algorithms/core.py", "file_name": "core.py", "fun_name": "k_crust", "commit_message": "Fix functions appearing in variables `__all__` but not in docs for NX2.7 (#5289)\n\n* Adjust functions appearing in `__all__` but not in docs\r\n\r\n* clean up coloring: merge two modules make interchange private\r\n\r\n* fix duplicate name. 
Probably should be changed\r\n\r\n* fix \"see also\" doc of recursive_simple_cycles\r\n\r\n* Rm internal uses of deprecated .\r\n\r\n* Fixup warnings filters regex.\r\n\r\n* clean up a bit more, make Node & AdjList private classes\r\n\r\nCo-authored-by: Ross Barnowski \r\nCo-authored-by: Mridul Seth ", "code": "def k_crust(G, k=None, core_number=None):\n \n # Default for k is one less than in _core_subgraph, so just inline.\n # Filter is c[v] <= k\n if core_number is None:\n core_number = nx.core_number(G)\n if k is None:\n k = max(core_number.values()) - 1\n nodes = (v for v in core_number if core_number[v] <= k)\n return G.subgraph(nodes).copy()\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 90, "n_words": 52, "vocab_size": 35, "complexity": 5, "nloc": 7, "token_counts": 71, "n_ast_nodes": 115, "n_identifiers": 11, "d_id": 41801, "documentation": { "docstring": "Returns the k-crust of G.\n\n The k-crust is the graph G with the edges of the k-core removed\n and isolated nodes found after the removal of edges are also removed.\n\n Parameters\n ----------\n G : NetworkX graph\n A graph or directed graph.\n k : int, optional\n The order of the shell. If not specified return the main crust.\n core_number : dictionary, optional\n Precomputed core numbers for the graph G.\n\n Returns\n -------\n G : NetworkX graph\n The k-crust subgraph\n\n Raises\n ------\n NetworkXError\n The k-crust is not implemented for graphs with self loops\n or parallel edges.\n\n Notes\n -----\n This definition of k-crust is different than the definition in [1]_.\n The k-crust in [1]_ is equivalent to the k+1 crust of this algorithm.\n\n Not implemented for graphs with parallel edges or self loops.\n\n For directed graphs the node degree is defined to be the\n in-degree + out-degree.\n\n Graph, node, and edge attributes are copied to the subgraph.\n\n See Also\n --------\n core_number\n\n References\n ----------\n .. [1] A model of Internet topology using k-shell decomposition\n Shai Carmi, Shlomo Havlin, Scott Kirkpatrick, Yuval Shavitt,\n and Eran Shir, PNAS July 3, 2007 vol. 104 no. 
27 11150-11154\n http://www.pnas.org/content/104/27/11150.full\n ", "n_words": 190, "vocab_size": 121, "n_whitespaces": 334, "language": "en" } }, { "id": 297698, "commit_id": "a6ddac9004b7f73633f2019f3b5267e1486756c1", "repo": "core", "path": "homeassistant/components/isy994/sensor.py", "file_name": "sensor.py", "fun_name": "native_unit_of_measurement", "commit_message": "Use UnitOfTemperature in integrations (i-m) (#84307)", "code": "def native_unit_of_measurement(self) -> str | None:\n \n raw_units = self.raw_unit_of_measurement\n # Check if this is a known index pair UOM\n if isinstance(raw_units, dict) or raw_units in (UOM_ON_OFF, UOM_INDEX):\n return None\n if raw_units in (\n UnitOfTemperature.FAHRENHEIT,\n UnitOfTemperature.CELSIUS,\n UOM_DOUBLE_TEMP,\n ):\n return self.hass.config.units.temperature_unit\n return raw_units\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 145, "n_words": 41, "vocab_size": 33, "complexity": 4, "nloc": 12, "token_counts": 61, "n_ast_nodes": 94, "n_identifiers": 17, "d_id": 96664, "documentation": { "docstring": "Get the Home Assistant unit of measurement for the device.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 86734, "commit_id": "d6bcead1be02914e9734ab23f5e476b3d6f3f2cb", "repo": "sentry", "path": "tests/sentry/integrations/github/test_integration.py", "file_name": "test_integration.py", "fun_name": "test_get_repositories_all_and_pagination", "commit_message": "fix(github): Add pagination when fetching repositories (#39750)\n\nWe are not using pagination for Github's repositories endpoint. This means we were getting up to a maximum of 100 repositories.\r\n\r\nI do not know how no one hit any issues in the past.\r\n\r\nThis is work to support WOR-2234 and creating automatic code mappings.", "code": "def test_get_repositories_all_and_pagination(self):\n \n with self.tasks():\n self.assert_setup_flow()\n\n integration = Integration.objects.get(provider=self.provider.key)\n installation = integration.get_installation(self.organization)\n\n result = installation.get_repositories()\n assert result == [\n {\"name\": \"foo\", \"identifier\": \"Test-Organization/foo\"},\n {\"name\": \"bar\", \"identifier\": \"Test-Organization/bar\"},\n {\"name\": \"baz\", \"identifier\": \"Test-Organization/baz\"},\n ]\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 124, "n_words": 31, "vocab_size": 24, "complexity": 1, "nloc": 11, "token_counts": 86, "n_ast_nodes": 160, "n_identifiers": 15, "d_id": 18162, "documentation": { "docstring": "Fetch all repositories and test the pagination logic.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 196418, "commit_id": "59d22b6bb7287613d598611027f640d068ca5748", "repo": "sympy", "path": "sympy/solvers/bivariate.py", "file_name": "bivariate.py", "fun_name": "bivariate_type", "commit_message": "Moved imports to higher level", "code": "def bivariate_type(f, x, y, *, first=True):\n \n\n u = Dummy('u', positive=True)\n\n if first:\n p = Poly(f, x, y)\n f = p.as_expr()\n _x = Dummy()\n _y = Dummy()\n rv = bivariate_type(Poly(f.subs({x: _x, y: _y}), _x, _y), _x, _y, first=False)\n if rv:\n reps = {_x: x, _y: y}\n return rv[0].xreplace(reps), rv[1].xreplace(reps), rv[2]\n return\n\n p = f\n f = p.as_expr()\n\n # f(x*y)\n args = Add.make_args(p.as_expr())\n new = 
[]\n for a in args:\n a = _mexpand(a.subs(x, u/y))\n free = a.free_symbols\n if x in free or y in free:\n break\n new.append(a)\n else:\n return x*y, Add(*new), u\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 237, "n_words": 90, "vocab_size": 60, "complexity": 11, "nloc": 43, "token_counts": 412, "n_ast_nodes": 328, "n_identifiers": 26, "d_id": 47918, "documentation": { "docstring": "Given an expression, f, 3 tests will be done to see what type\n of composite bivariate it might be, options for u(x, y) are::\n\n x*y\n x+y\n x*y+x\n x*y+y\n\n If it matches one of these types, ``u(x, y)``, ``P(u)`` and dummy\n variable ``u`` will be returned. Solving ``P(u)`` for ``u`` and\n equating the solutions to ``u(x, y)`` and then solving for ``x`` or\n ``y`` is equivalent to solving the original expression for ``x`` or\n ``y``. If ``x`` and ``y`` represent two functions in the same\n variable, e.g. ``x = g(t)`` and ``y = h(t)``, then if ``u(x, y) - p``\n can be solved for ``t`` then these represent the solutions to\n ``P(u) = 0`` when ``p`` are the solutions of ``P(u) = 0``.\n\n Only positive values of ``u`` are considered.\n\n Examples\n ========\n\n >>> from sympy import solve\n >>> from sympy.solvers.bivariate import bivariate_type\n >>> from sympy.abc import x, y\n >>> eq = (x**2 - 3).subs(x, x + y)\n >>> bivariate_type(eq, x, y)\n (x + y, _u**2 - 3, _u)\n >>> uxy, pu, u = _\n >>> usol = solve(pu, u); usol\n [sqrt(3)]\n >>> [solve(uxy - s) for s in solve(pu, u)]\n [[{x: -y + sqrt(3)}]]\n >>> all(eq.subs(s).equals(0) for sol in _ for s in sol)\n True\n\n ", "n_words": 204, "vocab_size": 126, "n_whitespaces": 310, "language": "en" } }, { "id": 205197, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/sqlite3/introspection.py", "file_name": "introspection.py", "fun_name": "get_table_description", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_table_description(self, cursor, table_name):\n \n cursor.execute(\n \"PRAGMA table_info(%s)\" % self.connection.ops.quote_name(table_name)\n )\n table_info = cursor.fetchall()\n if not table_info:\n raise DatabaseError(f\"Table {table_name} does not exist (empty pragma).\")\n collations = self._get_column_collations(cursor, table_name)\n json_columns = set()\n if self.connection.features.can_introspect_json_field:\n for line in table_info:\n column = line[1]\n json_constraint_sql = '%%json_valid(\"%s\")%%' % column\n has_json_constraint = cursor.execute(\n ,\n [table_name, json_constraint_sql],\n ).fetchone()\n if has_json_constraint:\n json_columns.add(column)\n return [\n FieldInfo(\n name,\n data_type,\n None,\n get_field_size(data_type),\n None,\n None,\n not notnull,\n default,\n collations.get(name),\n pk == 1,\n name in json_columns,\n )\n for cid, name, data_type, notnull, default, pk in table_info\n ]\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 518, "n_words": 85, "vocab_size": 60, "complexity": 6, "nloc": 42, "token_counts": 167, "n_ast_nodes": 257, "n_identifiers": 32, "d_id": 51030, "documentation": { "docstring": "\n Return a description of the table with the DB-API cursor.description\n interface.\n \n SELECT sql\n FROM sqlite_master\n WHERE\n type = 'table' AND\n name = %s AND\n sql LIKE %s\n ", "n_words": 27, "vocab_size": 22, "n_whitespaces": 191, "language": "en" } 
}, { "id": 75309, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/images/tests/test_templatetags.py", "file_name": "test_templatetags.py", "fun_name": "test_render_none_as_context_variable", "commit_message": "Reformat with black", "code": "def test_render_none_as_context_variable(self):\n \n context = {\"image\": None, \"image_node\": \"fake value\"}\n node = ImageNode(Variable(\"image\"), \"original\", \"image_node\")\n\n rendered = node.render(context)\n\n self.assertEqual(rendered, \"\")\n self.assertIsNone(context[\"image_node\"])\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 62, "n_words": 20, "vocab_size": 18, "complexity": 1, "nloc": 6, "token_counts": 55, "n_ast_nodes": 103, "n_identifiers": 10, "d_id": 16388, "documentation": { "docstring": "\n Tests that an ImageNode without an image and a context variable name\n renders an empty string and puts None in the context variable\n ", "n_words": 23, "vocab_size": 18, "n_whitespaces": 45, "language": "en" } }, { "id": 22743, "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", "repo": "Python", "path": "primelib/primelib.py", "file_name": "primelib.py", "fun_name": "sieveEr", "commit_message": "refactor: clean code\n\nSigned-off-by: slowy07 ", "code": "def sieveEr(N):\n \n from math import sqrt\n\n # precondition\n assert isinstance(N, int) and (N > 2), \"'N' must been an int and > 2\"\n\n primes = [True for x in range(N + 1)]\n\n for p in range(2, int(sqrt(N)) + 1):\n if primes[p]:\n for i in range(p * p, N + 1, p):\n primes[i] = False\n primes[0] = False\n primes[1] = False\n ret = []\n for p in range(N + 1):\n if primes[p]:\n ret.append(p)\n\n return ret\n\n\n# --------------------------------\n\n", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 159, "n_words": 76, "vocab_size": 52, "complexity": 8, "nloc": 15, "token_counts": 125, "n_ast_nodes": 195, "n_identifiers": 13, "d_id": 4434, "documentation": { "docstring": "\n input: positive integer 'N' > 2\n returns a list of prime numbers from 2 up to N.\n\n This function implements the algorithm called\n sieve of erathostenes.\n\n ", "n_words": 26, "vocab_size": 24, "n_whitespaces": 42, "language": "en" } }, { "id": 40304, "commit_id": "f7e25e18983f2f36a1529cd9e4bda6fa008cbd6d", "repo": "seaborn", "path": "seaborn/rcmod.py", "file_name": "rcmod.py", "fun_name": "plotting_context", "commit_message": "Use f-strings for string formatting (#2800)\n\nReformats all the text from the old \"%-formatted\" and .format(...) format to the newer f-string format, as defined in PEP 498. This requires Python 3.6+.\r\n\r\nFlynt 0.76 was used to reformat the strings. 45 f-strings were created in 13 files.\r\n\r\nF-strings are in general more readable, concise and performant. 
See also: https://www.python.org/dev/peps/pep-0498/#rationale", "code": "def plotting_context(context=None, font_scale=1, rc=None):\n \n if context is None:\n context_dict = {k: mpl.rcParams[k] for k in _context_keys}\n\n elif isinstance(context, dict):\n context_dict = context\n\n else:\n\n contexts = [\"paper\", \"notebook\", \"talk\", \"poster\"]\n if context not in contexts:\n raise ValueError(f\"context must be in {', '.join(contexts)}\")\n\n # Set up dictionary of default parameters\n texts_base_context = {\n\n \"font.size\": 12,\n \"axes.labelsize\": 12,\n \"axes.titlesize\": 12,\n \"xtick.labelsize\": 11,\n \"ytick.labelsize\": 11,\n \"legend.fontsize\": 11,\n \"legend.title_fontsize\": 12,\n\n }\n\n base_context = {\n\n \"axes.linewidth\": 1.25,\n \"grid.linewidth\": 1,\n \"lines.linewidth\": 1.5,\n \"lines.markersize\": 6,\n \"patch.linewidth\": 1,\n\n \"xtick.major.width\": 1.25,\n \"ytick.major.width\": 1.25,\n \"xtick.minor.width\": 1,\n \"ytick.minor.width\": 1,\n\n \"xtick.major.size\": 6,\n \"ytick.major.size\": 6,\n \"xtick.minor.size\": 4,\n \"ytick.minor.size\": 4,\n\n }\n base_context.update(texts_base_context)\n\n # Scale all the parameters by the same factor depending on the context\n scaling = dict(paper=.8, notebook=1, talk=1.5, poster=2)[context]\n context_dict = {k: v * scaling for k, v in base_context.items()}\n\n # Now independently scale the fonts\n font_keys = texts_base_context.keys()\n font_dict = {k: context_dict[k] * font_scale for k in font_keys}\n context_dict.update(font_dict)\n\n # Override these settings with the provided rc dictionary\n if rc is not None:\n rc = {k: v for k, v in rc.items() if k in _context_keys}\n context_dict.update(rc)\n\n # Wrap in a _PlottingContext object so this can be used in a with statement\n context_object = _PlottingContext(context_dict)\n\n return context_object\n\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 586, "n_words": 195, "vocab_size": 120, "complexity": 10, "nloc": 44, "token_counts": 290, "n_ast_nodes": 472, "n_identifiers": 29, "d_id": 7383, "documentation": { "docstring": "\n Get the parameters that control the scaling of plot elements.\n\n This affects things like the size of the labels, lines, and other elements\n of the plot, but not the overall style. This is accomplished using the\n :ref:`matplotlib rcParams system `.\n\n The base context is \"notebook\", and the other contexts are \"paper\", \"talk\",\n and \"poster\", which are version of the notebook parameters scaled by different\n values. Font elements can also be scaled independently of (but relative to)\n the other values.\n\n This function can also be used as a context manager to temporarily\n alter the global defaults. See :func:`set_theme` or :func:`set_context`\n to modify the global defaults for all plots.\n\n Parameters\n ----------\n context : None, dict, or one of {paper, notebook, talk, poster}\n A dictionary of parameters or the name of a preconfigured set.\n font_scale : float, optional\n Separate scaling factor to independently scale the size of the\n font elements.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n context dictionaries. This only updates parameters that are\n considered part of the context definition.\n\n Examples\n --------\n\n .. 
include:: ../docstrings/plotting_context.rst\n\n ", "n_words": 180, "vocab_size": 115, "n_whitespaces": 283, "language": "en" } }, { "id": 175794, "commit_id": "b04dfbbe4bd7071d46c8688c2263726ea31d33cd", "repo": "cpython", "path": "Lib/inspect.py", "file_name": "inspect.py", "fun_name": "getcoroutinestate", "commit_message": "bpo-46409: Make generators in bytecode (GH-30633)\n\n* Add RETURN_GENERATOR and JUMP_NO_INTERRUPT opcodes.\r\n\r\n* Trim frame and generator by word each.\r\n\r\n* Minor refactor of frame.c\r\n\r\n* Update test.test_sys to account for smaller frames.\r\n\r\n* Treat generator functions as normal functions when evaluating and specializing.", "code": "def getcoroutinestate(coroutine):\n \n if coroutine.cr_running:\n return CORO_RUNNING\n if coroutine.cr_suspended:\n return CORO_SUSPENDED\n if coroutine.cr_frame is None:\n return CORO_CLOSED\n return CORO_CREATED\n\n", "url": "https://github.com/python/cpython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 54, "n_words": 18, "vocab_size": 13, "complexity": 4, "nloc": 8, "token_counts": 31, "n_ast_nodes": 52, "n_identifiers": 9, "d_id": 41653, "documentation": { "docstring": "Get current state of a coroutine object.\n\n Possible states are:\n CORO_CREATED: Waiting to start execution.\n CORO_RUNNING: Currently being executed by the interpreter.\n CORO_SUSPENDED: Currently suspended at an await expression.\n CORO_CLOSED: Execution has completed.\n ", "n_words": 33, "vocab_size": 32, "n_whitespaces": 59, "language": "en" } }, { "id": 245805, "commit_id": "6fca2160bd676cf011e10bdf4b622efb5688bae0", "repo": "mmdetection", "path": "mmdet/evaluation/metrics/crowdhuman_metric.py", "file_name": "crowdhuman_metric.py", "fun_name": "compare_caltech", "commit_message": "[Feature] Add CrowdHumanDataset and Metric (#8437)\n\n* [Fix] Fix UT to be compatible with pytorch 1.6 (#8707)\r\n\r\n* Update\r\n\r\n* Update\r\n\r\n* force reinstall pycocotools\r\n\r\n* Fix build_cuda\r\n\r\n* docker install git\r\n\r\n* Update\r\n\r\n* comment other job to speedup process\r\n\r\n* update\r\n\r\n* uncomment\r\n\r\n* Update\r\n\r\n* Update\r\n\r\n* Add comments for --force-reinstall\r\n\r\n* [WIP] Add CrowdHumanDataset\r\n\r\n* [WIP] Add CrowdHumanDataset\r\n\r\n* [WIP] Add CrowdHumanDataset\r\n\r\n* [WIP] Add CrowdHumanDataset\r\n\r\n* [WIP] Add CrowdHumanDataset\r\n\r\n* [WIP] Add CrowdHumanDataset\r\n\r\n* [WIP] Add CrowdHumanDataset\r\n\r\n* Add CrowdHumanDataset\r\n\r\n* [WIP] Add CrowdHumanDataset\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\n* [WIP] Add CrowdHumanDataset\r\n\r\n* [WIP] Add CrowdHumanDataset\r\n\r\n* [WIP] Add CrowdHumanDataset\r\n\r\n* [WIP] Add CrowdHumanDataset\r\n\r\n* [WIP] Add CrowdHumanDataset\r\n\r\n* [WIP] Add CrowdHumanDataset\r\n\r\n* [WIP] Add CrowdHumanDataset\r\n\r\n* Add CrowdHumanDataset\r\n\r\n* [WIP] Add CrowdHumanDataset\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\n* 
[Feature] Add CrowdHumanDataset and Metric\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\n* [Feature] Add CrowdHumanDataset and Metric\r\n\r\nCo-authored-by: jbwang1997 ", "code": "def compare_caltech(self, thres):\n \n if self.dt_boxes is None or self.gt_boxes is None:\n return list()\n\n dtboxes = self.dt_boxes if self.dt_boxes is not None else list()\n gtboxes = self.gt_boxes if self.gt_boxes is not None else list()\n dt_matched = np.zeros(dtboxes.shape[0])\n gt_matched = np.zeros(gtboxes.shape[0])\n\n dtboxes = np.array(sorted(dtboxes, key=lambda x: x[-1], reverse=True))\n gtboxes = np.array(sorted(gtboxes, key=lambda x: x[-1], reverse=True))\n if len(dtboxes):\n overlap_iou = bbox_overlaps(dtboxes, gtboxes, mode='iou')\n overlap_ioa = bbox_overlaps(dtboxes, gtboxes, mode='iof')\n else:\n return list()\n\n score_list = list()\n for i, dt in enumerate(dtboxes):\n maxpos = -1\n maxiou = thres\n for j, gt in enumerate(gtboxes):\n if gt_matched[j] == 1:\n continue\n if gt[-1] > 0:\n overlap = overlap_iou[i][j]\n if overlap > maxiou:\n maxiou = overlap\n maxpos = j\n else:\n if maxpos >= 0:\n break\n else:\n overlap = overlap_ioa[i][j]\n if overlap > thres:\n maxiou = overlap\n maxpos = j\n if maxpos >= 0:\n if gtboxes[maxpos, -1] > 0:\n gt_matched[maxpos] = 1\n dt_matched[i] = 1\n score_list.append((dt, 1, self.ID))\n else:\n dt_matched[i] = -1\n else:\n dt_matched[i] = 0\n score_list.append((dt, 0, self.ID))\n return score_list\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 795, "n_words": 160, "vocab_size": 73, "complexity": 15, "nloc": 45, "token_counts": 346, "n_ast_nodes": 539, "n_identifiers": 34, "d_id": 70897, "documentation": { "docstring": "Match the detection results with the ground_truth by Caltech\n matching strategy.\n\n Args:\n thres (float): IOU threshold.\n\n Returns:\n score_list(list[tuple[ndarray, int, str]]): Matching result.\n a list of tuples (dtbox, label, imgID) in the descending\n sort of dtbox.score.\n ", "n_words": 35, "vocab_size": 32, "n_whitespaces": 107, "language": "en" } }, { "id": 106990, "commit_id": "c25cf96cfb7e6fc9ad75347cb2a32193c501e82c", "repo": "matplotlib", "path": "lib/matplotlib/collections.py", "file_name": "collections.py", "fun_name": "get_offset_transform", "commit_message": "Switch transOffset to offset_transform.\n\nNote that most APIs *previously* already accepted *offset_transform* as\nkwarg, due to the presence of the `set_offset_transform` setter. 
Prefer\nthat name (shortening it to `offset_trf` for local variables).\n\nBackcompat for the old `transOffset` name is kept in most places by\nintroducing a property alias.", "code": "def get_offset_transform(self):\n \n if self._offset_transform is None:\n self._offset_transform = transforms.IdentityTransform()\n elif (not isinstance(self._offset_transform, transforms.Transform)\n and hasattr(self._offset_transform, '_as_mpl_transform')):\n self._offset_transform = \\\n self._offset_transform._as_mpl_transform(self.axes)\n return self._offset_transform\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 100, "n_words": 22, "vocab_size": 18, "complexity": 4, "nloc": 8, "token_counts": 65, "n_ast_nodes": 105, "n_identifiers": 10, "d_id": 22540, "documentation": { "docstring": "Return the `.Transform` instance used by this artist offset.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 166960, "commit_id": "89be1f053b695c4ce1c0569f737caf3f03c12128", "repo": "pandas", "path": "pandas/tests/arrays/masked/test_function.py", "file_name": "test_function.py", "fun_name": "data", "commit_message": "DOC: Added docstrings to fixtures defined in array module (#47211)", "code": "def data(request):\n \n return request.param\n\n\n@pytest.fixture()", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "@pytest.fixture()", "n_ast_errors": 1, "ast_levels": 6, "n_whitespaces": 10, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 30, "n_identifiers": 5, "d_id": 39892, "documentation": { "docstring": "\n Fixture returning parametrized 'data' array with different integer and\n floating point types\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 22, "language": "en" } }, { "id": 267673, "commit_id": "b2cde3a8964373d51d24e66692394d98858a2b33", "repo": "ansible", "path": "lib/ansible/module_utils/facts/hardware/linux.py", "file_name": "linux.py", "fun_name": "get_lvm_facts", "commit_message": "Add default value for lvm_facts when lvm or lvm2 is not installed or … (#75989)\n\n* Add default value for lvm_facts when lvm or lvm2 is not installed or there are no lvm facts", "code": "def get_lvm_facts(self):\n \n\n lvm_facts = {'lvm': 'N/A'}\n\n if os.getuid() == 0 and self.module.get_bin_path('vgs'):\n lvm_util_options = '--noheadings --nosuffix --units g --separator ,'\n\n vgs_path = self.module.get_bin_path('vgs')\n # vgs fields: VG #PV #LV #SN Attr VSize VFree\n vgs = {}\n if vgs_path:\n rc, vg_lines, err = self.module.run_command('%s %s' % (vgs_path, lvm_util_options))\n for vg_line in vg_lines.splitlines():\n items = vg_line.strip().split(',')\n vgs[items[0]] = {'size_g': items[-2],\n 'free_g': items[-1],\n 'num_lvs': items[2],\n 'num_pvs': items[1]}\n\n lvs_path = self.module.get_bin_path('lvs')\n # lvs fields:\n # LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert\n lvs = {}\n if lvs_path:\n rc, lv_lines, err = self.module.run_command('%s %s' % (lvs_path, lvm_util_options))\n for lv_line in lv_lines.splitlines():\n items = lv_line.strip().split(',')\n lvs[items[0]] = {'size_g': items[3], 'vg': items[1]}\n\n pvs_path = self.module.get_bin_path('pvs')\n # pvs fields: PV VG #Fmt #Attr PSize PFree\n pvs = {}\n if pvs_path:\n rc, pv_lines, err = self.module.run_command('%s %s' % (pvs_path, lvm_util_options))\n for pv_line in pv_lines.splitlines():\n items = pv_line.strip().split(',')\n 
pvs[self._find_mapper_device_name(items[0])] = {\n 'size_g': items[4],\n 'free_g': items[5],\n 'vg': items[1]}\n\n lvm_facts['lvm'] = {'lvs': lvs, 'vgs': vgs, 'pvs': pvs}\n\n return lvm_facts\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 735, "n_words": 161, "vocab_size": 104, "complexity": 9, "nloc": 33, "token_counts": 325, "n_ast_nodes": 550, "n_identifiers": 28, "d_id": 78995, "documentation": { "docstring": " Get LVM Facts if running as root and lvm utils are available ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 13, "language": "en" } }, { "id": 89863, "commit_id": "ce841204ef3b20d0f6ac812ebb06aebbc63547ac", "repo": "sentry", "path": "tests/sentry/receivers/test_onboarding.py", "file_name": "test_onboarding.py", "fun_name": "test_first_event_without_minified_stack_trace_received", "commit_message": "ref(onboarding): Add function to record first event per project with min stack trace -(#42208)", "code": "def test_first_event_without_minified_stack_trace_received(self, record_analytics):\n \n now = timezone.now()\n project = self.create_project(first_event=now)\n project_created.send(project=project, user=self.user, sender=type(project))\n data = load_data(\"javascript\")\n self.store_event(\n data=data,\n project_id=project.id,\n )\n\n with pytest.raises(AssertionError):\n record_analytics.assert_called_with(\n \"first_event_with_minified_stack_trace_for_project.sent\",\n user_id=self.user.id,\n organization_id=project.organization_id,\n project_id=project.id,\n platform=\"javascript\",\n url=\"http://localhost:3000\",\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 219, "n_words": 29, "vocab_size": 25, "complexity": 1, "nloc": 18, "token_counts": 110, "n_ast_nodes": 177, "n_identifiers": 26, "d_id": 18580, "documentation": { "docstring": "\n Test that an analytics event is NOT recorded when\n there no event with minified stack trace is received\n ", "n_words": 18, "vocab_size": 16, "n_whitespaces": 40, "language": "en" } }, { "id": 109257, "commit_id": "5af97515b3823b2efa1961253a11e2d77df88637", "repo": "matplotlib", "path": "lib/matplotlib/axes/_base.py", "file_name": "_base.py", "fun_name": "get_yaxis", "commit_message": "Add discouraged admonitions\n\nThe [*Discouraged*] prefix in the summary line is added in analogy to\nthe [*Deprecated*] prefix we add automatically. We do this so that\nthese \"labels\" are prominently visible also in summary overviews of\nthe functions in the docs.\n\nSince we rarely discourage whole functions, for now I just do this\nmanually.", "code": "def get_yaxis(self):\n \n return self.yaxis\n\n get_xgridlines = _axis_method_wrapper(\"xaxis\", \"get_gridlines\")\n get_xticklines = _axis_method_wrapper(\"xaxis\", \"get_ticklines\")\n get_ygridlines = _axis_method_wrapper(\"yaxis\", \"get_gridlines\")\n get_yticklines = _axis_method_wrapper(\"yaxis\", \"get_ticklines\")\n\n # Adding and tracking artists\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 54, "n_words": 25, "vocab_size": 18, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 84, "n_identifiers": 8, "d_id": 23495, "documentation": { "docstring": "\n [*Discouraged*] Return the YAxis instance.\n\n .. admonition:: Discouraged\n\n The use of this function is discouraged. 
You should instead\n directly access the attribute ``ax.yaxis``.\n ", "n_words": 23, "vocab_size": 22, "n_whitespaces": 67, "language": "en" } }, { "id": 221321, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/chunk.py", "file_name": "chunk.py", "fun_name": "read", "commit_message": "add python 3.10.4 for windows", "code": "def read(self, size=-1):\n \n\n if self.closed:\n raise ValueError(\"I/O operation on closed file\")\n if self.size_read >= self.chunksize:\n return b''\n if size < 0:\n size = self.chunksize - self.size_read\n if size > self.chunksize - self.size_read:\n size = self.chunksize - self.size_read\n data = self.file.read(size)\n self.size_read = self.size_read + len(data)\n if self.size_read == self.chunksize and \\\n self.align and \\\n (self.chunksize & 1):\n dummy = self.file.read(1)\n self.size_read = self.size_read + len(dummy)\n return data\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 216, "n_words": 67, "vocab_size": 38, "complexity": 8, "nloc": 17, "token_counts": 136, "n_ast_nodes": 215, "n_identifiers": 12, "d_id": 56344, "documentation": { "docstring": "Read at most size bytes from the chunk.\n If size is omitted or negative, read until the end\n of the chunk.\n ", "n_words": 21, "vocab_size": 17, "n_whitespaces": 42, "language": "en" } }, { "id": 20881, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/typing_extensions.py", "file_name": "typing_extensions.py", "fun_name": "_get_cons", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def _get_cons(self):\n \n if self.__origin__ is None:\n raise TypeError(\"Cannot get the underlying type of a \"\n \"non-specialized Annotated type.\")\n tree = self._subs_tree()\n while isinstance(tree, tuple) and tree[0] is Annotated:\n tree = tree[1]\n if isinstance(tree, tuple):\n return tree[0]\n else:\n return tree\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 196, "n_words": 39, "vocab_size": 31, "complexity": 5, "nloc": 11, "token_counts": 64, "n_ast_nodes": 107, "n_identifiers": 9, "d_id": 3608, "documentation": { "docstring": "Return the class used to create instance of this type.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 208901, "commit_id": "afa3a8051f6768a23895d72239de44f23c1c210c", "repo": "scapy", "path": "scapy/contrib/isotp/isotp_soft_socket.py", "file_name": "isotp_soft_socket.py", "fun_name": "_task", "commit_message": "Refactoring of ISOTPSoftSockets", "code": "def _task(cls):\n # type: () -> None\n \n\n log_runtime.debug(\"TimeoutScheduler Thread spawning @ %f\", cls._time())\n\n time_empty = None\n\n try:\n while 1:\n handle = cls._peek_next()\n if handle is None:\n now = cls._time()\n if 
time_empty is None:\n time_empty = now\n # 100 ms of grace time before killing the thread\n if cls.GRACE < now - time_empty:\n return\n else:\n time_empty = None\n cls._wait(handle)\n cls._poll()\n\n finally:\n # Worst case scenario: if this thread dies, the next scheduled\n # timeout will start a new one\n log_runtime.debug(\"TimeoutScheduler Thread died @ %f\", cls._time())\n cls._thread = None\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 400, "n_words": 87, "vocab_size": 58, "complexity": 6, "nloc": 19, "token_counts": 98, "n_ast_nodes": 174, "n_identifiers": 13, "d_id": 52558, "documentation": { "docstring": "Executed in a background thread, this thread will automatically\n start when the first timeout is added and stop when the last timeout\n is removed or executed.", "n_words": 26, "vocab_size": 22, "n_whitespaces": 39, "language": "en" } }, { "id": 48738, "commit_id": "48a21aa0eb3a95d32456c2a927eff9552a04231e", "repo": "django-rest-framework", "path": "tests/test_routers.py", "file_name": "test_routers.py", "fun_name": "test_nonconflicting_autogenerated_basename_different_models", "commit_message": "raise ImproperlyConfigured exception if `basename` is not unique (#8438)\n\n* raise ImproperlyConfigured if basename already exists\r\n\r\n* rename already_registered function; return True/False\r\n\r\n* additional basename tests\r\n\r\n* additional basename tests\r\n\r\n* Update rest_framework/routers.py\r\n\r\nCo-authored-by: David Graves \r\nCo-authored-by: Asif Saif Uddin ", "code": "def test_nonconflicting_autogenerated_basename_different_models(self):\n \n self.router.register(r'notes', NoteViewSet)\n self.router.register(r'notes_basename', BasenameViewSet)\n\n", "url": "https://github.com/encode/django-rest-framework.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 27, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 28, "n_ast_nodes": 47, "n_identifiers": 6, "d_id": 9589, "documentation": { "docstring": "\n Ensure 2 routers with different models, and a distinct basename specified\n on each does not throw an exception\n ", "n_words": 18, "vocab_size": 18, "n_whitespaces": 40, "language": "en" } }, { "id": 101282, "commit_id": "2beceffad9b15c1fd78f06b9b272563321c5a41e", "repo": "faceswap", "path": "lib/training/cache.py", "file_name": "cache.py", "fun_name": "cache_full", "commit_message": "Data Augmentation update (#1263)\n\n- lib.detected_face\r\n - Subclass Masks for Landmark based masks\r\n - Add training mask propery + methods to DetectedFace\r\n - lib.training_training\r\n - subclass TrainingDataGenerator for training and preview data\r\n - Split cache into own module\r\n - Reduce thread count to 1 to prevent image corruption + data re-use\r\n - Process on largest model input/output size rather than stored image size\r\n - Size and crop masks during caching stage\r\n - Implement ring buffer for data flow\r\n - Fix preview reload bug\r\n - augmentation\r\n - typing\r\n - switch color aug order\r\n - better initialization\r\n - Fix warp + landmark warp to correctly apply at different image scales\r\n - Slightly improved warp caching\r\n - Don't store whether image is_preview. 
Handle all data as training images implicitly\r\n - plugins.trainer: Typing and fixes to work with trainingdata refactor", "code": "def cache_full(self) -> bool:\n \n if self._cache_info[\"cache_full\"]:\n return self._cache_info[\"cache_full\"]\n with self._lock:\n return self._cache_info[\"cache_full\"]\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 55, "n_words": 12, "vocab_size": 10, "complexity": 2, "nloc": 7, "token_counts": 35, "n_ast_nodes": 64, "n_identifiers": 5, "d_id": 20701, "documentation": { "docstring": "bool: ``True`` if the cache has been fully populated. ``False`` if there are items still\n to be cached. ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 25, "language": "en" } }, { "id": 301868, "commit_id": "62a5854e40cb554fecb1eec897d7bcb4c94628fe", "repo": "core", "path": "homeassistant/components/feedreader/__init__.py", "file_name": "__init__.py", "fun_name": "put_timestamp", "commit_message": "Fix bare except (#72906)", "code": "def put_timestamp(self, feed_id, timestamp):\n \n self._fetch_data()\n with self._lock, open(self._data_file, \"wb\") as myfile:\n self._data.update({feed_id: timestamp})\n _LOGGER.debug(\n \"Overwriting feed %s timestamp in storage file %s\",\n feed_id,\n self._data_file,\n )\n try:\n pickle.dump(self._data, myfile)\n except Exception: # pylint: disable=broad-except\n _LOGGER.error(\"Error saving pickled data to %s\", self._data_file)\n self._cache_outdated = True\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 202, "n_words": 43, "vocab_size": 41, "complexity": 2, "nloc": 14, "token_counts": 86, "n_ast_nodes": 144, "n_identifiers": 18, "d_id": 100706, "documentation": { "docstring": "Update timestamp for given feed id (usually the url).", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 208396, "commit_id": "3983484defb4564c9baf2a24a6b3af2d0b3c0df7", "repo": "celery", "path": "t/unit/tasks/test_canvas.py", "file_name": "test_canvas.py", "fun_name": "test_link_error_on_chord_header", "commit_message": "Housekeeping for Canvas.py (#7942)\n\n* Removed pass from @abstractmethod StampingVisitor.on_signature()\r\n\r\n* Added unit test: test_repr_empty_group()\r\n\r\n* Added unit test: test_signature_on_error_adds_error_callback()\r\n\r\n* Cleaned chord.link_error() implementation\r\n\r\n* Added a new test suite: test_merge_dictionaries\r\n\r\n* Fixed bug in _merge_dictionaries() function when using None values, tested with test_none_values()\r\n\r\n* Added test case for \"Signature | non-Signature\" in unit test: test_OR()\r\n\r\n* Added new unit test: test_freezing_args_set_in_options()\r\n\r\n* Added new unit test: test_group_prepared(), for the inner method of group._prepared()\r\n\r\n* Added unit test for chord: test_link_error_on_chord_header(), using the task_allow_error_cb_on_chord_header flag\r\n\r\n* Added subtests explanation to test_OR() unit test for \"sig | non-sig\" test case\r\n\r\n* Added unit test: test_on_signature_gets_the_signature()\r\n\r\n* Matched (copied) the unit tests \"Install tox\" step to the integration tests to have the same command for both", "code": "def test_link_error_on_chord_header(self, header):\n \n self.app.conf.task_allow_error_cb_on_chord_header = True\n c = chord(header, signature('body'))\n err = signature('err')\n errback = c.link_error(err)\n assert errback == 
err\n for header_task in c.tasks:\n assert header_task.options['link_error'] == [err]\n assert c.body.options['link_error'] == [err]\n\n", "url": "https://github.com/celery/celery.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 99, "n_words": 32, "vocab_size": 22, "complexity": 2, "nloc": 9, "token_counts": 77, "n_ast_nodes": 128, "n_identifiers": 16, "d_id": 52316, "documentation": { "docstring": " Test that link_error on a chord also links the header ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 11, "language": "en" } }, { "id": 189448, "commit_id": "902e7eb4f0147b5882a613b67467e38a1d47f01e", "repo": "manim", "path": "manim/mobject/svg/code_mobject.py", "file_name": "code_mobject.py", "fun_name": "gen_html_string", "commit_message": "Hide more private methods from the docs. (#2468)\n\n* hide privs from text_mobject.py\r\n\r\n* hide privs from tex_mobject.py\r\n\r\n* hide privs from code_mobject.py\r\n\r\n* hide privs from svg_mobject.py\r\n\r\n* remove SVGPath and utils from __init__.py\r\n\r\n* don't import string_to_numbers\r\n\r\n* hide privs from geometry.py\r\n\r\n* hide privs from matrix.py\r\n\r\n* hide privs from numbers.py\r\n\r\n* hide privs from three_dimensions.py\r\n\r\n* forgot underscore under set_stroke_width_from_length\r\n\r\n* there were more i missed\r\n\r\n* unhidea method that was used in docs\r\n\r\n* forgot other text2hash\r\n\r\n* remove svg_path from docs", "code": "def _gen_html_string(self):\n \n self.html_string = _hilite_me(\n self.code_string,\n self.language,\n self.style,\n self.insert_line_no,\n \"border:solid gray;border-width:.1em .1em .1em .8em;padding:.2em .6em;\",\n self.file_path,\n self.line_no_from,\n )\n\n if self.generate_html_file:\n os.makedirs(\n os.path.join(\"assets\", \"codes\", \"generated_html_files\"),\n exist_ok=True,\n )\n with open(\n os.path.join(\n \"assets\",\n \"codes\",\n \"generated_html_files\",\n self.file_name + \".html\",\n ),\n \"w\",\n ) as file:\n file.write(self.html_string)\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 356, "n_words": 41, "vocab_size": 37, "complexity": 2, "nloc": 25, "token_counts": 103, "n_ast_nodes": 170, "n_identifiers": 20, "d_id": 46056, "documentation": { "docstring": "Function to generate html string with code highlighted and stores in variable html_string.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 260597, "commit_id": "b9f623cff0f61c43b194e794da45c81518c57f60", "repo": "scikit-learn", "path": "sklearn/utils/tests/test_param_validation.py", "file_name": "test_param_validation.py", "fun_name": "test_iterable_not_string", "commit_message": "MAINT Param validation: add helper constraint for cv object (#24010)", "code": "def test_iterable_not_string():\n \n constraint = _IterablesNotString()\n assert constraint.is_satisfied_by([1, 2, 3])\n assert constraint.is_satisfied_by(range(10))\n assert not constraint.is_satisfied_by(\"some string\")\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 15, "vocab_size": 13, "complexity": 1, "nloc": 5, "token_counts": 41, "n_ast_nodes": 70, "n_identifiers": 5, "d_id": 76365, "documentation": { "docstring": "Check that a string does not satisfy the _IterableNotString constraint.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, 
"language": "en" } }, { "id": 92382, "commit_id": "c4cc0467974bcfb2b3c95120bd19c337aa977183", "repo": "sentry", "path": "src/sentry/sentry_metrics/indexer/base.py", "file_name": "base.py", "fun_name": "get_mapped_key_strings_to_ints", "commit_message": "feat(metrics_indexer): Add rate limits functionality to indexer [INGEST-1380] (#36263)\n\n* feat(metrics_indexer): Add rate limits functionality to indexer [INGEST-1380]\r\n\r\nThe postgres string indexer now is able to rate limit writes using four\r\nsentry options. If that happens, `None` is returned in place of an\r\ninteger, and the FetchType is RATE_LIMITED.\r\n\r\nThe kafka consumer/message processor explicitly checks for those `None`\r\nvalues and throws away every message that references a rate-limited\r\nstring. It logs a Sentry error for every dropped message just because\r\nthat's already what we do for other kinds of dropped messages.\r\n\r\nRate limiting and quota management currently creates a ton of\r\ndataclasses and that probably wastes time. There are a ton of\r\nlow-hanging fruits:\r\n\r\n* the return value of _construct_quotas could be globally cached, as\r\n long as the cache is wiped when the sentry options change.\r\n\r\n* the same Quota object (for global limits) is referenced from multiple\r\n RequestedQuota instances (one for each org).\r\n `sentry.ratelimits.sliding_windows` could check the `id()` of the\r\n quota (if there is no prefix override) to avoid computing and checking\r\n the same quota multiple times.\r\n\r\nAn even lower hanging fruit is that we're fetching the same keys from\r\nRedis multiple times, because multiple organizations (and therefore\r\nmultiple RequestedQuota instances) adhere to the global quota. So that's\r\nbeen fixed, but as for the rest let's wait for timings from prod.\r\n\r\n* fix typo\r\n\r\n* fix typing\r\n\r\n* apply review feedback\r\n\r\n* fix typing, add test\r\n\r\n* fix tests\r\n\r\n* apply review feedback about logging too many msgs\r\n\r\n* fix leaking option in test\r\n\r\n* sike, more test failures", "code": "def get_mapped_key_strings_to_ints(self) -> MutableMapping[str, int]:\n \n cache_key_results: MutableMapping[str, int] = {}\n for org_id, result_dict in self.results.items():\n for string, id in result_dict.items():\n key = f\"{org_id}:{string}\"\n if id is not None:\n cache_key_results[key] = id\n\n return cache_key_results\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 121, "n_words": 33, "vocab_size": 26, "complexity": 4, "nloc": 18, "token_counts": 66, "n_ast_nodes": 111, "n_identifiers": 13, "d_id": 18905, "documentation": { "docstring": "\n Return the results, but formatted as the following:\n {\n \"1:a\": 10,\n \"1:b\": 11,\n \"1:c\", 12,\n \"2:e\": 13\n }\n This is for when we use indexer_cache.set_many()\n ", "n_words": 25, "vocab_size": 24, "n_whitespaces": 129, "language": "en" } }, { "id": 73881, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/models/__init__.py", "file_name": "__init__.py", "fun_name": "copy", "commit_message": "Reformat with black", "code": "def copy(self, update_attrs=None, exclude_fields=None):\n \n exclude_fields = (\n self.default_exclude_fields_in_copy\n + self.exclude_fields_in_copy\n + (exclude_fields or [])\n )\n instance, child_object_map = _copy(self.specific, exclude_fields, update_attrs)\n instance.save()\n _copy_m2m_relations(self, instance, exclude_fields=exclude_fields)\n 
return instance\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 109, "n_words": 27, "vocab_size": 24, "complexity": 2, "nloc": 10, "token_counts": 63, "n_ast_nodes": 97, "n_identifiers": 12, "d_id": 16158, "documentation": { "docstring": "Copy this task state, excluding the attributes in the ``exclude_fields`` list and updating any attributes to values\n specified in the ``update_attrs`` dictionary of ``attribute``: ``new value`` pairs", "n_words": 27, "vocab_size": 23, "n_whitespaces": 33, "language": "en" } }, { "id": 49679, "commit_id": "8468e1ac6cfe165aa1e3cf4f77ab6fb66ce98614", "repo": "PaddleHub", "path": "modules/text/lexical_analysis/jieba_paddle/module.py", "file_name": "module.py", "fun_name": "extract_tags", "commit_message": "Remove fluid api in modules and pkg. (#1906)", "code": "def extract_tags(self, sentence, topK=20, withWeight=False, allowPOS=(), withFlag=False):\n \n self.check_dependency()\n import jieba\n import jieba.analyse\n jieba.setLogLevel(logging.ERROR)\n res = jieba.analyse.extract_tags(sentence,\n topK=topK,\n withWeight=withWeight,\n allowPOS=allowPOS,\n withFlag=withFlag)\n return res\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 231, "n_words": 22, "vocab_size": 20, "complexity": 1, "nloc": 25, "token_counts": 72, "n_ast_nodes": 108, "n_identifiers": 14, "d_id": 9867, "documentation": { "docstring": "\n Extract keywords from sentence using TF-IDF algorithm.\n Args:\n topK(int): return how many top keywords. `None` for all possible words.\n withWeight(bool): if True, return a list of (word, weight);\n if False, return a list of words.\n allowPOS(tuple): the allowed POS list eg. 
['ns', 'n', 'vn', 'v','nr'].\n if the POS of w is not in this list,it will be filtered.\n withFlag(bool): only work with allowPOS is not empty.\n if True, return a list of pair(word, weight) like posseg.cut\n if False, return a list of words\n Returns:\n result(list): The key words.\n ", "n_words": 88, "vocab_size": 61, "n_whitespaces": 266, "language": "en" } }, { "id": 8421, "commit_id": "4d2d81f9fdefc52eea6a9bf0826a6f2ffc8d681b", "repo": "ludwig", "path": "ludwig/schema/model_config.py", "file_name": "model_config.py", "fun_name": "to_list", "commit_message": "Config Object (#2426)\n\n* Fixed loss instances across features\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed binary OneOfImplementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Flake 8\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix custom loss components\r\n\r\n* Fix gbm category\r\n\r\n* Remove config object code, out of scope\r\n\r\n* Fixed more tests\r\n\r\n* Fixed incorrect text preproc default, added clip to category feature level\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixes additional tests\r\n\r\n* Cache jsonschema validator to reduce memory pressure\r\n\r\n* Fix imports\r\n\r\n* Skip neuropod test\r\n\r\n* Added upgrade audio to default preproc back compat and cleaned up\r\n\r\n* Small nits\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Change backfill constant for audio\r\n\r\n* Add docstring to compute feature hash\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Unused import\r\n\r\n* Another backfill constant change\r\n\r\n* Unused import\r\n\r\n* remove default population functions\r\n\r\n* Added config object test\r\n\r\n* rewired build_inputs\r\n\r\n* rewired combiner in ecd, added logic to config object\r\n\r\n* Refactored ecd.py\r\n\r\n* Fixing up merge_with_defaults, need metadata changes in master\r\n\r\n* Refactored defaults section and mega upgraded config obj\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed some formatting\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed feature col, proc col, and render config from defaults.py\r\n\r\n* Fix duplicate import\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added config initializer to merge defaults flow\r\n\r\n* Refactored update_config_with_metadata\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added dict conversion method to config object and refactored merge config function in config_utils\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Refactored until preproc entrypoint\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed update_config_with_metadata\r\n\r\n* [pre-commit.ci] auto fixes from 
pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Removed load config base feature method - no longer necessary\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Formatting\r\n\r\n* Fixed input size assignment\r\n\r\n* Temp fix\r\n\r\n* Fixed pretrained encoder path referencing temp until preproc refactor\r\n\r\n* Solved the WORST BUG EVER\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Switch reduce_input to None for sequence tagger\r\n\r\n* Fixed another one\r\n\r\n* Fixed typo\r\n\r\n* Various test fixes\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Flake 8\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed excess defaults params issue\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Minor fixes\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed some defaults tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed more tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed more tests\r\n\r\n* Formatting\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* More test fixes\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed defaults tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix more tests\r\n\r\n* Flake 8\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix more tests\r\n\r\n* Fixed more tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed more tests\r\n\r\n* Fixed more tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fixing ghost tests attempt\r\n\r\n* Deep copy to smash the ghost failures\r\n\r\n* Copied top level modules now too\r\n\r\n* Started fixing hyperopt\r\n\r\n* Fixed Hyperopt Issues\r\n\r\n* Flake 8\r\n\r\n* Remove commented out code\r\n\r\n* Address Piero feedback\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Flake 8\r\n\r\n* Removed merge with defaults\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed various issues with preprocessing and splitting positioning\r\n\r\n* Fixed hyperopt issues\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Refactored api pipeline to use all config obj references\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed more tests\r\n\r\n* Flake 8\r\n\r\n* [pre-commit.ci] auto fixes from 
pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix more tests\r\n\r\n* Fixed auto tune learning rate and batch size\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed sequence feature tests\r\n\r\n* Fixed image feature test\r\n\r\n* Fixed last test\r\n\r\n* flake 8\r\n\r\n* Marshmallowify Config object, remove manual to dict method, add Factory method constructors\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Validate config within config object\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* All Travis feedback addressed\r\n\r\n* Using all new constructors now\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* removed from class attributes\r\n\r\n* Added deep copies back and piped repr inheritance\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Format\r\n\r\n* Small error fix, moved back compat into Config Object\r\n\r\n* Flake8\r\n\r\n* Docstring for hyperopt defaults method\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Address Joppe feedback\r\n\r\n* Revert \"Address Joppe feedback\"\r\n\r\nThis reverts commit 42f1665ef917d062a010550bb960594c355285ff.\r\n\r\n* Fix tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Flake8\r\n\r\n* fix test\r\n\r\n* Small improvement\r\n\r\n* Changed repr for input features, added feature enabling/disabling\r\n\r\n* Added feature enabling/disabling, and better reprs for SDK dev\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Flake 8\r\n\r\n* Added rich to requirements.txt\r\n\r\n* Add some more CO tests and comment more on CO code\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix explain issue\r\n\r\n* Julian feedback\r\n\r\n* Added TODOs for future refactor PRs\r\n\r\n* Fix explain test failure, test shared state improvement and bug fix, remove unncessary code from convert_submodules\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* implement Daniel's feedback\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix residual errors\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Error fix\r\n\r\n* Using mixins now so no loose attributes on defaults, fixed height width schema restrictions\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Removed unnecessary filtering from defaults schema logic\r\n\r\n* Piero's simplification and cleanup\r\n\r\n* Flake 8\r\n\r\n* Fix test and update docstrings from Pieros change\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Address most of Justin's feedback\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com 
hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix tests and more feedback implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Address feedback\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Renamed files to correspond to ModelConfig class name\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Missing constant import\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed incorrect merge conflict resolution\r\n\r\n* Flake8\r\n\r\n* Fix remaining tests (except old models training from trainer type removal)\r\n\r\n* Fixed old models not validating trainer type\r\n\r\n* Add output_feature=False to test_hyperopt_ray.py\r\n\r\n* Implement Kabir's feedback\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Travis Addair \r\nCo-authored-by: w4nderlust ", "code": "def to_list(self):\n \n return list(convert_submodules(self.__dict__).values())\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 36, "n_identifiers": 6, "d_id": 1429, "documentation": { "docstring": "Method for getting a list representation of the input features.\n\n Returns:\n List of input features specified.\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 41, "language": "en" } }, { "id": 48645, "commit_id": "df584350b4f77143d84615f05000f71408aec9c0", "repo": "django-rest-framework", "path": "tests/schemas/test_coreapi.py", "file_name": "test_coreapi.py", "fun_name": "test_action_not_coerced_for_get_and_head", "commit_message": "Prevent head method mapping to coerce action name (#7729)", "code": "def test_action_not_coerced_for_get_and_head(self):\n ", "url": "https://github.com/encode/django-rest-framework.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 9, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 46, "token_counts": 269, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 9554, "documentation": { "docstring": "\n Ensure that action name is preserved when action map contains \"head\".\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 26, "language": "en" } }, { "id": 125358, "commit_id": "3a48a79fd7d2ed9195baec275562e64e96596de4", "repo": "ray", "path": "python/ray/_private/usage/usage_lib.py", "file_name": "usage_lib.py", "fun_name": "get_cluster_metadata", "commit_message": "[Usage stats] Report total number of running jobs for usage stats purpose. 
(#26787)\n\n- Report total number of running jobs\r\n- Fix total number of nodes to include only alive nodes\r\n\r\nSigned-off-by: Jiajun Yao ", "code": "def get_cluster_metadata(gcs_client) -> dict:\n \n return json.loads(\n gcs_client.internal_kv_get(\n usage_constant.CLUSTER_METADATA_KEY,\n namespace=ray_constants.KV_NAMESPACE_CLUSTER,\n ).decode(\"utf-8\")\n )\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 56, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 22, "token_counts": 34, "n_ast_nodes": 57, "n_identifiers": 12, "d_id": 27847, "documentation": { "docstring": "Get the cluster metadata from GCS.\n\n It is a blocking API.\n\n This will return None if `put_cluster_metadata` was never called.\n\n Params:\n gcs_client: The GCS client to perform KV operation GET.\n\n Returns:\n The cluster metadata in a dictinoary.\n\n Raises:\n RuntimeError if it fails to obtain cluster metadata from GCS.\n ", "n_words": 48, "vocab_size": 38, "n_whitespaces": 87, "language": "en" } }, { "id": 249148, "commit_id": "c97042f7eef3748e17c90e48a4122389a89c4735", "repo": "synapse", "path": "tests/rest/admin/test_room.py", "file_name": "test_room.py", "fun_name": "test_unblock_room_twice", "commit_message": "Use literals in place of `HTTPStatus` constants in tests (#13469)", "code": "def test_unblock_room_twice(self) -> None:\n \n\n self._block_room(self.room_id)\n for _ in range(2):\n channel = self.make_request(\n \"PUT\",\n self.url % self.room_id,\n content={\"block\": False},\n access_token=self.admin_user_tok,\n )\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertFalse(channel.json_body[\"block\"])\n self._is_blocked(self.room_id, expect=False)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 162, "n_words": 26, "vocab_size": 26, "complexity": 2, "nloc": 13, "token_counts": 94, "n_ast_nodes": 149, "n_identifiers": 19, "d_id": 72655, "documentation": { "docstring": "Test that unblock a room that is not blocked is successful.", "n_words": 11, "vocab_size": 9, "n_whitespaces": 10, "language": "en" } }, { "id": 246112, "commit_id": "6a72c910f180ee8b4bd78223775af48492769472", "repo": "synapse", "path": "tests/rest/admin/test_federation.py", "file_name": "test_federation.py", "fun_name": "test_limit_and_from", "commit_message": "Add admin API to get a list of federated rooms (#11658)", "code": "def test_limit_and_from(self) -> None:\n \n\n number_destinations = 20\n self._create_destinations(number_destinations)\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?from=5&limit=10\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], number_destinations)\n self.assertEqual(channel.json_body[\"next_token\"], \"15\")\n self.assertEqual(len(channel.json_body[\"destinations\"]), 10)\n self._check_fields(channel.json_body[\"destinations\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 130, "n_words": 27, "vocab_size": 26, "complexity": 1, "nloc": 14, "token_counts": 109, "n_ast_nodes": 180, "n_identifiers": 17, "d_id": 71018, "documentation": { "docstring": "Testing list of destinations with a defined starting point and limit", "n_words": 11, "vocab_size": 11, 
"n_whitespaces": 10, "language": "en" } }, { "id": 101190, "commit_id": "a2de4a97985dc62db3b140a924aeac2be733abf8", "repo": "faceswap", "path": "lib/align/aligned_face.py", "file_name": "aligned_face.py", "fun_name": "face", "commit_message": "lib.align.aligned_face updates\n - Typing\n - Legacy support for pre-aligned faces\n - Coverage support for pre-aligned faces\n - Standardized retrieval of sub-crops", "code": "def face(self) -> Optional[np.ndarray]:\n \n return self._face\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 5, "token_counts": 17, "n_ast_nodes": 29, "n_identifiers": 6, "d_id": 20611, "documentation": { "docstring": " :class:`numpy.ndarray`: The aligned face at the given :attr:`size` at the specified\n :attr:`coverage` in the given :attr:`dtype`. If an :attr:`image` has not been provided\n then an the attribute will return ``None``. ", "n_words": 30, "vocab_size": 24, "n_whitespaces": 45, "language": "en" } }, { "id": 118742, "commit_id": "72703b38029f9358a0ec7ca5ed875a6b438ece19", "repo": "streamlit", "path": "lib/streamlit/elements/pyplot.py", "file_name": "pyplot.py", "fun_name": "pyplot", "commit_message": "Replace static apps with live Cloud apps (#4317)\n\nCo-authored-by: kajarenc ", "code": "def pyplot(self, fig=None, clear_figure=None, **kwargs):\n \n\n if not fig and config.get_option(\"deprecation.showPyplotGlobalUse\"):\n self.dg.exception(PyplotGlobalUseWarning())\n\n image_list_proto = ImageListProto()\n marshall(\n self.dg._get_delta_path_str(), image_list_proto, fig, clear_figure, **kwargs\n )\n return self.dg._enqueue(\"imgs\", image_list_proto)\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 88, "n_words": 24, "vocab_size": 24, "complexity": 3, "nloc": 8, "token_counts": 73, "n_ast_nodes": 118, "n_identifiers": 15, "d_id": 26399, "documentation": { "docstring": "Display a matplotlib.pyplot figure.\n\n Parameters\n ----------\n fig : Matplotlib Figure\n The figure to plot. When this argument isn't specified, this\n function will render the global figure (but this is deprecated,\n as described below)\n\n clear_figure : bool\n If True, the figure will be cleared after being rendered.\n If False, the figure will not be cleared after being rendered.\n If left unspecified, we pick a default based on the value of `fig`.\n\n * If `fig` is set, defaults to `False`.\n\n * If `fig` is not set, defaults to `True`. This simulates Jupyter's\n approach to matplotlib rendering.\n\n **kwargs : any\n Arguments to pass to Matplotlib's savefig function.\n\n Example\n -------\n >>> import matplotlib.pyplot as plt\n >>> import numpy as np\n >>>\n >>> arr = np.random.normal(1, 1, size=100)\n >>> fig, ax = plt.subplots()\n >>> ax.hist(arr, bins=20)\n >>>\n >>> st.pyplot(fig)\n\n .. output::\n https://share.streamlit.io/streamlit/docs/main/python/api-examples-source/charts.pyplot.py\n height: 630px\n\n Notes\n -----\n .. note::\n Deprecation warning. After December 1st, 2020, we will remove the ability\n to specify no arguments in `st.pyplot()`, as that requires the use of\n Matplotlib's global figure object, which is not thread-safe. So\n please always pass a figure object as shown in the example section\n above.\n\n Matplotlib support several different types of \"backends\". 
If you're\n getting an error using Matplotlib with Streamlit, try setting your\n backend to \"TkAgg\"::\n\n echo \"backend: TkAgg\" >> ~/.matplotlib/matplotlibrc\n\n For more information, see https://matplotlib.org/faq/usage_faq.html.\n\n ", "n_words": 220, "vocab_size": 150, "n_whitespaces": 581, "language": "en" } }, { "id": 269468, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "print_tensor", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def print_tensor(x, message=\"\", summarize=3):\n \n if isinstance(x, tf.Tensor) and hasattr(x, \"graph\"):\n with get_graph().as_default():\n op = tf.print(\n message, x, output_stream=sys.stdout, summarize=summarize\n )\n with tf.control_dependencies([op]):\n return tf.identity(x)\n else:\n tf.print(message, x, output_stream=sys.stdout, summarize=summarize)\n return x\n\n\n# GRAPH MANIPULATION\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 126, "n_words": 34, "vocab_size": 30, "complexity": 3, "nloc": 11, "token_counts": 99, "n_ast_nodes": 160, "n_identifiers": 17, "d_id": 80103, "documentation": { "docstring": "Prints `message` and the tensor value when evaluated.\n\n Note that `print_tensor` returns a new tensor identical to `x`\n which should be used in the following code. Otherwise the\n print operation is not taken into account during evaluation.\n\n Example:\n\n >>> x = tf.constant([[1.0, 2.0], [3.0, 4.0]])\n >>> tf.keras.backend.print_tensor(x)\n \n\n Args:\n x: Tensor to print.\n message: Message to print jointly with the tensor.\n summarize: The first and last `summarize` elements within each dimension\n are recursively printed per Tensor. If None, then the first 3 and last\n 3 elements of each dimension are printed for each tensor. If set to\n -1, it will print all elements of every tensor.\n\n Returns:\n The same tensor `x`, unchanged.\n ", "n_words": 121, "vocab_size": 92, "n_whitespaces": 229, "language": "en" } }, { "id": 101031, "commit_id": "7b9fc0454d982a2425ec44e90e5b05a87d149953", "repo": "faceswap", "path": "scripts/train.py", "file_name": "train.py", "fun_name": "_reassign_keys", "commit_message": "Live Preview - Replace cv2 with matplotlib viewer", "code": "def _reassign_keys(cls):\n \n rcParams[\"keymap.fullscreen\"] = [k for k in rcParams[\"keymap.fullscreen\"] if k != \"f\"]\n rcParams[\"keymap.save\"] = [k for k in rcParams[\"keymap.save\"] if k != \"s\"]\n rcParams[\"keymap.home\"] = [k for k in rcParams[\"keymap.home\"] if k != \"r\"]\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 63, "n_words": 35, "vocab_size": 15, "complexity": 7, "nloc": 4, "token_counts": 63, "n_ast_nodes": 112, "n_identifiers": 4, "d_id": 20471, "documentation": { "docstring": " Remove `F`, 'S' and 'R' from their default bindings. 
", "n_words": 9, "vocab_size": 9, "n_whitespaces": 10, "language": "en" } }, { "id": 314673, "commit_id": "8bed2e6459bfc1efb25d6a55aaea2eb1b9953cf9", "repo": "core", "path": "homeassistant/components/zha/core/registries.py", "file_name": "registries.py", "fun_name": "weight", "commit_message": "Remove zha from mypy ignore list (#73603)", "code": "def weight(self) -> int:\n \n weight = 0\n if self.models:\n weight += 401 - (1 if callable(self.models) else len(self.models))\n\n if self.manufacturers:\n weight += 301 - (\n 1 if callable(self.manufacturers) else len(self.manufacturers)\n )\n\n weight += 10 * len(self.channel_names)\n weight += 5 * len(self.generic_ids)\n if isinstance(self.aux_channels, frozenset):\n weight += 1 * len(self.aux_channels)\n return weight\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 166, "n_words": 51, "vocab_size": 32, "complexity": 6, "nloc": 23, "token_counts": 105, "n_ast_nodes": 167, "n_identifiers": 12, "d_id": 113278, "documentation": { "docstring": "Return the weight of the matching rule.\n\n Most specific matches should be preferred over less specific. Model matching\n rules have a priority over manufacturer matching rules and rules matching a\n single model/manufacturer get a better priority over rules matching multiple\n models/manufacturers. And any model or manufacturers matching rules get better\n priority over rules matching only channels.\n But in case of a channel name/channel id matching, we give rules matching\n multiple channels a better priority over rules matching a single channel.\n ", "n_words": 80, "vocab_size": 46, "n_whitespaces": 136, "language": "en" } }, { "id": 83152, "commit_id": "d560d124a304a2f6dd467200aab7f070a78bf155", "repo": "zulip", "path": "zerver/tests/test_message_edit.py", "file_name": "test_message_edit.py", "fun_name": "test_edit_cases", "commit_message": "python: Replace string concatenations with f-strings.", "code": "def test_edit_cases(self) -> None:\n \n self.login(\"hamlet\")\n hamlet = self.example_user(\"hamlet\")\n msg_id = self.send_stream_message(\n self.example_user(\"hamlet\"), \"Denmark\", topic_name=\"topic 1\", content=\"content 1\"\n )\n result = self.client_patch(\n f\"/json/messages/{msg_id}\",\n {\n \"message_id\": msg_id,\n \"content\": \"content 2\",\n },\n )\n self.assert_json_success(result)\n history = orjson.loads(Message.objects.get(id=msg_id).edit_history)\n self.assertEqual(history[0][\"prev_content\"], \"content 1\")\n self.assertEqual(history[0][\"user_id\"], hamlet.id)\n self.assertEqual(\n set(history[0].keys()),\n {\n \"timestamp\",\n \"prev_content\",\n \"user_id\",\n \"prev_rendered_content\",\n \"prev_rendered_content_version\",\n },\n )\n\n result = self.client_patch(\n f\"/json/messages/{msg_id}\",\n {\n \"message_id\": msg_id,\n \"topic\": \"topic 2\",\n },\n )\n self.assert_json_success(result)\n history = orjson.loads(Message.objects.get(id=msg_id).edit_history)\n self.assertEqual(history[0][LEGACY_PREV_TOPIC], \"topic 1\")\n self.assertEqual(history[0][\"user_id\"], hamlet.id)\n self.assertEqual(set(history[0].keys()), {\"timestamp\", LEGACY_PREV_TOPIC, \"user_id\"})\n\n result = self.client_patch(\n f\"/json/messages/{msg_id}\",\n {\n \"message_id\": msg_id,\n \"content\": \"content 3\",\n \"topic\": \"topic 3\",\n },\n )\n self.assert_json_success(result)\n history = orjson.loads(Message.objects.get(id=msg_id).edit_history)\n 
self.assertEqual(history[0][\"prev_content\"], \"content 2\")\n self.assertEqual(history[0][LEGACY_PREV_TOPIC], \"topic 2\")\n self.assertEqual(history[0][\"user_id\"], hamlet.id)\n self.assertEqual(\n set(history[0].keys()),\n {\n \"timestamp\",\n LEGACY_PREV_TOPIC,\n \"prev_content\",\n \"user_id\",\n \"prev_rendered_content\",\n \"prev_rendered_content_version\",\n },\n )\n\n result = self.client_patch(\n f\"/json/messages/{msg_id}\",\n {\n \"message_id\": msg_id,\n \"content\": \"content 4\",\n },\n )\n self.assert_json_success(result)\n history = orjson.loads(Message.objects.get(id=msg_id).edit_history)\n self.assertEqual(history[0][\"prev_content\"], \"content 3\")\n self.assertEqual(history[0][\"user_id\"], hamlet.id)\n\n self.login(\"iago\")\n result = self.client_patch(\n f\"/json/messages/{msg_id}\",\n {\n \"message_id\": msg_id,\n \"topic\": \"topic 4\",\n },\n )\n self.assert_json_success(result)\n history = orjson.loads(Message.objects.get(id=msg_id).edit_history)\n self.assertEqual(history[0][LEGACY_PREV_TOPIC], \"topic 3\")\n self.assertEqual(history[0][\"user_id\"], self.example_user(\"iago\").id)\n\n history = orjson.loads(Message.objects.get(id=msg_id).edit_history)\n self.assertEqual(history[0][LEGACY_PREV_TOPIC], \"topic 3\")\n self.assertEqual(history[2][LEGACY_PREV_TOPIC], \"topic 2\")\n self.assertEqual(history[3][LEGACY_PREV_TOPIC], \"topic 1\")\n self.assertEqual(history[1][\"prev_content\"], \"content 3\")\n self.assertEqual(history[2][\"prev_content\"], \"content 2\")\n self.assertEqual(history[4][\"prev_content\"], \"content 1\")\n\n # Now, we verify that the edit history data sent back has the\n # correct filled-out fields\n message_edit_history = self.client_get(f\"/json/messages/{msg_id}/history\")\n\n json_response = orjson.loads(message_edit_history.content)\n\n # We reverse the message history view output so that the IDs line up with the above.\n message_history = list(reversed(json_response[\"message_history\"]))\n i = 0\n for entry in message_history:\n expected_entries = {\"content\", \"rendered_content\", \"topic\", \"timestamp\", \"user_id\"}\n if i in {0, 2, 3}:\n expected_entries.add(\"prev_topic\")\n if i in {1, 2, 4}:\n expected_entries.add(\"prev_content\")\n expected_entries.add(\"prev_rendered_content\")\n expected_entries.add(\"content_html_diff\")\n i += 1\n self.assertEqual(expected_entries, set(entry.keys()))\n self.assert_length(message_history, 6)\n self.assertEqual(message_history[0][\"prev_topic\"], \"topic 3\")\n self.assertEqual(message_history[0][\"topic\"], \"topic 4\")\n self.assertEqual(message_history[1][\"topic\"], \"topic 3\")\n self.assertEqual(message_history[2][\"topic\"], \"topic 3\")\n self.assertEqual(message_history[2][\"prev_topic\"], \"topic 2\")\n self.assertEqual(message_history[3][\"topic\"], \"topic 2\")\n self.assertEqual(message_history[3][\"prev_topic\"], \"topic 1\")\n self.assertEqual(message_history[4][\"topic\"], \"topic 1\")\n\n self.assertEqual(message_history[0][\"content\"], \"content 4\")\n self.assertEqual(message_history[1][\"content\"], \"content 4\")\n self.assertEqual(message_history[1][\"prev_content\"], \"content 3\")\n self.assertEqual(message_history[2][\"content\"], \"content 3\")\n self.assertEqual(message_history[2][\"prev_content\"], \"content 2\")\n self.assertEqual(message_history[3][\"content\"], \"content 2\")\n self.assertEqual(message_history[4][\"content\"], \"content 2\")\n self.assertEqual(message_history[4][\"prev_content\"], \"content 1\")\n\n 
self.assertEqual(message_history[5][\"content\"], \"content 1\")\n self.assertEqual(message_history[5][\"topic\"], \"topic 1\")\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 1529, "n_words": 310, "vocab_size": 136, "complexity": 4, "nloc": 128, "token_counts": 1019, "n_ast_nodes": 1737, "n_identifiers": 35, "d_id": 17602, "documentation": { "docstring": "This test verifies the accuracy of construction of Zulip's edit\n history data structures.", "n_words": 13, "vocab_size": 12, "n_whitespaces": 19, "language": "en" } }, { "id": 243737, "commit_id": "2ae55ccbdad9c842929fb238ea1eb81d1f999024", "repo": "Pillow", "path": "src/PIL/ImageFile.py", "file_name": "ImageFile.py", "fun_name": "close", "commit_message": "Improve exception traceback readability", "code": "def close(self):\n \n # finish decoding\n if self.decoder:\n # get rid of what's left in the buffers\n self.feed(b\"\")\n self.data = self.decoder = None\n if not self.finished:\n msg = \"image was incomplete\"\n raise OSError(msg)\n if not self.image:\n msg = \"cannot parse this image\"\n raise OSError(msg)\n if self.data:\n # incremental parsing not possible; reopen the file\n # not that we have all data\n with io.BytesIO(self.data) as fp:\n try:\n self.image = Image.open(fp)\n finally:\n self.image.load()\n return self.image\n\n\n# --------------------------------------------------------------------\n\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 312, "n_words": 74, "vocab_size": 55, "complexity": 6, "nloc": 17, "token_counts": 97, "n_ast_nodes": 174, "n_identifiers": 15, "d_id": 70104, "documentation": { "docstring": "\n (Consumer) Close the stream.\n\n :returns: An image object.\n :exception OSError: If the parser failed to parse the image file either\n because it cannot be identified or cannot be\n decoded.\n ", "n_words": 29, "vocab_size": 24, "n_whitespaces": 112, "language": "en" } }, { "id": 107966, "commit_id": "442e7082140f85de53349bf0bf0e3c98e2eaa44c", "repo": "matplotlib", "path": "lib/matplotlib/transforms.py", "file_name": "transforms.py", "fun_name": "transform_non_affine", "commit_message": "Correct cross-references in documentation", "code": "def transform_non_affine(self, values):\n \n return values\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 18, "n_identifiers": 3, "d_id": 22992, "documentation": { "docstring": "\n Apply only the non-affine part of this transformation.\n\n ``transform(values)`` is always equivalent to\n ``transform_affine(transform_non_affine(values))``.\n\n In non-affine transformations, this is generally equivalent to\n ``transform(values)``. 
In affine transformations, this is\n always a no-op.\n\n Parameters\n ----------\n values : array\n The input values as NumPy array of length :attr:`input_dims` or\n shape (N x :attr:`input_dims`).\n\n Returns\n -------\n array\n The output values as NumPy array of length :attr:`output_dims` or\n shape (N x :attr:`output_dims`), depending on the input.\n ", "n_words": 71, "vocab_size": 45, "n_whitespaces": 208, "language": "en" } }, { "id": 249473, "commit_id": "cf11919ddd4f48b2f59062542ba62969042f80aa", "repo": "synapse", "path": "synapse/util/metrics.py", "file_name": "metrics.py", "fun_name": "collect", "commit_message": "Fix cache metrics not being updated when not using the legacy exposition module. (#13717)", "code": "def collect(self) -> Generator[Metric, None, None]:\n \n\n for pre_update_hook in self._pre_update_hooks:\n pre_update_hook()\n\n yield from super().collect()\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 46, "n_words": 14, "vocab_size": 14, "complexity": 2, "nloc": 7, "token_counts": 34, "n_ast_nodes": 56, "n_identifiers": 7, "d_id": 72940, "documentation": { "docstring": "\n Collects metrics, calling pre-update hooks first.\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 21, "language": "en" } }, { "id": 216630, "commit_id": "38ca08446d560797522b7828720032799584d32a", "repo": "Open-Assistant", "path": "backend/postprocessing/rankings.py", "file_name": "rankings.py", "fun_name": "get_ranking", "commit_message": "ran pre-commit hook", "code": "def get_ranking(pairs):\n \n if len(pairs) == 1:\n return list(pairs[0])\n w = get_winner(pairs)\n # now remove the winner from the list of pairs\n p_new = np.array([(a, b) for a, b in pairs if a != w])\n return [w] + get_ranking(p_new)\n\n", "url": "https://github.com/LAION-AI/Open-Assistant.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 63, "n_words": 38, "vocab_size": 33, "complexity": 4, "nloc": 6, "token_counts": 61, "n_ast_nodes": 98, "n_identifiers": 11, "d_id": 54671, "documentation": { "docstring": "\n Abuses concordance property to get a (not necessarily unqiue) ranking.\n The lack of uniqueness is due to the potential existance of multiple\n equally ranked winners. 
We have to pick one, which is where\n the non-uniqueness comes from\n ", "n_words": 37, "vocab_size": 32, "n_whitespaces": 53, "language": "en" } }, { "id": 176717, "commit_id": "2a05ccdb07cff88e56661dee8a9271859354027f", "repo": "networkx", "path": "networkx/algorithms/hierarchy.py", "file_name": "hierarchy.py", "fun_name": "flow_hierarchy", "commit_message": "Remove redundant py2 numeric conversions (#5661)\n\n* Remove redundant float conversion\r\n\r\n* Remove redundant int conversion\r\n\r\n* Use integer division\r\n\r\nCo-authored-by: Miroslav Šedivý <6774676+eumiro@users.noreply.github.com>", "code": "def flow_hierarchy(G, weight=None):\n \n if not G.is_directed():\n raise nx.NetworkXError(\"G must be a digraph in flow_hierarchy\")\n scc = nx.strongly_connected_components(G)\n return 1 - sum(G.subgraph(c).size(weight) for c in scc) / G.size(weight)\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 46, "n_words": 27, "vocab_size": 26, "complexity": 3, "nloc": 5, "token_counts": 61, "n_ast_nodes": 101, "n_identifiers": 12, "d_id": 42052, "documentation": { "docstring": "Returns the flow hierarchy of a directed network.\n\n Flow hierarchy is defined as the fraction of edges not participating\n in cycles in a directed graph [1]_.\n\n Parameters\n ----------\n G : DiGraph or MultiDiGraph\n A directed graph\n\n weight : key,optional (default=None)\n Attribute to use for node weights. If None the weight defaults to 1.\n\n Returns\n -------\n h : float\n Flow hierarchy value\n\n Notes\n -----\n The algorithm described in [1]_ computes the flow hierarchy through\n exponentiation of the adjacency matrix. This function implements an\n alternative approach that finds strongly connected components.\n An edge is in a cycle if and only if it is in a strongly connected\n component, which can be found in $O(m)$ time using Tarjan's algorithm.\n\n References\n ----------\n .. [1] Luo, J.; Magee, C.L. 
(2011),\n Detecting evolving patterns of self-organizing networks by flow\n hierarchy measurement, Complexity, Volume 16 Issue 6 53-61.\n DOI: 10.1002/cplx.20368\n http://web.mit.edu/~cmagee/www/documents/28-DetectingEvolvingPatterns_FlowHierarchy.pdf\n ", "n_words": 144, "vocab_size": 108, "n_whitespaces": 247, "language": "en" } }, { "id": 160188, "commit_id": "f404e9e92e87a3990712d723d5c562a89300ac01", "repo": "numpy", "path": "numpy/lib/function_base.py", "file_name": "function_base.py", "fun_name": "rot90", "commit_message": "Add space after argument name", "code": "def rot90(m, k=1, axes=(0, 1)):\n \n axes = tuple(axes)\n if len(axes) != 2:\n raise ValueError(\"len(axes) must be 2.\")\n\n m = asanyarray(m)\n\n if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim:\n raise ValueError(\"Axes must be different.\")\n\n if (axes[0] >= m.ndim or axes[0] < -m.ndim\n or axes[1] >= m.ndim or axes[1] < -m.ndim):\n raise ValueError(\"Axes={} out of range for array of ndim={}.\"\n .format(axes, m.ndim))\n\n k %= 4\n\n if k == 0:\n return m[:]\n if k == 2:\n return flip(flip(m, axes[0]), axes[1])\n\n axes_list = arange(0, m.ndim)\n (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]],\n axes_list[axes[0]])\n\n if k == 1:\n return transpose(flip(m, axes[1]), axes_list)\n else:\n # k == 3\n return flip(transpose(m, axes_list), axes[1])\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 265, "n_words": 105, "vocab_size": 68, "complexity": 11, "nloc": 79, "token_counts": 250, "n_ast_nodes": 377, "n_identifiers": 15, "d_id": 38560, "documentation": { "docstring": "\n Rotate an array by 90 degrees in the plane specified by axes.\n\n Rotation direction is from the first towards the second axis.\n\n Parameters\n ----------\n m : array_like\n Array of two or more dimensions.\n k : integer\n Number of times the array is rotated by 90 degrees.\n axes : (2,) array_like\n The array is rotated in the plane defined by the axes.\n Axes must be different.\n\n .. 
versionadded:: 1.12.0\n\n Returns\n -------\n y : ndarray\n A rotated view of `m`.\n\n See Also\n --------\n flip : Reverse the order of elements in an array along the given axis.\n fliplr : Flip an array horizontally.\n flipud : Flip an array vertically.\n\n Notes\n -----\n ``rot90(m, k=1, axes=(1,0))`` is the reverse of\n ``rot90(m, k=1, axes=(0,1))``\n\n ``rot90(m, k=1, axes=(1,0))`` is equivalent to\n ``rot90(m, k=-1, axes=(0,1))``\n\n Examples\n --------\n >>> m = np.array([[1,2],[3,4]], int)\n >>> m\n array([[1, 2],\n [3, 4]])\n >>> np.rot90(m)\n array([[2, 4],\n [1, 3]])\n >>> np.rot90(m, 2)\n array([[4, 3],\n [2, 1]])\n >>> m = np.arange(8).reshape((2,2,2))\n >>> np.rot90(m, 1, (1,2))\n array([[[1, 3],\n [0, 2]],\n [[5, 7],\n [4, 6]]])\n\n ", "n_words": 170, "vocab_size": 108, "n_whitespaces": 378, "language": "en" } }, { "id": 97025, "commit_id": "e6285db024d7af78f2022822abcde6f5d118af9e", "repo": "sentry", "path": "tests/sentry/data_export/endpoints/test_data_export.py", "file_name": "test_data_export.py", "fun_name": "test_export_resolves_empty_project", "commit_message": "ref(discover): Remove resolve_field_list from data export (#32547)\n\n- This removes the call to resolve_field_list to validate queries from\r\n the data export", "code": "def test_export_resolves_empty_project(self):\n \n payload = self.make_payload(\n \"discover\",\n {\"project\": [], \"start\": \"2020-05-18T14:00:00\", \"end\": \"2020-05-19T14:00:00\"},\n )\n with self.feature(\"organizations:discover-query\"):\n self.get_valid_response(self.org.slug, status_code=201, **payload)\n\n payload = self.make_payload(\n \"issue\", {\"project\": None, \"start\": \"2020-05-18T14:00:00\", \"end\": \"2020-05-19T14:00:00\"}\n )\n with self.feature(\"organizations:discover-query\"):\n self.get_valid_response(self.org.slug, status_code=201, **payload)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 138, "n_words": 34, "vocab_size": 21, "complexity": 1, "nloc": 12, "token_counts": 102, "n_ast_nodes": 186, "n_identifiers": 9, "d_id": 19363, "documentation": { "docstring": "\n Ensures that a request to this endpoint returns a 201 if projects\n is an empty list.\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 38, "language": "en" } }, { "id": 274468, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/legacy_tf_layers/variable_scope_shim_test.py", "file_name": "variable_scope_shim_test.py", "fun_name": "get_compat_v1_regularization_losses", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def get_compat_v1_regularization_losses(self):\n \n return {\n name: regularizer()\n for name, regularizer in self._tf1_style_var_store._regularizers.items()\n } # pylint: disable=protected-access\n\n\n@test_combinations.generate(test_combinations.combine(mode=[\"eager\"]))", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@test_combinations.generate(test_combinations.combine(mode=[\"eager\"]))", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 59, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 5, "token_counts": 28, "n_ast_nodes": 76, "n_identifiers": 11, "d_id": 81207, "documentation": { "docstring": "Dict w/ regularization losses from `get_variable`&`compat.v1.layers`.", "n_words": 6, "vocab_size": 6, "n_whitespaces": 5, "language": "en" } }, { "id": 177372, "commit_id": 
"dc70d037f21c76c61e1aab02039c4ca87898f7c7", "repo": "networkx", "path": "networkx/generators/tests/test_trees.py", "file_name": "test_trees.py", "fun_name": "test_random_tree_n_zero", "commit_message": "added coverage in generators/tree.py (#6082)", "code": "def test_random_tree_n_zero():\n \n with pytest.raises(nx.NetworkXPointlessConcept):\n T = nx.random_tree(0, seed=1234)\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 21, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 27, "n_ast_nodes": 48, "n_identifiers": 8, "d_id": 42365, "documentation": { "docstring": "Tests if n = 0 then the NetworkXPointlessConcept exception is raised.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 272642, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/layers/merging/add.py", "file_name": "add.py", "fun_name": "add", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def add(inputs, **kwargs):\n \n return Add(**kwargs)(inputs)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 11, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 18, "n_ast_nodes": 32, "n_identifiers": 4, "d_id": 81028, "documentation": { "docstring": "Functional interface to the `tf.keras.layers.Add` layer.\n\n Args:\n inputs: A list of input tensors with the same shape.\n **kwargs: Standard layer keyword arguments.\n\n Returns:\n A tensor as the sum of the inputs. It has the same shape as the inputs.\n\n Examples:\n\n >>> input_shape = (2, 3, 4)\n >>> x1 = tf.random.normal(input_shape)\n >>> x2 = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.add([x1, x2])\n >>> print(y.shape)\n (2, 3, 4)\n\n Used in a functional model:\n\n >>> input1 = tf.keras.layers.Input(shape=(16,))\n >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1)\n >>> input2 = tf.keras.layers.Input(shape=(32,))\n >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2)\n >>> added = tf.keras.layers.add([x1, x2])\n >>> out = tf.keras.layers.Dense(4)(added)\n >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)\n\n ", "n_words": 102, "vocab_size": 62, "n_whitespaces": 177, "language": "en" } }, { "id": 73846, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/models/__init__.py", "file_name": "__init__.py", "fun_name": "get_cached_paths", "commit_message": "Reformat with black", "code": "def get_cached_paths(self):\n \n return [\"/\"]\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 21, "n_identifiers": 2, "d_id": 16141, "documentation": { "docstring": "\n This returns a list of paths to invalidate in a frontend cache\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 27, "language": "en" } }, { "id": 123734, "commit_id": "df4293473d2fb6e887e31522cab5aff95e201581", "repo": "sqlmap", "path": "plugins/generic/misc.py", "file_name": "misc.py", "fun_name": "cleanup", "commit_message": "Fixing DeprecationWarning (logger.warn)", "code": "def cleanup(self, onlyFileTbl=False, udfDict=None, web=False):\n \n\n if web and 
self.webBackdoorFilePath:\n logger.info(\"cleaning up the web files uploaded\")\n\n self.delRemoteFile(self.webStagerFilePath)\n self.delRemoteFile(self.webBackdoorFilePath)\n\n if (not isStackingAvailable() or kb.udfFail) and not conf.direct:\n return\n\n if any((conf.osCmd, conf.osShell)) and Backend.isDbms(DBMS.PGSQL) and kb.copyExecTest:\n return\n\n if Backend.isOs(OS.WINDOWS):\n libtype = \"dynamic-link library\"\n\n elif Backend.isOs(OS.LINUX):\n libtype = \"shared object\"\n\n else:\n libtype = \"shared library\"\n\n if onlyFileTbl:\n logger.debug(\"cleaning up the database management system\")\n else:\n logger.info(\"cleaning up the database management system\")\n\n logger.debug(\"removing support tables\")\n inject.goStacked(\"DROP TABLE %s\" % self.fileTblName, silent=True)\n inject.goStacked(\"DROP TABLE %shex\" % self.fileTblName, silent=True)\n\n if not onlyFileTbl:\n inject.goStacked(\"DROP TABLE %s\" % self.cmdTblName, silent=True)\n\n if Backend.isDbms(DBMS.MSSQL):\n udfDict = {\"master..new_xp_cmdshell\": {}}\n\n if udfDict is None:\n udfDict = getattr(self, \"sysUdfs\", {})\n\n for udf, inpRet in udfDict.items():\n message = \"do you want to remove UDF '%s'? [Y/n] \" % udf\n\n if readInput(message, default='Y', boolean=True):\n dropStr = \"DROP FUNCTION %s\" % udf\n\n if Backend.isDbms(DBMS.PGSQL):\n inp = \", \".join(i for i in inpRet[\"input\"])\n dropStr += \"(%s)\" % inp\n\n logger.debug(\"removing UDF '%s'\" % udf)\n inject.goStacked(dropStr, silent=True)\n\n logger.info(\"database management system cleanup finished\")\n\n warnMsg = \"remember that UDF %s files \" % libtype\n\n if conf.osPwn:\n warnMsg += \"and Metasploit related files in the temporary \"\n warnMsg += \"folder \"\n\n warnMsg += \"saved on the file system can only be deleted \"\n warnMsg += \"manually\"\n logger.warning(warnMsg)\n", "url": "https://github.com/sqlmapproject/sqlmap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 726, "n_words": 203, "vocab_size": 118, "complexity": 20, "nloc": 45, "token_counts": 344, "n_ast_nodes": 594, "n_identifiers": 50, "d_id": 27412, "documentation": { "docstring": "\n Cleanup file system and database from sqlmap create files, tables\n and functions\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 34, "language": "en" } }, { "id": 276843, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/generic_utils.py", "file_name": "generic_utils.py", "fun_name": "func_load", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def func_load(code, defaults=None, closure=None, globs=None):\n \n if isinstance(code, (tuple, list)): # unpack previous dump\n code, defaults, closure = code\n if isinstance(defaults, list):\n defaults = tuple(defaults)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 56, "n_words": 24, "vocab_size": 22, "complexity": 7, "nloc": 18, "token_counts": 147, "n_ast_nodes": 78, "n_identifiers": 8, "d_id": 81753, "documentation": { "docstring": "Deserializes a user defined function.\n\n Args:\n code: bytecode of the function.\n defaults: defaults of the function.\n closure: closure of the function.\n globs: dictionary of global objects.\n\n Returns:\n A function object.\n ", "n_words": 30, "vocab_size": 22, "n_whitespaces": 74, "language": "en" } }, { "id": 75292, "commit_id": 
"d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/images/tests/test_models.py", "file_name": "test_models.py", "fun_name": "assert_orientation_landscape_image_is_correct", "commit_message": "Reformat with black", "code": "def assert_orientation_landscape_image_is_correct(self, rendition):\n \n\n from willow.plugins.pillow import PillowImage\n\n with rendition.get_willow_image() as willow_image:\n image = PillowImage.open(willow_image)\n # Check that the image is the correct size (and not rotated)\n self.assertEqual(image.get_size(), (600, 450))\n # Check that the red flower is in the bottom left\n # The JPEGs have compressed slightly differently so the colours won't be spot on\n colour = image.image.convert(\"RGB\").getpixel((155, 282))\n self.assertAlmostEqual(colour[0], 217, delta=25)\n self.assertAlmostEqual(colour[1], 38, delta=25)\n self.assertAlmostEqual(colour[2], 46, delta=25)\n\n # Check that the water is at the bottom\n colour = image.image.convert(\"RGB\").getpixel((377, 434))\n self.assertAlmostEqual(colour[0], 85, delta=25)\n self.assertAlmostEqual(colour[1], 93, delta=25)\n self.assertAlmostEqual(colour[2], 65, delta=25)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 212, "n_words": 89, "vocab_size": 61, "complexity": 1, "nloc": 13, "token_counts": 177, "n_ast_nodes": 273, "n_identifiers": 18, "d_id": 16387, "documentation": { "docstring": "\n Check that the image has the correct colored pixels in the right places\n so that we know the image did not physically rotate.\n ", "n_words": 23, "vocab_size": 18, "n_whitespaces": 45, "language": "en" } }, { "id": 203587, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/auth/__init__.py", "file_name": "__init__.py", "fun_name": "get_user_model", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_user_model():\n \n try:\n return django_apps.get_model(settings.AUTH_USER_MODEL, require_ready=False)\n except ValueError:\n raise ImproperlyConfigured(\n \"AUTH_USER_MODEL must be of the form 'app_label.model_name'\"\n )\n except LookupError:\n raise ImproperlyConfigured(\n \"AUTH_USER_MODEL refers to model '%s' that has not been installed\"\n % settings.AUTH_USER_MODEL\n )\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 115, "n_words": 35, "vocab_size": 30, "complexity": 3, "nloc": 12, "token_counts": 40, "n_ast_nodes": 71, "n_identifiers": 9, "d_id": 50462, "documentation": { "docstring": "\n Return the User model that is active in this project.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 17, "language": "en" } }, { "id": 226157, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_bar.py", "file_name": "_bar.py", "fun_name": "visible", "commit_message": "switch to black .22", "code": "def visible(self):\n \n return self[\"visible\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 57830, "documentation": { "docstring": "\n Determines whether or not this trace is visible. 
If\n \"legendonly\", the trace is not drawn, but can appear as a\n legend item (provided that the legend itself is visible).\n\n The 'visible' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n [True, False, 'legendonly']\n\n Returns\n -------\n Any\n ", "n_words": 53, "vocab_size": 43, "n_whitespaces": 134, "language": "en" } }, { "id": 264443, "commit_id": "7c105019d8ae9205051c302e7499b33a455f9176", "repo": "netbox", "path": "netbox/utilities/templatetags/builtins/filters.py", "file_name": "filters.py", "fun_name": "fgcolor", "commit_message": "Closes #8600: Document built-in template tags & filters", "code": "def fgcolor(value, dark='000000', light='ffffff'):\n \n value = value.lower().strip('#')\n if not re.match('^[0-9a-f]{6}$', value):\n return ''\n return f'#{foreground_color(value, dark, light)}'\n\n\n@register.filter()", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "@register.filter()", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 36, "n_words": 18, "vocab_size": 17, "complexity": 2, "nloc": 5, "token_counts": 42, "n_ast_nodes": 102, "n_identifiers": 11, "d_id": 77729, "documentation": { "docstring": "\n Return black (#000000) or white (#ffffff) given an arbitrary background color in RRGGBB format. The foreground\n color with the better contrast is returned.\n\n Args:\n value: The background color\n dark: The foreground color to use for light backgrounds\n light: The foreground color to use for dark backgrounds\n ", "n_words": 46, "vocab_size": 32, "n_whitespaces": 80, "language": "en" } }, { "id": 230975, "commit_id": "1d82b8822120db088bfeb6c8eae7ec8df9703783", "repo": "plotly.py", "path": "packages/python/plotly/plotly/matplotlylib/mplexporter/tests/test_basic.py", "file_name": "test_basic.py", "fun_name": "test_image", "commit_message": "Updated distutils.Version to packaging.Version", "code": "def test_image():\n # Test fails for matplotlib 1.5+ because the size of the image\n # generated by matplotlib has changed.\n if Version(matplotlib.__version__) == Version(\"3.4.1\"):\n image_size = 432\n else:\n pytest.skip(\"Test fails for older matplotlib\")\n np.random.seed(0) # image size depends on the seed\n fig, ax = plt.subplots(figsize=(2, 2))\n ax.imshow(np.random.random((10, 10)), cmap=plt.cm.jet, interpolation=\"nearest\")\n _assert_output_equal(\n fake_renderer_output(fig, FakeRenderer),\n f,\n )\n\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 110, "n_words": 55, "vocab_size": 45, "complexity": 2, "nloc": 18, "token_counts": 94, "n_ast_nodes": 159, "n_identifiers": 23, "d_id": 62617, "documentation": { "docstring": "\n opening figure\n opening axes\n draw image of size {image_size} \n closing axes\n closing figure\n ", "n_words": 13, "vocab_size": 9, "n_whitespaces": 159, "language": "en" } }, { "id": 101448, "commit_id": "1022651eb8a7741014f5d2ec7cbfe882120dfa5f", "repo": "faceswap", "path": "tools/preview/preview.py", "file_name": "preview.py", "fun_name": "_build_tabs", "commit_message": "Bugfix: convert - Gif Writer\n - Fix non-launch error on Gif Writer\n - convert plugins - linting\n - convert/fs_media/preview/queue_manager - typing\n - Change convert items from dict to Dataclass", "code": "def _build_tabs(self) -> None:\n \n logger.debug(\"Build Tabs\")\n for section in self.config_tools.sections:\n tab = ttk.Notebook(self)\n self._tabs[section] = {\"tab\": 
tab}\n self.add(tab, text=section.replace(\"_\", \" \").title())\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 75, "n_words": 21, "vocab_size": 20, "complexity": 2, "nloc": 7, "token_counts": 64, "n_ast_nodes": 110, "n_identifiers": 15, "d_id": 20861, "documentation": { "docstring": " Build the notebook tabs for the each configuration section. ", "n_words": 9, "vocab_size": 8, "n_whitespaces": 10, "language": "en" } }, { "id": 212994, "commit_id": "66931d51e1a06797381d3c32c2b1a4400c033357", "repo": "PySimpleGUI", "path": "PySimpleGUI.py", "file_name": "PySimpleGUI.py", "fun_name": "theme_use_custom_titlebar", "commit_message": "Release 4.60.0", "code": "def theme_use_custom_titlebar():\n \n if USE_CUSTOM_TITLEBAR is False:\n return False\n\n return USE_CUSTOM_TITLEBAR or pysimplegui_user_settings.get('-custom titlebar-', False)\n\n", "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 14, "vocab_size": 12, "complexity": 3, "nloc": 4, "token_counts": 23, "n_ast_nodes": 41, "n_identifiers": 4, "d_id": 53561, "documentation": { "docstring": "\n Returns True if a custom titlebar will be / should be used.\n The setting is in the Global Settings window and can be overwridden\n using set_options call\n\n :return: True if a custom titlebar / custom menubar should be used\n :rtype: (bool)\n ", "n_words": 41, "vocab_size": 30, "n_whitespaces": 75, "language": "en" } }, { "id": 138510, "commit_id": "ea791ab0a0f176c94c911ef0eb06ca8fa568de0c", "repo": "ray", "path": "python/ray/data/tests/test_stats.py", "file_name": "test_stats.py", "fun_name": "test_dataset_stats_shuffle", "commit_message": "[Datasets] Print hierarchical stats for multi-stage operations. (#24119)\n\nThe total execution time for multi-stage operations being logged twice in the dataset stats is [confusing to users](https://github.com/ray-project/ray/issues/23915), making it seem like each stage in the operation took the same amount of time. This PR modifies the stats output for multi-stage operations, such that the total execution time is printed out once as a top-level op stats line, with the stats for each of the (sub)stages indented and devoid of the total execution time repeat.\r\n\r\nThis also opens the door for other op-level stats (e.g. peak memory utilization) and per-substage stats (e.g. 
total substage execution time).", "code": "def test_dataset_stats_shuffle(ray_start_regular_shared):\n context = DatasetContext.get_current()\n context.optimize_fuse_stages = True\n ds = ray.data.range(1000, parallelism=10)\n ds = ds.random_shuffle().repartition(1, shuffle=True)\n stats = canonicalize(ds.stats())\n assert (\n stats\n == \n )\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 59, "n_words": 24, "vocab_size": 18, "complexity": 1, "nloc": 41, "token_counts": 63, "n_ast_nodes": 103, "n_identifiers": 16, "d_id": 31449, "documentation": { "docstring": "Stage N read->random_shuffle: executed in T\n\n Substage Z read->random_shuffle_map: N/N blocks executed\n * Remote wall time: T min, T max, T mean, T total\n * Remote cpu time: T min, T max, T mean, T total\n * Output num rows: N min, N max, N mean, N total\n * Output size bytes: N min, N max, N mean, N total\n * Tasks per node: N min, N max, N mean; N nodes used\n\n Substage N random_shuffle_reduce: N/N blocks executed\n * Remote wall time: T min, T max, T mean, T total\n * Remote cpu time: T min, T max, T mean, T total\n * Output num rows: N min, N max, N mean, N total\n * Output size bytes: N min, N max, N mean, N total\n * Tasks per node: N min, N max, N mean; N nodes used\n\nStage N repartition: executed in T\n\n Substage Z repartition_map: N/N blocks executed\n * Remote wall time: T min, T max, T mean, T total\n * Remote cpu time: T min, T max, T mean, T total\n * Output num rows: N min, N max, N mean, N total\n * Output size bytes: N min, N max, N mean, N total\n * Tasks per node: N min, N max, N mean; N nodes used\n\n Substage N repartition_reduce: N/N blocks executed\n * Remote wall time: T min, T max, T mean, T total\n * Remote cpu time: T min, T max, T mean, T total\n * Output num rows: N min, N max, N mean, N total\n * Output size bytes: N min, N max, N mean, N total\n * Tasks per node: N min, N max, N mean; N nodes used\n", "n_words": 280, "vocab_size": 35, "n_whitespaces": 350, "language": "en" } }, { "id": 196819, "commit_id": "f757f3daae6e11ea0cfb7dadc133274d8d74315f", "repo": "sympy", "path": "sympy/series/gruntz.py", "file_name": "gruntz.py", "fun_name": "rewrite", "commit_message": "Reordered imports 2", "code": "def rewrite(e, Omega, x, wsym):\n \n if not isinstance(Omega, SubsSet):\n raise TypeError(\"Omega should be an instance of SubsSet\")\n if len(Omega) == 0:\n raise ValueError(\"Length cannot be 0\")\n # all items in Omega must be exponentials\n for t in Omega.keys():\n if not isinstance(t, exp):\n raise ValueError(\"Value should be exp\")\n rewrites = Omega.rewrites\n Omega = list(Omega.items())\n\n nodes = build_expression_tree(Omega, rewrites)\n Omega.sort(key=lambda x: nodes[x[1]].ht(), reverse=True)\n\n # make sure we know the sign of each exp() term; after the loop,\n # g is going to be the \"w\" - the simplest one in the mrv set\n for g, _ in Omega:\n sig = sign(g.exp, x)\n if sig != 1 and sig != -1:\n raise NotImplementedError('Result depends on the sign of %s' % sig)\n if sig == 1:\n wsym = 1/wsym # if g goes to oo, substitute 1/w\n # O2 is a list, which results by rewriting each item in Omega using \"w\"\n O2 = []\n denominators = []\n for f, var in Omega:\n c = limitinf(f.exp/g.exp, x)\n if c.is_Rational:\n denominators.append(c.q)\n arg = f.exp\n if var in rewrites:\n if not isinstance(rewrites[var], exp):\n raise ValueError(\"Value should be exp\")\n arg = rewrites[var].args[0]\n 
O2.append((var, exp((arg - c*g.exp).expand())*wsym**c))\n\n # Remember that Omega contains subexpressions of \"e\". So now we find\n # them in \"e\" and substitute them for our rewriting, stored in O2\n\n # the following powsimp is necessary to automatically combine exponentials,\n # so that the .xreplace() below succeeds:\n # TODO this should not be necessary\n from sympy.simplify.powsimp import powsimp\n f = powsimp(e, deep=True, combine='exp')\n for a, b in O2:\n f = f.xreplace({a: b})\n\n for _, var in Omega:\n assert not f.has(var)\n\n # finally compute the logarithm of w (logw).\n logw = g.exp\n if sig == 1:\n logw = -logw # log(w)->log(1/w)=-log(w)\n\n # Some parts of SymPy have difficulty computing series expansions with\n # non-integral exponents. The following heuristic improves the situation:\n exponent = reduce(ilcm, denominators, 1)\n f = f.subs({wsym: wsym**exponent})\n logw /= exponent\n\n return f, logw\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 594, "n_words": 319, "vocab_size": 189, "complexity": 16, "nloc": 43, "token_counts": 365, "n_ast_nodes": 589, "n_identifiers": 53, "d_id": 48197, "documentation": { "docstring": "e(x) ... the function\n Omega ... the mrv set\n wsym ... the symbol which is going to be used for w\n\n Returns the rewritten e in terms of w and log(w). See test_rewrite1()\n for examples and correct results.\n ", "n_words": 38, "vocab_size": 30, "n_whitespaces": 53, "language": "en" } }, { "id": 167698, "commit_id": "9612375ca28ade056f15d4338f1bfde5d045c9fc", "repo": "pandas", "path": "pandas/core/config_init.py", "file_name": "config_init.py", "fun_name": "register_converter_cb", "commit_message": "TYP: return values in core/*.py (#47587)\n\n* TYP: return values in core/*.py\r\n\r\n* fix test\r\n\r\n* to_html\r\n\r\n* to_html part 2\r\n\r\n* DataFrame.query\r\n\r\n* more overloads\r\n\r\n* fix query?\r\n\r\n* increase stacklevel by one\r\n\r\n* fix rename_axis\r\n\r\n* and an overload for DataFrame.eval\r\n\r\n* address comments\r\n\r\n* fix typevar", "code": "def register_converter_cb(key) -> None:\n from pandas.plotting import (\n deregister_matplotlib_converters,\n register_matplotlib_converters,\n )\n\n if cf.get_option(key):\n register_matplotlib_converters()\n else:\n deregister_matplotlib_converters()\n\n\nwith cf.config_prefix(\"plotting.matplotlib\"):\n cf.register_option(\n \"register_converters\",\n \"auto\",\n register_converter_doc,\n validator=is_one_of_factory([\"auto\", True, False]),\n cb=register_converter_cb,\n )\n\n# ------\n# Styler\n# ------\n\nstyler_sparse_index_doc = \n\nstyler_sparse_columns_doc = \n\nstyler_render_repr = \n\nstyler_max_elements = \n\nstyler_max_rows = \n\nstyler_max_columns = \n\nstyler_precision = \n\nstyler_decimal = \n\nstyler_thousands = \n\nstyler_na_rep = \n\nstyler_escape = \n\nstyler_formatter = \n\nstyler_multirow_align = \n\nstyler_multicol_align = r\n\nstyler_hrules = \n\nstyler_environment = \n\nstyler_encoding = \n\nstyler_mathjax = \n\nwith cf.config_prefix(\"styler\"):\n cf.register_option(\"sparse.index\", True, styler_sparse_index_doc, validator=is_bool)\n\n cf.register_option(\n \"sparse.columns\", True, styler_sparse_columns_doc, validator=is_bool\n )\n\n cf.register_option(\n \"render.repr\",\n \"html\",\n styler_render_repr,\n validator=is_one_of_factory([\"html\", \"latex\"]),\n )\n\n cf.register_option(\n \"render.max_elements\",\n 2**18,\n styler_max_elements,\n 
validator=is_nonnegative_int,\n )\n\n cf.register_option(\n \"render.max_rows\",\n None,\n styler_max_rows,\n validator=is_nonnegative_int,\n )\n\n cf.register_option(\n \"render.max_columns\",\n None,\n styler_max_columns,\n validator=is_nonnegative_int,\n )\n\n cf.register_option(\"render.encoding\", \"utf-8\", styler_encoding, validator=is_str)\n\n cf.register_option(\"format.decimal\", \".\", styler_decimal, validator=is_str)\n\n cf.register_option(\n \"format.precision\", 6, styler_precision, validator=is_nonnegative_int\n )\n\n cf.register_option(\n \"format.thousands\",\n None,\n styler_thousands,\n validator=is_instance_factory([type(None), str]),\n )\n\n cf.register_option(\n \"format.na_rep\",\n None,\n styler_na_rep,\n validator=is_instance_factory([type(None), str]),\n )\n\n cf.register_option(\n \"format.escape\",\n None,\n styler_escape,\n validator=is_one_of_factory([None, \"html\", \"latex\"]),\n )\n\n cf.register_option(\n \"format.formatter\",\n None,\n styler_formatter,\n validator=is_instance_factory([type(None), dict, Callable, str]),\n )\n\n cf.register_option(\"html.mathjax\", True, styler_mathjax, validator=is_bool)\n\n cf.register_option(\n \"latex.multirow_align\",\n \"c\",\n styler_multirow_align,\n validator=is_one_of_factory([\"c\", \"t\", \"b\", \"naive\"]),\n )\n\n val_mca = [\"r\", \"|r|\", \"|r\", \"r|\", \"c\", \"|c|\", \"|c\", \"c|\", \"l\", \"|l|\", \"|l\", \"l|\"]\n val_mca += [\"naive-l\", \"naive-r\"]\n cf.register_option(\n \"latex.multicol_align\",\n \"r\",\n styler_multicol_align,\n validator=is_one_of_factory(val_mca),\n )\n\n cf.register_option(\"latex.hrules\", False, styler_hrules, validator=is_bool)\n\n cf.register_option(\n \"latex.environment\",\n None,\n styler_environment,\n validator=is_instance_factory([type(None), str]),\n )\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 695, "n_words": 200, "vocab_size": 127, "complexity": 2, "nloc": 9, "token_counts": 34, "n_ast_nodes": 854, "n_identifiers": 41, "d_id": 40081, "documentation": { "docstring": "\n: bool\n Whether to sparsify the display of a hierarchical index. Setting to False will\n display each explicit level element in a hierarchical key for each row.\n\n: bool\n Whether to sparsify the display of hierarchical columns. Setting to False will\n display each explicit level element in a hierarchical key for each column.\n\n: str\n Determine which output to use in Jupyter Notebook in {\"html\", \"latex\"}.\n\n: int\n The maximum number of data-cell () elements that will be rendered before\n trimming will occur over columns, rows or both if needed.\n\n: int, optional\n The maximum number of rows that will be rendered. May still be reduced to\n satsify ``max_elements``, which takes precedence.\n\n: int, optional\n The maximum number of columns that will be rendered. 
May still be reduced to\n satsify ``max_elements``, which takes precedence.\n\n: int\n The precision for floats and complex numbers.\n\n: str\n The character representation for the decimal separator for floats and complex.\n\n: str, optional\n The character representation for thousands separator for floats, int and complex.\n\n: str, optional\n The string representation for values identified as missing.\n\n: str, optional\n Whether to escape certain characters according to the given context; html or latex.\n\n: str, callable, dict, optional\n A formatter object to be used as default within ``Styler.format``.\n\n: {\"c\", \"t\", \"b\"}\n The specifier for vertical alignment of sparsified LaTeX multirows.\n\n: {\"r\", \"c\", \"l\", \"naive-l\", \"naive-r\"}\n The specifier for horizontal alignment of sparsified LaTeX multicolumns. Pipe\n decorators can also be added to non-naive values to draw vertical\n rules, e.g. \"\\|r\" will draw a rule on the left side of right aligned merged cells.\n\n: bool\n Whether to add horizontal rules on top and bottom and below the headers.\n\n: str\n The environment to replace ``\\\\begin{table}``. If \"longtable\" is used results\n in a specific longtable environment format.\n\n: str\n The encoding used for output HTML and LaTeX files.\n\n: bool\n If False will render special CSS classes to table attributes that indicate Mathjax\n will not be used in Jupyter Notebook.\n", "n_words": 334, "vocab_size": 162, "n_whitespaces": 397, "language": "en" } }, { "id": 64634, "commit_id": "119273e633ec8e56c7d5c4649ef81c3deeb5f7d2", "repo": "erpnext", "path": "erpnext/accounts/report/cash_flow/custom_cash_flow.py", "file_name": "custom_cash_flow.py", "fun_name": "get_accounts_in_mappers", "commit_message": "fix: custom cash flow mapper doesn't show any data", "code": "def get_accounts_in_mappers(mapping_names):\n\treturn frappe.db.sql(, (', '.join('%s' % d for d in mapping_names)))\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 10, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 9, "token_counts": 29, "n_ast_nodes": 50, "n_identifiers": 7, "d_id": 13684, "documentation": { "docstring": "\n\t\tselect cfma.name, cfm.label, cfm.is_working_capital, cfm.is_income_tax_liability,\n\t\tcfm.is_income_tax_expense, cfm.is_finance_cost, cfm.is_finance_cost_adjustment, cfma.account\n\t\tfrom `tabCash Flow Mapping Accounts` cfma\n\t\tjoin `tabCash Flow Mapping` cfm on cfma.parent=cfm.name\n\t\twhere cfma.parent in (%s)\n\t\torder by cfm.is_working_capital\n\t", "n_words": 29, "vocab_size": 27, "n_whitespaces": 23, "language": "en" } }, { "id": 232039, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/mapbox/_layer.py", "file_name": "_layer.py", "fun_name": "circle", "commit_message": "switch to black .22", "code": "def circle(self):\n \n return self[\"circle\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 63483, "documentation": { "docstring": "\n The 'circle' property is an instance of Circle\n that may be specified as:\n - An instance of :class:`plotly.graph_objs.layout.mapbox.layer.Circle`\n - A dict of string/value properties that will be passed\n to the Circle 
constructor\n\n Supported dict properties:\n\n radius\n Sets the circle radius\n (mapbox.layer.paint.circle-radius). Has an\n effect only when `type` is set to \"circle\".\n\n Returns\n -------\n plotly.graph_objs.layout.mapbox.layer.Circle\n ", "n_words": 54, "vocab_size": 41, "n_whitespaces": 209, "language": "en" } }, { "id": 205685, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/indexes.py", "file_name": "indexes.py", "fun_name": "set_name_with_model", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def set_name_with_model(self, model):\n \n _, table_name = split_identifier(model._meta.db_table)\n column_names = [\n model._meta.get_field(field_name).column\n for field_name, order in self.fields_orders\n ]\n column_names_with_order = [\n ((\"-%s\" if order else \"%s\") % column_name)\n for column_name, (field_name, order) in zip(\n column_names, self.fields_orders\n )\n ]\n # The length of the parts of the name is based on the default max\n # length of 30 characters.\n hash_data = [table_name] + column_names_with_order + [self.suffix]\n self.name = \"%s_%s_%s\" % (\n table_name[:11],\n column_names[0][:7],\n \"%s_%s\" % (names_digest(*hash_data, length=6), self.suffix),\n )\n if len(self.name) > self.max_name_length:\n raise ValueError(\n \"Index too long for multiple database support. Is self.suffix \"\n \"longer than 3 characters?\"\n )\n if self.name[0] == \"_\" or self.name[0].isdigit():\n self.name = \"D%s\" % self.name[1:]\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 366, "n_words": 109, "vocab_size": 81, "complexity": 7, "nloc": 25, "token_counts": 180, "n_ast_nodes": 289, "n_identifiers": 26, "d_id": 51168, "documentation": { "docstring": "\n Generate a unique name for the index.\n\n The name is divided into 3 parts - table name (12 chars), field name\n (8 chars) and unique hash + suffix (10 chars). 
Each part is made to\n fit its size by truncating the excess length.\n ", "n_words": 43, "vocab_size": 37, "n_whitespaces": 79, "language": "en" } }, { "id": 181800, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tests/tpot_tests.py", "file_name": "tpot_tests.py", "fun_name": "test_sparse_matrix", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def test_sparse_matrix():\n \n tpot_obj = TPOTClassifier(\n random_state=42,\n population_size=1,\n offspring_size=2,\n generations=1,\n verbosity=0,\n config_dict='TPOT light'\n )\n\n assert_raises(ValueError, tpot_obj.fit, sparse_features, sparse_target)\n\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 71, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 10, "token_counts": 45, "n_ast_nodes": 68, "n_identifiers": 14, "d_id": 43586, "documentation": { "docstring": "Assert that the TPOT fit function will raise a ValueError in a sparse matrix with config_dict='TPOT light'.", "n_words": 17, "vocab_size": 16, "n_whitespaces": 16, "language": "en" } }, { "id": 60470, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/build_env.py", "file_name": "build_env.py", "fun_name": "check_requirements", "commit_message": "upd; format", "code": "def check_requirements(self, reqs):\n # type: (Iterable[str]) -> Tuple[Set[Tuple[str, str]], Set[str]]\n \n missing = set()\n conflicting = set()\n if reqs:\n ws = WorkingSet(self._lib_dirs)\n for req in reqs:\n try:\n if ws.find(Requirement.parse(req)) is None:\n missing.add(req)\n except VersionConflict as e:\n conflicting.add((str(e.args[0].as_requirement()),\n str(e.args[1])))\n return conflicting, missing\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 231, "n_words": 40, "vocab_size": 34, "complexity": 5, "nloc": 13, "token_counts": 96, "n_ast_nodes": 158, "n_identifiers": 19, "d_id": 12176, "documentation": { "docstring": "Return 2 sets:\n - conflicting requirements: set of (installed, wanted) reqs tuples\n - missing requirements: set of reqs\n ", "n_words": 18, "vocab_size": 13, "n_whitespaces": 47, "language": "en" } }, { "id": 271843, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training_utils_v1.py", "file_name": "training_utils_v1.py", "fun_name": "unpack_iterator_input", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def unpack_iterator_input(iterator):\n \n try:\n next_element = iterator.get_next()\n except tf.errors.OutOfRangeError:\n raise RuntimeError(\n \"Your dataset iterator ran out of data; \"\n \"Make sure that your dataset can generate \"\n \"required number of samples.\"\n )\n\n if isinstance(next_element, (list, tuple)):\n if len(next_element) not in [2, 3]:\n raise ValueError(\n \"Please provide model inputs as a list or tuple of 2 or 3 \"\n \"elements: (input, target) or (input, target, sample_weights) \"\n \"Received %s\" % next_element\n )\n if len(next_element) == 2:\n x, y = next_element\n weights = None\n else:\n x, y, weights = next_element\n else:\n x = next_element\n y = None\n weights = None\n return x, y, weights\n\n", "url": 
"https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 315, "n_words": 101, "vocab_size": 67, "complexity": 5, "nloc": 26, "token_counts": 105, "n_ast_nodes": 180, "n_identifiers": 16, "d_id": 80862, "documentation": { "docstring": "Convert a dataset iterator to a tuple of tensors `x, y, sample_weights`.\n\n Args:\n iterator: Instance of a dataset iterator.\n\n Returns:\n Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.\n ", "n_words": 33, "vocab_size": 25, "n_whitespaces": 52, "language": "en" } }, { "id": 266037, "commit_id": "ea6d86e6c4bb6037465410db6205a7471bc81a6c", "repo": "netbox", "path": "netbox/extras/tests/test_customfields.py", "file_name": "test_customfields.py", "fun_name": "test_missing_required_field", "commit_message": "Closes #10052: The cf attribute now returns deserialized custom field data", "code": "def test_missing_required_field(self):\n \n cf3 = CustomField(type=CustomFieldTypeChoices.TYPE_TEXT, name='baz', required=True)\n cf3.save()\n cf3.content_types.set([ContentType.objects.get_for_model(Site)])\n\n site = Site(name='Test Site', slug='test-site')\n\n # Set custom field data with a required field omitted\n site.custom_field_data['foo'] = 'abc'\n with self.assertRaises(ValidationError):\n site.clean()\n\n site.custom_field_data['baz'] = 'def'\n site.clean()\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 115, "n_words": 34, "vocab_size": 28, "complexity": 1, "nloc": 10, "token_counts": 92, "n_ast_nodes": 165, "n_identifiers": 22, "d_id": 78274, "documentation": { "docstring": "\n Check that a ValidationError is raised if any required custom fields are not present.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 29, "language": "en" } }, { "id": 218384, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/inspect.py", "file_name": "inspect.py", "fun_name": "get_annotations", "commit_message": "add python 3.10.4 for windows", "code": "def get_annotations(obj, *, globals=None, locals=None, eval_str=False):\n \n if isinstance(obj, type):\n # class\n obj_dict = getattr(obj, '__dict__', None)\n if obj_dict and hasattr(obj_dict, 'get'):\n ann = obj_dict.get('__annotations__', None)\n if isinstance(ann, types.GetSetDescriptorType):\n ann = None\n else:\n ann = None\n\n obj_globals = None\n module_name = getattr(obj, '__module__', None)\n if module_name:\n module = sys.modules.get(module_name, None)\n if module:\n obj_globals = getattr(module, '__dict__', None)\n obj_locals = dict(vars(obj))\n unwrap = obj\n elif isinstance(obj, types.ModuleType):\n # module\n ann = getattr(obj, '__annotations__', None)\n obj_globals = getattr(obj, '__dict__')\n obj_locals = None\n unwrap = None\n elif callable(obj):\n # this includes types.Function, types.BuiltinFunctionType,\n # types.BuiltinMethodType, functools.partial, functools.singledispatch,\n # \"class funclike\" from Lib/test/test_inspect... 
on and on it goes.\n ann = getattr(obj, '__annotations__', None)\n obj_globals = getattr(obj, '__globals__', None)\n obj_locals = None\n unwrap = obj\n else:\n raise TypeError(f\"{obj!r} is not a module, class, or callable.\")\n\n if ann is None:\n return {}\n\n if not isinstance(ann, dict):\n raise ValueError(f\"{obj!r}.__annotations__ is neither a dict nor None\")\n\n if not ann:\n return {}\n\n if not eval_str:\n return dict(ann)\n\n if unwrap is not None:\n while True:\n if hasattr(unwrap, '__wrapped__'):\n unwrap = unwrap.__wrapped__\n continue\n if isinstance(unwrap, functools.partial):\n unwrap = unwrap.func\n continue\n break\n if hasattr(unwrap, \"__globals__\"):\n obj_globals = unwrap.__globals__\n\n if globals is None:\n globals = obj_globals\n if locals is None:\n locals = obj_locals\n\n return_value = {key:\n value if not isinstance(value, str) else eval(value, globals, locals)\n for key, value in ann.items() }\n return return_value\n\n\n# ----------------------------------------------------------- type-checking", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 676, "n_words": 222, "vocab_size": 108, "complexity": 22, "nloc": 56, "token_counts": 347, "n_ast_nodes": 573, "n_identifiers": 38, "d_id": 55272, "documentation": { "docstring": "Compute the annotations dict for an object.\n\n obj may be a callable, class, or module.\n Passing in an object of any other type raises TypeError.\n\n Returns a dict. get_annotations() returns a new dict every time\n it's called; calling it twice on the same object will return two\n different but equivalent dicts.\n\n This function handles several details for you:\n\n * If eval_str is true, values of type str will\n be un-stringized using eval(). This is intended\n for use with stringized annotations\n (\"from __future__ import annotations\").\n * If obj doesn't have an annotations dict, returns an\n empty dict. (Functions and methods always have an\n annotations dict; classes, modules, and other types of\n callables may not.)\n * Ignores inherited annotations on classes. If a class\n doesn't have its own annotations dict, returns an empty dict.\n * All accesses to object members and dict values are done\n using getattr() and dict.get() for safety.\n * Always, always, always returns a freshly-created dict.\n\n eval_str controls whether or not values of type str are replaced\n with the result of calling eval() on those values:\n\n * If eval_str is true, eval() is called on values of type str.\n * If eval_str is false (the default), values of type str are unchanged.\n\n globals and locals are passed in to eval(); see the documentation\n for eval() for more information. 
If either globals or locals is\n None, this function may replace that value with a context-specific\n default, contingent on type(obj):\n\n * If obj is a module, globals defaults to obj.__dict__.\n * If obj is a class, globals defaults to\n sys.modules[obj.__module__].__dict__ and locals\n defaults to the obj class namespace.\n * If obj is a callable, globals defaults to obj.__globals__,\n although if obj is a wrapped function (using\n functools.update_wrapper()) it is first unwrapped.\n ", "n_words": 290, "vocab_size": 146, "n_whitespaces": 468, "language": "en" } }, { "id": 280645, "commit_id": "e69dd22bc51b28b9f311c81abed92dfe46e82960", "repo": "keras", "path": "keras/engine/training_v1.py", "file_name": "training_v1.py", "fun_name": "load_weights", "commit_message": "Add ability to do partial reloading of v3 models.\n\nPiperOrigin-RevId: 493123409", "code": "def load_weights(self, filepath, by_name=False, skip_mismatch=False):\n \n if backend.is_tpu_strategy(self._distribution_strategy):\n if self._distribution_strategy.extended.steps_per_run > 1 and (\n not saving_utils.is_hdf5_filepath(filepath)\n ):\n raise ValueError(\n \"Load weights is not yet supported with TPUStrategy \"\n \"with steps_per_run greater than 1.\"\n )\n return super().load_weights(\n filepath, by_name=by_name, skip_mismatch=skip_mismatch\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 183, "n_words": 39, "vocab_size": 35, "complexity": 4, "nloc": 12, "token_counts": 70, "n_ast_nodes": 113, "n_identifiers": 14, "d_id": 83407, "documentation": { "docstring": "Loads all layer weights, either from a TensorFlow or an HDF5 file.\n\n If `by_name` is False weights are loaded based on the network's\n topology. This means the architecture should be the same as when the\n weights were saved. Note that layers that don't have weights are not\n taken into account in the topological ordering, so adding or removing\n layers is fine as long as they don't have weights.\n\n If `by_name` is True, weights are loaded into layers only if they share\n the same name. This is useful for fine-tuning or transfer-learning\n models where some of the layers have changed.\n\n Only topological loading (`by_name=False`) is supported when loading\n weights from the TensorFlow format. Note that topological loading\n differs slightly between TensorFlow and HDF5 formats for user-defined\n classes inheriting from `tf.keras.Model`: HDF5 loads based on a\n flattened list of weights, while the TensorFlow format loads based on\n the object-local names of attributes to which layers are assigned in the\n `Model`'s constructor.\n\n Args:\n filepath: String, path to the weights file to load. For weight files\n in TensorFlow format, this is the file prefix (the same as was\n passed to `save_weights`).\n by_name: Boolean, whether to load weights by name or by topological\n order. Only topological loading is supported for weight files in\n TensorFlow format.\n skip_mismatch: Boolean, whether to skip loading of layers where\n there is a mismatch in the number of weights, or a mismatch in\n the shape of the weight (only valid when `by_name=True`).\n\n Returns:\n When loading a weight file in TensorFlow format, returns the same\n status object as `tf.train.Checkpoint.restore`. 
When graph building,\n restore ops are run automatically as soon as the network is built\n (on first call for user-defined classes inheriting from `Model`,\n immediately if it is already built).\n\n When loading weights in HDF5 format, returns `None`.\n\n Raises:\n ImportError: If h5py is not available and the weight file is in HDF5\n format.\n ValueError: If `skip_mismatch` is set to `True` when `by_name` is\n `False`.\n ", "n_words": 321, "vocab_size": 159, "n_whitespaces": 694, "language": "en" } }, { "id": 65345, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/sales_register/sales_register.py", "file_name": "sales_register.py", "fun_name": "get_mode_of_payments", "commit_message": "style: format code with black", "code": "def get_mode_of_payments(invoice_list):\n\tmode_of_payments = {}\n\tif invoice_list:\n\t\tinv_mop = frappe.db.sql(\n\t\t\t\n\t\t\t% \", \".join([\"%s\"] * len(invoice_list)),\n\t\t\ttuple(invoice_list),\n\t\t\tas_dict=1,\n\t\t)\n\n\t\tfor d in inv_mop:\n\t\t\tmode_of_payments.setdefault(d.parent, []).append(d.mode_of_payment)\n\n\treturn mode_of_payments\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 15, "n_words": 26, "vocab_size": 24, "complexity": 3, "nloc": 13, "token_counts": 71, "n_ast_nodes": 116, "n_identifiers": 16, "d_id": 13866, "documentation": { "docstring": "select parent, mode_of_payment\n\t\t\tfrom `tabSales Invoice Payment` where parent in (%s) group by parent, mode_of_payment", "n_words": 15, "vocab_size": 13, "n_whitespaces": 13, "language": "en" } }, { "id": 180043, "commit_id": "dc6175a21d7ccf922d53379df5a35111bd1093fd", "repo": "gradio", "path": "gradio/utils.py", "file_name": "utils.py", "fun_name": "assert_configs_are_equivalent_besides_ids", "commit_message": "tabbed-interface-rewritten (#958)", "code": "def assert_configs_are_equivalent_besides_ids(config1, config2):\n \n assert config1[\"mode\"] == config2[\"mode\"], \"Modes are different\"\n assert config1[\"theme\"] == config2[\"theme\"], \"Themes are different\"\n assert len(config1[\"components\"]) == len(\n config2[\"components\"]\n ), \"# of components are different\"\n\n mapping = {}\n\n for c1, c2 in zip(config1[\"components\"], config2[\"components\"]):\n c1, c2 = deepcopy(c1), deepcopy(c2)\n mapping[c1[\"id\"]] = c2[\"id\"]\n c1.pop(\"id\")\n c2.pop(\"id\")\n assert c1 == c2, \"{} does not match {}\".format(c1, c2)\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 120, "n_words": 57, "vocab_size": 43, "complexity": 6, "nloc": 27, "token_counts": 310, "n_ast_nodes": 213, "n_identifiers": 11, "d_id": 43068, "documentation": { "docstring": "Allows you to test if two different Blocks configs produce the same demo.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 267729, "commit_id": "05608b20e8f875d51866a184f8c579fe60498e05", "repo": "ansible", "path": "lib/ansible/cli/galaxy.py", "file_name": "galaxy.py", "fun_name": "execute_list_collection", "commit_message": "Fix listing collections that are missing the metadata required by build (#76596)\n\n* Rethread pr/70185 through the dependency resolver\r\n\r\nHang optional metadata toggle on the ConcreteArtifactsManager instead of threading it through whole list codepath\r\n\r\nDon't error while listing collections if a collection's metadata is 
missing keys required for building a collection.\r\n\r\nGive an informative warning if metadata has been badly formatted.\r\n\r\nCo-authored-by: Sam Doran ", "code": "def execute_list_collection(self, artifacts_manager=None):\n \n if artifacts_manager is not None:\n artifacts_manager.require_build_metadata = False\n\n output_format = context.CLIARGS['output_format']\n collections_search_paths = set(context.CLIARGS['collections_path'])\n collection_name = context.CLIARGS['collection']\n default_collections_path = AnsibleCollectionConfig.collection_paths\n collections_in_paths = {}\n\n warnings = []\n path_found = False\n collection_found = False\n for path in collections_search_paths:\n collection_path = GalaxyCLI._resolve_path(path)\n if not os.path.exists(path):\n if path in default_collections_path:\n # don't warn for missing default paths\n continue\n warnings.append(\"- the configured path {0} does not exist.\".format(collection_path))\n continue\n\n if not os.path.isdir(collection_path):\n warnings.append(\"- the configured path {0}, exists, but it is not a directory.\".format(collection_path))\n continue\n\n path_found = True\n\n if collection_name:\n # list a specific collection\n\n validate_collection_name(collection_name)\n namespace, collection = collection_name.split('.')\n\n collection_path = validate_collection_path(collection_path)\n b_collection_path = to_bytes(os.path.join(collection_path, namespace, collection), errors='surrogate_or_strict')\n\n if not os.path.exists(b_collection_path):\n warnings.append(\"- unable to find {0} in collection paths\".format(collection_name))\n continue\n\n if not os.path.isdir(collection_path):\n warnings.append(\"- the configured path {0}, exists, but it is not a directory.\".format(collection_path))\n continue\n\n collection_found = True\n\n try:\n collection = Requirement.from_dir_path_as_unknown(\n b_collection_path,\n artifacts_manager,\n )\n except ValueError as val_err:\n six.raise_from(AnsibleError(val_err), val_err)\n\n if output_format in {'yaml', 'json'}:\n collections_in_paths[collection_path] = {\n collection.fqcn: {'version': collection.ver}\n }\n\n continue\n\n fqcn_width, version_width = _get_collection_widths([collection])\n\n _display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)\n _display_collection(collection, fqcn_width, version_width)\n\n else:\n # list all collections\n collection_path = validate_collection_path(path)\n if os.path.isdir(collection_path):\n display.vvv(\"Searching {0} for collections\".format(collection_path))\n collections = list(find_existing_collections(\n collection_path, artifacts_manager,\n ))\n else:\n # There was no 'ansible_collections/' directory in the path, so there\n # or no collections here.\n display.vvv(\"No 'ansible_collections' directory found at {0}\".format(collection_path))\n continue\n\n if not collections:\n display.vvv(\"No collections found at {0}\".format(collection_path))\n continue\n\n if output_format in {'yaml', 'json'}:\n collections_in_paths[collection_path] = {\n collection.fqcn: {'version': collection.ver} for collection in collections\n }\n\n continue\n\n # Display header\n fqcn_width, version_width = _get_collection_widths(collections)\n _display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)\n\n # Sort collections by the namespace and name\n for collection in sorted(collections, key=to_text):\n _display_collection(collection, fqcn_width, version_width)\n\n # Do not warn if the specific collection was found 
in any of the search paths\n if collection_found and collection_name:\n warnings = []\n\n for w in warnings:\n display.warning(w)\n\n if not path_found:\n raise AnsibleOptionsError(\"- None of the provided paths were usable. Please specify a valid path with --{0}s-path\".format(context.CLIARGS['type']))\n\n if output_format == 'json':\n display.display(json.dumps(collections_in_paths))\n elif output_format == 'yaml':\n display.display(yaml_dump(collections_in_paths))\n\n return 0\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 1630, "n_words": 332, "vocab_size": 160, "complexity": 22, "nloc": 82, "token_counts": 529, "n_ast_nodes": 892, "n_identifiers": 63, "d_id": 79020, "documentation": { "docstring": "\n List all collections installed on the local system\n\n :param artifacts_manager: Artifacts manager.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 34, "language": "en" } }, { "id": 227384, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_layout.py", "file_name": "_layout.py", "fun_name": "piecolorway", "commit_message": "switch to black .22", "code": "def piecolorway(self):\n \n return self[\"piecolorway\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 59057, "documentation": { "docstring": "\n Sets the default pie slice colors. Defaults to the main\n `colorway` used for trace colors. If you specify a new list\n here it can still be extended with lighter and darker colors,\n see `extendpiecolors`.\n\n The 'piecolorway' property is a colorlist that may be specified\n as a tuple, list, one-dimensional numpy array, or pandas Series of valid\n color strings\n\n Returns\n -------\n list\n ", "n_words": 61, "vocab_size": 55, "n_whitespaces": 139, "language": "en" } }, { "id": 276019, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/saved_model/load.py", "file_name": "load.py", "fun_name": "_generate_object_paths", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _generate_object_paths(object_graph_def):\n \n paths = {0: \"root\"}\n nodes_to_visit = [0]\n\n while nodes_to_visit:\n current_node = nodes_to_visit.pop()\n current_path = paths[current_node]\n for reference in object_graph_def.nodes[current_node].children:\n if reference.node_id in paths:\n continue\n paths[reference.node_id] = \"{}.{}\".format(\n current_path, reference.local_name\n )\n nodes_to_visit.append(reference.node_id)\n\n return paths\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 145, "n_words": 35, "vocab_size": 29, "complexity": 4, "nloc": 14, "token_counts": 81, "n_ast_nodes": 133, "n_identifiers": 14, "d_id": 81529, "documentation": { "docstring": "Traverses through an ObjectGraphDef and builds a map of all node paths.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 226312, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_box.py", "file_name": "_box.py", "fun_name": "notchwidth", 
"commit_message": "switch to black .22", "code": "def notchwidth(self):\n \n return self[\"notchwidth\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 57985, "documentation": { "docstring": "\n Sets the width of the notches relative to the box' width. For\n example, with 0, the notches are as wide as the box(es).\n\n The 'notchwidth' property is a number and may be specified as:\n - An int or float in the interval [0, 0.5]\n\n Returns\n -------\n int|float\n ", "n_words": 47, "vocab_size": 40, "n_whitespaces": 106, "language": "en" } }, { "id": 113277, "commit_id": "97d067e614243f06ed1f8e2d389512977fff8828", "repo": "nni", "path": "nni/compression/pytorch/utils/utils.py", "file_name": "utils.py", "fun_name": "python_slice_replace", "commit_message": "Speedup enhancement (#4925)", "code": "def python_slice_replace(funcstr):\n \n # parse the input parameters\n pattern = 'torch\\.slice\\((.*)\\)'\n parameter_str = re.findall(pattern, funcstr)\n parameters = re.split(',', parameter_str[0])\n target_tensor = parameters[0]\n dim = int(parameters[1])\n dim_str = ','.join([':']*(dim) + [':'.join(parameters[2:])])\n\n print('%s[%s]' % (target_tensor, dim_str))\n new_str = funcstr.replace(\n 'torch.slice(%s)' % parameter_str[0], '%s[%s]' % (target_tensor, dim_str))\n return new_str\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 85, "n_words": 45, "vocab_size": 33, "complexity": 1, "nloc": 11, "token_counts": 107, "n_ast_nodes": 182, "n_identifiers": 16, "d_id": 24877, "documentation": { "docstring": "\n translate the torch.slice to the appropriate python str that can be replace\n in the forward function string.\n\n Parameters\n ----------\n funcstr: str\n the str that calling the torch.slice, for example:\n _8 = torch.slice(attention_mask, 0, 0, 9223372036854775807, 1)\n\n Returns:\n new_str: str\n the string that should replace the original one\n ", "n_words": 47, "vocab_size": 34, "n_whitespaces": 93, "language": "en" } }, { "id": 195059, "commit_id": "ecdfbd0bb2ab76876e9fd3817d4502c3938a2ade", "repo": "ParlAI", "path": "projects/style_gen/modules.py", "file_name": "modules.py", "fun_name": "forward", "commit_message": "Decoder-Only Transformer (#4329)\n\n* quick and dirty decoder-only implementation\r\n\r\n* fix decoder_only incremental decoding\r\n\r\n* remove unused code, add some comments, propogate func signature change\r\n\r\n* consolidate code in decoder.py\r\n\r\n* unify encoder_state\r\n\r\n* export PassThroughEncoder\r\n\r\n* add missing build_ functions\r\n\r\n* defaults in TransformerDecoderLayer __init__\r\n\r\n* comments, consolidating more logic, simplified forward_layers args\r\n\r\n* resize token embeddings and unit test\r\n\r\n* attempt to suppress some unused import warnings\r\n\r\n* padded_tensor fp16 friendly\r\n\r\n* autoformat\r\n\r\n* decoder_only -> decoder\r\n\r\n* more documentation\r\n\r\n* update name in test\r\n\r\n* add missing dict args\r\n\r\n* more argument massaging\r\n\r\n* update TestBartDistillation::test_narrow_distillation_losses numbers\r\n\r\n* update TestTransformerDistillation::test_narrow_distillation_losses numbers\r\n\r\n* fix _pad_tensor in seeker\r\n\r\nCo-authored-by: klshuster ", "code": "def forward(self, input, 
encoder_state, embedded_input=None, incr_state=None):\n \n\n encoder_output, encoder_mask = encoder_state\n\n if input is not None:\n seq_len = input.size(1)\n positions = input.new(seq_len).long()\n else:\n seq_len = embedded_input.size(1)\n positions = embedded_input.new(seq_len).long()\n positions = torch.arange(seq_len, out=positions).unsqueeze(0)\n\n if incr_state is not None:\n # We're doing incremental decoding, so select only the most recent position\n if input is not None:\n input = input[:, -1:]\n if embedded_input is not None:\n embedded_input = embedded_input[:, -1:, :]\n if positions is not None:\n positions = positions[:, -1:]\n else:\n incr_state = {}\n\n if embedded_input is not None:\n tensor = embedded_input # No need to copy because we only reassign below\n else:\n tensor = self.embeddings(input)\n if self.embeddings_scale:\n tensor = tensor * np.sqrt(self.dim)\n if self.variant == 'xlm':\n tensor = self.norm_embeddings(tensor)\n if positions.max().item() > self.n_positions:\n warn_once(\n 'You are inputting a sequence of {x} length, but only have '\n '--n-positions {y}. Set --truncate or increase --n-positions'.format(\n x=positions.max().item(), y=self.n_positions\n )\n )\n tensor = tensor + self.position_embeddings(positions).expand_as(tensor)\n\n if self.variant == 'bart':\n tensor = self.norm_embeddings(tensor)\n\n tensor = self.dropout(tensor) # --dropout\n\n new_incr_state = {}\n if getattr(self.layers, 'is_model_parallel', False):\n tensor, new_incr_state = self._apply_model_parallel(\n tensor, encoder_output, encoder_mask, incr_state=incr_state\n )\n else:\n for idx, layer in enumerate(self.layers):\n tensor, new_incr_state[idx] = layer(\n x=tensor,\n encoder_output=encoder_output,\n encoder_mask=encoder_mask,\n incr_state=incr_state.get(idx),\n )\n\n if self.variant == 'prelayernorm':\n tensor = self.norm_embeddings(tensor)\n\n return tensor, new_incr_state\n\n", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 795, "n_words": 203, "vocab_size": 113, "complexity": 14, "nloc": 53, "token_counts": 378, "n_ast_nodes": 608, "n_identifiers": 43, "d_id": 47178, "documentation": { "docstring": "\n Forward pass with the ability to pass in token-embedded inputs.\n ", "n_words": 10, "vocab_size": 9, "n_whitespaces": 25, "language": "en" } }, { "id": 109118, "commit_id": "2d918ba09155810194bb4ba136369082ad46c8c8", "repo": "matplotlib", "path": "lib/matplotlib/pyplot.py", "file_name": "pyplot.py", "fun_name": "xkcd", "commit_message": "Simplify impl. 
of functions optionally used as context managers.\n\nWe can actually just put the \"exit\" logic into an ExitStack callback.\nIf the return value is never `__enter__`'d via a \"with\" statement, it is\nnever `__exit__`'d either.", "code": "def xkcd(scale=1, length=100, randomness=2):\n \n # This cannot be implemented in terms of contextmanager() or rc_context()\n # because this needs to work as a non-contextmanager too.\n\n if rcParams['text.usetex']:\n raise RuntimeError(\n \"xkcd mode is not compatible with text.usetex = True\")\n\n stack = ExitStack()\n stack.callback(dict.update, rcParams, rcParams.copy())\n\n from matplotlib import patheffects\n rcParams.update({\n 'font.family': ['xkcd', 'xkcd Script', 'Humor Sans', 'Comic Neue',\n 'Comic Sans MS'],\n 'font.size': 14.0,\n 'path.sketch': (scale, length, randomness),\n 'path.effects': [\n patheffects.withStroke(linewidth=4, foreground=\"w\")],\n 'axes.linewidth': 1.5,\n 'lines.linewidth': 2.0,\n 'figure.facecolor': 'white',\n 'grid.linewidth': 0.0,\n 'axes.grid': False,\n 'axes.unicode_minus': False,\n 'axes.edgecolor': 'black',\n 'xtick.major.size': 8,\n 'xtick.major.width': 3,\n 'ytick.major.size': 8,\n 'ytick.major.width': 3,\n })\n\n return stack\n\n\n## Figures ##\n\n@_api.make_keyword_only(\"3.6\", \"facecolor\")", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "@_api.make_keyword_only(\"3.6\", \"facecolor\")", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 285, "n_words": 100, "vocab_size": 92, "complexity": 2, "nloc": 27, "token_counts": 158, "n_ast_nodes": 281, "n_identifiers": 19, "d_id": 23441, "documentation": { "docstring": "\n Turn on `xkcd `_ sketch-style drawing mode. This will\n only have effect on things drawn after this function is called.\n\n For best results, the \"Humor Sans\" font should be installed: it is\n not included with Matplotlib.\n\n Parameters\n ----------\n scale : float, optional\n The amplitude of the wiggle perpendicular to the source line.\n length : float, optional\n The length of the wiggle along the line.\n randomness : float, optional\n The scale factor by which the length is shrunken or expanded.\n\n Notes\n -----\n This function works by a number of rcParams, so it will probably\n override others you have set before.\n\n If you want the effects of this function to be temporary, it can\n be used as a context manager, for example::\n\n with plt.xkcd():\n # This figure will be in XKCD-style\n fig1 = plt.figure()\n # ...\n\n # This figure will be in regular style\n fig2 = plt.figure()\n ", "n_words": 145, "vocab_size": 93, "n_whitespaces": 270, "language": "en" } }, { "id": 267571, "commit_id": "43153c58310d02223f2cb0964f4255ba1ac4ed53", "repo": "ansible", "path": "lib/ansible/playbook/base.py", "file_name": "base.py", "fun_name": "post_validate", "commit_message": "`FieldAttribute`s as descriptors (#73908)", "code": "def post_validate(self, templar):\n \n\n # save the omit value for later checking\n omit_value = templar.available_variables.get('omit')\n\n for (name, attribute) in self.fattributes.items():\n if attribute.static:\n value = getattr(self, name)\n\n # we don't template 'vars' but allow template as values for later use\n if name not in ('vars',) and templar.is_template(value):\n display.warning('\"%s\" is not templatable, but we found: %s, '\n 'it will not be templated and will be used \"as is\".' 
% (name, value))\n continue\n\n if getattr(self, name) is None:\n if not attribute.required:\n continue\n else:\n raise AnsibleParserError(\"the field '%s' is required but was not set\" % name)\n elif not attribute.always_post_validate and self.__class__.__name__ not in ('Task', 'Handler', 'PlayContext'):\n # Intermediate objects like Play() won't have their fields validated by\n # default, as their values are often inherited by other objects and validated\n # later, so we don't want them to fail out early\n continue\n\n try:\n # Run the post-validator if present. These methods are responsible for\n # using the given templar to template the values, if required.\n method = getattr(self, '_post_validate_%s' % name, None)\n if method:\n value = method(attribute, getattr(self, name), templar)\n elif attribute.isa == 'class':\n value = getattr(self, name)\n else:\n # if the attribute contains a variable, template it now\n value = templar.template(getattr(self, name))\n\n # if this evaluated to the omit value, set the value back to\n # the default specified in the FieldAttribute and move on\n if omit_value is not None and value == omit_value:\n if callable(attribute.default):\n setattr(self, name, attribute.default())\n else:\n setattr(self, name, attribute.default)\n continue\n\n # and make sure the attribute is of the type it should be\n if value is not None:\n value = self.get_validated_value(name, attribute, value, templar)\n\n # and assign the massaged value back to the attribute field\n setattr(self, name, value)\n except (TypeError, ValueError) as e:\n value = getattr(self, name)\n raise AnsibleParserError(\"the field '%s' has an invalid value (%s), and could not be converted to an %s.\"\n \"The error was: %s\" % (name, value, attribute.isa, e), obj=self.get_ds(), orig_exc=e)\n except (AnsibleUndefinedVariable, UndefinedError) as e:\n if templar._fail_on_undefined_errors and name != 'name':\n if name == 'args':\n msg = \"The task includes an option with an undefined variable. The error was: %s\" % (to_native(e))\n else:\n msg = \"The field '%s' has an invalid value, which includes an undefined variable. The error was: %s\" % (name, to_native(e))\n raise AnsibleParserError(msg, obj=self.get_ds(), orig_exc=e)\n\n self._finalized = True\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 1300, "n_words": 376, "vocab_size": 187, "complexity": 20, "nloc": 45, "token_counts": 373, "n_ast_nodes": 611, "n_identifiers": 40, "d_id": 78953, "documentation": { "docstring": "\n we can't tell that everything is of the right type until we have\n all the variables. 
Run basic types (from isa) as well as\n any _post_validate_ functions.\n ", "n_words": 27, "vocab_size": 24, "n_whitespaces": 57, "language": "en" } }, { "id": 130567, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/cluster_utils.py", "file_name": "cluster_utils.py", "fun_name": "wait_for_nodes", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def wait_for_nodes(self, timeout=30):\n \n start_time = time.time()\n while time.time() - start_time < timeout:\n clients = self.global_state.node_table()\n live_clients = [client for client in clients if client[\"Alive\"]]\n\n expected = len(self.list_all_nodes())\n if len(live_clients) == expected:\n logger.debug(\"All nodes registered as expected.\")\n return\n else:\n logger.debug(\n f\"{len(live_clients)} nodes are currently registered, \"\n f\"but we are expecting {expected}\"\n )\n time.sleep(0.1)\n raise TimeoutError(\"Timed out while waiting for nodes to join.\")\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 257, "n_words": 61, "vocab_size": 49, "complexity": 5, "nloc": 16, "token_counts": 100, "n_ast_nodes": 182, "n_identifiers": 17, "d_id": 29306, "documentation": { "docstring": "Waits for correct number of nodes to be registered.\n\n This will wait until the number of live nodes in the client table\n exactly matches the number of \"add_node\" calls minus the number of\n \"remove_node\" calls that have been made on this cluster. This means\n that if a node dies without \"remove_node\" having been called, this will\n raise an exception.\n\n Args:\n timeout (float): The number of seconds to wait for nodes to join\n before failing.\n\n Raises:\n TimeoutError: An exception is raised if we time out while waiting\n for nodes to join.\n ", "n_words": 90, "vocab_size": 62, "n_whitespaces": 198, "language": "en" } }, { "id": 196016, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/algebras/quaternion.py", "file_name": "quaternion.py", "fun_name": "rotate_point", "commit_message": "Updated import locations", "code": "def rotate_point(pin, r):\n \n if isinstance(r, tuple):\n # if r is of the form (vector, angle)\n q = Quaternion.from_axis_angle(r[0], r[1])\n else:\n # if r is a quaternion\n q = r.normalize()\n pout = q * Quaternion(0, pin[0], pin[1], pin[2]) * conjugate(q)\n return (pout.b, pout.c, pout.d)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 122, "n_words": 43, "vocab_size": 33, "complexity": 2, "nloc": 7, "token_counts": 83, "n_ast_nodes": 126, "n_identifiers": 14, "d_id": 47516, "documentation": { "docstring": "Returns the coordinates of the point pin(a 3 tuple) after rotation.\n\n Parameters\n ==========\n\n pin : tuple\n A 3-element tuple of coordinates of a point which needs to be\n rotated.\n r : Quaternion or tuple\n Axis and angle of rotation.\n\n It's important to note that when r is a tuple, it must be of the form\n (axis, angle)\n\n Returns\n =======\n\n tuple\n The coordinates of the point after rotation.\n\n Examples\n ========\n\n >>> from sympy import Quaternion\n >>> from sympy import symbols, trigsimp, cos, sin\n >>> x = symbols('x')\n >>> q = Quaternion(cos(x/2), 0, 0, sin(x/2))\n >>> trigsimp(Quaternion.rotate_point((1, 1, 1), q))\n (sqrt(2)*cos(x + 
pi/4), sqrt(2)*sin(x + pi/4), 1)\n >>> (axis, angle) = q.to_axis_angle()\n >>> trigsimp(Quaternion.rotate_point((1, 1, 1), (axis, angle)))\n (sqrt(2)*cos(x + pi/4), sqrt(2)*sin(x + pi/4), 1)\n\n ", "n_words": 123, "vocab_size": 71, "n_whitespaces": 322, "language": "en" } }, { "id": 160599, "commit_id": "126046f84449fffeb0c75ae88657ce6b90236eee", "repo": "numpy", "path": "numpy/core/shape_base.py", "file_name": "shape_base.py", "fun_name": "vstack", "commit_message": "ENH: adding casting option to numpy.stack. (#21627)\n\nnp.concatenate and np.stack are similar methods, but only np.concatenate has the casting option.\r\n\r\nThis PR puts the casting option into the np.stack method to control what kind of data casting may occur\r\n\r\nCloses gh-20959\r\n\r\n* ENH: adding casting option to numpy.stack.\r\n\r\nSee #20959\r\n\r\n* ENH: adding dtype option to numpy.stack.\r\n\r\nSee #20959\r\n\r\n* REV: removing auto-generated file loops_modulo.dispatch.c\r\n\r\nSee numpy#20959\r\n\r\n* REV: removing auto-generated file loops_modulo.dispatch.c\r\n\r\nSee numpy#20959\r\n\r\n* REV: removing inserted newlines\r\n\r\nSee numpy#20959\r\n\r\nCo-authored-by: alescrocaro \r\nCo-authored-by: JessePires \r\nCo-authored-by: patriarka \r\n\r\n* DOC: inserting versionadded info in dtype and casting parameters.\r\n\r\nSee numpy#20959\r\n\r\nCo-authored-by: alescrocaro \r\nCo-authored-by: JessePires \r\nCo-authored-by: patriarka \r\n\r\n* TST: writing tests to stack method with dtype and casting options\r\n\r\nSee numpy#20959\r\n\r\nCo-authored-by: alescrocaro \r\nCo-authored-by: JessePires \r\nCo-authored-by: patriarka \r\n\r\n* DOC: adding upcoming_change file for new options casting and dtype in method stack.\r\n\r\nSee numpy#20959\r\n\r\nCo-authored-by: alescrocaro \r\nCo-authored-by: JessePires \r\nCo-authored-by: patriarka \r\n\r\n* REV: reverting lint errors.\r\n\r\nSee numpy#20959\r\n\r\nCo-authored-by: alescrocaro \r\nCo-authored-by: JessePires \r\nCo-authored-by: patriarka \r\n\r\n* DOC: inserting hstack and vstack methods in upcoming changes\r\n\r\nSee numpy#20959\r\n\r\nCo-authored-by: alescrocaro \r\nCo-authored-by: JessePires \r\nCo-authored-by: patriarka \r\n\r\n* ENH: adding dtype and casting keyword arguments to numpy.vstack and numpy.hstack.\r\n\r\nSee numpy#20959\r\n\r\nCo-authored-by: alescrocaro \r\nCo-authored-by: JessePires \r\nCo-authored-by: patriarka \r\n\r\n* TST: writing tests to vstack and hstack methods with dtype and casting keyword arguments.\r\n\r\nSee numpy#20959\r\n\r\nCo-authored-by: alescrocaro \r\nCo-authored-by: JessePires \r\nCo-authored-by: patriarka \r\n\r\n* REV: reverting the 'out' option type in stack method.\r\n\r\nSee numpy#20959\r\n\r\nCo-authored-by: alescrocaro \r\nCo-authored-by: JessePires \r\nCo-authored-by: patriarka \r\n\r\n* REV: Reverting out type changes in overload of shape_base.pyi file.\r\n\r\nSee numpy#20959\r\n\r\nCo-authored-by: alescrocaro \r\nCo-authored-by: jhonatancunha \r\nCo-authored-by: patriarka \r\n\r\n* DOC: correcting some english erros in upcoming_changes file.\r\n\r\nSee numpy#20959\r\n\r\nCo-authored-by: alescrocaro \r\nCo-authored-by: JessePires \r\nCo-authored-by: patriarka \r\n\r\nCo-authored-by: JessePires \r\nCo-authored-by: alescrocaro \r\nCo-authored-by: JessePires \r\nCo-authored-by: patriarka ", "code": "def vstack(tup, *, dtype=None, casting=\"same_kind\"):\n \n if not overrides.ARRAY_FUNCTION_ENABLED:\n # raise warning if necessary\n _arrays_for_stack_dispatcher(tup, stacklevel=2)\n arrs = atleast_2d(*tup)\n if not 
isinstance(arrs, list):\n arrs = [arrs]\n return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting)\n\n\n@array_function_dispatch(_vhstack_dispatcher)", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "@array_function_dispatch(_vhstack_dispatcher)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 66, "n_words": 31, "vocab_size": 26, "complexity": 3, "nloc": 7, "token_counts": 68, "n_ast_nodes": 117, "n_identifiers": 16, "d_id": 38662, "documentation": { "docstring": "\n Stack arrays in sequence vertically (row wise).\n\n This is equivalent to concatenation along the first axis after 1-D arrays\n of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by\n `vsplit`.\n\n This function makes most sense for arrays with up to 3 dimensions. For\n instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). The functions `concatenate`, `stack` and\n `block` provide more general stacking and concatenation operations.\n\n Parameters\n ----------\n tup : sequence of ndarrays\n The arrays must have the same shape along all but the first axis.\n 1-D arrays must have the same length.\n\n dtype : str or dtype\n If provided, the destination array will have this dtype. Cannot be\n provided together with `out`.\n\n .. versionadded:: 1.24\n\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n Controls what kind of data casting may occur. Defaults to 'same_kind'.\n\n .. versionadded:: 1.24\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays, will be at least 2-D.\n\n See Also\n --------\n concatenate : Join a sequence of arrays along an existing axis.\n stack : Join a sequence of arrays along a new axis.\n block : Assemble an nd-array from nested lists of blocks.\n hstack : Stack arrays in sequence horizontally (column wise).\n dstack : Stack arrays in sequence depth wise (along third axis).\n column_stack : Stack 1-D arrays as columns into a 2-D array.\n vsplit : Split an array into multiple sub-arrays vertically (row-wise).\n\n Examples\n --------\n >>> a = np.array([1, 2, 3])\n >>> b = np.array([4, 5, 6])\n >>> np.vstack((a,b))\n array([[1, 2, 3],\n [4, 5, 6]])\n\n >>> a = np.array([[1], [2], [3]])\n >>> b = np.array([[4], [5], [6]])\n >>> np.vstack((a,b))\n array([[1],\n [2],\n [3],\n [4],\n [5],\n [6]])\n\n ", "n_words": 282, "vocab_size": 175, "n_whitespaces": 499, "language": "en" } }, { "id": 152952, "commit_id": "0d9d14e6669be3dd6bb3b72222dbe6a6dffe1bee", "repo": "modin", "path": "modin/pandas/indexing.py", "file_name": "indexing.py", "fun_name": "_determine_setitem_axis", "commit_message": "FIX-#3860: Fix single row Series assignment. 
(#3894)\n\nSigned-off-by: mvashishtha ", "code": "def _determine_setitem_axis(self, row_lookup, col_lookup, row_scalar, col_scalar):\n \n if self.df.shape == (1, 1):\n return None if not (row_scalar ^ col_scalar) else 1 if row_scalar else 0\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 49, "n_words": 24, "vocab_size": 21, "complexity": 13, "nloc": 23, "token_counts": 156, "n_ast_nodes": 61, "n_identifiers": 8, "d_id": 35201, "documentation": { "docstring": "\n Determine an axis along which we should do an assignment.\n\n Parameters\n ----------\n row_lookup : slice or list\n Indexer for rows.\n col_lookup : slice or list\n Indexer for columns.\n row_scalar : bool\n Whether indexer for rows is scalar or not.\n col_scalar : bool\n Whether indexer for columns is scalar or not.\n\n Returns\n -------\n int or None\n None if this will be a both axis assignment, number of axis to assign in other cases.\n\n Notes\n -----\n axis = 0: column assignment df[col] = item\n axis = 1: row assignment df.loc[row] = item\n axis = None: assignment along both axes\n ", "n_words": 97, "vocab_size": 62, "n_whitespaces": 265, "language": "en" } }, { "id": 337070, "commit_id": "a73f8b725105b12a60a9b22918bda68f8b6d26c3", "repo": "diffusers", "path": "src/diffusers/models/resnet.py", "file_name": "resnet.py", "fun_name": "_upsample_2d", "commit_message": "Clean up resnet.py file (#780)\n\n* clean up resnet.py\r\n\r\n* make style and quality\r\n\r\n* minor formatting", "code": "def _upsample_2d(self, hidden_states, weight=None, kernel=None, factor=2, gain=1):\n \n\n assert isinstance(factor, int) and factor >= 1\n\n # Setup filter kernel.\n if kernel is None:\n kernel = [1] * factor\n\n # setup kernel\n kernel = torch.tensor(kernel, dtype=torch.float32)\n if kernel.ndim == 1:\n kernel = torch.outer(kernel, kernel)\n kernel /= torch.sum(kernel)\n\n kernel = kernel * (gain * (factor**2))\n\n if self.use_conv:\n convH = weight.shape[2]\n convW = weight.shape[3]\n inC = weight.shape[1]\n\n pad_value = (kernel.shape[0] - factor) - (convW - 1)\n\n stride = (factor, factor)\n # Determine data dimensions.\n output_shape = (\n (hidden_states.shape[2] - 1) * factor + convH,\n (hidden_states.shape[3] - 1) * factor + convW,\n )\n output_padding = (\n output_shape[0] - (hidden_states.shape[2] - 1) * stride[0] - convH,\n output_shape[1] - (hidden_states.shape[3] - 1) * stride[1] - convW,\n )\n assert output_padding[0] >= 0 and output_padding[1] >= 0\n num_groups = hidden_states.shape[1] // inC\n\n # Transpose weights.\n weight = torch.reshape(weight, (num_groups, -1, inC, convH, convW))\n weight = torch.flip(weight, dims=[3, 4]).permute(0, 2, 1, 3, 4)\n weight = torch.reshape(weight, (num_groups * inC, -1, convH, convW))\n\n inverse_conv = F.conv_transpose2d(\n hidden_states, weight, stride=stride, output_padding=output_padding, padding=0\n )\n\n output = upfirdn2d_native(\n inverse_conv,\n torch.tensor(kernel, device=inverse_conv.device),\n pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2 + 1),\n )\n else:\n pad_value = kernel.shape[0] - factor\n output = upfirdn2d_native(\n hidden_states,\n torch.tensor(kernel, device=hidden_states.device),\n up=factor,\n pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2),\n )\n\n return output\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 759, "n_words": 220, "vocab_size": 
109, "complexity": 6, "nloc": 45, "token_counts": 430, "n_ast_nodes": 660, "n_identifiers": 39, "d_id": 120955, "documentation": { "docstring": "Fused `upsample_2d()` followed by `Conv2d()`.\n\n Padding is performed only once at the beginning, not between the operations. The fused op is considerably more\n efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of\n arbitrary order.\n\n Args:\n hidden_states: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.\n weight: Weight tensor of the shape `[filterH, filterW, inChannels,\n outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`.\n kernel: FIR filter of the shape `[firH, firW]` or `[firN]`\n (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling.\n factor: Integer upsampling factor (default: 2).\n gain: Scaling factor for signal magnitude (default: 1.0).\n\n Returns:\n output: Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same\n datatype as `hidden_states`.\n ", "n_words": 140, "vocab_size": 103, "n_whitespaces": 289, "language": "en" } }, { "id": 204824, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/base/base.py", "file_name": "base.py", "fun_name": "validate_thread_sharing", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def validate_thread_sharing(self):\n \n if not (self.allow_thread_sharing or self._thread_ident == _thread.get_ident()):\n raise DatabaseError(\n \"DatabaseWrapper objects created in a \"\n \"thread can only be used in that same thread. The object \"\n \"with alias '%s' was created in thread id %s and this is \"\n \"thread id %s.\" % (self.alias, self._thread_ident, _thread.get_ident())\n )\n\n # ##### Miscellaneous #####\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 153, "n_words": 54, "vocab_size": 46, "complexity": 3, "nloc": 8, "token_counts": 48, "n_ast_nodes": 86, "n_identifiers": 8, "d_id": 50907, "documentation": { "docstring": "\n Validate that the connection isn't accessed by another thread than the\n one which originally created it, unless the connection was explicitly\n authorized to be shared between threads (via the `inc_thread_sharing()`\n method). 
Raise an exception if the validation fails.\n ", "n_words": 38, "vocab_size": 33, "n_whitespaces": 74, "language": "en" } }, { "id": 206137, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/middleware/common.py", "file_name": "common.py", "fun_name": "process_response", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def process_response(self, request, response):\n \n # If the given URL is \"Not Found\", then check if we should redirect to\n # a path with a slash appended.\n if response.status_code == 404 and self.should_redirect_with_slash(request):\n return self.response_redirect_class(self.get_full_path_with_slash(request))\n\n # Add the Content-Length header to non-streaming responses if not\n # already set.\n if not response.streaming and not response.has_header(\"Content-Length\"):\n response.headers[\"Content-Length\"] = str(len(response.content))\n\n return response\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 136, "n_words": 58, "vocab_size": 45, "complexity": 5, "nloc": 6, "token_counts": 68, "n_ast_nodes": 117, "n_identifiers": 14, "d_id": 51379, "documentation": { "docstring": "\n When the status code of the response is 404, it may redirect to a path\n with an appended slash if should_redirect_with_slash() returns True.\n ", "n_words": 23, "vocab_size": 22, "n_whitespaces": 45, "language": "en" } }, { "id": 307759, "commit_id": "dd20a7ea62fc003748c5f0cf99be25c69c9b5a05", "repo": "core", "path": "tests/components/recorder/test_statistics.py", "file_name": "test_statistics.py", "fun_name": "test_duplicate_statistics_handle_integrity_error", "commit_message": "Display statistics in the source's unit (#78031)", "code": "def test_duplicate_statistics_handle_integrity_error(hass_recorder, caplog):\n \n hass = hass_recorder()\n wait_recording_done(hass)\n\n period1 = dt_util.as_utc(dt_util.parse_datetime(\"2021-09-01 00:00:00\"))\n period2 = dt_util.as_utc(dt_util.parse_datetime(\"2021-09-30 23:00:00\"))\n\n external_energy_metadata_1 = {\n \"has_mean\": False,\n \"has_sum\": True,\n \"name\": \"Total imported energy\",\n \"source\": \"test\",\n \"state_unit_of_measurement\": \"kWh\",\n \"statistic_id\": \"test:total_energy_import_tariff_1\",\n \"unit_of_measurement\": \"kWh\",\n }\n external_energy_statistics_1 = [\n {\n \"start\": period1,\n \"last_reset\": None,\n \"state\": 3,\n \"sum\": 5,\n },\n ]\n external_energy_statistics_2 = [\n {\n \"start\": period2,\n \"last_reset\": None,\n \"state\": 3,\n \"sum\": 6,\n }\n ]\n\n with patch.object(\n statistics, \"_statistics_exists\", return_value=False\n ), patch.object(\n statistics, \"_insert_statistics\", wraps=statistics._insert_statistics\n ) as insert_statistics_mock:\n async_add_external_statistics(\n hass, external_energy_metadata_1, external_energy_statistics_1\n )\n async_add_external_statistics(\n hass, external_energy_metadata_1, external_energy_statistics_1\n )\n async_add_external_statistics(\n hass, external_energy_metadata_1, external_energy_statistics_2\n )\n wait_recording_done(hass)\n assert insert_statistics_mock.call_count == 3\n\n with session_scope(hass=hass) as session:\n tmp = session.query(recorder.db_schema.Statistics).all()\n assert len(tmp) == 2\n\n assert \"Blocked attempt to insert duplicated statistic rows\" in caplog.text\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 
0, "ast_levels": 14, "n_whitespaces": 447, "n_words": 117, "vocab_size": 79, "complexity": 1, "nloc": 50, "token_counts": 224, "n_ast_nodes": 387, "n_identifiers": 32, "d_id": 106525, "documentation": { "docstring": "Test the recorder does not blow up if statistics is duplicated.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 280163, "commit_id": "0b393d4049afd187ed7d24ea70177a72cf4a3ce2", "repo": "keras", "path": "keras/saving/experimental/saving_lib.py", "file_name": "saving_lib.py", "fun_name": "save_model", "commit_message": "New saving: add npz support and make zipping optional.\n\nWe should decide which store to go with by default. h5 is faster, but only marginally so. Zip has no speed impact (the long saving/loading time was due to the breakdown into many files/dirs previously). But it has a temporary disk space impact.\n\nNote: Using h5 without zipping will not work with GCS (due to H5 using its own file pointer). This issue could be worked around via special casing.\nAll other combinations work with GCS.\n\nSaving time for NASNetLarge:\n\n- Legacy h5: 2.8s\n- New h5 + zip: 2.6s\n- New h5 + no zip: 2.5s\n- New npz + zip: 3.2s\n- New npz + no zip: 3.0s\n- Legacy savedmodel: 142.2s (!)\n\nLoading times are similar across the board (nozip is a bit faster).\n\nPiperOrigin-RevId: 481705383", "code": "def save_model(model, filepath, weights_format=\"h5\", use_zip=True):\n \n if not filepath.endswith(\".keras\"):\n raise ValueError(\n \"Invalid filename: expected a `.keras` extension. \"\n f\"Received: filepath={filepath}\"\n )\n if weights_format == \"h5\" and h5py is None:\n raise ImportError(\n \"h5py must be installed in order to save a model in hdf5 format.\"\n )\n\n if not model.built:\n warnings.warn(\n \"You are saving a model that has not yet been built. \"\n \"It might not contain any weights yet. \"\n \"Consider building the model first by calling it \"\n \"on some data.\",\n stacklevel=2,\n )\n saving_v3_enabled_value = getattr(_SAVING_V3_ENABLED, \"value\", False)\n _SAVING_V3_ENABLED.value = True\n\n serialized_model_dict = serialize_keras_object(model)\n config_json = json.dumps(serialized_model_dict)\n # TODO(fchollet): consider saving dependencies list / versions in metadata.\n metadata_json = json.dumps(\n {\n \"keras_version\": keras.__version__,\n \"date_saved\": datetime.datetime.now().strftime(\"%Y-%m-%d@%H:%M:%S\"),\n }\n )\n if use_zip:\n # Use a temporary directory for the storing files prior to zipping.\n write_path = _get_temp_dir()\n else:\n tf.io.gfile.makedirs(filepath)\n write_path = filepath\n try:\n # Write files locally before zipping.\n with open(tf.io.gfile.join(write_path, _METADATA_FILENAME), \"w\") as f:\n f.write(metadata_json)\n with open(tf.io.gfile.join(write_path, _CONFIG_FILENAME), \"w\") as f:\n f.write(config_json)\n\n weights_path = tf.io.gfile.join(write_path, _VARS_FNAME)\n assets_path = tf.io.gfile.join(write_path, _ASSETS_DIRNAME)\n\n if weights_format == \"h5\":\n weights_store = H5IOStore(weights_path, mode=\"w\")\n elif weights_format == \"npz\":\n weights_store = NpzIOStore(weights_path, mode=\"w\")\n else:\n raise ValueError(\n \"Unknown `weights_format`. Expected 'h5' or 'npz'. 
\"\n f\"Received: {weights_format}\"\n )\n _save_state(\n model,\n weights_handler=weights_store,\n assets_handler=DiskIOStore(assets_path),\n inner_path=\"\",\n visited_trackables=set(),\n )\n weights_store.close()\n\n if use_zip:\n # Zip local files into an archive.\n with zipfile.ZipFile(filepath, \"w\") as zipfile_to_save:\n _write_to_zip_recursively(zipfile_to_save, write_path, \"\")\n except Exception as e:\n raise e\n finally:\n _SAVING_V3_ENABLED.value = saving_v3_enabled_value\n if use_zip and tf.io.gfile.exists(write_path):\n # Remove the directory temporarily used.\n tf.io.gfile.rmtree(write_path)\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 800, "n_words": 242, "vocab_size": 167, "complexity": 13, "nloc": 66, "token_counts": 363, "n_ast_nodes": 634, "n_identifiers": 64, "d_id": 83277, "documentation": { "docstring": "Save an archive representing a Keras model to the given filepath.\n\n The zip-based archive contains the following structure:\n\n - JSON configuration file (`config.json`): Records of model, layer, and\n other object configurations.\n - Npz or h5 model variables file (`variables.npz` or `variables.h5`).\n - Assets files (if any) found in the `assets/` directory structure,\n which mirrors the model's inner structure.\n - JSON metadata file (`metdata.json`).\n\n The states of Keras trackables (layers, optimizers, loss, and metrics) are\n automatically saved as long as they can be discovered through the attributes\n returned by `dir(model)`. Typically, the state includes the variables\n associated with the trackable, but some specially purposed layers may\n contain more such as the vocabularies stored in the hashmaps. The trackables\n define how their asset state is saved by exposing `save_assets()` and\n `load_assets()` APIs.\n\n For the case of layer states, the variables will be visited as long as\n they are either 1) referenced via layer attributes, or 2) referenced via a\n container (list, tuple, or dict), and the container is referenced via a\n layer attribute.\n ", "n_words": 171, "vocab_size": 115, "n_whitespaces": 236, "language": "en" } }, { "id": 267825, "commit_id": "3eb0485dd92c88cc92152d3656d94492db44b183", "repo": "ansible", "path": "test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py", "file_name": "__init__.py", "fun_name": "cleanup", "commit_message": "ansible-test - Use more native type hints. 
(#78435)\n\n* ansible-test - Use more native type hints.\r\n\r\nSimple search and replace to switch from comments to native type hints for return types of functions with no arguments.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of simple single-line function annotation type comments to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of single-line function annotation type comments with default values to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nManual conversion of type annotation comments for functions which have pylint directives.", "code": "def cleanup(self) -> None:\n \n if self.remove_config:\n os.remove(self.config_path)\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 32, "n_words": 7, "vocab_size": 7, "complexity": 2, "nloc": 4, "token_counts": 21, "n_ast_nodes": 37, "n_identifiers": 6, "d_id": 79106, "documentation": { "docstring": "Clean up the cloud resource and any temporary configuration files after tests complete.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 153687, "commit_id": "2809f7c12384e2615390bcb76f2e7f0d88a3fffa", "repo": "modin", "path": "modin/pandas/general.py", "file_name": "general.py", "fun_name": "notna", "commit_message": "DOCS-#4336: Reformat general utilities docstrings (#4338)\n\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: Alexander Myskov ", "code": "def notna(obj): # noqa: PR01, RT01, D200\n \n if isinstance(obj, BasePandasDataset):\n return obj.notna()\n else:\n return pandas.notna(obj)\n\n\nnotnull = notna\n\n\n@_inherit_docstrings(pandas.merge, apilink=\"pandas.merge\")", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "@_inherit_docstrings(pandas.merge, apilink=\"pandas.merge\")", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 42, "n_words": 20, "vocab_size": 19, "complexity": 2, "nloc": 5, "token_counts": 29, "n_ast_nodes": 75, "n_identifiers": 9, "d_id": 35545, "documentation": { "docstring": "\n Detect non-missing values for an array-like object.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 14, "language": "en" } }, { "id": 128786, "commit_id": "d6b6dc560dd2920bf8ee1e4de01c4ec25fdab555", "repo": "ray", "path": "rllib/evaluation/rollout_worker.py", "file_name": "rollout_worker.py", "fun_name": "sample", "commit_message": "[AIR] Address multiple warnings in AIR examples (#28800)\n\nThese changes address warnings and errors in several of the AIR examples. 
They're all small changes, so I've batched them together in this PR:\r\n\r\n- Fixed `concant_samples` deprecation warning, which appears in `rollout_worker.py`\r\n- Fixed call to `randrange` which passed floating point argument; this will generate errors for python>=3.11\r\n- Removed another use of the `object_store_memory` in instantiating an actor\r\n- Replaced a number of strings (docstrings and regexes) which had escape sequences with string literals\r\n- Fixed an issue that broke the `tfx_tabular_train_to_serve` example where data that was being sent to the server was not being encoded as json correctly by `requests`.\r\n- Fixed a deprecated import of `ray.rllib.agents.marwil` in the `rl_offline_example`.\r\n\r\nSigned-off-by: pdmurray ", "code": "def sample(self) -> SampleBatchType:\n \n if self.fake_sampler and self.last_batch is not None:\n return self.last_batch\n elif self.input_reader is None:\n raise ValueError(\n \"RolloutWorker has no `input_reader` object! \"\n \"Cannot call `sample()`. You can try setting \"\n \"`create_env_on_driver` to True.\"\n )\n\n if log_once(\"sample_start\"):\n logger.info(\n \"Generating sample batch of size {}\".format(\n self.rollout_fragment_length\n )\n )\n\n batches = [self.input_reader.next()]\n steps_so_far = (\n batches[0].count\n if self.count_steps_by == \"env_steps\"\n else batches[0].agent_steps()\n )\n\n # In truncate_episodes mode, never pull more than 1 batch per env.\n # This avoids over-running the target batch size.\n if self.batch_mode == \"truncate_episodes\":\n max_batches = self.num_envs\n else:\n max_batches = float(\"inf\")\n while steps_so_far < self.rollout_fragment_length and (\n len(batches) < max_batches or self.policy_config.get(\"offline_sampling\")\n ):\n batch = self.input_reader.next()\n steps_so_far += (\n batch.count\n if self.count_steps_by == \"env_steps\"\n else batch.agent_steps()\n )\n batches.append(batch)\n batch = concat_samples(batches)\n\n self.callbacks.on_sample_end(worker=self, samples=batch)\n\n # Always do writes prior to compression for consistency and to allow\n # for better compression inside the writer.\n self.output_writer.write(batch)\n\n if log_once(\"sample_end\"):\n logger.info(\"Completed sample batch:\\n\\n{}\\n\".format(summarize(batch)))\n\n if self.compress_observations:\n batch.compress(bulk=self.compress_observations == \"bulk\")\n\n if self.fake_sampler:\n self.last_batch = batch\n return batch\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 654, "n_words": 163, "vocab_size": 109, "complexity": 14, "nloc": 62, "token_counts": 249, "n_ast_nodes": 430, "n_identifiers": 38, "d_id": 28809, "documentation": { "docstring": "Returns a batch of experience sampled from this worker.\n\n This method must be implemented by subclasses.\n\n Returns:\n A columnar batch of experiences (e.g., tensors).\n\n Examples:\n >>> import gym\n >>> from ray.rllib.evaluation.rollout_worker import RolloutWorker\n >>> from ray.rllib.algorithms.pg.pg_tf_policy import PGTF1Policy\n >>> worker = RolloutWorker( # doctest: +SKIP\n ... env_creator=lambda _: gym.make(\"CartPole-v0\"), # doctest: +SKIP\n ... 
policy_spec=PGTF1Policy) # doctest: +SKIP\n >>> print(worker.sample()) # doctest: +SKIP\n SampleBatch({\"obs\": [...], \"action\": [...], ...})\n ", "n_words": 67, "vocab_size": 46, "n_whitespaces": 198, "language": "en" } }, { "id": 269811, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/benchmarks/keras_examples_benchmarks/mnist_conv_custom_training_benchmark_test.py", "file_name": "mnist_conv_custom_training_benchmark_test.py", "fun_name": "benchmark_custom_training_mnist_bs_256", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def benchmark_custom_training_mnist_bs_256(self):\n \n batch_size = 256\n run_iters = 5\n train_dataset = self.train_dataset.shuffle(buffer_size=1024).batch(\n batch_size\n )\n\n # Instantiate a loss function.\n loss_fn = tf.keras.losses.CategoricalCrossentropy(\n reduction=tf.keras.losses.Reduction.NONE\n )\n # Instantiate an optimizer to train the model.\n optimizer = tf.keras.optimizers.Adam()\n model = self._build_model()\n\n metrics, wall_time = self.measure_performance(\n model,\n train_dataset,\n loss_fn,\n optimizer,\n batch_size,\n run_iters,\n self.epochs,\n )\n extras = benchmark_util.get_keras_examples_metadata(\n \"conv\", batch_size, \".keras.ctl_graph\"\n )\n self.report_benchmark(\n iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 302, "n_words": 62, "vocab_size": 46, "complexity": 1, "nloc": 26, "token_counts": 126, "n_ast_nodes": 197, "n_identifiers": 30, "d_id": 80290, "documentation": { "docstring": "Measure performance with batch_size=256 and run_iters=5.", "n_words": 6, "vocab_size": 6, "n_whitespaces": 5, "language": "en" } }, { "id": 109439, "commit_id": "c73f4c455514cf5422d27bf38c93250de8316b21", "repo": "matplotlib", "path": "lib/matplotlib/_constrained_layout.py", "file_name": "_constrained_layout.py", "fun_name": "match_submerged_margins", "commit_message": "Merge SubplotBase into AxesBase.", "code": "def match_submerged_margins(layoutgrids, fig):\n \n\n for sfig in fig.subfigs:\n match_submerged_margins(layoutgrids, sfig)\n\n axs = [a for a in fig.get_axes()\n if a.get_subplotspec() is not None and a.get_in_layout()]\n\n for ax1 in axs:\n ss1 = ax1.get_subplotspec()\n if ss1.get_gridspec() not in layoutgrids:\n axs.remove(ax1)\n continue\n lg1 = layoutgrids[ss1.get_gridspec()]\n\n # interior columns:\n if len(ss1.colspan) > 1:\n maxsubl = np.max(\n lg1.margin_vals['left'][ss1.colspan[1:]] +\n lg1.margin_vals['leftcb'][ss1.colspan[1:]]\n )\n maxsubr = np.max(\n lg1.margin_vals['right'][ss1.colspan[:-1]] +\n lg1.margin_vals['rightcb'][ss1.colspan[:-1]]\n )\n for ax2 in axs:\n ss2 = ax2.get_subplotspec()\n lg2 = layoutgrids[ss2.get_gridspec()]\n if lg2 is not None and len(ss2.colspan) > 1:\n maxsubl2 = np.max(\n lg2.margin_vals['left'][ss2.colspan[1:]] +\n lg2.margin_vals['leftcb'][ss2.colspan[1:]])\n if maxsubl2 > maxsubl:\n maxsubl = maxsubl2\n maxsubr2 = np.max(\n lg2.margin_vals['right'][ss2.colspan[:-1]] +\n lg2.margin_vals['rightcb'][ss2.colspan[:-1]])\n if maxsubr2 > maxsubr:\n maxsubr = maxsubr2\n for i in ss1.colspan[1:]:\n lg1.edit_margin_min('left', maxsubl, cell=i)\n for i in ss1.colspan[:-1]:\n lg1.edit_margin_min('right', maxsubr, cell=i)\n\n # interior rows:\n if len(ss1.rowspan) > 1:\n maxsubt = np.max(\n lg1.margin_vals['top'][ss1.rowspan[1:]] +\n 
lg1.margin_vals['topcb'][ss1.rowspan[1:]]\n )\n maxsubb = np.max(\n lg1.margin_vals['bottom'][ss1.rowspan[:-1]] +\n lg1.margin_vals['bottomcb'][ss1.rowspan[:-1]]\n )\n\n for ax2 in axs:\n ss2 = ax2.get_subplotspec()\n lg2 = layoutgrids[ss2.get_gridspec()]\n if lg2 is not None:\n if len(ss2.rowspan) > 1:\n maxsubt = np.max([np.max(\n lg2.margin_vals['top'][ss2.rowspan[1:]] +\n lg2.margin_vals['topcb'][ss2.rowspan[1:]]\n ), maxsubt])\n maxsubb = np.max([np.max(\n lg2.margin_vals['bottom'][ss2.rowspan[:-1]] +\n lg2.margin_vals['bottomcb'][ss2.rowspan[:-1]]\n ), maxsubb])\n for i in ss1.rowspan[1:]:\n lg1.edit_margin_min('top', maxsubt, cell=i)\n for i in ss1.rowspan[:-1]:\n lg1.edit_margin_min('bottom', maxsubb, cell=i)\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 26, "n_whitespaces": 1147, "n_words": 190, "vocab_size": 91, "complexity": 21, "nloc": 64, "token_counts": 623, "n_ast_nodes": 986, "n_identifiers": 33, "d_id": 23586, "documentation": { "docstring": "\n Make the margins that are submerged inside an Axes the same size.\n\n This allows axes that span two columns (or rows) that are offset\n from one another to have the same size.\n\n This gives the proper layout for something like::\n fig = plt.figure(constrained_layout=True)\n axs = fig.subplot_mosaic(\"AAAB\\nCCDD\")\n\n Without this routine, the axes D will be wider than C, because the\n margin width between the two columns in C has no width by default,\n whereas the margins between the two columns of D are set by the\n width of the margin between A and B. However, obviously the user would\n like C and D to be the same size, so we need to add constraints to these\n \"submerged\" margins.\n\n This routine makes all the interior margins the same, and the spacing\n between the three columns in A and the two column in C are all set to the\n margins between the two columns of D.\n\n See test_constrained_layout::test_constrained_layout12 for an example.\n ", "n_words": 158, "vocab_size": 87, "n_whitespaces": 218, "language": "en" } }, { "id": 67600, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/doctype/delivery_note/delivery_note.py", "file_name": "delivery_note.py", "fun_name": "get_returned_qty_map", "commit_message": "style: format code with black", "code": "def get_returned_qty_map(delivery_note):\n\t\n\treturned_qty_map = frappe._dict(\n\t\tfrappe.db.sql(\n\t\t\t,\n\t\t\tdelivery_note,\n\t\t)\n\t)\n\n\treturn returned_qty_map\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 4, "n_words": 13, "vocab_size": 11, "complexity": 1, "nloc": 14, "token_counts": 26, "n_ast_nodes": 56, "n_identifiers": 8, "d_id": 14572, "documentation": { "docstring": "returns a map: {so_detail: returned_qty}select dn_item.dn_detail, abs(dn_item.qty) as qty\n\t\tfrom `tabDelivery Note Item` dn_item, `tabDelivery Note` dn\n\t\twhere dn.name = dn_item.parent\n\t\t\tand dn.docstatus = 1\n\t\t\tand dn.is_return = 1\n\t\t\tand dn.return_against = %s\n\t", "n_words": 33, "vocab_size": 26, "n_whitespaces": 27, "language": "en" } }, { "id": 107199, "commit_id": "b24acb7772e0534f4bcdb0b5b492d9d94954dd91", "repo": "matplotlib", "path": "lib/matplotlib/lines.py", "file_name": "lines.py", "fun_name": "set_dash_capstyle", "commit_message": "DOC: Document default cap styles\n\n- remove '(default)' 
from cap style demo as this is only true for Line2D\n and the default rcParameters\n- document default cap styles for Line2D and Patch in their cap style\n setters\n- document default cap style for GraphicsContextBase in the same way as\n it's already done for joinstyle", "code": "def set_dash_capstyle(self, s):\n \n cs = CapStyle(s)\n if self._dashcapstyle != cs:\n self.stale = True\n self._dashcapstyle = cs\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 55, "n_words": 16, "vocab_size": 12, "complexity": 2, "nloc": 5, "token_counts": 31, "n_ast_nodes": 52, "n_identifiers": 7, "d_id": 22639, "documentation": { "docstring": "\n How to draw the end caps if the line is `~Line2D.is_dashed`.\n\n The default capstyle is :rc:`lines.dash_capstyle`.\n\n Parameters\n ----------\n s : `.CapStyle` or %(CapStyle)s\n ", "n_words": 23, "vocab_size": 21, "n_whitespaces": 66, "language": "en" } }, { "id": 40164, "commit_id": "c3c84b9ecf16bcc61ed80ec39d511af92fe07f2c", "repo": "dash", "path": "dash/_get_paths.py", "file_name": "_get_paths.py", "fun_name": "app_get_relative_path", "commit_message": "f-strings everywhere! fffff", "code": "def app_get_relative_path(requests_pathname, path):\n if requests_pathname == \"/\" and path == \"\":\n return \"/\"\n if requests_pathname != \"/\" and path == \"\":\n return requests_pathname\n if not path.startswith(\"/\"):\n raise exceptions.UnsupportedRelativePath(\n f\n )\n return \"/\".join([requests_pathname.rstrip(\"/\"), path.lstrip(\"/\")])\n\n", "url": "https://github.com/plotly/dash.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 83, "n_words": 33, "vocab_size": 20, "complexity": 6, "nloc": 13, "token_counts": 67, "n_ast_nodes": 127, "n_identifiers": 9, "d_id": 7331, "documentation": { "docstring": "\n Paths that aren't prefixed with a leading / are not supported.\n You supplied: {path}\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 48, "language": "en" } }, { "id": 154583, "commit_id": "e5b1888cd932909e49194d58035da34b210b91c4", "repo": "modin", "path": "modin/experimental/core/execution/native/implementations/hdk_on_native/df_algebra.py", "file_name": "df_algebra.py", "fun_name": "translate_exprs_to_base", "commit_message": "FEAT-#4946: Replace OmniSci with HDK (#4947)\n\nCo-authored-by: Iaroslav Igoshev \r\nSigned-off-by: Andrey Pavlenko ", "code": "def translate_exprs_to_base(exprs, base):\n \n new_exprs = dict(exprs)\n\n frames = set()\n for k, v in new_exprs.items():\n v.collect_frames(frames)\n frames.discard(base)\n\n while len(frames) > 0:\n mapper = InputMapper()\n new_frames = set()\n for frame in frames:\n frame_base = frame._op.input[0]\n if frame_base != base:\n new_frames.add(frame_base)\n assert isinstance(frame._op, TransformNode)\n mapper.add_mapper(frame, TransformMapper(frame._op))\n\n for k, v in new_exprs.items():\n new_expr = new_exprs[k].translate_input(mapper)\n new_expr.collect_frames(new_frames)\n new_exprs[k] = new_expr\n\n new_frames.discard(base)\n frames = new_frames\n\n res = OrderedDict()\n for col in exprs.keys():\n res[col] = new_exprs[col]\n return res\n\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 246, "n_words": 71, "vocab_size": 47, "complexity": 7, "nloc": 25, "token_counts": 176, "n_ast_nodes": 282, "n_identifiers": 31, "d_id": 36093, "documentation": { 
"docstring": "\n Fold expressions.\n\n Fold expressions with their input nodes until `base`\n frame is the only input frame.\n\n Parameters\n ----------\n exprs : dict\n Expressions to translate.\n base : HdkOnNativeDataframe\n Required input frame for translated expressions.\n\n Returns\n -------\n dict\n Translated expressions.\n ", "n_words": 38, "vocab_size": 30, "n_whitespaces": 93, "language": "en" } }, { "id": 128247, "commit_id": "65d0c0aa48be8f9f7faae857d3ab71444997755a", "repo": "ray", "path": "python/ray/serve/controller.py", "file_name": "controller.py", "fun_name": "get_http_proxy_names", "commit_message": "[Serve] add alpha gRPC support (#28175)", "code": "def get_http_proxy_names(self) -> bytes:\n \n if self.http_state is None:\n return None\n\n from ray.serve.generated.serve_pb2 import ActorNameList\n\n actor_name_list = ActorNameList(\n names=self.http_state.get_http_proxy_names().values()\n )\n return actor_name_list.SerializeToString()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 85, "n_words": 21, "vocab_size": 20, "complexity": 2, "nloc": 9, "token_counts": 51, "n_ast_nodes": 83, "n_identifiers": 13, "d_id": 28646, "documentation": { "docstring": "Returns the http_proxy actor name list serialized by protobuf.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 134143, "commit_id": "02f911ce78137cb63ecb685a8ef8e56dcb60062c", "repo": "ray", "path": "release/air_tests/air_benchmarks/mlperf-train/resnet50_ray_air.py", "file_name": "resnet50_ray_air.py", "fun_name": "decode_crop_and_flip_tf_record_batch", "commit_message": "Benchmarking Ray Data bulk ingest as input file size changes. (#29296)\n\nThis PR adds a benchmark which takes work from https://github.com/anyscale/air-benchmarks and makes it run as a release test.\r\n\r\nFull metrics are stored in Databricks.\r\n\r\nSigned-off-by: Cade Daniel ", "code": "def decode_crop_and_flip_tf_record_batch(tf_record_batch):\n \n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 5, "token_counts": 45, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 30203, "documentation": { "docstring": "\n This version of the preprocessor fuses the load step with the crop and flip\n step, which should have better performance (at the cost of re-executing the\n load step on each epoch):\n - the reference tf.data implementation can use the fused decode_and_crop op\n - ray.data doesn't have to materialize the intermediate decoded batch.\n ", "n_words": 52, "vocab_size": 40, "n_whitespaces": 71, "language": "en" } }, { "id": 3696, "commit_id": "e3e05d79655bc0b2b4c3fbc0c05b8d90fce6dcd8", "repo": "airbyte", "path": "airbyte-integrations/connectors/tasks.py", "file_name": "tasks.py", "fun_name": "all_checks", "commit_message": "update code-checkers config (#9707)\n\nfix `all-checks` command\r\n\r\nSigned-off-by: Sergei Solonitcyn ", "code": "def all_checks(ctx, connectors=None): # type: ignore[no-untyped-def]\n \n tasks = (\n black,\n flake,\n isort,\n mypy,\n coverage,\n )\n for task_ in tasks:\n try:\n task_(ctx, connectors=connectors)\n except Exit as e:\n if e.code:\n raise\n\n\n@task(help={\"connectors\": _arg_help_connectors, \"write\": \"Write changes into the files (runs 'black' without '--check' option)\"})", "url": "https://github.com/airbytehq/airbyte.git", 
"language": "Python", "ast_errors": "@task(help={\"connectors\": _arg_help_connectors, \"write\": \"Write changes into the files (runs 'black' without '--check' option)\"})", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 140, "n_words": 42, "vocab_size": 42, "complexity": 4, "nloc": 14, "token_counts": 50, "n_ast_nodes": 107, "n_identifiers": 16, "d_id": 518, "documentation": { "docstring": "\n Run following checks one by one with default parameters: black, flake, isort, mypy, test, coverage.\n Zero exit code indicates about successful passing of all checks.\n Terminate on the first non-zero exit code.\n ", "n_words": 32, "vocab_size": 30, "n_whitespaces": 45, "language": "en" } }, { "id": 63403, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", "file_name": "pyparsing.py", "fun_name": "parseString", "commit_message": "upd; format", "code": "def parseString(self, instring, parseAll=False):\n \n ParserElement.resetCache()\n if not self.streamlined:\n self.streamline()\n # ~ self.saveAsList = True\n for e in self.ignoreExprs:\n e.streamline()\n if not self.keepTabs:\n instring = instring.expandtabs()\n try:\n loc, tokens = self._parse(instring, 0)\n if parseAll:\n loc = self.preParse(instring, loc)\n se = Empty() + StringEnd()\n se._parse(instring, loc)\n except ParseBaseException as exc:\n if ParserElement.verbose_stacktrace:\n raise\n else:\n # catch and re-raise exception from here, clearing out pyparsing internal stack trace\n if getattr(exc, '__traceback__', None) is not None:\n exc.__traceback__ = self._trim_traceback(exc.__traceback__)\n raise exc\n else:\n return tokens\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 359, "n_words": 80, "vocab_size": 64, "complexity": 8, "nloc": 23, "token_counts": 141, "n_ast_nodes": 233, "n_identifiers": 25, "d_id": 13288, "documentation": { "docstring": "\n Execute the parse expression with the given string.\n This is the main interface to the client code, once the complete\n expression has been built.\n\n Returns the parsed data as a :class:`ParseResults` object, which may be\n accessed as a list, or as a dict or object with attributes if the given parser\n includes results names.\n\n If you want the grammar to require that the entire input string be\n successfully parsed, then set ``parseAll`` to True (equivalent to ending\n the grammar with ``StringEnd()``).\n\n Note: ``parseString`` implicitly calls ``expandtabs()`` on the input string,\n in order to report proper column numbers in parse actions.\n If the input string contains tabs and\n the grammar uses parse actions that use the ``loc`` argument to index into the\n string being parsed, you can ensure you have a consistent view of the input\n string by:\n\n - calling ``parseWithTabs`` on your grammar before calling ``parseString``\n (see :class:`parseWithTabs`)\n - define your parse action using the full ``(s, loc, toks)`` signature, and\n reference the input string using the parse action's ``s`` argument\n - explictly expand the tabs in your input string before calling\n ``parseString``\n\n Example::\n\n Word('a').parseString('aaaaabaaa') # -> ['aaaaa']\n Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text\n ", "n_words": 197, "vocab_size": 121, "n_whitespaces": 389, "language": "en" } }, { "id": 139481, "commit_id": 
"bc3a1d35cf6e9a5fd7eef908a8e76aefb80ce6a9", "repo": "ray", "path": "rllib/policy/dynamic_tf_policy_v2.py", "file_name": "dynamic_tf_policy_v2.py", "fun_name": "_create_input_dict_and_dummy_batch", "commit_message": "[RLlib] Introduce new policy base classes. (#24742)", "code": "def _create_input_dict_and_dummy_batch(self, view_requirements, existing_inputs):\n \n input_dict = {}\n for view_col, view_req in view_requirements.items():\n # Point state_in to the already existing self._state_inputs.\n mo = re.match(\"state_in_(\\d+)\", view_col)\n if mo is not None:\n input_dict[view_col] = self._state_inputs[int(mo.group(1))]\n # State-outs (no placeholders needed).\n elif view_col.startswith(\"state_out_\"):\n continue\n # Skip action dist inputs placeholder (do later).\n elif view_col == SampleBatch.ACTION_DIST_INPUTS:\n continue\n # This is a tower: Input placeholders already exist.\n elif view_col in existing_inputs:\n input_dict[view_col] = existing_inputs[view_col]\n # All others.\n else:\n time_axis = not isinstance(view_req.shift, int)\n if view_req.used_for_training:\n # Create a +time-axis placeholder if the shift is not an\n # int (range or list of ints).\n # Do not flatten actions if action flattening disabled.\n if self.config.get(\"_disable_action_flattening\") and view_col in [\n SampleBatch.ACTIONS,\n SampleBatch.PREV_ACTIONS,\n ]:\n flatten = False\n # Do not flatten observations if no preprocessor API used.\n elif (\n view_col in [SampleBatch.OBS, SampleBatch.NEXT_OBS]\n and self.config[\"_disable_preprocessor_api\"]\n ):\n flatten = False\n # Flatten everything else.\n else:\n flatten = True\n input_dict[view_col] = get_placeholder(\n space=view_req.space,\n name=view_col,\n time_axis=time_axis,\n flatten=flatten,\n )\n dummy_batch = self._get_dummy_batch_from_view_requirements(batch_size=32)\n\n return SampleBatch(input_dict, seq_lens=self._seq_lens), dummy_batch\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 891, "n_words": 164, "vocab_size": 107, "complexity": 11, "nloc": 35, "token_counts": 214, "n_ast_nodes": 346, "n_identifiers": 36, "d_id": 31711, "documentation": { "docstring": "Creates input_dict and dummy_batch for loss initialization.\n\n Used for managing the Policy's input placeholders and for loss\n initialization.\n Input_dict: Str -> tf.placeholders, dummy_batch: str -> np.arrays.\n\n Args:\n view_requirements (ViewReqs): The view requirements dict.\n existing_inputs (Dict[str, tf.placeholder]): A dict of already\n existing placeholders.\n\n Returns:\n Tuple[Dict[str, tf.placeholder], Dict[str, np.ndarray]]: The\n input_dict/dummy_batch tuple.\n ", "n_words": 50, "vocab_size": 43, "n_whitespaces": 155, "language": "en" } }, { "id": 216569, "commit_id": "06aeefffad82d8f5db43b4429aeae87bad735acf", "repo": "salt", "path": "salt/fileserver/hgfs.py", "file_name": "hgfs.py", "fun_name": "lock", "commit_message": "Don't leak sub-processes\n\nSigned-off-by: Pedro Algarvio ", "code": "def lock(remote=None):\n \n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 8, "nloc": 21, "token_counts": 115, "n_ast_nodes": 16, "n_identifiers": 2, "d_id": 54644, "documentation": { "docstring": "\n Place an update.lk\n\n ``remote`` can either be a dictionary containing repo configuration\n information, or a pattern. 
If the latter, then remotes for which the URL\n matches the pattern will be locked.\n ", "n_words": 31, "vocab_size": 27, "n_whitespaces": 47, "language": "en" } }, { "id": 244075, "commit_id": "8d7da432af02a52bc5330b30997984335d0930a4", "repo": "mmdetection", "path": "mmdet/utils/logger.py", "file_name": "logger.py", "fun_name": "log_img_scale", "commit_message": "Update YOLOX log for non square input (#7235)", "code": "def log_img_scale(img_scale, shape_order='hw', skip_square=False):\n \n if shape_order == 'hw':\n height, width = img_scale\n elif shape_order == 'wh':\n width, height = img_scale\n else:\n raise ValueError(f'Invalid shape_order {shape_order}.')\n\n if skip_square and (height == width):\n return False\n\n logger = get_root_logger()\n caller = get_caller_name()\n logger.info(f'image shape: height={height}, width={width} in {caller}')\n\n return True\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 102, "n_words": 47, "vocab_size": 37, "complexity": 5, "nloc": 13, "token_counts": 72, "n_ast_nodes": 141, "n_identifiers": 12, "d_id": 70229, "documentation": { "docstring": "Log image size.\n\n Args:\n img_scale (tuple): Image size to be logged.\n shape_order (str, optional): The order of image shape.\n 'hw' for (height, width) and 'wh' for (width, height).\n Defaults to 'hw'.\n skip_square (bool, optional): Whether to skip logging for square\n img_scale. Defaults to False.\n\n Returns:\n bool: Whether to have done logging.\n ", "n_words": 51, "vocab_size": 41, "n_whitespaces": 121, "language": "en" } }, { "id": 265839, "commit_id": "ffce5d968d8a77c97852999b6ef916e80c1de55f", "repo": "netbox", "path": "netbox/netbox/search/__init__.py", "file_name": "__init__.py", "fun_name": "get_category", "commit_message": "8927 plugin search (#10489)\n\n* #7016 base search classes\r\n\r\n* 7016 add search indexes\r\n\r\n* 7016 add search indexes\r\n\r\n* 7016 add search indexes\r\n\r\n* 7016 add search indexes\r\n\r\n* 7016 add search indexes\r\n\r\n* 7016 add search indexes\r\n\r\n* 8927 refactor search\r\n\r\n* 8927 refactor search\r\n\r\n* 8927 refactor search\r\n\r\n* 8927 refactor search\r\n\r\n* 8927 get search choices working\r\n\r\n* 8927 cleanup - optimize\r\n\r\n* 8927 use backend search function\r\n\r\n* 8927 fix for plugin search\r\n\r\n* 8927 add docs\r\n\r\n* Move search app to a module under netbox/\r\n\r\n* Utilize global registry to register model search classes\r\n\r\n* Build search form options from registry\r\n\r\n* Determine search categories from model app by default\r\n\r\n* Enable dynamic search registration for plugins\r\n\r\n* Update docs & improve plugin support\r\n\r\n* Clean up search backend class\r\n\r\n* Docs for #8927\r\n\r\nCo-authored-by: jeremystretch ", "code": "def get_category(cls):\n \n if hasattr(cls, 'category'):\n return cls.category\n return cls.model._meta.app_config.verbose_name\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 41, "n_words": 9, "vocab_size": 8, "complexity": 2, "nloc": 4, "token_counts": 28, "n_ast_nodes": 48, "n_identifiers": 8, "d_id": 78210, "documentation": { "docstring": "\n Return the title of the search category under which this model is registered.\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 28, "language": "en" } }, { "id": 107073, "commit_id": "7603999becc00d39f7616ed20e426e776144cda1", "repo": 
"matplotlib", "path": "lib/matplotlib/widgets.py", "file_name": "widgets.py", "fun_name": "_get_animated_artists", "commit_message": "Improve docstring and add comments", "code": "def _get_animated_artists(self):\n \n return tuple(a for ax_ in self.ax.get_figure().get_axes()\n for a in ax_.get_children()\n if a.get_animated() and a not in self.artists)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 73, "n_words": 19, "vocab_size": 15, "complexity": 5, "nloc": 4, "token_counts": 46, "n_ast_nodes": 75, "n_identifiers": 11, "d_id": 22587, "documentation": { "docstring": "\n Convenience method to get all animated artists of the figure containing\n this widget, excluding those already present in self.artists.\n The returned tuple is not sorted by 'z_order': z_order sorting is\n valid only when considering all artists and not only a subset of all\n artists.\n ", "n_words": 44, "vocab_size": 37, "n_whitespaces": 87, "language": "en" } }, { "id": 181853, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tpot/builtins/feature_set_selector.py", "file_name": "feature_set_selector.py", "fun_name": "fit", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def fit(self, X, y=None):\n \n subset_df = pd.read_csv(self.subset_list, header=0, index_col=0)\n\n if isinstance(self.sel_subset, int):\n self.sel_subset_name = subset_df.index[self.sel_subset]\n elif isinstance(self.sel_subset, str):\n self.sel_subset_name = self.sel_subset\n else: # list or tuple\n self.sel_subset_name = []\n for s in self.sel_subset:\n if isinstance(s, int):\n self.sel_subset_name.append(subset_df.index[s])\n else:\n self.sel_subset_name.append(s)\n\n\n sel_features = subset_df.loc[self.sel_subset_name, 'Features']\n if not isinstance(sel_features, str):\n sel_features = \";\".join(sel_features.tolist())\n\n sel_uniq_features = set(sel_features.split(';'))\n\n if isinstance(X, pd.DataFrame): # use columns' names\n self.feature_names = list(X.columns.values)\n self.feat_list = sorted(list(set(sel_uniq_features).intersection(set(self.feature_names))))\n self.feat_list_idx = [list(X.columns).index(feat_name) for feat_name in self.feat_list]\n elif isinstance(X, np.ndarray): # use index\n self.feature_names = list(range(X.shape[1]))\n sel_uniq_features = [int(val) for val in sel_uniq_features]\n self.feat_list = sorted(list(set(sel_uniq_features).intersection(set(self.feature_names))))\n self.feat_list_idx = self.feat_list\n\n if not len(self.feat_list):\n raise ValueError('No feature is found on the subset list!')\n return self\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 404, "n_words": 109, "vocab_size": 69, "complexity": 11, "nloc": 29, "token_counts": 313, "n_ast_nodes": 500, "n_identifiers": 42, "d_id": 43622, "documentation": { "docstring": "Fit FeatureSetSelector for feature selection\n\n Parameters\n ----------\n X: array-like of shape (n_samples, n_features)\n The training input samples.\n y: array-like, shape (n_samples,)\n The target values (integers that correspond to classes in classification, real numbers in regression).\n\n Returns\n -------\n self: object\n Returns a copy of the estimator\n ", "n_words": 45, "vocab_size": 40, "n_whitespaces": 134, "language": "en" } }, { "id": 
231617, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/_modebar.py", "file_name": "_modebar.py", "fun_name": "removesrc", "commit_message": "switch to black .22", "code": "def removesrc(self):\n \n return self[\"removesrc\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 63061, "documentation": { "docstring": "\n Sets the source reference on Chart Studio Cloud for `remove`.\n\n The 'removesrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n ", "n_words": 27, "vocab_size": 25, "n_whitespaces": 77, "language": "en" } }, { "id": 137052, "commit_id": "ed3f3c08225c7049a96a290b586c67c37e2d0bc0", "repo": "ray", "path": "rllib/policy/policy_map.py", "file_name": "policy_map.py", "fun_name": "__len__", "commit_message": "[RLlib] PolicyMap LRU cache enhancements: Swap out policies (instead of GC'ing and recreating) + use Ray object store (instead of file system). (#29513)", "code": "def __len__(self) -> int:\n \n return len(self._valid_keys)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 15, "n_ast_nodes": 27, "n_identifiers": 5, "d_id": 31064, "documentation": { "docstring": "Returns number of all policies, including the stashed-to-disk ones.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 74351, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/tests/test_page_model.py", "file_name": "test_page_model.py", "fun_name": "test_copy_keep_live_false_not_emits_signal", "commit_message": "Reformat with black", "code": "def test_copy_keep_live_false_not_emits_signal(self):\n \n homepage = Page.objects.get(url_path=\"/home/\")\n signal_fired = False\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 29, "n_words": 8, "vocab_size": 7, "complexity": 1, "nloc": 7, "token_counts": 51, "n_ast_nodes": 39, "n_identifiers": 8, "d_id": 16249, "documentation": { "docstring": "Test that copying of a live page with keep_live=False not emits a page_published signal.", "n_words": 14, "vocab_size": 13, "n_whitespaces": 13, "language": "en" } }, { "id": 269416, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/applications/regnet.py", "file_name": "regnet.py", "fun_name": "SqueezeAndExciteBlock", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def SqueezeAndExciteBlock(filters_in, se_filters, name=None):\n \n if name is None:\n name = str(backend.get_uid(\"squeeze_and_excite\"))\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 24, "n_words": 11, "vocab_size": 10, "complexity": 2, "nloc": 5, "token_counts": 32, "n_ast_nodes": 48, "n_identifiers": 7, "d_id": 80065, "documentation": { "docstring": "Implements the Squeeze and excite block (https://arxiv.org/abs/1709.01507).\n\n Args:\n filters_in: input filters to the 
block\n se_filters: filters to squeeze to\n name: name prefix\n\n Returns:\n A function object\n ", "n_words": 26, "vocab_size": 21, "n_whitespaces": 55, "language": "en" } }, { "id": 167519, "commit_id": "cd2b8196c519b58c82cccdb8472d86c1c58f9511", "repo": "pandas", "path": "pandas/core/frame.py", "file_name": "frame.py", "fun_name": "_sanitize_column", "commit_message": "BUG: DataFrame.loc not aligning dict when setting to a column (#47361)\n\n* BUG: DataFrame.loc not aligning dict when setting to a column\r\n\r\n* Add partial case\r\n\r\n* Use is_dict_like\r\n\r\n* Revert \"Use is_dict_like\"\r\n\r\nThis reverts commit d2708512b751c6470690fdff0abc9c99404dc002.", "code": "def _sanitize_column(self, value) -> ArrayLike:\n \n self._ensure_valid_index(value)\n\n # We should never get here with DataFrame value\n if isinstance(value, Series):\n return _reindex_for_setitem(value, self.index)\n elif isinstance(value, dict):\n return _reindex_for_setitem(Series(value), self.index)\n\n if is_list_like(value):\n com.require_length_match(value, self.index)\n return sanitize_array(value, self.index, copy=True, allow_2d=True)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 118, "n_words": 36, "vocab_size": 30, "complexity": 4, "nloc": 21, "token_counts": 86, "n_ast_nodes": 132, "n_identifiers": 16, "d_id": 40030, "documentation": { "docstring": "\n Ensures new columns (which go into the BlockManager as new blocks) are\n always copied and converted into an array.\n\n Parameters\n ----------\n value : scalar, Series, or array-like\n\n Returns\n -------\n numpy.ndarray or ExtensionArray\n ", "n_words": 32, "vocab_size": 29, "n_whitespaces": 96, "language": "en" } }, { "id": 67568, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/startup/leaderboard.py", "file_name": "leaderboard.py", "fun_name": "get_all_items", "commit_message": "style: format code with black", "code": "def get_all_items(date_range, company, field, limit=None):\n\tif field in (\"available_stock_qty\", \"available_stock_value\"):\n\t\tselect_field = \"sum(actual_qty)\" if field == \"available_stock_qty\" else \"sum(stock_value)\"\n\t\treturn frappe.db.get_all(\n\t\t\t\"Bin\",\n\t\t\tfields=[\"item_code as name\", \"{0} as value\".format(select_field)],\n\t\t\tgroup_by=\"item_code\",\n\t\t\torder_by=\"value desc\",\n\t\t\tlimit=limit,\n\t\t)\n\telse:\n\t\tif field == \"total_sales_amount\":\n\t\t\tselect_field = \"sum(order_item.base_net_amount)\"\n\t\t\tselect_doctype = \"Sales Order\"\n\t\telif field == \"total_purchase_amount\":\n\t\t\tselect_field = \"sum(order_item.base_net_amount)\"\n\t\t\tselect_doctype = \"Purchase Order\"\n\t\telif field == \"total_qty_sold\":\n\t\t\tselect_field = \"sum(order_item.stock_qty)\"\n\t\t\tselect_doctype = \"Sales Order\"\n\t\telif field == \"total_qty_purchased\":\n\t\t\tselect_field = \"sum(order_item.stock_qty)\"\n\t\t\tselect_doctype = \"Purchase Order\"\n\n\t\tdate_condition = get_date_condition(date_range, \"sales_order.transaction_date\")\n\n\t\treturn frappe.db.sql(\n\t\t\t.format(\n\t\t\t\tselect_field, select_doctype, date_condition\n\t\t\t),\n\t\t\t(company, cint(limit)),\n\t\t\tas_dict=1,\n\t\t) # nosec\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 65, "n_words": 96, "vocab_size": 56, "complexity": 7, 
"nloc": 40, "token_counts": 152, "n_ast_nodes": 284, "n_identifiers": 20, "d_id": 14559, "documentation": { "docstring": "\n\t\t\tselect order_item.item_code as name, {0} as value\n\t\t\tfrom `tab{1}` sales_order join `tab{1} Item` as order_item\n\t\t\t\ton sales_order.name = order_item.parent\n\t\t\twhere sales_order.docstatus = 1\n\t\t\t\tand sales_order.company = %s {2}\n\t\t\tgroup by order_item.item_code\n\t\t\torder by value desc\n\t\t\tlimit %s\n\t\t", "n_words": 37, "vocab_size": 29, "n_whitespaces": 29, "language": "en" } }, { "id": 169226, "commit_id": "5c66e65d7b9fef47ccb585ce2fd0b3ea18dc82ea", "repo": "pandas", "path": "pandas/core/indexes/multi.py", "file_name": "multi.py", "fun_name": "_get_reconciled_name_object", "commit_message": "TYP: type all arguments with bool default values (#48624)\n\n* TYP: type all arguments with bool default values\r\n\r\n* bool_t\r\n\r\n* ignore type error in pandas/core/arrays/sparse/accessor.py", "code": "def _get_reconciled_name_object(self, other) -> MultiIndex:\n \n names = self._maybe_match_names(other)\n if self.names != names:\n # error: Cannot determine type of \"rename\"\n return self.rename(names) # type: ignore[has-type]\n return self\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 77, "n_words": 26, "vocab_size": 24, "complexity": 2, "nloc": 10, "token_counts": 34, "n_ast_nodes": 58, "n_identifiers": 7, "d_id": 40404, "documentation": { "docstring": "\n If the result of a set operation will be self,\n return self, unless the names change, in which\n case make a shallow copy of self.\n ", "n_words": 25, "vocab_size": 21, "n_whitespaces": 54, "language": "en" } }, { "id": 196312, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/geometry/polygon.py", "file_name": "polygon.py", "fun_name": "is_equilateral", "commit_message": "Updated import locations", "code": "def is_equilateral(self):\n \n return not has_variety(s.length for s in self.sides)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 2, "token_counts": 20, "n_ast_nodes": 34, "n_identifiers": 6, "d_id": 47812, "documentation": { "docstring": "Are all the sides the same length?\n\n Returns\n =======\n\n is_equilateral : boolean\n\n See Also\n ========\n\n sympy.geometry.entity.GeometryEntity.is_similar, RegularPolygon\n is_isosceles, is_right, is_scalene\n\n Examples\n ========\n\n >>> from sympy import Triangle, Point\n >>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(4, 3))\n >>> t1.is_equilateral()\n False\n\n >>> from sympy import sqrt\n >>> t2 = Triangle(Point(0, 0), Point(10, 0), Point(5, 5*sqrt(3)))\n >>> t2.is_equilateral()\n True\n\n ", "n_words": 57, "vocab_size": 41, "n_whitespaces": 183, "language": "en" } }, { "id": 217444, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ftplib.py", "file_name": "ftplib.py", "fun_name": "makeport", "commit_message": "add python 3.10.4 for windows", "code": "def makeport(self):\n \n sock = socket.create_server((\"\", 0), family=self.af, backlog=1)\n port = sock.getsockname()[1] # Get proper port\n host = self.sock.getsockname()[0] # Get proper host\n if self.af == socket.AF_INET:\n resp = self.sendport(host, port)\n else:\n resp = self.sendeprt(host, port)\n if self.timeout is not 
_GLOBAL_DEFAULT_TIMEOUT:\n sock.settimeout(self.timeout)\n return sock\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 132, "n_words": 43, "vocab_size": 30, "complexity": 3, "nloc": 11, "token_counts": 99, "n_ast_nodes": 159, "n_identifiers": 18, "d_id": 54789, "documentation": { "docstring": "Create a new socket and send a PORT command for it.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 160860, "commit_id": "f9bed20bffd88bce06dbc8be200179edfe7580a4", "repo": "numpy", "path": "numpy/lib/tests/test_arraysetops.py", "file_name": "test_arraysetops.py", "fun_name": "test_in1d_mixed_boolean", "commit_message": "BUG: Fix numpy.isin for timedelta dtype (#21860)\n\nThis PR fixes the issue discussed on #12065 and #21843 where 'timedelta64' was noted to be a subtype of numpy.integer. This in principle should detect any cases where int(np.min(ar2)) fails. This PR also adds unittests for these.\r\n\r\n* TST: Create in1d test for timedelta input\r\n\r\n* MAINT: fix in1d for timedelta input\r\n\r\n* TST: in1d raise ValueError for timedelta input\r\n\r\n* MAINT: Clean up type checking for isin kind=\"table\"\r\n\r\n* TST: Add test for mixed boolean/integer in1d\r\n\r\n* MAINT: Increase readability of in1d type checking\r\n\r\n* STY: Apply small code style tweaks\r\n\r\nThis is probably really mainly my personal opinion...\r\n\r\nCo-authored-by: Sebastian Berg ", "code": "def test_in1d_mixed_boolean(self, kind):\n \n for dtype in np.typecodes[\"AllInteger\"]:\n a = np.array([True, False, False], dtype=bool)\n b = np.array([1, 1, 1, 1], dtype=dtype)\n expected = np.array([True, False, False], dtype=bool)\n assert_array_equal(in1d(a, b, kind=kind), expected)\n\n a, b = b, a\n expected = np.array([True, True, True, True], dtype=bool)\n assert_array_equal(in1d(a, b, kind=kind), expected)\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 137, "n_words": 46, "vocab_size": 26, "complexity": 2, "nloc": 9, "token_counts": 131, "n_ast_nodes": 188, "n_identifiers": 13, "d_id": 38763, "documentation": { "docstring": "Test that in1d works as expected for bool/int input.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 261971, "commit_id": "8d85af84cd5f1748f979fddcbc4aab1449f61ecb", "repo": "TTS", "path": "TTS/tts/utils/text/punctuation.py", "file_name": "punctuation.py", "fun_name": "strip", "commit_message": "Implement Punctuation class", "code": "def strip(self, text):\n \n return re.sub(self.puncs_regular_exp, \" \", text).strip()\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 25, "n_ast_nodes": 42, "n_identifiers": 6, "d_id": 77086, "documentation": { "docstring": "Remove all the punctuations by replacing with `space`.\n\n Args:\n text (str): The text to be processed.\n\n Example::\n\n \"This is. 
example !\" -> \"This is example \"\n ", "n_words": 26, "vocab_size": 23, "n_whitespaces": 69, "language": "en" } }, { "id": 221111, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/bdb.py", "file_name": "bdb.py", "fun_name": "dispatch_line", "commit_message": "add python 3.10.4 for windows", "code": "def dispatch_line(self, frame):\n \n if self.stop_here(frame) or self.break_here(frame):\n self.user_line(frame)\n if self.quitting: raise BdbQuit\n return self.trace_dispatch\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 57, "n_words": 14, "vocab_size": 13, "complexity": 4, "nloc": 5, "token_counts": 40, "n_ast_nodes": 66, "n_identifiers": 9, "d_id": 56214, "documentation": { "docstring": "Invoke user function and return trace function for line event.\n\n If the debugger stops on the current line, invoke\n self.user_line(). Raise BdbQuit if self.quitting is set.\n Return self.trace_dispatch to continue tracing in this scope.\n ", "n_words": 34, "vocab_size": 32, "n_whitespaces": 62, "language": "en" } }, { "id": 202517, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/custom_methods/models.py", "file_name": "models.py", "fun_name": "articles_from_same_day_2", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def articles_from_same_day_2(self):\n \n from django.db import connection\n\n with connection.cursor() as cursor:\n cursor.execute(\n ,\n [connection.ops.adapt_datefield_value(self.pub_date), self.id],\n )\n return [self.__class__(*row) for row in cursor.fetchall()]\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 105, "n_words": 21, "vocab_size": 21, "complexity": 2, "nloc": 12, "token_counts": 63, "n_ast_nodes": 105, "n_identifiers": 14, "d_id": 50143, "documentation": { "docstring": "\n Verbose version of get_articles_from_same_day_1, which does a custom\n database query for the sake of demonstration.\n \n SELECT id, headline, pub_date\n FROM custom_methods_article\n WHERE pub_date = %s\n AND id != %s", "n_words": 29, "vocab_size": 26, "n_whitespaces": 115, "language": "en" } }, { "id": 291944, "commit_id": "af4e37339a39badd5596e8bc9ba86d6c1994aa1b", "repo": "core", "path": "homeassistant/components/sia/sia_entity_base.py", "file_name": "sia_entity_base.py", "fun_name": "async_create_post_interval_update_cb", "commit_message": "Add Connectivity sensor to SIA (#64305)\n\n* implemented connectivity sensor\r\n\r\n* further cleanup off update code\r\n\r\n* cleanup and tighter behaviour for attributes\r\n\r\n* added seperate connectivity class to binary sensor\r\n\r\n* callbacks and keys\r\n\r\n* redid name and unique_id logic, non-breaking result\r\n\r\n* using entry more in inits\r\n\r\n* Fix import\r\n\r\n* fix ping_interval in sia_entity_base\r\n\r\n* added ping_interval default to next\r\n\r\n* fixed next\r\n\r\nCo-authored-by: Martin Hjelmare ", "code": "def async_create_post_interval_update_cb(self) -> None:\n \n self._post_interval_update_cb_canceller = async_call_later(\n self.hass,\n get_unavailability_interval(self.ping_interval),\n self.async_post_interval_update,\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 65, "n_words": 11, "vocab_size": 11, "complexity": 1, 
"nloc": 7, "token_counts": 30, "n_ast_nodes": 48, "n_identifiers": 8, "d_id": 91047, "documentation": { "docstring": "Create a port interval update cb and store the callback.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 86308, "commit_id": "d745edbd591063f2c3241cd1960c361834058823", "repo": "sentry", "path": "tests/sentry/models/test_groupsnooze.py", "file_name": "test_groupsnooze.py", "fun_name": "test_rate_reached_perf_issue", "commit_message": "ref(perf issues): Enable ignore in a time period (#39120)\n\nEnable ignoring a performance issue in a time period e.g. ignore this\r\nuntil it happens 10x / hr or ignore until 10 users experience it in an\r\nhour.", "code": "def test_rate_reached_perf_issue(self):\n \n snooze = GroupSnooze.objects.create(group=self.perf_group, count=10, window=24 * 60)\n for i in range(0, 10):\n self.store_transaction(\n environment=None,\n project_id=self.project.id,\n user_id=str(i),\n groups=[self.perf_group],\n )\n assert not snooze.is_valid(test_rates=True)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 133, "n_words": 23, "vocab_size": 23, "complexity": 2, "nloc": 10, "token_counts": 82, "n_ast_nodes": 124, "n_identifiers": 22, "d_id": 18097, "documentation": { "docstring": "Test when a performance issue is ignored until it happens 10 times in a day", "n_words": 15, "vocab_size": 14, "n_whitespaces": 14, "language": "en" } }, { "id": 76619, "commit_id": "dcae64c255f2fe97f658b1a3f438d3644b197661", "repo": "wagtail", "path": "wagtail/tests/test_streamfield.py", "file_name": "test_streamfield.py", "fun_name": "test_lazy_load_get_prep_value", "commit_message": "Allow `StreamField` to use `JSONField` internal type via `use_json_field` kwarg\n\nAdd system check for use_json_field in StreamField\n\nChange system check level to Warning\n\nAdd use_json_field argument to StreamField in test models\n\nUse RemovedInWagtail219Warning instead of a system check\n\nHandle unpacked values in to_python when use_json_field is True\n\nDuplicate models and tests for JSONField-based StreamField\n\nAdd basic tests for JSONField-based StreamField\n\nAdd json_field property in StreamField to unify JSONField usage\n\nAdd docs\n\nDon't use destructuring for kwargs in deconstruct\n\nAdd versionchanged note to StreamField reference", "code": "def test_lazy_load_get_prep_value(self):\n \n with self.assertNumQueries(1):\n instance = self.model.objects.get(pk=self.with_image.pk)\n\n # Expect a single UPDATE to update the model, without any additional\n # SELECT related to the image block that has not been accessed.\n with self.assertNumQueries(1):\n instance.save()\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 91, "n_words": 34, "vocab_size": 29, "complexity": 1, "nloc": 5, "token_counts": 45, "n_ast_nodes": 82, "n_identifiers": 10, "d_id": 16555, "documentation": { "docstring": "\n Saving a lazy StreamField that hasn't had its data accessed should not\n cause extra database queries by loading and then re-saving block values.\n Instead the initial JSON stream data should be written back for any\n blocks that have not been accessed.\n ", "n_words": 41, "vocab_size": 37, "n_whitespaces": 77, "language": "en" } }, { "id": 23430, "commit_id": "3c6d551207f8eb5d0916404a7eca5e641887047d", "repo": "PaddleOCR", "path": "PPOCRLabel/PPOCRLabel.py", 
"file_name": "PPOCRLabel.py", "fun_name": "gen_quad_from_poly", "commit_message": "Support multipoint labeling\n\nSupport multipoint labeling", "code": "def gen_quad_from_poly(self, poly):\n \n point_num = poly.shape[0]\n min_area_quad = np.zeros((4, 2), dtype=np.float32)\n rect = cv2.minAreaRect(poly.astype(\n np.int32)) # (center (x,y), (width, height), angle of rotation)\n box = np.array(cv2.boxPoints(rect))\n\n first_point_idx = 0\n min_dist = 1e4\n for i in range(4):\n dist = np.linalg.norm(box[(i + 0) % 4] - poly[0]) + \\\n np.linalg.norm(box[(i + 1) % 4] - poly[point_num // 2 - 1]) + \\\n np.linalg.norm(box[(i + 2) % 4] - poly[point_num // 2]) + \\\n np.linalg.norm(box[(i + 3) % 4] - poly[-1])\n if dist < min_dist:\n min_dist = dist\n first_point_idx = i\n for i in range(4):\n min_area_quad[i] = box[(first_point_idx + i) % 4]\n\n bbox_new = min_area_quad.tolist()\n bbox = []\n\n for box in bbox_new:\n box = list(map(int, box))\n bbox.append(box)\n\n return bbox\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 358, "n_words": 116, "vocab_size": 64, "complexity": 5, "nloc": 24, "token_counts": 192, "n_ast_nodes": 378, "n_identifiers": 32, "d_id": 4591, "documentation": { "docstring": "\n Generate min area quad from poly.\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 21, "language": "en" } }, { "id": 177419, "commit_id": "eae1accf4a4eb8b767e82a45f9aca18785301670", "repo": "networkx", "path": "networkx/algorithms/approximation/connectivity.py", "file_name": "connectivity.py", "fun_name": "all_pairs_node_connectivity", "commit_message": "Added an example in all_pairs_node_connectivity (#6126)\n\n* add missing reference in all_node_cuts flow_func parameter\r\n\r\n* added example to all_pairs_node_connectivity\r\n\r\n* Update networkx/algorithms/approximation/connectivity.py\r\n\r\nAdded suggestion\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Update networkx/algorithms/approximation/connectivity.py\r\n\r\nadded pprint\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Update networkx/algorithms/connectivity/kcutsets.py\r\n\r\nfix linking\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* solved style problems\r\n\r\nCo-authored-by: Ross Barnowski ", "code": "def all_pairs_node_connectivity(G, nbunch=None, cutoff=None):\n \n if nbunch is None:\n nbunch = G\n else:\n nbunch = set(nbunch)\n\n directed = G.is_directed()\n if directed:\n iter_func = itertools.permutations\n else:\n iter_func = itertools.combinations\n\n all_pairs = {n: {} for n in nbunch}\n\n for u, v in iter_func(nbunch, 2):\n k = local_node_connectivity(G, u, v, cutoff=cutoff)\n all_pairs[u][v] = k\n if not directed:\n all_pairs[v][u] = k\n\n return all_pairs\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 145, "n_words": 58, "vocab_size": 37, "complexity": 6, "nloc": 17, "token_counts": 114, "n_ast_nodes": 177, "n_identifiers": 17, "d_id": 42370, "documentation": { "docstring": "Compute node connectivity between all pairs of nodes.\n\n Pairwise or local node connectivity between two distinct and nonadjacent\n nodes is the minimum number of nodes that must be removed (minimum\n separating cutset) to disconnect them. By Menger's theorem, this is equal\n to the number of node independent paths (paths that share no nodes other\n than source and target). 
Which is what we compute in this function.\n\n This algorithm is a fast approximation that gives an strict lower\n bound on the actual number of node independent paths between two nodes [1]_.\n It works for both directed and undirected graphs.\n\n\n Parameters\n ----------\n G : NetworkX graph\n\n nbunch: container\n Container of nodes. If provided node connectivity will be computed\n only over pairs of nodes in nbunch.\n\n cutoff : integer\n Maximum node connectivity to consider. If None, the minimum degree\n of source or target is used as a cutoff in each pair of nodes.\n Default value None.\n\n Returns\n -------\n K : dictionary\n Dictionary, keyed by source and target, of pairwise node connectivity\n\n Examples\n --------\n A 3 node cycle with one extra node attached has connectivity 2 between all\n nodes in the cycle and connectivity 1 between the extra node and the rest:\n\n >>> G = nx.cycle_graph(3)\n >>> G.add_edge(2, 3)\n >>> import pprint # for nice dictionary formatting\n >>> pprint.pprint(nx.all_pairs_node_connectivity(G))\n {0: {1: 2, 2: 2, 3: 1},\n 1: {0: 2, 2: 2, 3: 1},\n 2: {0: 2, 1: 2, 3: 1},\n 3: {0: 1, 1: 1, 2: 1}}\n\n See Also\n --------\n local_node_connectivity\n node_connectivity\n\n References\n ----------\n .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for\n Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035\n http://eclectic.ss.uci.edu/~drwhite/working.pdf\n ", "n_words": 272, "vocab_size": 166, "n_whitespaces": 440, "language": "en" } }, { "id": 37513, "commit_id": "57e6464ac9a31156f1c93e59107323e6ec01309e", "repo": "transformers", "path": "src/transformers/testing_utils.py", "file_name": "testing_utils.py", "fun_name": "require_torch_multi_gpu", "commit_message": "Update all require decorators to use skipUnless when possible (#16999)", "code": "def require_torch_multi_gpu(test_case):\n \n if not is_torch_available():\n return unittest.skip(\"test requires PyTorch\")(test_case)\n\n import torch\n\n return unittest.skipUnless(torch.cuda.device_count() > 1, \"test requires multiple GPUs\")(test_case)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 38, "n_words": 19, "vocab_size": 17, "complexity": 2, "nloc": 5, "token_counts": 44, "n_ast_nodes": 79, "n_identifiers": 9, "d_id": 6818, "documentation": { "docstring": "\n Decorator marking a test that requires a multi-GPU setup (in PyTorch). 
These tests are skipped on a machine without\n multiple GPUs.\n\n To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k \"multi_gpu\"\n ", "n_words": 39, "vocab_size": 36, "n_whitespaces": 52, "language": "en" } }, { "id": 67056, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/regional/doctype/gst_settings/gst_settings.py", "file_name": "gst_settings.py", "fun_name": "send_gstin_reminder", "commit_message": "style: format code with black", "code": "def send_gstin_reminder(party_type, party):\n\t\n\tfrappe.has_permission(party_type, throw=True)\n\temail = _send_gstin_reminder(party_type, party)\n\tif email:\n\t\tfrappe.msgprint(_(\"Reminder to update GSTIN Sent\"), title=\"Reminder sent\", indicator=\"green\")\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 14, "n_words": 19, "vocab_size": 19, "complexity": 2, "nloc": 5, "token_counts": 46, "n_ast_nodes": 78, "n_identifiers": 12, "d_id": 14421, "documentation": { "docstring": "Send GSTIN reminder to one party (called from Customer, Supplier form)", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 294292, "commit_id": "23a630e0bcbd2aec6a598a19ebaf2929eba97e5b", "repo": "core", "path": "tests/components/tod/test_binary_sensor.py", "file_name": "test_binary_sensor.py", "fun_name": "test_midnight_turnover_before_midnight_outside_period", "commit_message": "Update Times of the Day tests to use freezegun (#68327)", "code": "async def test_midnight_turnover_before_midnight_outside_period(hass):\n \n config = {\n \"binary_sensor\": [\n {\"platform\": \"tod\", \"name\": \"Night\", \"after\": \"22:00\", \"before\": \"5:00\"}\n ]\n }\n await async_setup_component(hass, \"binary_sensor\", config)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"binary_sensor.night\")\n assert state.state == STATE_OFF\n\n\n@freeze_time(\"2019-01-10 10:00:00-08:00\")", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@freeze_time(\"2019-01-10 10:00:00-08:00\")", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 78, "n_words": 33, "vocab_size": 31, "complexity": 1, "nloc": 10, "token_counts": 62, "n_ast_nodes": 131, "n_identifiers": 10, "d_id": 93329, "documentation": { "docstring": "Test midnight turnover setting before midnight outside period.", "n_words": 8, "vocab_size": 7, "n_whitespaces": 7, "language": "en" } }, { "id": 291472, "commit_id": "9ecbcd2d8fedc4f0a59f231d1f221157d4cfa359", "repo": "core", "path": "homeassistant/components/panasonic_bluray/media_player.py", "file_name": "media_player.py", "fun_name": "turn_off", "commit_message": "Use _attr_state in panasonic bluray media player (#82834)", "code": "def turn_off(self) -> None:\n \n if self.state != MediaPlayerState.OFF:\n self._device.send_key(\"POWER\")\n\n self._attr_state = MediaPlayerState.OFF\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 44, "n_words": 12, "vocab_size": 12, "complexity": 2, "nloc": 12, "token_counts": 32, "n_ast_nodes": 56, "n_identifiers": 8, "d_id": 90581, "documentation": { "docstring": "\n Instruct the device to turn standby.\n\n Sending the \"POWER\" button will turn the device to standby - there\n is no way to turn it completely off remotely. 
However this works in\n our favour as it means the device is still accepting commands and we\n can thus turn it back on when desired.\n ", "n_words": 52, "vocab_size": 39, "n_whitespaces": 95, "language": "en" } }, { "id": 84758, "commit_id": "bd9a1dc9710293e36d2d47d970d7afb95100c2e6", "repo": "zulip", "path": "zerver/tests/test_message_send.py", "file_name": "test_message_send.py", "fun_name": "test_personal_message", "commit_message": "tests: Consistently JSON-encode ‘to’ parameter\n\nAlthough our POST /messages handler accepts the ‘to’ parameter with or\nwithout JSON encoding, there are two problems with passing it as an\nunencoded string.\n\nFirstly, you’d fail to send a message to a stream named ‘true’ or\n‘false’ or ‘null’ or ‘2022’, as the JSON interpretation is prioritized\nover the plain string interpretation.\n\nSecondly, and more importantly for our tests, it violates our OpenAPI\nschema, which requires the parameter to be JSON-encoded. This is\nbecause OpenAPI has no concept of a parameter that’s “optionally\nJSON-encoded”, nor should it: such a parameter cannot be unambiguously\ndecoded for the reason above.\n\nOur version of openapi-core doesn’t currently detect this schema\nviolation, but after the next upgrade it will.\n\nSigned-off-by: Anders Kaseorg ", "code": "def test_personal_message(self) -> None:\n \n user_profile = self.example_user(\"hamlet\")\n self.login_user(user_profile)\n othello = self.example_user(\"othello\")\n result = self.client_post(\n \"/json/messages\",\n {\n \"type\": \"private\",\n \"content\": \"Test message\",\n \"to\": orjson.dumps([othello.email]).decode(),\n },\n )\n self.assert_json_success(result)\n message_id = orjson.loads(result.content)[\"id\"]\n\n recent_conversations = get_recent_private_conversations(user_profile)\n self.assert_length(recent_conversations, 1)\n recent_conversation = list(recent_conversations.values())[0]\n recipient_id = list(recent_conversations.keys())[0]\n self.assertEqual(set(recent_conversation[\"user_ids\"]), {othello.id})\n self.assertEqual(recent_conversation[\"max_message_id\"], message_id)\n\n # Now send a message to yourself and see how that interacts with the data structure\n result = self.client_post(\n \"/json/messages\",\n {\n \"type\": \"private\",\n \"content\": \"Test message\",\n \"to\": orjson.dumps([user_profile.email]).decode(),\n },\n )\n self.assert_json_success(result)\n self_message_id = orjson.loads(result.content)[\"id\"]\n\n recent_conversations = get_recent_private_conversations(user_profile)\n self.assert_length(recent_conversations, 2)\n recent_conversation = recent_conversations[recipient_id]\n self.assertEqual(set(recent_conversation[\"user_ids\"]), {othello.id})\n self.assertEqual(recent_conversation[\"max_message_id\"], message_id)\n\n # Now verify we have the appropriate self-pm data structure\n del recent_conversations[recipient_id]\n recent_conversation = list(recent_conversations.values())[0]\n recipient_id = list(recent_conversations.keys())[0]\n self.assertEqual(set(recent_conversation[\"user_ids\"]), set())\n self.assertEqual(recent_conversation[\"max_message_id\"], self_message_id)\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 478, "n_words": 112, "vocab_size": 66, "complexity": 1, "nloc": 43, "token_counts": 318, "n_ast_nodes": 537, "n_identifiers": 28, "d_id": 17867, "documentation": { "docstring": "\n Sending a personal message to a valid username is successful.\n ", "n_words": 
10, "vocab_size": 9, "n_whitespaces": 25, "language": "en" } }, { "id": 124497, "commit_id": "56716a1c1b6f9aae3967b910a799bb6af9f2c5d9", "repo": "ray", "path": "python/ray/_private/test_utils.py", "file_name": "test_utils.py", "fun_name": "external_ray_cluster_activity_hook4", "commit_message": "[dashboard] Add `RAY_CLUSTER_ACTIVITY_HOOK` to `/api/component_activities` (#26297)\n\nAdd external hook to /api/component_activities endpoint in dashboard snapshot router\r\nChange is_active field of RayActivityResponse to take an enum RayActivityStatus instead of bool. This is a backward incompatible change, but should be ok because [dashboard] Add component_activities API #25996 wasn't included in any branch cuts. RayActivityResponse now supports informing when there was an error getting the activity observation and the reason.", "code": "def external_ray_cluster_activity_hook4():\n \n raise Exception(\"Error in external cluster activity hook\")\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 15, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 27614, "documentation": { "docstring": "\n Example external hook for test_component_activities_hook.\n\n Errors during execution.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 18, "language": "en" } }, { "id": 67844, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/reorder_item.py", "file_name": "reorder_item.py", "fun_name": "get_item_warehouse_projected_qty", "commit_message": "style: format code with black", "code": "def get_item_warehouse_projected_qty(items_to_consider):\n\titem_warehouse_projected_qty = {}\n\n\tfor item_code, warehouse, projected_qty in frappe.db.sql(\n\t\t.format(\n\t\t\t\", \".join([\"%s\"] * len(items_to_consider))\n\t\t),\n\t\titems_to_consider,\n\t):\n\n\t\tif item_code not in item_warehouse_projected_qty:\n\t\t\titem_warehouse_projected_qty.setdefault(item_code, {})\n\n\t\tif warehouse not in item_warehouse_projected_qty.get(item_code):\n\t\t\titem_warehouse_projected_qty[item_code][warehouse] = flt(projected_qty)\n\n\t\twarehouse_doc = frappe.get_doc(\"Warehouse\", warehouse)\n\n\t\twhile warehouse_doc.parent_warehouse:\n\t\t\tif not item_warehouse_projected_qty.get(item_code, {}).get(warehouse_doc.parent_warehouse):\n\t\t\t\titem_warehouse_projected_qty.setdefault(item_code, {})[warehouse_doc.parent_warehouse] = flt(\n\t\t\t\t\tprojected_qty\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\titem_warehouse_projected_qty[item_code][warehouse_doc.parent_warehouse] += flt(projected_qty)\n\t\t\twarehouse_doc = frappe.get_doc(\"Warehouse\", warehouse_doc.parent_warehouse)\n\n\treturn item_warehouse_projected_qty\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 38, "n_words": 60, "vocab_size": 44, "complexity": 6, "nloc": 24, "token_counts": 166, "n_ast_nodes": 265, "n_identifiers": 18, "d_id": 14639, "documentation": { "docstring": "select item_code, warehouse, projected_qty\n\t\tfrom tabBin where item_code in ({0})\n\t\t\tand (warehouse != \"\" and warehouse is not null)", "n_words": 19, "vocab_size": 18, "n_whitespaces": 16, "language": "en" } }, { "id": 101231, "commit_id": "5e73437be47f2410439a3c6716de96354e6a0c94", "repo": "faceswap", "path": "lib/align/detected_face.py", "file_name": "detected_face.py", "fun_name": 
"affine_matrix", "commit_message": "lib.align updates:\n - alignments.py\n - Add typed dicts for imported alignments\n - Explicitly check for presence of thumb value in alignments dict\n - linting\n - detected_face.py\n - Typing\n - Linting\n - Legacy support for pre-aligned face\n - Update dependencies to new property names", "code": "def affine_matrix(self) -> np.ndarray:\n \n assert self._affine_matrix is not None\n return self._affine_matrix\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 32, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 21, "n_ast_nodes": 35, "n_identifiers": 5, "d_id": 20651, "documentation": { "docstring": " :class: `numpy.ndarray`: The affine matrix to transpose the mask to a full frame. ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 14, "language": "en" } }, { "id": 258546, "commit_id": "fb082b223dc9f1dd327f48dc9b830ee382d6f661", "repo": "scikit-learn", "path": "sklearn/neighbors/_regression.py", "file_name": "_regression.py", "fun_name": "predict", "commit_message": "MAINT Do not compute distances for uniform weighting (#22280)", "code": "def predict(self, X):\n \n if self.weights == \"uniform\":\n # In that case, we do not need the distances to perform\n # the weighting so we do not compute them.\n neigh_ind = self.kneighbors(X, return_distance=False)\n neigh_dist = None\n else:\n neigh_dist, neigh_ind = self.kneighbors(X)\n\n weights = _get_weights(neigh_dist, self.weights)\n\n _y = self._y\n if _y.ndim == 1:\n _y = _y.reshape((-1, 1))\n\n if weights is None:\n y_pred = np.mean(_y[neigh_ind], axis=1)\n else:\n y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)\n denom = np.sum(weights, axis=1)\n\n for j in range(_y.shape[1]):\n num = np.sum(_y[neigh_ind, j] * weights, axis=1)\n y_pred[:, j] = num / denom\n\n if self._y.ndim == 1:\n y_pred = y_pred.ravel()\n\n return y_pred\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 320, "n_words": 99, "vocab_size": 65, "complexity": 6, "nloc": 21, "token_counts": 199, "n_ast_nodes": 310, "n_identifiers": 26, "d_id": 75287, "documentation": { "docstring": "Predict the target for the provided data.\n\n Parameters\n ----------\n X : array-like of shape (n_queries, n_features), \\\n or (n_queries, n_indexed) if metric == 'precomputed'\n Test samples.\n\n Returns\n -------\n y : ndarray of shape (n_queries,) or (n_queries, n_outputs), dtype=int\n Target values.\n ", "n_words": 40, "vocab_size": 33, "n_whitespaces": 126, "language": "en" } }, { "id": 204741, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/serializers/base.py", "file_name": "base.py", "fun_name": "handle_m2m_field", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def handle_m2m_field(self, obj, field):\n \n raise NotImplementedError(\n \"subclasses of Serializer must provide a handle_m2m_field() method\"\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 47, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 4, "token_counts": 15, "n_ast_nodes": 27, "n_identifiers": 5, "d_id": 50868, "documentation": { "docstring": "\n Called to handle a ManyToManyField.\n ", "n_words": 5, "vocab_size": 5, "n_whitespaces": 20, "language": "en" } }, 
{ "id": 283270, "commit_id": "b71abcfbf4d7e8ac1855522aff0378e13c8b5362", "repo": "OpenBBTerminal", "path": "openbb_terminal/helper_funcs.py", "file_name": "helper_funcs.py", "fun_name": "get_user_timezone", "commit_message": "Updating some names (#1575)\n\n* quick econ fix\r\n\r\n* black\r\n\r\n* keys and feature flags\r\n\r\n* terminal name :eyes:\r\n\r\n* some more replacements\r\n\r\n* some more replacements\r\n\r\n* edit pyproject\r\n\r\n* gst -> openbb\r\n\r\n* add example portfolios back to git\r\n\r\n* Update api from gst\r\n\r\n* sorry. skipping some tests\r\n\r\n* another round of names\r\n\r\n* another round of test edits\r\n\r\n* Missed some .gst refs and update timezone\r\n\r\n* water mark stuff\r\n\r\n* Fixing Names in terminal.spec and name of GTFF_DEFAULTS to OBBFF_DEFAULTS\r\n\r\n* fix more GST to OpenBB Terminal\r\n\r\n* Logging : merge conflicts with main\r\n\r\n* Revert wrong files\r\n\r\nCo-authored-by: Andrew \r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA ", "code": "def get_user_timezone() -> str:\n \n filename = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"timezone.openbb\",\n )\n if os.path.isfile(filename):\n with open(filename) as f:\n return f.read()\n return \"\"\n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 67, "n_words": 20, "vocab_size": 19, "complexity": 2, "nloc": 16, "token_counts": 60, "n_ast_nodes": 105, "n_identifiers": 13, "d_id": 84512, "documentation": { "docstring": "Get user timezone if it is a valid one\n\n Returns\n -------\n str\n user timezone based on timezone.openbb file\n ", "n_words": 18, "vocab_size": 16, "n_whitespaces": 37, "language": "en" } }, { "id": 105788, "commit_id": "d7dfbc83d68e87ba002c5eb2555f7a932e59038a", "repo": "datasets", "path": "tests/utils.py", "file_name": "utils.py", "fun_name": "require_sqlalchemy", "commit_message": "Add ability to read-write to SQL databases. 
(#4928)\n\n* Add ability to read-write to SQL databases.\r\n\r\n* Fix issue where pandas<1.4.0 doesn't return the number of rows\r\n\r\n* Fix issue where connections were not closed properly\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* Change according to reviews\r\n\r\n* Change according to reviews\r\n\r\n* Inherit from AbstractDatasetInputStream in SqlDatasetReader\r\n\r\n* Revert typing in SQLDatasetReader as we do not support Connexion\r\n\r\n* Align API with Pandas/Daskk\r\n\r\n* Update tests\r\n\r\n* Update docs\r\n\r\n* Update some more tests\r\n\r\n* Missing comma\r\n\r\n* Small docs fix\r\n\r\n* Style\r\n\r\n* Update src/datasets/arrow_dataset.py\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* Update src/datasets/packaged_modules/sql/sql.py\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* Address some comments\r\n\r\n* Address the rest\r\n\r\n* Improve tests\r\n\r\n* sqlalchemy required tip\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\nCo-authored-by: mariosasko ", "code": "def require_sqlalchemy(test_case):\n \n try:\n import sqlalchemy # noqa\n except ImportError:\n test_case = unittest.skip(\"test requires sqlalchemy\")(test_case)\n return test_case\n\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 43, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 6, "token_counts": 26, "n_ast_nodes": 50, "n_identifiers": 6, "d_id": 22204, "documentation": { "docstring": "\n Decorator marking a test that requires SQLAlchemy.\n\n These tests are skipped when SQLAlchemy isn't installed.\n\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 25, "language": "en" } }, { "id": 208534, "commit_id": "23276ac4770f380ce1d5808950dd412a35594af1", "repo": "ipython", "path": "IPython/testing/tools.py", "file_name": "tools.py", "fun_name": "make_tempfile", "commit_message": "Fix EncodingWarning on Python 3.10", "code": "def make_tempfile(name):\n \n open(name, 'w', encoding='utf-8').close()\n try:\n yield\n finally:\n os.unlink(name)\n", "url": "https://github.com/ipython/ipython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 35, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 6, "token_counts": 31, "n_ast_nodes": 59, "n_identifiers": 7, "d_id": 52377, "documentation": { "docstring": " Create an empty, named, temporary file for the duration of the context.\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 16, "language": "en" } }, { "id": 222940, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/filelist.py", "file_name": "filelist.py", "fun_name": "findall", "commit_message": "add python 3.10.4 for windows", "code": "def findall(dir=os.curdir):\n \n files = _find_all_simple(dir)\n if dir == os.curdir:\n make_rel = functools.partial(os.path.relpath, start=dir)\n files = map(make_rel, files)\n return list(files)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 45, "n_words": 19, "vocab_size": 16, "complexity": 2, "nloc": 6, "token_counts": 52, "n_ast_nodes": 84, "n_identifiers": 14, "d_id": 56821, "documentation": { "docstring": "\n Find all files under 'dir' and 
return the list of full filenames.\n Unless dir is '.', return full filenames with dir prepended.\n ", "n_words": 22, "vocab_size": 19, "n_whitespaces": 32, "language": "en" } }, { "id": 315611, "commit_id": "0c29b68cf82c777ec6fd70ce38911f1d1e39d26a", "repo": "core", "path": "tests/components/http/test_ban.py", "file_name": "test_ban.py", "fun_name": "test_ip_ban_manager_never_started", "commit_message": "Switch linear search to a dict lookup for ip bans (#74482)", "code": "async def test_ip_ban_manager_never_started(hass, aiohttp_client, caplog):\n \n app = web.Application()\n app[\"hass\"] = hass\n setup_bans(hass, app, 5)\n set_real_ip = mock_real_ip(app)\n\n with patch(\n \"homeassistant.components.http.ban.load_yaml_config_file\",\n side_effect=FileNotFoundError,\n ):\n client = await aiohttp_client(app)\n\n # Mock the manager never being started\n del app[KEY_BAN_MANAGER]\n\n set_real_ip(\"4.3.2.1\")\n resp = await client.get(\"/\")\n assert resp.status == HTTPStatus.NOT_FOUND\n assert \"IP Ban middleware loaded but banned IPs not loaded\" in caplog.text\n\n\n@pytest.mark.parametrize(\n \"remote_addr, bans, status\",\n list(\n zip(\n BANNED_IPS_WITH_SUPERVISOR,\n [1, 1, 0],\n [HTTPStatus.FORBIDDEN, HTTPStatus.FORBIDDEN, HTTPStatus.UNAUTHORIZED],\n )\n ),\n)", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"remote_addr, bans, status\",\n list(\n zip(\n BANNED_IPS_WITH_SUPERVISOR,\n [1, 1, 0],\n [HTTPStatus.FORBIDDEN, HTTPStatus.FORBIDDEN, HTTPStatus.UNAUTHORIZED],\n )\n ),\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 186, "n_words": 72, "vocab_size": 65, "complexity": 1, "nloc": 15, "token_counts": 87, "n_ast_nodes": 209, "n_identifiers": 29, "d_id": 114189, "documentation": { "docstring": "Test we handle the ip ban manager not being started.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 204933, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/base/schema.py", "file_name": "schema.py", "fun_name": "remove_field", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def remove_field(self, model, field):\n \n # Special-case implicit M2M tables\n if field.many_to_many and field.remote_field.through._meta.auto_created:\n return self.delete_model(field.remote_field.through)\n # It might not actually have a column behind it\n if field.db_parameters(connection=self.connection)[\"type\"] is None:\n return\n # Drop any FK constraints, MySQL requires explicit deletion\n if field.remote_field:\n fk_names = self._constraint_names(model, [field.column], foreign_key=True)\n for fk_name in fk_names:\n self.execute(self._delete_fk_sql(model, fk_name))\n # Delete the column\n sql = self.sql_delete_column % {\n \"table\": self.quote_name(model._meta.db_table),\n \"column\": self.quote_name(field.column),\n }\n self.execute(sql)\n # Reset connection if required\n if self.connection.features.connection_persists_old_columns:\n self.connection.close()\n # Remove all deferred statements referencing the deleted column.\n for sql in list(self.deferred_sql):\n if isinstance(sql, Statement) and sql.references_column(\n model._meta.db_table, field.column\n ):\n self.deferred_sql.remove(sql)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 346, "n_words": 97, "vocab_size": 79, "complexity": 10, "nloc": 21, "token_counts": 
190, "n_ast_nodes": 304, "n_identifiers": 32, "d_id": 50985, "documentation": { "docstring": "\n Remove a field from a model. Usually involves deleting a column,\n but for M2Ms may involve deleting a table.\n ", "n_words": 19, "vocab_size": 15, "n_whitespaces": 41, "language": "en" } }, { "id": 216279, "commit_id": "3c7e1ec1f08abd7cd1ba78ad7880acb6ba6fdce7", "repo": "salt", "path": "tests/pytests/functional/transport/server/test_req_channel.py", "file_name": "test_req_channel.py", "fun_name": "test_basic", "commit_message": "Fix minion unit tests, specifically .../tests/pytests/test_minion.py", "code": "def test_basic(push_channel):\n \n msgs = [\n {\"foo\": \"bar\"},\n {\"bar\": \"baz\"},\n {\"baz\": \"qux\", \"list\": [1, 2, 3]},\n ]\n for msg in msgs:\n ret = push_channel.send(msg, timeout=5, tries=1)\n assert ret[\"load\"] == msg\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 76, "n_words": 29, "vocab_size": 27, "complexity": 2, "nloc": 9, "token_counts": 66, "n_ast_nodes": 112, "n_identifiers": 8, "d_id": 54497, "documentation": { "docstring": "\n Test a variety of messages, make sure we get the expected responses\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 19, "language": "en" } }, { "id": 216508, "commit_id": "9e1ca8b5b9e7006fea28f473711917755cf5a262", "repo": "salt", "path": "tests/pytests/unit/modules/test_aptpkg.py", "file_name": "test_aptpkg.py", "fun_name": "test_upgrade_allow_downgrades", "commit_message": "Add --allow-downgrades capability for apt upgrade", "code": "def test_upgrade_allow_downgrades(uninstall_var, upgrade_var):\n \n with patch(\"salt.utils.pkg.clear_rtag\", MagicMock()):\n with patch(\n \"salt.modules.aptpkg.list_pkgs\", MagicMock(return_value=uninstall_var)\n ):\n mock_cmd = MagicMock(return_value={\"retcode\": 0, \"stdout\": upgrade_var})\n patch_kwargs = {\n \"__salt__\": {\n \"config.get\": MagicMock(return_value=True),\n \"cmd.run_all\": mock_cmd,\n },\n }\n with patch.multiple(aptpkg, **patch_kwargs):\n aptpkg.upgrade()\n args_matching = [\n True\n for args in patch_kwargs[\"__salt__\"][\"cmd.run_all\"].call_args[0]\n if \"--allow-downgrades\" in args\n ]\n # Here we shouldn't see the parameter and args_matching should be empty.\n assert any(args_matching) is False\n\n aptpkg.upgrade(allow_downgrades=True)\n args_matching = [\n True\n for args in patch_kwargs[\"__salt__\"][\"cmd.run_all\"].call_args[0]\n if \"--allow-downgrades\" in args\n ]\n # --allow-downgrades should be in the args list and we should have at least on True in the list.\n assert any(args_matching) is True\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 505, "n_words": 98, "vocab_size": 61, "complexity": 5, "nloc": 27, "token_counts": 155, "n_ast_nodes": 270, "n_identifiers": 16, "d_id": 54619, "documentation": { "docstring": "\n Tests the allow_downgrades option for upgrade.\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 13, "language": "en" } }, { "id": 203770, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/gis/db/backends/base/models.py", "file_name": "models.py", "fun_name": "srs", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def srs(self):\n \n # TODO: Is caching really necessary here? 
Is complexity worth it?\n if hasattr(self, \"_srs\"):\n # Returning a clone of the cached SpatialReference object.\n return self._srs.clone()\n else:\n # Attempting to cache a SpatialReference object.\n\n # Trying to get from WKT first.\n try:\n self._srs = gdal.SpatialReference(self.wkt)\n return self.srs\n except Exception as e:\n msg = e\n\n try:\n self._srs = gdal.SpatialReference(self.proj4text)\n return self.srs\n except Exception as e:\n msg = e\n\n raise Exception(\n \"Could not get OSR SpatialReference from WKT: %s\\nError:\\n%s\"\n % (self.wkt, msg)\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 341, "n_words": 82, "vocab_size": 57, "complexity": 4, "nloc": 18, "token_counts": 89, "n_ast_nodes": 157, "n_identifiers": 12, "d_id": 50533, "documentation": { "docstring": "\n Return a GDAL SpatialReference object.\n ", "n_words": 5, "vocab_size": 5, "n_whitespaces": 20, "language": "en" } }, { "id": 218358, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/importlib/util.py", "file_name": "util.py", "fun_name": "set_package", "commit_message": "add python 3.10.4 for windows", "code": "def set_package(fxn):\n ", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 4, "token_counts": 17, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 55256, "documentation": { "docstring": "Set __package__ on the returned module.\n\n This function is deprecated.\n\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 16, "language": "en" } }, { "id": 65209, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/consolidated_financial_statement/consolidated_financial_statement.py", "file_name": "consolidated_financial_statement.py", "fun_name": "get_subsidiary_companies", "commit_message": "style: format code with black", "code": "def get_subsidiary_companies(company):\n\tlft, rgt = frappe.get_cached_value(\"Company\", company, [\"lft\", \"rgt\"])\n\n\treturn frappe.db.sql_list(\n\t\t.format(\n\t\t\tlft, rgt\n\t\t)\n\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 9, "n_words": 16, "vocab_size": 13, "complexity": 1, "nloc": 8, "token_counts": 39, "n_ast_nodes": 66, "n_identifiers": 9, "d_id": 13823, "documentation": { "docstring": "select name from `tabCompany`\n\t\twhere lft >= {0} and rgt <= {1} order by lft, rgt", "n_words": 16, "vocab_size": 15, "n_whitespaces": 14, "language": "en" } }, { "id": 63361, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", "file_name": "pyparsing.py", "fun_name": "from_dict", "commit_message": "upd; format", "code": "def from_dict(cls, other, name=None):\n ", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 11, "n_words": 4, "vocab_size": 4, "complexity": 4, "nloc": 11, "token_counts": 93, "n_ast_nodes": 20, "n_identifiers": 4, "d_id": 13268, "documentation": { "docstring": "\n Helper classmethod to construct a ParseResults from a dict, preserving the\n name-value relations as results names. 
If an optional 'name' argument is\n given, a nested ParseResults will be returned\n ", "n_words": 29, "vocab_size": 26, "n_whitespaces": 58, "language": "en" } }, { "id": 73170, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/modeladmin/options.py", "file_name": "options.py", "fun_name": "create_view", "commit_message": "Reformat with black", "code": "def create_view(self, request):\n \n kwargs = {\"model_admin\": self}\n view_class = self.create_view_class\n return view_class.as_view(**kwargs)(request)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 40, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 31, "n_ast_nodes": 54, "n_identifiers": 7, "d_id": 15968, "documentation": { "docstring": "\n Instantiates a class-based view to provide 'creation' functionality for\n the assigned model, or redirect to Wagtail's create view if the\n assigned model extends 'Page'. The view class used can be overridden by\n changing the 'create_view_class' attribute.\n ", "n_words": 36, "vocab_size": 30, "n_whitespaces": 72, "language": "en" } }, { "id": 264426, "commit_id": "10e6ae20949028171fdfcc50fde78bf289f41d5f", "repo": "netbox", "path": "netbox/utilities/utils.py", "file_name": "utils.py", "fun_name": "get_viewname", "commit_message": "Introduce get_viewname() as a standard utility", "code": "def get_viewname(model, action=None):\n \n viewname = f'{model._meta.app_label}:{model._meta.model_name}'\n\n # Determine whether this is a plugin view and adjust the namespace appropriately\n if isinstance(model._meta.app_config, PluginConfig):\n viewname = f'plugins:{viewname}'\n\n # Append the action, if any\n if action:\n viewname = f'{viewname}_{action}'\n\n return viewname\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 73, "n_words": 38, "vocab_size": 29, "complexity": 3, "nloc": 7, "token_counts": 39, "n_ast_nodes": 99, "n_identifiers": 10, "d_id": 77721, "documentation": { "docstring": "\n Return the view name for the given model and action, if valid.\n\n :param model: The model or instance to which the view applies\n :param action: A string indicating the desired action (if any); e.g. 
\"add\" or \"list\"\n ", "n_words": 37, "vocab_size": 30, "n_whitespaces": 50, "language": "en" } }, { "id": 219635, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pydecimal.py", "file_name": "_pydecimal.py", "fun_name": "plus", "commit_message": "add python 3.10.4 for windows", "code": "def plus(self, a):\n \n a = _convert_other(a, raiseit=True)\n return a.__pos__(context=self)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 27, "n_ast_nodes": 44, "n_identifiers": 7, "d_id": 55667, "documentation": { "docstring": "Plus corresponds to unary prefix plus in Python.\n\n The operation is evaluated using the same rules as add; the\n operation plus(a) is calculated as add('0', a) where the '0'\n has the same exponent as the operand.\n\n >>> ExtendedContext.plus(Decimal('1.3'))\n Decimal('1.3')\n >>> ExtendedContext.plus(Decimal('-1.3'))\n Decimal('-1.3')\n >>> ExtendedContext.plus(-1)\n Decimal('-1')\n ", "n_words": 45, "vocab_size": 34, "n_whitespaces": 115, "language": "en" } }, { "id": 115562, "commit_id": "149ae900c62910a480f8af70daa98362d513a350", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/clickhouse_handler/clickhouse_handler.py", "file_name": "clickhouse_handler.py", "fun_name": "get_tables", "commit_message": "CH handler implementation", "code": "def get_tables(self) -> Response:\n \n q = f\"SHOW TABLES FROM {self.connection_data['database']}\"\n result = self.native_query(q)\n df = result.data_frame\n result.data_frame = df.rename(columns={df.columns[0]: 'table_name'})\n return result\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 64, "n_words": 22, "vocab_size": 17, "complexity": 1, "nloc": 9, "token_counts": 48, "n_ast_nodes": 94, "n_identifiers": 11, "d_id": 25491, "documentation": { "docstring": "\n Get a list with all of the tabels in ClickHouse db\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 26, "language": "en" } }, { "id": 10853, "commit_id": "13edc16d806fb5d77a6849551178ccc75937f25f", "repo": "jina", "path": "jina/orchestrate/deployments/__init__.py", "file_name": "__init__.py", "fun_name": "uses_before_args", "commit_message": "refactor: rename pod to deployment (#4230)\n\n* refactor: rename pod to deployment\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: undo daemon mistake\r\n\r\n* refactor: leftover cleanup\r\n\r\n* fix: more test fixes\r\n\r\n* fix: more fixes\r\n\r\n* fix: more fixes\r\n\r\n* fix: more fixes\r\n\r\n* fix: more tests\r\n\r\n* fix: fix more tests\r\n\r\n* refactor: fix more tests\r\n\r\n* refactor: more tests fixes\r\n\r\n* refactor: rename pea to pod\r\n\r\n* refactor: adjust docs\r\n\r\n* refactor: complete pea renaming\r\n\r\n* refactor: more fixes\r\n\r\n* fix: pea_type in k8s yamls\r\n\r\n* fix: adjust pod args name\r\n\r\n* refactor: rename peapods parser folder\r\n\r\n* fix: da init\r\n\r\nCo-authored-by: Jina Dev Bot ", "code": "def uses_before_args(self) -> Namespace:\n \n return self.pod_args['uses_before']\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 7, "token_counts": 15, "n_ast_nodes": 28, "n_identifiers": 4, "d_id": 1951, 
"documentation": { "docstring": "Get the arguments for the `uses_before` of this Deployment.\n\n\n .. # noqa: DAR201\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 27, "language": "en" } }, { "id": 61443, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/appdirs.py", "file_name": "appdirs.py", "fun_name": "user_config_dir", "commit_message": "upd; format", "code": "def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):\n r\n if system in [\"win32\", \"darwin\"]:\n path = user_data_dir(appname, appauthor, None, roaming)\n else:\n path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser(\"~/.config\"))\n if appname:\n path = os.path.join(path, appname)\n if appname and version:\n path = os.path.join(path, version)\n return path\n\n\n# for the discussion regarding site_config_dir locations\n# see ", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 99, "n_words": 48, "vocab_size": 37, "complexity": 5, "nloc": 38, "token_counts": 95, "n_ast_nodes": 152, "n_identifiers": 12, "d_id": 12578, "documentation": { "docstring": "Return full path to the user-specific config dir for this application.\n\n \"appname\" is the name of application.\n If None, just the system directory is returned.\n \"appauthor\" (only used on Windows) is the name of the\n appauthor or distributing body for this application. Typically\n it is the owning company name. This falls back to appname. You may\n pass False to disable it.\n \"version\" is an optional version path element to append to the\n path. You might want to use this if you want multiple versions\n of your app to be able to run independently. If used, this\n would typically be \".\".\n Only applied when appname is present.\n \"roaming\" (boolean, default False) can be set True to use the Windows\n roaming appdata directory. That means that for users on a Windows\n network setup for roaming profiles, this user data will be\n sync'd on login. 
See\n \n for a discussion of issues.\n\n Typical user config directories are:\n Mac OS X: same as user_data_dir\n Unix: ~/.config/ # or in $XDG_CONFIG_HOME, if defined\n Win *: same as user_data_dir\n\n For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.\n That means, by default \"~/.config/\".\n ", "n_words": 188, "vocab_size": 131, "n_whitespaces": 445, "language": "en" } }, { "id": 3683, "commit_id": "359fcd801128239b39297828d39821f631ce00c0", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-google-ads/unit_tests/test_streams.py", "file_name": "test_streams.py", "fun_name": "test_page_token_expired_retry_fails", "commit_message": "Source Google Ads: handle page token expired exception (#9812)\n\n* dynamic date range\r\n\r\n* raise exception if exites the cycle without error\r\n\r\n* if range days is 1 already do not retry\r\n\r\n* added unit tests\r\n\r\n* added comments\r\n\r\n* added comments\r\n\r\n* common mock classes are moved to common module\r\n\r\n* change read_records\r\n\r\n* refactored get_date_params\r\n\r\n* handle corner case\r\n\r\n* added parse_dates function\r\n\r\n* added test_streams\r\n\r\n* check mock calls\r\n\r\n* fix unit tests for chunk date range refactoring\r\n\r\n* removed commented codes\r\n\r\n* remove commented line\r\n\r\n* refactor test_streams\r\n\r\n* refactor CustomQuery.get_query\r\n\r\n* remove TODO\r\n\r\n* deleted unused json\r\n\r\n* format\r\n\r\n* fix chunk_date_range\r\n\r\n* added docstring\r\n\r\n* set range_days to 15 for ShoppingPerformanceReport\r\n\r\n* refactor chunk_date_range\r\n\r\n* format code 2\r\n\r\n* call parent read_records method\r\n\r\n* add return type in get_date_params\r\n\r\n* change e to exception\r\n\r\n* set start_date as end_date\r\n\r\n* log page token has expired\r\n\r\n* bump version\r\n\r\n* updated spec and def yaml\r\n\r\nCo-authored-by: auganbay ", "code": "def test_page_token_expired_retry_fails(mock_ads_client, test_config):\n \n stream_slice = {\"start_date\": \"2021-01-01\", \"end_date\": \"2021-01-15\"}\n\n google_api = MockGoogleAdsFails(credentials=test_config[\"credentials\"], customer_id=test_config[\"customer_id\"])\n incremental_stream_config = dict(\n api=google_api,\n conversion_window_days=test_config[\"conversion_window_days\"],\n start_date=test_config[\"start_date\"],\n time_zone=\"local\",\n end_date=\"2021-04-04\",\n )\n stream = ClickView(**incremental_stream_config)\n stream.get_query = Mock()\n stream.get_query.return_value = \"query\"\n\n with pytest.raises(GoogleAdsException):\n list(stream.read_records(sync_mode=SyncMode.incremental, cursor_field=[\"segments.date\"], stream_slice=stream_slice))\n\n stream.get_query.assert_called_with({\"start_date\": \"2021-01-03\", \"end_date\": \"2021-01-15\"})\n assert stream.get_query.call_count == 2\n\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 119, "n_words": 44, "vocab_size": 38, "complexity": 1, "nloc": 17, "token_counts": 144, "n_ast_nodes": 251, "n_identifiers": 31, "d_id": 515, "documentation": { "docstring": "\n Page token has expired while reading records within date \"2021-01-03\", it should raise error,\n because Google Ads API doesn't allow filter by datetime.\n ", "n_words": 23, "vocab_size": 23, "n_whitespaces": 33, "language": "en" } }, { "id": 68185, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/www/support/index.py", "file_name": "index.py", "fun_name": 
"get_favorite_articles_by_page_view", "commit_message": "style: format code with black", "code": "def get_favorite_articles_by_page_view():\n\treturn frappe.db.sql(\n\t\t,\n\t\tas_dict=True,\n\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 2, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 21, "token_counts": 18, "n_ast_nodes": 30, "n_identifiers": 5, "d_id": 14733, "documentation": { "docstring": "\n\t\t\tSELECT\n\t\t\t\tt1.name as name,\n\t\t\t\tt1.title as title,\n\t\t\t\tt1.content as content,\n\t\t\t\tt1.route as route,\n\t\t\t\tt1.category as category,\n\t\t\t\tcount(t1.route) as count\n\t\t\tFROM `tabHelp Article` AS t1\n\t\t\t\tINNER JOIN\n\t\t\t\t`tabWeb Page View` AS t2\n\t\t\tON t1.route = t2.path\n\t\t\tWHERE t1.published = 1\n\t\t\tGROUP BY route\n\t\t\tORDER BY count DESC\n\t\t\tLIMIT 6;\n\t\t\t", "n_words": 48, "vocab_size": 38, "n_whitespaces": 33, "language": "en" } }, { "id": 42808, "commit_id": "60eb9e106f5915398eafd6aa339ec710c102dc09", "repo": "airflow", "path": "tests/providers/cncf/kubernetes/operators/test_kubernetes_pod.py", "file_name": "test_kubernetes_pod.py", "fun_name": "test_previous_pods_ignored_for_reattached", "commit_message": "Use KubernetesHook to create api client in KubernetesPodOperator (#20578)\n\nAdd support for k8s hook in KPO; use it always (even when no conn id); continue to consider the core k8s settings that KPO already takes into account but emit deprecation warning about them.\r\n\r\nKPO historically takes into account a few settings from core airflow cfg (e.g. verify ssl, tcp keepalive, context, config file, and in_cluster). So to use the hook to generate the client, somehow the hook has to take these settings into account. But we don't want the hook to consider these settings in general. 
So we read them in KPO and if necessary patch the hook and warn.", "code": "def test_previous_pods_ignored_for_reattached(self):\n \n k = KubernetesPodOperator(\n namespace=\"default\",\n image=\"ubuntu:16.04\",\n name=\"test\",\n task_id=\"task\",\n )\n self.run_pod(k)\n k.client.list_namespaced_pod.assert_called_once()\n _, kwargs = k.client.list_namespaced_pod.call_args\n assert 'already_checked!=True' in kwargs['label_selector']\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 113, "n_words": 20, "vocab_size": 19, "complexity": 1, "nloc": 11, "token_counts": 60, "n_ast_nodes": 106, "n_identifiers": 15, "d_id": 7739, "documentation": { "docstring": "\n When looking for pods to possibly reattach to,\n ignore pods from previous tries that were properly finished\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 39, "language": "en" } }, { "id": 107057, "commit_id": "2d8bd625813e4c513bbe8bedddca45da368bca9b", "repo": "matplotlib", "path": "lib/matplotlib/figure.py", "file_name": "figure.py", "fun_name": "tight_layout", "commit_message": "Recreated deprecated files and changed references", "code": "def tight_layout(self, *, pad=1.08, h_pad=None, w_pad=None, rect=None):\n \n from contextlib import nullcontext\n from ._tight_layout import (\n get_subplotspec_list, get_tight_layout_figure)\n subplotspec_list = get_subplotspec_list(self.axes)\n if None in subplotspec_list:\n _api.warn_external(\"This figure includes Axes that are not \"\n \"compatible with tight_layout, so results \"\n \"might be incorrect.\")\n renderer = _get_renderer(self)\n with getattr(renderer, \"_draw_disabled\", nullcontext)():\n kwargs = get_tight_layout_figure(\n self, self.axes, subplotspec_list, renderer,\n pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)\n if kwargs:\n self.subplots_adjust(**kwargs)\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 252, "n_words": 62, "vocab_size": 55, "complexity": 3, "nloc": 16, "token_counts": 118, "n_ast_nodes": 186, "n_identifiers": 20, "d_id": 22574, "documentation": { "docstring": "\n Adjust the padding between and around subplots.\n\n To exclude an artist on the Axes from the bounding box calculation\n that determines the subplot parameters (i.e. legend, or annotation),\n set ``a.set_in_layout(False)`` for that artist.\n\n Parameters\n ----------\n pad : float, default: 1.08\n Padding between the figure edge and the edges of subplots,\n as a fraction of the font size.\n h_pad, w_pad : float, default: *pad*\n Padding (height/width) between edges of adjacent subplots,\n as a fraction of the font size.\n rect : tuple (left, bottom, right, top), default: (0, 0, 1, 1)\n A rectangle in normalized figure coordinates into which the whole\n subplots area (including labels) will fit.\n\n See Also\n --------\n .Figure.set_tight_layout\n .pyplot.tight_layout\n ", "n_words": 110, "vocab_size": 81, "n_whitespaces": 275, "language": "en" } }, { "id": 258977, "commit_id": "fc72ebe61c833f227560bd9d0dcf88cdda6c6adb", "repo": "scikit-learn", "path": "sklearn/datasets/tests/test_base.py", "file_name": "test_base.py", "fun_name": "test_load_files_allowed_extensions", "commit_message": "ENH Adds file extension selection to load_files (#22498)\n\nCo-authored-by: Thomas J. 
Fan \r\nCo-authored-by: Tony \r\nCo-authored-by: Kazim \r\nCo-authored-by: Tony Attalla <39226687+TonyAttalla@users.noreply.github.com>\r\nCo-authored-by: Guillaume Lemaitre ", "code": "def test_load_files_allowed_extensions(tmp_path, allowed_extensions):\n \n d = tmp_path / \"sub\"\n d.mkdir()\n files = (\"file1.txt\", \"file2.json\", \"file3.json\", \"file4.md\")\n paths = [d / f for f in files]\n for p in paths:\n p.touch()\n res = load_files(tmp_path, allowed_extensions=allowed_extensions)\n assert set([str(p) for p in paths if p.suffix in allowed_extensions]) == set(\n res.filenames\n )\n\n\n@pytest.mark.parametrize(\n \"filename, expected_n_samples, expected_n_features, expected_target_names\",\n [\n (\"wine_data.csv\", 178, 13, [\"class_0\", \"class_1\", \"class_2\"]),\n (\"iris.csv\", 150, 4, [\"setosa\", \"versicolor\", \"virginica\"]),\n (\"breast_cancer.csv\", 569, 30, [\"malignant\", \"benign\"]),\n ],\n)", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"filename, expected_n_samples, expected_n_features, expected_target_names\",\n [\n (\"wine_data.csv\", 178, 13, [\"class_0\", \"class_1\", \"class_2\"]),\n (\"iris.csv\", 150, 4, [\"setosa\", \"versicolor\", \"virginica\"]),\n (\"breast_cancer.csv\", 569, 30, [\"malignant\", \"benign\"]),\n ],\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 141, "n_words": 72, "vocab_size": 59, "complexity": 5, "nloc": 11, "token_counts": 87, "n_ast_nodes": 240, "n_identifiers": 19, "d_id": 75505, "documentation": { "docstring": "Check the behaviour of `allowed_extension` in `load_files`.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 165326, "commit_id": "6caefb19f4d7c05451fafca182c6eb39fe9901ed", "repo": "pandas", "path": "pandas/tests/window/test_rolling.py", "file_name": "test_rolling.py", "fun_name": "test_rolling_non_monotonic", "commit_message": "ENH: Rolling window with step size (GH-15354) (#45765)", "code": "def test_rolling_non_monotonic(method, expected):\n \n # Based on an example found in computation.rst\n use_expanding = [True, False, True, False, True, True, True, True]\n df = DataFrame({\"values\": np.arange(len(use_expanding)) ** 2})\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 39, "n_words": 27, "vocab_size": 22, "complexity": 1, "nloc": 9, "token_counts": 100, "n_ast_nodes": 72, "n_identifiers": 9, "d_id": 39660, "documentation": { "docstring": "\n Make sure the (rare) branch of non-monotonic indices is covered by a test.\n\n output from 1.1.3 is assumed to be the expected output. 
Output of sum/mean has\n manually been verified.\n\n GH 36933.\n ", "n_words": 32, "vocab_size": 29, "n_whitespaces": 48, "language": "en" } }, { "id": 72022, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/test_menu.py", "file_name": "test_menu.py", "fun_name": "test_remember_collapsed", "commit_message": "Reformat with black", "code": "def test_remember_collapsed(self):\n \n # Sidebar should not be collapsed\n self.client.cookies[\"wagtail_sidebar_collapsed\"] = \"0\"\n response = self.client.get(reverse(\"wagtailadmin_home\"))\n self.assertNotContains(response, \"sidebar-collapsed\")\n\n # Sidebar should be collapsed\n self.client.cookies[\"wagtail_sidebar_collapsed\"] = \"1\"\n response = self.client.get(reverse(\"wagtailadmin_home\"))\n self.assertContains(response, \"sidebar-collapsed\")\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 92, "n_words": 29, "vocab_size": 17, "complexity": 1, "nloc": 7, "token_counts": 68, "n_ast_nodes": 127, "n_identifiers": 9, "d_id": 15818, "documentation": { "docstring": "Sidebar should render with collapsed class applied.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 153345, "commit_id": "e7cb2e82f8b9c7a68f82abdd3b6011d661230b7e", "repo": "modin", "path": "modin/core/execution/ray/implementations/pandas_on_ray/partitioning/partition.py", "file_name": "partition.py", "fun_name": "get_index_and_columns", "commit_message": "REFACTOR-#4251: define public interfaces in `modin.core.execution.ray` module (#3868)\n\nSigned-off-by: Anatoly Myachev ", "code": "def _get_index_and_columns(df):\n \n return len(df.index), len(df.columns)\n\n\n@ray.remote(num_returns=4)", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "@ray.remote(num_returns=4)", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 11, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 50, "n_identifiers": 8, "d_id": 35381, "documentation": { "docstring": "\n Get the number of rows and columns of a pandas DataFrame.\n\n Parameters\n ----------\n df : pandas.DataFrame\n A pandas DataFrame which dimensions are needed.\n\n Returns\n -------\n int\n The number of rows.\n int\n The number of columns.\n ", "n_words": 35, "vocab_size": 27, "n_whitespaces": 84, "language": "en" } }, { "id": 8248, "commit_id": "1caede3a2da4ec71cb8650c7e45120c26948a5b9", "repo": "ludwig", "path": "ludwig/explain/explainer.py", "file_name": "explainer.py", "fun_name": "explain", "commit_message": "Explanation API and feature importance for GBM (#2564)\n\n* add docstring for explain_ig\r\n\r\n* solidify Explainer API\r\n\r\n* add gbm explainer\r\n\r\n* add dataclasses for typed explanations\r\n\r\n* add GBM feature importance\r\n\r\n* remove unused imports\r\n\r\n* add tests\r\n\r\n* fix test\r\n\r\n* extract explanation into file\r\n\r\n* rename base to explainer\r\n\r\n* remove unused kwargs\r\n\r\n* remove device placement from base explainer\r\n\r\n* use proper field from gbm", "code": "def explain(self) -> Tuple[List[Explanation], List[float]]:\n \n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 12, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 12, "token_counts": 19, "n_ast_nodes": 30, "n_identifiers": 6, "d_id": 1382, "documentation": { 
"docstring": "Explain the model's predictions.\n\n # Return\n\n :return: (Tuple[List[Explanation], List[float]]) `(explanations, expected_values)`\n `explanations`: (List[Explanation]) A list of explanations, one for each row in the input data. Each\n explanation contains the feature attributions for each label in the target feature's vocab.\n\n `expected_values`: (List[float]) of length [output feature cardinality] Expected value for each label in\n the target feature's vocab.\n ", "n_words": 56, "vocab_size": 40, "n_whitespaces": 121, "language": "en" } }, { "id": 8086, "commit_id": "e4fc06f986e03919d9aef3ab55c05fee5a6b9d3a", "repo": "ludwig", "path": "ludwig/datasets/loaders/dataset_loader.py", "file_name": "dataset_loader.py", "fun_name": "get_default_cache_location", "commit_message": "Config-first Datasets API (ludwig.datasets refactor) (#2479)\n\n* Adds README and stub for reading dataset configs.\r\n\r\n* Adds __init__.py for configs, moves circular import into function scope in ludwig/datasets/__init__.py\r\n\r\n* Print config files in datasets folder.\r\n\r\n* First pass at automatic archive extraction.\r\n\r\n* Implemented downloading and extract.\r\n\r\n* Refactor DatasetConfig into its own file.\r\n\r\n* Fixed bugs downloading kaggle dataset.\r\n\r\n* Makes registry store dataset instances, not classes. Also comments out import_submodules for testing.\r\n\r\n* Typo fix.\r\n\r\n* Only pass data files on to load_unprocessed_dataframe, symlink directories.\r\n\r\n* Downloading dataset files into existing directory if exists.\r\n\r\n* Refactor: make datasets fully config-first, lazy load dataset loaders.\r\n\r\n* Implemented agnews custom loader.\r\n\r\n* Implements train/validation/test split by files, and globbing support\r\n\r\n* Adds _glob_multiple\r\n\r\n* Adds adult_census_income, agnews, allstate_claims_severity.\r\n\r\n* Implements sha256 verification, adds more datasets up to creditcard_fraud.\r\n\r\n* Adds checksums, dbpedia, electricity\r\n\r\n* Fixes gzip file name returned as string not list, adds up to forest_cover dataset.\r\n\r\n* Adds datasets up to reuters_r8\r\n\r\n* Adds all datasets which don't require a custom class.\r\n\r\n* Restore dataset import behavior by implementing module __getattr__\r\n\r\n* Adds KDD datasets.\r\n\r\n* Adds ieee_fraud.\r\n\r\n* Adds imbalanced_insurance, insurance_lite.\r\n\r\n* Adds mnist.\r\n\r\n* Completes implementation of all of the built-in datasets.\r\n\r\n* Made cache_dir optional, read from environment variable if set.\r\n\r\n* Upgrades datasets tests.\r\n\r\n* Adds test for new dataset config API. Also adds scripts for dataset link checking.\r\n\r\n* Fixes loading allstate claims severity dataset.\r\n\r\n* Use @lru_cache(1), @cache not supported in python < 3.9\r\n\r\n* Deletes dataset registry, updates automl test utils\r\n\r\n* Fix imports of datasets API.\r\n\r\n* Adds more detail to sha256: docstring and basic README\r\n\r\n* Copy-paste link oops.\r\n\r\n* Fixes handling of nested archive types like .tar.bz Also adds a LUDWIG_CACHE and export to the README\r\n\r\n* Adds link for twitter bots.\r\n\r\n* Fix order of splits in README.md\r\n\r\n* typo\r\n\r\n* Adds verify as a phase in doc string.\r\n\r\n* Support .pqt, .pq extensions for parquet.\r\n\r\n* Handle nested archives with longer file extensions like .csv.zip\r\n\r\n* Handle nested .gz types properly too. 
Check all extensions with .endswith\r\n\r\n* Handle all archive types with .endswith\r\n\r\n* Update ludwig/datasets/loaders/split_loaders.py\r\n\r\nCo-authored-by: Joppe Geluykens \r\n\r\n* Adds explanation for export, fixes preserve_paths (should be relative to processed_dataset_dir)\r\n\r\n* Resolve preserved paths relative to raw dataset dir before move.\r\n\r\n* Catch runtime exception from extracting sub-archives.\r\n\r\nCo-authored-by: Daniel Treiman \r\nCo-authored-by: Joppe Geluykens ", "code": "def get_default_cache_location() -> str:\n \n if \"LUDWIG_CACHE\" in os.environ and os.environ[\"LUDWIG_CACHE\"]:\n return os.environ[\"LUDWIG_CACHE\"]\n else:\n return str(Path.home().joinpath(\".ludwig_cache\"))\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 38, "n_words": 15, "vocab_size": 14, "complexity": 3, "nloc": 6, "token_counts": 44, "n_ast_nodes": 81, "n_identifiers": 7, "d_id": 1336, "documentation": { "docstring": "Returns a path to the default LUDWIG_CACHE location, or $HOME/.ludwig_cache.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 160229, "commit_id": "24d653f11a55f76b125a91d7d4523052ef14b9b9", "repo": "numpy", "path": "numpy/core/numeric.py", "file_name": "numeric.py", "fun_name": "correlate", "commit_message": "DOC: Use math mode", "code": "def correlate(a, v, mode='valid'):\n \n return multiarray.correlate2(a, v, mode)\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 14, "n_words": 8, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 23, "n_ast_nodes": 37, "n_identifiers": 6, "d_id": 38576, "documentation": { "docstring": "\n Cross-correlation of two 1-dimensional sequences.\n\n This function computes the correlation as generally defined in signal\n processing texts::\n\n .. math:: c_k = \\sum_n a_{n+k} * \\overline{v_n}\n\n with a and v sequences being zero-padded where necessary and\n :math:`\\overline x` denoting complex conjugation.\n\n Parameters\n ----------\n a, v : array_like\n Input sequences.\n mode : {'valid', 'same', 'full'}, optional\n Refer to the `convolve` docstring. Note that the default\n is 'valid', unlike `convolve`, which uses 'full'.\n old_behavior : bool\n `old_behavior` was removed in NumPy 1.10. If you need the old\n behavior, use `multiarray.correlate`.\n\n Returns\n -------\n out : ndarray\n Discrete cross-correlation of `a` and `v`.\n\n See Also\n --------\n convolve : Discrete, linear convolution of two one-dimensional sequences.\n multiarray.correlate : Old, no conjugate, version of correlate.\n scipy.signal.correlate : uses FFT which has superior performance on large arrays. \n\n Notes\n -----\n The definition of correlation above is not unique and sometimes correlation\n may be defined differently. Another common definition is::\n\n .. math:: c'_k = \\sum_n a_{n} * \\overline{v_{n+k}\n\n which is related to :math:`c_k` by :math:`c'_k = c_{-k}`.\n\n `numpy.correlate` may perform slowly in large arrays (i.e. n = 1e5) because it does\n not use the FFT to compute the convolution; in that case, `scipy.signal.correlate` might\n be preferable.\n \n\n Examples\n --------\n >>> np.correlate([1, 2, 3], [0, 1, 0.5])\n array([3.5])\n >>> np.correlate([1, 2, 3], [0, 1, 0.5], \"same\")\n array([2. , 3.5, 3. ])\n >>> np.correlate([1, 2, 3], [0, 1, 0.5], \"full\")\n array([0.5, 2. , 3.5, 3. , 0. 
])\n\n Using complex sequences:\n\n >>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full')\n array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ])\n\n Note that you get the time reversed, complex conjugated result\n when the two input sequences change places, i.e.,\n ``c_{va}[k] = c^{*}_{av}[-k]``:\n\n >>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full')\n array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j])\n\n ", "n_words": 293, "vocab_size": 195, "n_whitespaces": 491, "language": "en" } }, { "id": 195055, "commit_id": "ecdfbd0bb2ab76876e9fd3817d4502c3938a2ade", "repo": "ParlAI", "path": "parlai/core/params.py", "file_name": "params.py", "fun_name": "default", "commit_message": "Decoder-Only Transformer (#4329)\n\n* quick and dirty decoder-only implementation\r\n\r\n* fix decoder_only incremental decoding\r\n\r\n* remove unused code, add some comments, propogate func signature change\r\n\r\n* consolidate code in decoder.py\r\n\r\n* unify encoder_state\r\n\r\n* export PassThroughEncoder\r\n\r\n* add missing build_ functions\r\n\r\n* defaults in TransformerDecoderLayer __init__\r\n\r\n* comments, consolidating more logic, simplified forward_layers args\r\n\r\n* resize token embeddings and unit test\r\n\r\n* attempt to suppress some unused import warnings\r\n\r\n* padded_tensor fp16 friendly\r\n\r\n* autoformat\r\n\r\n* decoder_only -> decoder\r\n\r\n* more documentation\r\n\r\n* update name in test\r\n\r\n* add missing dict args\r\n\r\n* more argument massaging\r\n\r\n* update TestBartDistillation::test_narrow_distillation_losses numbers\r\n\r\n* update TestTransformerDistillation::test_narrow_distillation_losses numbers\r\n\r\n* fix _pad_tensor in seeker\r\n\r\nCo-authored-by: klshuster ", "code": "def default(val, default):\n \n return val if val is not None else default\n", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 2, "token_counts": 17, "n_ast_nodes": 27, "n_identifiers": 2, "d_id": 47174, "documentation": { "docstring": "\n shorthand for explicit None check for optional arguments.\n ", "n_words": 8, "vocab_size": 7, "n_whitespaces": 15, "language": "en" } }, { "id": 135643, "commit_id": "b84dac2609bd587c43ed17bb6fa18fb7241a41de", "repo": "ray", "path": "rllib/utils/actor_manager.py", "file_name": "actor_manager.py", "fun_name": "actors", "commit_message": "Refactor ActorManager to store underlying remote actors in dict. 
(#29953)\n\nSigned-off-by: Jun Gong ", "code": "def actors(self):\n \n # TODO(jungong) : remove this API once WorkerSet.remote_workers()\n # and WorkerSet._remote_workers() are removed.\n return self.__actors\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 45, "n_words": 17, "vocab_size": 16, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 21, "n_identifiers": 3, "d_id": 30681, "documentation": { "docstring": "Access the underlying actors being managed.\n\n Warning (jungong): This API should almost never be used.\n It is only exposed for testing and backward compatibility reasons.\n Remote actors managed by this class should never be accessed directly.\n ", "n_words": 36, "vocab_size": 32, "n_whitespaces": 64, "language": "en" } }, { "id": 106875, "commit_id": "5b8b7f267cfaf76a2a39a727ef31a62b3909a093", "repo": "visdom", "path": "py/visdom/__init__.py", "file_name": "__init__.py", "fun_name": "_send", "commit_message": "apply black py to all python files", "code": "def _send(self, msg, endpoint=\"events\", quiet=False, from_log=False, create=True):\n \n if msg.get(\"eid\", None) is None:\n msg[\"eid\"] = self.env\n self.env_list.add(self.env)\n\n if msg.get(\"eid\", None) is not None:\n self.env_list.add(msg[\"eid\"])\n\n # TODO investigate send use cases, then deprecate\n if not self.send:\n return msg, endpoint\n\n if \"win\" in msg and msg[\"win\"] is None and create:\n msg[\"win\"] = \"window_\" + get_rand_id()\n\n if not from_log:\n self._log(msg, endpoint)\n\n if self.offline:\n # If offline, don't even try to post\n return msg[\"win\"] if \"win\" in msg else True\n\n try:\n return self._handle_post(\n \"{0}:{1}{2}/{3}\".format(\n self.server, self.port, self.base_url, endpoint\n ),\n data=json.dumps(msg),\n )\n except (requests.RequestException, requests.ConnectionError, requests.Timeout):\n if self.raise_exceptions:\n raise ConnectionError(\"Error connecting to Visdom server\")\n else:\n if self.raise_exceptions is None:\n warnings.warn(\n \"Visdom is eventually changing to default to raising \"\n \"exceptions rather than ignoring/printing. This change\"\n \" is expected to happen by July 2018. Please set \"\n \"`raise_exceptions` to False to retain current \"\n \"behavior.\",\n PendingDeprecationWarning,\n )\n if not quiet:\n print(\"Exception in user code:\")\n print(\"-\" * 60)\n traceback.print_exc()\n return False\n", "url": "https://github.com/fossasia/visdom.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 712, "n_words": 153, "vocab_size": 107, "complexity": 14, "nloc": 39, "token_counts": 244, "n_ast_nodes": 412, "n_identifiers": 34, "d_id": 22492, "documentation": { "docstring": "\n This function sends specified JSON request to the Tornado server. This\n function should generally not be called by the user, unless you want to\n build the required JSON yourself. `endpoint` specifies the destination\n Tornado server endpoint for the request.\n\n If `create=True`, then if `win=None` in the message a new window will be\n created with a random name. 
If `create=False`, `win=None` indicates the\n operation should be applied to all windows.\n ", "n_words": 69, "vocab_size": 51, "n_whitespaces": 126, "language": "en" } }, { "id": 176523, "commit_id": "6ab4e54e696ae65534e1c3329930df8beee03573", "repo": "networkx", "path": "networkx/classes/multidigraph.py", "file_name": "multidigraph.py", "fun_name": "add_edge", "commit_message": "Fixed wrong dict factory usage on MultiDiGraph (#5456)\n\n* Fixed the issue that the wrong dict factory on a MultiDiGraph was used for edge attributes (edge_key_dict_factory instead of edge_attr_dict_factory)\r\nExtended tests to typecheck the dict factories and added a test that incorporates custom dict factories on a MultiDiGraph\r\n\r\n* Mypy ignore inferred types in MDG subclass.\r\n\r\nCo-authored-by: Fabian Ball \r\nCo-authored-by: Ross Barnowski ", "code": "def add_edge(self, u_for_edge, v_for_edge, key=None, **attr):\n \n u, v = u_for_edge, v_for_edge\n # add nodes\n if u not in self._succ:\n if u is None:\n raise ValueError(\"None cannot be a node\")\n self._succ[u] = self.adjlist_inner_dict_factory()\n self._pred[u] = self.adjlist_inner_dict_factory()\n self._node[u] = self.node_attr_dict_factory()\n if v not in self._succ:\n if v is None:\n raise ValueError(\"None cannot be a node\")\n self._succ[v] = self.adjlist_inner_dict_factory()\n self._pred[v] = self.adjlist_inner_dict_factory()\n self._node[v] = self.node_attr_dict_factory()\n if key is None:\n key = self.new_edge_key(u, v)\n if v in self._succ[u]:\n keydict = self._adj[u][v]\n datadict = keydict.get(key, self.edge_attr_dict_factory())\n datadict.update(attr)\n keydict[key] = datadict\n else:\n # selfloops work this way without special treatment\n datadict = self.edge_attr_dict_factory()\n datadict.update(attr)\n keydict = self.edge_key_dict_factory()\n keydict[key] = datadict\n self._succ[u][v] = keydict\n self._pred[v][u] = keydict\n return key\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 425, "n_words": 112, "vocab_size": 58, "complexity": 7, "nloc": 29, "token_counts": 246, "n_ast_nodes": 390, "n_identifiers": 22, "d_id": 41942, "documentation": { "docstring": "Add an edge between u and v.\n\n The nodes u and v will be automatically added if they are\n not already in the graph.\n\n Edge attributes can be specified with keywords or by directly\n accessing the edge's attribute dictionary. See examples below.\n\n Parameters\n ----------\n u_for_edge, v_for_edge : nodes\n Nodes can be, for example, strings or numbers.\n Nodes must be hashable (and not None) Python objects.\n key : hashable identifier, optional (default=lowest unused integer)\n Used to distinguish multiedges between a pair of nodes.\n attr : keyword arguments, optional\n Edge data (or labels or objects) can be assigned using\n keyword arguments.\n\n Returns\n -------\n The edge key assigned to the edge.\n\n See Also\n --------\n add_edges_from : add a collection of edges\n\n Notes\n -----\n To replace/update edge data, use the optional key argument\n to identify a unique edge. Otherwise a new edge will be created.\n\n NetworkX algorithms designed for weighted graphs cannot use\n multigraphs directly because it is not clear how to handle\n multiedge weights. 
Convert to Graph using edge attribute\n 'weight' to enable weighted graph algorithms.\n\n Default keys are generated using the method `new_edge_key()`.\n This method can be overridden by subclassing the base class and\n providing a custom `new_edge_key()` method.\n\n Examples\n --------\n The following all add the edge e=(1, 2) to graph G:\n\n >>> G = nx.MultiDiGraph()\n >>> e = (1, 2)\n >>> key = G.add_edge(1, 2) # explicit two-node form\n >>> G.add_edge(*e) # single edge as tuple of two nodes\n 1\n >>> G.add_edges_from([(1, 2)]) # add edges from iterable container\n [2]\n\n Associate data to edges using keywords:\n\n >>> key = G.add_edge(1, 2, weight=3)\n >>> key = G.add_edge(1, 2, key=0, weight=4) # update data for key=0\n >>> key = G.add_edge(1, 3, weight=7, capacity=15, length=342.7)\n\n For non-string attribute keys, use subscript notation.\n\n >>> ekey = G.add_edge(1, 2)\n >>> G[1][2][0].update({0: 5})\n >>> G.edges[1, 2, 0].update({0: 5})\n ", "n_words": 301, "vocab_size": 186, "n_whitespaces": 677, "language": "en" } }, { "id": 101222, "commit_id": "5e73437be47f2410439a3c6716de96354e6a0c94", "repo": "faceswap", "path": "lib/align/detected_face.py", "file_name": "detected_face.py", "fun_name": "_get_kwargs", "commit_message": "lib.align updates:\n - alignments.py\n - Add typed dicts for imported alignments\n - Explicitly check for presence of thumb value in alignments dict\n - linting\n - detected_face.py\n - Typing\n - Linting\n - Legacy support for pre-aligned face\n - Update dependencies to new property names", "code": "def _get_kwargs(self) -> Dict[str, Union[int, Tuple[int, int]]]:\n \n retval = {kword: self._kwarg_mapping[kword]\n for kword in self._kwarg_requirements[self._blur_type]}\n logger.trace(\"BlurMask kwargs: %s\", retval) # type: ignore\n return retval\n\n\n_HASHES_SEEN: Dict[str, Dict[str, int]] = {}\n\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 75, "n_words": 30, "vocab_size": 26, "complexity": 2, "nloc": 6, "token_counts": 56, "n_ast_nodes": 107, "n_identifiers": 15, "d_id": 20642, "documentation": { "docstring": " dict: the valid keyword arguments for the requested :attr:`_blur_type` ", "n_words": 9, "vocab_size": 8, "n_whitespaces": 10, "language": "en" } }, { "id": 221361, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/codecs.py", "file_name": "codecs.py", "fun_name": "getencoder", "commit_message": "add python 3.10.4 for windows", "code": "def getencoder(encoding):\n\n \n return lookup(encoding).encode\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 10, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 24, "n_identifiers": 4, "d_id": 56376, "documentation": { "docstring": " Lookup up the codec for the given encoding and return\n its encoder function.\n\n Raises a LookupError in case the encoding cannot be found.\n\n ", "n_words": 23, "vocab_size": 20, "n_whitespaces": 41, "language": "en" } }, { "id": 101267, "commit_id": "5e73437be47f2410439a3c6716de96354e6a0c94", "repo": "faceswap", "path": "tools/manual/faceviewer/viewport.py", "file_name": "viewport.py", "fun_name": "reset", "commit_message": "lib.align updates:\n - alignments.py\n - Add typed dicts for imported alignments\n - Explicitly check for presence of thumb value in alignments dict\n - linting\n - 
detected_face.py\n - Typing\n - Linting\n - Legacy support for pre-aligned face\n - Update dependencies to new property names", "code": "def reset(self):\n \n self._landmarks = {}\n self._tk_faces = {}\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 29, "n_words": 8, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 18, "n_ast_nodes": 33, "n_identifiers": 4, "d_id": 20686, "documentation": { "docstring": " Reset all the cached objects on a face size change. ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 11, "language": "en" } }, { "id": 320800, "commit_id": "a20bb67a878b2e68abf8268c1b0a27f018d01352", "repo": "qutebrowser", "path": "qutebrowser/mainwindow/tabbedbrowser.py", "file_name": "tabbedbrowser.py", "fun_name": "on_mode_entered", "commit_message": "mypy: Upgrade to PyQt5-stubs 5.15.6.0\n\nFor some unknown reason, those new stubs cause a *lot* of things now to be\nchecked by mypy which formerly probably got skipped due to Any being implied\nsomewhere.\n\nThe stubs themselves mainly improved, with a couple of regressions too.\n\nIn total, there were some 337 (!) new mypy errors. This commit fixes almost all\nof them, and the next commit improves a fix to get things down to 0 errors\nagain.\n\nOverview of the changes:\n\n==== qutebrowser/app.py\n\n- Drop type ignore due to improved stubs.\n\n==== qutebrowser/browser/browsertab.py\n\n- Specify the type of _widget members more closely than just QWidget.\n This is debatable: I suppose the abstract stuff shouldn't need to know\n anything about the concrete backends at all. But it seems like we cut some\n corners when initially implementing things, and put some code in browsertab.py\n just because the APIs of both backends happened to be compatible. Perhaps\n something to reconsider once we drop QtWebKit and hopefully implement a dummy\n backend.\n\n- Add an additional assertion in AbstractAction.run_string. This is already\n covered by the isinstance(member, self.action_base) above it, but that's too\n dynamic for mypy to understand.\n\n- Fix the return type of AbstractScroller.pos_px, which is a QPoint (with x\n and y components), not a single int.\n\n- Fix the return type of AbstractScroller.pos_perc, which is a Tuple (with x\n and y components), not a single int.\n\n- Fix the argument types of AbstractScroller.to_perc, as it's possible to pass\n fractional percentages too.\n\n- Specify the type for AbstractHistoryPrivate._history. See above (_widget) re\n this being debatable.\n\n- Fix the return type of AbstractTabPrivate.event_target(), which can be None\n (see #3888).\n\n- Fix the return type of AbstractTabPrivate.run_js_sync, which is Any (the JS\n return value), not None.\n\n- Fix the argument type for AbstractTabPrivate.toggle_inspector: position can\n be None to use the last used position.\n\n- Declare the type of sub-objects of AbstractTab.\n\n- Fix the return value of AbstractTab.icon(), which is the QIcon, not None.\n\n==== qutebrowser/browser/commands.py\n\n- Make sure the active window is a MainWindow (with a .win_id attribute).\n\n==== qutebrowser/browser/downloadview.py\n\n- Add _model() which makes sure that self.model() is a DownloadModel, not None\n or any other model. This is needed because other methods access a variety of\n custom attributes on it, e.g. last_index().\n\n==== qutebrowser/browser/greasemonkey.py\n\n- Add an ignore for AbstractDownload.requested_url which we patch onto the\n downloads. 
Probably would be nicer to add it as a proper attribute which always\n gets set by the DownloadManager.\n\n==== qutebrowser/browser/hints.py\n\n- Remove type ignores for QUrl.toString().\n- Add a new type ignore for combining different URL flags (which works, but is\n not exactly type safe... still probably a regression in the stubs).\n- Make sure the things we get back from self._get_keyparser are what we actually\n expect. Probably should introduce a TypedDict (and/or overloads for\n _get_keyparser with typing.Literal) to teach mypy about the exact return value.\n See #7098.\n This is needed because we access Hint/NormalKeyParser-specific attributes such\n as .set_inhibited_timout() or .update_bindings().\n\n==== qutebrowser/browser/inspector.py\n\n- Similar changes than in browsertab.py to make some types where we share API\n (e.g. .setPage()) more concrete. Didn't work out unfortunately, see next\n commit.\n\n==== qutebrowser/browser/network/pac.py\n\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/browser/qtnetworkdownloads.py\n\n- Make sure that downloads is a qtnetworkdownloads.DownloadItem (rather than an\n AbstractDownload), so that we can call ._uses_nam() on it.\n\n==== qutebrowser/browser/qutescheme.py\n\n- Remove now unneeded type ignore for QUrl flags.\n\n==== qutebrowser/browser/urlmarks.py\n\n- Specify the type of UrlMarkManager._lineparser, as those only get initialized\n in _init_lineparser of subclasses, so mypy doesn't know it's supposed to exist.\n\n==== qutebrowser/browser/webelem.py\n\n- New casts to turn single KeyboardModifier (enum) entries into\n KeyboardModifiers (flags). Might not be needed anymore with Qt 6.\n- With that, casting the final value is now unneeded.\n\n==== qutebrowser/browser/webengine/notification.py\n\n- Remove now unneeded type ignore for signal.\n- Make sure the self.sender() we get in HerbeNotificationAdapter._on_finished()\n is a QProcess, not just any QObject.\n\n==== qutebrowser/browser/webengine/webenginedownloads.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/browser/webengine/webengineelem.py\n\n- Specify the type of WebEngineElement._tab.\n- Remove now unneeded type ignore for mixed flags.\n\n==== qutebrowser/browser/webengine/webengineinspector.py\n\n- See changes to inspector.py and next commit.\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/browser/webengine/webenginequtescheme.py\n\n- Remove now unneeded type ignore for mixed flags.\n\n==== qutebrowser/browser/webengine/webenginesettings.py\n\n- Ignore access of .setter attribute which we patch onto QWebEngineProfile.\n Would be nice to have a subclass or wrapper-class instead.\n\n==== qutebrowser/browser/webengine/webenginetab.py\n\n- Specified the type of _widget members more closely than just QWidget.\n See browsertab.py changes for details.\n- Remove some now-unneeded type ignores for creating FindFlags.\n- Specify more concrete types for WebEngineTab members where we actually need to\n access WebEngine-specific attributes.\n- Make sure the page we get is our custom WebEnginePage subclass, not just any\n QWebEnginePage. This is needed because we access custom attributes on it.\n\n==== qutebrowser/browser/webengine/webview.py\n\n- Make sure the page we get is our custom WebEnginePage subclass, not just any\n QWebEnginePage. 
This is needed because we access custom attributes on it.\n\n==== qutebrowser/browser/webkit/network/networkreply.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/browser/webkit/webkitinspector.py\n\n- See changes to inspector.py and next commit.\n\n==== qutebrowser/browser/webkit/webkittab.py\n\n- Specify the type of _widget members more closely than just QWidget.\n See browsertab.py changes for details.\n- Add a type ignore for WebKitAction because our workaround needs to\n treat them as ints (which is allowed by PyQt, even if not type-safe).\n- Add new ignores for findText calls: The text is a QString and can be None; the\n flags are valid despite mypy thinking they aren't (stubs regression?).\n- Specify the type for WebKitHistoryPrivate._history, because we access\n WebKit-specific attributes. See above (_widget) re this being debatable.\n- Make mypy aware that .currentFrame() and .frameAt() can return None (stubs\n regression?).\n- Make sure the .page() and .page().networkAccessManager() are our subclasses\n rather than the more generic QtWebKit objects, as we use custom attributes.\n- Add new type ignores for signals (stubs regression!)\n\n==== qutebrowser/browser/webkit/webpage.py\n\n- Make sure the .networkAccessManager() is our subclass rather than the more\n generic QtWebKit object, as we use custom attributes.\n- Replace a cast by a type ignore. The cast didn't work anymore.\n\n==== qutebrowser/browser/webkit/webview.py\n\n- Make sure the .page() is our subclass rather than the more generic QtWebKit\n object, as we use custom attributes.\n\n==== qutebrowser/commands/userscripts.py\n\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/completion/completer.py\n\n- Add a new _completion() getter (which ensures it actually gets the completion\n view) rather than accessing the .parent() directly (which could be any QObject).\n\n==== qutebrowser/completion/completiondelegate.py\n\n- Make sure self.parent() is a CompletionView (no helper method as there is only\n one instance).\n- Remove a now-unneeded type ignore for adding QSizes.\n\n==== qutebrowser/completion/completionwidget.py\n\n- Add a ._model() getter which ensures that we get a CompletionModel (with\n custom attributes) rather than Qt's .model() which can be any QAbstractItemModel\n (or None).\n- Removed a now-unneeded type ignore for OR-ing flags.\n\n==== qutebrowser/completion/models/completionmodel.py\n\n- Remove now unneeded type ignores for signals.\n- Ignore a complaint about .set_pattern() not being defined. Completion\n categories don't share any common parent class, so it would be good to introduce\n a typing.Protocol for this. See #7098.\n\n==== qutebrowser/components/misccommands.py\n\n- Removed a now-unneeded type ignore for OR-ing flags.\n\n==== qutebrowser/components/readlinecommands.py\n\n- Make sure QApplication.instance() is a QApplication (and not just a\n QCoreApplication). This includes the former \"not None\" check.\n\n==== qutebrowser/components/scrollcommands.py\n\n- Add basic annotation for \"funcs\" dict. Could have a callable protocol to\n specify it needs a count kwarg, see #7098.\n\n==== qutebrowser/config/stylesheet.py\n\n- Correctly specify that stylesheet apply to QWidgets, not any QObject.\n- Ignore an attr-defined for obj.STYLESHEET. Perhaps could somehow teach mypy\n about this with overloads and protocols (stylesheet for set_register being None\n => STYLESHEET needs to be defined, otherwise anything goes), but perhaps not\n worth the troble. 
See #7098.\n\n==== qutebrowser/keyinput/keyutils.py\n\n- Remove some now-unneeded type ignores and add a cast for using a single enum\n value as flags. Might need to look at this again with Qt 6 support.\n\n==== qutebrowser/keyinput/modeman.py\n\n- Add a FIXME for using a TypedDict, see comments for hints.py above.\n\n==== qutebrowser/mainwindow/mainwindow.py\n\n- Remove now-unneeded type ignores for calling with OR-ed flags.\n- Improve where we cast from WindowType to WindowFlags, no int needed\n- Use new .tab_bar() getter, see below.\n\n==== qutebrowser/mainwindow/prompt.py\n\n- Remove now-unneeded type ignores for calling with OR-ed flags.\n\n==== qutebrowser/mainwindow/statusbar/bar.py\n\n- Adjust type ignores around @pyqtProperty. The fact one is still needed seems\n like a stub regression.\n\n==== qutebrowser/mainwindow/statusbar/command.py\n\n- Fix type for setText() override (from QLineEdit): text can be None\n (QString in C++).\n\n==== qutebrowser/mainwindow/statusbar/url.py\n\n- Adjust type ignores around @pyqtProperty. The fact one is still needed seems\n like a stub regression.\n\n==== qutebrowser/mainwindow/tabbedbrowser.py\n\n- Specify that TabDeque manages browser tabs, not any QWidgets. It accesses\n AbstractTab-specific attributes.\n- Make sure that the .tabBar() we get is a tabwidget.TabBar, as we access\n .maybe_hide.\n- Fix the annotations for stored marks: Scroll positions are a QPoint, not int.\n- Add _current_tab() and _tab_by_idx() wrappers for .currentWidget() and\n .widget(), which ensures that the return values are valid AbstractTabs (or None\n for _tab_by_idx). This is needed because we access AbstractTab-specific\n attributes.\n- For some places, where the tab can be None, continue using .currentTab() but\n add asserts.\n- Remove some now-unneeded [unreachable] ignores, as mypy knows about the None\n possibility now.\n\n==== qutebrowser/mainwindow/tabwidget.py\n\n- Add new tab_bar() and _tab_by_idx() helpers which check that the .tabBar() and\n .widget() are of type TabBar and AbstractTab, respectively.\n- Add additional assertions where we expect ._tab_by_idx() to never be None.\n- Remove dead code in get_tab_fields for handling a None y scroll position. I\n was unable to find any place in the code where this could be set to None.\n- Remove some now-unneeded type ignores and casts, as mypy now knows that\n _type_by_idx() could be None.\n- Work around a strange instance where mypy complains about not being able to\n find the type of TabBar.drag_in_progress from TabWidget._toggle_visibility,\n despite it clearly being shown as a bool *inside* that class without any\n annotation.\n- Add a ._tab_widget() getter in TabBar which ensures that the .parent() is in\n fact a TabWidget.\n\n==== qutebrowser/misc/crashsignal.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/misc/editor.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/misc/ipc.py\n\n- Remove now unneeded type ignores for signals.\n- Add new type ignores for .error() which is both a signal and a getter\n (stub regression?). Won't be relevant for Qt 6 anymore, as the signal was\n renamed to errorOccurred in 5.15.\n\n==== qutebrowser/misc/objects.py\n\n- Make sure mypy knows that objects.app is our custom Application (with custom\n attributes) rather than any QApplication.\n\n==== qutebrowser/utils/objreg.py\n\n- Ignore attr-defined for .win_id attributes. 
Maybe could add a typing.Protocol,\n but ideally, the whole objreg stuff should die one day anyways.\n\n==== tests/unit/completion/test_completer.py\n\n- Make CompletionWidgetStub inherit from CompletionView so that it passes the\n new isinstance() asserts in completer.py (see above).", "code": "def on_mode_entered(self, mode):\n \n if (config.val.tabs.mode_on_change == 'restore' and\n mode in modeman.INPUT_MODES):\n tab = self.widget.currentWidget()\n if tab is not None:\n assert isinstance(tab, browsertab.AbstractTab), tab\n tab.data.input_mode = mode\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 107, "n_words": 26, "vocab_size": 21, "complexity": 4, "nloc": 7, "token_counts": 60, "n_ast_nodes": 96, "n_identifiers": 17, "d_id": 117362, "documentation": { "docstring": "Save input mode when tabs.mode_on_change = restore.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 278120, "commit_id": "6fafb567af4e4d9f42974d0b6c55b18bc03e17eb", "repo": "keras", "path": "keras/feature_column/sequence_feature_column_test.py", "file_name": "sequence_feature_column_test.py", "fun_name": "test_shared_embedding_column_with_non_sequence_categorical", "commit_message": "resolve line-too-long in feature_column", "code": "def test_shared_embedding_column_with_non_sequence_categorical(self):\n \n with tf.Graph().as_default():\n vocabulary_size = 3\n sparse_input_a = tf.compat.v1.SparseTensorValue(\n # example 0, ids [2]\n # example 1, ids [0, 1]\n indices=((0, 0), (1, 0), (1, 1)),\n values=(2, 0, 1),\n dense_shape=(2, 2),\n )\n sparse_input_b = tf.compat.v1.SparseTensorValue(\n # example 0, ids [2]\n # example 1, ids [0, 1]\n indices=((0, 0), (1, 0), (1, 1)),\n values=(2, 0, 1),\n dense_shape=(2, 2),\n )\n\n categorical_column_a = (\n tf.feature_column.categorical_column_with_identity(\n key=\"aaa\", num_buckets=vocabulary_size\n )\n )\n categorical_column_b = (\n tf.feature_column.categorical_column_with_identity(\n key=\"bbb\", num_buckets=vocabulary_size\n )\n )\n shared_embedding_columns = tf.feature_column.shared_embeddings(\n [categorical_column_a, categorical_column_b], dimension=2\n )\n\n sequence_input_layer = ksfc.SequenceFeatures(\n shared_embedding_columns\n )\n with self.assertRaisesRegex(\n ValueError,\n r\"In embedding_column: aaa_shared_embedding\\. 
\"\n r\"categorical_column must \"\n r\"be of type SequenceCategoricalColumn to use \"\n r\"SequenceFeatures\\.\",\n ):\n _, _ = sequence_input_layer(\n {\"aaa\": sparse_input_a, \"bbb\": sparse_input_b}\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 696, "n_words": 115, "vocab_size": 64, "complexity": 1, "nloc": 39, "token_counts": 218, "n_ast_nodes": 332, "n_identifiers": 29, "d_id": 82378, "documentation": { "docstring": "Tests that error is raised for non-sequence shared embedding\n column.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 16, "language": "en" } }, { "id": 19224, "commit_id": "c05a4fdada59fd97332417c4b99515118bfef45c", "repo": "PythonRobotics", "path": "PathTracking/model_predictive_speed_and_steer_control/model_predictive_speed_and_steer_control.py", "file_name": "model_predictive_speed_and_steer_control.py", "fun_name": "linear_mpc_control", "commit_message": "Fix ModuleNotFoundError when executing test in the tests folder and little improve MPC controller (#619)\n\n* Fix ModuleNotFoundError when executing test in the tests folder\r\n\r\nSigned-off-by: Trung Kien \r\n\r\n* Improve model_predictive_speed_and_steer_control\r\n\r\n- Fix typo\r\n- Using @ for matrix multiplication instead of * with have been deprecated in CVXPY1.1\r\n- Fix missing conftest module in test file\r\n\r\nSigned-off-by: Trung Kien ", "code": "def linear_mpc_control(xref, xbar, x0, dref):\n \n\n x = cvxpy.Variable((NX, T + 1))\n u = cvxpy.Variable((NU, T))\n\n cost = 0.0\n constraints = []\n\n for t in range(T):\n cost += cvxpy.quad_form(u[:, t], R)\n\n if t != 0:\n cost += cvxpy.quad_form(xref[:, t] - x[:, t], Q)\n\n A, B, C = get_linear_model_matrix(\n xbar[2, t], xbar[3, t], dref[0, t])\n constraints += [x[:, t + 1] == A @ x[:, t] + B @ u[:, t] + C]\n\n if t < (T - 1):\n cost += cvxpy.quad_form(u[:, t + 1] - u[:, t], Rd)\n constraints += [cvxpy.abs(u[1, t + 1] - u[1, t]) <=\n MAX_DSTEER * DT]\n\n cost += cvxpy.quad_form(xref[:, T] - x[:, T], Qf)\n\n constraints += [x[:, 0] == x0]\n constraints += [x[2, :] <= MAX_SPEED]\n constraints += [x[2, :] >= MIN_SPEED]\n constraints += [cvxpy.abs(u[0, :]) <= MAX_ACCEL]\n constraints += [cvxpy.abs(u[1, :]) <= MAX_STEER]\n\n prob = cvxpy.Problem(cvxpy.Minimize(cost), constraints)\n prob.solve(solver=cvxpy.ECOS, verbose=False)\n\n if prob.status == cvxpy.OPTIMAL or prob.status == cvxpy.OPTIMAL_INACCURATE:\n ox = get_nparray_from_matrix(x.value[0, :])\n oy = get_nparray_from_matrix(x.value[1, :])\n ov = get_nparray_from_matrix(x.value[2, :])\n oyaw = get_nparray_from_matrix(x.value[3, :])\n oa = get_nparray_from_matrix(u.value[0, :])\n odelta = get_nparray_from_matrix(u.value[1, :])\n\n else:\n print(\"Error: Cannot solve mpc..\")\n oa, odelta, ox, oy, oyaw, ov = None, None, None, None, None, None\n\n return oa, odelta, ox, oy, oyaw, ov\n\n", "url": "https://github.com/AtsushiSakai/PythonRobotics.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 414, "n_words": 201, "vocab_size": 108, "complexity": 6, "nloc": 35, "token_counts": 476, "n_ast_nodes": 699, "n_identifiers": 51, "d_id": 2919, "documentation": { "docstring": "\n linear mpc control\n\n xref: reference point\n xbar: operational point\n x0: initial state\n dref: reference steer angle\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 35, "language": "en" } }, { "id": 9895, "commit_id": "933415bfa1f9eb89f935037014dfed816eb9815d", "repo": 
"jina", "path": "jina/peapods/pods/__init__.py", "file_name": "__init__.py", "fun_name": "uses_after_args", "commit_message": "feat: star routing (#3900)\n\n* feat(proto): adjust proto for star routing (#3844)\r\n\r\n* feat(proto): adjust proto for star routing\r\n\r\n* feat(proto): generate proto files\r\n\r\n* feat(grpc): refactor grpclet interface (#3846)\r\n\r\n* feat: refactor connection pool for star routing (#3872)\r\n\r\n* feat(k8s): add more labels to k8s deployments\r\n\r\n* feat(network): refactor connection pool\r\n\r\n* feat(network): refactor k8s pool\r\n\r\n* feat: star routing graph gateway (#3877)\r\n\r\n* feat: star routing - refactor grpc data runtime (#3887)\r\n\r\n* feat(runtimes): refactor grpc dataruntime\r\n\r\n* fix(tests): adapt worker runtime tests\r\n\r\n* fix(import): fix import\r\n\r\n* feat(proto): enable sending multiple lists (#3891)\r\n\r\n* feat: star routing gateway (#3893)\r\n\r\n* feat: star routing gateway all protocols (#3897)\r\n\r\n* test: add streaming and prefetch tests (#3901)\r\n\r\n* feat(head): new head runtime for star routing (#3899)\r\n\r\n* feat(head): new head runtime\r\n\r\n* feat(head): new head runtime\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(network): improve proto comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(worker): merge docs in worker runtime (#3905)\r\n\r\n* feat(worker): merge docs in worker runtime\r\n\r\n* feat(tests): assert after clean up\r\n\r\n* feat(tests): star routing runtime integration tests (#3908)\r\n\r\n* fix(tests): fix integration tests\r\n\r\n* test: test runtimes fast slow request (#3910)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table (#3915)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(zmq): adapt comment in dependency list\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix type tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: add test gateway to worker connection (#3921)\r\n\r\n* feat(pea): adapt peas for star routing (#3918)\r\n\r\n* feat(pea): adapt peas for star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(pea): add tests\r\n\r\n* feat(tests): add failing head pea test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(tests): integration tests for peas (#3923)\r\n\r\n* feat(tests): integration tests for peas\r\n\r\n* feat(pea): remove _inner_pea function\r\n\r\n* feat: star routing container pea (#3922)\r\n\r\n* test: rescue tests (#3942)\r\n\r\n* fix: fix streaming tests (#3945)\r\n\r\n* refactor: move docker run to run (#3948)\r\n\r\n* feat: star routing pods (#3940)\r\n\r\n* feat(pod): adapt pods for star routing\r\n\r\n* feat(pods): adapt basepod to star routing\r\n\r\n* feat(pod): merge pod and compound pod\r\n\r\n* feat(tests): fix tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(test): add container pea int test\r\n\r\n* feat(ci): remove more unnecessary tests\r\n\r\n* fix(tests): remove jinad runtime\r\n\r\n* feat(ci): remove latency tracking\r\n\r\n* fix(ci): fix ci def\r\n\r\n* fix(runtime): enable runtime to be exited\r\n\r\n* fix(tests): wrap runtime test in process\r\n\r\n* fix(runtimes): remove unused runtimes\r\n\r\n* feat(runtimes): improve cancel wait\r\n\r\n* fix(ci): build test pip again in ci\r\n\r\n* fix(tests): fix a test\r\n\r\n* fix(test): run async in its own process\r\n\r\n* feat(pod): include shard in activate msg\r\n\r\n* fix(pea): dont join\r\n\r\n* feat(pod): more debug out\r\n\r\n* 
feat(grpc): manage channels properly\r\n\r\n* feat(pods): remove exitfifo\r\n\r\n* feat(network): add simple send retry mechanism\r\n\r\n* fix(network): await pool close\r\n\r\n* fix(test): always close grpc server in worker\r\n\r\n* fix(tests): remove container pea from tests\r\n\r\n* fix(tests): reorder tests\r\n\r\n* fix(ci): split tests\r\n\r\n* fix(ci): allow alias setting\r\n\r\n* fix(test): skip a test\r\n\r\n* feat(pods): address comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: unblock skipped test (#3957)\r\n\r\n* feat: jinad pea (#3949)\r\n\r\n* feat: jinad pea\r\n\r\n* feat: jinad pea\r\n\r\n* test: remote peas\r\n\r\n* test: toplogy tests with jinad\r\n\r\n* ci: parallel jobs\r\n\r\n* feat(tests): add pod integration tests (#3958)\r\n\r\n* feat(tests): add pod integration tests\r\n\r\n* fix(tests): make tests less flaky\r\n\r\n* fix(test): fix test\r\n\r\n* test(pea): remote pea topologies (#3961)\r\n\r\n* test(pea): remote pea simple topology\r\n\r\n* test: remote pea topologies\r\n\r\n* refactor: refactor streamer result handling (#3960)\r\n\r\n* feat(k8s): adapt K8s Pod for StarRouting (#3964)\r\n\r\n* test: optimize k8s test\r\n\r\n* test: increase timeout and use different namespace\r\n\r\n* test: optimize k8s test\r\n\r\n* test: build and load image when needed\r\n\r\n* test: refactor k8s test\r\n\r\n* test: fix image name error\r\n\r\n* test: fix k8s image load\r\n\r\n* test: fix typoe port expose\r\n\r\n* test: update tests in connection pool and handling\r\n\r\n* test: remove unused fixture\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* feat(k8s): adapt k8s pod for star routing\r\n\r\n* fix(k8s): dont overwrite add/remove function in pool\r\n\r\n* fix(k8s): some fixes\r\n\r\n* fix(k8s): some more fixes\r\n\r\n* fix(k8s): linting\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix k8s unit tests\r\n\r\n* feat(k8s): complete k8s integration test\r\n\r\n* feat(k8s): finish k8s tests\r\n\r\n* feat(k8s): fix test\r\n\r\n* fix(tests): fix test with no name\r\n\r\n* feat(k8s): unify create/replace interface\r\n\r\n* feat(k8s): extract k8s port constants\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): wait for runtime being ready in tests\r\n\r\n* feat(k8s): address comments\r\n\r\nCo-authored-by: bwanglzu \r\n\r\n* feat(flow): adapt Flow for StarRouting (#3986)\r\n\r\n* feat(flow): add routes\r\n\r\n* feat(flow): adapt flow to star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(flow): handle empty topologies\r\n\r\n* feat(k8s): allow k8s pool disabling\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(test): fix test with mock\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(flow): clean up tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat: add plot function (#3994)\r\n\r\n* fix(tests): avoid hanging tests\r\n\r\n* feat(flow): add type hinting\r\n\r\n* fix(test): fix duplicate exec name in test\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): enable jinad test again\r\n\r\n* fix(tests): random port fixture\r\n\r\n* fix(style): replace quotes\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(ci): bring back ci (#3997)\r\n\r\n* feat(ci): enable ci again\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(ci): add latency tracking\r\n\r\n* feat(ci): bring back some tests\r\n\r\n* fix(tests): remove invalid port test\r\n\r\n* 
feat(ci): disable daemon and distributed tests\r\n\r\n* fix(tests): fix entrypoint in hub test\r\n\r\n* fix(tests): wait for gateway to be ready\r\n\r\n* fix(test): fix more tests\r\n\r\n* feat(flow): do rolling update and scale sequentially\r\n\r\n* fix(tests): fix more tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat: star routing hanging pods (#4011)\r\n\r\n* fix: try to handle hanging pods better\r\n\r\n* test: hanging pods test work\r\n\r\n* fix: fix topology graph problem\r\n\r\n* test: add unit test to graph\r\n\r\n* fix(tests): fix k8s tests\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s pool test\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s connection pool setting\r\n\r\n* fix(tests): make runtime test more reliable\r\n\r\n* fix(test): fix routes test\r\n\r\n* fix(tests): make rolling update test less flaky\r\n\r\n* feat(network): gurantee unique ports\r\n\r\n* feat(network): do round robin for shards\r\n\r\n* fix(ci): increase pytest timeout to 10 min\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix(ci): fix ci file\r\n\r\n* feat(daemon): jinad pod for star routing\r\n\r\n* Revert \"feat(daemon): jinad pod for star routing\"\r\n\r\nThis reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92.\r\n\r\n* feat(daemon): remote jinad pod support (#4042)\r\n\r\n* feat(daemon): add pod tests for star routing\r\n\r\n* feat(daemon): add remote pod test\r\n\r\n* test(daemon): add remote pod arguments test\r\n\r\n* test(daemon): add async scale test\r\n\r\n* test(daemon): add rolling update test\r\n\r\n* test(daemon): fix host\r\n\r\n* feat(proto): remove message proto (#4051)\r\n\r\n* feat(proto): remove message proto\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix some more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(proto): put docs back in data\r\n\r\n* fix(proto): clean up\r\n\r\n* feat(proto): clean up\r\n\r\n* fix(tests): skip latency tracking\r\n\r\n* fix(test): fix hub test\r\n\r\n* fix(tests): fix k8s test\r\n\r\n* fix(test): some test clean up\r\n\r\n* fix(style): clean up style issues\r\n\r\n* feat(proto): adjust for rebase\r\n\r\n* fix(tests): bring back latency tracking\r\n\r\n* fix(tests): fix merge accident\r\n\r\n* feat(proto): skip request serialization (#4074)\r\n\r\n* feat: add reduce to star routing (#4070)\r\n\r\n* feat: add reduce on shards to head runtime\r\n\r\n* test: add reduce integration tests with fixed order\r\n\r\n* feat: add reduce on needs\r\n\r\n* chore: get_docs_matrix_from_request becomes public\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* docs: remove undeterministic results warning\r\n\r\n* fix: fix uses_after\r\n\r\n* test: assert correct num docs after reducing in test_external_pod\r\n\r\n* test: correct asserts after reduce in test_rolling_update\r\n\r\n* fix: no reduce if uses_after_address is set\r\n\r\n* fix: get_docs_from_request only if needed\r\n\r\n* fix: fix tests after merge\r\n\r\n* refactor: move reduce from data_request_handler to head\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* chore: apply suggestions\r\n\r\n* fix: fix asserts\r\n\r\n* chore: minor test fix\r\n\r\n* chore: apply suggestions\r\n\r\n* test: remove flow tests with external executor (pea)\r\n\r\n* fix: fix test_expected_messages_routing\r\n\r\n* fix: fix test_func_joiner\r\n\r\n* test: adapt k8s test\r\n\r\nCo-authored-by: Jina Dev 
Bot \r\n\r\n* fix(k8s): fix static pool config\r\n\r\n* fix: use custom protoc doc generator image (#4088)\r\n\r\n* fix: use custom protoc doc generator image\r\n\r\n* fix(docs): minor doc improvement\r\n\r\n* fix(docs): use custom image\r\n\r\n* fix(docs): copy docarray\r\n\r\n* fix: doc building local only\r\n\r\n* fix: timeout doc building\r\n\r\n* fix: use updated args when building ContainerPea\r\n\r\n* test: add container PeaFactory test\r\n\r\n* fix: force pea close on windows (#4098)\r\n\r\n* fix: dont reduce if uses exist (#4099)\r\n\r\n* fix: dont use reduce if uses exist\r\n\r\n* fix: adjust reduce tests\r\n\r\n* fix: adjust more reduce tests\r\n\r\n* fix: fix more tests\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: ignore non jina resources (#4101)\r\n\r\n* feat(executor): enable async executors (#4102)\r\n\r\n* feat(daemon): daemon flow on star routing (#4096)\r\n\r\n* test(daemon): add remote flow test\r\n\r\n* feat(daemon): call scale in daemon\r\n\r\n* feat(daemon): remove tail args and identity\r\n\r\n* test(daemon): rename scalable executor\r\n\r\n* test(daemon): add a small delay in async test\r\n\r\n* feat(daemon): scale partial flow only\r\n\r\n* feat(daemon): call scale directly in partial flow store\r\n\r\n* test(daemon): use asyncio sleep\r\n\r\n* feat(daemon): enable flow level distributed tests\r\n\r\n* test(daemon): fix jinad env workspace config\r\n\r\n* test(daemon): fix pod test use new port rolling update\r\n\r\n* feat(daemon): enable distribuetd tests\r\n\r\n* test(daemon): remove duplicate tests and zed runtime test\r\n\r\n* test(daemon): fix stores unit test\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* test: correct test paths\r\n\r\n* test(daemon): add client test for remote flows\r\n\r\n* test(daemon): send a request with jina client\r\n\r\n* test(daemon): assert async generator\r\n\r\n* test(daemon): small interval between tests\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): use async client fetch result\r\n\r\n* test(daemon): finish container flow test\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): correct test paths\r\n\r\n* test(daemon): add small delay for async tests\r\n\r\n* fix: star routing fixes (#4100)\r\n\r\n* docs: update docs\r\n\r\n* fix: fix Request.__repr__\r\n\r\n* docs: update flow remarks\r\n\r\n* docs: fix typo\r\n\r\n* test: add non_empty_fields test\r\n\r\n* chore: remove non_empty_fields test\r\n\r\n* feat: polling per endpoint (#4111)\r\n\r\n* feat(polling): polling per endpoint configurable\r\n\r\n* fix: adjust tests\r\n\r\n* feat(polling): extend documentation\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: clean up\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: remove repeat from flaky test\r\n\r\n* fix: k8s test\r\n\r\n* feat(polling): address pr feedback\r\n\r\n* feat: improve docs\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(grpc): support connect grpc server via ssl tunnel (#4092)\r\n\r\n* feat(grpc): support ssl grpc connect if port is 443\r\n\r\n* fix(grpc): use https option instead of detect port 
automatically\r\n\r\n* chore: fix typo\r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* test(networking): add test for peapods networking\r\n\r\n* fix: address comments\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(polling): unify polling args (#4113)\r\n\r\n* fix: several issues for jinad pods (#4119)\r\n\r\n* fix: activate for jinad pods\r\n\r\n* fix: dont expose worker pod in partial daemon\r\n\r\n* fix: workspace setting\r\n\r\n* fix: containerized flows\r\n\r\n* fix: hub test\r\n\r\n* feat(daemon): remote peas on star routing (#4112)\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix sync async client test\r\n\r\n* test(daemon): enable remote peas test\r\n\r\n* test(daemon): replace send message to send request\r\n\r\n* test(daemon): declare pea tests in ci\r\n\r\n* test(daemon): use pea args fixture\r\n\r\n* test(daemon): head pea use default host\r\n\r\n* test(daemon): fix peas topologies\r\n\r\n* test(daemon): fix pseudo naming\r\n\r\n* test(daemon): use default host as host\r\n\r\n* test(daemon): fix executor path\r\n\r\n* test(daemon): add remote worker back\r\n\r\n* test(daemon): skip local remote remote topology\r\n\r\n* fix: jinad pea test setup\r\n\r\n* fix: jinad pea tests\r\n\r\n* fix: remove invalid assertion\r\n\r\nCo-authored-by: jacobowitz \r\n\r\n* feat: enable daemon tests again (#4132)\r\n\r\n* feat: enable daemon tests again\r\n\r\n* fix: remove bogy empty script file\r\n\r\n* fix: more jinad test fixes\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: scale and ru in jinad\r\n\r\n* fix: fix more jinad tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix: fix flow test\r\n\r\n* fix: improve pea tests reliability (#4136)\r\n\r\nCo-authored-by: Joan Fontanals \r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Deepankar Mahapatro \r\nCo-authored-by: bwanglzu \r\nCo-authored-by: AlaeddineAbdessalem \r\nCo-authored-by: Zhaofeng Miao <522856232@qq.com>", "code": "def uses_after_args(self) -> Namespace:\n \n return self.peas_args['uses_after']\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 7, "token_counts": 15, "n_ast_nodes": 28, "n_identifiers": 4, "d_id": 1762, "documentation": { "docstring": "Get the arguments for the `uses_after` of this Pod.\n\n\n .. 
# noqa: DAR201\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 27, "language": "en" } }, { "id": 206492, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/test/utils.py", "file_name": "utils.py", "fun_name": "captured_stderr", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def captured_stderr():\n \n return captured_output(\"stderr\")\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 10, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 23, "n_identifiers": 2, "d_id": 51543, "documentation": { "docstring": "Capture the output of sys.stderr:\n\n with captured_stderr() as stderr:\n print(\"hello\", file=sys.stderr)\n self.assertEqual(stderr.getvalue(), \"hello\\n\")\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 29, "language": "en" } }, { "id": 141374, "commit_id": "8affbc7be6fdce169264b8db5b0276dbcc719f6d", "repo": "ray", "path": "python/ray/tune/tests/test_checkpoint_manager.py", "file_name": "test_checkpoint_manager.py", "fun_name": "testBestCheckpointsOnlyNan", "commit_message": "[tune/train] Consolidate checkpoint manager 3: Ray Tune (#24430)\n\n**Update**: This PR is now part 3 of a three PR group to consolidate the checkpoints.\r\n\r\n1. Part 1 adds the common checkpoint management class #24771 \r\n2. Part 2 adds the integration for Ray Train #24772\r\n3. This PR builds on #24772 and includes all changes. It moves the Ray Tune integration to use the new common checkpoint manager class.\r\n\r\nOld PR description:\r\n\r\nThis PR consolidates the Ray Train and Tune checkpoint managers. These concepts previously did something very similar but in different modules. To simplify maintenance in the future, we've consolidated the common core.\r\n\r\n- This PR keeps full compatibility with the previous interfaces and implementations. This means that for now, Train and Tune will have separate CheckpointManagers that both extend the common core\r\n- This PR prepares Tune to move to a CheckpointStrategy object\r\n- In follow-up PRs, we can further unify interfacing with the common core, possibly removing any train- or tune-specific adjustments (e.g. 
moving to setup on init rather on runtime for Ray Train)\r\n\r\nCo-authored-by: Antoni Baum ", "code": "def testBestCheckpointsOnlyNan(self):\n \n keep_checkpoints_num = 2\n checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num)\n checkpoints = [\n _TrackedCheckpoint(\n dir_or_data=i,\n storage_mode=CheckpointStorage.PERSISTENT,\n metrics=self.mock_result(float(\"nan\"), i),\n )\n for i in range(4)\n ]\n\n for checkpoint in checkpoints:\n checkpoint_manager.on_checkpoint(checkpoint)\n\n best_checkpoints = checkpoint_manager.best_checkpoints()\n # best_checkpoints is sorted from worst to best\n self.assertEqual(len(best_checkpoints), keep_checkpoints_num)\n self.assertEqual(best_checkpoints[0].dir_or_data, 2)\n self.assertEqual(best_checkpoints[1].dir_or_data, 3)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 210, "n_words": 44, "vocab_size": 38, "complexity": 3, "nloc": 17, "token_counts": 110, "n_ast_nodes": 173, "n_identifiers": 20, "d_id": 32343, "documentation": { "docstring": "\n Tests that checkpoints with only nan priority are handled correctly.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 37497, "commit_id": "57e6464ac9a31156f1c93e59107323e6ec01309e", "repo": "transformers", "path": "src/transformers/testing_utils.py", "file_name": "testing_utils.py", "fun_name": "slow", "commit_message": "Update all require decorators to use skipUnless when possible (#16999)", "code": "def slow(test_case):\n \n return unittest.skipUnless(_run_slow_tests, \"test is slow\")(test_case)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 18, "n_ast_nodes": 33, "n_identifiers": 5, "d_id": 6802, "documentation": { "docstring": "\n Decorator marking a test as slow.\n\n Slow tests are skipped by default. 
Set the RUN_SLOW environment variable to a truthy value to run them.\n\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 34, "language": "en" } }, { "id": 19357, "commit_id": "def289b723e9216830c2a7b2577cb31b55710167", "repo": "PythonRobotics", "path": "PathPlanning/CubicSpline/cubic_spline_planner.py", "file_name": "cubic_spline_planner.py", "fun_name": "calc_first_derivative", "commit_message": "enhance cubic spline path doc (#698)\n\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc", "code": "def calc_first_derivative(self, x):\n \n\n if x < self.x[0]:\n return None\n elif x > self.x[-1]:\n return None\n\n i = self.__search_index(x)\n dx = x - self.x[i]\n dy = self.b[i] + 2.0 * self.c[i] * dx + 3.0 * self.d[i] * dx ** 2.0\n return dy\n", "url": "https://github.com/AtsushiSakai/PythonRobotics.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 113, "n_words": 42, "vocab_size": 27, "complexity": 3, "nloc": 9, "token_counts": 91, "n_ast_nodes": 131, "n_identifiers": 10, "d_id": 2945, "documentation": { "docstring": "\n Calc first derivative at given x.\n\n if x is outside the input x, return None\n\n Returns\n -------\n dy : float\n first derivative for given x.\n ", "n_words": 25, "vocab_size": 21, "n_whitespaces": 79, "language": "en" } }, { "id": 63284, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", "file_name": "pyparsing.py", "fun_name": "countedArray", "commit_message": "upd; format", "code": "def countedArray(expr, intExpr=None):\n \n arrayExpr = Forward()", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 2, "nloc": 10, "token_counts": 85, "n_ast_nodes": 27, "n_identifiers": 5, "d_id": 13233, "documentation": { "docstring": "Helper to define a counted list of expressions.\n\n This helper defines a pattern of the form::\n\n integer expr expr expr...\n\n where the leading integer tells how many expr expressions follow.\n The matched tokens returns the array of expr tokens as a list - the\n leading count token is suppressed.\n\n If ``intExpr`` is specified, it should be a pyparsing expression\n that produces an integer value.\n\n Example::\n\n countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd']\n\n # in this parser, the leading integer value is given in binary,\n # '10' indicating that 2 values are in the array\n binaryConstant = 
Word('01').setParseAction(lambda t: int(t[0], 2))\n countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']\n ", "n_words": 110, "vocab_size": 75, "n_whitespaces": 178, "language": "en" } }, { "id": 255414, "commit_id": "83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd", "repo": "onnx", "path": "onnx/test/compose_test.py", "file_name": "compose_test.py", "fun_name": "test_overlapping_function_names", "commit_message": "Use Python type annotations rather than comments (#3962)\n\n* These have been supported since Python 3.5.\r\n\r\nONNX doesn't support Python < 3.6, so we can use the annotations.\r\n\r\nDiffs generated by https://pypi.org/project/com2ann/.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Remove MYPY conditional logic in gen_proto.py\r\n\r\nIt breaks the type annotations and shouldn't be needed.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Get rid of MYPY bool from more scripts\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* move Descriptors class above where its referenced in type annotation\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fixes\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* remove extra blank line\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotations\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotation in gen_docs\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix Operators.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix TestCoverage.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix protoc-gen-mypy.py\r\n\r\nSigned-off-by: Gary Miguel ", "code": "def test_overlapping_function_names(self) -> None:\n \n ops = [\n helper.make_opsetid(\"\", 10),\n helper.make_opsetid(\"local\", 10)\n ]\n", "url": "https://github.com/onnx/onnx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 55, "n_words": 12, "vocab_size": 12, "complexity": 8, "nloc": 97, "token_counts": 814, "n_ast_nodes": 50, "n_identifiers": 5, "d_id": 74754, "documentation": { "docstring": "\n Tests error checking when the name of local function entries overlaps\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 26, "language": "en" } }, { "id": 42787, "commit_id": "60eb9e106f5915398eafd6aa339ec710c102dc09", "repo": "airflow", "path": "airflow/providers/cncf/kubernetes/hooks/kubernetes.py", "file_name": "kubernetes.py", "fun_name": "get_conn", "commit_message": "Use KubernetesHook to create api client in KubernetesPodOperator (#20578)\n\nAdd support for k8s hook in KPO; use it always (even when no conn id); continue to consider the core k8s settings that KPO already takes into account but emit deprecation warning about them.\r\n\r\nKPO historically takes into account a few settings from core airflow cfg (e.g. verify ssl, tcp keepalive, context, config file, and in_cluster). So to use the hook to generate the client, somehow the hook has to take these settings into account. But we don't want the hook to consider these settings in general. 
So we read them in KPO and if necessary patch the hook and warn.", "code": "def get_conn(self) -> Any:\n \n\n in_cluster = self._coalesce_param(\n self.in_cluster, self.conn_extras.get(\"extra__kubernetes__in_cluster\") or None\n )\n cluster_context = self._coalesce_param(\n self.cluster_context, self.conn_extras.get(\"extra__kubernetes__cluster_context\") or None\n )\n kubeconfig_path = self._coalesce_param(\n self.config_file, self.conn_extras.get(\"extra__kubernetes__kube_config_path\") or None\n )\n\n kubeconfig = self.conn_extras.get(\"extra__kubernetes__kube_config\") or None\n num_selected_configuration = len([o for o in [in_cluster, kubeconfig, kubeconfig_path] if o])\n\n if num_selected_configuration > 1:\n raise AirflowException(\n \"Invalid connection configuration. Options kube_config_path, \"\n \"kube_config, in_cluster are mutually exclusive. \"\n \"You can only use one option at a time.\"\n )\n\n disable_verify_ssl = self._coalesce_param(\n self.disable_verify_ssl, _get_bool(self._get_field(\"disable_verify_ssl\"))\n )\n disable_tcp_keepalive = self._coalesce_param(\n self.disable_tcp_keepalive, _get_bool(self._get_field(\"disable_tcp_keepalive\"))\n )\n\n # BEGIN apply settings from core kubernetes configuration\n # this section should be removed in next major release\n deprecation_warnings: List[Tuple[str, Any]] = []\n if disable_verify_ssl is None and self._deprecated_core_disable_verify_ssl is True:\n deprecation_warnings.append(('verify_ssl', False))\n disable_verify_ssl = self._deprecated_core_disable_verify_ssl\n # by default, hook will try in_cluster first. so we only need to\n # apply core airflow config and alert when False and in_cluster not otherwise set.\n if in_cluster is None and self._deprecated_core_in_cluster is False:\n deprecation_warnings.append(('in_cluster', self._deprecated_core_in_cluster))\n in_cluster = self._deprecated_core_in_cluster\n if not cluster_context and self._deprecated_core_cluster_context:\n deprecation_warnings.append(('cluster_context', self._deprecated_core_cluster_context))\n cluster_context = self._deprecated_core_cluster_context\n if not kubeconfig_path and self._deprecated_core_config_file:\n deprecation_warnings.append(('config_file', self._deprecated_core_config_file))\n kubeconfig_path = self._deprecated_core_config_file\n if disable_tcp_keepalive is None and self._deprecated_core_disable_tcp_keepalive is True:\n deprecation_warnings.append(('enable_tcp_keepalive', False))\n disable_tcp_keepalive = True\n if deprecation_warnings:\n self._deprecation_warning_core_param(deprecation_warnings)\n # END apply settings from core kubernetes configuration\n\n if disable_verify_ssl is True:\n _disable_verify_ssl()\n if disable_tcp_keepalive is not True:\n _enable_tcp_keepalive()\n\n if in_cluster:\n self.log.debug(\"loading kube_config from: in_cluster configuration\")\n config.load_incluster_config()\n return client.ApiClient()\n\n if kubeconfig_path is not None:\n self.log.debug(\"loading kube_config from: %s\", kubeconfig_path)\n config.load_kube_config(\n config_file=kubeconfig_path,\n client_configuration=self.client_configuration,\n context=cluster_context,\n )\n return client.ApiClient()\n\n if kubeconfig is not None:\n with tempfile.NamedTemporaryFile() as temp_config:\n self.log.debug(\"loading kube_config from: connection kube_config\")\n temp_config.write(kubeconfig.encode())\n temp_config.flush()\n config.load_kube_config(\n config_file=temp_config.name,\n client_configuration=self.client_configuration,\n 
context=cluster_context,\n )\n return client.ApiClient()\n\n return self._get_default_client(cluster_context=cluster_context)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 1032, "n_words": 267, "vocab_size": 146, "complexity": 24, "nloc": 71, "token_counts": 460, "n_ast_nodes": 759, "n_identifiers": 49, "d_id": 7735, "documentation": { "docstring": "Returns kubernetes api session for use with requests", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 256622, "commit_id": "4e940be85902dc93f3924662ba83111df72bb4d3", "repo": "haystack", "path": "rest_api/controller/feedback.py", "file_name": "feedback.py", "fun_name": "get_feedback", "commit_message": "Allow Linux CI to push changes to forks (#2182)\n\n* Add explicit reference to repo name to allow CI to push code back\r\n\r\n* Run test matrix only on tested code changes\r\n\r\n* Isolate the bot to check if it works\r\n\r\n* Clarify situation with a comment\r\n\r\n* Simplify autoformat.yml\r\n\r\n* Add code and docs check\r\n\r\n* Add git pull to make sure to fetch changes if they were created\r\n\r\n* Add cache to autoformat.yml too\r\n\r\n* Add information on forks in CONTRIBUTING.md\r\n\r\n* Add a not about code quality tools in CONTRIBUTING.md\r\n\r\n* Add image file types to the CI exclusion list\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def get_feedback():\n \n labels = DOCUMENT_STORE.get_all_labels()\n return labels\n\n\n@router.delete(\"/feedback\")", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "@router.delete(\"/feedback\")", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 16, "n_words": 8, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 14, "n_ast_nodes": 41, "n_identifiers": 6, "d_id": 74903, "documentation": { "docstring": "\n This endpoint allows the API user to retrieve all the feedback that has been submitted\n through the `POST /feedback` endpoint.\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 30, "language": "en" } }, { "id": 226332, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_box.py", "file_name": "_box.py", "fun_name": "boxmean", "commit_message": "switch to black .22", "code": "def boxmean(self):\n \n return self[\"boxmean\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 58005, "documentation": { "docstring": "\n If True, the mean of the box(es)' underlying distribution is\n drawn as a dashed line inside the box(es). If \"sd\" the standard\n deviation is also drawn. 
Defaults to True when `mean` is set.\n Defaults to \"sd\" when `sd` is set Otherwise defaults to False.\n\n The 'boxmean' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n [True, 'sd', False]\n\n Returns\n -------\n Any\n ", "n_words": 68, "vocab_size": 52, "n_whitespaces": 156, "language": "en" } }, { "id": 320069, "commit_id": "5b66ef0a748fd5570361a2a1ed6147e0462568d2", "repo": "paperless-ngx", "path": "src/documents/tests/test_api.py", "file_name": "test_api.py", "fun_name": "test_task_result_with_error", "commit_message": "Updates how task_args and task_kwargs are parsed, adds testing to cover everything I can think of", "code": "def test_task_result_with_error(self):\n \n result1 = TaskResult.objects.create(\n task_id=str(uuid.uuid4()),\n task_name=\"documents.tasks.some_task\",\n status=celery.states.SUCCESS,\n result={\n \"exc_type\": \"ConsumerError\",\n \"exc_message\": [\"test.pdf: Not consuming test.pdf: It is a duplicate.\"],\n \"exc_module\": \"documents.consumer\",\n },\n )\n _ = PaperlessTask.objects.create(attempted_task=result1)\n\n response = self.client.get(self.ENDPOINT)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.data), 1)\n\n returned_data = response.data[0]\n\n self.assertEqual(\n returned_data[\"result\"],\n \"test.pdf: Not consuming test.pdf: It is a duplicate.\",\n )\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 240, "n_words": 48, "vocab_size": 38, "complexity": 1, "nloc": 20, "token_counts": 124, "n_ast_nodes": 206, "n_identifiers": 28, "d_id": 117054, "documentation": { "docstring": "\n GIVEN:\n - A celery task completed with an exception\n WHEN:\n - API call is made to get tasks\n THEN:\n - The returned result is the exception info\n ", "n_words": 27, "vocab_size": 23, "n_whitespaces": 89, "language": "en" } }, { "id": 43876, "commit_id": "2fdc23333909096d427171002582e2906f8bbc0a", "repo": "airflow", "path": "airflow/models/taskmixin.py", "file_name": "taskmixin.py", "fun_name": "serialize_for_task_group", "commit_message": "Fix remaining mypy issues in \"core\" Airflow (#20795)\n\nCo-authored-by: Josh Fell \r\nCo-authored-by: Tzu-ping Chung \r\nCo-authored-by: Jarek Potiuk ", "code": "def serialize_for_task_group(self) -> Tuple[DagAttributeTypes, Any]:\n \n raise NotImplementedError()\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 17, "n_ast_nodes": 29, "n_identifiers": 6, "d_id": 8079, "documentation": { "docstring": "This is used by SerializedTaskGroup to serialize a task group's content.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 141858, "commit_id": "f8b0ab7e78246e4dddf6c0095f3f7a5e409988ba", "repo": "ray", "path": "python/ray/actor.py", "file_name": "actor.py", "fun_name": "bind", "commit_message": "[Ray DAG] Add documentation in `more options` section (#25528)", "code": "def bind(self, *args, **kwargs):\n ", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "\"\"\"\n For Ray DAG building that creates static graph from decorated", "n_ast_errors": 1, "ast_levels": 5, "n_whitespaces": 11, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 5, "token_counts": 39, "n_ast_nodes": 29, "n_identifiers": 13, 
"d_id": 32500, "documentation": { "docstring": "\n For Ray DAG building that creates static graph from decorated", "n_words": 10, "vocab_size": 10, "n_whitespaces": 17, "language": "en" } }, { "id": 155341, "commit_id": "2ebc9cf51bfc773e3d4c898f5a33c0f60ad7ebc5", "repo": "modin", "path": "modin/pandas/base.py", "file_name": "base.py", "fun_name": "_build_repr_df", "commit_message": "REFACTOR-#5310: Remove some hasattr('columns') checks. (#5311)\n\nSigned-off-by: mvashishtha ", "code": "def _build_repr_df(self, num_rows, num_cols):\n \n # Fast track for empty dataframe.\n if len(self.index) == 0 or (self._is_dataframe and len(self.columns) == 0):\n return pandas.DataFrame(\n index=self.index,\n columns=self.columns if self._is_dataframe else None,\n )\n if len(self.index) <= num_rows:\n row_indexer = slice(None)\n else:\n # Add one here so that pandas automatically adds the dots\n # It turns out to be faster to extract 2 extra rows and columns than to\n # build the dots ourselves.\n num_rows_for_head = num_rows // 2 + 1\n num_rows_for_tail = (\n num_rows_for_head\n if len(self.index) > num_rows\n else len(self.index) - num_rows_for_head\n if len(self.index) - num_rows_for_head >= 0\n else None\n )\n row_indexer = list(range(len(self.index))[:num_rows_for_head]) + (\n list(range(len(self.index))[-num_rows_for_tail:])\n if num_rows_for_tail is not None\n else []\n )\n if self._is_dataframe:\n if len(self.columns) <= num_cols:\n col_indexer = slice(None)\n else:\n num_cols_for_front = num_cols // 2 + 1\n num_cols_for_back = (\n num_cols_for_front\n if len(self.columns) > num_cols\n else len(self.columns) - num_cols_for_front\n if len(self.columns) - num_cols_for_front >= 0\n else None\n )\n col_indexer = list(range(len(self.columns))[:num_cols_for_front]) + (\n list(range(len(self.columns))[-num_cols_for_back:])\n if num_cols_for_back is not None\n else []\n )\n indexer = row_indexer, col_indexer\n else:\n indexer = row_indexer\n return self.iloc[indexer]._query_compiler.to_pandas()\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 786, "n_words": 173, "vocab_size": 84, "complexity": 14, "nloc": 43, "token_counts": 295, "n_ast_nodes": 473, "n_identifiers": 23, "d_id": 36343, "documentation": { "docstring": "\n Build pandas DataFrame for string representation.\n\n Parameters\n ----------\n num_rows : int\n Number of rows to show in string representation. If number of\n rows in this dataset is greater than `num_rows` then half of\n `num_rows` rows from the beginning and half of `num_rows` rows\n from the end are shown.\n num_cols : int\n Number of columns to show in string representation. 
If number of\n columns in this dataset is greater than `num_cols` then half of\n `num_cols` columns from the beginning and half of `num_cols`\n columns from the end are shown.\n\n Returns\n -------\n pandas.DataFrame or pandas.Series\n A pandas dataset with `num_rows` or fewer rows and `num_cols` or fewer columns.\n ", "n_words": 106, "vocab_size": 46, "n_whitespaces": 269, "language": "en" } }, { "id": 218217, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/importlib/metadata/__init__.py", "file_name": "__init__.py", "fun_name": "discover", "commit_message": "add python 3.10.4 for windows", "code": "def discover(cls, **kwargs):\n \n context = kwargs.pop('context', None)\n if context and kwargs:\n raise ValueError(\"cannot accept context and kwargs\")\n context = context or DistributionFinder.Context(**kwargs)\n return itertools.chain.from_iterable(\n resolver(context) for resolver in cls._discover_resolvers()\n )\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 94, "n_words": 30, "vocab_size": 24, "complexity": 5, "nloc": 8, "token_counts": 60, "n_ast_nodes": 101, "n_identifiers": 13, "d_id": 55211, "documentation": { "docstring": "Return an iterable of Distribution objects for all packages.\n\n Pass a ``context`` or pass keyword arguments for constructing\n a context.\n\n :context: A ``DistributionFinder.Context`` object.\n :return: Iterable of Distribution objects for all packages.\n ", "n_words": 32, "vocab_size": 24, "n_whitespaces": 67, "language": "en" } }, { "id": 222607, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/cmd.py", "file_name": "cmd.py", "fun_name": "ensure_string_list", "commit_message": "add python 3.10.4 for windows", "code": "def ensure_string_list(self, option):\n r\n val = getattr(self, option)\n if val is None:\n return\n elif isinstance(val, str):\n setattr(self, option, re.split(r',\\s*|\\s+', val))\n else:\n if isinstance(val, list):\n ok = all(isinstance(v, str) for v in val)\n else:\n ok = False\n if not ok:\n raise DistutilsOptionError(\n \"'%s' must be a list of strings (got %r)\"\n % (option, val))\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 229, "n_words": 53, "vocab_size": 44, "complexity": 6, "nloc": 20, "token_counts": 92, "n_ast_nodes": 144, "n_identifiers": 15, "d_id": 56670, "documentation": { "docstring": "Ensure that 'option' is a list of strings. 
If 'option' is\n currently a string, we split it either on /,\\s*/ or /\\s+/, so\n \"foo bar baz\", \"foo,bar,baz\", and \"foo, bar baz\" all become\n [\"foo\", \"bar\", \"baz\"].\n ", "n_words": 36, "vocab_size": 32, "n_whitespaces": 67, "language": "en" } }, { "id": 153838, "commit_id": "dcee13d57ebf9a006460deedb734c15791acae7a", "repo": "modin", "path": "modin/experimental/pandas/io.py", "file_name": "io.py", "fun_name": "_read", "commit_message": "REFACTOR-#4510: Align experimental and regular IO modules initializations (#4511)\n\nSigned-off-by: alexander3774 ", "code": "def _read(**kwargs) -> DataFrame:\n \n Engine.subscribe(_update_engine)\n from modin.core.execution.dispatching.factories.dispatcher import FactoryDispatcher\n\n try:\n pd_obj = FactoryDispatcher.read_csv_glob(**kwargs)\n except AttributeError:\n raise AttributeError(\"read_csv_glob() is only implemented for pandas on Ray.\")\n\n # This happens when `read_csv` returns a TextFileReader object for iterating through\n if isinstance(pd_obj, pandas.io.parsers.TextFileReader):\n reader = pd_obj.read\n pd_obj.read = lambda *args, **kwargs: DataFrame(\n query_compiler=reader(*args, **kwargs)\n )\n return pd_obj\n\n return DataFrame(query_compiler=pd_obj)\n\n\nread_csv_glob = _make_parser_func(sep=\",\")\n\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 134, "n_words": 58, "vocab_size": 51, "complexity": 3, "nloc": 50, "token_counts": 100, "n_ast_nodes": 176, "n_identifiers": 27, "d_id": 35651, "documentation": { "docstring": "\n General documentation is available in `modin.pandas.read_csv`.\n\n This experimental feature provides parallel reading from multiple csv files which are\n defined by glob pattern.\n\n Parameters\n ----------\n **kwargs : dict\n Keyword arguments in `modin.pandas.read_csv`.\n\n Returns\n -------\n modin.DataFrame\n\n Examples\n --------\n >>> import modin.experimental.pandas as pd\n >>> df = pd.read_csv_glob(\"s3://dask-data/nyc-taxi/2015/yellow_tripdata_2015-1*\")\n UserWarning: `read_*` implementation has mismatches with pandas:\n Data types of partitions are different! Please refer to the troubleshooting section of the Modin documentation to fix this issue.\n VendorID tpep_pickup_datetime ... total_amount congestion_surcharge\n 0 1.0 2020-10-01 00:09:08 ... 4.30 0.0\n 1 1.0 2020-10-01 00:09:19 ... 13.30 2.5\n 2 1.0 2020-10-01 00:30:00 ... 15.36 2.5\n 3 2.0 2020-10-01 00:56:46 ... -3.80 0.0\n 4 2.0 2020-10-01 00:56:46 ... 3.80 0.0\n ... ... ... ... ... ...\n 4652008 NaN 2020-12-31 23:44:35 ... 43.95 2.5\n 4652009 NaN 2020-12-31 23:41:36 ... 20.17 2.5\n 4652010 NaN 2020-12-31 23:01:17 ... 78.98 0.0\n 4652011 NaN 2020-12-31 23:31:29 ... 39.50 0.0\n 4652012 NaN 2020-12-31 23:12:48 ... 
20.64 0.0\n\n [4652013 rows x 18 columns]\n ", "n_words": 158, "vocab_size": 110, "n_whitespaces": 680, "language": "en" } }, { "id": 152978, "commit_id": "0faf4675140415e17d4112f9d0d37cfe87770b9e", "repo": "modin", "path": "modin/core/execution/ray/implementations/pandas_on_ray/io/io.py", "file_name": "io.py", "fun_name": "_to_csv_check_support", "commit_message": "REFACTOR-#3871: move related to pandas functionality into 'PandasOnRayIO' class (#3872)\n\nSigned-off-by: Anatoly Myachev ", "code": "def _to_csv_check_support(kwargs):\n \n path_or_buf = kwargs[\"path_or_buf\"]\n compression = kwargs[\"compression\"]\n if not isinstance(path_or_buf, str):\n return False\n # case when the pointer is placed at the beginning of the file.\n if \"r\" in kwargs[\"mode\"] and \"+\" in kwargs[\"mode\"]:\n return False\n # encodings with BOM don't support;\n # instead of one mark in result bytes we will have them by the number of partitions\n # so we should fallback in pandas for `utf-16`, `utf-32` with all aliases, in instance\n # (`utf_32_be`, `utf_16_le` and so on)\n if kwargs[\"encoding\"] is not None:\n encoding = kwargs[\"encoding\"].lower()\n if \"u\" in encoding or \"utf\" in encoding:\n if \"16\" in encoding or \"32\" in encoding:\n return False\n if compression is None or not compression == \"infer\":\n return False\n if any((path_or_buf.endswith(ext) for ext in [\".gz\", \".bz2\", \".zip\", \".xz\"])):\n return False\n return True\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 329, "n_words": 131, "vocab_size": 80, "complexity": 13, "nloc": 17, "token_counts": 126, "n_ast_nodes": 232, "n_identifiers": 11, "d_id": 35220, "documentation": { "docstring": "\n Check if parallel version of ``to_csv`` could be used.\n\n Parameters\n ----------\n kwargs : dict\n Keyword arguments passed to ``.to_csv()``.\n\n Returns\n -------\n bool\n Whether parallel version of ``to_csv`` is applicable.\n ", "n_words": 29, "vocab_size": 25, "n_whitespaces": 108, "language": "en" } }, { "id": 262200, "commit_id": "ef63c995248fb854d1efae73acfbdcf75666c263", "repo": "TTS", "path": "tests/data_tests/test_loader.py", "file_name": "test_loader.py", "fun_name": "test_start_by_longest", "commit_message": "Implement `start_by_longest` option for TTSDatase", "code": "def test_start_by_longest(self):\n \n if ok_ljspeech:\n dataloader, _ = self._create_dataloader(2, c.r, 0, True)\n dataloader.dataset.preprocess_samples()\n for i, data in enumerate(dataloader):\n if i == self.max_loader_iter:\n break\n mel_lengths = data[\"mel_lengths\"]\n if i == 0:\n max_len = mel_lengths[0]\n print(mel_lengths)\n self.assertTrue(all(max_len >= mel_lengths))\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 196, "n_words": 36, "vocab_size": 30, "complexity": 5, "nloc": 12, "token_counts": 84, "n_ast_nodes": 136, "n_identifiers": 19, "d_id": 77141, "documentation": { "docstring": "Test start_by_longest option.\n\n Ther first item of the fist batch must be longer than all the other items.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 32, "language": "en" } }, { "id": 244084, "commit_id": "c576e5d570bf64a99e2c6817ed7b5c0084a44a55", "repo": "mmdetection", "path": "mmdet/models/utils/point_sample.py", "file_name": "point_sample.py", "fun_name": "get_uncertainty", "commit_message": "[Enhance] Take point sample related functions out of 
mask_point_head (#7353)\n\nadd point sample\r\n\r\nreplace function in mask_point_head", "code": "def get_uncertainty(mask_pred, labels):\n \n if mask_pred.shape[1] == 1:\n gt_class_logits = mask_pred.clone()\n else:\n inds = torch.arange(mask_pred.shape[0], device=mask_pred.device)\n gt_class_logits = mask_pred[inds, labels].unsqueeze(1)\n return -torch.abs(gt_class_logits)\n\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 54, "n_words": 21, "vocab_size": 18, "complexity": 2, "nloc": 7, "token_counts": 67, "n_ast_nodes": 106, "n_identifiers": 12, "d_id": 70232, "documentation": { "docstring": "Estimate uncertainty based on pred logits.\n\n We estimate uncertainty as L1 distance between 0.0 and the logits\n prediction in 'mask_pred' for the foreground class in `classes`.\n\n Args:\n mask_pred (Tensor): mask predication logits, shape (num_rois,\n num_classes, mask_height, mask_width).\n\n labels (list[Tensor]): Either predicted or ground truth label for\n each predicted mask, of length num_rois.\n\n Returns:\n scores (Tensor): Uncertainty scores with the most uncertain\n locations having the highest uncertainty score,\n shape (num_rois, 1, mask_height, mask_width)\n ", "n_words": 72, "vocab_size": 59, "n_whitespaces": 152, "language": "en" } }, { "id": 217708, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/http/client.py", "file_name": "client.py", "fun_name": "_is_textIO", "commit_message": "add python 3.10.4 for windows", "code": "def _is_textIO(stream):\n \n return isinstance(stream, io.TextIOBase)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 26, "n_identifiers": 5, "d_id": 54893, "documentation": { "docstring": "Test whether a file-like object is a text or a binary stream.\n ", "n_words": 12, "vocab_size": 10, "n_whitespaces": 19, "language": "en" } }, { "id": 85794, "commit_id": "35ec251212b82e5d9468062a3ab5945d8e739002", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_metric_data.py", "file_name": "test_organization_metric_data.py", "fun_name": "test_pagination_offset_without_orderby", "commit_message": "feat(metrics): Support rate for derived metric [TET-129 TET-127] (#38792)\n\nAdds support for operation `rate` to be able to compute performance\r\nrelated metrics such as tpm, tps, epm, eps\r\n\r\nThis PR achieves this by:\r\n- Defining rate as a derived operation that produces its own SnQL rather\r\nthan trying to compute the data sketch aggregate and using that directly\r\n- Replaces `filter_conditions_func` that used to just produce a snql\r\ncondition to be used a conditional aggregate with `snql_func` that\r\ninstead produces a SnQL function\r\n- Replaces the logic in `get_entity` on MetricsExpression to determine\r\nthe entity from the MRI rather than from the aggregate applied", "code": "def test_pagination_offset_without_orderby(self):\n \n response = self.get_response(\n self.organization.slug,\n field=f\"count({TransactionMetricKey.MEASUREMENTS_LCP.value})\",\n groupBy=\"transaction\",\n cursor=Cursor(0, 1),\n statsPeriod=\"1h\",\n useCase=\"performance\",\n )\n assert response.status_code == 200, response.data\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": 
"", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 112, "n_words": 18, "vocab_size": 18, "complexity": 1, "nloc": 10, "token_counts": 55, "n_ast_nodes": 97, "n_identifiers": 17, "d_id": 18044, "documentation": { "docstring": "\n Test that ensures a successful response is returned even when requesting an offset\n without an orderBy\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 38, "language": "en" } }, { "id": 260913, "commit_id": "7fda68d45734d41e47da1f57d23348ae8de655b0", "repo": "scikit-learn", "path": "sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py", "file_name": "gradient_boosting.py", "fun_name": "_finalize_sample_weight", "commit_message": "FEA Adds class_weight to HistGradientBoostingClassifier (#22014)\n\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: jeremie du boisberranger ", "code": "def _finalize_sample_weight(self, sample_weight, y):\n ", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "\"\"\"Finalize sample weight.\n\n Used by subclasses to adjustuseful for", "n_ast_errors": 2, "ast_levels": 7, "n_whitespaces": 11, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 36, "n_identifiers": 17, "d_id": 76560, "documentation": { "docstring": "Finalize sample weight.\n\n Used by subclasses to adjust sample_weights. This is useful for implementing", "n_words": 14, "vocab_size": 14, "n_whitespaces": 20, "language": "en" } }, { "id": 101212, "commit_id": "5e73437be47f2410439a3c6716de96354e6a0c94", "repo": "faceswap", "path": "lib/align/alignments.py", "file_name": "alignments.py", "fun_name": "frame_has_multiple_faces", "commit_message": "lib.align updates:\n - alignments.py\n - Add typed dicts for imported alignments\n - Explicitly check for presence of thumb value in alignments dict\n - linting\n - detected_face.py\n - Typing\n - Linting\n - Legacy support for pre-aligned face\n - Update dependencies to new property names", "code": "def frame_has_multiple_faces(self, frame_name):\n \n if not frame_name:\n retval = False\n else:\n retval = bool(len(self._data.get(frame_name, {}).get(\"faces\", [])) > 1)\n logger.trace(\"'%s': %s\", frame_name, retval)\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 80, "n_words": 23, "vocab_size": 20, "complexity": 2, "nloc": 7, "token_counts": 58, "n_ast_nodes": 97, "n_identifiers": 10, "d_id": 20633, "documentation": { "docstring": " Check whether a given frame_name exists within the alignments :attr:`data` and contains\n more than 1 face.\n\n Parameters\n ----------\n frame_name: str\n The frame_name name to check. This should be the base name of the frame, not the full\n path\n\n Returns\n -------\n bool\n ``True`` if the given frame_name exists within the alignments :attr:`data` and has more\n than 1 face associated with it, otherwise ``False``\n ", "n_words": 62, "vocab_size": 45, "n_whitespaces": 163, "language": "en" } }, { "id": 264021, "commit_id": "d789a7daa7712716c89259b987349917a89aece7", "repo": "pyinstaller", "path": "PyInstaller/utils/hooks/qt/__init__.py", "file_name": "__init__.py", "fun_name": "get_qt_library_info", "commit_message": "hookutils: reorganize the Qt hook utilities\n\nReorganize the Qt module information to provide information necessary\nto deal with variations between different python Qt bindings (PySide2,\nPyQt5, PySide6, and PyQt6). 
Replace the existing table-like dictionary\nwith list of entries, which is easier to format and document. From this\nlist, we now generate two dictionaries; one that maps Qt module (shared\nlibrary) names to the module info entries (the same role as the old\ndictionary), and one that maps python module names to the module info\nentries. The latter is necessary to accommodate python modules that do\nnot have corresponding Qt shared libraries (header-only Qt modules,\nsuch as QtAxContainer; or statically-linked module, such as QSci), but\nwe still need to provide information about plugins or translation\nfiles.\n\nThe new information list is based on manual inspection of source code\nfor Qt 5.15 and 6.3, and should provide comprehensive information about\nall plugin names and translation file basenames.\n\nIn addition, most of the helper functions, which take a reference to\nthe `QtLibraryInfo` class as their first argument, have been turned\ninto methods of the `QtLibraryInfo` class. The corresponding hooks\nhave also been adjusted.", "code": "def get_qt_library_info(namespace):\n \n if namespace == 'PyQt5':\n return pyqt5_library_info\n if namespace == 'PyQt6':\n return pyqt6_library_info\n elif namespace == 'PySide2':\n return pyside2_library_info\n elif namespace == 'PySide6':\n return pyside6_library_info\n\n raise ValueError(f'Invalid namespace: {namespace}!')\n\n\n# add_qt_dependencies\n# --------------------\n# Generic implemnentation that finds the Qt 5/6 dependencies based on the hook name of a PyQt5/PyQt6/PySide2/PySide6\n# hook. Returns (hiddenimports, binaries, datas). Typical usage:\n# ``hiddenimports, binaries, datas = add_qt5_dependencies(__file__)``.", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 106, "n_words": 65, "vocab_size": 48, "complexity": 5, "nloc": 10, "token_counts": 40, "n_ast_nodes": 84, "n_identifiers": 7, "d_id": 77563, "documentation": { "docstring": "\n Return QtLibraryInfo instance for the given namespace.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 14, "language": "en" } }, { "id": 152835, "commit_id": "1f92336be768d235c18a82acb2195b7135101ae7", "repo": "stable-diffusion-webui", "path": "modules/deepbooru.py", "file_name": "deepbooru.py", "fun_name": "create_deepbooru_process", "commit_message": "refactored the deepbooru module to improve speed on running multiple interogations in a row. Added the option to generate deepbooru tags for textual inversion preproccessing.", "code": "def create_deepbooru_process(threshold=0.5):\n \n from modules import shared # prevents circular reference\n shared.deepbooru_process_manager = multiprocessing.Manager()\n shared.deepbooru_process_queue = shared.deepbooru_process_manager.Queue()\n shared.deepbooru_process_return = shared.deepbooru_process_manager.dict()\n shared.deepbooru_process_return[\"value\"] = -1\n shared.deepbooru_process = multiprocessing.Process(target=deepbooru_process, args=(shared.deepbooru_process_queue, shared.deepbooru_process_return, threshold))\n shared.deepbooru_process.start()\n\n", "url": "https://github.com/AUTOMATIC1111/stable-diffusion-webui.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 54, "n_words": 29, "vocab_size": 25, "complexity": 1, "nloc": 8, "token_counts": 87, "n_ast_nodes": 140, "n_identifiers": 16, "d_id": 35197, "documentation": { "docstring": "\n Creates deepbooru process. 
A queue is created to send images into the process. This enables multiple images\n to be processed in a row without reloading the model or creating a new process. To return the data, a shared\n dictionary is created to hold the tags created. To wait for tags to be returned, a value of -1 is assigned\n to the dictionary and the method adding the image to the queue should wait for this value to be updated with\n the tags.\n ", "n_words": 82, "vocab_size": 50, "n_whitespaces": 105, "language": "en" } }, { "id": 125650, "commit_id": "e19cf164fd51c4f6bf730e999cba46b30c39ff83", "repo": "ray", "path": "python/ray/data/datasource/parquet_datasource.py", "file_name": "parquet_datasource.py", "fun_name": "_estimate_files_encoding_ratio", "commit_message": "[Datasets] Use sampling to estimate in-memory data size for Parquet data source (#26868)", "code": "def _estimate_files_encoding_ratio(self) -> float:\n \n if not DatasetContext.get_current().decoding_size_estimation:\n return PARQUET_ENCODING_RATIO_ESTIMATE_DEFAULT\n\n # Sample a few rows from Parquet files to estimate the encoding ratio.\n # Launch tasks to sample multiple files remotely in parallel.\n # Evenly distributed to sample N rows in i-th row group in i-th file.\n # TODO(ekl/cheng) take into account column pruning.\n start_time = time.perf_counter()\n num_files = len(self._pq_ds.pieces)\n num_samples = int(num_files * PARQUET_ENCODING_RATIO_ESTIMATE_SAMPLING_RATIO)\n min_num_samples = min(\n PARQUET_ENCODING_RATIO_ESTIMATE_MIN_NUM_SAMPLES, num_files\n )\n max_num_samples = min(\n PARQUET_ENCODING_RATIO_ESTIMATE_MAX_NUM_SAMPLES, num_files\n )\n num_samples = max(min(num_samples, max_num_samples), min_num_samples)\n\n # Evenly distributed to choose which file to sample, to avoid biased prediction\n # if data is skewed.\n file_samples = [\n self._pq_ds.pieces[idx]\n for idx in np.linspace(0, num_files - 1, num_samples).astype(int).tolist()\n ]\n\n sample_piece = cached_remote_fn(_sample_piece)\n futures = []\n for idx, sample in enumerate(file_samples):\n # Sample i-th row group in i-th file.\n futures.append(sample_piece.remote(_SerializedPiece(sample), idx))\n sample_ratios = ray.get(futures)\n ratio = np.mean(sample_ratios)\n\n sampling_duration = time.perf_counter() - start_time\n if sampling_duration > 5:\n logger.info(\n \"Parquet input size estimation took \"\n f\"{round(sampling_duration, 2)} seconds.\"\n )\n logger.debug(f\"Estimated Parquet encoding ratio from sampling is {ratio}.\")\n return max(ratio, PARQUET_ENCODING_RATIO_ESTIMATE_LOWER_BOUND)\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 488, "n_words": 170, "vocab_size": 110, "complexity": 5, "nloc": 35, "token_counts": 198, "n_ast_nodes": 339, "n_identifiers": 49, "d_id": 27939, "documentation": { "docstring": "Return an estimate of the Parquet files encoding ratio.\n\n To avoid OOMs, it is safer to return an over-estimate than an underestimate.\n ", "n_words": 22, "vocab_size": 20, "n_whitespaces": 36, "language": "en" } }, { "id": 258948, "commit_id": "1fc86b6aacd89da44a3b4e8abf7c3e2ba4336ffe", "repo": "scikit-learn", "path": "sklearn/random_projection.py", "file_name": "random_projection.py", "fun_name": "johnson_lindenstrauss_min_dim", "commit_message": "MNT Update black to stable version (#22474)", "code": "def johnson_lindenstrauss_min_dim(n_samples, *, eps=0.1):\n \n eps = np.asarray(eps)\n n_samples = np.asarray(n_samples)\n\n if np.any(eps <= 0.0) or np.any(eps 
>= 1):\n raise ValueError(\"The JL bound is defined for eps in ]0, 1[, got %r\" % eps)\n\n if np.any(n_samples) <= 0:\n raise ValueError(\n \"The JL bound is defined for n_samples greater than zero, got %r\"\n % n_samples\n )\n\n denominator = (eps**2 / 2) - (eps**3 / 3)\n return (4 * np.log(n_samples) / denominator).astype(np.int64)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 133, "n_words": 69, "vocab_size": 50, "complexity": 4, "nloc": 12, "token_counts": 112, "n_ast_nodes": 177, "n_identifiers": 11, "d_id": 75490, "documentation": { "docstring": "Find a 'safe' number of components to randomly project to.\n\n The distortion introduced by a random projection `p` only changes the\n distance between two points by a factor (1 +- eps) in an euclidean space\n with good probability. The projection `p` is an eps-embedding as defined\n by:\n\n (1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2\n\n Where u and v are any rows taken from a dataset of shape (n_samples,\n n_features), eps is in ]0, 1[ and p is a projection by a random Gaussian\n N(0, 1) matrix of shape (n_components, n_features) (or a sparse\n Achlioptas matrix).\n\n The minimum number of components to guarantee the eps-embedding is\n given by:\n\n n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)\n\n Note that the number of dimensions is independent of the original\n number of features but instead depends on the size of the dataset:\n the larger the dataset, the higher is the minimal dimensionality of\n an eps-embedding.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n n_samples : int or array-like of int\n Number of samples that should be a integer greater than 0. If an array\n is given, it will compute a safe number of components array-wise.\n\n eps : float or ndarray of shape (n_components,), dtype=float, \\\n default=0.1\n Maximum distortion rate in the range (0,1 ) as defined by the\n Johnson-Lindenstrauss lemma. If an array is given, it will compute a\n safe number of components array-wise.\n\n Returns\n -------\n n_components : int or ndarray of int\n The minimal number of components to guarantee with good probability\n an eps-embedding with n_samples.\n\n Examples\n --------\n >>> from sklearn.random_projection import johnson_lindenstrauss_min_dim\n >>> johnson_lindenstrauss_min_dim(1e6, eps=0.5)\n 663\n\n >>> johnson_lindenstrauss_min_dim(1e6, eps=[0.5, 0.1, 0.01])\n array([ 663, 11841, 1112658])\n\n >>> johnson_lindenstrauss_min_dim([1e4, 1e5, 1e6], eps=0.1)\n array([ 7894, 9868, 11841])\n\n References\n ----------\n\n .. [1] https://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma\n\n .. 
[2] Sanjoy Dasgupta and Anupam Gupta, 1999,\n \"An elementary proof of the Johnson-Lindenstrauss Lemma.\"\n http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.45.3654\n\n ", "n_words": 318, "vocab_size": 187, "n_whitespaces": 522, "language": "en" } }, { "id": 120553, "commit_id": "17de89b16ac5ee05aee03115d858e67489eab973", "repo": "jax", "path": "build/build_wheel.py", "file_name": "build_wheel.py", "fun_name": "verify_mac_libraries_dont_reference_chkstack", "commit_message": "feat: refactor code using pyupgrade\n\nThis PR upgrades legacy Python code to 3.7+ code using pyupgrade:\n```sh\npyupgrade --py37-plus --keep-runtime-typing **.py\n```\n\na", "code": "def verify_mac_libraries_dont_reference_chkstack():\n \n if not _is_mac():\n return\n nm = subprocess.run(\n [\"nm\", \"-g\",\n r.Rlocation(\"org_tensorflow/tensorflow/compiler/xla/python/xla_extension.so\")\n ],\n capture_output=True, text=True,\n check=False)\n if nm.returncode != 0:\n raise RuntimeError(f\"nm process failed: {nm.stdout} {nm.stderr}\")\n if \"____chkstk_darwin\" in nm.stdout:\n raise RuntimeError(\n \"Mac wheel incorrectly depends on symbol ____chkstk_darwin, which \"\n \"means that it isn't compatible with older MacOS versions.\")\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 90, "n_words": 50, "vocab_size": 47, "complexity": 4, "nloc": 15, "token_counts": 69, "n_ast_nodes": 136, "n_identifiers": 14, "d_id": 26889, "documentation": { "docstring": "Verifies that xla_extension.so doesn't depend on ____chkstk_darwin.\n\n We don't entirely know why this happens, but in some build environments\n we seem to target the wrong Mac OS version.\n https://github.com/google/jax/issues/3867\n\n This check makes sure we don't release wheels that have this dependency.\n ", "n_words": 41, "vocab_size": 37, "n_whitespaces": 46, "language": "en" } }, { "id": 59586, "commit_id": "a7bd9cadd5038383449b0e75a87bb23a73b278d8", "repo": "prefect", "path": "tests/test_flows.py", "file_name": "test_flows.py", "fun_name": "test_timeout_does_not_wait_for_completion_for_sync_flows", "commit_message": "Add support for Python 3.11 (#7304)\n\nCo-authored-by: Chris Guidry ", "code": "def test_timeout_does_not_wait_for_completion_for_sync_flows(self, tmp_path):\n \n if sys.version_info[1] == 11:\n pytest.xfail(\"The engine returns _after_ sleep finishes in Python 3.11\")\n\n canary_file = tmp_path / \"canary\"\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 53, "n_words": 21, "vocab_size": 21, "complexity": 2, "nloc": 14, "token_counts": 96, "n_ast_nodes": 52, "n_identifiers": 8, "d_id": 11913, "documentation": { "docstring": "\n Sync flows are cancelled when they change instructions. The flow will return\n immediately when the timeout is reached, but the thread it executes in will\n continue until the next instruction is reached. 
`time.sleep` will return then\n the thread will be interrupted.\n ", "n_words": 41, "vocab_size": 31, "n_whitespaces": 77, "language": "en" } }, { "id": 65174, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/budget_variance_report/budget_variance_report.py", "file_name": "budget_variance_report.py", "fun_name": "get_target_distribution_details", "commit_message": "style: format code with black", "code": "def get_target_distribution_details(filters):\n\ttarget_details = {}\n\tfor d in frappe.db.sql(\n\t\t,\n\t\t(filters.from_fiscal_year, filters.to_fiscal_year),\n\t\tas_dict=1,\n\t):\n\t\ttarget_details.setdefault(d.name, {}).setdefault(d.month, flt(d.percentage_allocation))\n\n\treturn target_details\n\n\n# Get actual details from gl entry", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 16, "n_words": 26, "vocab_size": 25, "complexity": 2, "nloc": 22, "token_counts": 63, "n_ast_nodes": 97, "n_identifiers": 15, "d_id": 13816, "documentation": { "docstring": "\n\t\t\tselect\n\t\t\t\tmd.name,\n\t\t\t\tmdp.month,\n\t\t\t\tmdp.percentage_allocation\n\t\t\tfrom\n\t\t\t\t`tabMonthly Distribution Percentage` mdp,\n\t\t\t\t`tabMonthly Distribution` md\n\t\t\twhere\n\t\t\t\tmdp.parent = md.name\n\t\t\t\tand md.fiscal_year between %s and %s\n\t\t\torder by\n\t\t\t\tmd.fiscal_year\n\t\t", "n_words": 25, "vocab_size": 21, "n_whitespaces": 13, "language": "en" } }, { "id": 65703, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/controllers/website_list_for_contact.py", "file_name": "website_list_for_contact.py", "fun_name": "get_list_context", "commit_message": "style: format code with black", "code": "def get_list_context(context=None):\n\treturn {\n\t\t\"global_number_format\": frappe.db.get_default(\"number_format\") or \"#,###.##\",\n\t\t\"currency\": frappe.db.get_default(\"currency\"),\n\t\t\"currency_symbols\": json.dumps(\n\t\t\tdict(\n\t\t\t\tfrappe.db.sql(\n\t\t\t\t\t\n\t\t\t\t)\n\t\t\t)\n\t\t),\n\t\t\"row_template\": \"templates/includes/transaction_row.html\",\n\t\t\"get_list\": get_transaction_list,\n\t}\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 9, "n_words": 22, "vocab_size": 21, "complexity": 2, "nloc": 15, "token_counts": 61, "n_ast_nodes": 111, "n_identifiers": 10, "d_id": 13991, "documentation": { "docstring": "select name, symbol\n\t\t\tfrom tabCurrency where enabled=1", "n_words": 7, "vocab_size": 7, "n_whitespaces": 5, "language": "en" } }, { "id": 272164, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/initializers/initializers_v2.py", "file_name": "initializers_v2.py", "fun_name": "_assert_float_dtype", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _assert_float_dtype(dtype):\n \n dtype = tf.as_dtype(dtype)\n if not dtype.is_floating:\n raise ValueError(f\"Expected floating point type, got {dtype}.\")\n return dtype\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 36, "n_words": 17, "vocab_size": 16, "complexity": 2, "nloc": 5, "token_counts": 28, "n_ast_nodes": 53, "n_identifiers": 6, "d_id": 80966, "documentation": { "docstring": "Validate and return floating point type based on 
`dtype`.\n\n `dtype` must be a floating point type.\n\n Args:\n dtype: The data type to validate.\n\n Returns:\n Validated type.\n\n Raises:\n ValueError: if `dtype` is not a floating point type.\n ", "n_words": 36, "vocab_size": 27, "n_whitespaces": 66, "language": "en" } }, { "id": 62599, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/html5lib/treebuilders/base.py", "file_name": "base.py", "fun_name": "removeChild", "commit_message": "upd; format", "code": "def removeChild(self, node):\n \n raise NotImplementedError\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 18, "n_identifiers": 4, "d_id": 13010, "documentation": { "docstring": "Remove node from the children of the current node\n\n :arg node: the child node to remove\n\n ", "n_words": 16, "vocab_size": 12, "n_whitespaces": 30, "language": "en" } }, { "id": 315806, "commit_id": "7cd68381f1d4f58930ffd631dfbfc7159d459832", "repo": "core", "path": "tests/components/ecobee/test_config_flow.py", "file_name": "test_config_flow.py", "fun_name": "test_import_flow_triggered_but_no_ecobee_conf", "commit_message": "Search/replace RESULT_TYPE_* by FlowResultType enum (#74642)", "code": "async def test_import_flow_triggered_but_no_ecobee_conf(hass):\n \n flow = config_flow.EcobeeFlowHandler()\n flow.hass = hass\n flow.hass.data[DATA_ECOBEE_CONFIG] = {}\n\n result = await flow.async_step_import(import_data=None)\n\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert result[\"step_id\"] == \"user\"\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 45, "n_words": 24, "vocab_size": 19, "complexity": 1, "nloc": 7, "token_counts": 58, "n_ast_nodes": 101, "n_identifiers": 13, "d_id": 114384, "documentation": { "docstring": "Test expected result if import flow triggers but ecobee.conf doesn't exist.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 48680, "commit_id": "56946fac8f29aa44ce84391f138d63c4c8a2a285", "repo": "django-rest-framework", "path": "rest_framework/views.py", "file_name": "views.py", "fun_name": "exception_handler", "commit_message": "Preserve exception messages for wrapped Django exceptions (#8051)\n\n* Preserve messages for wrapped Django exceptions\r\n\r\n* Fix the test\r\n\r\n* Update test_generics.py\r\n\r\n* Update test_generics.py\r\n\r\nCo-authored-by: Tom Christie ", "code": "def exception_handler(exc, context):\n \n if isinstance(exc, Http404):\n exc = exceptions.NotFound(*(exc.args))\n elif isinstance(exc, PermissionDenied):\n exc = exceptions.PermissionDenied(*(exc.args))\n\n if isinstance(exc, exceptions.APIException):\n headers = {}\n if getattr(exc, 'auth_header', None):\n headers['WWW-Authenticate'] = exc.auth_header\n if getattr(exc, 'wait', None):\n headers['Retry-After'] = '%d' % exc.wait\n\n if isinstance(exc.detail, (list, dict)):\n data = exc.detail\n else:\n data = {'detail': exc.detail}\n\n set_rollback()\n return Response(data, status=exc.status_code, headers=headers)\n\n return None\n\n", "url": "https://github.com/encode/django-rest-framework.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 178, "n_words": 
56, "vocab_size": 39, "complexity": 7, "nloc": 18, "token_counts": 152, "n_ast_nodes": 246, "n_identifiers": 22, "d_id": 9566, "documentation": { "docstring": "\n Returns the response that should be used for any given exception.\n\n By default we handle the REST framework `APIException`, and also\n Django's built-in `Http404` and `PermissionDenied` exceptions.\n\n Any unhandled exceptions may return `None`, which will cause a 500 error\n to be raised.\n ", "n_words": 42, "vocab_size": 39, "n_whitespaces": 61, "language": "en" } }, { "id": 38408, "commit_id": "afe5d42d8d1d80af911ed980c2936bfe887078f6", "repo": "transformers", "path": "utils/check_repo.py", "file_name": "check_repo.py", "fun_name": "check_all_decorator_order", "commit_message": "Black preview (#17217)\n\n* Black preview\r\n\r\n* Fixup too!\r\n\r\n* Fix check copies\r\n\r\n* Use the same version as the CI\r\n\r\n* Bump black", "code": "def check_all_decorator_order():\n \n errors = []\n for fname in os.listdir(PATH_TO_TESTS):\n if fname.endswith(\".py\"):\n filename = os.path.join(PATH_TO_TESTS, fname)\n new_errors = check_decorator_order(filename)\n errors += [f\"- {filename}, line {i}\" for i in new_errors]\n if len(errors) > 0:\n msg = \"\\n\".join(errors)\n raise ValueError(\n \"The parameterized decorator (and its variants) should always be first, but this is not the case in the\"\n f\" following files:\\n{msg}\"\n )\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 154, "n_words": 59, "vocab_size": 51, "complexity": 5, "nloc": 13, "token_counts": 78, "n_ast_nodes": 148, "n_identifiers": 16, "d_id": 6970, "documentation": { "docstring": "Check that in all test files, the slow decorator is always last.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 25927, "commit_id": "34bf03ce99d46e84c0990f009025b436c1f0386c", "repo": "saleor", "path": "saleor/order/models.py", "file_name": "models.py", "fun_name": "ready_to_fulfill", "commit_message": "Optimize order filtering by ready to fulfill status (#9113)", "code": "def ready_to_fulfill(self):\n \n statuses = {OrderStatus.UNFULFILLED, OrderStatus.PARTIALLY_FULFILLED}\n payments = Payment.objects.filter(is_active=True).values(\"id\")\n return self.filter(\n Exists(payments.filter(order_id=OuterRef(\"id\"))),\n status__in=statuses,\n total_gross_amount__lte=F(\"total_paid_amount\"),\n )\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 83, "n_words": 15, "vocab_size": 14, "complexity": 1, "nloc": 8, "token_counts": 66, "n_ast_nodes": 110, "n_identifiers": 18, "d_id": 4925, "documentation": { "docstring": "Return orders that can be fulfilled.\n\n Orders ready to fulfill are fully paid but unfulfilled (or partially\n fulfilled).\n ", "n_words": 18, "vocab_size": 18, "n_whitespaces": 39, "language": "en" } }, { "id": 260360, "commit_id": "4cc347d4d0cbbfdcbd353f08842e0668fed78c9f", "repo": "scikit-learn", "path": "sklearn/decomposition/_fastica.py", "file_name": "_fastica.py", "fun_name": "fit_transform", "commit_message": "MAINT Use _validate_params in FastICA (#23711)\n\nCo-authored-by: Guillaume Lemaitre \r\nCo-authored-by: jeremiedbb ", "code": "def fit_transform(self, X, y=None):\n \n self._validate_params()\n\n return self._fit_transform(X, compute_sources=True)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": 
"Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 28, "n_ast_nodes": 45, "n_identifiers": 7, "d_id": 76206, "documentation": { "docstring": "Fit the model and recover the sources from X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n X_new : ndarray of shape (n_samples, n_components)\n Estimated sources obtained by transforming the data with the\n estimated unmixing matrix.\n ", "n_words": 66, "vocab_size": 49, "n_whitespaces": 177, "language": "en" } }, { "id": 189494, "commit_id": "902e7eb4f0147b5882a613b67467e38a1d47f01e", "repo": "manim", "path": "manim/mobject/svg/text_mobject.py", "file_name": "text_mobject.py", "fun_name": "_text2settings", "commit_message": "Hide more private methods from the docs. (#2468)\n\n* hide privs from text_mobject.py\r\n\r\n* hide privs from tex_mobject.py\r\n\r\n* hide privs from code_mobject.py\r\n\r\n* hide privs from svg_mobject.py\r\n\r\n* remove SVGPath and utils from __init__.py\r\n\r\n* don't import string_to_numbers\r\n\r\n* hide privs from geometry.py\r\n\r\n* hide privs from matrix.py\r\n\r\n* hide privs from numbers.py\r\n\r\n* hide privs from three_dimensions.py\r\n\r\n* forgot underscore under set_stroke_width_from_length\r\n\r\n* there were more i missed\r\n\r\n* unhidea method that was used in docs\r\n\r\n* forgot other text2hash\r\n\r\n* remove svg_path from docs", "code": "def _text2settings(self):\n \n t2xs = [\n (self.t2f, \"font\"),\n (self.t2s, \"slant\"),\n (self.t2w, \"weight\"),\n (self.t2c, \"color\"),\n ]\n setting_args = {arg: getattr(self, arg) for _, arg in t2xs}\n\n settings = self._get_settings_from_t2xs(t2xs)\n settings.extend(self._get_settings_from_gradient(setting_args))\n\n # Handle overlaps\n\n settings.sort(key=lambda setting: setting.start)\n for index, setting in enumerate(settings):\n if index + 1 == len(settings):\n break\n\n next_setting = settings[index + 1]\n if setting.end > next_setting.start:\n new_setting = self._merge_settings(setting, next_setting, setting_args)\n new_index = index + 1\n while (\n new_index < len(settings)\n and settings[new_index].start < new_setting.start\n ):\n new_index += 1\n settings.insert(new_index, new_setting)\n\n # Set all text settings (default font, slant, weight)\n temp_settings = settings.copy()\n start = 0\n for setting in settings:\n if setting.start != start:\n temp_settings.append(TextSetting(start, setting.start, **setting_args))\n start = setting.end\n if start != len(self.text):\n temp_settings.append(TextSetting(start, len(self.text), **setting_args))\n settings = sorted(temp_settings, key=lambda setting: setting.start)\n\n if re.search(r\"\\n\", self.text):\n line_num = 0\n for start, end in self._find_indexes(\"\\n\", self.text):\n for setting in settings:\n if setting.line_num == -1:\n setting.line_num = line_num\n if start < setting.end:\n line_num += 1\n new_setting = copy.copy(setting)\n setting.end = end\n new_setting.start = end\n new_setting.line_num = line_num\n settings.append(new_setting)\n settings.sort(key=lambda setting: setting.start)\n break\n for setting in settings:\n if setting.line_num == -1:\n setting.line_num = 0\n return settings\n", "url": "https://github.com/ManimCommunity/manim.git", "language": 
"Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 888, "n_words": 182, "vocab_size": 99, "complexity": 17, "nloc": 52, "token_counts": 389, "n_ast_nodes": 612, "n_identifiers": 38, "d_id": 46094, "documentation": { "docstring": "Converts the texts and styles to a setting for parsing.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 100464, "commit_id": "aa39234538a8f83e6aa2b60b8275a570e8876ac2", "repo": "faceswap", "path": "plugins/train/model/original.py", "file_name": "original.py", "fun_name": "decoder", "commit_message": "Update all Keras Imports to be conditional (#1214)\n\n* Remove custom keras importer\r\n\r\n* first round keras imports fix\r\n\r\n* launcher.py: Remove KerasFinder references\r\n\r\n* 2nd round keras imports update (lib and extract)\r\n\r\n* 3rd round keras imports update (train)\r\n\r\n* remove KerasFinder from tests\r\n\r\n* 4th round keras imports update (tests)", "code": "def decoder(self, side):\r\n \r\n input_ = Input(shape=(8, 8, 512))\r\n var_x = input_\r\n var_x = UpscaleBlock(256, activation=\"leakyrelu\")(var_x)\r\n var_x = UpscaleBlock(128, activation=\"leakyrelu\")(var_x)\r\n var_x = UpscaleBlock(64, activation=\"leakyrelu\")(var_x)\r\n var_x = Conv2DOutput(3, 5, name=f\"face_out_{side}\")(var_x)\r\n outputs = [var_x]\r\n\r\n if self.learn_mask:\r\n var_y = input_\r\n var_y = UpscaleBlock(256, activation=\"leakyrelu\")(var_y)\r\n var_y = UpscaleBlock(128, activation=\"leakyrelu\")(var_y)\r\n var_y = UpscaleBlock(64, activation=\"leakyrelu\")(var_y)\r\n var_y = Conv2DOutput(1, 5, name=f\"mask_out_{side}\")(var_y)\r\n outputs.append(var_y)\r\n return KerasModel(input_, outputs=outputs, name=f\"decoder_{side}\")\r\n\r", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 194, "n_words": 58, "vocab_size": 29, "complexity": 2, "nloc": 16, "token_counts": 168, "n_ast_nodes": 283, "n_identifiers": 16, "d_id": 19938, "documentation": { "docstring": " The original Faceswap Decoder Network.\r\n\r\n The decoders for the original model have separate weights for each side \"A\" and \"B\", so two\r\n instances are created in :func:`build_model`, one for each side.\r\n\r\n Parameters\r\n ----------\r\n side: str\r\n Either `\"a` or `\"b\"`. This is used for naming the decoder model.\r\n\r\n Returns\r\n -------\r\n :class:`keras.models.Model`\r\n The Keras decoder model. 
This will be called twice, once for each side.\r\n ", "n_words": 63, "vocab_size": 49, "n_whitespaces": 149, "language": "en" } }, { "id": 178014, "commit_id": "583b3cb3b03a36a30b3ce9fe96eb4fb28548a070", "repo": "label-studio", "path": "label_studio/core/label_config.py", "file_name": "label_config.py", "fun_name": "check_toname_in_config_by_regex", "commit_message": "fix: DEV-1462: Fix changing label config for repeater tag (#2725)\n\n* fix: DEV-1462: Fix changing label config for repeater tag with created annotations", "code": "def check_toname_in_config_by_regex(config_string, to_name, control_type=None):\n \n c = parse_config(config_string)\n if control_type:\n check_list = [control_type]\n else:\n check_list = list(c.keys())\n for control in check_list:\n item = c[control].get('regex', {})\n for to_name_item in c[control]['to_name']:\n expression = to_name_item\n for key in item:\n expression = expression.replace(key, item[key])\n pattern = re.compile(expression)\n full_match = pattern.fullmatch(to_name)\n if full_match:\n return True\n return False\n\n", "url": "https://github.com/heartexlabs/label-studio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 182, "n_words": 51, "vocab_size": 35, "complexity": 6, "nloc": 17, "token_counts": 112, "n_ast_nodes": 179, "n_identifiers": 21, "d_id": 42572, "documentation": { "docstring": "\n Check if to_name is in config including regex filter\n :return: True if to_name is fullmatch to some pattern ion config\n ", "n_words": 20, "vocab_size": 16, "n_whitespaces": 30, "language": "en" } }, { "id": 315483, "commit_id": "b09aaba421d6d6178d582bef9ea363017e55639d", "repo": "core", "path": "tests/components/mikrotik/test_device_tracker.py", "file_name": "test_device_tracker.py", "fun_name": "test_hub_not_support_wireless", "commit_message": "Add type hints and code cleanup for mikrotik (#74296)\n\n* Add type hints and code cleanup for mikrotik\r\n\r\n* update test and increase coverage\r\n\r\n* move setup_mikrotik_entry to __init__.py", "code": "async def test_hub_not_support_wireless(hass, mock_device_registry_devices):\n \n\n await setup_mikrotik_entry(hass, support_wireless=False)\n device_1 = hass.states.get(\"device_tracker.device_1\")\n assert device_1\n assert device_1.state == \"home\"\n # device_2 is added from DHCP\n device_2 = hass.states.get(\"device_tracker.device_2\")\n assert device_2\n assert device_2.state == \"home\"\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 58, "n_words": 31, "vocab_size": 22, "complexity": 1, "nloc": 8, "token_counts": 53, "n_ast_nodes": 95, "n_identifiers": 10, "d_id": 114071, "documentation": { "docstring": "Test device_trackers created when hub doesn't support wireless.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 263956, "commit_id": "dc12cb59559f99110917bcbd21c9960ab57d994f", "repo": "pyinstaller", "path": "tests/unit/test_bytecode.py", "file_name": "test_bytecode.py", "fun_name": "test_finditer", "commit_message": "tests: fix test_finditer\n\nHave the test use bytestrings instead of strings.\n\nAlso assert that the bytecode string passed to bytecode.finditer()\nis in fact a bytestring.", "code": "def test_finditer():\n \n matches = list(finditer(re.compile(rb\"\\d+\"), b\"0123 4567 890 12 3 4\"))\n aligned = [i.group() for i in matches]\n assert aligned == [b\"0123\", b\"567\", b\"890\", b\"12\"]\n", "url": 
"https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 37, "n_words": 25, "vocab_size": 23, "complexity": 2, "nloc": 4, "token_counts": 52, "n_ast_nodes": 84, "n_identifiers": 9, "d_id": 77525, "documentation": { "docstring": "\n Test that bytecode.finditer() yields matches only that start on an even byte (``match.start() % 2 == 0``).\n\n There are 3 permutations here when considering a match:\n - A match starts on an even byte:\n That's good! Include that sequence.\n - A single character match starts on an odd byte:\n Ignore it. It's a false positive.\n - A multi-character match starts on an odd byte:\n This match will be a false positive but there may be a genuine match shortly afterwards (in the case of the\n # test below - it'll be the next character) which overlaps with this one so we must override regex's\n behaviour of ignoring overlapping matches to prevent these from getting lost.\n ", "n_words": 115, "vocab_size": 82, "n_whitespaces": 169, "language": "en" } }, { "id": 189404, "commit_id": "5789be81609c8bf6d98d1d87d4061477d0cd37b9", "repo": "manim", "path": "manim/scene/vector_space_scene.py", "file_name": "vector_space_scene.py", "fun_name": "add_axes", "commit_message": "Fix `add_axes` in :class:`~.VectorScene`. (#2444)\n\n* fix bug\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def add_axes(self, animate=False, color=WHITE, **kwargs):\n \n axes = Axes(color=color, axis_config={\"unit_size\": 1})\n if animate:\n self.play(Create(axes))\n self.add(axes)\n return axes\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 62, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 6, "token_counts": 53, "n_ast_nodes": 86, "n_identifiers": 12, "d_id": 46041, "documentation": { "docstring": "\n Adds a pair of Axes to the Scene.\n\n Parameters\n ----------\n animate : bool, optional\n Whether or not to animate the addition of the axes through Create.\n color : bool, optional\n The color of the axes. Defaults to WHITE.\n ", "n_words": 38, "vocab_size": 26, "n_whitespaces": 103, "language": "en" } }, { "id": 261817, "commit_id": "d17d0f9f721dd030f7405023a838edb564ac1a4c", "repo": "scikit-learn", "path": "sklearn/model_selection/_validation.py", "file_name": "_validation.py", "fun_name": "_score", "commit_message": "FIX `cross_validate` with multimetric scoring returns the non-failed scorers results even if some fail (#23101)\n\nCo-authored-by: Guillaume Lemaitre ", "code": "def _score(estimator, X_test, y_test, scorer, error_score=\"raise\"):\n \n if isinstance(scorer, dict):\n # will cache method calls if needed. scorer() returns a dict\n scorer = _MultimetricScorer(scorers=scorer, raise_exc=(error_score == \"raise\"))\n\n try:\n if y_test is None:\n scores = scorer(estimator, X_test)\n else:\n scores = scorer(estimator, X_test, y_test)\n except Exception:\n if isinstance(scorer, _MultimetricScorer):\n # If `_MultimetricScorer` raises exception, the `error_score`\n # parameter is equal to \"raise\".\n raise\n else:\n if error_score == \"raise\":\n raise\n else:\n scores = error_score\n warnings.warn(\n \"Scoring failed. The score on this train-test partition for \"\n f\"these parameters will be set to {error_score}. 
Details: \\n\"\n f\"{format_exc()}\",\n UserWarning,\n )\n\n # Check non-raised error messages in `_MultimetricScorer`\n if isinstance(scorer, _MultimetricScorer):\n exception_messages = [\n (name, str_e) for name, str_e in scores.items() if isinstance(str_e, str)\n ]\n if exception_messages:\n # error_score != \"raise\"\n for name, str_e in exception_messages:\n scores[name] = error_score\n warnings.warn(\n \"Scoring failed. The score on this train-test partition for \"\n f\"these parameters will be set to {error_score}. Details: \\n\"\n f\"{str_e}\",\n UserWarning,\n )\n\n error_msg = \"scoring must return a number, got %s (%s) instead. (scorer=%s)\"\n if isinstance(scores, dict):\n for name, score in scores.items():\n if hasattr(score, \"item\"):\n with suppress(ValueError):\n # e.g. unwrap memmapped scalars\n score = score.item()\n if not isinstance(score, numbers.Number):\n raise ValueError(error_msg % (score, type(score), name))\n scores[name] = score\n else: # scalar\n if hasattr(scores, \"item\"):\n with suppress(ValueError):\n # e.g. unwrap memmapped scalars\n scores = scores.item()\n if not isinstance(scores, numbers.Number):\n raise ValueError(error_msg % (scores, type(scores), scorer))\n return scores\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 863, "n_words": 228, "vocab_size": 126, "complexity": 17, "nloc": 51, "token_counts": 296, "n_ast_nodes": 506, "n_identifiers": 31, "d_id": 77015, "documentation": { "docstring": "Compute the score(s) of an estimator on a given test set.\n\n Will return a dict of floats if `scorer` is a dict, otherwise a single\n float is returned.\n ", "n_words": 28, "vocab_size": 23, "n_whitespaces": 37, "language": "en" } }, { "id": 321412, "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", "repo": "qutebrowser", "path": "tests/unit/misc/test_ipc.py", "file_name": "test_ipc.py", "fun_name": "test_socket_options_address_in_use_problem", "commit_message": "Run scripts/dev/rewrite_enums.py", "code": "def test_socket_options_address_in_use_problem(qlocalserver, short_tmpdir):\n \n servername = str(short_tmpdir / 'x')\n\n s1 = QLocalServer()\n ok = s1.listen(servername)\n assert ok\n\n s2 = QLocalServer()\n s2.setSocketOptions(QLocalServer.SocketOption.UserAccessOption)\n ok = s2.listen(servername)\n print(s2.errorString())\n # We actually would expect ok == False here - but we want the test to fail\n # when the Qt bug is fixed.\n assert ok\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 86, "n_words": 50, "vocab_size": 38, "complexity": 1, "nloc": 10, "token_counts": 64, "n_ast_nodes": 112, "n_identifiers": 15, "d_id": 117701, "documentation": { "docstring": "Qt seems to ignore AddressInUseError when using socketOptions.\n\n With this test we verify this bug still exists. 
If it fails, we can\n probably start using setSocketOptions again.\n ", "n_words": 27, "vocab_size": 24, "n_whitespaces": 36, "language": "en" } }, { "id": 34075, "commit_id": "1b730c3d11fdad0180ee9f9d3da9cff933c3b264", "repo": "transformers", "path": "utils/check_dummies.py", "file_name": "check_dummies.py", "fun_name": "create_dummy_object", "commit_message": "Better dummies (#15148)\n\n* Better dummies\r\n\r\n* See if this fixes the issue\r\n\r\n* Fix quality\r\n\r\n* Style\r\n\r\n* Add doc for DummyObject", "code": "def create_dummy_object(name, backend_name):\n \n if name.isupper():\n return DUMMY_CONSTANT.format(name)\n elif name.islower():\n return DUMMY_FUNCTION.format(name, backend_name)\n else:\n return DUMMY_CLASS.format(name, backend_name)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 49, "n_words": 16, "vocab_size": 13, "complexity": 3, "nloc": 7, "token_counts": 49, "n_ast_nodes": 80, "n_identifiers": 9, "d_id": 6195, "documentation": { "docstring": "Create the code for the dummy object corresponding to `name`.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 31115, "commit_id": "34097b3304d79ace845316d4929220623279c8bc", "repo": "transformers", "path": "src/transformers/testing_utils.py", "file_name": "testing_utils.py", "fun_name": "require_torch_bf16", "commit_message": "Extend Transformers Trainer Class to Enable CPU AMP and Integrate Intel Extension for PyTorch (#17138)\n\n* init PR\r\n\r\n* fix import ipex\r\n\r\n* minor fix on bf16\r\n\r\n* refine optimizer\r\n\r\n* refine args notes\r\n\r\n* refine code\r\n\r\n* refine ipex optimize args\r\n\r\n* refine half_precision_backend\r\n\r\n* black format\r\n\r\n* isort format\r\n\r\n* isort format files\r\n\r\n* flake8 format\r\n\r\n* doc builder format\r\n\r\n* refine codes\r\n\r\n* remove jit and optim bits\r\n\r\n* black preview format\r\n\r\n* Update src/transformers/trainer.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* refine code\r\n\r\n* refine notes\r\n\r\n* Update src/transformers/trainer.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/trainer.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* code refine\r\n\r\n* add ipex ut\r\n\r\n* add performance cpu doc\r\n\r\n* link to the cpu doc from main perf doc\r\n\r\n* install ipex into CI's docker\r\n\r\n* Update perf_train_cpu.mdx\r\n\r\n* Update docs/source/en/perf_train_cpu.mdx\r\n\r\nCo-authored-by: Stas Bekman \r\n\r\n* Update perf_train_cpu.mdx\r\n\r\n* Update perf_train_cpu.mdx\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\nCo-authored-by: Stas Bekman \r\nCo-authored-by: Stas Bekman ", "code": "def require_torch_bf16(test_case):\n \n return unittest.skipUnless(\n is_torch_bf16_available(),\n \"test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0 or using CPU\",\n )(test_case)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 43, "n_words": 20, "vocab_size": 18, "complexity": 1, "nloc": 5, "token_counts": 21, "n_ast_nodes": 38, "n_identifiers": 5, "d_id": 5682, "documentation": { "docstring": "Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0 or 
using CPU.", "n_words": 18, "vocab_size": 16, "n_whitespaces": 17, "language": "en" } }, { "id": 242082, "commit_id": "131c4e78b3f093ad3d415ebcc1fb42bbbde72470", "repo": "scipy", "path": "scipy/optimize/_milp.py", "file_name": "_milp.py", "fun_name": "milp", "commit_message": "MAINT: optimize: milp: update error messages\n[skip ci]", "code": "def milp(c, *, integrality=None, bounds=None, constraints=None, options=None):\n r\n args_iv = _milp_iv(c, integrality, bounds, constraints, options)\n c, integrality, lb, ub, indptr, indices, data, b_l, b_u, options = args_iv\n\n highs_res = _highs_wrapper(c, indptr, indices, data, b_l, b_u,\n lb, ub, integrality, options)\n\n res = {}\n\n # Convert to scipy-style status and message\n highs_status = highs_res.get('status', None)\n highs_message = highs_res.get('message', None)\n status, message = _highs_to_scipy_status_message(highs_status,\n highs_message)\n res['status'] = status\n res['message'] = message\n res['success'] = res['status'] in {0, 2, 3}\n x = highs_res.get('x', None)\n res['x'] = np.array(x) if x is not None else None\n res['fun'] = highs_res.get('fun', None)\n res['mip_node_count'] = highs_res.get('mip_node_count', None)\n res['mip_dual_bound'] = highs_res.get('mip_dual_bound', None)\n res['mip_gap'] = highs_res.get('mip_gap', None)\n\n return OptimizeResult(res)\n", "url": "https://github.com/scipy/scipy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 245, "n_words": 107, "vocab_size": 69, "complexity": 2, "nloc": 226, "token_counts": 232, "n_ast_nodes": 357, "n_identifiers": 28, "d_id": 69782, "documentation": { "docstring": "\n Mixed-integer linear programming\n\n Solves problems of the following form:\n\n .. math::\n\n \\min_x \\ & c^T x \\\\\n \\mbox{such that} \\ & b_l \\leq A x \\leq b_u,\\\\\n & l \\leq x \\leq u, \\\\\n & x_i \\in \\mathbb{Z}, i \\in X_i\n\n where :math:`x` is a vector of decision variables;\n :math:`c`, :math:`b_l`, :math:`b_u`, :math:`l`, and :math:`u` are vectors;\n :math:`A` is a matrix, and :math:`X_i` is the set of indices of\n decision variables that must be integral. (In this context, a\n variable that can assume only integer values is said to be \"integral\";\n it has an \"integrality\" constraint.)\n\n Alternatively, that's:\n\n minimize::\n\n c @ x\n\n such that::\n\n b_l <= A @ x <= b_u\n l <= x <= u\n Specified elements of x must be integers\n\n By default, ``l = 0`` and ``u = np.inf`` unless specified with\n ``bounds``.\n\n Parameters\n ----------\n c : 1D array_like\n The coefficients of the linear objective function to be minimized.\n `c` is converted to a double precision array before the problem is\n solved.\n integrality : 1D array_like, optional\n Indicates the type of integrality constraint on each decision variable.\n\n ``0`` : Continuous variable; no integrality constraint.\n\n ``1`` : Integer variable; decision variable must be an integer\n within `bounds`.\n\n ``2`` : Semi-continuous variable; decision variable must be within\n `bounds` or take value ``0``.\n\n ``3`` : Semi-integer variable; decision variable must be an integer\n within `bounds` or take value ``0``.\n\n By default, all variables are continuous. `integrality` is converted\n to an array of integers before the problem is solved.\n\n bounds : scipy.optimize.Bounds, optional\n Bounds on the decision variables. Lower and upper bounds are converted\n to double precision arrays before the problem is solved. 
The\n ``keep_feasible`` parameter of the `Bounds` object is ignored. If\n not specified, all decision variables are constrained to be\n non-negative.\n constraints : sequence of scipy.optimize.LinearConstraint, optional\n Linear constraints of the optimization problem. Arguments may be\n one of the following:\n\n 1. A single `LinearConstraint` object\n 2. A single tuple that can be converted to a `LinearConstraint` object\n as ``LinearConstraint(*constraints)``\n 3. A sequence composed entirely of objects of type 1. and 2.\n\n Before the problem is solved, all values are converted to double\n precision, and the matrices of constraint coefficients are converted to\n instances of `scipy.sparse.csc_array`. The ``keep_feasible`` parameter\n of `LinearConstraint` objects is ignored.\n options : dict, optional\n A dictionary of solver options. The following keys are recognized.\n\n disp : bool (default: ``False``)\n Set to ``True`` if indicators of optimization status are to be\n printed to the console during optimization.\n presolve : bool (default: ``True``)\n Presolve attempts to identify trivial infeasibilities,\n identify trivial unboundedness, and simplify the problem before\n sending it to the main solver.\n time_limit : float, optional\n The maximum number of seconds allotted to solve the problem.\n Default is no time limit.\n\n Returns\n -------\n res : OptimizeResult\n An instance of :class:`scipy.optimize.OptimizeResult`. The object\n is guaranteed to have the following attributes.\n\n status : int\n An integer representing the exit status of the algorithm.\n\n ``0`` : Optimal solution found.\n\n ``1`` : Iteration or time limit reached.\n\n ``2`` : Problem is infeasible.\n\n ``3`` : Problem is unbounded.\n\n ``4`` : Other; see message for details.\n\n success : bool\n ``True`` when an optimal solution is found, the problem is\n determined to be infeasible, or the problem is determined\n to be unbounded.\n\n message : str\n A string descriptor of the exit status of the algorithm.\n\n The following attributes will also be present, but the values may be\n ``None``, depending on the solution status.\n\n x : ndarray\n The values of the decision variables that minimize the\n objective function while satisfying the constraints.\n fun : float\n The optimal value of the objective function ``c @ x``.\n mip_node_count : int\n The number of subproblems or \"nodes\" solved by the MILP solver.\n mip_dual_bound : float\n The MILP solver's final estimate of the lower bound on the optimal\n solution.\n mip_gap : float\n The difference between the final objective function value and the\n final dual bound.\n\n Notes\n -----\n `milp` is a wrapper of the HiGHS linear optimization software [1]_. The\n algorithm is deterministic, and it typically finds the global optimum of\n moderately challenging mixed-integer linear programs (when it exists).\n\n References\n ----------\n .. [1] Huangfu, Q., Galabova, I., Feldmeier, M., and Hall, J. A. J.\n \"HiGHS - high performance software for linear optimization.\"\n Accessed 12/25/2021 at https://www.maths.ed.ac.uk/hall/HiGHS/#guide\n .. [2] Huangfu, Q. and Hall, J. A. J. \"Parallelizing the dual revised\n simplex method.\" Mathematical Programming Computation, 10 (1),\n 119-142, 2018. DOI: 10.1007/s12532-017-0130-5\n\n Examples\n --------\n Consider the problem at\n https://en.wikipedia.org/wiki/Integer_programming#Example, which is\n expressed as a maximization problem of two variables. 
Since `milp` requires\n that the problem be expressed as a minimization problem, the objective\n function coefficients on the decision variables are:\n\n >>> c = -np.array([0, 1])\n\n Note the negative sign: we maximize the original objective function\n by minimizing the negative of the objective function.\n\n We collect the coefficients of the constraints into arrays like:\n\n >>> A = np.array([[-1, 1], [3, 2], [2, 3]])\n >>> b_u = np.array([1, 12, 12])\n >>> b_l = np.full_like(b_u, -np.inf)\n\n Because there is no lower limit on these constraints, we have defined a\n variable ``b_l`` full of values representing negative infinity. This may\n be unfamiliar to users of `scipy.optimize.linprog`, which only accepts\n \"less than\" (or \"upper bound\") inequality constraints of the form\n ``A_ub @ x <= b_u``. By accepting both ``b_l`` and ``b_u`` of constraints\n ``b_l <= A_ub @ x <= b_u``, `milp` makes it easy to specify \"greater than\"\n inequality constraints, \"less than\" inequality constraints, and equality\n constraints concisely.\n\n These arrays are collected into a single `LinearConstraint` object like:\n\n >>> from scipy.optimize import LinearConstraint\n >>> constraints = LinearConstraint(A, b_l, b_u)\n\n The non-negativity bounds on the decision variables are enforced by\n default, so we do not need to provide an argument for `bounds`.\n\n Finally, the problem states that both decision variables must be integers:\n\n >>> integrality = np.ones_like(c)\n\n We solve the problem like:\n >>> from scipy.optimize import milp\n >>> res = milp(c=c, constraints=constraints, integrality=integrality)\n >>> res.x\n [1.0, 2.0]\n\n Note that had we solved the relaxed problem (without integrality\n constraints):\n >>> res = milp(c=c, constraints=constraints) # OR:\n >>> # from scipy.optimize import linprog; res = linprog(c, A, b_u)\n >>> res.x\n [1.8, 2.8]\n\n we would not have obtained the correct solution by rounding to the nearest\n integers.\n\n ", "n_words": 1026, "vocab_size": 475, "n_whitespaces": 1938, "language": "en" } }, { "id": 275620, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/optimizer_v2/utils.py", "file_name": "utils.py", "fun_name": "all_reduce_sum_gradients", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def all_reduce_sum_gradients(grads_and_vars):\n \n grads_and_vars = list(grads_and_vars)\n filtered_grads_and_vars = filter_empty_gradients(grads_and_vars)\n if filtered_grads_and_vars:\n if tf.__internal__.distribute.strategy_supports_no_merge_call():\n grads = [pair[0] for pair in filtered_grads_and_vars]\n reduced = tf.distribute.get_replica_context().all_reduce(\n tf.distribute.ReduceOp.SUM, grads\n )\n else:\n # TODO(b/183257003): Remove this branch\n reduced = tf.distribute.get_replica_context().merge_call(\n _all_reduce_sum_fn, args=(filtered_grads_and_vars,)\n )\n else:\n reduced = []\n # Copy 'reduced' but add None gradients back in\n reduced_with_nones = []\n reduced_pos = 0\n for g, v in grads_and_vars:\n if g is None:\n reduced_with_nones.append((None, v))\n else:\n reduced_with_nones.append((reduced[reduced_pos], v))\n reduced_pos += 1\n assert reduced_pos == len(reduced), \"Failed to add all gradients\"\n return reduced_with_nones\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 281, "n_words": 84, "vocab_size": 59, "complexity": 6, "nloc": 25, 
"token_counts": 153, "n_ast_nodes": 247, "n_identifiers": 25, "d_id": 81433, "documentation": { "docstring": "Returns all-reduced gradients aggregated via summation.\n\n Args:\n grads_and_vars: List of (gradient, variable) pairs.\n\n Returns:\n List of (gradient, variable) pairs where gradients have been all-reduced.\n ", "n_words": 24, "vocab_size": 19, "n_whitespaces": 43, "language": "en" } }, { "id": 213577, "commit_id": "d743336b1f3654cd0315f380f43eed4116997c1d", "repo": "ivy", "path": "ivy/core/container.py", "file_name": "container.py", "fun_name": "dev", "commit_message": "renamed dev_str arg to dev for all methods.", "code": "def dev(self):\n \n return self._get_dev()\n", "url": "https://github.com/unifyai/ivy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 23, "n_identifiers": 3, "d_id": 53660, "documentation": { "docstring": "\n The device to which the arrays in the container belong, with None returned if the devices are not consistent\n ", "n_words": 19, "vocab_size": 17, "n_whitespaces": 34, "language": "en" } }, { "id": 110754, "commit_id": "4507ae544155fb6fc9fa594faf1b8c8a23a85426", "repo": "matplotlib", "path": "lib/matplotlib/projections/polar.py", "file_name": "polar.py", "fun_name": "_zero_in_bounds", "commit_message": "Allow polar scales where zero is not in valid interval", "code": "def _zero_in_bounds(self):\n \n vmin, vmax = self._axes.yaxis._scale.limit_range_for_scale(0, 1, 1e-5)\n return vmin == 0\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 33, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 3, "token_counts": 32, "n_ast_nodes": 48, "n_identifiers": 8, "d_id": 24277, "documentation": { "docstring": "\n Return True if zero is within the valid values for the\n scale of the radial axis.\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 38, "language": "en" } }, { "id": 206541, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/utils/autoreload.py", "file_name": "autoreload.py", "fun_name": "_watch_glob", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _watch_glob(self, directory, patterns):\n \n prefix = \"glob\"\n if not directory.exists():\n if not directory.parent.exists():\n logger.warning(\n \"Unable to watch directory %s as neither it or its parent exist.\",\n directory,\n )\n return\n prefix = \"glob-parent-%s\" % directory.name\n patterns = [\"%s/%s\" % (directory.name, pattern) for pattern in patterns]\n directory = directory.parent\n\n expression = [\"anyof\"]\n for pattern in patterns:\n expression.append([\"match\", pattern, \"wholename\"])\n self._subscribe(directory, \"%s:%s\" % (prefix, directory), expression)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 243, "n_words": 63, "vocab_size": 49, "complexity": 5, "nloc": 16, "token_counts": 108, "n_ast_nodes": 181, "n_identifiers": 14, "d_id": 51559, "documentation": { "docstring": "\n Watch a directory with a specific glob. If the directory doesn't yet\n exist, attempt to watch the parent directory and amend the patterns to\n include this. It's important this method isn't called more than one per\n directory when updating all subscriptions. 
Subsequent calls will\n overwrite the named subscription, so it must include all possible glob\n expressions.\n ", "n_words": 56, "vocab_size": 46, "n_whitespaces": 106, "language": "en" } }, { "id": 251073, "commit_id": "fab7016b318d7c37fc30cef9c0567b9b620b883e", "repo": "mitmproxy", "path": "mitmproxy/tools/console/consoleaddons.py", "file_name": "consoleaddons.py", "fun_name": "edit_focus_options", "commit_message": "beautify flowtable dns entries\n\nthis isn't perfect (the whole table needs to be refactored properly),\nbut good enough for now.", "code": "def edit_focus_options(self) -> typing.Sequence[str]:\n \n flow = self.master.view.focus.flow\n focus_options = []\n\n if isinstance(flow, tcp.TCPFlow):\n focus_options = [\"tcp-message\"]\n elif isinstance(flow, http.HTTPFlow):\n focus_options = [\n \"cookies\",\n \"urlencoded form\",\n \"multipart form\",\n \"path\",\n \"method\",\n \"query\",\n \"reason\",\n \"request-headers\",\n \"response-headers\",\n \"request-body\",\n \"response-body\",\n \"status_code\",\n \"set-cookies\",\n \"url\",\n ]\n elif isinstance(flow, dns.DNSFlow):\n raise exceptions.CommandError(\"Cannot edit DNS flows yet, please submit a patch.\")\n\n return focus_options\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 357, "n_words": 54, "vocab_size": 44, "complexity": 4, "nloc": 28, "token_counts": 104, "n_ast_nodes": 181, "n_identifiers": 19, "d_id": 73587, "documentation": { "docstring": "\n Possible components for console.edit.focus.\n ", "n_words": 4, "vocab_size": 4, "n_whitespaces": 23, "language": "en" } }, { "id": 98369, "commit_id": "9acf84fbe1c7ffba0faec907ad3219141086949f", "repo": "sentry", "path": "src/sentry/utils/pytest/fixtures.py", "file_name": "fixtures.py", "fun_name": "default_user", "commit_message": "feat(appconnect): Introduce an endpoint to trigger refresh of builds (#33457)\n\nNormally the builds are refreshed once an hour, however we're adding a\r\nway to trigger this manually. 
This endpoint is still severely\r\nratelimited.\r\n\r\nThis also includes the UI code to add a button for this endpoint.\r\n\r\nNATIVE-139\r\n\r\nCo-authored-by: Priscila Oliveira ", "code": "def default_user(factories):\n \n return factories.create_user(email=\"admin@localhost\", is_superuser=True)\n\n\n@pytest.mark.django_db\n@pytest.fixture(scope=\"function\")", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@pytest.mark.django_db\n@pytest.fixture(scope=\"function\")", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 11, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 19, "n_ast_nodes": 60, "n_identifiers": 10, "d_id": 19565, "documentation": { "docstring": "A default (super)user with email ``admin@localhost`` and password ``admin``.\n\n :returns: A :class:`sentry.models.user.User` instance.\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 19, "language": "en" } }, { "id": 109887, "commit_id": "df6f95703b60348e01603f98a439b133da2938a0", "repo": "matplotlib", "path": "lib/mpl_toolkits/axes_grid1/axes_rgb.py", "file_name": "axes_rgb.py", "fun_name": "imshow_rgb", "commit_message": "Improve mpl_toolkit documentation", "code": "def imshow_rgb(self, r, g, b, **kwargs):\n \n if not (r.shape == g.shape == b.shape):\n raise ValueError(\n f'Input shapes ({r.shape}, {g.shape}, {b.shape}) do not match')\n RGB = np.dstack([r, g, b])\n R = np.zeros_like(RGB)\n R[:, :, 0] = r\n G = np.zeros_like(RGB)\n G[:, :, 1] = g\n B = np.zeros_like(RGB)\n B[:, :, 2] = b\n im_rgb = self.RGB.imshow(RGB, **kwargs)\n im_r = self.R.imshow(R, **kwargs)\n im_g = self.G.imshow(G, **kwargs)\n im_b = self.B.imshow(B, **kwargs)\n return im_rgb, im_r, im_g, im_b\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 197, "n_words": 73, "vocab_size": 52, "complexity": 2, "nloc": 16, "token_counts": 165, "n_ast_nodes": 272, "n_identifiers": 20, "d_id": 23796, "documentation": { "docstring": "\n Create the four images {rgb, r, g, b}.\n\n Parameters\n ----------\n r, g, b : array-like\n The red, green, and blue arrays.\n **kwargs :\n Forwarded to `~.Axes.imshow` calls for the four images.\n\n Returns\n -------\n rgb : `~matplotlib.image.AxesImage`\n r : `~matplotlib.image.AxesImage`\n g : `~matplotlib.image.AxesImage`\n b : `~matplotlib.image.AxesImage`\n ", "n_words": 45, "vocab_size": 32, "n_whitespaces": 152, "language": "en" } }, { "id": 49867, "commit_id": "f4d6e64cdc132ae868699a0ba442f4ab1d304a14", "repo": "PaddleHub", "path": "modules/image/text_to_image/disco_diffusion_cnclip_vitb16/reverse_diffusion/model/unet.py", "file_name": "unet.py", "fun_name": "forward", "commit_message": "add disco_diffusion_cnclip_vitb16 module", "code": "def forward(self, qkv):\n \n bs, width, length = qkv.shape\n assert width % (3 * self.n_heads) == 0\n ch = width // (3 * self.n_heads)\n # q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)\n q, k, v = paddle.reshape(qkv, [bs * self.n_heads, ch * 3, length]).split(3, axis=1)\n scale = 1 / math.sqrt(math.sqrt(ch))\n weight = paddle.einsum(\"bct,bcs->bts\", q * scale, k * scale) # More stable with f16 than dividing afterwards\n weight = paddle.cast(nn.functional.softmax(paddle.cast(weight, 'float32'), axis=-1), weight.dtype)\n a = paddle.einsum(\"bts,bcs->bct\", weight, v)\n # return a.reshape(bs, -1, length)\n return paddle.reshape(a, [bs, -1, length])\n", "url": 
"https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 177, "n_words": 92, "vocab_size": 63, "complexity": 1, "nloc": 10, "token_counts": 158, "n_ast_nodes": 253, "n_identifiers": 27, "d_id": 9938, "documentation": { "docstring": "\n Apply QKV attention.\n\n :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.\n :return: an [N x (H * C) x T] tensor after attention.\n ", "n_words": 33, "vocab_size": 21, "n_whitespaces": 62, "language": "en" } }, { "id": 147576, "commit_id": "2eaa54bd763ae0e63158ae0d939633c804394b78", "repo": "ray", "path": "rllib/agents/trainer_config.py", "file_name": "trainer_config.py", "fun_name": "callbacks", "commit_message": "[RLlib] POC: Config objects instead of dicts (PPO only). (#23491)", "code": "def callbacks(self, callbacks_class) -> \"TrainerConfig\":\n \n self.callbacks_class = callbacks_class\n\n return self\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 14, "token_counts": 17, "n_ast_nodes": 31, "n_identifiers": 3, "d_id": 34012, "documentation": { "docstring": "Sets the callbacks configuration.\n\n Args:\n callbacks_class: Callbacks class, whose methods will be run during\n various phases of training and environment sample collection.\n See the `DefaultCallbacks` class and\n `examples/custom_metrics_and_callbacks.py` for more usage information.\n\n Returns:\n This updated TrainerConfig object.\n ", "n_words": 37, "vocab_size": 35, "n_whitespaces": 125, "language": "en" } }, { "id": 30336, "commit_id": "14e467160c852095efb0107a25eb1b136fda3ea8", "repo": "spotify-downloader", "path": "spotdl/download/progress_handler.py", "file_name": "progress_handler.py", "fun_name": "notify_conversion_complete", "commit_message": "download the audio stream using yt-dlp not ffmpeg", "code": "def notify_conversion_complete(self, status=\"Converting\") -> None:\n \n\n self.progress = 95\n self.update(status)\n", "url": "https://github.com/spotDL/spotify-downloader.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 9, "token_counts": 23, "n_ast_nodes": 41, "n_identifiers": 5, "d_id": 5493, "documentation": { "docstring": "\n Notifies the progress handler that the song has been converted.\n\n ### Arguments\n - status: The status to display.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 47, "language": "en" } }, { "id": 149922, "commit_id": "386d3e035337cea7cbe9e38a5a8100fa79948fbb", "repo": "freqtrade", "path": "freqtrade/strategy/hyper.py", "file_name": "hyper.py", "fun_name": "ft_load_params_from_file", "commit_message": "Rename stop/roi loading method", "code": "def ft_load_params_from_file(self) -> None:\n \n if self._ft_params_from_file:\n # Set parameters from Hyperopt results file\n params = self._ft_params_from_file\n self.minimal_roi = params.get('roi', getattr(self, 'minimal_roi', {}))\n\n self.stoploss = params.get('stoploss', {}).get(\n 'stoploss', getattr(self, 'stoploss', -0.1))\n trailing = params.get('trailing', {})\n self.trailing_stop = trailing.get(\n 'trailing_stop', getattr(self, 'trailing_stop', False))\n self.trailing_stop_positive = trailing.get(\n 'trailing_stop_positive', getattr(self, 'trailing_stop_positive', None))\n self.trailing_stop_positive_offset = trailing.get(\n 
'trailing_stop_positive_offset',\n getattr(self, 'trailing_stop_positive_offset', 0))\n self.trailing_only_offset_is_reached = trailing.get(\n 'trailing_only_offset_is_reached',\n getattr(self, 'trailing_only_offset_is_reached', 0.0))\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 280, "n_words": 62, "vocab_size": 42, "complexity": 2, "nloc": 21, "token_counts": 157, "n_ast_nodes": 256, "n_identifiers": 13, "d_id": 34590, "documentation": { "docstring": "\n Load Parameters from parameter file\n Should/must run before config values are loaded in strategy_resolver.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 36, "language": "en" } }, { "id": 276951, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/layer_utils.py", "file_name": "layer_utils.py", "fun_name": "default_training_arg", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def default_training_arg(self):\n \n return self._default_training_arg\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 19, "n_identifiers": 3, "d_id": 81798, "documentation": { "docstring": "The default value given to the \"training\" argument.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 61958, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py", "file_name": "database.py", "fun_name": "add_distribution", "commit_message": "upd; format", "code": "def add_distribution(self, distribution):\n \n self.adjacency_list[distribution] = []\n self.reverse_list[distribution] = []\n #self.missing[distribution] = []\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 40, "n_words": 12, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 26, "n_ast_nodes": 44, "n_identifiers": 5, "d_id": 12779, "documentation": { "docstring": "Add the *distribution* to the graph.\n\n :type distribution: :class:`distutils2.database.InstalledDistribution`\n or :class:`distutils2.database.EggInfoDistribution`\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 52, "language": "en" } }, { "id": 277063, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/tf_inspect.py", "file_name": "tf_inspect.py", "fun_name": "_getargspec", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _getargspec(target):\n \n fullargspecs = getfullargspec(target)\n argspecs = ArgSpec(\n args=fullargspecs.args,\n varargs=fullargspecs.varargs,\n keywords=fullargspecs.varkw,\n defaults=fullargspecs.defaults,\n )\n return argspecs\n\nelse:\n _getargspec = _inspect.getargspec\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 100, "n_words": 19, "vocab_size": 16, "complexity": 1, "nloc": 9, "token_counts": 43, "n_ast_nodes": 78, "n_identifiers": 14, "d_id": 81840, "documentation": { "docstring": "A python3 version of getargspec.\n\n Calls `getfullargspec` and assigns args, 
varargs,\n varkw, and defaults to a python 2/3 compatible `ArgSpec`.\n\n The parameter name 'varkw' is changed to 'keywords' to fit the\n `ArgSpec` struct.\n\n Args:\n target: the target object to inspect.\n\n Returns:\n An ArgSpec with args, varargs, keywords, and defaults parameters\n from FullArgSpec.\n ", "n_words": 52, "vocab_size": 43, "n_whitespaces": 128, "language": "en" } }, { "id": 203882, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/gis/db/backends/spatialite/operations.py", "file_name": "operations.py", "fun_name": "proj_version", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def proj_version(self):\n \n return self._get_spatialite_func(\"proj4_version()\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 26, "n_identifiers": 3, "d_id": 50571, "documentation": { "docstring": "Return the version of the PROJ library used by SpatiaLite.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 218720, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/lib2to3/fixes/fix_renames.py", "file_name": "fix_renames.py", "fun_name": "build_pattern", "commit_message": "add python 3.10.4 for windows", "code": "def build_pattern():\n #bare = set()\n for module, replace in list(MAPPING.items()):\n for old_attr, new_attr in list(replace.items()):\n LOOKUP[(module, old_attr)] = new_attr\n #bare.add(module)\n #bare.add(old_attr)\n #yield % (module, module)\n yield % (module, old_attr, old_attr)\n yield % (module, old_attr)\n #yield % alternates(bare)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 122, "n_words": 37, "vocab_size": 24, "complexity": 3, "nloc": 11, "token_counts": 60, "n_ast_nodes": 104, "n_identifiers": 9, "d_id": 55450, "documentation": { "docstring": "\n # import_name< 'import' (module=%r\n # | dotted_as_names< any* module=%r any* >) >\n # \n import_from< 'from' module_name=%r 'import'\n ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) >\n \n power< module_name=%r trailer< '.' 
attr_name=%r > any* >\n bare_name=%s", "n_words": 35, "vocab_size": 22, "n_whitespaces": 178, "language": "en" } }, { "id": 288765, "commit_id": "3ab294e8efc00c9f3cda2993318bb582ba675f8c", "repo": "core", "path": "homeassistant/components/hunterdouglas_powerview/cover.py", "file_name": "cover.py", "fun_name": "close_tilt_position", "commit_message": "Powerview refactor prep for all shade types (#79862)", "code": "def close_tilt_position(self) -> PowerviewShadeMove:\n \n return PowerviewShadeMove(self._shade.close_position_tilt, {})\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 34, "n_identifiers": 5, "d_id": 87917, "documentation": { "docstring": "Return the close tilt position and required additional positions.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 228263, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_streamtube.py", "file_name": "_streamtube.py", "fun_name": "starts", "commit_message": "switch to black .22", "code": "def starts(self):\n \n return self[\"starts\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 59936, "documentation": { "docstring": "\n The 'starts' property is an instance of Starts\n that may be specified as:\n - An instance of :class:`plotly.graph_objs.streamtube.Starts`\n - A dict of string/value properties that will be passed\n to the Starts constructor\n\n Supported dict properties:\n\n x\n Sets the x components of the starting position\n of the streamtubes\n xsrc\n Sets the source reference on Chart Studio Cloud\n for `x`.\n y\n Sets the y components of the starting position\n of the streamtubes\n ysrc\n Sets the source reference on Chart Studio Cloud\n for `y`.\n z\n Sets the z components of the starting position\n of the streamtubes\n zsrc\n Sets the source reference on Chart Studio Cloud\n for `z`.\n\n Returns\n -------\n plotly.graph_objs.streamtube.Starts\n ", "n_words": 107, "vocab_size": 51, "n_whitespaces": 508, "language": "en" } }, { "id": 335461, "commit_id": "4261c3aadfc23ee5b123b80ab7d8680a013acb66", "repo": "diffusers", "path": "src/diffusers/models/unet_ldm.py", "file_name": "unet_ldm.py", "fun_name": "normalization", "commit_message": "Make style", "code": "def normalization(channels, swish=0.0):\n \n return GroupNorm32(num_channels=channels, num_groups=32, swish=swish)\n\n\n## go", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 14, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 27, "n_ast_nodes": 40, "n_identifiers": 6, "d_id": 120765, "documentation": { "docstring": "\n Make a standard normalization layer, with an optional swish activation.\n\n :param channels: number of input channels. 
:return: an nn.Module for normalization.\n ", "n_words": 21, "vocab_size": 20, "n_whitespaces": 31, "language": "en" } }, { "id": 149120, "commit_id": "fe62a71f4c621aa06cfbda1f2aec19ab01062ebd", "repo": "freqtrade", "path": "freqtrade/resolvers/strategy_resolver.py", "file_name": "strategy_resolver.py", "fun_name": "check_override", "commit_message": "Simplify implementation of \"check_override\" by extracting it to function", "code": "def check_override(object, parentclass, attribute):\n \n return getattr(type(object), attribute) == getattr(parentclass, attribute)\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 16, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 27, "n_ast_nodes": 42, "n_identifiers": 6, "d_id": 34371, "documentation": { "docstring": "\n Checks if a object overrides the parent class attribute.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 16, "language": "en" } }, { "id": 65181, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/budget_variance_report/budget_variance_report.py", "file_name": "budget_variance_report.py", "fun_name": "get_cost_centers", "commit_message": "style: format code with black", "code": "def get_cost_centers(filters):\n\torder_by = \"\"\n\tif filters.get(\"budget_against\") == \"Cost Center\":\n\t\torder_by = \"order by lft\"\n\n\tif filters.get(\"budget_against\") in [\"Cost Center\", \"Project\"]:\n\t\treturn frappe.db.sql_list(\n\t\t\t.format(\n\t\t\t\ttab=filters.get(\"budget_against\"), order_by=order_by\n\t\t\t),\n\t\t\tfilters.get(\"company\"),\n\t\t)\n\telse:\n\t\treturn frappe.db.sql_list(\n\t\t\t.format(\n\t\t\t\ttab=filters.get(\"budget_against\")\n\t\t\t)\n\t\t) # nosec\n\n\n# Get dimension & target details", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 27, "n_words": 44, "vocab_size": 34, "complexity": 3, "nloc": 30, "token_counts": 91, "n_ast_nodes": 168, "n_identifiers": 9, "d_id": 13819, "documentation": { "docstring": "\n\t\t\t\tselect\n\t\t\t\t\tname\n\t\t\t\tfrom\n\t\t\t\t\t`tab{tab}`\n\t\t\t\twhere\n\t\t\t\t\tcompany = %s\n\t\t\t\t{order_by}\n\t\t\t\n\t\t\t\tselect\n\t\t\t\t\tname\n\t\t\t\tfrom\n\t\t\t\t\t`tab{tab}`\n\t\t\t", "n_words": 13, "vocab_size": 9, "n_whitespaces": 2, "language": "en" } }, { "id": 203487, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admin/templatetags/admin_list.py", "file_name": "admin_list.py", "fun_name": "paginator_number", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def paginator_number(cl, i):\n \n if i == cl.paginator.ELLIPSIS:\n return format_html(\"{} \", cl.paginator.ELLIPSIS)\n elif i == cl.page_num:\n return format_html('{} ', i)\n else:\n return format_html(\n '{} ',\n cl.get_query_string({PAGE_VAR: i}),\n mark_safe(' class=\"end\"' if i == cl.paginator.num_pages else \"\"),\n i,\n )\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 122, "n_words": 38, "vocab_size": 30, "complexity": 4, "nloc": 12, "token_counts": 78, "n_ast_nodes": 128, "n_identifiers": 11, "d_id": 50406, "documentation": { "docstring": "\n Generate an individual page index link in a paginated list.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 17, 
"language": "en" } }, { "id": 241567, "commit_id": "650c710efacd633fa283955145342bb64063c883", "repo": "lightning", "path": "tests/strategies/test_deepspeed_strategy.py", "file_name": "test_deepspeed_strategy.py", "fun_name": "test_deepspeed_multigpu_single_file", "commit_message": "Rename training plugin test files & names to strategy (#11303)", "code": "def test_deepspeed_multigpu_single_file(tmpdir):\n \n model = BoringModel()\n checkpoint_path = os.path.join(tmpdir, \"model.pt\")\n trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)\n trainer.fit(model)\n trainer.save_checkpoint(checkpoint_path)\n\n trainer = Trainer(\n default_root_dir=tmpdir, strategy=DeepSpeedStrategy(stage=3), gpus=1, fast_dev_run=True, precision=16\n )\n strategy = trainer.strategy\n assert isinstance(strategy, DeepSpeedStrategy)\n assert not strategy.load_full_weights\n with pytest.raises(MisconfigurationException, match=\"DeepSpeed was unable to load the checkpoint.\"):\n trainer.test(model, ckpt_path=checkpoint_path)\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n strategy=DeepSpeedStrategy(stage=3, load_full_weights=True),\n gpus=1,\n fast_dev_run=True,\n precision=16,\n )\n strategy = trainer.strategy\n assert isinstance(strategy, DeepSpeedStrategy)\n assert strategy.load_full_weights\n trainer.test(model, ckpt_path=checkpoint_path)\n\n", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 167, "n_words": 64, "vocab_size": 41, "complexity": 1, "nloc": 25, "token_counts": 175, "n_ast_nodes": 270, "n_identifiers": 27, "d_id": 69594, "documentation": { "docstring": "Test to ensure that DeepSpeed loads from a single file checkpoint.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 160134, "commit_id": "729ad4f92420231e2a7009b3223c6c7620b8b808", "repo": "numpy", "path": "numpy/f2py/tests/test_f2py2e.py", "file_name": "test_f2py2e.py", "fun_name": "test_f2py_only", "commit_message": "TST: Initialize f2py2e tests of the F2PY CLI (#20668)\n\nIncreases F2PY coverage by around 15 percent. For the CLI itself it covers the major features (around 70 percent), with the exception of mostly numpy.distutils stuff.\r\n\r\nMore importantly, sets the groundwork for #20056, in that passing the same testsuite should indicate feature parity.", "code": "def test_f2py_only(capfd, retreal_f77, monkeypatch):\n \n foutl = get_io_paths(retreal_f77, mname=\"test\")\n ipath = foutl.finp\n toskip = \"t0 t4 t8 sd s8 s4\"\n tokeep = \"td s0\"\n monkeypatch.setattr(\n sys, \"argv\",\n f'f2py {ipath} -m test only: {tokeep}'.split())\n\n with util.switchdir(ipath.parent):\n f2pycli()\n out, err = capfd.readouterr()\n for skey in toskip.split():\n assert (\n f'buildmodule: Could not find the body of interfaced routine \"{skey}\". 
Skipping.'\n in err)\n for rkey in tokeep.split():\n assert f'Constructing wrapper function \"{rkey}\"' in out\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 184, "n_words": 69, "vocab_size": 60, "complexity": 3, "nloc": 17, "token_counts": 98, "n_ast_nodes": 183, "n_identifiers": 23, "d_id": 38506, "documentation": { "docstring": "Test that functions can be kept by only:\n CLI :: only:\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 17, "language": "en" } }, { "id": 164487, "commit_id": "06dac44e91bb099319fa6c421df8111b189d26a6", "repo": "pandas", "path": "pandas/core/indexes/base.py", "file_name": "base.py", "fun_name": "_is_all_dates", "commit_message": "CLN: Index methods incorrectly assuming object dtype (#45767)", "code": "def _is_all_dates(self) -> bool:\n \n\n if needs_i8_conversion(self.dtype):\n return True\n elif self.dtype != _dtype_obj:\n # TODO(ExtensionIndex): 3rd party EA might override?\n # Note: this includes IntervalIndex, even when the left/right\n # contain datetime-like objects.\n return False\n elif self._is_multi:\n return False\n return is_datetime_array(ensure_object(self._values))\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 142, "n_words": 40, "vocab_size": 33, "complexity": 4, "nloc": 11, "token_counts": 44, "n_ast_nodes": 76, "n_identifiers": 10, "d_id": 39562, "documentation": { "docstring": "\n Whether or not the index values only consist of dates.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 189517, "commit_id": "daf23c9d1031b12d9c119b8f6b7e60727d7f9242", "repo": "manim", "path": "manim/animation/animation.py", "file_name": "animation.py", "fun_name": "copy", "commit_message": "Upgraded typehints (#2429)\n\n* Future Annotations\r\n\r\n* Delete template_twitter_post.py\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Apply suggestions from code review\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed broken RTD\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def copy(self) -> Animation:\n \n return deepcopy(self)\n\n # Methods for interpolation, the mean of an Animation\n\n # TODO: stop using alpha as parameter name in different meanings.", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 46, "n_words": 26, "vocab_size": 25, "complexity": 1, "nloc": 9, "token_counts": 13, "n_ast_nodes": 26, "n_identifiers": 4, "d_id": 46111, "documentation": { "docstring": "Create a copy of the animation.\n\n Returns\n -------\n Animation\n A copy of ``self``\n ", "n_words": 13, "vocab_size": 11, "n_whitespaces": 52, "language": "en" } }, { "id": 247617, "commit_id": "5dd949bee6158a8b651db9f2ae417a62c8184bfd", "repo": "synapse", "path": "tests/handlers/test_e2e_keys.py", "file_name": "test_e2e_keys.py", "fun_name": "test_change_one_time_keys", "commit_message": "Add type hints to some tests/handlers files. 
(#12224)", "code": "def test_change_one_time_keys(self) -> None:\n \n\n local_user = \"@boris:\" + self.hs.hostname\n device_id = \"xyz\"\n keys = {\n \"alg1:k1\": \"key1\",\n \"alg2:k2\": {\"key\": \"key2\", \"signatures\": {\"k1\": \"sig1\"}},\n \"alg2:k3\": {\"key\": \"key3\"},\n }\n\n res = self.get_success(\n self.handler.upload_keys_for_user(\n local_user, device_id, {\"one_time_keys\": keys}\n )\n )\n self.assertDictEqual(\n res, {\"one_time_key_counts\": {\"alg1\": 1, \"alg2\": 2, \"signed_curve25519\": 0}}\n )\n\n # Error when changing string key\n self.get_failure(\n self.handler.upload_keys_for_user(\n local_user, device_id, {\"one_time_keys\": {\"alg1:k1\": \"key2\"}}\n ),\n SynapseError,\n )\n\n # Error when replacing dict key with string\n self.get_failure(\n self.handler.upload_keys_for_user(\n local_user, device_id, {\"one_time_keys\": {\"alg2:k3\": \"key2\"}}\n ),\n SynapseError,\n )\n\n # Error when replacing string key with dict\n self.get_failure(\n self.handler.upload_keys_for_user(\n local_user,\n device_id,\n {\"one_time_keys\": {\"alg1:k1\": {\"key\": \"key\"}}},\n ),\n SynapseError,\n )\n\n # Error when replacing dict key\n self.get_failure(\n self.handler.upload_keys_for_user(\n local_user,\n device_id,\n {\n \"one_time_keys\": {\n \"alg2:k2\": {\"key\": \"key3\", \"signatures\": {\"k1\": \"sig1\"}}\n }\n },\n ),\n SynapseError,\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 680, "n_words": 124, "vocab_size": 61, "complexity": 1, "nloc": 49, "token_counts": 229, "n_ast_nodes": 407, "n_identifiers": 14, "d_id": 71783, "documentation": { "docstring": "attempts to change one-time-keys should be rejected", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 196180, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/permutations.py", "file_name": "permutations.py", "fun_name": "__call__", "commit_message": "Updated import locations", "code": "def __call__(self, *other):\n \n rv = Cycle(*other)\n for k, v in zip(list(self.keys()), [rv[self[k]] for k in self.keys()]):\n rv[k] = v\n return rv\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 60, "n_words": 21, "vocab_size": 16, "complexity": 3, "nloc": 5, "token_counts": 59, "n_ast_nodes": 93, "n_identifiers": 10, "d_id": 47680, "documentation": { "docstring": "Return product of cycles processed from R to L.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Cycle\n >>> Cycle(1, 2)(2, 3)\n (1 3 2)\n\n An instance of a Cycle will automatically parse list-like\n objects and Permutations that are on the right. 
It is more\n flexible than the Permutation in that all elements need not\n be present:\n\n >>> a = Cycle(1, 2)\n >>> a(2, 3)\n (1 3 2)\n >>> a(2, 3)(4, 5)\n (1 3 2)(4 5)\n\n ", "n_words": 74, "vocab_size": 54, "n_whitespaces": 179, "language": "en" } }, { "id": 316438, "commit_id": "7cd68381f1d4f58930ffd631dfbfc7159d459832", "repo": "core", "path": "tests/test_config_entries.py", "file_name": "test_config_entries.py", "fun_name": "test_default_discovery_abort_on_new_unique_flow", "commit_message": "Search/replace RESULT_TYPE_* by FlowResultType enum (#74642)", "code": "async def test_default_discovery_abort_on_new_unique_flow(hass, manager):\n \n mock_integration(hass, MockModule(\"comp\"))\n mock_entity_platform(hass, \"config_flow.comp\", None)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 18, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 21, "token_counts": 165, "n_ast_nodes": 45, "n_identifiers": 6, "d_id": 115016, "documentation": { "docstring": "Test that a flow using default discovery is aborted when a second flow with unique ID is created.", "n_words": 18, "vocab_size": 15, "n_whitespaces": 17, "language": "en" } }, { "id": 266511, "commit_id": "29b5eb6ba9fb652ddd8dd06cdd8f2e80e2098063", "repo": "ansible", "path": "lib/ansible/cli/doc.py", "file_name": "doc.py", "fun_name": "_get_roles_path", "commit_message": "updated metadata dump to do full docs dump (#76170)\n\n * minor refactor in other options by pushing common code into functions\r\n * consolidate coll_filter\r\n * more normalizing loader\r\n * dont pass plugin_loader, its global import\r\n * Also dump roles and collections\r\n * adjusted tests to new err msg\r\n * disable namespace filter (unused)", "code": "def _get_roles_path(self):\n \n roles_path = context.CLIARGS['roles_path']\n if context.CLIARGS['basedir'] is not None:\n subdir = os.path.join(context.CLIARGS['basedir'], \"roles\")\n if os.path.isdir(subdir):\n roles_path = (subdir,) + roles_path\n roles_path = roles_path + (context.CLIARGS['basedir'],)\n return roles_path\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 104, "n_words": 28, "vocab_size": 18, "complexity": 3, "nloc": 8, "token_counts": 75, "n_ast_nodes": 126, "n_identifiers": 10, "d_id": 78449, "documentation": { "docstring": "\n Add any 'roles' subdir in playbook dir to the roles search path.\n And as a last resort, add the playbook dir itself. 
Order being:\n - 'roles' subdir of playbook dir\n - DEFAULT_ROLES_PATH (default in cliargs)\n - playbook dir (basedir)\n NOTE: This matches logic in RoleDefinition._load_role_path() method.\n ", "n_words": 46, "vocab_size": 33, "n_whitespaces": 108, "language": "en" } }, { "id": 100396, "commit_id": "c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "plugins/train/trainer/_base.py", "file_name": "_base.py", "fun_name": "compile_sample", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def compile_sample(self, batch_size, samples=None, images=None, masks=None):\n \n num_images = self._config.get(\"preview_images\", 14)\n num_images = min(batch_size, num_images) if batch_size is not None else num_images\n retval = {}\n for side in (\"a\", \"b\"):\n logger.debug(\"Compiling samples: (side: '%s', samples: %s)\", side, num_images)\n side_images = images[side] if images is not None else self._target[side]\n side_masks = masks[side] if masks is not None else self._masks[side]\n side_samples = samples[side] if samples is not None else self._samples[side]\n retval[side] = [side_samples[0:num_images],\n side_images[0:num_images],\n side_masks[0:num_images]]\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 225, "n_words": 74, "vocab_size": 48, "complexity": 6, "nloc": 13, "token_counts": 153, "n_ast_nodes": 225, "n_identifiers": 20, "d_id": 19880, "documentation": { "docstring": " Compile the preview samples for display.\n\n Parameters\n ----------\n batch_size: int\n The requested batch size for each training iterations\n samples: dict, optional\n Dictionary for side \"a\", \"b\" of :class:`numpy.ndarray`. The sample images that should\n be used for creating the preview. If ``None`` then the samples will be generated from\n the internal random image generator. Default: ``None``\n images: dict, optional\n Dictionary for side \"a\", \"b\" of :class:`numpy.ndarray`. The target images that should\n be used for creating the preview. If ``None`` then the targets will be generated from\n the internal random image generator. Default: ``None``\n masks: dict, optional\n Dictionary for side \"a\", \"b\" of :class:`numpy.ndarray`. The masks that should be used\n for creating the preview. If ``None`` then the masks will be generated from the\n internal random image generator. 
Default: ``None``\n\n Returns\n -------\n list\n The list of samples, targets and masks as :class:`numpy.ndarrays` for creating a\n preview image\n ", "n_words": 145, "vocab_size": 58, "n_whitespaces": 349, "language": "en" } }, { "id": 246741, "commit_id": "02d708568b476f2f7716000b35c0adfa4cbd31b3", "repo": "synapse", "path": "tests/handlers/test_directory.py", "file_name": "test_directory.py", "fun_name": "test_delete_alias_admin", "commit_message": "Replace assertEquals and friends with non-deprecated versions. (#12092)", "code": "def test_delete_alias_admin(self):\n \n # Create an alias from a different user.\n self._create_alias(self.test_user)\n\n # Delete the user's alias as the admin.\n result = self.get_success(\n self.handler.delete_association(\n create_requester(self.admin_user), self.room_alias\n )\n )\n self.assertEqual(self.room_id, result)\n\n # Confirm the alias is gone.\n self.get_failure(\n self.handler.get_association(self.room_alias),\n synapse.api.errors.SynapseError,\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 168, "n_words": 39, "vocab_size": 31, "complexity": 1, "nloc": 12, "token_counts": 72, "n_ast_nodes": 117, "n_identifiers": 19, "d_id": 71337, "documentation": { "docstring": "A server admin can delete an alias created by another user.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 63033, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py", "file_name": "__init__.py", "fun_name": "_validate_resource_path", "commit_message": "upd; format", "code": "def _validate_resource_path(path):\n \n invalid = (\n os.path.pardir in path.split(posixpath.sep) or\n posixpath.isabs(path) or\n ntpath.isabs(path)\n )\n if not invalid:\n return\n\n msg = \"Use of .. or absolute path in a resource path is not allowed.\"\n\n # Aggressively disallow Windows absolute paths\n if ntpath.isabs(path) and not posixpath.isabs(path):\n raise ValueError(msg)\n\n # for compatibility, warn; in future\n # raise ValueError(msg)\n warnings.warn(\n msg[:-1] + \" and will raise exceptions in a future release.\",\n DeprecationWarning,\n stacklevel=4,\n )\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 234, "n_words": 69, "vocab_size": 48, "complexity": 6, "nloc": 16, "token_counts": 87, "n_ast_nodes": 151, "n_identifiers": 16, "d_id": 13106, "documentation": { "docstring": "\n Validate the resource paths according to the docs.\n https://setuptools.readthedocs.io/en/latest/pkg_resources.html#basic-resource-access\n\n >>> warned = getfixture('recwarn')\n >>> warnings.simplefilter('always')\n >>> vrp = NullProvider._validate_resource_path\n >>> vrp('foo/bar.txt')\n >>> bool(warned)\n False\n >>> vrp('../foo/bar.txt')\n >>> bool(warned)\n True\n >>> warned.clear()\n >>> vrp('/foo/bar.txt')\n >>> bool(warned)\n True\n >>> vrp('foo/../../bar.txt')\n >>> bool(warned)\n True\n >>> warned.clear()\n >>> vrp('foo/f../bar.txt')\n >>> bool(warned)\n False\n\n Windows path separators are straight-up disallowed.\n >>> vrp(r'\\\\foo/bar.txt')\n Traceback (most recent call last):\n ...\n ValueError: Use of .. 
or absolute path in a resource path \\\nis not allowed.\n\n >>> vrp(r'C:\\\\foo/bar.txt')\n Traceback (most recent call last):\n ...\n ValueError: Use of .. or absolute path in a resource path \\\nis not allowed.\n\n Blank values are allowed\n\n >>> vrp('')\n >>> bool(warned)\n False\n\n Non-string values are not.\n\n >>> vrp(None)\n Traceback (most recent call last):\n ...\n AttributeError: ...\n ", "n_words": 123, "vocab_size": 58, "n_whitespaces": 409, "language": "en" } }, { "id": 219783, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pydecimal.py", "file_name": "_pydecimal.py", "fun_name": "_round", "commit_message": "add python 3.10.4 for windows", "code": "def _round(self, places, rounding):\n \n if places <= 0:\n raise ValueError(\"argument should be at least 1 in _round\")\n if self._is_special or not self:\n return Decimal(self)\n ans = self._rescale(self.adjusted()+1-places, rounding)\n # it can happen that the rescale alters the adjusted exponent;\n # for example when rounding 99.97 to 3 significant figures.\n # When this happens we end up with an extra 0 at the end of\n # the number; a second rescale fixes this.\n if ans.adjusted() != self.adjusted():\n ans = ans._rescale(ans.adjusted()+1-places, rounding)\n return ans\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 185, "n_words": 82, "vocab_size": 66, "complexity": 5, "nloc": 9, "token_counts": 84, "n_ast_nodes": 141, "n_identifiers": 10, "d_id": 55798, "documentation": { "docstring": "Round a nonzero, nonspecial Decimal to a fixed number of\n significant figures, using the given rounding mode.\n\n Infinities, NaNs and zeros are returned unaltered.\n\n This operation is quiet: it raises no flags, and uses no\n information from the context.\n\n ", "n_words": 39, "vocab_size": 35, "n_whitespaces": 74, "language": "en" } }, { "id": 196294, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/geometry/polygon.py", "file_name": "polygon.py", "fun_name": "exterior_angle", "commit_message": "Updated import locations", "code": "def exterior_angle(self):\n \n return 2*S.Pi/self._n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 28, "n_identifiers": 5, "d_id": 47794, "documentation": { "docstring": "Measure of the exterior angles.\n\n Returns\n =======\n\n exterior_angle : number\n\n See Also\n ========\n\n sympy.geometry.line.LinearEntity.angle_between\n\n Examples\n ========\n\n >>> from sympy import RegularPolygon, Point\n >>> rp = RegularPolygon(Point(0, 0), 4, 8)\n >>> rp.exterior_angle\n pi/4\n\n ", "n_words": 32, "vocab_size": 29, "n_whitespaces": 123, "language": "en" } }, { "id": 96409, "commit_id": "146fba432a32568be7d0b884dae0c39a6c33a11f", "repo": "sentry", "path": "tests/sentry/incidents/test_action_handlers.py", "file_name": "test_action_handlers.py", "fun_name": "test_context_for_resolved_crash_rate_alert", "commit_message": "fix(metric_alerts): Make sure critical triggers resolve properly when no action is set on a warning trigger (#31883)\n\n### Problem\r\nIf we have an alert set up like:\r\n- Warning: 50. Action: None\r\n- Critical: 100. 
Action: Slack\r\n\r\nThen if we go from critical -> warning state the slack resolve action will fail to fire.\r\n\r\n### Cause\r\nThe reason this happens is related to a previous fix. For an alert like\r\n- Warning: 50. Action: Slack\r\n- Critical: 100. Action: Slack\r\n\r\nWhen going from critical -> warning the critical action would be marked as resolved. This would\r\ncause a slack notification with `Resolved` to be sent to the channel. This is misleading, because\r\nthe alert is still active, just in the warning state. What we want here is to fire a warning\r\nnotification instead.\r\n\r\nThe initial fix for this was that when we resolved a critical trigger, we’d check and see whether\r\nthere was an active warning trigger. If so, we’d send a warning trigger fire to our actions, rather\r\nthan a critical trigger resolve. This works ok for many cases, but fails when the actions on the\r\nwarning trigger are different to those on the critical trigger.\r\n\r\n### Fix\r\nSubstituting the warning trigger for the critical trigger causes us subtle bugs. So, instead of\r\nthis, when triggering fires/resolves on our action handlers we will also pass along the incident\r\nstate change that the trigger/resolve caused the incident to go into.\r\n\r\nSo if a critical trigger resolves, we check what state it would have put the incident in. If\r\nthere’s a warning trigger, then the state is warning. If no warning trigger, the state is closed.\r\nThis state is then used to appropriately generate the messages that we send to users via our\r\nvarious actions.\r\n\r\nSo now, If we have an alert set up like:\r\n- Warning: 50. Action: None\r\n- Critical: 100. Action: Slack\r\n\r\nIf this goes from\r\n- critical -> warning OR critical -> resolved we will send `IncidentStatus.WARNING` to any actions\r\nrelated to the critical trigger. \r\n- warning -> resolved We do nothing since there are no actions on the warning trigger\r\n\r\nIf we have an alert set up like:\r\n- Warning: 50. Action: Slack\r\n- Critical: 100. Action: Slack\r\n\r\nIf this goes from:\r\n- critical -> warning: critical trigger, `IncidentStatus.Warning`\r\n- warning -> resolved: warning trigger, `IncidentStatus.Closed`\r\n- critical -> resolved: Since we de-dupe triggers to avoid spamming the user, we will select the\r\nwarning trigger here, and send `IncidentStatus.closed`\r\n\r\nIf we have an alert set up like:\r\n- Warning: 50. Action: Slack\r\n- Critical: 100. Action: Pagerduty\r\n\r\nIf this goes from:\r\n- critical -> warning: critical trigger, `IncidentStatus.Warning` sent to Pagerduty. Nothing sent\r\nto Slack\r\n- warning -> resolved: warning trigger, `IncidentStatus.Closed` sent to Slack. Nothing sent to\r\nPagerduty\r\n- critical -> resolved: Critical trigger, `IncidentStatus.Warning` sent to Pagerduty. Warning\r\ntrigger, `IncidentStatus.Closed` sent to Slack. 
We don’t de-dupe here since the actions are\r\ndifferent.", "code": "def test_context_for_resolved_crash_rate_alert(self):\n \n status = TriggerStatus.RESOLVED\n incident = self.create_incident()\n alert_rule = self.create_alert_rule(\n aggregate=\"percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate\",\n threshold_type=AlertRuleThresholdType.BELOW,\n query=\"\",\n )\n alert_rule_trigger = self.create_alert_rule_trigger(alert_rule)\n action = self.create_alert_rule_trigger_action(\n alert_rule_trigger=alert_rule_trigger, triggered_for_incident=incident\n )\n generated_email_context = generate_incident_trigger_email_context(\n self.project, incident, action.alert_rule_trigger, status, IncidentStatus.CLOSED\n )\n assert generated_email_context[\"aggregate\"] == \"percentage(sessions_crashed, sessions)\"\n assert generated_email_context[\"threshold\"] == 100\n assert generated_email_context[\"threshold_direction_string\"] == \">\"\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 195, "n_words": 49, "vocab_size": 38, "complexity": 1, "nloc": 18, "token_counts": 102, "n_ast_nodes": 168, "n_identifiers": 24, "d_id": 19309, "documentation": { "docstring": "\n Test that ensures the resolved notification contains the correct threshold string\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 26, "language": "en" } }, { "id": 35529, "commit_id": "8635407bc724c45142c1f91dbc9ef3ea681e1a56", "repo": "transformers", "path": "templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_tf_{{cookiecutter.lowercase_modelname}}.py", "file_name": "test_modeling_tf_{{cookiecutter.lowercase_modelname}}.py", "fun_name": "test_causal_lm_model_past_with_attn_mask", "commit_message": "Fix tf.concatenate + test past_key_values for TF models (#15774)\n\n* fix wrong method name tf.concatenate\r\n\r\n* add tests related to causal LM / decoder\r\n\r\n* make style and quality\r\n\r\n* clean-up\r\n\r\n* Fix TFBertModel's extended_attention_mask when past_key_values is provided\r\n\r\n* Fix tests\r\n\r\n* fix copies\r\n\r\n* More tf.int8 -> tf.int32 in TF test template\r\n\r\n* clean-up\r\n\r\n* Update TF test template\r\n\r\n* revert the previous commit + update the TF test template\r\n\r\n* Fix TF template extended_attention_mask when past_key_values is provided\r\n\r\n* Fix some styles manually\r\n\r\n* clean-up\r\n\r\n* Fix ValueError: too many values to unpack in the test\r\n\r\n* Fix more: too many values to unpack in the test\r\n\r\n* Add a comment for extended_attention_mask when there is past_key_values\r\n\r\n* Fix TFElectra extended_attention_mask when past_key_values is provided\r\n\r\n* Add tests to other TF models\r\n\r\n* Fix for TF Electra test: add prepare_config_and_inputs_for_decoder\r\n\r\n* Fix not passing training arg to lm_head in TFRobertaForCausalLM\r\n\r\n* Fix tests (with past) for TF Roberta\r\n\r\n* add testing for pask_key_values for TFElectra model\r\n\r\nCo-authored-by: ydshieh ", "code": "def test_causal_lm_model_past_with_attn_mask(self):\n \n config_and_inputs = self.model_tester.prepare_config_and_inputs()\n self.model_tester.create_and_check_causal_lm_model_past_with_attn_mask(*config_and_inputs)\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 27, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 24, 
"n_ast_nodes": 43, "n_identifiers": 6, "d_id": 6472, "documentation": { "docstring": "Test the causal LM model with `past_key_values` and `attention_mask`", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 44455, "commit_id": "3e9828022b03b60d9e112f1f64340a528c8407e3", "repo": "airflow", "path": "airflow/www/utils.py", "file_name": "utils.py", "fun_name": "should_show", "commit_message": "Simplify fab has access lookup (#19294)\n\n* Use FAB models.\r\n\r\n* Remove incorrect conversions to new permission naming scheme.\r\n\r\n* Fix missing FAB renames.\r\n\r\n* Remove unused FAB compatibility fixes in models.py.\r\n\r\n* Set perms directly on user objects.\r\n\r\n* Set perms properties on User model.\r\n\r\n* Rename missed old naming scheme conversion.\r\n\r\n* Remove unused imports.\r\n\r\n* Remove unused imports.\r\n\r\n* Remeve get_user_roles() method.\r\n\r\n* Make permissions eagerload.\r\n\r\n* Remove unused imports.\r\n\r\n* Clarify query params.\r\n\r\n* Modify sort logic so MSSQL passes.\r\n\r\n* Add text modifier to order_by values.\r\n\r\n* Remove calls to get_*_dags.\r\n\r\n* Add back execution_date\r\n\r\n* Add back comma to match rest of file.\r\n\r\n* Remove unused permission functions.\r\n\r\n* Fix failing tests.\r\n\r\n* Pass user object to current_app.appbuilder.sm.has_all_dags_access.\r\n\r\n* Remove attempts to fix query.\r\n\r\n* Update the api_connexion query builders.\r\n\r\n* Add typing.\r\n\r\n* Apply sorts directly to model objects.\r\n\r\n* Apply sorts directly to model objects.\r\n\r\n* Standardize custom sort code.\r\n\r\n* Code review\r\n\r\n* Augment xcom docs (#20755)\r\n\r\n* Fix relationship join bug in FAB/SecurityManager with SQLA 1.4 (#21296)\r\n\r\nThis is fixed in SQLA 1.4.19, but the fix makes the intent clearer here\r\nanyway.\r\n\r\n* Docs: Fix task order in overview example (#21282)\r\n\r\n* Update stat_name_handler documentation (#21298)\r\n\r\nPreviously stat_name_handler was under the scheduler section of the\r\nconfiguration but it was moved to the metrics section since 2.0.0.\r\n\r\n* Update recipe for Google Cloud SDK (#21268)\r\n\r\n* Use FAB models.\r\n\r\n* Remove incorrect conversions to new permission naming scheme.\r\n\r\n* Fix missing FAB renames.\r\n\r\n* Remove unused FAB compatibility fixes in models.py.\r\n\r\n* Set perms directly on user objects.\r\n\r\n* Set perms properties on User model.\r\n\r\n* Rename missed old naming scheme conversion.\r\n\r\n* Remove unused imports.\r\n\r\n* Remove unused imports.\r\n\r\n* Remeve get_user_roles() method.\r\n\r\n* Make permissions eagerload.\r\n\r\n* Remove unused imports.\r\n\r\n* Clarify query params.\r\n\r\n* Modify sort logic so MSSQL passes.\r\n\r\n* Add text modifier to order_by values.\r\n\r\n* Remove calls to get_*_dags.\r\n\r\n* Add back execution_date\r\n\r\n* Add back comma to match rest of file.\r\n\r\n* Remove unused permission functions.\r\n\r\n* Fix failing tests.\r\n\r\n* Pass user object to current_app.appbuilder.sm.has_all_dags_access.\r\n\r\n* Remove attempts to fix query.\r\n\r\n* Update the api_connexion query builders.\r\n\r\n* Add typing.\r\n\r\n* Apply sorts directly to model objects.\r\n\r\n* Apply sorts directly to model objects.\r\n\r\n* Standardize custom sort code.\r\n\r\n* Make sure joined fields prefetch.\r\n\r\n* Dont use cached_property, since its only on > 3.8.\r\n\r\nCo-authored-by: Ash Berlin-Taylor \r\nCo-authored-by: Lewis John McGibbney \r\nCo-authored-by: Ash Berlin-Taylor \r\nCo-authored-by: Lucia Kasman 
<38845383+luciakasman@users.noreply.github.com>\r\nCo-authored-by: Fran Sánchez \r\nCo-authored-by: Kamil Breguła ", "code": "def should_show(self, securitymanager) -> bool:\n \n if self.roles:\n user_roles = {r.name for r in securitymanager.current_user.roles}\n if not user_roles.intersection(set(self.roles)):\n return False\n return True\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 79, "n_words": 21, "vocab_size": 19, "complexity": 4, "nloc": 7, "token_counts": 48, "n_ast_nodes": 77, "n_identifiers": 11, "d_id": 8273, "documentation": { "docstring": "Determine if the user should see the message based on their role membership", "n_words": 13, "vocab_size": 12, "n_whitespaces": 12, "language": "en" } }, { "id": 219909, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pyio.py", "file_name": "_pyio.py", "fun_name": "writelines", "commit_message": "add python 3.10.4 for windows", "code": "def writelines(self, lines):\n \n self._checkClosed()\n for line in lines:\n self.write(line)\n\nio.IOBase.register(IOBase)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 41, "n_words": 10, "vocab_size": 10, "complexity": 2, "nloc": 4, "token_counts": 24, "n_ast_nodes": 54, "n_identifiers": 9, "d_id": 55895, "documentation": { "docstring": "Write a list of lines to the stream.\n\n Line separators are not added, so it is usual for each of the lines\n provided to have a line separator at the end.\n ", "n_words": 31, "vocab_size": 25, "n_whitespaces": 52, "language": "en" } }, { "id": 101558, "commit_id": "7da2cc3dd266aabebf41a31384cc2e0e7e5af6e5", "repo": "faceswap", "path": "lib/training/preview_tk.py", "file_name": "preview_tk.py", "fun_name": "_on_slider_update", "commit_message": "Training - Use custom preview pop-out", "code": "def _on_slider_update(self, value) -> None:\n \n self.scale_var.set(f\"{value}%\")\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 10, "token_counts": 19, "n_ast_nodes": 37, "n_identifiers": 5, "d_id": 20968, "documentation": { "docstring": " Callback for when the scale slider is adjusted. 
Adjusts the combo box display to the\n current slider value.\n\n Parameters\n ----------\n value: int\n The value that the slider has been set to\n ", "n_words": 31, "vocab_size": 25, "n_whitespaces": 79, "language": "en" } }, { "id": 196204, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/subsets.py", "file_name": "subsets.py", "fun_name": "unrank_binary", "commit_message": "Updated import locations", "code": "def unrank_binary(self, rank, superset):\n \n bits = bin(rank)[2:].rjust(len(superset), '0')\n return Subset.subset_from_bitlist(superset, bits)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 32, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 3, "token_counts": 39, "n_ast_nodes": 63, "n_identifiers": 10, "d_id": 47704, "documentation": { "docstring": "\n Gets the binary ordered subset of the specified rank.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Subset\n >>> Subset.unrank_binary(4, ['a', 'b', 'c', 'd']).subset\n ['b']\n\n See Also\n ========\n\n iterate_binary, rank_binary\n ", "n_words": 28, "vocab_size": 25, "n_whitespaces": 99, "language": "en" } }, { "id": 197244, "commit_id": "29e153dd0a70a3fe97c2a9a5f752334e937023c5", "repo": "sympy", "path": "sympy/polys/polytools.py", "file_name": "polytools.py", "fun_name": "lcm_list", "commit_message": "update some type hints", "code": "def lcm_list(seq, *gens, **args):\n \n seq = sympify(seq)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 16, "nloc": 35, "token_counts": 254, "n_ast_nodes": 31, "n_identifiers": 5, "d_id": 48406, "documentation": { "docstring": "\n Compute LCM of a list of polynomials.\n\n Examples\n ========\n\n >>> from sympy import lcm_list\n >>> from sympy.abc import x\n\n >>> lcm_list([x**3 - 1, x**2 - 1, x**2 - 3*x + 2])\n x**5 - x**4 - 2*x**3 - x**2 + x + 2\n\n ", "n_words": 42, "vocab_size": 26, "n_whitespaces": 67, "language": "en" } }, { "id": 280181, "commit_id": "8c401c032b3021f89609eac79bd1c881b9bbc84f", "repo": "keras", "path": "keras/layers/normalization/batch_normalization.py", "file_name": "batch_normalization.py", "fun_name": "_calculate_mean_and_var", "commit_message": "Merge `SyncBatchNormalization` into `BatchNormalization` with parameter `use_sync`\n\nPiperOrigin-RevId: 482921013", "code": "def _calculate_mean_and_var(self, x, axes, keep_dims):\n \n if self.synchronized:\n return self._sync_calculate_mean_and_var(x, axes, keep_dims)\n else:\n return super()._calculate_mean_and_var(x, axes, keep_dims)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 59, "n_words": 16, "vocab_size": 12, "complexity": 2, "nloc": 5, "token_counts": 43, "n_ast_nodes": 65, "n_identifiers": 8, "d_id": 83281, "documentation": { "docstring": "Override mean and var calculation when used with `synchronized`.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 268054, "commit_id": "3eb0485dd92c88cc92152d3656d94492db44b183", "repo": "ansible", "path": "test/lib/ansible_test/_internal/test.py", "file_name": "test.py", "fun_name": "format_block", "commit_message": "ansible-test - Use more native type hints. 
(#78435)\n\n* ansible-test - Use more native type hints.\r\n\r\nSimple search and replace to switch from comments to native type hints for return types of functions with no arguments.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of simple single-line function annotation type comments to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of single-line function annotation type comments with default values to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nManual conversion of type annotation comments for functions which have pylint directives.", "code": "def format_block(self) -> str:\n \n if self.summary:\n block = self.summary\n else:\n block = '\\n'.join(m.format() for m in self.messages)\n\n message = block.strip()\n\n # Hack to remove ANSI color reset code from SubprocessError messages.\n message = message.replace(display.clear, '')\n\n return message\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 108, "n_words": 37, "vocab_size": 31, "complexity": 3, "nloc": 9, "token_counts": 59, "n_ast_nodes": 103, "n_identifiers": 14, "d_id": 79328, "documentation": { "docstring": "Format the test summary or messages as a block of text and return the result.", "n_words": 15, "vocab_size": 14, "n_whitespaces": 14, "language": "en" } }, { "id": 229865, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/funnelarea/_domain.py", "file_name": "_domain.py", "fun_name": "row", "commit_message": "switch to black .22", "code": "def row(self):\n \n return self[\"row\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 61538, "documentation": { "docstring": "\n If there is a layout grid, use the domain for this row in the\n grid for this funnelarea trace .\n\n The 'row' property is a integer and may be specified as:\n - An int (or float that will be cast to an int)\n in the interval [0, 9223372036854775807]\n\n Returns\n -------\n int\n ", "n_words": 51, "vocab_size": 42, "n_whitespaces": 121, "language": "en" } }, { "id": 248680, "commit_id": "6ad012ef89c966cbb3616c1be63d964db48d49ca", "repo": "synapse", "path": "synapse/logging/scopecontextmanager.py", "file_name": "scopecontextmanager.py", "fun_name": "active", "commit_message": "More type hints for `synapse.logging` (#13103)\n\nCompletes type hints for synapse.logging.scopecontextmanager and (partially)\r\nfor synapse.logging.opentracing.", "code": "def active(self) -> Optional[Scope]:\n \n ctx = current_context()\n return ctx.scope\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 13, "token_counts": 20, "n_ast_nodes": 35, "n_identifiers": 7, "d_id": 72415, "documentation": { "docstring": "\n Returns the currently active Scope which can be used to access the\n currently active Scope.span.\n If there is a non-null Scope, its wrapped Span\n becomes an implicit parent of any newly-created Span at\n Tracer.start_active_span() time.\n\n Return:\n The Scope that is active, or None if not available.\n ", "n_words": 
46, "vocab_size": 40, "n_whitespaces": 107, "language": "en" } }, { "id": 152977, "commit_id": "0faf4675140415e17d4112f9d0d37cfe87770b9e", "repo": "modin", "path": "modin/core/execution/ray/implementations/pandas_on_ray/io/io.py", "file_name": "io.py", "fun_name": "to_sql", "commit_message": "REFACTOR-#3871: move related to pandas functionality into 'PandasOnRayIO' class (#3872)\n\nSigned-off-by: Anatoly Myachev ", "code": "def to_sql(cls, qc, **kwargs):\n \n # we first insert an empty DF in order to create the full table in the database\n # This also helps to validate the input against pandas\n # we would like to_sql() to complete only when all rows have been inserted into the database\n # since the mapping operation is non-blocking, each partition will return an empty DF\n # so at the end, the blocking operation will be this empty DF to_pandas\n\n empty_df = qc.getitem_row_array([0]).to_pandas().head(0)\n empty_df.to_sql(**kwargs)\n # so each partition will append its respective DF\n kwargs[\"if_exists\"] = \"append\"\n columns = qc.columns\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 172, "n_words": 95, "vocab_size": 65, "complexity": 1, "nloc": 8, "token_counts": 77, "n_ast_nodes": 89, "n_identifiers": 9, "d_id": 35219, "documentation": { "docstring": "\n Write records stored in the `qc` to a SQL database.\n\n Parameters\n ----------\n qc : BaseQueryCompiler\n The query compiler of the Modin dataframe that we want to run ``to_sql`` on.\n **kwargs : dict\n Parameters for ``pandas.to_sql(**kwargs)``.\n ", "n_words": 35, "vocab_size": 31, "n_whitespaces": 100, "language": "en" } }, { "id": 109453, "commit_id": "c73f4c455514cf5422d27bf38c93250de8316b21", "repo": "matplotlib", "path": "lib/matplotlib/figure.py", "file_name": "figure.py", "fun_name": "align_xlabels", "commit_message": "Merge SubplotBase into AxesBase.", "code": "def align_xlabels(self, axs=None):\n \n if axs is None:\n axs = self.axes\n axs = [ax for ax in np.ravel(axs) if ax.get_subplotspec() is not None]\n for ax in axs:\n _log.debug(' Working on: %s', ax.get_xlabel())\n rowspan = ax.get_subplotspec().rowspan\n pos = ax.xaxis.get_label_position() # top or bottom\n # Search through other axes for label positions that are same as\n # this one and that share the appropriate row number.\n # Add to a grouper associated with each axes of siblings.\n # This list is inspected in `axis.draw` by\n # `axis._update_label_position`.\n for axc in axs:\n if axc.xaxis.get_label_position() == pos:\n rowspanc = axc.get_subplotspec().rowspan\n if (pos == 'top' and rowspan.start == rowspanc.start or\n pos == 'bottom' and rowspan.stop == rowspanc.stop):\n # grouper for groups of xlabels to align\n self._align_label_groups['x'].join(ax, axc)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 386, "n_words": 121, "vocab_size": 82, "complexity": 11, "nloc": 14, "token_counts": 143, "n_ast_nodes": 240, "n_identifiers": 21, "d_id": 23596, "documentation": { "docstring": "\n Align the xlabels of subplots in the same subplot column if label\n alignment is being done automatically (i.e. the label position is\n not manually set).\n\n Alignment persists for draw events after this is called.\n\n If a label is on the bottom, it is aligned with labels on Axes that\n also have their label on the bottom and that have the same\n bottom-most subplot row. 
If the label is on the top,\n it is aligned with labels on Axes with the same top-most row.\n\n Parameters\n ----------\n axs : list of `~matplotlib.axes.Axes`\n Optional list of (or ndarray) `~matplotlib.axes.Axes`\n to align the xlabels.\n Default is to align all Axes on the figure.\n\n See Also\n --------\n matplotlib.figure.Figure.align_ylabels\n matplotlib.figure.Figure.align_labels\n\n Notes\n -----\n This assumes that ``axs`` are from the same `.GridSpec`, so that\n their `.SubplotSpec` positions correspond to figure positions.\n\n Examples\n --------\n Example with rotated xtick labels::\n\n fig, axs = plt.subplots(1, 2)\n for tick in axs[0].get_xticklabels():\n tick.set_rotation(55)\n axs[0].set_xlabel('XLabel 0')\n axs[1].set_xlabel('XLabel 1')\n fig.align_xlabels()\n ", "n_words": 156, "vocab_size": 99, "n_whitespaces": 422, "language": "en" } }, { "id": 121203, "commit_id": "b22121c0c1579dd5108825becac42d5db1b29276", "repo": "jax", "path": "jax/experimental/jax2tf/impl_no_xla.py", "file_name": "impl_no_xla.py", "fun_name": "_pad_spatial_dims", "commit_message": "[jax2tf] Fixes for handling of convolutions with shape_polymorphism and enable_xla=False\n\nIssue: #11402\n\nDue to a typo we were running no tests for convolutions with shape\npolymorphism and enable_xla=False.\n\nAdded a few more tests from #11402 (Thanks @sdenton4).\n\nThe main issue was that in presence of shape polymorphism we cannot\njust use `x.shape` for a TF value `x` because it will contain `None`\nin the place of unknown dimensions. We must use instead the JAX\nabstract values.\n\nThis does not fix all issues reported in #11402, there is still the\ncomputation of padding or padding=\"SAME\". Commented out the\ncorresponding test.", "code": "def _pad_spatial_dims(x, x_shape, padding, is_conv1d):\n \n # Add empty padding for batch and feature dimensions.\n no_pad = ((0, 0),)\n padding = tuple(padding)\n if is_conv1d:\n padding = no_pad + padding + no_pad\n # Add empty padding for dummy dimension, too.\n padding = no_pad + padding + no_pad + no_pad\n else:\n padding = no_pad + padding + no_pad\n x = tf.pad(x, padding)\n assert len(x.shape) == len(padding)\n x_shape = tuple(p0 + xs + p1 for xs, (p0, p1) in zip(x_shape, padding))\n jax2tf._assert_matching_abstract_shape(x, x_shape)\n return x, x_shape\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 105, "n_words": 82, "vocab_size": 47, "complexity": 3, "nloc": 13, "token_counts": 115, "n_ast_nodes": 178, "n_identifiers": 17, "d_id": 27040, "documentation": { "docstring": "Pads `x` using `padding`, which specifies padding for the spatial dimensions.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 196246, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/functions/elementary/piecewise.py", "file_name": "piecewise.py", "fun_name": "_intervals", "commit_message": "Updated import locations", "code": "def _intervals(self, sym):\n \n from sympy.solvers.inequalities import _solve_inequality\n\n assert isinstance(self, Piecewise)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 31, "nloc": 82, "token_counts": 577, "n_ast_nodes": 36, "n_identifiers": 9, "d_id": 47746, "documentation": { "docstring": "Return a list of unique tuples, (a, b, e, i), where a and b\n 
are the lower and upper bounds in which the expression e of\n argument i in self is defined and $a < b$ (when involving\n numbers) or $a \\le b$ when involving symbols.\n\n If there are any relationals not involving sym, or any\n relational cannot be solved for sym, NotImplementedError is\n raised. The calling routine should have removed such\n relationals before calling this routine.\n\n The evaluated conditions will be returned as ranges.\n Discontinuous ranges will be returned separately with\n identical expressions. The first condition that evaluates to\n True will be returned as the last tuple with a, b = -oo, oo.\n ", "n_words": 114, "vocab_size": 84, "n_whitespaces": 198, "language": "en" } }, { "id": 196113, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/pc_groups.py", "file_name": "pc_groups.py", "fun_name": "subword_index", "commit_message": "Updated import locations", "code": "def subword_index(self, word, w):\n \n low = -1\n high = -1\n for i in range(len(word)-len(w)+1):\n if word.subword(i, i+len(w)) == w:\n low = i\n high = i+len(w)\n break\n if low == high == -1:\n return -1, -1\n return low, high\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 147, "n_words": 38, "vocab_size": 23, "complexity": 4, "nloc": 11, "token_counts": 83, "n_ast_nodes": 133, "n_identifiers": 10, "d_id": 47613, "documentation": { "docstring": "\n Returns the start and ending index of a given\n subword in a word.\n\n Parameters\n ==========\n\n word : FreeGroupElement\n word defined on free group elements for a\n polycyclic group.\n w : FreeGroupElement\n subword of a given word, whose starting and\n ending index to be computed.\n\n Returns\n =======\n\n (i, j)\n A tuple containing starting and ending index of ``w``\n in the given word.\n\n Examples\n ========\n\n >>> from sympy.combinatorics.named_groups import SymmetricGroup\n >>> from sympy.combinatorics import free_group\n >>> G = SymmetricGroup(4)\n >>> PcGroup = G.polycyclic_group()\n >>> collector = PcGroup.collector\n >>> F, x1, x2 = free_group(\"x1, x2\")\n >>> word = x2**2*x1**7\n >>> w = x2**2*x1\n >>> collector.subword_index(word, w)\n (0, 3)\n >>> w = x1**7\n >>> collector.subword_index(word, w)\n (2, 9)\n\n ", "n_words": 114, "vocab_size": 69, "n_whitespaces": 356, "language": "en" } }, { "id": 271980, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training_v1.py", "file_name": "training_v1.py", "fun_name": "_add_unique_metric_name", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _add_unique_metric_name(self, metric_name, metric_fn, output_index):\n \n # For multi-output models, prepend the output names to the metric name.\n if len(self.output_names) > 1:\n # If we're loading from an already-serialized model, we've already\n # prepended the output name, and we don't want to do it again.\n #\n # Alternatively, we may be receiving a stateless metric (e.g. 
the string\n # \"accuracy\") rather than a `Metric` object, in which case we want to\n # prepend the output name even if we are loading a serialized model.\n if not getattr(metric_fn, \"_from_serialized\", False):\n metric_name = \"%s_%s\" % (\n self.output_names[output_index],\n metric_name,\n )\n\n j = 1\n base_metric_name = metric_name\n while metric_name in self.metrics_names:\n metric_name = \"%s_%d\" % (base_metric_name, j)\n j += 1\n\n return metric_name\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 333, "n_words": 117, "vocab_size": 80, "complexity": 4, "nloc": 13, "token_counts": 75, "n_ast_nodes": 127, "n_identifiers": 11, "d_id": 80920, "documentation": { "docstring": "Makes the metric name unique.\n\n If there are multiple outputs for which the metrics are calculated, the\n metric names have to be made unique by appending an integer.\n\n Args:\n metric_name: Metric name that corresponds to the metric specified by the\n user. For example: 'acc'.\n metric_fn: The Metric object.\n output_index: The index of the model output for which the metric name is\n being added.\n\n Returns:\n string, name of the model's unique metric name\n ", "n_words": 72, "vocab_size": 48, "n_whitespaces": 171, "language": "en" } }, { "id": 101067, "commit_id": "049314429f71a21e6595e9d27e9e36f6a3479c42", "repo": "faceswap", "path": "plugins/convert/writer/opencv.py", "file_name": "opencv.py", "fun_name": "close", "commit_message": "Convert: Add option to output mask separately for draw-transparent", "code": "def close(self) -> None:\n \n return\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 3, "token_counts": 9, "n_ast_nodes": 18, "n_identifiers": 2, "d_id": 20504, "documentation": { "docstring": " Does nothing as OpenCV writer does not need a close method ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 12, "language": "en" } }, { "id": 116755, "commit_id": "47c5e0ac2d89807f8ff7239d423a3d346bd39a1e", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/teradata_handler/teradata_handler.py", "file_name": "teradata_handler.py", "fun_name": "get_tables", "commit_message": "feat: add teradata integration", "code": "def get_tables(self) -> Response:\n \n\n return self.native_query(\n str(text(f).bindparams(\n bindparam('database', value=self.database, type_=String)\n ).compile(compile_kwargs={\"literal_binds\": True}))\n )\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 71, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 18, "token_counts": 51, "n_ast_nodes": 86, "n_identifiers": 14, "d_id": 25827, "documentation": { "docstring": "\n List all tables in Teradata in the current database\n \n SELECT DataBaseName,\n TableName,\n TableKind\n FROM DBC.TablesV\n WHERE DatabaseName = :database\n AND (TableKind = 'T'\n OR TableKind = 'O'\n OR TableKind = 'Q')\n ", "n_words": 31, "vocab_size": 24, "n_whitespaces": 168, "language": "en" } }, { "id": 65644, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/controllers/queries.py", "file_name": "queries.py", "fun_name": "get_project_name", "commit_message": "style: format code with black", "code": "def get_project_name(doctype, txt, searchfield, start, 
page_len, filters):\n\tcond = \"\"\n\tif filters and filters.get(\"customer\"):\n\t\tcond = % (\n\t\t\tfrappe.db.escape(filters.get(\"customer\"))\n\t\t)\n\n\tfields = get_fields(\"Project\", [\"name\", \"project_name\"])\n\tsearchfields = frappe.get_meta(\"Project\").get_search_fields()\n\tsearchfields = \" or \".join([field + \" like %(txt)s\" for field in searchfields])\n\n\treturn frappe.db.sql(\n\t\t.format(\n\t\t\tfields=\", \".join([\"`tabProject`.{0}\".format(f) for f in fields]),\n\t\t\tcond=cond,\n\t\t\tscond=searchfields,\n\t\t\tmatch_cond=get_match_cond(doctype),\n\t\t\tstart=start,\n\t\t\tpage_len=page_len,\n\t\t),\n\t\t{\"txt\": \"%{0}%\".format(txt), \"_txt\": txt.replace(\"%\", \"\")},\n\t)\n\n\n@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 43, "n_words": 64, "vocab_size": 54, "complexity": 5, "nloc": 29, "token_counts": 166, "n_ast_nodes": 296, "n_identifiers": 28, "d_id": 13966, "documentation": { "docstring": "(`tabProject`.customer = %s or\n\t\t\tifnull(`tabProject`.customer,\"\")=\"\") andselect {fields} from `tabProject`\n\t\twhere\n\t\t\t`tabProject`.status not in (\"Completed\", \"Cancelled\")\n\t\t\tand {cond} {scond} {match_cond}\n\t\torder by\n\t\t\tif(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),\n\t\t\tidx desc,\n\t\t\t`tabProject`.name asc\n\t\tlimit {start}, {page_len}", "n_words": 33, "vocab_size": 32, "n_whitespaces": 23, "language": "en" } }, { "id": 56293, "commit_id": "c33f87fc7e0b6fb4714a88b492e7545f4dbd821f", "repo": "prefect", "path": "tests/utilities/test_collections.py", "file_name": "test_collections.py", "fun_name": "test_visit_collection_with_private_pydantic", "commit_message": "get private attrs working", "code": "async def test_visit_collection_with_private_pydantic(self):\n \n input = PrivatePydantic(x=1)\n input._y = 2\n input._z = 4\n\n result = await visit_collection(\n input, visit_fn=visit_even_numbers, return_data=False\n )\n assert result is None\n assert EVEN == {2, 4}\n\n result = await visit_collection(\n input, visit_fn=negative_even_numbers, return_data=True\n )\n assert result == input\n assert result.__private_attributes__ == input.__private_attributes__\n breakpoint()\n assert result._y == -2\n assert result._z == -4\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 181, "n_words": 54, "vocab_size": 33, "complexity": 1, "nloc": 17, "token_counts": 95, "n_ast_nodes": 150, "n_identifiers": 16, "d_id": 11499, "documentation": { "docstring": "Check that we successfully capture private pydantic fields", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 267584, "commit_id": "43153c58310d02223f2cb0964f4255ba1ac4ed53", "repo": "ansible", "path": "lib/ansible/playbook/playbook_include.py", "file_name": "playbook_include.py", "fun_name": "load_data", "commit_message": "`FieldAttribute`s as descriptors (#73908)", "code": "def load_data(self, ds, basedir, variable_manager=None, loader=None):\n \n\n # import here to avoid a dependency loop\n from ansible.playbook import Playbook\n from ansible.playbook.play import Play\n\n # first, we use the original parent method to correctly load the object\n # via the load_data/preprocess_data system we normally use for other\n # playbook 
objects\n new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader)\n\n all_vars = self.vars.copy()\n if variable_manager:\n all_vars.update(variable_manager.get_vars())\n\n templar = Templar(loader=loader, variables=all_vars)\n\n # then we use the object to load a Playbook\n pb = Playbook(loader=loader)\n\n file_name = templar.template(new_obj.import_playbook)\n\n # check for FQCN\n resource = _get_collection_playbook_path(file_name)\n if resource is not None:\n playbook = resource[1]\n playbook_collection = resource[2]\n else:\n # not FQCN try path\n playbook = file_name\n if not os.path.isabs(playbook):\n playbook = os.path.join(basedir, playbook)\n\n # might still be collection playbook\n playbook_collection = _get_collection_name_from_path(playbook)\n\n if playbook_collection:\n # it is a collection playbook, setup default collections\n AnsibleCollectionConfig.default_collection = playbook_collection\n else:\n # it is NOT a collection playbook, setup adjecent paths\n AnsibleCollectionConfig.playbook_paths.append(os.path.dirname(os.path.abspath(to_bytes(playbook, errors='surrogate_or_strict'))))\n\n pb._load_playbook_data(file_name=playbook, variable_manager=variable_manager, vars=self.vars.copy())\n\n # finally, update each loaded playbook entry with any variables specified\n # on the included playbook and/or any tags which may have been set\n for entry in pb._entries:\n\n # conditional includes on a playbook need a marker to skip gathering\n if new_obj.when and isinstance(entry, Play):\n entry._included_conditional = new_obj.when[:]\n\n temp_vars = entry.vars.copy()\n temp_vars.update(new_obj.vars)\n param_tags = temp_vars.pop('tags', None)\n if param_tags is not None:\n entry.tags.extend(param_tags.split(','))\n entry.vars = temp_vars\n entry.tags = list(set(entry.tags).union(new_obj.tags))\n if entry._included_path is None:\n entry._included_path = os.path.dirname(playbook)\n\n # Check to see if we need to forward the conditionals on to the included\n # plays. 
If so, we can take a shortcut here and simply prepend them to\n # those attached to each block (if any)\n if new_obj.when:\n for task_block in (entry.pre_tasks + entry.roles + entry.tasks + entry.post_tasks):\n task_block._when = new_obj.when[:] + task_block.when[:]\n\n return pb\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 829, "n_words": 285, "vocab_size": 164, "complexity": 12, "nloc": 40, "token_counts": 384, "n_ast_nodes": 620, "n_identifiers": 64, "d_id": 78964, "documentation": { "docstring": "\n Overrides the base load_data(), as we're actually going to return a new\n Playbook() object rather than a PlaybookInclude object\n ", "n_words": 19, "vocab_size": 17, "n_whitespaces": 41, "language": "en" } }, { "id": 229084, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/carpet/_aaxis.py", "file_name": "_aaxis.py", "fun_name": "arraydtick", "commit_message": "switch to black .22", "code": "def arraydtick(self):\n \n return self[\"arraydtick\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 60757, "documentation": { "docstring": "\n The stride between grid lines along the axis\n\n The 'arraydtick' property is a integer and may be specified as:\n - An int (or float that will be cast to an int)\n in the interval [1, 9223372036854775807]\n\n Returns\n -------\n int\n ", "n_words": 39, "vocab_size": 35, "n_whitespaces": 102, "language": "en" } }, { "id": 8745, "commit_id": "e2dbab9adf85a018bc6279c9538d995a2227f619", "repo": "ludwig", "path": "ludwig/schema/model_config.py", "file_name": "model_config.py", "fun_name": "to_dict", "commit_message": "fix: Restrict allowed top-level config keys (#2826)\n\n* fix\r\n\r\n* add ludwig_version\r\n\r\n* prints\r\n\r\n* remove extra", "code": "def to_dict(self) -> Dict[str, any]:\n \n input_features = [feat for feat in self.input_features.to_list() if feat[\"active\"]]\n output_features = [feat for feat in self.output_features.to_list() if feat[\"active\"]]\n\n config_dict = {\n \"model_type\": self.model_type,\n \"input_features\": input_features,\n \"output_features\": output_features,\n \"trainer\": self.trainer.to_dict(),\n \"preprocessing\": self.preprocessing.to_dict(),\n \"hyperopt\": self.hyperopt,\n \"defaults\": self.defaults.to_dict(),\n }\n\n if self.combiner is not None:\n config_dict[\"combiner\"] = self.combiner.to_dict()\n return convert_submodules(config_dict)\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 188, "n_words": 51, "vocab_size": 41, "complexity": 6, "nloc": 21, "token_counts": 132, "n_ast_nodes": 219, "n_identifiers": 17, "d_id": 1495, "documentation": { "docstring": "This method converts the current config object into an equivalent dictionary representation for the\n parts of the codebase that use the dictionary representation of the config.\n\n Returns:\n Config Dictionary\n ", "n_words": 29, "vocab_size": 22, "n_whitespaces": 61, "language": "en" } }, { "id": 128345, "commit_id": "0e8eb8aedb3e158da8c3e7378e818ce87ca7813e", "repo": "ray", "path": "python/ray/train/train_loop_utils.py", "file_name": "train_loop_utils.py", 
"fun_name": "local_rank", "commit_message": "[AIR] More Train and Tune session deprecations (#28856)\n\nSigned-off-by: Amog Kamsetty amogkamsetty@yahoo.com\r\n\r\nFinish marking train. and tune. session APIs as deprecated", "code": "def local_rank() -> int:\n ", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "\"\"\"Get the local rank of thisrank of the worker on its..block::", "n_ast_errors": 4, "ast_levels": 10, "n_whitespaces": 7, "n_words": 4, "vocab_size": 4, "complexity": 2, "nloc": 28, "token_counts": 47, "n_ast_nodes": 57, "n_identifiers": 18, "d_id": 28679, "documentation": { "docstring": "Get the local rank of this worker (rank of the worker on its node).\n\n .. code-block:: python\n\n import time\n from ray import train\n", "n_words": 23, "vocab_size": 19, "n_whitespaces": 39, "language": "en" } }, { "id": 266525, "commit_id": "fbb5d56bd274c44b193cb95f0230b9352f62aab2", "repo": "ansible", "path": "lib/ansible/plugins/callback/junit.py", "file_name": "junit.py", "fun_name": "_build_test_case", "commit_message": "ansible-test - Use relative paths in junit output. (#76871)\n\n* ansible-test - Use relative paths in junit output.\r\n\r\nAlso fix a traceback in the junit callback during automatic fact gathering.\r\n\r\n* ansible-test - Handle out-of-tree JUnit paths.", "code": "def _build_test_case(self, task_data, host_data):\n \n\n name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)\n duration = host_data.finish - task_data.start\n\n if self._task_relative_path and task_data.path:\n junit_classname = os.path.relpath(task_data.path, self._task_relative_path)\n else:\n junit_classname = task_data.path\n\n if self._replace_out_of_tree_path is not None and junit_classname.startswith('../'):\n junit_classname = self._replace_out_of_tree_path + os.path.basename(junit_classname)\n\n if self._task_class == 'true':\n junit_classname = re.sub(r'\\.yml:[0-9]+$', '', junit_classname)\n\n if host_data.status == 'included':\n return TestCase(name=name, classname=junit_classname, time=duration, system_out=str(host_data.result))\n\n res = host_data.result._result\n rc = res.get('rc', 0)\n dump = self._dump_results(res, indent=0)\n dump = self._cleanse_string(dump)\n\n if host_data.status == 'ok':\n return TestCase(name=name, classname=junit_classname, time=duration, system_out=dump)\n\n test_case = TestCase(name=name, classname=junit_classname, time=duration)\n\n if host_data.status == 'failed':\n if 'exception' in res:\n message = res['exception'].strip().split('\\n')[-1]\n output = res['exception']\n test_case.errors.append(TestError(message=message, output=output))\n elif 'msg' in res:\n message = res['msg']\n test_case.failures.append(TestFailure(message=message, output=dump))\n else:\n test_case.failures.append(TestFailure(message='rc=%s' % rc, output=dump))\n elif host_data.status == 'skipped':\n if 'skip_reason' in res:\n message = res['skip_reason']\n else:\n message = 'skipped'\n test_case.skipped = message\n\n return test_case\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 509, "n_words": 138, "vocab_size": 81, "complexity": 13, "nloc": 37, "token_counts": 360, "n_ast_nodes": 589, "n_identifiers": 46, "d_id": 78457, "documentation": { "docstring": " build a TestCase from the given TaskData and HostData ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 10, "language": "en" } }, { "id": 173538, "commit_id": 
"f73ff0c91f0159a925fb6547612199bb7c915248", "repo": "magenta", "path": "magenta/models/onsets_frames_transcription/model.py", "file_name": "model.py", "fun_name": "model_fn", "commit_message": "Explicitly import estimator from tensorflow as a separate import instead of accessing it via tf.estimator and depend on the tensorflow estimator target.\n\nPiperOrigin-RevId: 436568278", "code": "def model_fn(features, labels, mode, params, config):\n \n del config\n hparams = params\n\n length = features.length\n spec = features.spec\n\n is_training = mode == tf_estimator.ModeKeys.TRAIN\n\n if is_training:\n onset_labels = labels.onsets\n offset_labels = labels.offsets\n velocity_labels = labels.velocities\n frame_labels = labels.labels\n frame_label_weights = labels.label_weights\n\n if hparams.stop_activation_gradient and not hparams.activation_loss:\n raise ValueError(\n 'If stop_activation_gradient is true, activation_loss must be true.')\n\n losses = {}\n with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):\n with tf.variable_scope('onsets'):\n onset_outputs = acoustic_model(\n spec,\n hparams,\n lstm_units=hparams.onset_lstm_units,\n lengths=length)\n onset_probs = slim.fully_connected(\n onset_outputs,\n constants.MIDI_PITCHES,\n activation_fn=tf.sigmoid,\n scope='onset_probs')\n\n # onset_probs_flat is used during inference.\n onset_probs_flat = flatten_maybe_padded_sequences(onset_probs, length)\n if is_training:\n onset_labels_flat = flatten_maybe_padded_sequences(onset_labels, length)\n onset_losses = tf_utils.log_loss(onset_labels_flat, onset_probs_flat)\n tf.losses.add_loss(tf.reduce_mean(onset_losses))\n losses['onset'] = onset_losses\n with tf.variable_scope('offsets'):\n offset_outputs = acoustic_model(\n spec,\n hparams,\n lstm_units=hparams.offset_lstm_units,\n lengths=length)\n offset_probs = slim.fully_connected(\n offset_outputs,\n constants.MIDI_PITCHES,\n activation_fn=tf.sigmoid,\n scope='offset_probs')\n\n # offset_probs_flat is used during inference.\n offset_probs_flat = flatten_maybe_padded_sequences(offset_probs, length)\n if is_training:\n offset_labels_flat = flatten_maybe_padded_sequences(\n offset_labels, length)\n offset_losses = tf_utils.log_loss(offset_labels_flat, offset_probs_flat)\n tf.losses.add_loss(tf.reduce_mean(offset_losses))\n losses['offset'] = offset_losses\n with tf.variable_scope('velocity'):\n velocity_outputs = acoustic_model(\n spec,\n hparams,\n lstm_units=hparams.velocity_lstm_units,\n lengths=length)\n velocity_values = slim.fully_connected(\n velocity_outputs,\n constants.MIDI_PITCHES,\n activation_fn=None,\n scope='onset_velocities')\n\n velocity_values_flat = flatten_maybe_padded_sequences(\n velocity_values, length)\n if is_training:\n velocity_labels_flat = flatten_maybe_padded_sequences(\n velocity_labels, length)\n velocity_loss = tf.reduce_sum(\n onset_labels_flat *\n tf.square(velocity_labels_flat - velocity_values_flat),\n axis=1)\n tf.losses.add_loss(tf.reduce_mean(velocity_loss))\n losses['velocity'] = velocity_loss\n\n with tf.variable_scope('frame'):\n if not hparams.share_conv_features:\n # TODO(eriche): this is broken when hparams.frame_lstm_units > 0\n activation_outputs = acoustic_model(\n spec,\n hparams,\n lstm_units=hparams.frame_lstm_units,\n lengths=length)\n activation_probs = slim.fully_connected(\n activation_outputs,\n constants.MIDI_PITCHES,\n activation_fn=tf.sigmoid,\n scope='activation_probs')\n else:\n activation_probs = slim.fully_connected(\n onset_outputs,\n constants.MIDI_PITCHES,\n 
activation_fn=tf.sigmoid,\n scope='activation_probs')\n\n probs = []\n if hparams.stop_onset_gradient:\n probs.append(tf.stop_gradient(onset_probs))\n else:\n probs.append(onset_probs)\n\n if hparams.stop_activation_gradient:\n probs.append(tf.stop_gradient(activation_probs))\n else:\n probs.append(activation_probs)\n\n if hparams.stop_offset_gradient:\n probs.append(tf.stop_gradient(offset_probs))\n else:\n probs.append(offset_probs)\n\n combined_probs = tf.concat(probs, 2)\n\n if hparams.combined_lstm_units > 0:\n outputs = lstm_layer(\n combined_probs,\n hparams.combined_lstm_units,\n lengths=length if hparams.use_lengths else None,\n stack_size=hparams.combined_rnn_stack_size,\n use_cudnn=hparams.use_cudnn,\n bidirectional=hparams.bidirectional)\n else:\n outputs = combined_probs\n\n frame_probs = slim.fully_connected(\n outputs,\n constants.MIDI_PITCHES,\n activation_fn=tf.sigmoid,\n scope='frame_probs')\n\n frame_probs_flat = flatten_maybe_padded_sequences(frame_probs, length)\n\n if is_training:\n frame_labels_flat = flatten_maybe_padded_sequences(frame_labels, length)\n frame_label_weights_flat = flatten_maybe_padded_sequences(\n frame_label_weights, length)\n if hparams.weight_frame_and_activation_loss:\n frame_loss_weights = frame_label_weights_flat\n else:\n frame_loss_weights = None\n frame_losses = tf_utils.log_loss(\n frame_labels_flat, frame_probs_flat, weights=frame_loss_weights)\n tf.losses.add_loss(tf.reduce_mean(frame_losses))\n losses['frame'] = frame_losses\n\n if hparams.activation_loss:\n if hparams.weight_frame_and_activation_loss:\n activation_loss_weights = frame_label_weights\n else:\n activation_loss_weights = None\n activation_losses = tf_utils.log_loss(\n frame_labels_flat,\n flatten_maybe_padded_sequences(activation_probs, length),\n weights=activation_loss_weights)\n tf.losses.add_loss(tf.reduce_mean(activation_losses))\n losses['activation'] = activation_losses\n\n frame_predictions = frame_probs_flat > hparams.predict_frame_threshold\n onset_predictions = onset_probs_flat > hparams.predict_onset_threshold\n offset_predictions = offset_probs_flat > hparams.predict_offset_threshold\n\n frame_predictions = tf.expand_dims(frame_predictions, axis=0)\n onset_predictions = tf.expand_dims(onset_predictions, axis=0)\n offset_predictions = tf.expand_dims(offset_predictions, axis=0)\n velocity_values = tf.expand_dims(velocity_values_flat, axis=0)\n\n metrics_values = metrics.define_metrics(\n frame_probs=frame_probs,\n onset_probs=onset_probs,\n frame_predictions=frame_predictions,\n onset_predictions=onset_predictions,\n offset_predictions=offset_predictions,\n velocity_values=velocity_values,\n length=features.length,\n sequence_label=labels.note_sequence,\n frame_labels=labels.labels,\n sequence_id=features.sequence_id,\n hparams=hparams)\n\n for label, loss_collection in losses.items():\n loss_label = 'losses/' + label\n metrics_values[loss_label] = loss_collection\n\n def predict_sequence():\n \n", "url": "https://github.com/magenta/magenta.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 1488, "n_words": 385, "vocab_size": 217, "complexity": 23, "nloc": 228, "token_counts": 1401, "n_ast_nodes": 1442, "n_identifiers": 114, "d_id": 40851, "documentation": { "docstring": "Builds the acoustic model.Convert frame predictions into a sequence (TF).", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 287034, "commit_id": "b3a48389789549b3cb1aabd042310137baccc9b9", "repo": "core", 
"path": "tests/components/mqtt/test_init.py", "file_name": "test_init.py", "fun_name": "test_reload_entry_with_new_config", "commit_message": "Refactor MQTT tests to use modern platform schema part 1 (#77387)\n\n* Tests alarm_control_panel\r\n\r\n* Tests binary_sensor\r\n\r\n* Tests button\r\n\r\n* Tests camera\r\n\r\n* Tests Climate + corrections default config\r\n\r\n* Tests cover\r\n\r\n* Tests device_tracker\r\n\r\n* Tests fan\r\n\r\n* Tests humidifier\r\n\r\n* Fix test_supported_features test fan\r\n\r\n* Tests init\r\n\r\n* Tests legacy vacuum\r\n\r\n* Derive DEFAULT_CONFIG_LEGACY from DEFAULT_CONFIG\r\n\r\n* Commit suggestion comment changes", "code": "async def test_reload_entry_with_new_config(hass, tmp_path):\n \n config_old = [{\"name\": \"test_old1\", \"command_topic\": \"test-topic_old\"}]\n config_yaml_new = {\n \"mqtt\": {\n \"light\": [{\"name\": \"test_new_modern\", \"command_topic\": \"test-topic_new\"}]\n },\n # Test deprecated YAML configuration under the platform key\n # Scheduled to be removed in HA core 2022.12\n \"light\": [\n {\n \"platform\": \"mqtt\",\n \"name\": \"test_new_legacy\",\n \"command_topic\": \"test-topic_new\",\n }\n ],\n }\n await help_test_setup_manual_entity_from_yaml(hass, \"light\", config_old)\n assert hass.states.get(\"light.test_old1\") is not None\n\n await help_test_entry_reload_with_new_config(hass, tmp_path, config_yaml_new)\n assert hass.states.get(\"light.test_old1\") is None\n assert hass.states.get(\"light.test_new_modern\") is not None\n assert hass.states.get(\"light.test_new_legacy\") is not None\n\n\n@patch(\"homeassistant.components.mqtt.PLATFORMS\", [Platform.LIGHT])", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@patch(\"homeassistant.components.mqtt.PLATFORMS\", [Platform.LIGHT])", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 229, "n_words": 80, "vocab_size": 58, "complexity": 1, "nloc": 20, "token_counts": 127, "n_ast_nodes": 254, "n_identifiers": 12, "d_id": 86227, "documentation": { "docstring": "Test reloading the config entry with a new yaml config.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 188993, "commit_id": "471b19d2aa799cd73bded23379e864dd35bec2b6", "repo": "psutil", "path": "psutil/__init__.py", "file_name": "__init__.py", "fun_name": "suspend", "commit_message": "Fix typos", "code": "def suspend(self):\n \n if POSIX:\n self._send_signal(signal.SIGSTOP)\n else: # pragma: no cover\n self._proc.suspend()\n", "url": "https://github.com/giampaolo/psutil.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 55, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 26, "n_ast_nodes": 48, "n_identifiers": 7, "d_id": 45957, "documentation": { "docstring": "Suspend process execution with SIGSTOP pre-emptively checking\n whether PID has been reused.\n On Windows this has the effect of suspending all process threads.\n ", "n_words": 23, "vocab_size": 21, "n_whitespaces": 44, "language": "en" } }, { "id": 60923, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/operations/check.py", "file_name": "check.py", "fun_name": "_simulate_installation_of", "commit_message": "upd; format", "code": "def _simulate_installation_of(to_install, package_set):\n # type: (List[InstallRequirement], PackageSet) -> Set[NormalizedName]\n \n\n # Keep track of packages that were installed\n 
installed = set()\n\n # Modify it as installing requirement_set would (assuming no errors)\n for inst_req in to_install:\n abstract_dist = make_distribution_for_install_requirement(inst_req)\n dist = abstract_dist.get_pkg_resources_distribution()\n\n assert dist is not None\n name = canonicalize_name(dist.key)\n package_set[name] = PackageDetails(dist.version, dist.requires())\n\n installed.add(name)\n\n return installed\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 118, "n_words": 55, "vocab_size": 46, "complexity": 2, "nloc": 10, "token_counts": 69, "n_ast_nodes": 115, "n_identifiers": 17, "d_id": 12346, "documentation": { "docstring": "Computes the version of packages after installing to_install.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 11, "language": "en" } }, { "id": 77580, "commit_id": "5994cc43dfc5cc1ed891ab78eff3a3bcf56f6830", "repo": "wagtail", "path": "wagtail/admin/tests/ui/test_tables.py", "file_name": "test_tables.py", "fun_name": "test_title_column", "commit_message": "Allow passing arbitrary link attributes to TitleColumn", "code": "def test_title_column(self):\n root_page = Page.objects.filter(depth=2).first()\n blog = Site.objects.create(\n hostname=\"blog.example.com\", site_name=\"My blog\", root_page=root_page\n )\n gallery = Site.objects.create(\n hostname=\"gallery.example.com\", site_name=\"My gallery\", root_page=root_page\n )\n data = [blog, gallery]\n\n table = Table(\n [\n TitleColumn(\n \"hostname\",\n url_name=\"wagtailsites:edit\",\n link_classname=\"choose-site\",\n link_attrs={\"data-chooser\": \"yes\"},\n ),\n Column(\"site_name\", label=\"Site name\"),\n ],\n data,\n )\n\n html = self.render_component(table)\n self.assertHTMLEqual(\n html,\n \n % (blog.pk, gallery.pk),\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 337, "n_words": 51, "vocab_size": 40, "complexity": 1, "nloc": 51, "token_counts": 136, "n_ast_nodes": 223, "n_identifiers": 27, "d_id": 16677, "documentation": { "docstring": "\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
    HostnameSite name
    \n \n My blog
    \n \n My gallery
    \n ", "n_words": 37, "vocab_size": 25, "n_whitespaces": 530, "language": "en" } }, { "id": 256238, "commit_id": "a59bca366174d9c692fa19750c24d65f47660ef7", "repo": "haystack", "path": "haystack/modeling/model/language_model.py", "file_name": "language_model.py", "fun_name": "silence_transformers_logs", "commit_message": "Apply black formatting (#2115)\n\n* Testing black on ui/\r\n\r\n* Applying black on docstores\r\n\r\n* Add latest docstring and tutorial changes\r\n\r\n* Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too\r\n\r\n* Remove comments\r\n\r\n* Relax constraints on pydoc-markdown\r\n\r\n* Split temporary black from the docs. Pydoc-markdown was obsolete and needs a separate PR to upgrade\r\n\r\n* Fix a couple of bugs\r\n\r\n* Add a type: ignore that was missing somehow\r\n\r\n* Give path to black\r\n\r\n* Apply Black\r\n\r\n* Apply Black\r\n\r\n* Relocate a couple of type: ignore\r\n\r\n* Update documentation\r\n\r\n* Make Linux CI run after applying Black\r\n\r\n* Triggering Black\r\n\r\n* Apply Black\r\n\r\n* Remove dependency, does not work well\r\n\r\n* Remove manually double trailing commas\r\n\r\n* Update documentation\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def silence_transformers_logs(from_pretrained_func):\n \n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 4, "token_counts": 15, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 74819, "documentation": { "docstring": "\n Wrapper that raises the log level of Transformers to\n ERROR to hide some unnecessary warnings\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 25, "language": "en" } }, { "id": 104392, "commit_id": "e35be138148333078284b942ccc9ed7b1d826f97", "repo": "datasets", "path": "src/datasets/table.py", "file_name": "table.py", "fun_name": "num_columns", "commit_message": "Update docs to new frontend/UI (#3690)\n\n* WIP: update docs to new UI\r\n\r\n* make style\r\n\r\n* Rm unused\r\n\r\n* inject_arrow_table_documentation __annotations__\r\n\r\n* hasattr(arrow_table_method, \"__annotations__\")\r\n\r\n* Update task_template.rst\r\n\r\n* Codeblock PT-TF-SPLIT\r\n\r\n* Convert loading scripts\r\n\r\n* Convert docs to mdx\r\n\r\n* Fix mdx\r\n\r\n* Add \r\n\r\n* Convert mdx tables\r\n\r\n* Fix codeblock\r\n\r\n* Rm unneded hashlinks\r\n\r\n* Update index.mdx\r\n\r\n* Redo dev change\r\n\r\n* Rm circle ci `build_doc` & `deploy_doc`\r\n\r\n* Rm unneeded files\r\n\r\n* Update docs reamde\r\n\r\n* Standardize to `Example::`\r\n\r\n* mdx logging levels doc\r\n\r\n* Table properties inject_arrow_table_documentation\r\n\r\n* ``` to ```py mdx\r\n\r\n* Add Tips mdx\r\n\r\n* important,None -> \r\n\r\n* More misc\r\n\r\n* Center imgs\r\n\r\n* Update instllation page\r\n\r\n* `setup.py` docs section\r\n\r\n* Rm imgs since they are in hf.co\r\n\r\n* Update docs/source/access.mdx\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\n\r\n* Update index mdx\r\n\r\n* Update docs/source/access.mdx\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\n\r\n* just `Dataset` obj\r\n\r\n* Addedversion just italics\r\n\r\n* Update ReadInstruction doc example syntax\r\n\r\n* Change docstring for `prepare_for_task`\r\n\r\n* Chore\r\n\r\n* Remove `code` syntax from headings\r\n\r\n* 
Rm `code` syntax from headings\r\n\r\n* Hashlink backward compatability\r\n\r\n* S3FileSystem doc\r\n\r\n* S3FileSystem doc updates\r\n\r\n* index.mdx updates\r\n\r\n* Add darkmode gifs\r\n\r\n* Index logo img css classes\r\n\r\n* Index mdx dataset logo img size\r\n\r\n* Docs for DownloadMode class\r\n\r\n* Doc DownloadMode table\r\n\r\n* format docstrings\r\n\r\n* style\r\n\r\n* Add doc builder scripts (#3790)\r\n\r\n* add doc builder scripts\r\n\r\n* fix docker image\r\n\r\n* Docs new UI actions no self hosted (#3793)\r\n\r\n* No self hosted\r\n\r\n* replace doc injection by actual docstrings\r\n\r\n* Docstring formatted\r\n\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Mishig Davaadorj \r\n\r\nCo-authored-by: Lysandre Debut \r\nCo-authored-by: Mishig Davaadorj \r\n\r\n* Rm notebooks from docs actions since they dont exi\r\n\r\n* Update tsting branch\r\n\r\n* More docstring\r\n\r\n* Chore\r\n\r\n* bump up node version\r\n\r\n* bump up node\r\n\r\n* ``` -> ```py for audio_process.mdx\r\n\r\n* Update .github/workflows/build_documentation.yml\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* Uodate dev doc build\r\n\r\n* remove run on PR\r\n\r\n* fix action\r\n\r\n* Fix gh doc workflow\r\n\r\n* forgot this change when merging master\r\n\r\n* Update build doc\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\nCo-authored-by: Lysandre Debut ", "code": "def num_columns(self):\n \n return self.table.num_columns\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 22, "n_identifiers": 3, "d_id": 21829, "documentation": { "docstring": "\n Number of columns in this table.\n\n Returns:\n int:\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 41, "language": "en" } }, { "id": 7263, "commit_id": "dfdc98caa35f38665dbe045ccff431715e976841", "repo": "ludwig", "path": "ludwig/modules/metric_modules.py", "file_name": "metric_modules.py", "fun_name": "compute", "commit_message": "Update R2 score to handle single sample computation (#2235)\n\n* Update R2 scores to handle single sample computation", "code": "def compute(self) -> Tensor:\n \n\n # self.total maps to the number of observations in preds/target computed during update()\n if self.total <= 1:\n logger.warning(\n \n )\n return torch.tensor(float(\"nan\"))\n\n return _r2_score_compute(\n self.sum_squared_error, self.sum_error, self.residual, self.total, self.adjusted, self.multioutput\n )\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 129, "n_words": 34, "vocab_size": 31, "complexity": 2, "nloc": 10, "token_counts": 58, "n_ast_nodes": 94, "n_identifiers": 15, "d_id": 1171, "documentation": { "docstring": "Computes r2 score over the metric states.R-squared (r2) is not defined for one sample. It needs at least two samples. 
Returning NaN.", "n_words": 22, "vocab_size": 22, "n_whitespaces": 21, "language": "en" } }, { "id": 280501, "commit_id": "5a105aadbdc6fde2c2529280c4789864adbb81c7", "repo": "keras", "path": "keras/optimizers/__init__.py", "file_name": "__init__.py", "fun_name": "convert_to_legacy_optimizer", "commit_message": "Move new optimizer out of optimizer_experimental/ directory.\n\nPiperOrigin-RevId: 488998585", "code": "def convert_to_legacy_optimizer(optimizer):\n \n if not isinstance(optimizer, base_optimizer.Optimizer):\n raise ValueError(\n \"`convert_to_legacy_optimizer` should only be called \"\n \"on instances of `tf.keras.optimizers.Optimizer`, but \"\n f\"received {optimizer} of type {type(optimizer)}.\"\n )\n optimizer_name = optimizer.__class__.__name__.lower()\n config = optimizer.get_config()\n # Remove fields that only exist in experimental optimizer.\n keys_to_remove = [\n \"weight_decay\",\n \"use_ema\",\n \"ema_momentum\",\n \"ema_overwrite_frequency\",\n \"jit_compile\",\n \"is_legacy_optimizer\",\n ]\n for key in keys_to_remove:\n config.pop(key, None)\n # Learning rate can be a custom LearningRateSchedule, which is stored as\n # a dict in config, and cannot be deserialized.\n if isinstance(\n optimizer._learning_rate, learning_rate_schedule.LearningRateSchedule\n ):\n config[\"learning_rate\"] = optimizer._learning_rate\n legacy_optimizer_config = {\n \"class_name\": optimizer_name,\n \"config\": config,\n }\n return deserialize(legacy_optimizer_config, use_legacy_optimizer=True)\n\n\n@keras_export(\"keras.optimizers.get\")", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.optimizers.get\")", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 266, "n_words": 98, "vocab_size": 82, "complexity": 4, "nloc": 28, "token_counts": 113, "n_ast_nodes": 220, "n_identifiers": 23, "d_id": 83358, "documentation": { "docstring": "Convert experimental optimizer to legacy optimizer.\n\n This function takes in a `tf.keras.optimizers.experimental.Optimizer`\n instance and converts it to the corresponding\n `tf.keras.optimizers.legacy.Optimizer` instance.\n For example, `tf.keras.optimizers.experimental.Adam(...)` to\n `tf.keras.optimizers.legacy.Adam(...)`.\n\n Args:\n optimizer: An instance of `tf.keras.optimizers.experimental.Optimizer`.\n ", "n_words": 32, "vocab_size": 29, "n_whitespaces": 60, "language": "en" } }, { "id": 84031, "commit_id": "5ff4754090259dea52c0554d82eeaf601490f383", "repo": "zulip", "path": "zerver/tests/test_upload.py", "file_name": "test_upload.py", "fun_name": "test_non_existing_file_download", "commit_message": "test_upload: Fix some URLs to uploaded files.\n\nUsing http://localhost:9991 is incorrect - e.g. 
messages sent with file\nurls constructed trigger do_claim_attachments to be called with empty\nlist in potential_path_ids.\n\nrealm.host should be used in all these places, like in the other tests\nin the file.", "code": "def test_non_existing_file_download(self) -> None:\n \n hamlet = self.example_user(\"hamlet\")\n self.login_user(hamlet)\n response = self.client_get(\n f\"http://{hamlet.realm.host}/user_uploads/{hamlet.realm_id}/ff/gg/abc.py\"\n )\n self.assertEqual(response.status_code, 404)\n self.assert_in_response(\"File not found.\", response)\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 79, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 11, "token_counts": 49, "n_ast_nodes": 102, "n_identifiers": 13, "d_id": 17766, "documentation": { "docstring": "\n Trying to download a file that was never uploaded will return a json_error\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 28, "language": "en" } }, { "id": 60897, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/network/lazy_wheel.py", "file_name": "lazy_wheel.py", "fun_name": "readable", "commit_message": "upd; format", "code": "def readable(self):\n # type: () -> bool\n \n return True\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 8, "n_ast_nodes": 17, "n_identifiers": 2, "d_id": 12329, "documentation": { "docstring": "Return whether the file is readable, which is True.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 186741, "commit_id": "f251a13f322e10c530897be31aa07a1199061f10", "repo": "certbot", "path": "windows-installer/windows_installer/construct.py", "file_name": "construct.py", "fun_name": "_generate_pynsist_config", "commit_message": "Remove Windows 2016 environment, generate 64 bit installer (#9202)\n\n* Remove Windows 2016 environment, generate 64 bit installer\n\n* Add note to changelog\n\n* Use win_amd64 as installer suffix\n\n* Bump PYTHON_BITNESS to 64\n\n* Require 64 bit Windows for the installer_build job\n\n* Update certbot install path\n\n* update windows test name\n\n* Base installer suffix on PYTHON_BITNESS again\n\n* Update changelog to request users uninstall old version", "code": "def _generate_pynsist_config(repo_path, build_path):\n print('Generate pynsist configuration')\n\n installer_cfg_path = os.path.join(build_path, 'installer.cfg')\n\n certbot_pkg_path = os.path.join(repo_path, 'certbot')\n certbot_version = subprocess.check_output([sys.executable, '-c', 'import certbot; print(certbot.__version__)'],\n universal_newlines=True, cwd=certbot_pkg_path).strip()\n\n # If we change the installer name from `certbot-beta-installer-win_amd64.exe`, it should\n # also be changed in tools/create_github_release.py\n with open(installer_cfg_path, 'w') as file_h:\n file_h.write(.format(certbot_version=certbot_version,\n installer_suffix='win_amd64' if PYTHON_BITNESS == 64 else 'win32',\n python_bitness=PYTHON_BITNESS,\n python_version='.'.join(str(item) for item in PYTHON_VERSION)))\n\n return installer_cfg_path\n\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 170, "n_words": 61, 
"vocab_size": 56, "complexity": 3, "nloc": 38, "token_counts": 118, "n_ast_nodes": 202, "n_identifiers": 28, "d_id": 45615, "documentation": { "docstring": "\\\n[Application]\nname=Certbot\nversion={certbot_version}\nicon=certbot.ico\npublisher=Electronic Frontier Foundation\ntarget=$INSTDIR\\\\run.bat\n\n[Build]\ndirectory=nsis\nnsi_template=template.nsi\ninstaller_name=certbot-beta-installer-{installer_suffix}.exe\n\n[Python]\nversion={python_version}\nbitness={python_bitness}\n\n[Include]\nlocal_wheels=wheels\\\\*.whl\nfiles=run.bat\n renew-up.ps1\n renew-down.ps1\n\n[Command certbot]\nentry_point=certbot.main:main\nextra_preamble=preamble.py\n", "n_words": 25, "vocab_size": 25, "n_whitespaces": 15, "language": "en" } }, { "id": 12497, "commit_id": "ef662b529b2a2eecea7bb99759a9f7b9d86d3062", "repo": "jina", "path": "jina/orchestrate/deployments/__init__.py", "file_name": "__init__.py", "fun_name": "get_worker_host", "commit_message": "feat: add grpc health checking (#4779)", "code": "def get_worker_host(pod_args, pod_is_container, head_is_container):\n \n # Check if the current pod and head are both containerized on the same host\n # If so __docker_host__ needs to be advertised as the worker's address to the head\n worker_host = (\n __docker_host__\n if (pod_is_container and (head_is_container or in_docker()))\n and host_is_local(pod_args.host)\n else pod_args.host\n )\n return worker_host\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 137, "n_words": 51, "vocab_size": 40, "complexity": 5, "nloc": 8, "token_counts": 40, "n_ast_nodes": 65, "n_identifiers": 9, "d_id": 2318, "documentation": { "docstring": "\n Check if the current pod and head are both containerized on the same host\n If so __docker_host__ needs to be advertised as the worker's address to the head\n\n :param pod_args: arguments of the worker pod\n :param pod_is_container: boolean specifying if pod is to be run in container\n :param head_is_container: boolean specifying if head pod is to be run in container\n :return: host to pass in connection list of the head\n ", "n_words": 70, "vocab_size": 40, "n_whitespaces": 120, "language": "en" } }, { "id": 137963, "commit_id": "8e680c483ce326cefc62e44f68ab1a6948b1c3d2", "repo": "ray", "path": "rllib/examples/simulators/sumo/marlenvironment.py", "file_name": "marlenvironment.py", "fun_name": "step", "commit_message": "[RLlib] gymnasium support (new `Env.reset()/step()/seed()/render()` APIs). 
(#28369)", "code": "def step(self, action_dict):\n \n self.resetted = False\n self.steps += 1\n logger.debug(\n \"====> [SUMOTestMultiAgentEnv:step] Episode: %d - Step: %d <====\",\n self.episodes,\n self.steps,\n )\n dones = {}\n dones[\"__all__\"] = False\n\n shuffled_agents = sorted(\n action_dict.keys()\n ) # it may seem not smar to sort something that\n # may need to be shuffled afterwards, but it\n # is a matter of consistency instead of using\n # whatever insertion order was used in the dict\n if self._config[\"scenario_config\"][\"agent_rnd_order\"]:\n # randomize the agent order to minimize SUMO's\n # insertion queues impact\n logger.debug(\"Shuffling the order of the agents.\")\n self.rndgen.shuffle(shuffled_agents) # in-place shuffle\n\n # Take action\n for agent in shuffled_agents:\n self.agents[agent].step(action_dict[agent], self.simulation)\n\n logger.debug(\"Before SUMO\")\n ongoing_simulation = self.simulation.step(\n until_end=False, agents=set(action_dict.keys())\n )\n logger.debug(\"After SUMO\")\n\n # end of the episode\n if not ongoing_simulation:\n logger.info(\"Reached the end of the SUMO simulation.\")\n dones[\"__all__\"] = True\n\n obs, rewards, infos = {}, {}, {}\n\n for agent in action_dict:\n # check for collisions\n if self.simulation.collisions[agent] > 0:\n # punish the agent and remove it from the simulation\n dones[agent] = True\n obs[agent] = [0, 0]\n rewards[agent] = -self.agents[agent].config[\"max_speed\"]\n # infos[agent] = \"Collision\"\n self.simulation.traci_handler.remove(agent, reason=tc.REMOVE_VAPORIZED)\n else:\n dones[agent] = agent not in self.simulation.veh_subscriptions\n obs[agent] = self.get_observation(agent)\n rewards[agent] = self.get_reward(agent)\n # infos[agent] = \"\"\n\n logger.debug(\"Observations: %s\", pformat(obs))\n logger.debug(\"Rewards: %s\", pformat(rewards))\n logger.debug(\"Dones: %s\", pformat(dones))\n logger.debug(\"Info: %s\", pformat(infos))\n logger.debug(\"========================================================\")\n return obs, rewards, dones, dones, infos\n\n ###########################################################################\n # ACTIONS & OBSERATIONS SPACE\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 743, "n_words": 217, "vocab_size": 136, "complexity": 6, "nloc": 43, "token_counts": 329, "n_ast_nodes": 549, "n_identifiers": 36, "d_id": 31259, "documentation": { "docstring": "\n Returns observations from ready agents.\n\n The returns are dicts mapping from agent_id strings to values. The\n number of agents in the env can vary over time.\n\n Returns\n -------\n obs: New observations for each ready agent.\n rewards: Reward values for each ready agent. If the\n episode is just started, the value will be None.\n dones: Done values for each ready agent. 
The special key\n \"__all__\" (required) is used to indicate env termination.\n infos: Optional info values for each agent id.\n ", "n_words": 79, "vocab_size": 56, "n_whitespaces": 196, "language": "en" } }, { "id": 195866, "commit_id": "cda8dfe6f45dc5ed394c2f5cda706cd6c729f713", "repo": "sympy", "path": "sympy/functions/elementary/trigonometric.py", "file_name": "trigonometric.py", "fun_name": "_peeloff_pi", "commit_message": "Improved documentation formatting", "code": "def _peeloff_pi(arg):\n r\n pi_coeff = S.Zero\n rest_terms = []\n for a in Add.make_args(arg):\n K = a.coeff(S.Pi)\n if K and K.is_rational:\n pi_coeff += K\n else:\n rest_terms.append(a)\n\n if pi_coeff is S.Zero:\n return arg, S.Zero\n\n m1 = (pi_coeff % S.Half)\n m2 = pi_coeff - m1\n if m2.is_integer or ((2*m2).is_integer and m2.is_even is False):\n return Add(*(rest_terms + [m1*pi])), m2\n return arg, S.Zero\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 141, "n_words": 58, "vocab_size": 38, "complexity": 8, "nloc": 33, "token_counts": 124, "n_ast_nodes": 197, "n_identifiers": 20, "d_id": 47453, "documentation": { "docstring": "\n Split ARG into two parts, a \"rest\" and a multiple of $\\pi$.\n This assumes ARG to be an Add.\n The multiple of $\\pi$ returned in the second position is always a Rational.\n\n Examples\n ========\n\n >>> from sympy.functions.elementary.trigonometric import _peeloff_pi as peel\n >>> from sympy import pi\n >>> from sympy.abc import x, y\n >>> peel(x + pi/2)\n (x, 1/2)\n >>> peel(x + 2*pi/3 + pi*y)\n (x + pi*y + pi/6, 1/2)\n\n ", "n_words": 70, "vocab_size": 51, "n_whitespaces": 110, "language": "en" } }, { "id": 197150, "commit_id": "092c0c6ea1e6f435a2cddb6e6fe723088b73bd81", "repo": "sympy", "path": "sympy/core/random.py", "file_name": "random.py", "fun_name": "_randint", "commit_message": "Add sympy.core.random to Sphinx", "code": "def _randint(seed=None):\n \n if seed is None:\n return randint\n elif isinstance(seed, int):\n rng.seed(seed)\n return randint\n elif is_sequence(seed):\n seed = list(seed) # make a copy\n seed.reverse()\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 72, "n_words": 24, "vocab_size": 20, "complexity": 4, "nloc": 13, "token_counts": 59, "n_ast_nodes": 82, "n_identifiers": 9, "d_id": 48353, "documentation": { "docstring": "Return a randint generator.\n\n ``seed`` can be\n\n * None - return randomly seeded generator\n * int - return a generator seeded with the int\n * list - the values to be returned will be taken from the list\n in the order given; the provided list is not modified.\n\n Examples\n ========\n\n >>> from sympy.core.random import _randint\n >>> ri = _randint()\n >>> ri(1, 1000) # doctest: +SKIP\n 999\n >>> ri = _randint(3)\n >>> ri(1, 1000) # doctest: +SKIP\n 238\n >>> ri = _randint([0, 5, 1, 2, 4])\n >>> ri(1, 3), ri(1, 3)\n (1, 2)\n ", "n_words": 92, "vocab_size": 57, "n_whitespaces": 148, "language": "en" } }, { "id": 144666, "commit_id": "48adb6f7bb335b28fb0fb0d1190bd6c5dfc8ddfa", "repo": "ray", "path": "python/ray/serve/deployment_state.py", "file_name": "deployment_state.py", "fun_name": "_get_curr_status", "commit_message": "[serve] Introduce DeploymentStatus, poll for statuses instead of using async goals (#22121)", "code": "def _get_curr_status(self) -> Tuple[DeploymentStatusInfo, bool]:\n \n # TODO(edoakes): we could make this more efficient in 
steady-state by\n # having a \"healthy\" flag that gets flipped if an update or replica\n # failure happens.\n\n target_version = self._target_version\n target_replica_count = self._target_replicas\n\n all_running_replica_cnt = self._replicas.count(states=[ReplicaState.RUNNING])\n running_at_target_version_replica_cnt = self._replicas.count(\n states=[ReplicaState.RUNNING], version=target_version\n )\n\n failed_to_start_count = self._replica_constructor_retry_counter\n failed_to_start_threshold = min(\n MAX_DEPLOYMENT_CONSTRUCTOR_RETRY_COUNT, target_replica_count * 3\n )\n\n # Got to make a call to complete current deploy() goal after\n # start failure threshold reached, while we might still have\n # pending replicas in current goal.\n if (\n failed_to_start_count >= failed_to_start_threshold\n and failed_to_start_threshold != 0\n ):\n if running_at_target_version_replica_cnt > 0:\n # At least one RUNNING replica at target state, partial\n # success; We can stop tracking constructor failures and\n # leave it to the controller to fully scale to target\n # number of replicas and only return as completed once\n # reached target replica count\n self._replica_constructor_retry_counter = -1\n else:\n return (\n DeploymentStatusInfo(\n status=DeploymentStatus.FAILED,\n message=(\n \"The Deployment constructor failed \"\n f\"{failed_to_start_count} times in a row. See \"\n \"logs for details.\"\n ),\n ),\n False,\n )\n\n # If we have pending ops, the current goal is *not* ready.\n if (\n self._replicas.count(\n states=[\n ReplicaState.STARTING,\n ReplicaState.UPDATING,\n ReplicaState.RECOVERING,\n ReplicaState.STOPPING,\n ]\n )\n == 0\n ):\n # Check for deleting.\n if target_replica_count == 0 and all_running_replica_cnt == 0:\n return DeploymentStatusInfo(status=DeploymentStatus.UPDATING), True\n\n # Check for a non-zero number of deployments.\n elif target_replica_count == running_at_target_version_replica_cnt:\n return DeploymentStatusInfo(status=DeploymentStatus.RUNNING), False\n\n return (\n DeploymentStatusInfo(\n status=DeploymentStatus.UPDATING,\n message=(\n f\"Running replicas of target version: \"\n f\"{running_at_target_version_replica_cnt}, target \"\n \"replicas: {target_replica_count}\"\n ),\n ),\n False,\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 1143, "n_words": 248, "vocab_size": 151, "complexity": 8, "nloc": 66, "token_counts": 216, "n_ast_nodes": 356, "n_identifiers": 30, "d_id": 33279, "documentation": { "docstring": "Get the current deployment status.\n\n Checks the difference between the target vs. running replica count for\n the target version.\n\n TODO(edoakes): we should report the status as FAILED if replicas are\n repeatedly failing health checks. 
Need a reasonable heuristic here.\n\n Returns:\n (DeploymentStatusInfo, was_deleted)\n ", "n_words": 42, "vocab_size": 37, "n_whitespaces": 95, "language": "en" } }, { "id": 136394, "commit_id": "326d84f1149319809191e7887155df7f04f6f46a", "repo": "ray", "path": "python/ray/train/predictor.py", "file_name": "predictor.py", "fun_name": "_batch_format_to_use", "commit_message": "[AIR][Predictor] Enable numpy based predictor (#28917)\n\nCo-authored-by: Clark Zinzow \r\nCo-authored-by: Amog Kamsetty ", "code": "def _batch_format_to_use(cls) -> BatchFormat:\n \n has_pandas_implemented = cls._predict_pandas != Predictor._predict_pandas\n has_numpy_implemented = cls._predict_numpy != Predictor._predict_numpy\n if has_pandas_implemented and has_numpy_implemented:\n return cls.preferred_batch_format()\n elif has_pandas_implemented:\n return BatchFormat.PANDAS\n elif has_numpy_implemented:\n return BatchFormat.NUMPY\n else:\n raise NotImplementedError(\n f\"Predictor {cls.__name__} must implement at least one of \"\n \"`_predict_pandas` and `_predict_numpy`.\"\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 178, "n_words": 44, "vocab_size": 36, "complexity": 5, "nloc": 15, "token_counts": 60, "n_ast_nodes": 109, "n_identifiers": 13, "d_id": 30905, "documentation": { "docstring": "Determine the batch format to use for the predictor.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 216680, "commit_id": "3205491166e190512608bf01754815cadae47a92", "repo": "Open-Assistant", "path": "bot/channel_handlers.py", "file_name": "channel_handlers.py", "fun_name": "read", "commit_message": "add channel handler async msg routing", "code": "async def read(self) -> discord.Message:\n \n msg = await self.queue.get()\n if msg is None and self.expired:\n raise ChannelExpiredException()\n return msg\n", "url": "https://github.com/LAION-AI/Open-Assistant.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 58, "n_words": 19, "vocab_size": 17, "complexity": 3, "nloc": 6, "token_counts": 35, "n_ast_nodes": 61, "n_identifiers": 9, "d_id": 54675, "documentation": { "docstring": "Call this method to read the next message from the user in the handler method.", "n_words": 15, "vocab_size": 13, "n_whitespaces": 14, "language": "en" } }, { "id": 266508, "commit_id": "d19b506ce8c5ee43865b1cead2246fc07cc8902b", "repo": "ansible", "path": "test/lib/ansible_test/_internal/util_common.py", "file_name": "util_common.py", "fun_name": "yamlcheck", "commit_message": "ansible-test - Clean up future boilerplate. 
(#76874)\n\n* ansible-test - Clarify need for empty __init__.py\r\n* ansible-test - Update code-smell boilerplate.\r\n* Update code-smell boilerplate for core.\r\n* Update future boilerplate test for ansible-test.\r\n\r\nAll ansible-test code (except for targets) and core-specific sanity tests now use the same boilerplate.\r\n\r\nThe test also checks for unwanted `__future__` and `metaclass` boilerplate.\r\n\r\n* Relocate target tools to the correct directory.\r\n\r\nSeveral tools used on target Python versions were incorrectly placed in the controller directory.", "code": "def yamlcheck(python):\n \n result = json.loads(raw_command([python.path, os.path.join(ANSIBLE_TEST_TARGET_TOOLS_ROOT, 'yamlcheck.py')], capture=True)[0])\n\n if not result['yaml']:\n return None\n\n return result['cloader']\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 34, "n_words": 15, "vocab_size": 14, "complexity": 2, "nloc": 5, "token_counts": 53, "n_ast_nodes": 88, "n_identifiers": 11, "d_id": 78447, "documentation": { "docstring": "Return True if PyYAML has libyaml support, False if it does not and None if it was not found.", "n_words": 19, "vocab_size": 15, "n_whitespaces": 18, "language": "en" } }, { "id": 228102, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_scattersmith.py", "file_name": "_scattersmith.py", "fun_name": "imag", "commit_message": "switch to black .22", "code": "def imag(self):\n \n return self[\"imag\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 59775, "documentation": { "docstring": "\n Sets the imaginary component of the data, in units of\n normalized impedance such that real=1, imag=0 is the center of\n the chart.\n\n The 'imag' property is an array that may be specified as a tuple,\n list, numpy array, or pandas Series\n\n Returns\n -------\n numpy.ndarray\n ", "n_words": 44, "vocab_size": 37, "n_whitespaces": 108, "language": "en" } }, { "id": 48448, "commit_id": "79a2f79ff85a740d6b3680215dc2c9a143ddafbb", "repo": "airflow", "path": "tests/providers/amazon/aws/secrets/test_secrets_manager.py", "file_name": "test_secrets_manager.py", "fun_name": "test_get_conn_uri_non_existent_key", "commit_message": "cleanup usage of `get_connections()`` from test suite (#23757)\n\nThe function is deprecated and raises warnings https://github.com/apache/airflow/pull/10192\r\nReplacing the usage with `get_connection()`", "code": "def test_get_conn_uri_non_existent_key(self):\n \n conn_id = \"test_mysql\"\n\n secret_id = 'airflow/connections/test_postgres'\n create_param = {\n 'Name': secret_id,\n }\n\n param = {\n 'SecretId': secret_id,\n 'SecretString': 'postgresql://airflow:airflow@host:5432/airflow',\n }\n\n secrets_manager_backend = SecretsManagerBackend()\n secrets_manager_backend.client.create_secret(**create_param)\n secrets_manager_backend.client.put_secret_value(**param)\n\n assert secrets_manager_backend.get_conn_uri(conn_id=conn_id) is None\n assert secrets_manager_backend.get_connection(conn_id=conn_id) is None\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 152, "n_words": 35, 
"vocab_size": 25, "complexity": 1, "nloc": 15, "token_counts": 77, "n_ast_nodes": 137, "n_identifiers": 13, "d_id": 9496, "documentation": { "docstring": "\n Test that if the key with connection ID is not present,\n SecretsManagerBackend.get_connection should return None\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 37, "language": "en" } }, { "id": 140346, "commit_id": "d2f0c3b2f64b41f6541f6521e98cf3a37577c016", "repo": "ray", "path": "python/ray/tune/automlboard/backend/collector.py", "file_name": "collector.py", "fun_name": "_create_trial_info", "commit_message": "Clean up docstyle in data, ml, and tune packages (#25188)", "code": "def _create_trial_info(self, expr_dir):\n \n meta = self._build_trial_meta(expr_dir)\n\n self.logger.debug(\"Create trial for %s\" % meta)\n\n trial_record = TrialRecord.from_json(meta)\n trial_record.save()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 51, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 5, "token_counts": 39, "n_ast_nodes": 68, "n_identifiers": 11, "d_id": 31930, "documentation": { "docstring": "Create information for given trial.\n\n Meta file will be loaded if exists, and the trial information\n will be saved in db backend.\n\n Args:\n expr_dir: Directory path of the experiment.\n ", "n_words": 29, "vocab_size": 25, "n_whitespaces": 68, "language": "en" } }, { "id": 106603, "commit_id": "b4115c0337b1bacc876bef1ece97e8fa8b3e2834", "repo": "visdom", "path": "example/components/image.py", "file_name": "image.py", "fun_name": "image_svg", "commit_message": "test: split demo.py into seperate files and functions", "code": "def image_svg(viz, env):\n svgstr = \n viz.svg(\n svgstr=svgstr,\n opts=dict(title='Example of SVG Rendering')\n )\n\n\n", "url": "https://github.com/fossasia/visdom.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 35, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 12, "token_counts": 27, "n_ast_nodes": 46, "n_identifiers": 8, "d_id": 22423, "documentation": { "docstring": "\n \n \n Sorry, your browser does not support inline SVG.\n \n ", "n_words": 19, "vocab_size": 19, "n_whitespaces": 45, "language": "en" } }, { "id": 106776, "commit_id": "60c90e313e106c0af62339d29eeda0e62823c648", "repo": "visdom", "path": "py/visdom/utils/server_utils.py", "file_name": "server_utils.py", "fun_name": "escape_eid", "commit_message": "Refactoring server.py into more intentional files", "code": "def escape_eid(eid):\n \n return eid.replace('/', '_')\n\n", "url": "https://github.com/fossasia/visdom.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 11, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 30, "n_identifiers": 3, "d_id": 22436, "documentation": { "docstring": "Replace slashes with underscores, to avoid recognizing them\n as directories.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 16, "language": "en" } }, { "id": 156682, "commit_id": "1a760229fc18c0c7df41669a13a329a287215819", "repo": "dask", "path": "dask/array/core.py", "file_name": "core.py", "fun_name": "size", "commit_message": "Only import IPython if type checking (#9230)", "code": "def size(self) -> int | np.signedinteger:\n \n return np.prod(self.shape)\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 22, 
"n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 5, "token_counts": 21, "n_ast_nodes": 36, "n_identifiers": 7, "d_id": 36713, "documentation": { "docstring": "\n The total number of blocks in the array.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 232553, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/template/_data.py", "file_name": "_data.py", "fun_name": "icicle", "commit_message": "switch to black .22", "code": "def icicle(self):\n \n return self[\"icicle\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 63997, "documentation": { "docstring": "\n The 'icicle' property is a tuple of instances of\n Icicle that may be specified as:\n - A list or tuple of instances of plotly.graph_objs.layout.template.data.Icicle\n - A list or tuple of dicts of string/value properties that\n will be passed to the Icicle constructor\n\n Supported dict properties:\n\n Returns\n -------\n tuple[plotly.graph_objs.layout.template.data.Icicle]\n ", "n_words": 48, "vocab_size": 33, "n_whitespaces": 131, "language": "en" } }, { "id": 287742, "commit_id": "02731efc4cb3f7ee94b0c08aecc10e3a5209dbf4", "repo": "core", "path": "homeassistant/components/ibeacon/coordinator.py", "file_name": "coordinator.py", "fun_name": "_async_update_rssi", "commit_message": "Handle iBeacons that broadcast multiple different uuids (#79011)\n\n* Handle iBeacons that broadcast multiple different uuids\r\n\r\n* fix flip-flopping between uuids\r\n\r\n* naming", "code": "def _async_update_rssi(self) -> None:\n \n for (\n unique_id,\n ibeacon_advertisement,\n ) in self._last_ibeacon_advertisement_by_unique_id.items():\n address = unique_id.split(\"_\")[-1]\n if (\n service_info := bluetooth.async_last_service_info(\n self.hass, address, connectable=False\n )\n ) and service_info.rssi != ibeacon_advertisement.rssi:\n ibeacon_advertisement.update_rssi(service_info.rssi)\n async_dispatcher_send(\n self.hass,\n signal_seen(unique_id),\n ibeacon_advertisement,\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 261, "n_words": 34, "vocab_size": 28, "complexity": 4, "nloc": 23, "token_counts": 86, "n_ast_nodes": 134, "n_identifiers": 17, "d_id": 86930, "documentation": { "docstring": "Check to see if the rssi has changed and update any devices.\n\n We don't callback on RSSI changes so we need to check them\n here and send them over the dispatcher periodically to\n ensure the distance calculation is update.\n ", "n_words": 39, "vocab_size": 33, "n_whitespaces": 67, "language": "en" } }, { "id": 63591, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/requests/sessions.py", "file_name": "sessions.py", "fun_name": "session", "commit_message": "upd; format", "code": "def session():\n \n return Session()\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 10, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 9, "n_ast_nodes": 19, "n_identifiers": 2, "d_id": 13405, 
"documentation": { "docstring": "\n Returns a :class:`Session` for context-management.\n\n .. deprecated:: 1.0.0\n\n This method has been deprecated since version 1.0.0 and is only kept for\n backwards compatibility. New code should use :class:`~requests.sessions.Session`\n to create a session. This may be removed at a future date.\n\n :rtype: Session\n ", "n_words": 42, "vocab_size": 37, "n_whitespaces": 76, "language": "en" } }, { "id": 289315, "commit_id": "599d61a4da096227ce4d5ba1dc0eaabceea56f49", "repo": "core", "path": "homeassistant/components/rest/data.py", "file_name": "data.py", "fun_name": "async_update", "commit_message": "Fix payload in rest (#80544)", "code": "async def async_update(self, log_errors=True):\n \n if not self._async_client:\n self._async_client = get_async_client(\n self._hass, verify_ssl=self._verify_ssl\n )\n\n rendered_headers = template.render_complex(self._headers, parse_result=False)\n rendered_params = template.render_complex(self._params)\n\n _LOGGER.debug(\"Updating from %s\", self._resource)\n try:\n response = await self._async_client.request(\n self._method,\n self._resource,\n headers=rendered_headers,\n params=rendered_params,\n auth=self._auth,\n content=self._request_data,\n timeout=self._timeout,\n follow_redirects=True,\n )\n self.data = response.text\n self.headers = response.headers\n except httpx.TimeoutException as ex:\n if log_errors:\n _LOGGER.error(\"Timeout while fetching data: %s\", self._resource)\n self.last_exception = ex\n self.data = None\n self.headers = None\n except httpx.RequestError as ex:\n if log_errors:\n _LOGGER.error(\n \"Error fetching data: %s failed with %s\", self._resource, ex\n )\n self.last_exception = ex\n self.data = None\n self.headers = None\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 500, "n_words": 91, "vocab_size": 56, "complexity": 6, "nloc": 35, "token_counts": 202, "n_ast_nodes": 317, "n_identifiers": 38, "d_id": 88457, "documentation": { "docstring": "Get the latest data from REST service with provided method.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 291721, "commit_id": "c576a68d336bc91fd82c299d9b3e5dfdc1c14960", "repo": "core", "path": "tests/components/nest/test_api.py", "file_name": "test_api.py", "fun_name": "test_auth", "commit_message": "Upgrade pytest-aiohttp (#82475)\n\n* Upgrade pytest-aiohttp\r\n\r\n* Make sure executors, tasks and timers are closed\r\n\r\nSome test will trigger warnings on garbage collect, these warnings\r\nspills over into next test.\r\n\r\nSome test trigger tasks that raise errors on shutdown, these spill\r\nover into next test.\r\n\r\nThis is to mimic older pytest-aiohttp and it's behaviour on test\r\ncleanup.\r\n\r\nDiscussions on similar changes for pytest-aiohttp are here:\r\nhttps://github.com/pytest-dev/pytest-asyncio/pull/309\r\n\r\n* Replace loop with event_loop\r\n\r\n* Make sure time is frozen for tests\r\n\r\n* Make sure the ConditionType is not async\r\n\r\n /home-assistant/homeassistant/helpers/template.py:2082: RuntimeWarning: coroutine 'AsyncMockMixin._execute_mock_call' was never awaited\r\n def wrapper(*args, **kwargs):\r\n Enable tracemalloc to get traceback where the object was allocated.\r\n See https://docs.pytest.org/en/stable/how-to/capture-warnings.html#resource-warnings for more info.\r\n\r\n* Increase litejet press tests with a factor 10\r\n\r\nThe times are simulated anyway, and we can't stop the 
normal\r\nevent from occuring.\r\n\r\n* Use async handlers for aiohttp\r\n\r\ntests/components/motioneye/test_camera.py::test_get_still_image_from_camera\r\ntests/components/motioneye/test_camera.py::test_get_still_image_from_camera\r\ntests/components/motioneye/test_camera.py::test_get_stream_from_camera\r\ntests/components/motioneye/test_camera.py::test_get_stream_from_camera\r\ntests/components/motioneye/test_camera.py::test_camera_option_stream_url_template\r\ntests/components/motioneye/test_camera.py::test_camera_option_stream_url_template\r\n /Users/joakim/src/hass/home-assistant/venv/lib/python3.9/site-packages/aiohttp/web_urldispatcher.py:189: DeprecationWarning: Bare functions are deprecated, use async ones\r\n warnings.warn(\r\n\r\n* Switch to freezegun in modbus tests\r\n\r\nThe tests allowed clock to tick in between steps\r\n\r\n* Make sure skybell object are fully mocked\r\n\r\nOld tests would trigger attempts to post to could services:\r\n\r\n```\r\nDEBUG:aioskybell:HTTP post https://cloud.myskybell.com/api/v3/login/ Request with headers: {'content-type': 'application/json', 'accept': '*/*', 'x-skybell-app-id': 'd2b542c7-a7e4-4e1e-b77d-2b76911c7c46', 'x-skybell-client-id': '1f36a3c0-6dee-4997-a6db-4e1c67338e57'}\r\n```\r\n\r\n* Fix sorting that broke after rebase", "code": "async def test_auth(hass, aioclient_mock):\n \n\n expiration_time = time.time() + 86400\n create_config_entry(expiration_time).add_to_hass(hass)\n\n # Prepare to capture credentials in API request. Empty payloads just mean\n # no devices or structures are loaded.\n aioclient_mock.get(f\"{API_URL}/enterprises/{PROJECT_ID}/structures\", json={})\n aioclient_mock.get(f\"{API_URL}/enterprises/{PROJECT_ID}/devices\", json={})\n\n # Prepare to capture credentials for Subscriber\n captured_creds = None\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 71, "n_words": 43, "vocab_size": 35, "complexity": 1, "nloc": 30, "token_counts": 208, "n_ast_nodes": 108, "n_identifiers": 12, "d_id": 90825, "documentation": { "docstring": "Exercise authentication library creates valid credentials.", "n_words": 6, "vocab_size": 6, "n_whitespaces": 5, "language": "en" } }, { "id": 188426, "commit_id": "edfca5eb2486c2f006257723ffeda6f56b170170", "repo": "jumpserver", "path": "apps/authentication/mixins.py", "file_name": "mixins.py", "fun_name": "authenticate", "commit_message": "Fix rbac (#7699)\n\n* perf: 优化 suggesstion\r\n\r\n* perf: 修改 migrations\r\n\r\n* feat: 添加OIDC认证逻辑\r\n\r\n* perf: 修改 backend\r\n\r\n* perf: 优化认证backends\r\n\r\n* perf: 优化认证backends\r\n\r\n* perf: 优化CAS认证, 用户多域名进行访问时回调到各自域名\r\n\r\nCo-authored-by: ibuler ", "code": "def authenticate(request=None, **credentials):\n \n username = credentials.get('username')\n\n for backend, backend_path in _get_backends(return_tuples=True):\n # 预先检查,不浪费认证时间\n if not backend.username_can_authenticate(username):\n continue\n\n # 原生\n backend_signature = inspect.signature(backend.authenticate)\n try:\n backend_signature.bind(request, **credentials)\n except TypeError:\n # This backend doesn't accept these credentials as arguments. 
Try the next one.\n continue\n try:\n user = backend.authenticate(request, **credentials)\n except PermissionDenied:\n # This backend says to stop in our tracks - this user should not be allowed in at all.\n break\n if user is None:\n continue\n\n # 再次检查遇检查中遗漏的用户\n if not backend.user_can_authenticate(user):\n continue\n\n # Annotate the user object with the path of the backend.\n user.backend = backend_path\n return user\n\n # The credentials supplied are invalid to all backends, fire signal\n user_login_failed.send(sender=__name__, credentials=_clean_credentials(credentials), request=request)\n\n\nauth.authenticate = authenticate\n\n", "url": "https://github.com/jumpserver/jumpserver.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 323, "n_words": 112, "vocab_size": 78, "complexity": 7, "nloc": 21, "token_counts": 125, "n_ast_nodes": 220, "n_identifiers": 24, "d_id": 45914, "documentation": { "docstring": "\n If the given credentials are valid, return a User object.\n 之所以 hack 这个 auticate\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 24, "language": "en" } }, { "id": 186582, "commit_id": "16aad35d31a887dab157f9d4f5e0fe9218d06064", "repo": "certbot", "path": "certbot-nginx/certbot_nginx/_internal/configurator.py", "file_name": "configurator.py", "fun_name": "config_test", "commit_message": "Fully type certbot-nginx module (#9124)\n\n* Work in progress\r\n\r\n* Fix type\r\n\r\n* Work in progress\r\n\r\n* Work in progress\r\n\r\n* Work in progress\r\n\r\n* Work in progress\r\n\r\n* Work in progress\r\n\r\n* Oups.\r\n\r\n* Fix typing in UnspacedList\r\n\r\n* Fix logic\r\n\r\n* Finish typing\r\n\r\n* List certbot-nginx as fully typed in tox\r\n\r\n* Fix lint\r\n\r\n* Fix checks\r\n\r\n* Organize imports\r\n\r\n* Fix typing for Python 3.6\r\n\r\n* Fix checks\r\n\r\n* Fix lint\r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/configurator.py\r\n\r\nCo-authored-by: alexzorin \r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/configurator.py\r\n\r\nCo-authored-by: alexzorin \r\n\r\n* Fix signature of deploy_cert regarding the installer interface\r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/obj.py\r\n\r\nCo-authored-by: alexzorin \r\n\r\n* Fix types\r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/parser.py\r\n\r\nCo-authored-by: alexzorin \r\n\r\n* Precise type\r\n\r\n* Precise _coerce possible inputs/outputs\r\n\r\n* Fix type\r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/http_01.py\r\n\r\nCo-authored-by: ohemorange \r\n\r\n* Fix type\r\n\r\n* Remove an undesirable implementation.\r\n\r\n* Fix type\r\n\r\nCo-authored-by: alexzorin \r\nCo-authored-by: ohemorange ", "code": "def config_test(self) -> None:\n \n try:\n util.run_script([self.conf('ctl'), \"-c\", self.nginx_conf, \"-t\"])\n except errors.SubprocessError as err:\n raise errors.MisconfigurationError(str(err))\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 58, "n_words": 15, "vocab_size": 15, "complexity": 2, "nloc": 10, "token_counts": 48, "n_ast_nodes": 84, "n_identifiers": 11, "d_id": 45498, "documentation": { "docstring": "Check the configuration of Nginx for errors.\n\n :raises .errors.MisconfigurationError: If config_test fails\n\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 26, "language": "en" } }, { "id": 220797, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/tasks.py", 
"file_name": "tasks.py", "fun_name": "print_stack", "commit_message": "add python 3.10.4 for windows", "code": "def print_stack(self, *, limit=None, file=None):\n \n return base_tasks._task_print_stack(self, limit, file)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 27, "n_ast_nodes": 41, "n_identifiers": 6, "d_id": 56120, "documentation": { "docstring": "Print the stack or traceback for this task's coroutine.\n\n This produces output similar to that of the traceback module,\n for the frames retrieved by get_stack(). The limit argument\n is passed to get_stack(). The file argument is an I/O stream\n to which the output is written; by default output is written\n to sys.stderr.\n ", "n_words": 52, "vocab_size": 35, "n_whitespaces": 96, "language": "en" } }, { "id": 100615, "commit_id": "60291d49c4da1cd260fbc0b04aa6a312eedfefbb", "repo": "faceswap", "path": "plugins/convert/writer/ffmpeg.py", "file_name": "ffmpeg.py", "fun_name": "_rename_tmp_file", "commit_message": "ffmpeg writer: Create new filename if output pre-exists", "code": "def _rename_tmp_file(self) -> None:\n \n os.rename(self._video_tmp_file, self._output_filename)\n logger.debug(\"Removing temp file\")\n if os.path.isfile(self._video_tmp_file):\n os.remove(self._video_tmp_file)\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 51, "n_words": 12, "vocab_size": 12, "complexity": 2, "nloc": 6, "token_counts": 46, "n_ast_nodes": 78, "n_identifiers": 11, "d_id": 20077, "documentation": { "docstring": " Rename the temporary video file if not muxing audio. 
", "n_words": 9, "vocab_size": 9, "n_whitespaces": 10, "language": "en" } }, { "id": 108560, "commit_id": "24b16804731d3a724e4ec0984da140b1a6b05c66", "repo": "matplotlib", "path": "lib/matplotlib/artist.py", "file_name": "artist.py", "fun_name": "get_tightbbox", "commit_message": "MNT: make renderer always optional", "code": "def get_tightbbox(self, renderer=None):\n \n bbox = self.get_window_extent(renderer)\n if self.get_clip_on():\n clip_box = self.get_clip_box()\n if clip_box is not None:\n bbox = Bbox.intersection(bbox, clip_box)\n clip_path = self.get_clip_path()\n if clip_path is not None:\n clip_path = clip_path.get_fully_transformed_path()\n bbox = Bbox.intersection(bbox, clip_path.get_extents())\n return bbox\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 154, "n_words": 37, "vocab_size": 20, "complexity": 4, "nloc": 11, "token_counts": 84, "n_ast_nodes": 137, "n_identifiers": 14, "d_id": 23258, "documentation": { "docstring": "\n Like `.Artist.get_window_extent`, but includes any clipping.\n\n Parameters\n ----------\n renderer : `.RendererBase` subclass\n renderer that will be used to draw the figures (i.e.\n ``fig.canvas.get_renderer()``)\n\n Returns\n -------\n `.Bbox`\n The enclosing bounding box (in figure pixel coordinates).\n ", "n_words": 34, "vocab_size": 33, "n_whitespaces": 124, "language": "en" } }, { "id": 55096, "commit_id": "b0af6cf8b1eaea33ee6809efc770fc041908b7ca", "repo": "prefect", "path": "src/prefect/cli/cloud.py", "file_name": "cloud.py", "fun_name": "logout", "commit_message": "Refactor settings context", "code": "async def logout():\n \n confirm_logged_in()\n\n profiles = prefect.settings.load_profiles()\n\n profiles.update_active_profile()\n\n update_profile(PREFECT_API_URL=None, PREFECT_API_KEY=None)\n\n profile = prefect.context.get_settings_context()\n exit_with_success(f\"Successfully logged out in profile {profile.name!r}\")\n\n\n@workspace_app.command()", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "@workspace_app.command()", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 40, "n_words": 20, "vocab_size": 18, "complexity": 1, "nloc": 7, "token_counts": 46, "n_ast_nodes": 101, "n_identifiers": 17, "d_id": 11209, "documentation": { "docstring": "\n Log out of Prefect Cloud.\n Removes PREFECT_API_URL and PREFECT_API_KEY from profile.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 21, "language": "en" } }, { "id": 207062, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_custom_urls/tests.py", "file_name": "tests.py", "fun_name": "test_post_save_change_redirect", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_post_save_change_redirect(self):\n \n Person.objects.create(name=\"John Doe\")\n self.assertEqual(Person.objects.count(), 1)\n person = Person.objects.all()[0]\n post_url = reverse(\n \"admin_custom_urls:admin_custom_urls_person_change\", args=[person.pk]\n )\n response = self.client.post(post_url, {\"name\": \"Jack Doe\"})\n self.assertRedirects(\n response,\n reverse(\n \"admin_custom_urls:admin_custom_urls_person_delete\", args=[person.pk]\n ),\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 150, "n_words": 28, "vocab_size": 23, "complexity": 1, "nloc": 14, "token_counts": 92, "n_ast_nodes": 154, 
"n_identifiers": 18, "d_id": 51854, "documentation": { "docstring": "\n ModelAdmin.response_post_save_change() controls the redirection after\n the 'Save' button has been pressed when editing an existing object.\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 38, "language": "en" } }, { "id": 235481, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/table/_cells.py", "file_name": "_cells.py", "fun_name": "prefixsrc", "commit_message": "switch to black .22", "code": "def prefixsrc(self):\n \n return self[\"prefixsrc\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 66925, "documentation": { "docstring": "\n Sets the source reference on Chart Studio Cloud for `prefix`.\n\n The 'prefixsrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n ", "n_words": 27, "vocab_size": 25, "n_whitespaces": 77, "language": "en" } }, { "id": 63938, "commit_id": "1bac7930834d6f688950e836c45305a62e7ecb3f", "repo": "erpnext", "path": "erpnext/selling/report/payment_terms_status_for_sales_order/payment_terms_status_for_sales_order.py", "file_name": "payment_terms_status_for_sales_order.py", "fun_name": "get_conditions", "commit_message": "feat: Payment Terms Status report\n\n - calculate status at runtime for payment terms based on invoices\n - invoices are used in FIFO method", "code": "def get_conditions(filters):\n\t\n\tfilters = frappe._dict(filters) if filters else frappe._dict({})\n\tconditions = frappe._dict({})\n\n\tconditions.company = filters.company or frappe.defaults.get_user_default(\"company\")\n\tconditions.end_date = filters.period_end_date or frappe.utils.today()\n\tconditions.start_date = filters.period_start_date or frappe.utils.add_months(\n\t\tconditions.end_date, -1\n\t)\n\tconditions.sales_order = filters.sales_order or []\n\n\treturn conditions\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 27, "n_words": 37, "vocab_size": 26, "complexity": 6, "nloc": 10, "token_counts": 97, "n_ast_nodes": 158, "n_identifiers": 16, "d_id": 13537, "documentation": { "docstring": "\n\tConvert filter options to conditions used in query\n\t", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 189212, "commit_id": "8a16d7d8ce5e3f97fb100af7a960224f7f80137d", "repo": "aws-cli", "path": "awscli/customizations/s3/comparator.py", "file_name": "comparator.py", "fun_name": "call", "commit_message": "Delete extra whitespace\n\nA correction that does not affect the operation.", "code": "def call(self, src_files, dest_files):\n \n # :var src_done: True if there are no more files from the source left.\n src_done = False\n # :var dest_done: True if there are no more files form the dest left.\n dest_done = False\n # :var src_take: Take the next source file from the generated files if\n # true\n src_take = True\n # :var dest_take: Take the next dest file from the generated files if\n # true\n dest_take = True\n while True:\n try:\n if (not src_done) and src_take:\n src_file = advance_iterator(src_files)\n except StopIteration:\n src_file = None\n src_done = True\n try:\n if (not dest_done) and dest_take:\n dest_file = 
advance_iterator(dest_files)\n except StopIteration:\n dest_file = None\n dest_done = True\n\n if (not src_done) and (not dest_done):\n src_take = True\n dest_take = True\n\n compare_keys = self.compare_comp_key(src_file, dest_file)\n\n if compare_keys == 'equal':\n should_sync = self._sync_strategy.determine_should_sync(\n src_file, dest_file\n )\n if should_sync:\n yield src_file\n elif compare_keys == 'less_than':\n src_take = True\n dest_take = False\n should_sync = self._not_at_dest_sync_strategy.determine_should_sync(src_file, None)\n if should_sync:\n yield src_file\n\n elif compare_keys == 'greater_than':\n src_take = False\n dest_take = True\n should_sync = self._not_at_src_sync_strategy.determine_should_sync(None, dest_file)\n if should_sync:\n yield dest_file\n\n elif (not src_done) and dest_done:\n src_take = True\n should_sync = self._not_at_dest_sync_strategy.determine_should_sync(src_file, None)\n if should_sync:\n yield src_file\n\n elif src_done and (not dest_done):\n dest_take = True\n should_sync = self._not_at_src_sync_strategy.determine_should_sync(None, dest_file)\n if should_sync:\n yield dest_file\n else:\n break\n", "url": "https://github.com/aws/aws-cli.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 1052, "n_words": 210, "vocab_size": 68, "complexity": 22, "nloc": 52, "token_counts": 239, "n_ast_nodes": 402, "n_identifiers": 19, "d_id": 46019, "documentation": { "docstring": "\n This function preforms the actual comparisons. The parameters it takes\n are the generated files for both the source and the destination. The\n key concept in this function is that no matter the type of where the\n files are coming from, they are listed in the same order, least to\n greatest in collation order. This allows for easy comparisons to\n determine if file needs to be added or deleted. Comparison keys are\n used to determine if two files are the same and each file has a\n unique comparison key. If they are the same compare the size and\n last modified times to see if a file needs to be updated. Ultimately,\n it will yield a sequence of file info objectsthat will be sent to\n the ``S3Handler``.\n\n :param src_files: The generated FileInfo objects from the source.\n :param dest_files: The generated FileInfo objects from the dest.\n\n :returns: Yields the FilInfo objects of the files that need to be\n operated on\n\n Algorithm:\n Try to take next from both files. If it is empty signal\n corresponding done flag. If both generated lists are not done\n compare compare_keys. If equal, compare size and time to see if\n it needs to be updated. If source compare_key is less than dest\n compare_key, the file needs to be added to the destination. Take\n the next source file but not not destination file. If the source\n compare_key is greater than dest compare_key, that destination file\n needs to be deleted from the destination. Take the next dest file\n but not the source file. If the source list is empty delete the\n rest of the files in the dest list from the destination. 
If the\n dest list is empty add the rest of the file in source list to\n the destination.\n ", "n_words": 289, "vocab_size": 121, "n_whitespaces": 560, "language": "en" } }, { "id": 168434, "commit_id": "e94faa23e24c0abf9db74d79cfebe06676577867", "repo": "pandas", "path": "pandas/core/computation/expressions.py", "file_name": "expressions.py", "fun_name": "_bool_arith_fallback", "commit_message": "WARN,TST check stacklevel for all warnings (#47998)\n\n* use find_stack_level everywhere\n\n* fixup\n\n* pyx fixups\n\n* fixup test_optional_dependency\n\n* fixup api\n\n* set check_stacklevel=False for some tests\n\n* use lru_cache for currentframe\n\n* fixup import in __init__\n\n* add missing imports to pyx files\n\n* add missing import\n\n* fixup import in conversion\n\n* revert some __init__ changes\n\n* start n=1\n\n* temporarily dont check stacklevel in _check_plot_works\n\n* catch some more warnings\n\n* dont check stacklevel in check_plot_works\n\n* fixup\n\n* ignore stacklevel in check_plot_works", "code": "def _bool_arith_fallback(op_str, a, b):\n \n if _has_bool_dtype(a) and _has_bool_dtype(b):\n if op_str in _BOOL_OP_UNSUPPORTED:\n warnings.warn(\n f\"evaluating in Python space because the {repr(op_str)} \"\n \"operator is not supported by numexpr for the bool dtype, \"\n f\"use {repr(_BOOL_OP_UNSUPPORTED[op_str])} instead.\",\n stacklevel=find_stack_level(inspect.currentframe()),\n )\n return True\n return False\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 150, "n_words": 41, "vocab_size": 36, "complexity": 4, "nloc": 11, "token_counts": 52, "n_ast_nodes": 108, "n_identifiers": 13, "d_id": 40296, "documentation": { "docstring": "\n Check if we should fallback to the python `_evaluate_standard` in case\n of an unsupported operation by numexpr, which is the case for some\n boolean ops.\n ", "n_words": 25, "vocab_size": 23, "n_whitespaces": 38, "language": "en" } }, { "id": 221103, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/bdb.py", "file_name": "bdb.py", "fun_name": "get_file_breaks", "commit_message": "add python 3.10.4 for windows", "code": "def get_file_breaks(self, filename):\n \n filename = self.canonic(filename)\n if filename in self.breaks:\n return self.breaks[filename]\n else:\n return []\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 65, "n_words": 15, "vocab_size": 13, "complexity": 2, "nloc": 6, "token_counts": 35, "n_ast_nodes": 58, "n_identifiers": 5, "d_id": 56206, "documentation": { "docstring": "Return all lines with breakpoints for filename.\n\n If no breakpoints are set, return an empty list.\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 30, "language": "en" } }, { "id": 177078, "commit_id": "28f78cfa9a386620ee1179582fda1db5ffc59f84", "repo": "networkx", "path": "networkx/algorithms/distance_measures.py", "file_name": "distance_measures.py", "fun_name": "radius", "commit_message": "Add weight distance metrics (#5305)\n\nAdds the weight keyword argument to allow users to compute weighted distance metrics\r\ne.g. diameter, eccentricity, periphery, etc. The kwarg works in the same fashion as the\r\nweight param for shortest paths - i.e. if a string, look up with edge attr by key, if callable,\r\ncompute the weight via the function. 
Default is None, meaning return unweighted result\r\nwhich is the current behavior.\r\n\r\nCo-authored-by: Dan Schult \r\nCo-authored-by: Ross Barnowski ", "code": "def radius(G, e=None, usebounds=False, weight=None):\n \n if usebounds is True and e is None and not G.is_directed():\n return _extrema_bounding(G, compute=\"radius\", weight=weight)\n if e is None:\n e = eccentricity(G, weight=weight)\n return min(e.values())\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 56, "n_words": 30, "vocab_size": 22, "complexity": 5, "nloc": 6, "token_counts": 71, "n_ast_nodes": 112, "n_identifiers": 11, "d_id": 42265, "documentation": { "docstring": "Returns the radius of the graph G.\n\n The radius is the minimum eccentricity.\n\n Parameters\n ----------\n G : NetworkX graph\n A graph\n\n e : eccentricity dictionary, optional\n A precomputed dictionary of eccentricities.\n\n weight : string, function, or None\n If this is a string, then edge weights will be accessed via the\n edge attribute with this key (that is, the weight of the edge\n joining `u` to `v` will be ``G.edges[u, v][weight]``). If no\n such edge attribute exists, the weight of the edge is assumed to\n be one.\n\n If this is a function, the weight of an edge is the value\n returned by the function. The function must accept exactly three\n positional arguments: the two endpoints of an edge and the\n dictionary of edge attributes for that edge. The function must\n return a number.\n\n If this is None, every edge has weight/distance/cost 1.\n\n Weights stored as floating point values can lead to small round-off\n errors in distances. Use integer weights to avoid this.\n\n Weights should be positive, since they are distances.\n\n Returns\n -------\n r : integer\n Radius of graph\n\n Examples\n --------\n >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])\n >>> nx.radius(G)\n 2\n\n ", "n_words": 197, "vocab_size": 120, "n_whitespaces": 357, "language": "en" } }, { "id": 267273, "commit_id": "fe349a1ccd658d86cfcf10eecdce9d48ece6176c", "repo": "ansible", "path": "test/lib/ansible_test/_internal/host_profiles.py", "file_name": "host_profiles.py", "fun_name": "write", "commit_message": "ansible-test - Enhance the shell command. 
(#77734)\n\n* ansible-test - Add shell --export option.\r\n\r\n* ansible-test - Support cmd args for shell command.\r\n\r\nAlso allow shell to be used without a valid layout if no delegation is required.\r\n\r\n* ansible-test - Improve stderr/stdout consistency.\r\n\r\nBy default all output goes to stdout only, with the exception of a fatal error.\r\n\r\nWhen using any of the following, all output defaults to stderr instead:\r\n\r\n* sanity with the `--lint` option -- sanity messages to stdout\r\n* coverage analyze -- output to stdout if the output file is `/dev/stdout`\r\n* shell -- shell output to stdout\r\n\r\nThis fixes issues two main issues:\r\n\r\n* Unpredictable output order when using both info and error/warning messages.\r\n* Mixing of lint/command/shell output with bootstrapping messages on stdout.\r\n\r\n* ansible-test - Add changelog fragment.", "code": "def write(self, args, path): # type: (CommonConfig, str) -> None\n \n\n # NOTE: Switching the inventory generation to write JSON would be nice, but is currently not possible due to the use of hard-coded inventory filenames.\n # The name `inventory` works for the POSIX integration tests, but `inventory.winrm` and `inventory.networking` will only parse in INI format.\n # If tests are updated to use the `INVENTORY_PATH` environment variable, then this could be changed.\n # Also, some tests detect the test type by inspecting the suffix on the inventory filename, which would break if it were changed.\n\n inventory_text = ''\n\n for group, hosts in self.host_groups.items():\n inventory_text += f'[{group}]\\n'\n\n for host, variables in hosts.items():\n kvp = ' '.join(f'{key}=\"{value}\"' for key, value in variables.items())\n inventory_text += f'{host} {kvp}\\n'\n\n inventory_text += '\\n'\n\n for group, children in (self.extra_groups or {}).items():\n inventory_text += f'[{group}]\\n'\n\n for child in children:\n inventory_text += f'{child}\\n'\n\n inventory_text += '\\n'\n\n inventory_text = inventory_text.strip()\n\n if not args.explain:\n write_text_file(path, inventory_text + '\\n')\n\n display.info(f'>>> Inventory\\n{inventory_text}', verbosity=3)\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 377, "n_words": 159, "vocab_size": 109, "complexity": 8, "nloc": 17, "token_counts": 133, "n_ast_nodes": 268, "n_identifiers": 24, "d_id": 78828, "documentation": { "docstring": "Write the given inventory to the specified path on disk.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 196266, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/geometry/curve.py", "file_name": "curve.py", "fun_name": "ambient_dimension", "commit_message": "Updated import locations", "code": "def ambient_dimension(self):\n \n\n return len(self.args[0])\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 28, "n_identifiers": 4, "d_id": 47766, "documentation": { "docstring": "The dimension of the curve.\n\n Returns\n =======\n\n int :\n the dimension of curve.\n\n Examples\n ========\n\n >>> from sympy.abc import t\n >>> from sympy import Curve\n >>> C = Curve((t, t**2), (t, 0, 2))\n >>> C.ambient_dimension\n 2\n\n ", "n_words": 36, "vocab_size": 27, "n_whitespaces": 124, "language": "en" } }, { "id": 
104411, "commit_id": "e35be138148333078284b942ccc9ed7b1d826f97", "repo": "datasets", "path": "src/datasets/table.py", "file_name": "table.py", "fun_name": "from_pandas", "commit_message": "Update docs to new frontend/UI (#3690)\n\n* WIP: update docs to new UI\r\n\r\n* make style\r\n\r\n* Rm unused\r\n\r\n* inject_arrow_table_documentation __annotations__\r\n\r\n* hasattr(arrow_table_method, \"__annotations__\")\r\n\r\n* Update task_template.rst\r\n\r\n* Codeblock PT-TF-SPLIT\r\n\r\n* Convert loading scripts\r\n\r\n* Convert docs to mdx\r\n\r\n* Fix mdx\r\n\r\n* Add \r\n\r\n* Convert mdx tables\r\n\r\n* Fix codeblock\r\n\r\n* Rm unneded hashlinks\r\n\r\n* Update index.mdx\r\n\r\n* Redo dev change\r\n\r\n* Rm circle ci `build_doc` & `deploy_doc`\r\n\r\n* Rm unneeded files\r\n\r\n* Update docs reamde\r\n\r\n* Standardize to `Example::`\r\n\r\n* mdx logging levels doc\r\n\r\n* Table properties inject_arrow_table_documentation\r\n\r\n* ``` to ```py mdx\r\n\r\n* Add Tips mdx\r\n\r\n* important,None -> \r\n\r\n* More misc\r\n\r\n* Center imgs\r\n\r\n* Update instllation page\r\n\r\n* `setup.py` docs section\r\n\r\n* Rm imgs since they are in hf.co\r\n\r\n* Update docs/source/access.mdx\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\n\r\n* Update index mdx\r\n\r\n* Update docs/source/access.mdx\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\n\r\n* just `Dataset` obj\r\n\r\n* Addedversion just italics\r\n\r\n* Update ReadInstruction doc example syntax\r\n\r\n* Change docstring for `prepare_for_task`\r\n\r\n* Chore\r\n\r\n* Remove `code` syntax from headings\r\n\r\n* Rm `code` syntax from headings\r\n\r\n* Hashlink backward compatability\r\n\r\n* S3FileSystem doc\r\n\r\n* S3FileSystem doc updates\r\n\r\n* index.mdx updates\r\n\r\n* Add darkmode gifs\r\n\r\n* Index logo img css classes\r\n\r\n* Index mdx dataset logo img size\r\n\r\n* Docs for DownloadMode class\r\n\r\n* Doc DownloadMode table\r\n\r\n* format docstrings\r\n\r\n* style\r\n\r\n* Add doc builder scripts (#3790)\r\n\r\n* add doc builder scripts\r\n\r\n* fix docker image\r\n\r\n* Docs new UI actions no self hosted (#3793)\r\n\r\n* No self hosted\r\n\r\n* replace doc injection by actual docstrings\r\n\r\n* Docstring formatted\r\n\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Mishig Davaadorj \r\n\r\nCo-authored-by: Lysandre Debut \r\nCo-authored-by: Mishig Davaadorj \r\n\r\n* Rm notebooks from docs actions since they dont exi\r\n\r\n* Update tsting branch\r\n\r\n* More docstring\r\n\r\n* Chore\r\n\r\n* bump up node version\r\n\r\n* bump up node\r\n\r\n* ``` -> ```py for audio_process.mdx\r\n\r\n* Update .github/workflows/build_documentation.yml\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* Uodate dev doc build\r\n\r\n* remove run on PR\r\n\r\n* fix action\r\n\r\n* Fix gh doc workflow\r\n\r\n* forgot this change when merging master\r\n\r\n* Update build doc\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\nCo-authored-by: Lysandre Debut ", "code": "def from_pandas(cls, *args, **kwargs):\n \n return cls(pa.Table.from_pandas(*args, **kwargs))\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 28, 
"n_ast_nodes": 46, "n_identifiers": 6, "d_id": 21847, "documentation": { "docstring": "\n Convert pandas.DataFrame to an Arrow Table.\n\n The column types in the resulting Arrow Table are inferred from the\n dtypes of the pandas.Series in the DataFrame. In the case of non-object\n Series, the NumPy dtype is translated to its Arrow equivalent. In the\n case of `object`, we need to guess the datatype by looking at the\n Python objects in this Series.\n\n Be aware that Series of the `object` dtype don't carry enough\n information to always lead to a meaningful Arrow type. In the case that\n we cannot infer a type, e.g. because the DataFrame is of length 0 or\n the Series only contains None/nan objects, the type is set to\n null. This behavior can be avoided by constructing an explicit schema\n and passing it to this function.\n\n Args:\n df (:obj:`pandas.DataFrame`):\n schema (:obj:`pyarrow.Schema`, optional):\n The expected schema of the Arrow Table. This can be used to\n indicate the type of columns if we cannot infer it automatically.\n If passed, the output will have exactly this schema. Columns\n specified in the schema that are not found in the DataFrame columns\n or its index will raise an error. Additional columns or index\n levels in the DataFrame which are not specified in the schema will\n be ignored.\n preserve_index (:obj:`bool`, optional):\n Whether to store the index as an additional column in the resulting\n ``Table``. The default of None will store the index as a column,\n except for RangeIndex which is stored as metadata only. Use\n ``preserve_index=True`` to force it to be stored as a column.\n nthreads (:obj:`int`, defaults to :obj:`None` (may use up to system CPU count threads))\n If greater than 1, convert columns to Arrow in parallel using\n indicated number of threads\n columns (:obj:`List[str]`, optional):\n List of column to be converted. If None, use all columns.\n safe (:obj:`bool`, defaults to :obj:`True`):\n Check for overflows or other unsafe conversions\n\n Returns:\n :class:`datasets.table.Table`:\n\n Examples:\n ```python\n >>> import pandas as pd\n >>> import pyarrow as pa\n >>> df = pd.DataFrame({\n ... 'int': [1, 2],\n ... 'str': ['a', 'b']\n ... 
})\n >>> pa.Table.from_pandas(df)\n \n ```\n ", "n_words": 338, "vocab_size": 191, "n_whitespaces": 841, "language": "en" } }, { "id": 197219, "commit_id": "b27e2b44626d138bd6ea235fbf114644baa5b144", "repo": "sympy", "path": "sympy/functions/combinatorial/numbers.py", "file_name": "numbers.py", "fun_name": "divides", "commit_message": "Deprecate redundant static methods", "code": "def divides(p, n):\n sympy_deprecation_warning(\n ,\n deprecated_since_version=\"1.11\",\n active_deprecations_target='deprecated-carmichael-static-methods',\n )\n return n % p == 0\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 55, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 9, "token_counts": 26, "n_ast_nodes": 44, "n_identifiers": 6, "d_id": 48392, "documentation": { "docstring": "\n divides can be replaced by directly testing n % p == 0.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 27, "language": "en" } }, { "id": 12701, "commit_id": "c3849c6fee4a65a77a82b2cfda9670d727ff0f53", "repo": "jina", "path": "jina/types/request/data.py", "file_name": "data.py", "fun_name": "is_decompressed", "commit_message": "feat: allow to access parameters of data request wo loading data (#4991)", "code": "def is_decompressed(self) -> bool:\n \n return type(self._pb_body) in [\n jina_pb2.DataRequestProto,\n jina_pb2.DataRequestProtoWoData,\n ]\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 54, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 10, "token_counts": 26, "n_ast_nodes": 42, "n_identifiers": 8, "d_id": 2387, "documentation": { "docstring": "\n Checks if the underlying proto object was already deserialized into a :class:`jina.proto.jina_pb2.DataRequestProto` or\n :class:`jina.proto.jina_pb2.DataRequestProtoWoData`. 
This does not necessarily mean that the data (docs) inside the request is also decompressed.\n :return: True if the proto was deserialized before\n ", "n_words": 37, "vocab_size": 30, "n_whitespaces": 69, "language": "en" } }, { "id": 195191, "commit_id": "b1acb681207559da56a787ba96e16f0e23697d92", "repo": "ParlAI", "path": "projects/bb3/agents/module.py", "file_name": "module.py", "fun_name": "model_file_path_key", "commit_message": "Patch 8322 (#4709)\n\n* add dafetymix teacher\r\n\r\n* safety_mix teacher\r\n\r\n* safety_mix teacher pos and neg teachers\r\n\r\n* add tests for teacher\r\n\r\n* add license info\r\n\r\n* improvement\r\n\r\n* add task list\r\n\r\n* add task list and lint\r\n\r\n* add init.py\r\n\r\n* adding some patch to director\r\n\r\n* seeker changes\r\n\r\n* th\r\n\r\n* 3\r\n\r\n* jing\r\n\r\n* changes\r\n\r\n* z and r\r\n\r\n* remove .opts\r\n\r\n* fix docs\r\n\r\n* add contrractions\r\n\r\n* lint\r\n\r\nCo-authored-by: Dexter Ju \r\nCo-authored-by: Jing Xu ", "code": "def model_file_path_key(self):\n \n return f\"{self.tag_to_agent()[self.value]}_response_model_path\"\n", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 9, "n_ast_nodes": 36, "n_identifiers": 4, "d_id": 47220, "documentation": { "docstring": "\n Opt key for model file path for this agent.\n ", "n_words": 9, "vocab_size": 8, "n_whitespaces": 24, "language": "en" } }, { "id": 269309, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/activations.py", "file_name": "activations.py", "fun_name": "swish", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def swish(x):\n \n return tf.nn.silu(x)\n\n\n@keras_export(\"keras.activations.relu\")\n@tf.__internal__.dispatch.add_dispatch_support", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.activations.relu\")\n@tf.__internal__.dispatch.add_dispatch_support", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 10, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 50, "n_identifiers": 9, "d_id": 80023, "documentation": { "docstring": "Swish activation function, `swish(x) = x * sigmoid(x)`.\n\n Swish activation function which returns `x*sigmoid(x)`.\n It is a smooth, non-monotonic function that consistently matches\n or outperforms ReLU on deep networks, it is unbounded above and\n bounded below.\n\n\n Example Usage:\n\n >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32)\n >>> b = tf.keras.activations.swish(a)\n >>> b.numpy()\n array([-4.1223075e-08, -2.6894143e-01, 0.0000000e+00, 7.3105860e-01,\n 2.0000000e+01], dtype=float32)\n\n Args:\n x: Input tensor.\n\n Returns:\n The swish activation applied to `x` (see reference paper for details).\n\n Reference:\n - [Ramachandran et al., 2017](https://arxiv.org/abs/1710.05941)\n ", "n_words": 83, "vocab_size": 72, "n_whitespaces": 156, "language": "en" } }, { "id": 68812, "commit_id": "00ef499739959630cd7cf97419fbb6ca59be05f2", "repo": "erpnext", "path": "erpnext/stock/doctype/packing_slip/packing_slip.py", "file_name": "packing_slip.py", "fun_name": "item_details", "commit_message": "refactor: use db independent offset syntax (#31345)\n\n* chore: use db independent offset syntax\r\n\r\n* fix: typo\r\n\r\n* style: reformat code to black 
spec\r\n\r\nCo-authored-by: Ankush Menat ", "code": "def item_details(doctype, txt, searchfield, start, page_len, filters):\n\tfrom erpnext.controllers.queries import get_match_cond\n\n\treturn frappe.db.sql(\n\t\t\n\t\t% (\"%s\", searchfield, \"%s\", get_match_cond(doctype), \"%s\", \"%s\"),\n\t\t((filters or {}).get(\"delivery_note\"), \"%%%s%%\" % txt, page_len, start),\n\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 23, "n_words": 29, "vocab_size": 24, "complexity": 2, "nloc": 11, "token_counts": 72, "n_ast_nodes": 110, "n_identifiers": 15, "d_id": 14893, "documentation": { "docstring": "select name, item_name, description from `tabItem`\n\t\t\t\twhere name in ( select item_code FROM `tabDelivery Note Item`\n\t \t\t\t\t\t\twhere parent= %s)\n\t \t\t\tand %s like \"%s\" %s\n\t \t\t\tlimit %s offset %s ", "n_words": 28, "vocab_size": 23, "n_whitespaces": 28, "language": "en" } }, { "id": 113318, "commit_id": "f77db747d07d5c90a3a9f70bb17f71d4573f329e", "repo": "nni", "path": "nni/nas/oneshot/pytorch/base_lightning.py", "file_name": "base_lightning.py", "fun_name": "export_probs", "commit_message": "Enhancement of one-shot NAS (v2.9) (#5049)", "code": "def export_probs(self) -> dict[str, Any]:\n \n result = {}\n for module in self.nas_modules:\n try:\n result.update(module.export_probs(memo=result))\n except NotImplementedError:\n warnings.warn(\n 'Some super-modules you have used did not implement export_probs. You might find some logs are missing.',\n UserWarning\n )\n return result\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 170, "n_words": 37, "vocab_size": 36, "complexity": 3, "nloc": 22, "token_counts": 52, "n_ast_nodes": 86, "n_identifiers": 14, "d_id": 24885, "documentation": { "docstring": "\n Export the probability of every choice in the search space got chosen.\n\n .. note:: If such method of some modules is not implemented, they will be simply ignored.\n\n Returns\n -------\n dict\n In most cases, keys are names of ``nas_modules`` suffixed with ``/`` and choice name.\n Values are the probability / logits depending on the implementation.\n ", "n_words": 55, "vocab_size": 47, "n_whitespaces": 120, "language": "en" } }, { "id": 127026, "commit_id": "b1cad0a1121c06cae55aaed32f2b901b2b725521", "repo": "ray", "path": "python/ray/data/_internal/lazy_block_list.py", "file_name": "lazy_block_list.py", "fun_name": "clear", "commit_message": "[Datasets] Use detached lifetime for stats actor (#25271)\n\nThe actor handle held at Ray client will become dangling if the Ray cluster is shutdown, and in such case if the user tries to get the actor again it will result in crash. This happened in a real user and blocked them from making progress.\r\n\r\nThis change makes the stats actor detached, and instead of keeping a handle, we access it via its name. 
This way we can make sure re-create this actor if the cluster gets restarted.\r\n\r\nCo-authored-by: Ubuntu ", "code": "def clear(self):\n \n self._block_partition_refs = [None for _ in self._block_partition_refs]\n self._block_partition_meta_refs = [\n None for _ in self._block_partition_meta_refs\n ]\n self._cached_metadata = [None for _ in self._cached_metadata]\n self._stats_actor = None\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 81, "n_words": 28, "vocab_size": 16, "complexity": 4, "nloc": 7, "token_counts": 50, "n_ast_nodes": 78, "n_identifiers": 7, "d_id": 28336, "documentation": { "docstring": "Clears all object references (block partitions and base block partitions)\n from this lazy block list.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 29, "language": "en" } }, { "id": 319987, "commit_id": "0b8eff9643c12aa7c766538d8a3e4194934cf44c", "repo": "paperless-ngx", "path": ".github/scripts/github.py", "file_name": "github.py", "fun_name": "_read_all_pages", "commit_message": "Extends the cleanup of image versions to the library images and all the registry cache images as well", "code": "def _read_all_pages(self, endpoint):\n \n internal_data = []\n\n while True:\n resp = self._session.get(endpoint)\n if resp.status_code == 200:\n internal_data += resp.json()\n if \"next\" in resp.links:\n endpoint = resp.links[\"next\"][\"url\"]\n else:\n logger.debug(\"Exiting pagination loop\")\n break\n else:\n logger.warning(f\"Request to {endpoint} return HTTP {resp.status_code}\")\n break\n\n return internal_data\n\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 233, "n_words": 40, "vocab_size": 32, "complexity": 4, "nloc": 15, "token_counts": 78, "n_ast_nodes": 149, "n_identifiers": 13, "d_id": 117041, "documentation": { "docstring": "\n Helper function to read all pages of an endpoint, utilizing the\n next.url until exhausted. Assumes the endpoint returns a list\n ", "n_words": 20, "vocab_size": 19, "n_whitespaces": 43, "language": "en" } }, { "id": 206667, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/utils/functional.py", "file_name": "functional.py", "fun_name": "lazy", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def lazy(func, *resultclasses):\n \n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 6, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 31, "token_counts": 68, "n_ast_nodes": 17, "n_identifiers": 3, "d_id": 51617, "documentation": { "docstring": "\n Turn any callable into a lazy evaluated callable. result classes or types\n is required -- at least one is needed so that the automatic forcing of\n the lazy evaluation code is triggered. 
Results are not memoized; the\n function is evaluated on every access.\n ", "n_words": 43, "vocab_size": 36, "n_whitespaces": 59, "language": "en" } }, { "id": 176832, "commit_id": "a3a383f7a90e478df40bc9d746c925f2c94a5a2b", "repo": "networkx", "path": "networkx/algorithms/polynomials.py", "file_name": "polynomials.py", "fun_name": "chromatic_polynomial", "commit_message": "Chromatic polynomial (#5675)\n\nAdds chromatic_polynomial function to the graph polynomials package.", "code": "def chromatic_polynomial(G):\n r\n import sympy\n\n x = sympy.Symbol(\"x\")\n stack = deque()\n stack.append(nx.MultiGraph(G, contraction_idx=0))\n\n polynomial = 0\n while stack:\n G = stack.pop()\n edges = list(G.edges)\n if not edges:\n polynomial += (-1) ** G.graph[\"contraction_idx\"] * x ** len(G)\n else:\n e = edges[0]\n C = nx.contracted_edge(G, e, self_loops=True)\n C.graph[\"contraction_idx\"] = G.graph[\"contraction_idx\"] + 1\n C.remove_edge(e[0], e[0])\n G.remove_edge(*e)\n stack.append(G)\n stack.append(C)\n return polynomial\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 196, "n_words": 57, "vocab_size": 45, "complexity": 3, "nloc": 120, "token_counts": 154, "n_ast_nodes": 253, "n_identifiers": 22, "d_id": 42128, "documentation": { "docstring": "Returns the chromatic polynomial of `G`\n\n This function computes the chromatic polynomial via an iterative version of\n the deletion-contraction algorithm.\n\n The chromatic polynomial `X_G(x)` is a fundamental graph polynomial\n invariant in one variable. Evaluating `X_G(k)` for an natural number `k`\n enumerates the proper k-colorings of `G`.\n\n There are several equivalent definitions; here are three:\n\n Def 1 (explicit formula):\n For `G` an undirected graph, `c(G)` the number of connected components of\n `G`, `E` the edge set of `G`, and `G(S)` the spanning subgraph of `G` with\n edge set `S` [1]_:\n\n .. math::\n\n X_G(x) = \\sum_{S \\subseteq E} (-1)^{|S|} x^{c(G(S))}\n\n\n Def 2 (interpolating polynomial):\n For `G` an undirected graph, `n(G)` the number of vertices of `G`, `k_0 = 0`,\n and `k_i` the number of distinct ways to color the vertices of `G` with `i`\n unique colors (for `i` a natural number at most `n(G)`), `X_G(x)` is the\n unique Lagrange interpolating polynomial of degree `n(G)` through the points\n `(0, k_0), (1, k_1), \\dots, (n(G), k_{n(G)})` [2]_.\n\n\n Def 3 (chromatic recurrence):\n For `G` an undirected graph, `G-e` the graph obtained from `G` by deleting\n edge `e`, `G/e` the graph obtained from `G` by contracting edge `e`, `n(G)`\n the number of vertices of `G`, and `e(G)` the number of edges of `G` [3]_:\n\n .. math::\n X_G(x) = \\begin{cases}\n \t x^{n(G)}, & \\text{if $e(G)=0$} \\\\\n X_{G-e}(x) - X_{G/e}(x), & \\text{otherwise, for an arbitrary edge $e$}\n \\end{cases}\n\n This formulation is also known as the Fundamental Reduction Theorem [4]_.\n\n\n Parameters\n ----------\n G : NetworkX graph\n\n Returns\n -------\n instance of `sympy.core.add.Add`\n A Sympy expression representing the chromatic polynomial for `G`.\n\n Examples\n --------\n >>> C = nx.cycle_graph(5)\n >>> nx.chromatic_polynomial(C)\n x**5 - 5*x**4 + 10*x**3 - 10*x**2 + 4*x\n\n >>> G = nx.complete_graph(4)\n >>> nx.chromatic_polynomial(G)\n x**4 - 6*x**3 + 11*x**2 - 6*x\n\n Notes\n -----\n Interpretation of the coefficients is discussed in [5]_. 
Several special\n cases are listed in [2]_.\n\n The chromatic polynomial is a specialization of the Tutte polynomial; in\n particular, `X_G(x) = `T_G(x, 0)` [6]_.\n\n The chromatic polynomial may take negative arguments, though evaluations\n may not have chromatic interpretations. For instance, `X_G(-1)` enumerates\n the acyclic orientations of `G` [7]_.\n\n References\n ----------\n .. [1] D. B. West,\n \"Introduction to Graph Theory,\" p. 222\n .. [2] E. W. Weisstein\n \"Chromatic Polynomial\"\n MathWorld--A Wolfram Web Resource\n https://mathworld.wolfram.com/ChromaticPolynomial.html\n .. [3] D. B. West,\n \"Introduction to Graph Theory,\" p. 221\n .. [4] J. Zhang, J. Goodall,\n \"An Introduction to Chromatic Polynomials\"\n https://math.mit.edu/~apost/courses/18.204_2018/Julie_Zhang_paper.pdf\n .. [5] R. C. Read,\n \"An Introduction to Chromatic Polynomials\"\n Journal of Combinatorial Theory, 1968\n https://math.berkeley.edu/~mrklug/ReadChromatic.pdf\n .. [6] W. T. Tutte,\n \"Graph-polynomials\"\n Advances in Applied Mathematics, 2004\n https://www.sciencedirect.com/science/article/pii/S0196885803000411\n .. [7] R. P. Stanley,\n \"Acyclic orientations of graphs\"\n Discrete Mathematics, 2006\n https://math.mit.edu/~rstan/pubs/pubfiles/18.pdf\n ", "n_words": 437, "vocab_size": 259, "n_whitespaces": 745, "language": "en" } }, { "id": 196052, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/fp_groups.py", "file_name": "fp_groups.py", "fun_name": "reidemeister_presentation", "commit_message": "Updated import locations", "code": "def reidemeister_presentation(fp_grp, H, C=None, homomorphism=False):\n \n if not C:\n C = coset_enumeration_r(fp_grp, H)\n C.compress(); C.standardize()\n define_schreier_generators(C, homomorphism=homomorphism)\n reidemeister_relators(C)\n gens, rels = C._schreier_generators, C._reidemeister_relators\n gens, rels = simplify_presentation(gens, rels, change_gens=True)\n\n C.schreier_generators = tuple(gens)\n C.reidemeister_relators = tuple(rels)\n\n if homomorphism:\n _gens = []\n for gen in gens:\n _gens.append(C._schreier_gen_elem[str(gen)])\n return C.schreier_generators, C.reidemeister_relators, _gens\n\n return C.schreier_generators, C.reidemeister_relators\n\n\nFpGroupElement = FreeGroupElement\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 125, "n_words": 54, "vocab_size": 41, "complexity": 4, "nloc": 16, "token_counts": 136, "n_ast_nodes": 217, "n_identifiers": 25, "d_id": 47552, "documentation": { "docstring": "\n Parameters\n ==========\n\n fp_group: A finitely presented group, an instance of FpGroup\n H: A subgroup whose presentation is to be found, given as a list\n of words in generators of `fp_grp`\n homomorphism: When set to True, return a homomorphism from the subgroup\n to the parent group\n\n Examples\n ========\n\n >>> from sympy.combinatorics import free_group\n >>> from sympy.combinatorics.fp_groups import FpGroup, reidemeister_presentation\n >>> F, x, y = free_group(\"x, y\")\n\n Example 5.6 Pg. 177 from [1]\n >>> f = FpGroup(F, [x**3, y**5, (x*y)**2])\n >>> H = [x*y, x**-1*y**-1*x*y*x]\n >>> reidemeister_presentation(f, H)\n ((y_1, y_2), (y_1**2, y_2**3, y_2*y_1*y_2*y_1*y_2*y_1))\n\n Example 5.8 Pg. 183 from [1]\n >>> f = FpGroup(F, [x**3, y**3, (x*y)**3])\n >>> H = [x*y, x*y**-1]\n >>> reidemeister_presentation(f, H)\n ((x_0, y_0), (x_0**3, y_0**3, x_0*y_0*x_0*y_0*x_0*y_0))\n\n Exercises Q2. 
Pg 187 from [1]\n >>> f = FpGroup(F, [x**2*y**2, y**-1*x*y*x**-3])\n >>> H = [x]\n >>> reidemeister_presentation(f, H)\n ((x_0,), (x_0**4,))\n\n Example 5.9 Pg. 183 from [1]\n >>> f = FpGroup(F, [x**3*y**-3, (x*y)**3, (x*y**-1)**2])\n >>> H = [x]\n >>> reidemeister_presentation(f, H)\n ((x_0,), (x_0**6,))\n\n ", "n_words": 160, "vocab_size": 96, "n_whitespaces": 276, "language": "en" } }, { "id": 201228, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/auth_tests/test_forms.py", "file_name": "test_forms.py", "fun_name": "test_preserve_username_case", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_preserve_username_case(self):\n \n user = User.objects.create_user(\"forms_test2\", \"tesT@EXAMple.com\", \"test\")\n self.assertEqual(user.email, \"tesT@example.com\")\n user = User.objects.create_user(\"forms_test3\", \"tesT\", \"test\")\n self.assertEqual(user.email, \"tesT\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 51, "n_words": 16, "vocab_size": 12, "complexity": 1, "nloc": 5, "token_counts": 54, "n_ast_nodes": 99, "n_identifiers": 8, "d_id": 49906, "documentation": { "docstring": "\n Preserve the case of the user name (before the @ in the email address)\n when creating a user (#5605).\n ", "n_words": 19, "vocab_size": 15, "n_whitespaces": 41, "language": "en" } }, { "id": 7519, "commit_id": "ed8d9cf20843744f18593b22fb6a30eaf5f325eb", "repo": "ludwig", "path": "ludwig/utils/triton_utils.py", "file_name": "triton_utils.py", "fun_name": "clean_up_synthetic_data", "commit_message": "Triton ensemble export (#2251)", "code": "def clean_up_synthetic_data():\n \n shutil.rmtree(\"audio_files\", ignore_errors=True)\n shutil.rmtree(\"image_files\", ignore_errors=True)\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 15, "n_words": 6, "vocab_size": 5, "complexity": 1, "nloc": 3, "token_counts": 25, "n_ast_nodes": 46, "n_identifiers": 4, "d_id": 1227, "documentation": { "docstring": "Clean up synthetic example generated data for audio and image features.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 280201, "commit_id": "e6f739a31247c43a86c37c33b0b8b2ba6be6a5f6", "repo": "keras", "path": "keras/saving/experimental/saving_lib.py", "file_name": "saving_lib.py", "fun_name": "load_weights_only", "commit_message": "- Add standalone weights file saving/loading functionality.\n- Switch to in-memory, single write / single read archive saving for better performance.\n- Remove ability to pick between zipping or not zipping a Keras saved artifact: it's always a zip archive now.\n\nPiperOrigin-RevId: 483705728", "code": "def load_weights_only(model, filepath):\n \n temp_dir = None\n archive = None\n if filepath.endswith(\".weights.h5\"):\n # TODO: download file if h5 filepath is remote\n weights_store = H5IOStore(filepath, mode=\"r\")\n elif filepath.endswith(\".keras\"):\n archive = zipfile.ZipFile(filepath, \"r\")\n weights_store = H5IOStore(\n _VARS_FNAME + \".h5\", archive=archive, mode=\"r\"\n )\n\n _load_state(\n model,\n weights_handler=weights_store,\n assets_handler=None,\n inner_path=\"\",\n visited_trackables=set(),\n )\n weights_store.close()\n if temp_dir and tf.io.gfile.exists(temp_dir):\n tf.io.gfile.rmtree(temp_dir)\n if archive:\n archive.close()\n\n", "url": 
"https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 180, "n_words": 55, "vocab_size": 43, "complexity": 6, "nloc": 22, "token_counts": 126, "n_ast_nodes": 212, "n_identifiers": 24, "d_id": 83286, "documentation": { "docstring": "Load the weights of a model from a filepath (.keras or .weights.h5).\n\n Note: only supports h5 for now.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 24, "language": "en" } }, { "id": 119883, "commit_id": "998d60dd07d2c33438f606307de0276bcf110428", "repo": "jax", "path": "jax/_src/scipy/sparse/linalg.py", "file_name": "linalg.py", "fun_name": "cg", "commit_message": "DOC: clarify parameter types in cg/bicgstab", "code": "def cg(A, b, x0=None, *, tol=1e-5, atol=0.0, maxiter=None, M=None):\n \n return _isolve(_cg_solve,\n A=A, b=b, x0=x0, tol=tol, atol=atol,\n maxiter=maxiter, M=M, check_symmetric=True)\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 53, "n_words": 19, "vocab_size": 19, "complexity": 1, "nloc": 4, "token_counts": 71, "n_ast_nodes": 91, "n_identifiers": 11, "d_id": 26708, "documentation": { "docstring": "Use Conjugate Gradient iteration to solve ``Ax = b``.\n\n The numerics of JAX's ``cg`` should exact match SciPy's ``cg`` (up to\n numerical precision), but note that the interface is slightly different: you\n need to supply the linear operator ``A`` as a function instead of a sparse\n matrix or ``LinearOperator``.\n\n Derivatives of ``cg`` are implemented via implicit differentiation with\n another ``cg`` solve, rather than by differentiating *through* the solver.\n They will be accurate only if both solves converge.\n\n Parameters\n ----------\n A: ndarray or function\n 2D array or function that calculates the linear map (matrix-vector\n product) ``Ax`` when called like ``A(x)``. ``A`` must represent a\n hermitian, positive definite matrix, and must return array(s) with the\n same structure and shape as its argument.\n b : array or tree of arrays\n Right hand side of the linear system representing a single vector. Can be\n stored as an array or Python container of array(s) with any shape.\n\n Returns\n -------\n x : array or tree of arrays\n The converged solution. Has the same structure as ``b``.\n info : None\n Placeholder for convergence information. In the future, JAX will report\n the number of iterations when convergence is not achieved, like SciPy.\n\n Other Parameters\n ----------------\n x0 : array or tree of arrays\n Starting guess for the solution. Must have the same structure as ``b``.\n tol, atol : float, optional\n Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.\n We do not implement SciPy's \"legacy\" behavior, so JAX's tolerance will\n differ from SciPy unless you explicitly pass ``atol`` to SciPy's ``cg``.\n maxiter : integer\n Maximum number of iterations. Iteration will stop after maxiter\n steps even if the specified tolerance has not been achieved.\n M : ndarray or function\n Preconditioner for A. The preconditioner should approximate the\n inverse of A. 
Effective preconditioning dramatically improves the\n rate of convergence, which implies that fewer iterations are needed\n to reach a given error tolerance.\n\n See also\n --------\n scipy.sparse.linalg.cg\n jax.lax.custom_linear_solve\n ", "n_words": 314, "vocab_size": 205, "n_whitespaces": 438, "language": "en" } }, { "id": 204841, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/base/creation.py", "file_name": "creation.py", "fun_name": "_get_database_display_str", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _get_database_display_str(self, verbosity, database_name):\n \n return \"'%s'%s\" % (\n self.connection.alias,\n (\" ('%s')\" % database_name) if verbosity >= 2 else \"\",\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 63, "n_words": 20, "vocab_size": 19, "complexity": 2, "nloc": 5, "token_counts": 33, "n_ast_nodes": 55, "n_identifiers": 6, "d_id": 50920, "documentation": { "docstring": "\n Return display string for a database for use in various actions.\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 26, "language": "en" } }, { "id": 81641, "commit_id": "e87fabe6bb84691472ab67e5da737c9fe515cf3f", "repo": "awx", "path": "awx/main/tasks/jobs.py", "file_name": "jobs.py", "fun_name": "pre_run_hook", "commit_message": "Submit job to dispatcher as part of transaction (#12573)\n\nMake it so that submitting a task to the dispatcher happens as part of the transaction.\r\n this applies to dispatcher task \"publishers\" which NOTIFY the pg_notify queue\r\n if the transaction is not successful, it will not be sent, as per postgres docs\r\n\r\nThis keeps current behavior for pg_notify listeners\r\n practically, this only applies for the awx-manage run_dispatcher service\r\n this requires creating a separate connection and keeping it long-lived\r\n arbitrary code will occasionally close the main connection, which would stop listening\r\n\r\nStop sending the waiting status websocket message\r\n this is required because the ordering cannot be maintained with other changes here\r\n the instance group data is moved to the running websocket message payload\r\n\r\nMove call to create_partition from task manager to pre_run_hook\r\n mock this in relevant unit tests", "code": "def pre_run_hook(self, instance, private_data_dir):\n \n instance.log_lifecycle(\"pre_run\")\n\n # Before task is started, ensure that job_event partitions exist\n create_partition(instance.event_class._meta.db_table, start=instance.created)\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 45, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 3, "token_counts": 32, "n_ast_nodes": 54, "n_identifiers": 11, "d_id": 17240, "documentation": { "docstring": "\n Hook for any steps to run before the job/task starts\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 259567, "commit_id": "b4da3b406379b241bf5e81d0f60bbcddd424625b", "repo": "scikit-learn", "path": "sklearn/linear_model/_base.py", "file_name": "_base.py", "fun_name": "make_dataset", "commit_message": "MNT ensure creation of dataset is deterministic in SGD (#19716)\n\nCo-authored-by: Guillaume Lemaitre \r\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>", "code": "def 
make_dataset(X, y, sample_weight, random_state=None):\n \n\n rng = check_random_state(random_state)\n # seed should never be 0 in SequentialDataset64\n seed = rng.randint(1, np.iinfo(np.int32).max)\n\n if X.dtype == np.float32:\n CSRData = CSRDataset32\n ArrayData = ArrayDataset32\n else:\n CSRData = CSRDataset64\n ArrayData = ArrayDataset64\n\n if sp.issparse(X):\n dataset = CSRData(X.data, X.indptr, X.indices, y, sample_weight, seed=seed)\n intercept_decay = SPARSE_INTERCEPT_DECAY\n else:\n X = np.ascontiguousarray(X)\n dataset = ArrayData(X, y, sample_weight, seed=seed)\n intercept_decay = 1.0\n\n return dataset, intercept_decay\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 156, "n_words": 66, "vocab_size": 43, "complexity": 3, "nloc": 17, "token_counts": 130, "n_ast_nodes": 197, "n_identifiers": 30, "d_id": 75821, "documentation": { "docstring": "Create ``Dataset`` abstraction for sparse and dense inputs.\n\n This also returns the ``intercept_decay`` which is different\n for sparse datasets.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_features)\n Training data\n\n y : array-like, shape (n_samples, )\n Target values.\n\n sample_weight : numpy array of shape (n_samples,)\n The weight of each sample\n\n random_state : int, RandomState instance or None (default)\n Determines random number generation for dataset random sampling. It is not\n used for dataset shuffling.\n Pass an int for reproducible output across multiple function calls.\n See :term:`Glossary `.\n\n Returns\n -------\n dataset\n The ``Dataset`` abstraction\n intercept_decay\n The intercept decay\n ", "n_words": 95, "vocab_size": 74, "n_whitespaces": 197, "language": "en" } }, { "id": 735, "commit_id": "b2768484a1b5720be74c78335502cd996e0b1895", "repo": "PySyft", "path": "packages/syft/tests/syft/core/tensor/tensor_serde_test.py", "file_name": "tensor_serde_test.py", "fun_name": "test_big_ndept", "commit_message": "WIP: Having issue with 10M NDEPT serde\n\n- options.traversalLimitInWords; Message is too large.", "code": "def test_big_ndept() -> None:\n \n # for multiplier in [1, 10, 100, 1000]:\n \n for multiplier in [10]:\n ndim = 1_000_000\n rows = 1\n cols = 7\n num_entites = 1000\n\n upper = highest()\n lower = lowest()\n reference_data = np.random.randint(\n lower, upper, size=(multiplier * ndim, rows, cols), dtype=np.int32\n )\n big_ndept = NDEPT(\n child=reference_data,\n entities=[ishan() * num_entites],\n max_vals=make_bounds(reference_data, upper),\n min_vals=make_bounds(reference_data, lower),\n )\n\n ndept_metrics = time_and_size_serde(big_ndept)\n print(multiplier, ndept_metrics)\n # break\n assert False\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 229, "n_words": 67, "vocab_size": 53, "complexity": 2, "nloc": 27, "token_counts": 118, "n_ast_nodes": 185, "n_identifiers": 28, "d_id": 107, "documentation": { "docstring": "Create big NDEPTs\n failed capnp deserialize capnp/serialize.c++:197:\n failed: expected totalWords <= options.traversalLimitInWords;\n Message is too large. 
To increase the limit on the receiving end,\n see capnp::ReaderOptions.\n ", "n_words": 26, "vocab_size": 25, "n_whitespaces": 42, "language": "en" } }, { "id": 221679, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/configparser.py", "file_name": "configparser.py", "fun_name": "popitem", "commit_message": "add python 3.10.4 for windows", "code": "def popitem(self):\n \n for key in self.sections():\n value = self[key]\n del self[key]\n return key, value\n raise KeyError\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 70, "n_words": 16, "vocab_size": 14, "complexity": 2, "nloc": 6, "token_counts": 32, "n_ast_nodes": 52, "n_identifiers": 6, "d_id": 56473, "documentation": { "docstring": "Remove a section from the parser and return it as\n a (section_name, section_proxy) tuple. If no section is present, raise\n KeyError.\n\n The section DEFAULT is never returned because it cannot be removed.\n ", "n_words": 32, "vocab_size": 27, "n_whitespaces": 60, "language": "en" } }, { "id": 104788, "commit_id": "1904d0c0a3a96330d9b870cdca3e9a3a137f2977", "repo": "datasets", "path": "src/datasets/dataset_dict.py", "file_name": "dataset_dict.py", "fun_name": "cleanup_cache_files", "commit_message": "Add code examples for DatasetDict (#4245)\n\n* 📝 add code examples for DatasetDict\r\n\r\n* 🖍 apply quentin review", "code": "def cleanup_cache_files(self) -> Dict[str, int]:\n \n self._check_values_type()\n return {k: dataset.cleanup_cache_files() for k, dataset in self.items()}\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 35, "n_words": 14, "vocab_size": 14, "complexity": 2, "nloc": 18, "token_counts": 38, "n_ast_nodes": 62, "n_identifiers": 9, "d_id": 21970, "documentation": { "docstring": "Clean up all cache files in the dataset cache directory, excepted the currently used cache file if there is one.\n Be careful when running this command that no other process is currently using other cache files.\n\n Return:\n Dict with the number of removed files for each split\n\n Example:\n\n ```py\n >>> from datasets import load_dataset\n >>> ds = load_dataset(\"rotten_tomatoes\")\n >>> ds.cleanup_cache_files()\n {'test': 0, 'train': 0, 'validation': 0}\n ```\n ", "n_words": 67, "vocab_size": 55, "n_whitespaces": 148, "language": "en" } }, { "id": 262741, "commit_id": "080d95d83bb7f60ce2ec25b0c81c207d303ec46c", "repo": "pyinstaller", "path": "tests/unit/test_recursion_limit.py", "file_name": "test_recursion_limit.py", "fun_name": "test_recursion_to_deep", "commit_message": "Drop Python 3.6 support.", "code": "def test_recursion_to_deep(large_import_chain):\n \n if is_win:\n pytest.xfail(\"Worker is known to crash on Windows.\")\n path, script = large_import_chain\n mg = modulegraph.ModuleGraph(path)\n # Increase recursion limit to 5 times of the default. 
Given the module import chain created above\n # this still should fail.\n with pytest.raises(RecursionError):\n mg.add_script(str(script))\n\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 78, "n_words": 43, "vocab_size": 39, "complexity": 2, "nloc": 7, "token_counts": 45, "n_ast_nodes": 83, "n_identifiers": 14, "d_id": 77340, "documentation": { "docstring": "\n modulegraph is recursive and triggers RecursionError if nesting of imported modules is to deep.\n This can be worked around by increasing recursion limit.\n\n With the default recursion limit (1000), the recursion error occurs at about 115 modules, with limit 2000\n (as tested below) at about 240 modules, and with limit 5000 at about 660 modules.\n ", "n_words": 55, "vocab_size": 42, "n_whitespaces": 71, "language": "en" } }, { "id": 205824, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/sql/compiler.py", "file_name": "compiler.py", "fun_name": "field_as_sql", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def field_as_sql(self, field, val):\n \n if field is None:\n # A field value of None means the value is raw.\n sql, params = val, []\n elif hasattr(val, \"as_sql\"):\n # This is an expression, let's compile it.\n sql, params = self.compile(val)\n elif hasattr(field, \"get_placeholder\"):\n # Some fields (e.g. geo fields) need special munging before\n # they can be inserted.\n sql, params = field.get_placeholder(val, self, self.connection), [val]\n else:\n # Return the common case for the placeholder\n sql, params = \"%s\", [val]\n\n # The following hook is only used by Oracle Spatial, which sometimes\n # needs to yield 'NULL' and [] as its placeholder and params instead\n # of '%s' and [None]. The 'NULL' placeholder is produced earlier by\n # OracleOperations.get_geom_placeholder(). The following line removes\n # the corresponding None parameter. See ticket #10888.\n params = self.connection.ops.modify_insert_params(sql, params)\n\n return sql, params\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 319, "n_words": 136, "vocab_size": 90, "complexity": 4, "nloc": 11, "token_counts": 98, "n_ast_nodes": 168, "n_identifiers": 12, "d_id": 51226, "documentation": { "docstring": "\n Take a field and a value intended to be saved on that field, and\n return placeholder SQL and accompanying params. 
Check for raw values,\n expressions, and fields with get_placeholder() defined in that order.\n\n When field is None, consider the value raw and use it as the\n placeholder, with no corresponding parameters returned.\n ", "n_words": 52, "vocab_size": 41, "n_whitespaces": 95, "language": "en" } }, { "id": 199984, "commit_id": "a0989bcfd26470833cf03737941bfd80f511c745", "repo": "sympy", "path": "sympy/physics/qho_1d.py", "file_name": "qho_1d.py", "fun_name": "psi_n", "commit_message": "applied backtick correction to the remainder of the project", "code": "def psi_n(n, x, m, omega):\n \n\n # sympify arguments\n n, x, m, omega = map(S, [n, x, m, omega])\n nu = m * omega / hbar\n # normalization coefficient\n C = (nu/pi)**Rational(1, 4) * sqrt(1/(2**n*factorial(n)))\n\n return C * exp(-nu* x**2 /2) * hermite(n, sqrt(nu)*x)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 64, "n_words": 43, "vocab_size": 31, "complexity": 1, "nloc": 5, "token_counts": 97, "n_ast_nodes": 146, "n_identifiers": 16, "d_id": 49473, "documentation": { "docstring": "\n Returns the wavefunction psi_{n} for the One-dimensional harmonic oscillator.\n\n Parameters\n ==========\n\n n :\n the \"nodal\" quantum number. Corresponds to the number of nodes in the\n wavefunction. ``n >= 0``\n x :\n x coordinate.\n m :\n Mass of the particle.\n omega :\n Angular frequency of the oscillator.\n\n Examples\n ========\n\n >>> from sympy.physics.qho_1d import psi_n\n >>> from sympy.abc import m, x, omega\n >>> psi_n(0, x, m, omega)\n (m*omega)**(1/4)*exp(-m*omega*x**2/(2*hbar))/(hbar**(1/4)*pi**(1/4))\n\n ", "n_words": 66, "vocab_size": 46, "n_whitespaces": 146, "language": "en" } }, { "id": 289881, "commit_id": "dde763418a1c4ee0ecff17de76b6d670670a3bb7", "repo": "core", "path": "homeassistant/components/lifx/coordinator.py", "file_name": "coordinator.py", "fun_name": "async_get_hev_cycle", "commit_message": "Add an RSSI sensor to the LIFX integration (#80993)", "code": "async def async_get_hev_cycle(self) -> None:\n \n if lifx_features(self.device)[\"hev\"]:\n await async_execute_lifx(self.device.get_hev_cycle)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 34, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 4, "token_counts": 28, "n_ast_nodes": 51, "n_identifiers": 6, "d_id": 89009, "documentation": { "docstring": "Update the HEV cycle status from a LIFX Clean bulb.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 197085, "commit_id": "a25ba231f9c3fd6518f9ae81d1df0323898b9e44", "repo": "sympy", "path": "sympy/utilities/iterables.py", "file_name": "iterables.py", "fun_name": "subsets", "commit_message": "Optimization of subsets() to use return rather than yield from\n\nBy avoiding the use of yield in the body of iterables.subsets, Python\nsees it as just a regular function rather than a generator. 
Hence it\ncan call generators and return the resulting generator objects,\navoiding some overhead from a layer of yield from handling.", "code": "def subsets(seq, k=None, repetition=False):\n r\n if k is None:\n if not repetition:\n return chain.from_iterable((combinations(seq, k)\n for k in range(len(seq) + 1)))\n else:\n return chain.from_iterable((combinations_with_replacement(seq, k)\n for k in range(len(seq) + 1)))\n else:\n if not repetition:\n return combinations(seq, k)\n else:\n return combinations_with_replacement(seq, k)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 200, "n_words": 42, "vocab_size": 23, "complexity": 6, "nloc": 52, "token_counts": 100, "n_ast_nodes": 155, "n_identifiers": 10, "d_id": 48328, "documentation": { "docstring": "Generates all `k`-subsets (combinations) from an `n`-element set, ``seq``.\n\n A `k`-subset of an `n`-element set is any subset of length exactly `k`. The\n number of `k`-subsets of an `n`-element set is given by ``binomial(n, k)``,\n whereas there are `2^n` subsets all together. If `k` is ``None`` then all\n `2^n` subsets will be returned from shortest to longest.\n\n Examples\n ========\n\n >>> from sympy.utilities.iterables import subsets\n\n ``subsets(seq, k)`` will return the `\\frac{n!}{k!(n - k)!}` `k`-subsets (combinations)\n without repetition, i.e. once an item has been removed, it can no\n longer be \"taken\":\n\n >>> list(subsets([1, 2], 2))\n [(1, 2)]\n >>> list(subsets([1, 2]))\n [(), (1,), (2,), (1, 2)]\n >>> list(subsets([1, 2, 3], 2))\n [(1, 2), (1, 3), (2, 3)]\n\n\n ``subsets(seq, k, repetition=True)`` will return the `\\frac{(n - 1 + k)!}{k!(n - 1)!}`\n combinations *with* repetition:\n\n >>> list(subsets([1, 2], 2, repetition=True))\n [(1, 1), (1, 2), (2, 2)]\n\n If you ask for more items than are in the set you get the empty set unless\n you allow repetitions:\n\n >>> list(subsets([0, 1], 3, repetition=False))\n []\n >>> list(subsets([0, 1], 3, repetition=True))\n [(0, 0, 0), (0, 0, 1), (0, 1, 1), (1, 1, 1)]\n\n ", "n_words": 184, "vocab_size": 117, "n_whitespaces": 313, "language": "en" } }, { "id": 309149, "commit_id": "ea5b18c1ef16b64cd7916f2540692ab5de2d2edf", "repo": "core", "path": "tests/components/august/test_binary_sensor.py", "file_name": "test_binary_sensor.py", "fun_name": "test_doorbell_update_via_pubnub", "commit_message": "Split august motion and image capture binary sensors (#62154)", "code": "async def test_doorbell_update_via_pubnub(hass):\n \n doorbell_one = await _mock_doorbell_from_fixture(hass, \"get_doorbell.json\")\n pubnub = AugustPubNub()\n\n await _create_august_with_devices(hass, [doorbell_one], pubnub=pubnub)\n assert doorbell_one.pubsub_channel == \"7c7a6672-59c8-3333-ffff-dcd98705cccc\"\n\n binary_sensor_k98gidt45gul_name_motion = hass.states.get(\n \"binary_sensor.k98gidt45gul_name_motion\"\n )\n assert binary_sensor_k98gidt45gul_name_motion.state == STATE_OFF\n binary_sensor_k98gidt45gul_name_ding = hass.states.get(\n \"binary_sensor.k98gidt45gul_name_ding\"\n )\n assert binary_sensor_k98gidt45gul_name_ding.state == STATE_OFF\n\n pubnub.message(\n pubnub,\n Mock(\n channel=doorbell_one.pubsub_channel,\n timetoken=_timetoken(),\n message={\n \"status\": \"imagecapture\",\n \"data\": {\n \"result\": {\n \"created_at\": \"2021-03-16T01:07:08.817Z\",\n \"secure_url\": \"https://dyu7azbnaoi74.cloudfront.net/zip/images/zip.jpeg\",\n },\n },\n },\n ),\n )\n\n await hass.async_block_till_done()\n\n 
binary_sensor_k98gidt45gul_name_image_capture = hass.states.get(\n \"binary_sensor.k98gidt45gul_name_image_capture\"\n )\n assert binary_sensor_k98gidt45gul_name_image_capture.state == STATE_ON\n\n pubnub.message(\n pubnub,\n Mock(\n channel=doorbell_one.pubsub_channel,\n timetoken=_timetoken(),\n message={\n \"status\": \"doorbell_motion_detected\",\n \"data\": {\n \"event\": \"doorbell_motion_detected\",\n \"image\": {\n \"height\": 640,\n \"width\": 480,\n \"format\": \"jpg\",\n \"created_at\": \"2021-03-16T02:36:26.886Z\",\n \"bytes\": 14061,\n \"secure_url\": \"https://dyu7azbnaoi74.cloudfront.net/images/1f8.jpeg\",\n \"url\": \"https://dyu7azbnaoi74.cloudfront.net/images/1f8.jpeg\",\n \"etag\": \"09e839331c4ea59eef28081f2caa0e90\",\n },\n \"doorbellName\": \"Front Door\",\n \"callID\": None,\n \"origin\": \"mars-api\",\n \"mutableContent\": True,\n },\n },\n ),\n )\n\n await hass.async_block_till_done()\n\n binary_sensor_k98gidt45gul_name_motion = hass.states.get(\n \"binary_sensor.k98gidt45gul_name_motion\"\n )\n assert binary_sensor_k98gidt45gul_name_motion.state == STATE_ON\n\n binary_sensor_k98gidt45gul_name_ding = hass.states.get(\n \"binary_sensor.k98gidt45gul_name_ding\"\n )\n assert binary_sensor_k98gidt45gul_name_ding.state == STATE_OFF\n\n new_time = dt_util.utcnow() + datetime.timedelta(seconds=40)\n native_time = datetime.datetime.now() + datetime.timedelta(seconds=40)\n with patch(\n \"homeassistant.components.august.binary_sensor._native_datetime\",\n return_value=native_time,\n ):\n async_fire_time_changed(hass, new_time)\n await hass.async_block_till_done()\n\n binary_sensor_k98gidt45gul_name_image_capture = hass.states.get(\n \"binary_sensor.k98gidt45gul_name_image_capture\"\n )\n assert binary_sensor_k98gidt45gul_name_image_capture.state == STATE_OFF\n\n pubnub.message(\n pubnub,\n Mock(\n channel=doorbell_one.pubsub_channel,\n timetoken=_timetoken(),\n message={\n \"status\": \"buttonpush\",\n },\n ),\n )\n await hass.async_block_till_done()\n\n binary_sensor_k98gidt45gul_name_ding = hass.states.get(\n \"binary_sensor.k98gidt45gul_name_ding\"\n )\n assert binary_sensor_k98gidt45gul_name_ding.state == STATE_ON\n new_time = dt_util.utcnow() + datetime.timedelta(seconds=40)\n native_time = datetime.datetime.now() + datetime.timedelta(seconds=40)\n with patch(\n \"homeassistant.components.august.binary_sensor._native_datetime\",\n return_value=native_time,\n ):\n async_fire_time_changed(hass, new_time)\n await hass.async_block_till_done()\n\n binary_sensor_k98gidt45gul_name_ding = hass.states.get(\n \"binary_sensor.k98gidt45gul_name_ding\"\n )\n assert binary_sensor_k98gidt45gul_name_ding.state == STATE_OFF\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 1162, "n_words": 211, "vocab_size": 87, "complexity": 1, "nloc": 109, "token_counts": 475, "n_ast_nodes": 819, "n_identifiers": 33, "d_id": 107857, "documentation": { "docstring": "Test creation of a doorbell that can be updated via pubnub.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 124698, "commit_id": "2cdb76789e6d0d59928891a4b520f588b7844edf", "repo": "ray", "path": "python/ray/tests/test_tracing.py", "file_name": "test_tracing.py", "fun_name": "get_span_dict", "commit_message": "Bump pytest from 5.4.3 to 7.0.1 (#26334)\n\nSee #23676 for context. This is another attempt at that as I figured out what's going wrong in `bazel test`. 
Supersedes #24828.\r\n\r\nNow that there are Python 3.10 wheels for Ray 1.13 and this is no longer a blocker for supporting Python 3.10, I still want to make `bazel test //python/ray/tests/...` work for developing in a 3.10 env, and make it easier to add Python 3.10 tests to CI in future.\r\n\r\nThe change contains three commits with rather descriptive commit message, which I repeat here:\r\n\r\nPass deps to py_test in py_test_module_list\r\n\r\n Bazel macro py_test_module_list takes a `deps` argument, but completely\r\n ignores it instead of passes it to `native.py_test`. Fixing that as we\r\n are going to use deps of py_test_module_list in BUILD in later changes.\r\n\r\n cpp/BUILD.bazel depends on the broken behaviour: it deps-on a cc_library\r\n from a py_test, which isn't working, see upstream issue:\r\n https://github.com/bazelbuild/bazel/issues/701.\r\n This is fixed by simply removing the (non-working) deps.\r\n\r\nDepend on conftest and data files in Python tests BUILD files\r\n\r\n Bazel requires that all the files used in a test run should be\r\n represented in the transitive dependencies specified for the test\r\n target. For py_test, it means srcs, deps and data.\r\n\r\n Bazel enforces this constraint by creating a \"runfiles\" directory,\r\n symbolic links files in the dependency closure and run the test in the\r\n \"runfiles\" directory, so that the test shouldn't see files not in the\r\n dependency graph.\r\n\r\n Unfortunately, the constraint does not apply for a large number of\r\n Python tests, due to pytest (>=3.9.0, <6.0) resolving these symbolic\r\n links during test collection and effectively \"breaks out\" of the\r\n runfiles tree.\r\n\r\n pytest >= 6.0 introduces a breaking change and removed the symbolic link\r\n resolving behaviour, see pytest pull request\r\n https://github.com/pytest-dev/pytest/pull/6523 for more context.\r\n\r\n Currently, we are underspecifying dependencies in a lot of BUILD files\r\n and thus blocking us from updating to newer pytest (for Python 3.10\r\n support). This change hopefully fixes all of them, and at least those in\r\n CI, by adding data or source dependencies (mostly for conftest.py-s)\r\n where needed.\r\n\r\nBump pytest version from 5.4.3 to 7.0.1\r\n\r\n We want at least pytest 6.2.5 for Python 3.10 support, but not past\r\n 7.1.0 since it drops Python 3.6 support (which Ray still supports), thus\r\n the version constraint is set to <7.1.\r\n\r\n Updating pytest, combined with earlier BUILD fixes, changed the ground\r\n truth of a few error message based unit test, these tests are updated to\r\n reflect the change.\r\n\r\n There are also two small drive-by changes for making test_traceback and\r\n test_cli pass under Python 3.10. These are discovered while debugging CI\r\n failures (on earlier Python) with a Python 3.10 install locally. 
Expect\r\n more such issues when adding Python 3.10 to CI.", "code": "def get_span_dict(span_list):\n \n strip_prefix = \"python.ray.tests.\"\n span_names = {}\n for span in span_list:\n span_name = span[\"name\"]\n if span_name.startswith(strip_prefix):\n span_name = span_name[len(strip_prefix) :]\n if span_name in span_names:\n span_names[span_name] += 1\n else:\n span_names[span_name] = 1\n return span_names\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 110, "n_words": 34, "vocab_size": 23, "complexity": 4, "nloc": 12, "token_counts": 63, "n_ast_nodes": 107, "n_identifiers": 8, "d_id": 27661, "documentation": { "docstring": "Given a list of span names, return dictionary of span names.", "n_words": 11, "vocab_size": 9, "n_whitespaces": 10, "language": "en" } }, { "id": 12508, "commit_id": "ef662b529b2a2eecea7bb99759a9f7b9d86d3062", "repo": "jina", "path": "jina/serve/runtimes/gateway/grpc/__init__.py", "file_name": "__init__.py", "fun_name": "dry_run", "commit_message": "feat: add grpc health checking (#4779)", "code": "async def dry_run(self, empty, context) -> jina_pb2.StatusProto:\n \n from docarray import DocumentArray\n from jina.clients.request import request_generator\n from jina.enums import DataInputType\n from jina.serve.executors import __dry_run_endpoint__\n\n da = DocumentArray()\n\n try:\n req_iterator = request_generator(\n exec_endpoint=__dry_run_endpoint__,\n data=da,\n data_type=DataInputType.DOCUMENT,\n )", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "async def dry_run(self, empty, context) -> jina_pb2.StatusProto:\n \"\"\"\n Process the the call requested by having a dry run call to every Executor in the graph\n\n :param empty: The service expects an empty protobuf message\n :param context: grpc context\n :returns: the response request\n \"\"\"\n from docarray import DocumentArray\n from jina.clients.request import request_generator\n from jina.enums import DataInputType\n from jina.serve.executors import __dry_run_endpoint__\n\n da = DocumentArray()\n\n try:\n req_iterator = request_generator(\n exec_endpoint=__dry_run_endpoint__,\n data=da,\n data_type=DataInputType.DOCUMENT,\n )", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 150, "n_words": 34, "vocab_size": 27, "complexity": 3, "nloc": 28, "token_counts": 121, "n_ast_nodes": 100, "n_identifiers": 23, "d_id": 2326, "documentation": { "docstring": "\n Process the the call requested by having a dry run call to every Executor in the graph\n\n :param empty: The service expects an empty protobuf message\n :param context: grpc context\n :returns: the response request\n ", "n_words": 34, "vocab_size": 29, "n_whitespaces": 70, "language": "en" } }, { "id": 102829, "commit_id": "1454af2d416f0eb738c2268ee3297cacb0215dd0", "repo": "kitty", "path": "kitty_tests/open_actions.py", "file_name": "open_actions.py", "fun_name": "test_parsing_of_open_actions", "commit_message": "macOS: Allow customizing the launch actions", "code": "def test_parsing_of_open_actions(self):\n from kitty.open_actions import actions_for_url, KeyAction\n spec = \n", "url": "https://github.com/kovidgoyal/kitty.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 22, "token_counts": 68, "n_ast_nodes": 28, "n_identifiers": 7, "d_id": 21573, "documentation": { "docstring": "\nprotocol file\nmime 
text/*\nfragment_matches .\nAcTion launch $EDITOR $FILE_PATH $FRAGMENT\naction\n\nprotocol file\nmime text/*\naction ignored\n\next py,txt\naction one\naction two\n", "n_words": 24, "vocab_size": 17, "n_whitespaces": 13, "language": "en" } }, { "id": 176163, "commit_id": "dec723f072eb997a497a159dbe8674cd39999ee9", "repo": "networkx", "path": "networkx/generators/small.py", "file_name": "small.py", "fun_name": "dodecahedral_graph", "commit_message": "Docstrings for the small.py module (#5240)\n\n* added description for the first 5 small graphs\r\n\r\n* modified descriptions based on comment and added description for two more functions\r\n\r\n* added doctrings to all the functions\r\n\r\n* Minor touchups.\r\n\r\nCo-authored-by: Ross Barnowski ", "code": "def dodecahedral_graph(create_using=None):\n \n G = LCF_graph(20, [10, 7, 4, -4, -7, 10, -4, 7, -7, 4], 2, create_using)\n G.name = \"Dodecahedral Graph\"\n return G\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 35, "n_words": 23, "vocab_size": 18, "complexity": 1, "nloc": 4, "token_counts": 51, "n_ast_nodes": 74, "n_identifiers": 5, "d_id": 41733, "documentation": { "docstring": "\n Returns the Platonic Dodecahedral graph.\n\n The dodecahedral graph has 20 nodes and 30 edges. The skeleton of the\n dodecahedron forms a graph. It is one of 5 Platonic graphs [1]_.\n It can be described in LCF notation as:\n ``[10, 7, 4, -4, -7, 10, -4, 7, -7, 4]^2`` [2]_.\n\n Parameters\n ----------\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n Returns\n -------\n G : networkx Graph\n Dodecahedral Graph with 20 nodes and 30 edges\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Regular_dodecahedron#Dodecahedral_graph\n .. 
[2] https://mathworld.wolfram.com/DodecahedralGraph.html\n\n ", "n_words": 91, "vocab_size": 69, "n_whitespaces": 153, "language": "en" } }, { "id": 217396, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/fractions.py", "file_name": "fractions.py", "fun_name": "as_integer_ratio", "commit_message": "add python 3.10.4 for windows", "code": "def as_integer_ratio(self):\n \n return (self._numerator, self._denominator)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 27, "n_identifiers": 4, "d_id": 54748, "documentation": { "docstring": "Return the integer ratio as a tuple.\n\n Return a tuple of two integers, whose ratio is equal to the\n Fraction and with a positive denominator.\n ", "n_words": 25, "vocab_size": 20, "n_whitespaces": 46, "language": "en" } }, { "id": 66028, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/erpnext_integrations/taxjar_integration.py", "file_name": "taxjar_integration.py", "fun_name": "get_iso_3166_2_state_code", "commit_message": "style: format code with black", "code": "def get_iso_3166_2_state_code(address):\n\timport pycountry\n\n\tcountry_code = frappe.db.get_value(\"Country\", address.get(\"country\"), \"code\")\n\n\terror_message = _(\n\t\t\n\t).format(address.get(\"state\"))\n\tstate = address.get(\"state\").upper().strip()\n\n\t# The max length for ISO state codes is 3, excluding the country code\n\tif len(state) <= 3:\n\t\t# PyCountry returns state code as {country_code}-{state-code} (e.g. US-FL)\n\t\taddress_state = (country_code + \"-\" + state).upper()\n\n\t\tstates = pycountry.subdivisions.get(country_code=country_code.upper())\n\t\tstates = [pystate.code for pystate in states]\n\n\t\tif address_state in states:\n\t\t\treturn state\n\n\t\tfrappe.throw(_(error_message))\n\telse:\n\t\ttry:\n\t\t\tlookup_state = pycountry.subdivisions.lookup(state)\n\t\texcept LookupError:\n\t\t\tfrappe.throw(_(error_message))\n\t\telse:\n\t\t\treturn lookup_state.code.split(\"-\")[1]\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 56, "n_words": 78, "vocab_size": 58, "complexity": 5, "nloc": 21, "token_counts": 162, "n_ast_nodes": 280, "n_identifiers": 25, "d_id": 14090, "documentation": { "docstring": "{0} is not a valid state! Check for typos or enter the ISO code for your state.", "n_words": 17, "vocab_size": 16, "n_whitespaces": 16, "language": "en" } }, { "id": 265277, "commit_id": "23f391c5b59d5e01321cf5b83e5337c45f9a09ac", "repo": "netbox", "path": "netbox/netbox/models/features.py", "file_name": "features.py", "fun_name": "snapshot", "commit_message": "Closes #9228: Add serialize_object() method to ChangeLoggingMixin", "code": "def snapshot(self):\n \n self._prechange_snapshot = self.serialize_object()\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 28, "n_identifiers": 4, "d_id": 78058, "documentation": { "docstring": "\n Save a snapshot of the object's current state in preparation for modification. 
The snapshot is saved as\n `_prechange_snapshot` on the instance.\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 43, "language": "en" } }, { "id": 47692, "commit_id": "49e336ae0302b386a2f47269a6d13988382d975f", "repo": "airflow", "path": "tests/utils/test_task_group.py", "file_name": "test_task_group.py", "fun_name": "test_sub_dag_task_group", "commit_message": "Replace usage of `DummyOperator` with `EmptyOperator` (#22974)\n\n* Replace usage of `DummyOperator` with `EmptyOperator`", "code": "def test_sub_dag_task_group():\n \n execution_date = pendulum.parse(\"20200101\")\n with DAG(\"test_test_task_group_sub_dag\", start_date=execution_date) as dag:\n task1 = EmptyOperator(task_id=\"task1\")\n with TaskGroup(\"group234\") as group234:\n _ = EmptyOperator(task_id=\"task2\")\n\n with TaskGroup(\"group34\") as group34:\n _ = EmptyOperator(task_id=\"task3\")\n _ = EmptyOperator(task_id=\"task4\")\n\n with TaskGroup(\"group6\") as group6:\n _ = EmptyOperator(task_id=\"task6\")\n\n task7 = EmptyOperator(task_id=\"task7\")\n task5 = EmptyOperator(task_id=\"task5\")\n\n task1 >> group234\n group34 >> task5\n group234 >> group6\n group234 >> task7\n\n subdag = dag.partial_subset(task_ids_or_regex=\"task5\", include_upstream=True, include_downstream=False)\n\n assert extract_node_id(task_group_to_dict(subdag.task_group)) == {\n 'id': None,\n 'children': [\n {\n 'id': 'group234',\n 'children': [\n {\n 'id': 'group234.group34',\n 'children': [\n {'id': 'group234.group34.task3'},\n {'id': 'group234.group34.task4'},\n {'id': 'group234.group34.downstream_join_id'},\n ],\n },\n {'id': 'group234.upstream_join_id'},\n ],\n },\n {'id': 'task1'},\n {'id': 'task5'},\n ],\n }\n\n edges = dag_edges(subdag)\n assert sorted((e[\"source_id\"], e[\"target_id\"]) for e in edges) == [\n ('group234.group34.downstream_join_id', 'task5'),\n ('group234.group34.task3', 'group234.group34.downstream_join_id'),\n ('group234.group34.task4', 'group234.group34.downstream_join_id'),\n ('group234.upstream_join_id', 'group234.group34.task3'),\n ('group234.upstream_join_id', 'group234.group34.task4'),\n ('task1', 'group234.upstream_join_id'),\n ]\n\n subdag_task_groups = subdag.task_group.get_task_group_dict()\n assert subdag_task_groups.keys() == {None, \"group234\", \"group234.group34\"}\n\n included_group_ids = {\"group234\", \"group234.group34\"}\n included_task_ids = {'group234.group34.task3', 'group234.group34.task4', 'task1', 'task5'}\n\n for task_group in subdag_task_groups.values():\n assert task_group.upstream_group_ids.issubset(included_group_ids)\n assert task_group.downstream_group_ids.issubset(included_group_ids)\n assert task_group.upstream_task_ids.issubset(included_task_ids)\n assert task_group.downstream_task_ids.issubset(included_task_ids)\n\n for task in subdag.task_group:\n assert task.upstream_task_ids.issubset(included_task_ids)\n assert task.downstream_task_ids.issubset(included_task_ids)\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 732, "n_words": 160, "vocab_size": 97, "complexity": 4, "nloc": 60, "token_counts": 396, "n_ast_nodes": 704, "n_identifiers": 41, "d_id": 9214, "documentation": { "docstring": "\n Tests dag.partial_subset() updates task_group correctly.\n ", "n_words": 5, "vocab_size": 5, "n_whitespaces": 12, "language": "en" } }, { "id": 207392, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_scripts/tests.py", "file_name": "tests.py", 
"fun_name": "test_app_with_import", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_app_with_import(self):\n ", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "\"\"\"manage.py check does noterrors when an app imports a", "n_ast_errors": 2, "ast_levels": 6, "n_whitespaces": 9, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 15, "token_counts": 63, "n_ast_nodes": 28, "n_identifiers": 13, "d_id": 51948, "documentation": { "docstring": "manage.py check does not raise errors when an app imports a base", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 189207, "commit_id": "08542a0ad75642b52eebf15b3e052e3387904b05", "repo": "aws-cli", "path": "awscli/customizations/s3/filegenerator.py", "file_name": "filegenerator.py", "fun_name": "normalize_sort", "commit_message": "Fix a few typos", "code": "def normalize_sort(self, names, os_sep, character):\n \n names.sort(key=lambda item: item.replace(os_sep, character))\n", "url": "https://github.com/aws/aws-cli.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 30, "n_ast_nodes": 47, "n_identifiers": 9, "d_id": 46015, "documentation": { "docstring": "\n The purpose of this function is to ensure that the same path separator\n is used when sorting. In windows, the path operator is a backslash as\n opposed to a forward slash which can lead to differences in sorting\n between s3 and a windows machine.\n ", "n_words": 44, "vocab_size": 36, "n_whitespaces": 81, "language": "en" } }, { "id": 218069, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/importlib/_bootstrap.py", "file_name": "_bootstrap.py", "fun_name": "_load_module_shim", "commit_message": "add python 3.10.4 for windows", "code": "def _load_module_shim(self, fullname):\n \n msg = (\"the load_module() method is deprecated and slated for removal in \"\n \"Python 3.12; use exec_module() instead\")\n _warnings.warn(msg, DeprecationWarning)\n spec = spec_from_loader(fullname, self)\n if fullname in sys.modules:\n module = sys.modules[fullname]\n _exec(spec, module)\n return sys.modules[fullname]\n else:\n return _load(spec)\n\n# Module specifications #######################################################\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 99, "n_words": 45, "vocab_size": 40, "complexity": 2, "nloc": 11, "token_counts": 65, "n_ast_nodes": 108, "n_identifiers": 14, "d_id": 55111, "documentation": { "docstring": "Load the specified module into sys.modules and return it.\n\n This method is deprecated. 
Use loader.exec_module() instead.\n\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 23, "language": "en" } }, { "id": 222679, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/command/build_ext.py", "file_name": "build_ext.py", "fun_name": "get_ext_fullpath", "commit_message": "add python 3.10.4 for windows", "code": "def get_ext_fullpath(self, ext_name):\n \n fullname = self.get_ext_fullname(ext_name)\n modpath = fullname.split('.')\n filename = self.get_ext_filename(modpath[-1])\n\n if not self.inplace:\n # no further work needed\n # returning :\n # build_dir/package/path/filename\n filename = os.path.join(*modpath[:-1]+[filename])\n return os.path.join(self.build_lib, filename)\n\n # the inplace option requires to find the package directory\n # using the build_py command for that\n package = '.'.join(modpath[0:-1])\n build_py = self.get_finalized_command('build_py')\n package_dir = os.path.abspath(build_py.get_package_dir(package))\n\n # returning\n # package_dir/filename\n return os.path.join(package_dir, filename)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 214, "n_words": 64, "vocab_size": 44, "complexity": 2, "nloc": 11, "token_counts": 123, "n_ast_nodes": 209, "n_identifiers": 20, "d_id": 56693, "documentation": { "docstring": "Returns the path of the filename for a given extension.\n\n The file is located in `build_lib` or directly in the package\n (inplace option).\n ", "n_words": 23, "vocab_size": 20, "n_whitespaces": 44, "language": "en" } }, { "id": 182485, "commit_id": "237c556673f058a60aa59f441a8bbb7c953be55f", "repo": "textual", "path": "src/textual/message_pump.py", "file_name": "message_pump.py", "fun_name": "check_idle", "commit_message": "refactor of compositor", "code": "def check_idle(self):\n \n if self._message_queue.empty():\n self.post_message_no_wait(messages.Prompt(sender=self))\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 30, "n_words": 5, "vocab_size": 5, "complexity": 2, "nloc": 3, "token_counts": 28, "n_ast_nodes": 49, "n_identifiers": 8, "d_id": 43842, "documentation": { "docstring": "Prompt the message pump to call idle if the queue is empty.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 320923, "commit_id": "5616a99eff34f7074641d1391ed77d6b4b743529", "repo": "qutebrowser", "path": "qutebrowser/utils/message.py", "file_name": "message.py", "fun_name": "flush", "commit_message": "Add a MessageInfo data class\n\nPreparation for #7246", "code": "def flush(self) -> None:\n \n self._connected = True\n for info in self._cache:\n self.show(**dataclasses.asdict(info))\n self._cache = []\n\n\nglobal_bridge = GlobalMessageBridge()\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 56, "n_words": 18, "vocab_size": 16, "complexity": 2, "nloc": 10, "token_counts": 38, "n_ast_nodes": 73, "n_identifiers": 10, "d_id": 117438, "documentation": { "docstring": "Flush messages which accumulated while no handler was connected.\n\n This is so we don't miss messages shown during some early init phase.\n It needs to be called once the show_message signal is connected.\n ", "n_words": 33, "vocab_size": 30, "n_whitespaces": 54, "language": "en" } }, { "id": 4171, 
"commit_id": "63af98e3b999d4b223237b51472a819915c5a558", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-recurly/source_recurly/streams.py", "file_name": "streams.py", "fun_name": "default_params", "commit_message": "🎉 Recurly Schema Revamp (#9866)\n\n* Cleanup Recurly connector schemas\r\n\r\n* Add more Recurly schemas to the connector\r\n\r\n- `billing_infos`\r\n- `shipping_addresses`\r\n- `shipping_methods`\r\n- `subscription_changes`\r\n\r\n* Add Recurly `add-on` resouce\r\n\r\n* Add Recurly's account notes resource schema\r\n\r\n* Add unique coupons to Recurly source\r\n\r\n* Add credit payments to Recurly connector\r\n\r\n* Add Recurly resources to integration tests configurations\r\n\r\n* Bump Recurly source version to `0.4.0`\r\n\r\n* Add `line_items` Recurly resource\r\n\r\n* Add `line_items` to Recurly documentation\r\n\r\n* Add missing `line_items` JSON schema\r\n\r\n* Replace Subscription Change Recurly API call with Subscription `pending_changes` field\r\n\r\n* Replace Recurly unique coupon codes API call with coupons `unique_coupon` field\r\n\r\nTo avoid the extra API call to import unique coupon calls\r\n\r\n* Revert \"Replace Recurly unique coupon codes API call with coupons `unique_coupon` field\"\r\n\r\nThis reverts commit 1c4592d82da3c5e5e0026dda8eb2ed7a896ac5b8.\r\n\r\n* Add `end_time` parameter to Recurly connector\r\n\r\n* Order Recurly specs\r\n\r\n* Set the Recurly `begin_time` and `end_time` to be optional\r\n\r\n* Add `order` to Recurly `source_spec.yaml`\r\n\r\n* Add `maxLength` to Recurly source schemas\r\n\r\n* Set `maxLength` for Recurly Subscription and Transaction `uuid`\r\n\r\n* Fix Recurly `export_dates` acceptance tests", "code": "def default_params(self) -> dict:\n \n return {\"order\": \"asc\", \"sort\": self.sort_key, \"limit\": self.limit}\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 25, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 5, "token_counts": 26, "n_ast_nodes": 49, "n_identifiers": 5, "d_id": 628, "documentation": { "docstring": "\n Returns the parameters to be sent together with the API call to Recurly\n ", "n_words": 13, "vocab_size": 11, "n_whitespaces": 28, "language": "en" } }, { "id": 68806, "commit_id": "00ef499739959630cd7cf97419fbb6ca59be05f2", "repo": "erpnext", "path": "erpnext/projects/utils.py", "file_name": "utils.py", "fun_name": "query_task", "commit_message": "refactor: use db independent offset syntax (#31345)\n\n* chore: use db independent offset syntax\r\n\r\n* fix: typo\r\n\r\n* style: reformat code to black spec\r\n\r\nCo-authored-by: Ankush Menat ", "code": "def query_task(doctype, txt, searchfield, start, page_len, filters):\n\tfrom frappe.desk.reportview import build_match_conditions\n\n\tsearch_string = \"%%%s%%\" % txt\n\torder_by_string = \"%s%%\" % txt\n\tmatch_conditions = build_match_conditions(\"Task\")\n\tmatch_conditions = (\"and\" + match_conditions) if match_conditions else \"\"\n\n\treturn frappe.db.sql(\n\t\t\n\t\t% (searchfield, \"%s\", \"%s\", match_conditions, \"%s\", searchfield, \"%s\", searchfield, \"%s\", \"%s\"),\n\t\t(search_string, search_string, order_by_string, order_by_string, page_len, start),\n\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 43, "n_words": 53, "vocab_size": 37, "complexity": 2, "nloc": 18, "token_counts": 96, "n_ast_nodes": 150, 
"n_identifiers": 16, "d_id": 14887, "documentation": { "docstring": "select name, subject from `tabTask`\n\t\twhere (`%s` like %s or `subject` like %s) %s\n\t\torder by\n\t\t\tcase when `subject` like %s then 0 else 1 end,\n\t\t\tcase when `%s` like %s then 0 else 1 end,\n\t\t\t`%s`,\n\t\t\tsubject\n\t\tlimit %s offset %s", "n_words": 42, "vocab_size": 25, "n_whitespaces": 34, "language": "en" } }, { "id": 209104, "commit_id": "3df072ecb66b53251f8ec66b0bf7129a649166ae", "repo": "scapy", "path": "scapy/utils.py", "file_name": "utils.py", "fun_name": "write", "commit_message": "Add ERF Ethernet Support", "code": "def write(self, pkt): # type: ignore\n # type: (_PacketIterable) -> None\n \n # Import here to avoid circular dependency\n from scapy.supersocket import IterSocket\n for p in IterSocket(pkt).iter:\n self.write_packet(p)\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 74, "n_words": 27, "vocab_size": 24, "complexity": 2, "nloc": 4, "token_counts": 30, "n_ast_nodes": 52, "n_identifiers": 9, "d_id": 52606, "documentation": { "docstring": "\n Writes a Packet, a SndRcvList object, or bytes to a ERF file.\n\n :param pkt: Packet(s) to write (one record for each Packet)\n :type pkt: iterable[scapy.packet.Packet], scapy.packet.Packet\n ", "n_words": 26, "vocab_size": 22, "n_whitespaces": 55, "language": "en" } }, { "id": 20079, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/distro.py", "file_name": "distro.py", "fun_name": "info", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def info(self, pretty=False, best=False):\n # type: (bool, bool) -> InfoDict\n \n return dict(\n id=self.id(),\n version=self.version(pretty, best),\n version_parts=dict(\n major=self.major_version(best),\n minor=self.minor_version(best),\n build_number=self.build_number(best),\n ),\n like=self.like(),\n codename=self.codename(),\n )\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 162, "n_words": 23, "vocab_size": 23, "complexity": 1, "nloc": 12, "token_counts": 86, "n_ast_nodes": 130, "n_identifiers": 15, "d_id": 3224, "documentation": { "docstring": "\n Return certain machine-readable information about the OS\n distribution.\n\n For details, see :func:`distro.info`.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 41, "language": "en" } }, { "id": 264315, "commit_id": "e03593d86f3082c19255ae24f39d1ed860a04c4d", "repo": "netbox", "path": "netbox/netbox/views/generic/bulk_views.py", "file_name": "bulk_views.py", "fun_name": "get_form", "commit_message": "Move get_extra_context() to base views", "code": "def get_form(self):\n ", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 9, "n_words": 2, 
"vocab_size": 2, "complexity": 2, "nloc": 6, "token_counts": 37, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 77684, "documentation": { "docstring": "\n Provide a standard bulk delete form if none has been specified for the view\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 29, "language": "en" } }, { "id": 221357, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/code.py", "file_name": "code.py", "fun_name": "push", "commit_message": "add python 3.10.4 for windows", "code": "def push(self, line):\n \n self.buffer.append(line)\n source = \"\\n\".join(self.buffer)\n more = self.runsource(source, self.filename)\n if not more:\n self.resetbuffer()\n return more\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 70, "n_words": 17, "vocab_size": 15, "complexity": 2, "nloc": 7, "token_counts": 49, "n_ast_nodes": 84, "n_identifiers": 11, "d_id": 56372, "documentation": { "docstring": "Push a line to the interpreter.\n\n The line should not have a trailing newline; it may have\n internal newlines. The line is appended to a buffer and the\n interpreter's runsource() method is called with the\n concatenated contents of the buffer as source. If this\n indicates that the command was executed or invalid, the buffer\n is reset; otherwise, the command is incomplete, and the buffer\n is left as it was after the line was appended. The return\n value is 1 if more input is required, 0 if the line was dealt\n with in some way (this is the same as runsource()).\n\n ", "n_words": 100, "vocab_size": 60, "n_whitespaces": 173, "language": "en" } }, { "id": 259027, "commit_id": "1c94c0b0be3b9146aae41376f3f4ef3853e0ca97", "repo": "scikit-learn", "path": "sklearn/naive_bayes.py", "file_name": "naive_bayes.py", "fun_name": "_update_feature_log_prob", "commit_message": "DOC Add abstract methods to _BaseDiscreteNB (#22596)\n\nCo-authored-by: avm19 <52547519avm19@users.noreply.github.com>\r\nCo-authored-by: Thomas J. Fan \r\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>", "code": "def _update_feature_log_prob(self, alpha):\n \n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 10, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 1, "token_counts": 8, "n_ast_nodes": 15, "n_identifiers": 3, "d_id": 75533, "documentation": { "docstring": "Update feature log probabilities based on counts.\n\n This method is called each time `fit` or `partial_fit` update the\n model.\n\n Parameters\n ----------\n alpha : float\n smoothing parameter. See :meth:`_check_alpha`.\n ", "n_words": 28, "vocab_size": 28, "n_whitespaces": 81, "language": "en" } }, { "id": 39008, "commit_id": "e2abad62317180f0a2f9200f892320afff3a1dda", "repo": "recommenders", "path": "recommenders/evaluation/tf_evaluation.py", "file_name": "tf_evaluation.py", "fun_name": "accuracy", "commit_message": "added newlines", "code": "def accuracy(rating_true, rating_pred):\n \n\n with tf.compat.v1.name_scope(\"accuracy\"):\n\n # define and apply the mask\n mask = tf.not_equal(rating_true, 0)\n n_values = tf.reduce_sum(input_tensor=tf.cast(mask, \"float32\"), axis=1)\n\n # Take the difference between the input data and the inferred ones. 
This value is zero whenever\n # the two values coincides\n vd = tf.compat.v1.where(\n mask, x=tf.abs(tf.subtract(rating_true, rating_pred)), y=tf.ones_like(rating_true)\n )\n\n # correct values: find the location where rating_true = rating_pred\n corr = tf.cast(tf.equal(vd, 0), \"float32\")\n\n # evaluate accuracy\n accuracy_score = tf.reduce_mean(\n input_tensor=tf.compat.v1.div(\n tf.reduce_sum(input_tensor=corr, axis=1), n_values\n )\n )\n\n return accuracy_score\n", "url": "https://github.com/microsoft/recommenders.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 220, "n_words": 79, "vocab_size": 59, "complexity": 1, "nloc": 14, "token_counts": 139, "n_ast_nodes": 224, "n_identifiers": 26, "d_id": 7074, "documentation": { "docstring": "Accuracy\n\n Evaluates accuracy evaluated on the rated items only (rated items are the ones with non-zero ratings).\n\n :math:`accuracy = 1/m \\sum_{mu=1}^{m} \\sum{i=1}^Nv 1/s(i) I(rating_true - rating_pred = 0)_{mu,i}`\n\n where `m = Nusers`, `Nv = number of items = number of visible units` and `s(i)` is the number of non-zero elements\n per row.\n\n Args:\n rating_true (tf.Tensor, float32): True Data.\n rating_pred (tf.Tensor, float32): Predicted Data.\n\n Returns:\n tf.Tensor: accuracy.\n\n ", "n_words": 66, "vocab_size": 49, "n_whitespaces": 108, "language": "en" } }, { "id": 261230, "commit_id": "5d8a1994620713c2e4226fb8e40fef7e81af1103", "repo": "scikit-learn", "path": "sklearn/utils/extmath.py", "file_name": "extmath.py", "fun_name": "density", "commit_message": "API Deprecate the extra keyword arguments of utils.extmath.density (#24523)\n\nCo-authored-by: Meekail Zain <34613774+Micky774@users.noreply.github.com>\r\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>", "code": "def density(w, **kwargs):\n \n if kwargs:\n warnings.warn(\n \"Additional keyword arguments are deprecated in version 1.2 and will be\"\n \" removed in version 1.4.\",\n FutureWarning,\n )\n\n if hasattr(w, \"toarray\"):\n d = float(w.nnz) / (w.shape[0] * w.shape[1])\n else:\n d = 0 if w is None else float((w != 0).sum()) / w.size\n return d\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 126, "n_words": 50, "vocab_size": 42, "complexity": 4, "nloc": 12, "token_counts": 82, "n_ast_nodes": 135, "n_identifiers": 13, "d_id": 76700, "documentation": { "docstring": "Compute density of a sparse vector.\n\n Parameters\n ----------\n w : array-like\n The sparse vector.\n **kwargs : keyword arguments\n Ignored.\n\n .. 
deprecated:: 1.2\n ``**kwargs`` were deprecated in version 1.2 and will be removed in\n 1.4.\n\n Returns\n -------\n float\n The density of w, between 0 and 1.\n ", "n_words": 45, "vocab_size": 36, "n_whitespaces": 119, "language": "en" } }, { "id": 133242, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/sgd/tests/test_torch_2.py", "file_name": "test_torch_2.py", "fun_name": "test_dataset", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_dataset(ray_start_4_cpus, use_local):\n \n\n model_creator = mlp_identity.model_creator\n optimizer_creator = mlp_identity.optimizer_creator\n dataset_creator = mlp_identity.dataset_creator\n\n DatasetOperator = TrainingOperator.from_creators(\n model_creator=model_creator,\n optimizer_creator=optimizer_creator,\n loss_creator=nn.MSELoss,\n )\n\n trainer = TorchTrainer(\n training_operator_cls=DatasetOperator,\n use_local=use_local,\n num_workers=2,\n )\n\n dataset = dataset_creator()\n for i in range(5):\n trainer.train(dataset=dataset, num_steps=100)\n\n x = mlp_identity.to_mat(0.5)\n prediction = float(trainer.get_model()(x)[0][0])\n assert 0.4 <= prediction <= 0.6\n trainer.shutdown()\n\n\n@pytest.mark.parametrize(\"use_local\", [True, False])", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"use_local\", [True, False])", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 141, "n_words": 51, "vocab_size": 41, "complexity": 2, "nloc": 21, "token_counts": 130, "n_ast_nodes": 216, "n_identifiers": 31, "d_id": 29963, "documentation": { "docstring": "\n This test tries training the mlp_identity example. 
We check the accuracy of\n the model as an all inclusive way of ensuring that we are properly sharding\n and iterating over the entire dataset (instead of repeating the first set\n of points for example).\n ", "n_words": 42, "vocab_size": 35, "n_whitespaces": 58, "language": "en" } }, { "id": 181619, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tests/export_tests.py", "file_name": "export_tests.py", "fun_name": "test_export_pipeline_5", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def test_export_pipeline_5():\n \n pipeline_string = (\n 'DecisionTreeRegressor(SelectFromModel(input_matrix, '\n 'SelectFromModel__ExtraTreesRegressor__max_features=0.05, SelectFromModel__ExtraTreesRegressor__n_estimators=100, '\n 'SelectFromModel__threshold=0.05), DecisionTreeRegressor__max_depth=8,'\n 'DecisionTreeRegressor__min_samples_leaf=5, DecisionTreeRegressor__min_samples_split=5)'\n )\n pipeline = creator.Individual.from_string(pipeline_string, tpot_obj_reg._pset)\n expected_code = \n assert expected_code == export_pipeline(pipeline, tpot_obj_reg.operators, tpot_obj_reg._pset)\n\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 74, "n_words": 27, "vocab_size": 22, "complexity": 1, "nloc": 31, "token_counts": 45, "n_ast_nodes": 82, "n_identifiers": 11, "d_id": 43407, "documentation": { "docstring": "Assert that exported_pipeline() generated a compile source file as expected given a fixed simple pipeline with SelectFromModel.import numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import ExtraTreesRegressor\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.tree import DecisionTreeRegressor\n\n# NOTE: Make sure that the outcome column is labeled 'target' in the data file\ntpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)\nfeatures = tpot_data.drop('target', axis=1)\ntraining_features, testing_features, training_target, testing_target = \\\\\n train_test_split(features, tpot_data['target'], random_state=None)\n\nexported_pipeline = make_pipeline(\n SelectFromModel(estimator=ExtraTreesRegressor(max_features=0.05, n_estimators=100), threshold=0.05),\n DecisionTreeRegressor(max_depth=8, min_samples_leaf=5, min_samples_split=5)\n)\n\nexported_pipeline.fit(training_features, training_target)\nresults = exported_pipeline.predict(testing_features)\n", "n_words": 92, "vocab_size": 73, "n_whitespaces": 94, "language": "en" } }, { "id": 65129, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/party.py", "file_name": "party.py", "fun_name": "get_party_gle_currency", "commit_message": "style: format code with black", "code": "def get_party_gle_currency(party_type, party, company):\n\tdef generator():\n\t\texisting_gle_currency = frappe.db.sql(\n\t\t\t,\n\t\t\t{\"company\": company, \"party_type\": party_type, \"party\": party},\n\t\t)\n\n\t\treturn existing_gle_currency[0][0] if existing_gle_currency else None\n\n\treturn frappe.local_cache(\n\t\t\"party_gle_currency\", (party_type, party, company), generator, regenerate_if_none=True\n\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, 
"n_whitespaces": 22, "n_words": 32, "vocab_size": 27, "complexity": 1, "nloc": 5, "token_counts": 32, "n_ast_nodes": 109, "n_identifiers": 11, "d_id": 13801, "documentation": { "docstring": "select account_currency from `tabGL Entry`\n\t\t\twhere docstatus=1 and company=%(company)s and party_type=%(party_type)s and party=%(party)s\n\t\t\tlimit 1", "n_words": 15, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 206293, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/template/loaders/base.py", "file_name": "base.py", "fun_name": "get_template", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_template(self, template_name, skip=None):\n \n tried = []\n\n for origin in self.get_template_sources(template_name):\n if skip is not None and origin in skip:\n tried.append((origin, \"Skipped to avoid recursion\"))\n continue\n\n try:\n contents = self.get_contents(origin)\n except TemplateDoesNotExist:\n tried.append((origin, \"Source does not exist\"))\n continue\n else:\n return Template(\n contents,\n origin,\n origin.template_name,\n self.engine,\n )\n\n raise TemplateDoesNotExist(template_name, tried=tried)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 302, "n_words": 49, "vocab_size": 43, "complexity": 5, "nloc": 19, "token_counts": 98, "n_ast_nodes": 155, "n_identifiers": 13, "d_id": 51471, "documentation": { "docstring": "\n Call self.get_template_sources() and return a Template object for\n the first template matching template_name. If skip is provided, ignore\n template origins in skip. This is used to avoid recursion during\n template extending.\n ", "n_words": 31, "vocab_size": 28, "n_whitespaces": 67, "language": "en" } }, { "id": 79676, "commit_id": "c8689acb3724dc12fb09a0bfc14d7e4755a1ea0f", "repo": "wagtail", "path": "wagtail/models/reference_index.py", "file_name": "reference_index.py", "fun_name": "model_is_indexable", "commit_message": "Check field for .extract_references method instead of field type\n\nCo-authored-by: Matt Westcott ", "code": "def model_is_indexable(cls, model, allow_child_models=False):\n \n if getattr(model, \"wagtail_reference_index_ignore\", False):\n return False\n\n # Don't check any models that have a parental key, references from these will be collected from the parent\n if not allow_child_models and any(\n [isinstance(field, ParentalKey) for field in model._meta.get_fields()]\n ):\n return False\n\n for field in model._meta.get_fields():\n if field.is_relation and field.many_to_one:\n if getattr(field, \"wagtail_reference_index_ignore\", False):\n continue\n\n if getattr(\n field.related_model, \"wagtail_reference_index_ignore\", False\n ):\n continue\n\n if isinstance(field, (ParentalKey, GenericRel)):\n continue\n\n return True\n\n if hasattr(field, \"extract_references\"):\n return True\n\n if issubclass(model, ClusterableModel):\n for child_relation in get_all_child_relations(model):\n if cls.model_is_indexable(\n child_relation.related_model,\n allow_child_models=True,\n ):\n return True\n\n return False\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 466, "n_words": 91, "vocab_size": 59, "complexity": 15, "nloc": 28, "token_counts": 156, "n_ast_nodes": 244, "n_identifiers": 20, "d_id": 16955, "documentation": { "docstring": "\n Returns True if the given model 
may have outbound references that we would be interested in recording in the index.\n\n\n Args:\n model (type): a Django model class\n allow_child_models (boolean): Child models are not indexable on their own. If you are looking at\n a child model from the perspective of indexing it through its parent,\n set this to True to disable checking for this. Default False.\n ", "n_words": 65, "vocab_size": 55, "n_whitespaces": 191, "language": "en" } }, { "id": 296122, "commit_id": "bc2ba8e1c8c988ae24f6961ce64187782f5ba32d", "repo": "core", "path": "homeassistant/components/asuswrt/device_tracker.py", "file_name": "device_tracker.py", "fun_name": "is_connected", "commit_message": "Add missing type declaration to AsusWrt Scanner Entity (#69773)", "code": "def is_connected(self) -> bool:\n \n return self._device.is_connected\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 14, "n_ast_nodes": 25, "n_identifiers": 4, "d_id": 95126, "documentation": { "docstring": "Return true if the device is connected to the network.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 131824, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tests/test_runtime_env_complicated.py", "file_name": "test_runtime_env_complicated.py", "fun_name": "test_get_conda_env_dir", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_get_conda_env_dir(tmp_path):\n \n\n # Simulate starting in an env named tf1.\n d = tmp_path / \"envs\" / \"tf1\"\n Path.mkdir(d, parents=True)\n with mock.patch.dict(\n os.environ, {\"CONDA_PREFIX\": str(d), \"CONDA_DEFAULT_ENV\": \"tf1\"}\n ):\n with pytest.raises(ValueError):\n # Env tf2 should not exist.\n env_dir = get_conda_env_dir(\"tf2\")\n tf2_dir = tmp_path / \"envs\" / \"tf2\"\n Path.mkdir(tf2_dir, parents=True)\n env_dir = get_conda_env_dir(\"tf2\")\n assert env_dir == str(tmp_path / \"envs\" / \"tf2\")\n\n # Simulate starting in (base) conda env.\n with mock.patch.dict(\n os.environ, {\"CONDA_PREFIX\": str(tmp_path), \"CONDA_DEFAULT_ENV\": \"base\"}\n ):\n with pytest.raises(ValueError):\n # Env tf3 should not exist.\n env_dir = get_conda_env_dir(\"tf3\")\n # Env tf2 still should exist.\n env_dir = get_conda_env_dir(\"tf2\")\n assert env_dir == str(tmp_path / \"envs\" / \"tf2\")\n\n\n@pytest.mark.skipif(\n os.environ.get(\"CI\") and sys.platform != \"linux\",\n reason=\"This test is only run on linux CI machines.\",\n)", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.skipif(\n os.environ.get(\"CI\") and sys.platform != \"linux\",\n reason=\"This test is only run on linux CI machines.\",\n)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 269, "n_words": 117, "vocab_size": 65, "complexity": 1, "nloc": 19, "token_counts": 152, "n_ast_nodes": 335, "n_identifiers": 24, "d_id": 29601, "documentation": { "docstring": "\n Typical output of `conda env list`, for context:\n\n base /Users/scaly/anaconda3\n my_env_1 /Users/scaly/anaconda3/envs/my_env_1\n\n For this test, `tmp_path` is a stand-in for `Users/scaly/anaconda3`.\n ", "n_words": 21, "vocab_size": 20, "n_whitespaces": 65, "language": "en" } }, { "id": 249337, "commit_id": "2281427175e4c93a30c39607fb4ac23c2a1f399f", "repo": "synapse", "path": 
"tests/rest/admin/test_registration_tokens.py", "file_name": "test_registration_tokens.py", "fun_name": "test_create_expiry_time", "commit_message": "Use literals in place of `HTTPStatus` constants in tests (#13488)\n\n* Use literals in place of `HTTPStatus` constants in tests\r\n\r\n* newsfile\r\n\r\n* code style\r\n\r\n* code style", "code": "def test_create_expiry_time(self) -> None:\n \n # Should fail with a time in the past\n channel = self.make_request(\n \"POST\",\n self.url + \"/new\",\n {\"expiry_time\": self.clock.time_msec() - 10000},\n access_token=self.admin_user_tok,\n )\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"errcode\"], Codes.INVALID_PARAM)\n\n # Should fail with float\n channel = self.make_request(\n \"POST\",\n self.url + \"/new\",\n {\"expiry_time\": self.clock.time_msec() + 1000000.5},\n access_token=self.admin_user_tok,\n )\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"errcode\"], Codes.INVALID_PARAM)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 219, "n_words": 54, "vocab_size": 33, "complexity": 1, "nloc": 18, "token_counts": 142, "n_ast_nodes": 226, "n_identifiers": 15, "d_id": 72840, "documentation": { "docstring": "Check you can't create a token with an invalid expiry_time.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 248082, "commit_id": "30c8e7e408322967e5beb2a64ef5f796cb8df226", "repo": "synapse", "path": "scripts-dev/release.py", "file_name": "release.py", "fun_name": "cli", "commit_message": "Make `scripts-dev` pass `mypy --disallow-untyped-defs` (#12356)\n\nNot enforced in config yet. One day.", "code": "def cli() -> None:\n \n\n\n@cli.command()", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "@cli.command()", "n_ast_errors": 1, "ast_levels": 6, "n_whitespaces": 7, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 28, "token_counts": 7, "n_ast_nodes": 26, "n_identifiers": 2, "d_id": 72090, "documentation": { "docstring": "An interactive script to walk through the parts of creating a release.\n\n Requires the dev dependencies be installed, which can be done via:\n\n pip install -e .[dev]\n\n Then to use:\n\n ./scripts-dev/release.py prepare\n\n # ... ask others to look at the changelog ...\n\n ./scripts-dev/release.py tag\n\n # ... 
wait for assets to build ...\n\n ./scripts-dev/release.py publish\n\n ./scripts-dev/release.py upload\n\n # Optional: generate some nice links for the announcement\n\n ./scripts-dev/release.py announce\n\n If the env var GH_TOKEN (or GITHUB_TOKEN) is set, or passed into the\n `tag`/`publish` command, then a new draft release will be created/published.\n ", "n_words": 90, "vocab_size": 69, "n_whitespaces": 168, "language": "en" } }, { "id": 278684, "commit_id": "3613c3defc39c236fb1592c4f7ba1a9cc887343a", "repo": "keras", "path": "keras/engine/base_layer.py", "file_name": "base_layer.py", "fun_name": "add_update", "commit_message": "Remove pylint comments.\n\nPiperOrigin-RevId: 452353044", "code": "def add_update(self, updates):\n \n call_context = base_layer_utils.call_context()\n # No need to run updates during Functional API construction.\n if call_context.in_keras_graph:\n return\n\n # Callable updates are disabled by setting `trainable=False`.\n if not call_context.frozen:\n for update in tf.nest.flatten(updates):\n if callable(update):\n update()\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 135, "n_words": 37, "vocab_size": 33, "complexity": 5, "nloc": 8, "token_counts": 48, "n_ast_nodes": 83, "n_identifiers": 12, "d_id": 82676, "documentation": { "docstring": "Add update op(s), potentially dependent on layer inputs.\n\n Weight updates (for instance, the updates of the moving mean and\n variance in a BatchNormalization layer) may be dependent on the inputs\n passed when calling a layer. Hence, when reusing the same layer on\n different inputs `a` and `b`, some entries in `layer.updates` may be\n dependent on `a` and some on `b`. This method automatically keeps track\n of dependencies.\n\n This call is ignored when eager execution is enabled (in that case,\n variable updates are run on the fly and thus do not need to be tracked\n for later execution).\n\n Args:\n updates: Update op, or list/tuple of update ops, or zero-arg callable\n that returns an update op. A zero-arg callable should be passed in\n order to disable running the updates by setting `trainable=False`\n on this Layer, when executing in Eager mode.\n ", "n_words": 138, "vocab_size": 92, "n_whitespaces": 257, "language": "en" } }, { "id": 101893, "commit_id": "dab823a3eb7a5257cb1e0818ee10ed234d3de97f", "repo": "faceswap", "path": "lib/gui/display_command.py", "file_name": "display_command.py", "fun_name": "_add_option_iterations", "commit_message": "Typing - lib.gui.display_command", "code": "def _add_option_iterations(self) -> None:\n \n logger.debug(\"Adding Iterations Slider\")\n tk_var = self.vars[\"display_iterations\"]\n min_max = (0, 100000)\n hlp = _(\"Set the number of iterations to display. 
0 displays the full session.\")\n\n ctl_frame = ttk.Frame(self.optsframe)\n ctl_frame.pack(padx=2, side=tk.RIGHT)\n\n lbl = ttk.Label(ctl_frame, text=\"Iterations:\", anchor=tk.W)\n lbl.pack(pady=5, side=tk.LEFT, anchor=tk.N, expand=True)\n\n tbox = ttk.Entry(ctl_frame, width=6, textvariable=tk_var, justify=tk.RIGHT)\n tbox.pack(padx=(0, 5), side=tk.RIGHT)\n\n ctl = ttk.Scale(\n ctl_frame,\n variable=tk_var,\n command=lambda val, var=tk_var, dt=int, rn=1000, mm=min_max: # type:ignore\n set_slider_rounding(val, var, dt, rn, mm))\n ctl[\"from_\"] = min_max[0]\n ctl[\"to\"] = min_max[1]\n ctl.pack(padx=5, pady=5, fill=tk.X, expand=True)\n for item in (tbox, ctl):\n Tooltip(item,\n text=hlp,\n wrap_length=200)\n logger.debug(\"Added Iterations Slider\")\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 303, "n_words": 90, "vocab_size": 77, "complexity": 2, "nloc": 25, "token_counts": 254, "n_ast_nodes": 384, "n_identifiers": 48, "d_id": 21275, "documentation": { "docstring": " Add a slider to adjust the amount if iterations to display ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 12, "language": "en" } }, { "id": 154121, "commit_id": "8864bc197974da6d8cda2de2f35ca31d561be1cc", "repo": "modin", "path": "modin/core/io/column_stores/parquet_dispatcher.py", "file_name": "parquet_dispatcher.py", "fun_name": "build_partition", "commit_message": "PERF-#4305: Parallelize `read_parquet` over row groups (#4700)\n\nCo-authored-by: mvashishtha ", "code": "def build_partition(cls, partition_ids, column_widths):\n \n return np.array(\n [\n [\n cls.frame_partition_cls(\n part_id[0],\n length=part_id[2],\n width=col_width,\n )\n for part_id, col_width in zip(part_ids, column_widths)\n ]\n for part_ids in partition_ids\n ]\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 240, "n_words": 26, "vocab_size": 21, "complexity": 3, "nloc": 14, "token_counts": 56, "n_ast_nodes": 81, "n_identifiers": 13, "d_id": 35794, "documentation": { "docstring": "\n Build array with partitions of `cls.frame_partition_cls` class.\n\n Parameters\n ----------\n partition_ids : list\n Array with references to the partitions data.\n column_widths : list\n Number of columns in each partition.\n\n Returns\n -------\n np.ndarray\n array with shape equals to the shape of `partition_ids` and\n filed with partition objects.\n\n Notes\n -----\n The second level of partitions_ids contains a list of object references\n for each read call:\n partition_ids[i][j] -> [ObjectRef(df), ObjectRef(df.index), ObjectRef(len(df))].\n ", "n_words": 67, "vocab_size": 50, "n_whitespaces": 210, "language": "en" } }, { "id": 244243, "commit_id": "0db1b9b3d2c3f231241b25c54b3632a0413732ed", "repo": "mmdetection", "path": "mmdet/utils/replace_cfg_vals.py", "file_name": "replace_cfg_vals.py", "fun_name": "replace_cfg_vals", "commit_message": "[Tools] Support replacing the ${key} with the value of cfg.key (#7492)\n\n* Support replacing config\r\n\r\n* Support replacing config\r\n\r\n* Add unit test for replace_cfig\r\n\r\n* pre-commit\r\n\r\n* fix\r\n\r\n* modify the docstring\r\n\r\n* rename function\r\n\r\n* fix a bug\r\n\r\n* fix a bug and simplify the code\r\n\r\n* simplify the code\r\n\r\n* add replace_cfg_vals for some scripts\r\n\r\n* add replace_cfg_vals for some scripts\r\n\r\n* add some unit tests", "code": "def replace_cfg_vals(ori_cfg):\n \n", 
"url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 2, "nloc": 10, "token_counts": 64, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 70293, "documentation": { "docstring": "Replace the string \"${key}\" with the corresponding value.\n\n Replace the \"${key}\" with the value of ori_cfg.key in the config. And\n support replacing the chained ${key}. Such as, replace \"${key0.key1}\"\n with the value of cfg.key0.key1. Code is modified from `vars.py\n < https://github.com/microsoft/SoftTeacher/blob/main/ssod/utils/vars.py>`_ # noqa: E501\n\n Args:\n ori_cfg (mmcv.utils.config.Config):\n The origin config with \"${key}\" generated from a file.\n\n Returns:\n updated_cfg [mmcv.utils.config.Config]:\n The config with \"${key}\" replaced by the corresponding value.\n ", "n_words": 68, "vocab_size": 46, "n_whitespaces": 126, "language": "en" } }, { "id": 19154, "commit_id": "4c58179509e6f6047789efb0a95c2b0e20cb6c8f", "repo": "mlflow", "path": "mlflow/models/evaluation/base.py", "file_name": "base.py", "fun_name": "artifacts", "commit_message": "Improve evaluation api (#5256)\n\n* init\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add shap limitation on value type\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix format\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu ", "code": "def artifacts(self) -> Dict[str, \"mlflow.models.EvaluationArtifact\"]:\n \n return self._artifacts\n\n\n_cached_mlflow_client = None\n\n", "url": "https://github.com/mlflow/mlflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 23, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 6, "token_counts": 17, "n_ast_nodes": 35, "n_identifiers": 6, "d_id": 2900, "documentation": { "docstring": "\n A dictionary mapping standardized artifact names (e.g. 
\"roc_data\") to\n artifact content and location information\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 36, "language": "en" } }, { "id": 223504, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/_encoded_words.py", "file_name": "_encoded_words.py", "fun_name": "decode", "commit_message": "add python 3.10.4 for windows", "code": "def decode(ew):\n \n _, charset, cte, cte_string, _ = ew.split('?')\n charset, _, lang = charset.partition('*')\n cte = cte.lower()\n # Recover the original bytes and do CTE decoding.\n bstring = cte_string.encode('ascii', 'surrogateescape')\n bstring, defects = _cte_decoders[cte](bstring)\n # Turn the CTE decoded bytes into unicode.\n try:\n string = bstring.decode(charset)\n except UnicodeError:\n defects.append(errors.UndecodableBytesDefect(\"Encoded word \"\n \"contains bytes not decodable using {} charset\".format(charset)))\n string = bstring.decode(charset, 'surrogateescape')\n except LookupError:\n string = bstring.decode('ascii', 'surrogateescape')\n if charset.lower() != 'unknown-8bit':\n defects.append(errors.CharsetError(\"Unknown charset {} \"\n \"in encoded word; decoded as unknown bytes\".format(charset)))\n return string, charset, lang, defects\n\n\n_cte_encoders = {\n 'q': encode_q,\n 'b': encode_b,\n }\n\n_cte_encode_length = {\n 'q': len_q,\n 'b': len_b,\n }\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 228, "n_words": 104, "vocab_size": 74, "complexity": 4, "nloc": 18, "token_counts": 149, "n_ast_nodes": 304, "n_identifiers": 28, "d_id": 56938, "documentation": { "docstring": "Decode encoded word and return (string, charset, lang, defects) tuple.\n\n An RFC 2047/2243 encoded word has the form:\n\n =?charset*lang?cte?encoded_string?=\n\n where '*lang' may be omitted but the other parts may not be.\n\n This function expects exactly such a string (that is, it does not check the\n syntax and may raise errors if the string is not well formed), and returns\n the encoded_string decoded first from its Content Transfer Encoding and\n then from the resulting bytes into unicode using the specified charset. If\n the cte-decoded string does not successfully decode using the specified\n character set, a defect is added to the defects list and the unknown octets\n are replaced by the unicode 'unknown' character \\\\uFDFF.\n\n The specified charset and language are returned. 
The default for language,\n which is rarely if ever encountered, is the empty string.\n\n ", "n_words": 134, "vocab_size": 94, "n_whitespaces": 179, "language": "en" } }, { "id": 243996, "commit_id": "1516986a616fee8bb741d0ab2be40683045efccd", "repo": "mmdetection", "path": "mmdet/datasets/openimages.py", "file_name": "openimages.py", "fun_name": "add_supercategory_ann", "commit_message": "[Feature] Support OpenImages Dataset (#6331)\n\n* [Feature] support openimage group of eval\r\n\r\n* [Feature] support openimage group of eval\r\n\r\n* support openimage dataset\r\n\r\n* support openimage challenge dataset\r\n\r\n* fully support OpenImages-V6 and OpenImages Challenge 2019\r\n\r\n* Fix some logic error\r\n\r\n* update config file\r\n\r\n* fix get data_infos error\r\n\r\n* fully support OpenImages evaluation\r\n\r\n* update OpenImages config files\r\n\r\n* [Feature] support OpenImages datasets\r\n\r\n* fix bug\r\n\r\n* support load image metas from pipeline\r\n\r\n* fix bug\r\n\r\n* fix get classes logic error\r\n\r\n* update code\r\n\r\n* support get image metas\r\n\r\n* support openimags\r\n\r\n* support collect image metas\r\n\r\n* support Open Images\r\n\r\n* fix openimages logic\r\n\r\n* minor fix\r\n\r\n* add a new function to compute openimages tpfp\r\n\r\n* minor fix\r\n\r\n* fix ci error\r\n\r\n* minor fix\r\n\r\n* fix indication\r\n\r\n* minor fix\r\n\r\n* fix returns\r\n\r\n* fix returns\r\n\r\n* fix returns\r\n\r\n* fix returns\r\n\r\n* fix returns\r\n\r\n* minor fix\r\n\r\n* update readme\r\n\r\n* support loading image level labels and fix some logic\r\n\r\n* minor fix\r\n\r\n* minor fix\r\n\r\n* add class names\r\n\r\n* minor fix\r\n\r\n* minor fix\r\n\r\n* minor fix\r\n\r\n* add openimages test unit\r\n\r\n* minor fix\r\n\r\n* minor fix\r\n\r\n* fix test unit\r\n\r\n* minor fix\r\n\r\n* fix logic error\r\n\r\n* minor fix\r\n\r\n* fully support openimages\r\n\r\n* minor fix\r\n\r\n* fix docstring\r\n\r\n* fix docstrings in readthedocs\r\n\r\n* update get image metas script\r\n\r\n* label_description_file -> label_file\r\n\r\n* update openimages readme\r\n\r\n* fix test unit\r\n\r\n* fix test unit\r\n\r\n* minor fix\r\n\r\n* update readme file\r\n\r\n* Update get_image_metas.py", "code": "def add_supercategory_ann(self, annotations):\n \n for i, ann in enumerate(annotations):\n assert len(ann['labels']) == len(ann['bboxes']) == \\\n len(ann['gt_is_group_ofs'])\n gt_bboxes = []\n gt_is_group_ofs = []\n gt_labels = []\n for j in range(len(ann['labels'])):\n label = ann['labels'][j]\n bbox = ann['bboxes'][j]\n is_group = ann['gt_is_group_ofs'][j]\n label = np.where(self.class_label_tree[label])[0]\n if len(label) > 1:\n for k in range(len(label)):\n gt_bboxes.append(bbox)\n gt_is_group_ofs.append(is_group)\n gt_labels.append(label[k])\n else:\n gt_bboxes.append(bbox)\n gt_is_group_ofs.append(is_group)\n gt_labels.append(label[0])\n annotations[i] = dict(\n bboxes=np.array(gt_bboxes).astype(np.float32),\n labels=np.array(gt_labels).astype(np.int64),\n bboxes_ignore=ann['bboxes_ignore'],\n gt_is_group_ofs=np.array(gt_is_group_ofs).astype(np.bool))\n\n return annotations\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 464, "n_words": 64, "vocab_size": 47, "complexity": 5, "nloc": 27, "token_counts": 239, "n_ast_nodes": 384, "n_identifiers": 29, "d_id": 70186, "documentation": { "docstring": "Add parent classes of the corresponding class of the ground truth\n bboxes.", "n_words": 
12, "vocab_size": 10, "n_whitespaces": 18, "language": "en" } }, { "id": 53059, "commit_id": "fc20231ae7707ca9ca51a3e25fe8991482a02e2e", "repo": "prefect", "path": "src/prefect/logging/handlers.py", "file_name": "handlers.py", "fun_name": "close", "commit_message": "Add more docstrings", "code": "def close(self) -> None:\n \n if self.worker:\n self.worker.stop()\n return super().close()\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 41, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 7, "token_counts": 28, "n_ast_nodes": 50, "n_identifiers": 5, "d_id": 10698, "documentation": { "docstring": "\n Shuts down this handler and the `OrionLogWorker`.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 2712, "commit_id": "e272ed2fa4c58e0a89e273a3e85da7d13a85e04c", "repo": "PySyft", "path": "packages/syft/src/syft/core/node/common/action/get_enum_attribute_action.py", "file_name": "get_enum_attribute_action.py", "fun_name": "_object2proto", "commit_message": "[syft.core.node.common.action] Change syft import absolute -> relative", "code": "def _object2proto(self) -> GetEnumAttributeAction_PB:\n \n\n return GetEnumAttributeAction_PB(\n path=self.path,\n id_at_location=serialize(self.id_at_location),\n address=serialize(self.address),\n msg_id=serialize(self.id),\n )\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 76, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 18, "token_counts": 45, "n_ast_nodes": 70, "n_identifiers": 9, "d_id": 343, "documentation": { "docstring": "Returns a protobuf serialization of self.\n As a requirement of all objects which inherit from Serializable,\n this method transforms the current object into the corresponding\n Protobuf object so that it can be further serialized.\n :return: returns a protobuf object\n :rtype: GetOrSetPropertyAction_PB\n .. note::\n This method is purely an internal method. 
Please use serialize(object) or one of\n the other public serialization methods if you wish to serialize an\n object.\n ", "n_words": 68, "vocab_size": 56, "n_whitespaces": 150, "language": "en" } }, { "id": 309309, "commit_id": "789c0a24dd558207b712ddf10a919d9353853e40", "repo": "core", "path": "tests/components/nest/test_events.py", "file_name": "test_events.py", "fun_name": "test_doorbell_event_session_update", "commit_message": "Improve nest media player clip/image and event handling for multiple events in a short time range (#63149)", "code": "async def test_doorbell_event_session_update(hass, auth):\n \n events = async_capture_events(hass, NEST_EVENT)\n subscriber = await async_setup_devices(\n hass,\n \"sdm.devices.types.DOORBELL\",\n create_device_traits(\n [\n \"sdm.devices.traits.CameraClipPreview\",\n \"sdm.devices.traits.CameraPerson\",\n \"sdm.devices.traits.CameraMotion\",\n ]\n ),\n auth,\n )\n registry = er.async_get(hass)\n entry = registry.async_get(\"camera.front\")\n assert entry is not None\n\n # Message #1 has a motion event\n timestamp1 = utcnow()\n await subscriber.async_receive_event(\n create_events(\n {\n \"sdm.devices.events.CameraMotion.Motion\": {\n \"eventSessionId\": EVENT_SESSION_ID,\n \"eventId\": \"n:1\",\n },\n \"sdm.devices.events.CameraClipPreview.ClipPreview\": {\n \"eventSessionId\": EVENT_SESSION_ID,\n \"previewUrl\": \"image-url-1\",\n },\n },\n timestamp=timestamp1,\n )\n )\n\n # Message #2 has an extra person event\n timestamp2 = utcnow()\n await subscriber.async_receive_event(\n create_events(\n {\n \"sdm.devices.events.CameraMotion.Motion\": {\n \"eventSessionId\": EVENT_SESSION_ID,\n \"eventId\": \"n:1\",\n },\n \"sdm.devices.events.CameraPerson.Person\": {\n \"eventSessionId\": EVENT_SESSION_ID,\n \"eventId\": \"n:2\",\n },\n \"sdm.devices.events.CameraClipPreview.ClipPreview\": {\n \"eventSessionId\": EVENT_SESSION_ID,\n \"previewUrl\": \"image-url-1\",\n },\n },\n timestamp=timestamp2,\n )\n )\n await hass.async_block_till_done()\n\n assert len(events) == 2\n assert event_view(events[0].data) == {\n \"device_id\": entry.device_id,\n \"type\": \"camera_motion\",\n \"timestamp\": timestamp1.replace(microsecond=0),\n }\n assert event_view(events[1].data) == {\n \"device_id\": entry.device_id,\n \"type\": \"camera_person\",\n \"timestamp\": timestamp2.replace(microsecond=0),\n }\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 775, "n_words": 134, "vocab_size": 75, "complexity": 1, "nloc": 65, "token_counts": 249, "n_ast_nodes": 434, "n_identifiers": 27, "d_id": 108015, "documentation": { "docstring": "Test a pubsub message with updates to an existing session.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 28452, "commit_id": "2611883cda3b84ccbfcbf37221f5b62a08bc9af1", "repo": "saleor", "path": "saleor/core/tests/test_tasks.py", "file_name": "test_tasks.py", "fun_name": "test_delete_files_from_storage_task_files_not_existing_files", "commit_message": "Fix the migration for removing media marked as to remove (#10429)\n\n* Add celery task for removing multiple files from storage\r\n\r\n* Fix the migration for removing media marked as to remove", "code": "def test_delete_files_from_storage_task_files_not_existing_files(media_root):\n \n # given\n path = \"random/test-path\"\n path_2 = \"random/test-path-2\"\n assert not default_storage.exists(path)\n assert not default_storage.exists(path_2)\n\n # when\n 
delete_files_from_storage_task([path, path_2])\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 44, "n_words": 20, "vocab_size": 16, "complexity": 1, "nloc": 6, "token_counts": 36, "n_ast_nodes": 67, "n_identifiers": 7, "d_id": 5170, "documentation": { "docstring": "Ensure method not fail when trying to remove not existing file.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 286002, "commit_id": "7979b1fc071a1c3e7463044bea617d7305b4a17e", "repo": "OpenBBTerminal", "path": "openbb_terminal/cryptocurrency/due_diligence/tokenterminal_model.py", "file_name": "tokenterminal_model.py", "fun_name": "get_project_ids", "commit_message": "Add 3 Token Terminal commands (#2447)\n\n* add crypto/ov/fun\r\n\r\n* add tokenterminal to dependencies\r\n\r\n* update website content\r\n\r\n* add to main.yml\r\n\r\n* fix tests\r\n\r\n* add tests\r\n\r\n* Update _index.md\r\n\r\n* Update _index.md\r\n\r\n* fix tests\r\n\r\n* fix test\r\n\r\n* List hint added\r\n\r\n* improve code based on Jose input\r\n\r\n* fix tests\r\n\r\n* requirements for token terminal\r\n\r\n* add source and fix source bug\r\n\r\n* some improvements\r\n\r\n* colors bars\r\n\r\n* fix dependencies\r\n\r\n* update kaleido version\r\n\r\n* update setuptools for pkg_resources\r\n\r\n* replace pkg_resources by importlib_metadata\r\n\r\n* Added fixes\r\n\r\n* Fixed tests\r\n\r\n* fix stuff for Josecas\r\n\r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: colin99d ", "code": "def get_project_ids() -> List[str]:\n \n return [project[\"project_id\"] for project in PROJECTS_DATA]\n\n\n@log_start_end(log=logger)", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@log_start_end(log=logger)", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 16, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 9, "token_counts": 21, "n_ast_nodes": 48, "n_identifiers": 8, "d_id": 85500, "documentation": { "docstring": "This function returns the available project ids.\n\n Returns\n ----------\n List[str]\n A list with the all the project IDs\n ", "n_words": 18, "vocab_size": 15, "n_whitespaces": 37, "language": "en" } }, { "id": 178838, "commit_id": "70b7eee9555c8d5599d096eaf600521475b001d9", "repo": "Nuitka", "path": "nuitka/utils/CStrings.py", "file_name": "CStrings.py", "fun_name": "encodePythonUnicodeToC", "commit_message": "Python3.7+: Added support for get_resource_reader to our loader\n\n* This allows to avoid a useless file copy to a temporary file\n in case a \"importlib.resources.path\" is used.\n\n* Also fixed a few typos in tests.\n\n* And avoid compiling the meta path based loader separately, so it\n can use compiled code helpers easily.", "code": "def encodePythonUnicodeToC(value):\n \n assert type(value) is unicode, type(value)\n\n result = \"\"\n\n for c in value:\n cv = ord(c)\n\n result += r\"\\%o\" % cv\n\n return 'L\"%s\"' % result\n\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 55, "n_words": 26, "vocab_size": 20, "complexity": 2, "nloc": 7, "token_counts": 42, "n_ast_nodes": 73, "n_identifiers": 8, "d_id": 42841, "documentation": { "docstring": "Encode a string, so that it gives a wide C string literal.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 84333, "commit_id": 
"b775639f425d257d3367d6e462582ca926b1f7ee", "repo": "zulip", "path": "zerver/tests/test_scim.py", "file_name": "test_scim.py", "fun_name": "test_search_users", "commit_message": "test: Use list comprehension for dictionary values.\n\nSigned-off-by: Zixuan James Li ", "code": "def test_search_users(self) -> None:\n \n realm = get_realm(\"zulip\")\n\n # A payload to find all users whose email ends with @zulip.com\n payload = {\n \"schemas\": [\"urn:ietf:params:scim:api:messages:2.0:SearchRequest\"],\n \"filter\": 'userName ew \"@zulip.com\"',\n }\n result = self.client_post(\n \"/scim/v2/Users/.search\",\n payload,\n content_type=\"application/json\",\n **self.scim_headers(),\n )\n self.assertEqual(result.status_code, 200)\n output_data = orjson.loads(result.content)\n\n user_query = UserProfile.objects.filter(\n realm=realm, is_bot=False, delivery_email__endswith=\"@zulip.com\"\n )\n expected_response_schema = {\n \"schemas\": [\"urn:ietf:params:scim:api:messages:2.0:ListResponse\"],\n \"totalResults\": user_query.count(),\n \"itemsPerPage\": 50,\n \"startIndex\": 1,\n \"Resources\": [\n self.generate_user_schema(user_profile)\n for user_profile in UserProfile.objects.filter(realm=realm, is_bot=False).order_by(\n \"id\"\n )\n ],\n }\n\n self.assertEqual(output_data, expected_response_schema)\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 379, "n_words": 74, "vocab_size": 63, "complexity": 2, "nloc": 34, "token_counts": 157, "n_ast_nodes": 266, "n_identifiers": 26, "d_id": 17818, "documentation": { "docstring": "\n Tests a basic .search POST query:\n https://datatracker.ietf.org/doc/html/rfc7644#section-3.4.3\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 29, "language": "en" } }, { "id": 153954, "commit_id": "a7354c9ca76525a265da98f2afe882c53f378840", "repo": "modin", "path": "modin/core/execution/dask/implementations/pandas_on_dask/dataframe/dataframe.py", "file_name": "dataframe.py", "fun_name": "_row_lengths", "commit_message": "FEAT-#4419: Extend virtual partitioning API to pandas on Dask (#4420)\n\nSigned-off-by: Rehan Durrani \r\n\r\nCo-authored-by: Mahesh Vashishtha ", "code": "def _row_lengths(self):\n \n if self._row_lengths_cache is None:\n row_lengths_list = DaskWrapper.materialize(\n [\n self._get_partition_size_along_axis(obj, axis=0)\n for obj in self._partitions.T[0]\n ]\n )\n self._row_lengths_cache = [sum(len_list) for len_list in row_lengths_list]\n return self._row_lengths_cache\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 149, "n_words": 27, "vocab_size": 22, "complexity": 4, "nloc": 10, "token_counts": 61, "n_ast_nodes": 95, "n_identifiers": 13, "d_id": 35722, "documentation": { "docstring": "\n Compute ther row partitions lengths if they are not cached.\n\n Returns\n -------\n list\n A list of row partitions lengths.\n ", "n_words": 19, "vocab_size": 16, "n_whitespaces": 66, "language": "en" } }, { "id": 319926, "commit_id": "b3b2519bf03185aa12028fa68d3b8f8860555e6e", "repo": "paperless-ngx", "path": "src/paperless_tesseract/tests/test_parser.py", "file_name": "test_parser.py", "fun_name": "test_multi_page_mixed_no_archive", "commit_message": "Fixes the creation of an archive file, even if noarchive was specified", "code": "def test_multi_page_mixed_no_archive(self):\n \n parser = RasterisedDocumentParser(None)\n parser.parse(\n os.path.join(self.SAMPLE_FILES, 
\"multi-page-mixed.pdf\"),\n \"application/pdf\",\n )\n self.assertIsNone(parser.archive_path)\n self.assertContainsStrings(\n parser.get_text().lower(),\n [\"page 4\", \"page 5\", \"page 6\"],\n )\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 113, "n_words": 20, "vocab_size": 18, "complexity": 1, "nloc": 11, "token_counts": 63, "n_ast_nodes": 109, "n_identifiers": 14, "d_id": 117021, "documentation": { "docstring": "\n GIVEN:\n - File with some text contained in images and some in text layer\n - OCR mode set to skip_noarchive\n WHEN:\n - Document is parsed\n THEN:\n - Text from images is extracted\n - No archive file is created\n ", "n_words": 38, "vocab_size": 28, "n_whitespaces": 122, "language": "en" } }, { "id": 258487, "commit_id": "8b6b519caf3b3b9602958a859b4d3a7eb1d9eadd", "repo": "scikit-learn", "path": "sklearn/random_projection.py", "file_name": "random_projection.py", "fun_name": "fit", "commit_message": "ENH Preserving dtype for np.float32 in RandomProjection (#22114)\n\nCo-authored-by: takoika <>\r\nCo-authored-by: Thomas J. Fan ", "code": "def fit(self, X, y=None):\n \n X = self._validate_data(\n X, accept_sparse=[\"csr\", \"csc\"], dtype=[np.float64, np.float32]\n )\n\n n_samples, n_features = X.shape\n\n if self.n_components == \"auto\":\n self.n_components_ = johnson_lindenstrauss_min_dim(\n n_samples=n_samples, eps=self.eps\n )\n\n if self.n_components_ <= 0:\n raise ValueError(\n \"eps=%f and n_samples=%d lead to a target dimension of \"\n \"%d which is invalid\" % (self.eps, n_samples, self.n_components_)\n )\n\n elif self.n_components_ > n_features:\n raise ValueError(\n \"eps=%f and n_samples=%d lead to a target dimension of \"\n \"%d which is larger than the original space with \"\n \"n_features=%d\"\n % (self.eps, n_samples, self.n_components_, n_features)\n )\n else:\n if self.n_components <= 0:\n raise ValueError(\n \"n_components must be greater than 0, got %s\" % self.n_components\n )\n\n elif self.n_components > n_features:\n warnings.warn(\n \"The number of components is higher than the number of\"\n \" features: n_features < n_components (%s < %s).\"\n \"The dimensionality of the problem will not be reduced.\"\n % (n_features, self.n_components),\n DataDimensionalityWarning,\n )\n\n self.n_components_ = self.n_components\n\n # Generate a projection matrix of size [n_components, n_features]\n self.components_ = self._make_random_matrix(\n self.n_components_, n_features\n ).astype(X.dtype, copy=False)\n\n # Check contract\n assert self.components_.shape == (self.n_components_, n_features), (\n \"An error has occurred the self.components_ matrix has \"\n \" not the proper shape.\"\n )\n\n return self\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 760, "n_words": 185, "vocab_size": 109, "complexity": 6, "nloc": 43, "token_counts": 220, "n_ast_nodes": 357, "n_identifiers": 25, "d_id": 75247, "documentation": { "docstring": "Generate a sparse random projection matrix.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix} of shape (n_samples, n_features)\n Training set: only the shape is used to find optimal random\n matrix dimensions based on the theory referenced in the\n afore mentioned papers.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n BaseRandomProjection class 
instance.\n ", "n_words": 60, "vocab_size": 53, "n_whitespaces": 171, "language": "en" } }, { "id": 222639, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/command/bdist_msi.py", "file_name": "bdist_msi.py", "fun_name": "xbutton", "commit_message": "add python 3.10.4 for windows", "code": "def xbutton(self, name, title, next, xpos):\n \n return self.pushbutton(name, int(self.w*xpos - 28), self.h-27, 56, 17, 3, title, next)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 31, "n_words": 17, "vocab_size": 16, "complexity": 1, "nloc": 2, "token_counts": 48, "n_ast_nodes": 66, "n_identifiers": 10, "d_id": 56681, "documentation": { "docstring": "Add a button with a given title, the tab-next button,\n its name in the Control table, giving its x position; the\n y-position is aligned with the other buttons.\n\n Return the button, so that events can be associated", "n_words": 37, "vocab_size": 29, "n_whitespaces": 57, "language": "en" } }, { "id": 156038, "commit_id": "cccb9d8d8e33a891396b1275c2448c352ef40c27", "repo": "dask", "path": "dask/array/core.py", "file_name": "core.py", "fun_name": "to_svg", "commit_message": "absolufy-imports - No relative - PEP8 (#8796)\n\nConversation in https://github.com/dask/distributed/issues/5889", "code": "def to_svg(self, size=500):\n \n from dask.array.svg import svg\n\n return svg(self.chunks, size=size)\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 29, "n_ast_nodes": 45, "n_identifiers": 7, "d_id": 36515, "documentation": { "docstring": "Convert chunks from Dask Array into an SVG Image\n\n Parameters\n ----------\n chunks: tuple\n size: int\n Rough size of the image\n\n Examples\n --------\n >>> x.to_svg(size=500) # doctest: +SKIP\n\n Returns\n -------\n text: An svg string depicting the array as a grid of chunks\n ", "n_words": 41, "vocab_size": 38, "n_whitespaces": 130, "language": "en" } }, { "id": 42558, "commit_id": "1f4a121aa781117bc0daa3b4485cf7757f8112ee", "repo": "nltk", "path": "nltk/parse/corenlp.py", "file_name": "corenlp.py", "fun_name": "tokenize", "commit_message": "Rework CoreNLP tests for 4.5.1, make them work if CoreNLP is on CLASSPATH\n\nIf not, they are skipped. 
Sadly this does make the docstrings a bit more confusing", "code": "def tokenize(self, text, properties=None):\n \n default_properties = {\"annotators\": \"tokenize,ssplit\"}\n\n default_properties.update(properties or {})\n\n result = self.api_call(text, properties=default_properties)\n\n for sentence in result[\"sentences\"]:\n for token in sentence[\"tokens\"]:\n yield token[\"originalText\"] or token[\"word\"]\n", "url": "https://github.com/nltk/nltk.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 88, "n_words": 27, "vocab_size": 23, "complexity": 5, "nloc": 7, "token_counts": 66, "n_ast_nodes": 116, "n_identifiers": 10, "d_id": 7620, "documentation": { "docstring": "Tokenize a string of text.\n\n Skip these tests if CoreNLP is likely not ready.\n >>> if \"CLASSPATH\" not in os.environ: import pytest; pytest.skip(\"CoreNLP jars unavailable\")\n\n The CoreNLP server can be started using the following notation, although\n we recommend the `with CoreNLPServer() as server:` context manager notation\n to ensure that the server is always stopped.\n >>> server = CoreNLPServer()\n >>> server.start()\n >>> parser = CoreNLPParser(url=server.url)\n\n >>> text = 'Good muffins cost $3.88\\\\nin New York. Please buy me\\\\ntwo of them.\\\\nThanks.'\n >>> list(parser.tokenize(text))\n ['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']\n\n >>> s = \"The colour of the wall is blue.\"\n >>> list(\n ... parser.tokenize(\n ... 'The colour of the wall is blue.',\n ... properties={'tokenize.options': 'americanize=true'},\n ... )\n ... )\n ['The', 'colour', 'of', 'the', 'wall', 'is', 'blue', '.']\n >>> server.stop()\n\n ", "n_words": 137, "vocab_size": 100, "n_whitespaces": 313, "language": "en" } }, { "id": 307671, "commit_id": "5e338d21665cb04f66fcebd9376cdda389c30c01", "repo": "core", "path": "homeassistant/components/automation/__init__.py", "file_name": "__init__.py", "fun_name": "async_will_remove_from_hass", "commit_message": "Improve type hints in automation (#78368)\n\n* Improve type hints in automation\r\n\r\n* Apply suggestion\r\n\r\n* Apply suggestion\r\n\r\n* Apply suggestion\r\n\r\n* Add Protocol for IfAction\r\n\r\n* Use ConfigType for IfAction\r\n\r\n* Rename variable", "code": "async def async_will_remove_from_hass(self) -> None:\n \n await super().async_will_remove_from_hass()\n await self.async_disable()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 30, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 22, "n_ast_nodes": 43, "n_identifiers": 4, "d_id": 106439, "documentation": { "docstring": "Remove listeners when removing automation from Home Assistant.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 202399, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/csrf_tests/tests.py", "file_name": "tests.py", "fun_name": "test_reading_post_data_raises_os_error", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_reading_post_data_raises_os_error(self):\n \n mw = CsrfViewMiddleware(post_form_view)\n req = self._get_POST_request_with_token(request_class=PostErrorRequest)\n req.post_error = OSError(\"Deleted directories/Missing permissions.\")\n mw.process_request(req)\n with self.assertRaises(OSError):\n mw.process_view(req, 
post_form_view, (), {})\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 73, "n_words": 20, "vocab_size": 18, "complexity": 1, "nloc": 7, "token_counts": 58, "n_ast_nodes": 99, "n_identifiers": 14, "d_id": 50107, "documentation": { "docstring": "\n An OSError raised while reading the POST data should not be handled by\n the middleware.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 37, "language": "en" } }, { "id": 338450, "commit_id": "bb6ee0b7bc72cb29e496a6d05aee9e11d6f745b1", "repo": "accelerate", "path": "src/accelerate/test_utils/testing.py", "file_name": "testing.py", "fun_name": "require_mps", "commit_message": "Support `init_on_device` (#926)\n\n* Support init_on_device\r\n\r\n* Support mps backend as well in testing", "code": "def require_mps(test_case):\n \n is_mps_supported = hasattr(torch.backends, \"mps\") and torch.backends.mps.is_available()\n return unittest.skipUnless(is_mps_supported, \"test requires a `mps` backend support in `torch`\")(test_case)\n\n", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 27, "n_words": 18, "vocab_size": 18, "complexity": 2, "nloc": 3, "token_counts": 38, "n_ast_nodes": 66, "n_identifiers": 10, "d_id": 121217, "documentation": { "docstring": "\n Decorator marking a test that requires MPS backend. These tests are skipped when torch doesn't support `mps`\n backend.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 28, "language": "en" } }, { "id": 93687, "commit_id": "2fbf550ec05c8501cbc9eca62e73526e717dcbdf", "repo": "sentry", "path": "src/sentry/integrations/jira/client.py", "file_name": "client.py", "fun_name": "request_hook", "commit_message": "ref(Jira): Split Jira Cloud and Jira Server (#37034)\n\n* Split Jira Cloud and Jira Server", "code": "def request_hook(self, method, path, data, params, **kwargs):\n \n # handle params that are already part of the path\n url_params = dict(parse_qs(urlsplit(path).query))\n url_params.update(params or {})\n path = path.split(\"?\")[0]\n\n jwt_payload = {\n \"iss\": JIRA_KEY,\n \"iat\": datetime.datetime.utcnow(),\n \"exp\": datetime.datetime.utcnow() + datetime.timedelta(seconds=5 * 60),\n \"qsh\": get_query_hash(path, method.upper(), url_params),\n }\n encoded_jwt = jwt.encode(jwt_payload, self.shared_secret)\n params = dict(jwt=encoded_jwt, **(url_params or {}))\n request_spec = kwargs.copy()\n request_spec.update(dict(method=method, path=path, data=data, params=params))\n return request_spec\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 191, "n_words": 63, "vocab_size": 54, "complexity": 3, "nloc": 15, "token_counts": 165, "n_ast_nodes": 262, "n_identifiers": 28, "d_id": 19009, "documentation": { "docstring": "\n Used by Jira Client to apply the jira-cloud authentication\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 100402, "commit_id": "c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "setup.py", "file_name": "setup.py", "fun_name": "_tensorflow_dependency_install", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for 
tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def _tensorflow_dependency_install(self):\n \n # TODO This will need to be more robust if/when we accept multiple Tensorflow Versions\n versions = list(TENSORFLOW_REQUIREMENTS.values())[-1]\n condaexe = [\"conda\", \"search\"]\n pkgs = [\"cudatoolkit\", \"cudnn\"]\n shell = self.env.os_version[0] == \"Windows\"\n for pkg in pkgs:\n with Popen(condaexe + [pkg], shell=shell, stdout=PIPE) as chk:\n available = [line.split()\n for line\n in chk.communicate()[0].decode(self.env.encoding).splitlines()\n if line.startswith(pkg)]\n compatible = [req for req in available\n if (pkg == \"cudatoolkit\" and req[1].startswith(versions[0]))\n or (pkg == \"cudnn\" and versions[0] in req[2]\n and req[1].startswith(versions[1]))]\n\n candidate = \"==\".join(sorted(compatible, key=lambda x: x[1])[-1][:2])\n self.conda_installer(candidate, verbose=True, conda_only=True)\n\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 373, "n_words": 86, "vocab_size": 68, "complexity": 10, "nloc": 17, "token_counts": 211, "n_ast_nodes": 340, "n_identifiers": 34, "d_id": 19885, "documentation": { "docstring": " Install the Cuda/cuDNN dependencies from Conda when tensorflow is not available\n in Conda.\n\n This was used whilst Tensorflow 2.2 was not available for Windows in Conda. 
It is kept\n here in case it is required again in the future.\n ", "n_words": 39, "vocab_size": 29, "n_whitespaces": 68, "language": "en" } }, { "id": 205866, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/sql/query.py", "file_name": "query.py", "fun_name": "join", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def join(self, join, reuse=None):\n \n reuse_aliases = [\n a\n for a, j in self.alias_map.items()\n if (reuse is None or a in reuse) and j.equals(join)\n ]\n if reuse_aliases:\n if join.table_alias in reuse_aliases:\n reuse_alias = join.table_alias\n else:\n # Reuse the most recent alias of the joined table\n # (a many-to-many relation may be joined multiple times).\n reuse_alias = reuse_aliases[-1]\n self.ref_alias(reuse_alias)\n return reuse_alias\n\n # No reuse is possible, so we need a new alias.\n alias, _ = self.table_alias(\n join.table_name, create=True, filtered_relation=join.filtered_relation\n )\n if join.join_type:\n if self.alias_map[join.parent_alias].join_type == LOUTER or join.nullable:\n join_type = LOUTER\n else:\n join_type = INNER\n join.join_type = join_type\n join.table_alias = alias\n self.alias_map[alias] = join\n return alias\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 392, "n_words": 104, "vocab_size": 70, "complexity": 10, "nloc": 25, "token_counts": 151, "n_ast_nodes": 239, "n_identifiers": 22, "d_id": 51251, "documentation": { "docstring": "\n Return an alias for the 'join', either reusing an existing alias for\n that join or creating a new one. 'join' is either a base_table_class or\n join_class.\n\n The 'reuse' parameter can be either None which means all joins are\n reusable, or it can be a set containing the aliases that can be reused.\n\n A join is always created as LOUTER if the lhs alias is LOUTER to make\n sure chains like t1 LOUTER t2 INNER t3 aren't generated. 
All new\n joins are created as LOUTER if the join is nullable.\n ", "n_words": 89, "vocab_size": 57, "n_whitespaces": 153, "language": "en" } }, { "id": 101185, "commit_id": "32950897376b48e0f08b46385602e4df902cf49e", "repo": "faceswap", "path": "tools/sort/sort.py", "file_name": "sort.py", "fun_name": "estimate_blur_fft", "commit_message": "lib.detected_face.Mask\n - Add source + target offset and coverage to set_sub_crop method", "code": "def estimate_blur_fft(cls, image, metadata=None):\n \n if metadata is not None:\n alignments = metadata[\"alignments\"]\n det_face = DetectedFace()\n det_face.from_png_meta(alignments)\n aln_face = AlignedFace(np.array(alignments[\"landmarks_xy\"], dtype=\"float32\"),\n image=image,\n centering=\"legacy\",\n size=256,\n is_aligned=True)\n mask = det_face.mask[\"components\"]\n mask.set_sub_crop(aln_face.pose.offset[mask.stored_centering],\n aln_face.pose.offset[\"legacy\"],\n centering=\"legacy\")\n mask = cv2.resize(mask.mask, (256, 256), interpolation=cv2.INTER_CUBIC)[..., None]\n image = np.minimum(aln_face.face, mask)\n if image.ndim == 3:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n height, width = image.shape\n c_height, c_width = (int(height / 2.0), int(width / 2.0))\n fft = np.fft.fft2(image)\n fft_shift = np.fft.fftshift(fft)\n fft_shift[c_height - 75:c_height + 75, c_width - 75:c_width + 75] = 0\n ifft_shift = np.fft.ifftshift(fft_shift)\n shift_back = np.fft.ifft2(ifft_shift)\n magnitude = np.log(np.abs(shift_back))\n score = np.mean(magnitude)\n return score\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 478, "n_words": 94, "vocab_size": 71, "complexity": 3, "nloc": 28, "token_counts": 276, "n_ast_nodes": 424, "n_identifiers": 49, "d_id": 20606, "documentation": { "docstring": " Estimate the amount of blur a fft filtered image has.\n\n Parameters\n ----------\n image: :class:`numpy.ndarray`\n Use Fourier Transform to analyze the frequency characteristics of the masked\n face using 2D Discrete Fourier Transform (DFT) filter to find the frequency domain.\n A mean value is assigned to the magnitude spectrum and returns a blur score.\n Adapted from https://www.pyimagesearch.com/2020/06/15/\n opencv-fast-fourier-transform-fft-for-blur-detection-in-images-and-video-streams/\n metadata: dict, optional\n The metadata for the face image or ``None`` if no metadata is available. 
If metadata is\n provided the face will be masked by the \"components\" mask prior to calculating blur.\n Default:``None``\n\n Returns\n -------\n float\n The estimated fft blur score for the face\n ", "n_words": 101, "vocab_size": 71, "n_whitespaces": 257, "language": "en" } }, { "id": 268620, "commit_id": "cb2e434dd2359a9fe1c00e75431f4abeff7381e8", "repo": "ansible", "path": "lib/ansible/cli/galaxy.py", "file_name": "galaxy.py", "fun_name": "execute_info", "commit_message": "ansible-galaxy install - fix unnecessary api check when installing a role from git repo (#79090)\n\n* delay server api evaluation until a GalaxyRole needs to make an api call for info, list, and install", "code": "def execute_info(self):\n \n\n roles_path = context.CLIARGS['roles_path']\n\n data = ''\n for role in context.CLIARGS['args']:\n\n role_info = {'path': roles_path}\n gr = GalaxyRole(self.galaxy, self.lazy_role_api, role)\n\n install_info = gr.install_info\n if install_info:\n if 'version' in install_info:\n install_info['installed_version'] = install_info['version']\n del install_info['version']\n role_info.update(install_info)\n\n if not context.CLIARGS['offline']:\n remote_data = None\n try:\n remote_data = self.api.lookup_role_by_name(role, False)\n except AnsibleError as e:\n if e.http_code == 400 and 'Bad Request' in e.message:\n # Role does not exist in Ansible Galaxy\n data = u\"- the role %s was not found\" % role\n break\n\n raise AnsibleError(\"Unable to find info about '%s': %s\" % (role, e))\n\n if remote_data:\n role_info.update(remote_data)\n\n elif context.CLIARGS['offline'] and not gr._exists:\n data = u\"- the role %s was not found\" % role\n break\n\n if gr.metadata:\n role_info.update(gr.metadata)\n\n req = RoleRequirement()\n role_spec = req.role_yaml_parse({'role': role})\n if role_spec:\n role_info.update(role_spec)\n\n data += self._display_role_info(role_info)\n\n self.pager(data)\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 617, "n_words": 128, "vocab_size": 85, "complexity": 13, "nloc": 34, "token_counts": 225, "n_ast_nodes": 385, "n_identifiers": 29, "d_id": 79567, "documentation": { "docstring": "\n prints out detailed information about an installed role as well as info available from the galaxy API.\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 32, "language": "en" } }, { "id": 218388, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/inspect.py", "file_name": "inspect.py", "fun_name": "unwrap", "commit_message": "add python 3.10.4 for windows", "code": "def unwrap(func, *, stop=None):\n \n if stop is None:", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 14, "n_words": 8, "vocab_size": 8, "complexity": 5, "nloc": 15, "token_counts": 94, "n_ast_nodes": 29, "n_identifiers": 3, "d_id": 55276, "documentation": { "docstring": "Get the object wrapped by *func*.\n\n Follows the chain of :attr:`__wrapped__` attributes returning the last\n object in the chain.\n\n *stop* is an optional callback accepting an object in the wrapper chain\n as its sole argument that allows the unwrapping to be terminated early if\n the callback returns a true value. If the callback never returns a true\n value, the last object in the chain is returned as usual. 
For example,\n :func:`signature` uses this to stop unwrapping if any object in the\n chain has a ``__signature__`` attribute defined.\n\n :exc:`ValueError` is raised if a cycle is encountered.\n\n ", "n_words": 95, "vocab_size": 58, "n_whitespaces": 116, "language": "en" } }, { "id": 42118, "commit_id": "72d1322ee583eb481346e5e661c2998c8a7445dd", "repo": "seaborn", "path": "seaborn/axisgrid.py", "file_name": "axisgrid.py", "fun_name": "tick_params", "commit_message": "Adding Grid.tick_params() method. (#2944)\n\n* Adding Grid.tick_params() method.\r\n\r\n* Address PR comments.\r\n\r\n* Add What's New entry.\r\n\r\n* Switch tick_params() test to use pad.", "code": "def tick_params(self, axis='both', **kwargs):\n \n for ax in self.figure.axes:\n ax.tick_params(axis=axis, **kwargs)\n return self\n\n\n_facet_docs = dict(\n\n data=dedent(),\n rowcol=dedent(),\n rowcol_order=dedent(),\n col_wrap=dedent(),\n share_xy=dedent(),\n height=dedent(),\n aspect=dedent(),\n palette=dedent(),\n legend_out=dedent(),\n margin_titles=dedent(),\n facet_kws=dedent(),\n)\n\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 90, "n_words": 27, "vocab_size": 27, "complexity": 2, "nloc": 4, "token_counts": 35, "n_ast_nodes": 219, "n_identifiers": 21, "d_id": 7487, "documentation": { "docstring": "Modify the ticks, tick labels, and gridlines.\n\n Parameters\n ----------\n axis : {'x', 'y', 'both'}\n The axis on which to apply the formatting.\n kwargs : keyword arguments\n Additional keyword arguments to pass to\n :meth:`matplotlib.axes.Axes.tick_params`.\n\n Returns\n -------\n self : Grid instance\n Returns self for easy chaining.\n\n \\\n data : DataFrame\n Tidy (\"long-form\") dataframe where each column is a variable and each\n row is an observation.\\\n \\\n row, col : vectors or keys in ``data``\n Variables that define subsets to plot on different facets.\\\n \\\n {row,col}_order : vector of strings\n Specify the order in which levels of the ``row`` and/or ``col`` variables\n appear in the grid of subplots.\\\n \\\n col_wrap : int\n \"Wrap\" the column variable at this width, so that the column facets\n span multiple rows. Incompatible with a ``row`` facet.\\\n \\\n share{x,y} : bool, 'col', or 'row' optional\n If true, the facets will share y axes across columns and/or x axes\n across rows.\\\n \\\n height : scalar\n Height (in inches) of each facet. See also: ``aspect``.\\\n \\\n aspect : scalar\n Aspect ratio of each facet, so that ``aspect * height`` gives the width\n of each facet in inches.\\\n \\\n palette : palette name, list, or dict\n Colors to use for the different levels of the ``hue`` variable. Should\n be something that can be interpreted by :func:`color_palette`, or a\n dictionary mapping hue levels to matplotlib colors.\\\n \\\n legend_out : bool\n If ``True``, the figure size will be extended, and the legend will be\n drawn outside the plot on the center right.\\\n \\\n margin_titles : bool\n If ``True``, the titles for the row variable are drawn to the right of\n the last column. 
This option is experimental and may not work in all\n cases.\\\n \\\n facet_kws : dict\n Additional parameters passed to :class:`FacetGrid`.\n ", "n_words": 290, "vocab_size": 175, "n_whitespaces": 603, "language": "en" } }, { "id": 62909, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/packaging/tags.py", "file_name": "tags.py", "fun_name": "mac_platforms", "commit_message": "upd; format", "code": "def mac_platforms(version=None, arch=None):\n # type: (Optional[MacVersion], Optional[str]) -> Iterator[str]\n \n version_str, _, cpu_arch = platform.mac_ver() # type: ignore\n if version is None:\n version = cast(\"MacVersion\", tuple(map(int, version_str.split(\".\")[:2])))\n else:\n version = version\n if arch is None:\n arch = _mac_arch(cpu_arch)\n else:\n arch = arch\n\n if (10, 0) <= version and version < (11, 0):\n # Prior to Mac OS 11, each yearly release of Mac OS bumped the\n # \"minor\" version number. The major version was always 10.\n for minor_version in range(version[1], -1, -1):\n compat_version = 10, minor_version\n binary_formats = _mac_binary_formats(compat_version, arch)\n for binary_format in binary_formats:\n yield \"macosx_{major}_{minor}_{binary_format}\".format(\n major=10, minor=minor_version, binary_format=binary_format\n )\n\n if version >= (11, 0):\n # Starting with Mac OS 11, each yearly release bumps the major version\n # number. The minor versions are now the midyear updates.\n for major_version in range(version[0], 10, -1):\n compat_version = major_version, 0\n binary_formats = _mac_binary_formats(compat_version, arch)\n for binary_format in binary_formats:\n yield \"macosx_{major}_{minor}_{binary_format}\".format(\n major=major_version, minor=0, binary_format=binary_format\n )\n\n if version >= (11, 0):\n # Mac OS 11 on x86_64 is compatible with binaries from previous releases.\n # Arm64 support was introduced in 11.0, so no Arm binaries from previous\n # releases exist.\n #\n # However, the \"universal2\" binary format can have a\n # macOS version earlier than 11.0 when the x86_64 part of the binary supports\n # that version of macOS.\n if arch == \"x86_64\":\n for minor_version in range(16, 3, -1):\n compat_version = 10, minor_version\n binary_formats = _mac_binary_formats(compat_version, arch)\n for binary_format in binary_formats:\n yield \"macosx_{major}_{minor}_{binary_format}\".format(\n major=compat_version[0],\n minor=compat_version[1],\n binary_format=binary_format,\n )\n else:\n for minor_version in range(16, 3, -1):\n compat_version = 10, minor_version\n binary_format = \"universal2\"\n yield \"macosx_{major}_{minor}_{binary_format}\".format(\n major=compat_version[0],\n minor=compat_version[1],\n binary_format=binary_format,\n )\n\n\n# From PEP 513, PEP 600", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 889, "n_words": 268, "vocab_size": 129, "complexity": 15, "nloc": 46, "token_counts": 319, "n_ast_nodes": 506, "n_identifiers": 24, "d_id": 13068, "documentation": { "docstring": "\n Yields the platform tags for a macOS system.\n\n The `version` parameter is a two-item tuple specifying the macOS version to\n generate platform tags for. The `arch` parameter is the CPU architecture to\n generate platform tags for. 
Both parameters default to the appropriate value\n for the current system.\n ", "n_words": 47, "vocab_size": 28, "n_whitespaces": 66, "language": "en" } }, { "id": 261040, "commit_id": "2710a9e7eefd2088ce35fd2fb6651d5f97e5ef8b", "repo": "scikit-learn", "path": "sklearn/utils/tests/test_array_api.py", "file_name": "test_array_api.py", "fun_name": "test_asarray_with_order", "commit_message": "ENH Adds Array API support to LinearDiscriminantAnalysis (#22554)\n\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Julien Jerphanion ", "code": "def test_asarray_with_order(is_array_api):\n \n if is_array_api:\n xp = pytest.importorskip(\"numpy.array_api\")\n else:\n xp = numpy\n\n X = xp.asarray([1.2, 3.4, 5.1])\n X_new = _asarray_with_order(X, order=\"F\")\n\n X_new_np = numpy.asarray(X_new)\n assert X_new_np.flags[\"F_CONTIGUOUS\"]\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 60, "n_words": 25, "vocab_size": 20, "complexity": 2, "nloc": 9, "token_counts": 67, "n_ast_nodes": 104, "n_identifiers": 13, "d_id": 76641, "documentation": { "docstring": "Test _asarray_with_order passes along order for NumPy arrays.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 101287, "commit_id": "2beceffad9b15c1fd78f06b9b272563321c5a41e", "repo": "faceswap", "path": "lib/training/generator.py", "file_name": "generator.py", "fun_name": "_total_channels", "commit_message": "Data Augmentation update (#1263)\n\n- lib.detected_face\r\n - Subclass Masks for Landmark based masks\r\n - Add training mask propery + methods to DetectedFace\r\n - lib.training_training\r\n - subclass TrainingDataGenerator for training and preview data\r\n - Split cache into own module\r\n - Reduce thread count to 1 to prevent image corruption + data re-use\r\n - Process on largest model input/output size rather than stored image size\r\n - Size and crop masks during caching stage\r\n - Implement ring buffer for data flow\r\n - Fix preview reload bug\r\n - augmentation\r\n - typing\r\n - switch color aug order\r\n - better initialization\r\n - Fix warp + landmark warp to correctly apply at different image scales\r\n - Slightly improved warp caching\r\n - Don't store whether image is_preview. Handle all data as training images implicitly\r\n - plugins.trainer: Typing and fixes to work with trainingdata refactor", "code": "def _total_channels(self) -> int:\n \n channels = 3\n if self._config[\"mask_type\"] and (self._config[\"learn_mask\"] or\n self._config[\"penalized_mask_loss\"]):\n channels += 1\n\n mults = [area for area in [\"eye\", \"mouth\"] if int(self._config[f\"{area}_multiplier\"]) > 1]\n if self._config[\"penalized_mask_loss\"] and mults:\n channels += len(mults)\n return channels\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 142, "n_words": 37, "vocab_size": 29, "complexity": 8, "nloc": 11, "token_counts": 82, "n_ast_nodes": 143, "n_identifiers": 8, "d_id": 20706, "documentation": { "docstring": "int: The total number of channels, including mask channels that the target image\n should hold. 
", "n_words": 15, "vocab_size": 15, "n_whitespaces": 22, "language": "en" } }, { "id": 73775, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/models/__init__.py", "file_name": "__init__.py", "fun_name": "start", "commit_message": "Reformat with black", "code": "def start(self, workflow_state, user=None):\n \n task_state = self.get_task_state_class()(workflow_state=workflow_state)\n task_state.status = TaskState.STATUS_IN_PROGRESS\n task_state.page_revision = workflow_state.page.get_latest_revision()\n task_state.task = self\n task_state.save()\n task_submitted.send(\n sender=task_state.specific.__class__,\n instance=task_state.specific,\n user=user,\n )\n return task_state\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 120, "n_words": 24, "vocab_size": 20, "complexity": 1, "nloc": 12, "token_counts": 77, "n_ast_nodes": 122, "n_identifiers": 20, "d_id": 16102, "documentation": { "docstring": "Start this task on the provided workflow state by creating an instance of TaskState", "n_words": 14, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 39217, "commit_id": "f1b06e2f758b5b4a965f7bf428d006621d19c0b0", "repo": "recommenders", "path": "tests/ci/aml_tests_old/submit_azureml_pytest.py", "file_name": "submit_azureml_pytest.py", "fun_name": "setup_persistent_compute_target", "commit_message": "changed folder structure for aml tests", "code": "def setup_persistent_compute_target(workspace, cluster_name, vm_size, max_nodes):\n \n # setting vmsize and num nodes creates a persistent AzureML\n # compute resource\n\n logger.debug(\"setup: cluster_name {}\".format(cluster_name))\n # https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets\n\n try:\n cpu_cluster = ComputeTarget(workspace=workspace, name=cluster_name)\n logger.debug(\"setup: Found existing cluster, use it.\")\n except ComputeTargetException:\n logger.debug(\"setup: create cluster\")\n compute_config = AmlCompute.provisioning_configuration(\n vm_size=vm_size, max_nodes=max_nodes\n )\n cpu_cluster = ComputeTarget.create(workspace, cluster_name, compute_config)\n cpu_cluster.wait_for_completion(show_output=True)\n return cpu_cluster\n\n", "url": "https://github.com/microsoft/recommenders.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 133, "n_words": 53, "vocab_size": 44, "complexity": 2, "nloc": 13, "token_counts": 88, "n_ast_nodes": 147, "n_identifiers": 18, "d_id": 7139, "documentation": { "docstring": "\n Set up a persistent compute target on AzureML.\n A persistent compute target runs noticeably faster than a\n regular compute target for subsequent runs. The benefit\n is that AzureML manages turning the compute on/off as needed for\n each job so the user does not need to do this.\n\n Args:\n workspace (str): Centralized location on Azure to work with\n all the\n artifacts used by AzureML service\n cluster_name (str): the Azure cluster for this run. 
It can\n already exist or it will be created.\n vm_size (str): Azure VM size, like STANDARD_D3_V2\n max_nodes (int): Number of VMs, max_nodes=4 will\n autoscale up to 4 VMs\n Returns:\n cpu_cluster : cluster reference\n ", "n_words": 105, "vocab_size": 82, "n_whitespaces": 286, "language": "en" } }, { "id": 114080, "commit_id": "e8740eecac16c34cba133ba37939831bb66deea7", "repo": "mindsdb", "path": "mindsdb/migrations/versions/2022-02-09_27c5aca9e47e_test.py", "file_name": "2022-02-09_27c5aca9e47e_test.py", "fun_name": "upgrade", "commit_message": "changes from parent branch", "code": "def upgrade():\n op.drop_table('ai_table')\n\n op.create_table(\n 'analysis',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('analysis', mindsdb.interfaces.storage.db.Json(), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n\n with op.batch_alter_table('datasource', schema=None) as batch_op:\n batch_op.add_column(sa.Column('analysis_id', sa.Integer(), nullable=True))\n batch_op.create_foreign_key('fk_analysis_id', 'analysis', ['analysis_id'], ['id'])\n batch_op.add_column(sa.Column('ds_class', sa.String(), nullable=True))\n\n conn = op.get_bind()\n session = sa.orm.Session(bind=conn)\n dsatasources = conn.execute('select id, analysis from datasource').fetchall()\n for row in dsatasources:\n if row['analysis'] is not None:\n # NOTE 'returning' is relatively new in sqlite, so better will be use select after insert.\n conn.execute(\n text(), {\n 'id': row['id']\n }\n )\n analysis_id = conn.execute(text()).fetchall()\n conn.execute(\n text(), {\n 'analysis_id': analysis_id[0][0],\n 'id': row['id']\n }\n )\n\n with op.batch_alter_table('datasource', schema=None) as batch_op:\n batch_op.drop_column('analysis')\n\n op.create_table(\n 'file',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(), nullable=False),\n sa.Column('company_id', sa.Integer(), nullable=True),\n sa.Column('source_file_path', sa.String(), nullable=False),\n sa.Column('file_path', sa.String(), nullable=False),\n sa.Column('row_count', sa.Integer(), nullable=False),\n sa.Column('columns', mindsdb.interfaces.storage.db.Json(), nullable=False),\n # sa.Column('created_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp()), # ?????\n # sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp(), server_onupdate=sa.func.current_timestamp()), # ????? erver_default=func.now()\n # sa.Column('created_at', sa.DateTime(), nullable=True, server_default=datetime.datetime.now), # ?????\n # sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=datetime.datetime.now, server_onupdate=datetime.datetime.now), # ????? erver_default=func.now()\n sa.Column('created_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp()), # ?????\n sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp(), server_onupdate=sa.func.current_timestamp()), # ????? 
erver_default=func.now()\n sa.Column('analysis_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['analysis_id'], ['analysis.id'], name='fk_analysis_id'),\n sa.PrimaryKeyConstraint('id')\n )\n\n # delete ds where data is none\n\n dsatasources = conn.execute(text('select * from datasource')).fetchall()\n for ds in dsatasources:\n if ds['data'] is None:\n conn.execute(text('delete from datasource where id = :id'), {'id': ds['id']})\n continue\n ds_data = json.loads(ds['data'])\n creation_info = json.loads(ds['creation_info'])\n datasource_name = ds_data.get('source_type')\n if datasource_name == 'file':\n created_at = None\n if isinstance(ds['created_at'], str):\n created_at = datetime.datetime.fromisoformat(ds['created_at'])\n elif isinstance(ds['created_at'], [float, int]):\n created_at = datetime.fromtimestamp(ds['created_at'])\n\n updated_at = None\n if isinstance(ds['updated_at'], str):\n updated_at = datetime.datetime.fromisoformat(ds['updated_at'])\n elif isinstance(ds['updated_at'], [float, int]):\n updated_at = datetime.fromtimestamp(ds['updated_at'])\n\n file = mindsdb.interfaces.storage.db.File(\n name=ds['name'],\n company_id=ds['company_id'],\n source_file_path=ds_data['source'],\n file_path=creation_info['args'][0],\n row_count=ds_data['row_count'],\n columns=ds_data['columns'],\n created_at=created_at,\n updated_at=updated_at,\n analysis_id=ds['analysis_id']\n )\n session.add(file)\n\n conn.execute(\n text(), {\n 'datasource_name': datasource_name,\n 'company_id': ds['company_id'],\n 'ds_class': creation_info['class'],\n 'id': ds['id']\n }\n )\n\n session.commit()\n\n op.rename_table('datasource', 'dataset')\n op.rename_table('integration', 'datasource')\n\n with op.batch_alter_table('dataset', schema=None) as batch_op:\n batch_op.alter_column('integration_id', new_column_name='datasource_id')\n batch_op.create_foreign_key('fk_datasource_id', 'datasource', ['datasource_id'], ['id'])\n\n # NOTE two different 'batch' is necessary, in other way FK is not creating\n with op.batch_alter_table('predictor', schema=None) as batch_op:\n batch_op.alter_column('datasource_id', new_column_name='dataset_id')\n with op.batch_alter_table('predictor', schema=None) as batch_op:\n batch_op.create_foreign_key('fk_dataset_id', 'dataset', ['dataset_id'], ['id'])\n\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 1245, "n_words": 323, "vocab_size": 174, "complexity": 10, "nloc": 108, "token_counts": 948, "n_ast_nodes": 1602, "n_identifiers": 68, "d_id": 25088, "documentation": { "docstring": "\n insert into analysis (analysis) select analysis from datasource where id = :id;\n \n select id from analysis order by id desc limit 1;\n \n update datasource set analysis_id = :analysis_id where id = :id\n \n update datasource\n set integration_id = (select id from integration where name = :datasource_name and company_id = :company_id),\n ds_class = :ds_class\n where id = :id\n ", "n_words": 56, "vocab_size": 31, "n_whitespaces": 229, "language": "en" } }, { "id": 218044, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/importlib/_bootstrap.py", "file_name": "_bootstrap.py", "fun_name": "_verbose_message", "commit_message": "add python 3.10.4 for windows", "code": "def _verbose_message(message, *args, verbosity=1):\n \n if sys.flags.verbose >= verbosity:\n if not message.startswith(('#', 'import ')):\n message = '# ' + message\n 
print(message.format(*args), file=sys.stderr)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 52, "n_words": 21, "vocab_size": 19, "complexity": 3, "nloc": 5, "token_counts": 56, "n_ast_nodes": 95, "n_identifiers": 12, "d_id": 55095, "documentation": { "docstring": "Print the message to stderr if -v/PYTHONVERBOSE is turned on.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 221182, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/bz2.py", "file_name": "bz2.py", "fun_name": "readinto", "commit_message": "add python 3.10.4 for windows", "code": "def readinto(self, b):\n \n self._check_can_read()\n return self._buffer.readinto(b)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 27, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 22, "n_ast_nodes": 38, "n_identifiers": 5, "d_id": 56252, "documentation": { "docstring": "Read bytes into b.\n\n Returns the number of bytes read (0 for EOF).\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 27, "language": "en" } }, { "id": 173231, "commit_id": "2e007a160e652b2e7bbdeb5a8319560188324502", "repo": "calibre-web", "path": "cps/helper.py", "file_name": "helper.py", "fun_name": "check_send_to_kindle", "commit_message": "reenable startup logging\nBugfixes from refactoring and merge", "code": "def check_send_to_kindle(entry):\n \n formats = list()\n book_formats = list()\n if len(entry.data):\n for ele in iter(entry.data):\n if ele.uncompressed_size < config.mail_size:\n formats.append(ele.format)\n if 'MOBI' in formats:\n book_formats.append({'format': 'Mobi',\n 'convert': 0,\n 'text': _('Send %(format)s to Kindle', format='Mobi')})\n if 'PDF' in formats:\n book_formats.append({'format': 'Pdf',\n 'convert': 0,\n 'text': _('Send %(format)s to Kindle', format='Pdf')})\n if 'AZW' in formats:\n book_formats.append({'format': 'Azw',\n 'convert': 0,\n 'text': _('Send %(format)s to Kindle', format='Azw')})\n if config.config_converterpath:\n book_formats.extend(check_send_to_kindle_with_converter(formats))\n return book_formats\n else:\n log.error(u'Cannot find book entry %d', entry.id)\n return None\n\n\n# Check if a reader is existing for any of the book formats, if not, return empty list, otherwise return\n# list with supported formats", "url": "https://github.com/janeczku/calibre-web.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 431, "n_words": 100, "vocab_size": 62, "complexity": 8, "nloc": 25, "token_counts": 172, "n_ast_nodes": 312, "n_identifiers": 21, "d_id": 40831, "documentation": { "docstring": "\n returns all available book formats for sending to Kindle\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 20, "language": "en" } }, { "id": 65173, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/budget_variance_report/budget_variance_report.py", "file_name": "budget_variance_report.py", "fun_name": "get_actual_details", "commit_message": "style: format code with black", "code": "def get_actual_details(name, filters):\n\tbudget_against = frappe.scrub(filters.get(\"budget_against\"))\n\tcond = \"\"\n\n\tif filters.get(\"budget_against\") == \"Cost Center\":\n\t\tcc_lft, cc_rgt = frappe.db.get_value(\"Cost Center\", name, [\"lft\", 
\"rgt\"])\n\t\tcond = .format(\n\t\t\tlft=cc_lft, rgt=cc_rgt\n\t\t)\n\n\tac_details = frappe.db.sql(\n\t\t.format(\n\t\t\ttab=filters.budget_against, budget_against=budget_against, cond=cond\n\t\t),\n\t\t(filters.from_fiscal_year, filters.to_fiscal_year, name),\n\t\tas_dict=1,\n\t)\n\n\tcc_actual_details = {}\n\tfor d in ac_details:\n\t\tcc_actual_details.setdefault(d.account, []).append(d)\n\n\treturn cc_actual_details\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 33, "n_words": 52, "vocab_size": 43, "complexity": 3, "nloc": 53, "token_counts": 138, "n_ast_nodes": 223, "n_identifiers": 26, "d_id": 13815, "documentation": { "docstring": "\n\t\t\t\tand lft >= \"{lft}\"\n\t\t\t\tand rgt <= \"{rgt}\"\n\t\t\t\n\t\t\tselect\n\t\t\t\tgl.account,\n\t\t\t\tgl.debit,\n\t\t\t\tgl.credit,\n\t\t\t\tgl.fiscal_year,\n\t\t\t\tMONTHNAME(gl.posting_date) as month_name,\n\t\t\t\tb.{budget_against} as budget_against\n\t\t\tfrom\n\t\t\t\t`tabGL Entry` gl,\n\t\t\t\t`tabBudget Account` ba,\n\t\t\t\t`tabBudget` b\n\t\t\twhere\n\t\t\t\tb.name = ba.parent\n\t\t\t\tand b.docstatus = 1\n\t\t\t\tand ba.account=gl.account\n\t\t\t\tand b.{budget_against} = gl.{budget_against}\n\t\t\t\tand gl.fiscal_year between %s and %s\n\t\t\t\tand b.{budget_against} = %s\n\t\t\t\tand exists(\n\t\t\t\t\tselect\n\t\t\t\t\t\tname\n\t\t\t\t\tfrom\n\t\t\t\t\t\t`tab{tab}`\n\t\t\t\t\twhere\n\t\t\t\t\t\tname = gl.{budget_against}\n\t\t\t\t\t\t{cond}\n\t\t\t\t)\n\t\t\t\tgroup by\n\t\t\t\t\tgl.name\n\t\t\t\torder by gl.fiscal_year\n\t\t", "n_words": 70, "vocab_size": 46, "n_whitespaces": 38, "language": "en" } }, { "id": 155506, "commit_id": "e4ef652ead6e3fd4bf97deff992fb9065eab4b44", "repo": "modin", "path": "modin/core/dataframe/pandas/partitioning/partition_manager.py", "file_name": "partition_manager.py", "fun_name": "broadcast_apply", "commit_message": "REFACTOR-#5459: Install code linters through conda and unpin flake8 (#5450)\n\nCo-authored-by: Vasily Litvinov \r\nSigned-off-by: Anatoly Myachev ", "code": "def broadcast_apply(cls, axis, apply_func, left, right, other_name=\"right\"):\n \n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 14, "n_words": 7, "vocab_size": 7, "complexity": 4, "nloc": 20, "token_counts": 98, "n_ast_nodes": 28, "n_identifiers": 7, "d_id": 36414, "documentation": { "docstring": "\n Broadcast the `right` partitions to `left` and apply `apply_func` function.\n\n Parameters\n ----------\n axis : {0, 1}\n Axis to apply and broadcast over.\n apply_func : callable\n Function to apply.\n left : np.ndarray\n NumPy array of left partitions.\n right : np.ndarray\n NumPy array of right partitions.\n other_name : str, default: \"right\"\n Name of key-value argument for `apply_func` that\n is used to pass `right` to `apply_func`.\n\n Returns\n -------\n np.ndarray\n NumPy array of result partition objects.\n\n Notes\n -----\n This will often be overridden by implementations. 
It materializes the\n entire partitions of the right and applies them to the left through `apply`.\n ", "n_words": 97, "vocab_size": 64, "n_whitespaces": 287, "language": "en" } }, { "id": 220852, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/transports.py", "file_name": "transports.py", "fun_name": "writelines", "commit_message": "add python 3.10.4 for windows", "code": "def writelines(self, list_of_data):\n \n data = b''.join(list_of_data)\n self.write(data)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 28, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 23, "n_ast_nodes": 40, "n_identifiers": 6, "d_id": 56155, "documentation": { "docstring": "Write a list (or any iterable) of data bytes to the transport.\n\n The default implementation concatenates the arguments and\n calls write() on the result.\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 45, "language": "en" } }, { "id": 222552, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/archive_util.py", "file_name": "archive_util.py", "fun_name": "make_zipfile", "commit_message": "add python 3.10.4 for windows", "code": "def make_zipfile(base_name, base_dir, verbose=0, dry_run=0):\n \n zip_filename = base_name + \".zip\"\n mkpath(os.path.dirname(zip_filename), dry_run=dry_run)\n\n # If zipfile module is not available, try spawning an external\n # 'zip' command.\n if zipfile is None:\n if verbose:\n zipoptions = \"-r\"\n else:\n zipoptions = \"-rq\"\n\n try:\n spawn([\"zip\", zipoptions, zip_filename, base_dir],\n dry_run=dry_run)\n except DistutilsExecError:\n # XXX really should distinguish between \"couldn't find\n # external 'zip' command\" and \"zip failed\".\n raise DistutilsExecError((\"unable to create zip file '%s': \"\n \"could neither import the 'zipfile' module nor \"\n \"find a standalone zip utility\") % zip_filename)\n\n else:\n log.info(\"creating '%s' and adding '%s' to it\",\n zip_filename, base_dir)\n\n if not dry_run:\n try:\n zip = zipfile.ZipFile(zip_filename, \"w\",\n compression=zipfile.ZIP_DEFLATED)\n except RuntimeError:\n zip = zipfile.ZipFile(zip_filename, \"w\",\n compression=zipfile.ZIP_STORED)\n\n with zip:\n if base_dir != os.curdir:\n path = os.path.normpath(os.path.join(base_dir, ''))\n zip.write(path, path)\n log.info(\"adding '%s'\", path)\n for dirpath, dirnames, filenames in os.walk(base_dir):\n for name in dirnames:\n path = os.path.normpath(os.path.join(dirpath, name, ''))\n zip.write(path, path)\n log.info(\"adding '%s'\", path)\n for name in filenames:\n path = os.path.normpath(os.path.join(dirpath, name))\n if os.path.isfile(path):\n zip.write(path, path)\n log.info(\"adding '%s'\", path)\n\n return zip_filename\n\nARCHIVE_FORMATS = {\n 'gztar': (make_tarball, [('compress', 'gzip')], \"gzip'ed tar-file\"),\n 'bztar': (make_tarball, [('compress', 'bzip2')], \"bzip2'ed tar-file\"),\n 'xztar': (make_tarball, [('compress', 'xz')], \"xz'ed tar-file\"),\n 'ztar': (make_tarball, [('compress', 'compress')], \"compressed tar file\"),\n 'tar': (make_tarball, [('compress', None)], \"uncompressed tar file\"),\n 'zip': (make_zipfile, [],\"ZIP file\")\n }\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 860, "n_words": 203, "vocab_size": 134, "complexity": 11, 
"nloc": 41, "token_counts": 290, "n_ast_nodes": 638, "n_identifiers": 34, "d_id": 56631, "documentation": { "docstring": "Create a zip file from all the files under 'base_dir'.\n\n The output zip file will be named 'base_name' + \".zip\". Uses either the\n \"zipfile\" Python module (if available) or the InfoZIP \"zip\" utility\n (if installed and found on the default search path). If neither tool is\n available, raises DistutilsExecError. Returns the name of the output zip\n file.\n ", "n_words": 57, "vocab_size": 47, "n_whitespaces": 78, "language": "en" } }, { "id": 205304, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/migrations/loader.py", "file_name": "loader.py", "fun_name": "project_state", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def project_state(self, nodes=None, at_end=True):\n \n return self.graph.make_state(\n nodes=nodes, at_end=at_end, real_apps=self.unmigrated_apps\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 42, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 35, "n_ast_nodes": 53, "n_identifiers": 8, "d_id": 51080, "documentation": { "docstring": "\n Return a ProjectState object representing the most recent state\n that the loaded migrations represent.\n\n See graph.make_state() for the meaning of \"nodes\" and \"at_end\".\n ", "n_words": 23, "vocab_size": 21, "n_whitespaces": 52, "language": "en" } }, { "id": 54644, "commit_id": "110742d1fee98e793ccdbf47a0a55eeaf70e81e0", "repo": "prefect", "path": "src/prefect/orion/database/dependencies.py", "file_name": "dependencies.py", "fun_name": "provide_database_interface", "commit_message": "Add temporary_database_interface", "code": "def provide_database_interface() -> OrionDBInterface:\n \n connection_url = PREFECT_ORION_DATABASE_CONNECTION_URL.value()\n\n database_config = MODELS_DEPENDENCIES.get(\"database_config\")\n query_components = MODELS_DEPENDENCIES.get(\"query_components\")\n orm = MODELS_DEPENDENCIES.get(\"orm\")\n dialect = get_dialect(connection_url)\n\n if database_config is None:\n\n if dialect.name == \"postgresql\":\n database_config = AsyncPostgresConfiguration(connection_url=connection_url)\n elif dialect.name == \"sqlite\":\n database_config = AioSqliteConfiguration(connection_url=connection_url)\n else:\n raise ValueError(\n f\"Unable to infer database configuration from provided dialect. Got dialect name {dialect.name!r}\"\n )\n\n MODELS_DEPENDENCIES[\"database_config\"] = database_config\n\n if query_components is None:\n if dialect.name == \"postgresql\":\n query_components = AsyncPostgresQueryComponents()\n elif dialect.name == \"sqlite\":\n query_components = AioSqliteQueryComponents()\n else:\n raise ValueError(\n f\"Unable to infer query components from provided dialect. Got dialect name {dialect.name!r}\"\n )\n\n MODELS_DEPENDENCIES[\"query_components\"] = query_components\n\n if orm is None:\n if dialect.name == \"postgresql\":\n orm = AsyncPostgresORMConfiguration()\n elif dialect.name == \"sqlite\":\n orm = AioSqliteORMConfiguration()\n else:\n raise ValueError(\n f\"Unable to infer orm configuration from provided dialect. 
Got dialect name {dialect.name!r}\"\n )\n\n MODELS_DEPENDENCIES[\"orm\"] = orm\n\n return OrionDBInterface(\n database_config=database_config,\n query_components=query_components,\n orm=orm,\n )\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 451, "n_words": 136, "vocab_size": 54, "complexity": 10, "nloc": 47, "token_counts": 195, "n_ast_nodes": 367, "n_identifiers": 20, "d_id": 11117, "documentation": { "docstring": "\n Get the current orion database interface.\n\n If components of the interface are not set, defaults will be inferred\n based on the dialect of the connection url.\n ", "n_words": 26, "vocab_size": 22, "n_whitespaces": 39, "language": "en" } }, { "id": 70567, "commit_id": "0a9b23979bbc55c0a95ff357ee589dae5363dc18", "repo": "wagtail", "path": "wagtail/admin/tests/test_contentstate.py", "file_name": "test_contentstate.py", "fun_name": "test_image_inside_link", "commit_message": "Update links to wagtail.io website to point to wagtail.org\n\nThis covers only links to the website, not other sites", "code": "def test_image_inside_link(self):\n # https://github.com/wagtail/wagtail/issues/4602 - ensure that an inside\n # a link is handled. This is not valid in Draftail as images are block-level,\n # but should be handled without errors, splitting the image into its own block\n converter = ContentstateConverter(features=['image', 'link'])\n result = json.loads(converter.from_database_format(\n \n ))\n self.assertContentStateEqual(result, {\n 'blocks': [\n {'key': '00000', 'inlineStyleRanges': [], 'entityRanges': [{'key': 0, 'offset': 0, 'length': 6}], 'depth': 0, 'text': 'before', 'type': 'unstyled'},\n {'key': '00000', 'inlineStyleRanges': [], 'entityRanges': [{'key': 1, 'offset': 0, 'length': 1}], 'depth': 0, 'text': ' ', 'type': 'atomic'},\n {'key': '00000', 'inlineStyleRanges': [], 'entityRanges': [{'key': 0, 'offset': 0, 'length': 5}], 'depth': 0, 'text': 'after', 'type': 'unstyled'},\n {'key': '00000', 'inlineStyleRanges': [], 'entityRanges': [{'key': 2, 'offset': 0, 'length': 0}], 'depth': 0, 'text': '', 'type': 'unstyled'},\n {'key': '00000', 'inlineStyleRanges': [], 'entityRanges': [{'key': 3, 'offset': 0, 'length': 1}], 'depth': 0, 'text': ' ', 'type': 'atomic'},\n {'key': '00000', 'inlineStyleRanges': [], 'entityRanges': [{'key': 2, 'offset': 0, 'length': 0}], 'depth': 0, 'text': '', 'type': 'unstyled'},\n ],\n 'entityMap': {\n '0': {'mutability': 'MUTABLE', 'type': 'LINK', 'data': {'url': 'https://wagtail.org'}},\n '1': {\n 'data': {'format': 'left', 'alt': 'an image', 'id': '1', 'src': '/media/not-found'},\n 'mutability': 'IMMUTABLE', 'type': 'IMAGE'\n },\n '2': {'mutability': 'MUTABLE', 'type': 'LINK', 'data': {'url': 'https://wagtail.org'}},\n '3': {\n 'data': {'format': 'left', 'alt': 'an image', 'id': '1', 'src': '/media/not-found'},\n 'mutability': 'IMMUTABLE', 'type': 'IMAGE'\n },\n }\n })\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 577, "n_words": 210, "vocab_size": 100, "complexity": 1, "nloc": 30, "token_counts": 398, "n_ast_nodes": 782, "n_identifiers": 10, "d_id": 15525, "documentation": { "docstring": "\n
    before after
    \n
    \n ", "n_words": 18, "vocab_size": 12, "n_whitespaces": 52, "language": "en" } }, { "id": 163809, "commit_id": "6294d8490162442f9e73186f38b5545e5f22f7cb", "repo": "pandas", "path": "pandas/core/series.py", "file_name": "series.py", "fun_name": "unstack", "commit_message": "DOC: Improve reshaping.rst (#45612)", "code": "def unstack(self, level=-1, fill_value=None) -> DataFrame:\n \n from pandas.core.reshape.reshape import unstack\n\n return unstack(self, level, fill_value)\n\n # ----------------------------------------------------------------------\n # function application\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 46, "n_words": 19, "vocab_size": 17, "complexity": 1, "nloc": 44, "token_counts": 36, "n_ast_nodes": 55, "n_identifiers": 8, "d_id": 39502, "documentation": { "docstring": "\n Unstack, also known as pivot, Series with MultiIndex to produce DataFrame.\n\n Parameters\n ----------\n level : int, str, or list of these, default last level\n Level(s) to unstack, can pass level name.\n fill_value : scalar value, default None\n Value to use when replacing NaN values.\n\n Returns\n -------\n DataFrame\n Unstacked Series.\n\n Notes\n -----\n Reference :ref:`the user guide ` for more examples.\n\n Examples\n --------\n >>> s = pd.Series([1, 2, 3, 4],\n ... index=pd.MultiIndex.from_product([['one', 'two'],\n ... ['a', 'b']]))\n >>> s\n one a 1\n b 2\n two a 3\n b 4\n dtype: int64\n\n >>> s.unstack(level=-1)\n a b\n one 1 2\n two 3 4\n\n >>> s.unstack(level=0)\n one two\n a 1 3\n b 2 4\n ", "n_words": 108, "vocab_size": 79, "n_whitespaces": 471, "language": "en" } }, { "id": 151752, "commit_id": "f268187e9b357127151ae45704538aed6c89f7f5", "repo": "freqtrade", "path": "freqtrade/misc.py", "file_name": "misc.py", "fun_name": "sync_to_async_iter", "commit_message": "offload initial df computation to thread", "code": "def sync_to_async_iter(iter):\n \n\n loop = asyncio.get_event_loop()\n q = asyncio.Queue(1)\n exception = None\n _END = object()\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 29, "n_words": 14, "vocab_size": 11, "complexity": 1, "nloc": 9, "token_counts": 50, "n_ast_nodes": 52, "n_identifiers": 10, "d_id": 35131, "documentation": { "docstring": "\n Wrap blocking iterator into an asynchronous by\n offloading computation to thread and using\n pubsub pattern for yielding results\n\n :param iter: A synchronous iterator\n :returns: An asynchronous iterator\n ", "n_words": 27, "vocab_size": 24, "n_whitespaces": 46, "language": "en" } }, { "id": 196288, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/geometry/polygon.py", "file_name": "polygon.py", "fun_name": "is_right", "commit_message": "Updated import locations", "code": "def is_right(self):\n \n s = self.sides\n return Segment.is_perpendicular(s[0], s[1]) or \\\n Segment.is_perpendicular(s[1], s[2]) or \\\n Segment.is_perpendicular(s[0], s[2])\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 59, "n_words": 16, "vocab_size": 12, "complexity": 3, "nloc": 5, "token_counts": 58, "n_ast_nodes": 84, "n_identifiers": 6, "d_id": 47788, "documentation": { "docstring": "Is the triangle right-angled.\n\n Returns\n =======\n\n is_right : boolean\n\n See Also\n ========\n\n 
sympy.geometry.line.LinearEntity.is_perpendicular\n is_equilateral, is_isosceles, is_scalene\n\n Examples\n ========\n\n >>> from sympy import Triangle, Point\n >>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(4, 3))\n >>> t1.is_right()\n True\n\n ", "n_words": 36, "vocab_size": 31, "n_whitespaces": 134, "language": "en" } }, { "id": 250578, "commit_id": "fd43ca19c4a34915bdbfb9c127716fb5a63156e1", "repo": "mitmproxy", "path": "mitmproxy/flow.py", "file_name": "flow.py", "fun_name": "killable", "commit_message": "Flow.kill: don't depend on reply status.\n\nIn principle, a flow is killable as long as the connection handler is still\nchecking the error status of the flow.\n\nThis is patch 2/4 of the reply-ectomy.", "code": "def killable(self):\n \n return not (self.error and self.error.msg == Error.KILLED_MESSAGE)\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 2, "token_counts": 23, "n_ast_nodes": 39, "n_identifiers": 6, "d_id": 73510, "documentation": { "docstring": "*Read-only:* `True` if this flow can be killed, `False` otherwise.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 100727, "commit_id": "afec52309326304f4323029039e49bfcf928ef43", "repo": "faceswap", "path": "lib/gui/popup_session.py", "file_name": "popup_session.py", "fun_name": "_check_valid_data", "commit_message": "Bugfixes:\n - Stats graph - Handle NaNs in data\n - logger - de-elevate matplotlib font messages", "code": "def _check_valid_data(self) -> bool:\n \n logger.debug(\"Validating data. %s\",\n {key: len(val) for key, val in self._display_data.stats.items()})\n if any(len(val) == 0 # pylint:disable=len-as-condition\n for val in self._display_data.stats.values()):\n return False\n return True\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 102, "n_words": 28, "vocab_size": 24, "complexity": 4, "nloc": 15, "token_counts": 64, "n_ast_nodes": 105, "n_identifiers": 13, "d_id": 20182, "documentation": { "docstring": " Check that the selections holds valid data to display\n NB: len-as-condition is used as data could be a list or a numpy array\n\n Returns\n -------\n bool\n ``True` if there is data to be displayed, otherwise ``False``\n ", "n_words": 36, "vocab_size": 30, "n_whitespaces": 87, "language": "en" } }, { "id": 120237, "commit_id": "2588c98586400a9a457670eabeba67085528e95f", "repo": "jax", "path": "jax/_src/dtypes.py", "file_name": "dtypes.py", "fun_name": "promote_types", "commit_message": "Add comment explaining implementation in promote_types", "code": "def promote_types(a, b):\n \n # Note: we deliberately avoid `if a in _weak_types` here because we want to check\n # object identity, not object equality, due to the behavior of np.dtype.__eq__\n a = a if any(a is t for t in _weak_types) else np.dtype(a)\n b = b if any(b is t for t in _weak_types) else np.dtype(b)\n return np.dtype(_least_upper_bound(a, b))\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 65, "n_words": 59, "vocab_size": 41, "complexity": 5, "nloc": 4, "token_counts": 62, "n_ast_nodes": 97, "n_identifiers": 9, "d_id": 26803, "documentation": { "docstring": "Returns the type to which a binary operation should cast its arguments.\n\n For details of JAX's type 
promotion semantics, see :ref:`type-promotion`.\n\n Args:\n a: a :class:`numpy.dtype` or a dtype specifier.\n b: a :class:`numpy.dtype` or a dtype specifier.\n\n Returns:\n A :class:`numpy.dtype` object.\n ", "n_words": 40, "vocab_size": 30, "n_whitespaces": 53, "language": "en" } }, { "id": 100848, "commit_id": "ff6b0209dd5ad57b81b0aca570df7f39a7119bfb", "repo": "faceswap", "path": "plugins/train/model/_base/settings.py", "file_name": "settings.py", "fun_name": "_get_mask_channels", "commit_message": "Refactoring and TravisCI to Github Actions (#1239)\n\n* refactor training\r\n\r\n* travis to actions", "code": "def _get_mask_channels(self) -> List[int]:\n \n eye_multiplier = self._config[\"eye_multiplier\"]\n mouth_multiplier = self._config[\"mouth_multiplier\"]\n if not self._config[\"penalized_mask_loss\"] and (eye_multiplier > 1 or\n mouth_multiplier > 1):\n logger.warning(\"You have selected eye/mouth loss multipliers greater than 1x, but \"\n \"Penalized Mask Loss is disabled. Disabling all multipliers.\")\n eye_multiplier = 1\n mouth_multiplier = 1\n uses_masks = (self._config[\"penalized_mask_loss\"],\n eye_multiplier > 1,\n mouth_multiplier > 1)\n mask_channels = [-1 for _ in range(len(uses_masks))]\n current_channel = 3\n for idx, mask_required in enumerate(uses_masks):\n if mask_required:\n mask_channels[idx] = current_channel\n current_channel += 1\n logger.debug(\"uses_masks: %s, mask_channels: %s\", uses_masks, mask_channels)\n return mask_channels\n\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 353, "n_words": 86, "vocab_size": 62, "complexity": 7, "nloc": 28, "token_counts": 130, "n_ast_nodes": 215, "n_identifiers": 19, "d_id": 20299, "documentation": { "docstring": " Obtain the channels from the face targets that the masks reside in from the training\n data generator.\n\n Returns\n -------\n list:\n A list of channel indices that contain the mask for the corresponding config item\n ", "n_words": 34, "vocab_size": 27, "n_whitespaces": 81, "language": "en" } }, { "id": 136563, "commit_id": "c51b0c9a5664e5c6df3d92f9093b56e61b48f514", "repo": "ray", "path": "python/ray/tests/test_autoscaler.py", "file_name": "test_autoscaler.py", "fun_name": "testDynamicScalingForegroundLauncher", "commit_message": "[autoscaler][kuberay] Batching node provider (#29933)\n\nImplements the abstract subclass of NodeProvider proposed in\r\nhttps://docs.google.com/document/d/1JyQINBFirZw7YenA_14zize0R3hIII1_fnfQytIXTPo/\r\n\r\nThe goal is to simplify the autoscaler's interactions with external cluster managers like the KubeRay operator.\r\n\r\nA follow-up PR will implement KuberayNodeProvider as a subclass of the BatchingNodeProvider added here.\r\n\r\nSigned-off-by: Dmitri Gekhtman ", "code": "def testDynamicScalingForegroundLauncher(self):\n \n self.helperDynamicScaling(foreground_node_launcher=True)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 17, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 2, "token_counts": 14, "n_ast_nodes": 26, "n_identifiers": 4, "d_id": 30943, "documentation": { "docstring": "Test autoscaling with node launcher in the foreground.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 311520, "commit_id": "58b8c30221a6f6e5acbbe98b7e3298b03fb741f5", "repo": "core", "path": "tests/components/homekit_controller/test_lock.py", 
"file_name": "test_lock.py", "fun_name": "test_switch_change_lock_state", "commit_message": "Improve homekit_controller tests (#65266)", "code": "async def test_switch_change_lock_state(hass, utcnow):\n \n helper = await setup_test_component(hass, create_lock_service)\n\n await hass.services.async_call(\n \"lock\", \"lock\", {\"entity_id\": \"lock.testdevice\"}, blocking=True\n )\n helper.async_assert_service_values(\n ServicesTypes.LOCK_MECHANISM,\n {\n CharacteristicsTypes.LOCK_MECHANISM_TARGET_STATE: 1,\n },\n )\n\n await hass.services.async_call(\n \"lock\", \"unlock\", {\"entity_id\": \"lock.testdevice\"}, blocking=True\n )\n helper.async_assert_service_values(\n ServicesTypes.LOCK_MECHANISM,\n {\n CharacteristicsTypes.LOCK_MECHANISM_TARGET_STATE: 0,\n },\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 147, "n_words": 39, "vocab_size": 23, "complexity": 1, "nloc": 20, "token_counts": 95, "n_ast_nodes": 158, "n_identifiers": 14, "d_id": 110185, "documentation": { "docstring": "Test that we can turn a HomeKit lock on and off again.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 80225, "commit_id": "10dbbddaf35607e4257f50dd960520a1268dd225", "repo": "wagtail", "path": "wagtail/snippets/tests/test_locking.py", "file_name": "test_locking.py", "fun_name": "test_edit_get_locked_by_self", "commit_message": "Add tests for locking snippets", "code": "def test_edit_get_locked_by_self(self):\n \n cases = [\n ([\"change\", \"unlock\"]),\n ([\"change\"]), # Can unlock even without unlock permission\n ]\n\n for permissions in cases:\n with self.subTest(\n \"User can edit and unlock an object they have locked\",\n permissions=permissions,\n ):\n # Lock the snippet\n self.lock_snippet(self.user)\n\n # Use the specified permissions\n self.set_permissions(permissions)\n\n # Get the edit page\n response = self.client.get(self.get_url(\"edit\"))\n html = response.content.decode()\n unlock_url = self.get_url(\"unlock\")\n\n # Should show lock message\n self.assertContains(\n response,\n \"'I'm a lockable snippet!' was locked by you on\",\n )\n\n # Should show Save action menu item\n self.assertContains(\n response,\n f\"{self.save_button_label}\",\n html=True,\n )\n\n # Should not show Locked action menu item\n self.assertTagInHTML(\n '',\n html,\n count=0,\n allow_extra_attrs=True,\n )\n\n # Should show lock information in the side panel\n self.assertContains(\n response,\n (\n f\"You can edit this {self.model_name}, but others may not. 
\"\n \"Unlock it to allow others to edit.\"\n ),\n )\n\n # Should show unlock buttons, one in the message and one in the side panel\n self.assertTagInHTML(\n f'',\n html,\n count=2,\n allow_extra_attrs=True,\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 957, "n_words": 159, "vocab_size": 103, "complexity": 2, "nloc": 43, "token_counts": 159, "n_ast_nodes": 287, "n_identifiers": 22, "d_id": 17034, "documentation": { "docstring": "A user can edit and unlock a snippet that is locked by themselves.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 199158, "commit_id": "e0aaa724190c49f2725bb7880eddd13ce4fef4b7", "repo": "sympy", "path": "sympy/solvers/solveset.py", "file_name": "solveset.py", "fun_name": "linear_eq_to_matrix", "commit_message": "more efficient coefficient extraction", "code": "def linear_eq_to_matrix(equations, *symbols):\n r\n if not symbols:\n raise ValueError(filldedent())\n\n if hasattr(symbols[0], '__iter__'):\n symbols = symbols[0]\n\n if has_dups(symbols):\n raise ValueError('Symbols must be unique')\n\n equations = sympify(equations)\n if isinstance(equations, MatrixBase):\n equations = list(equations)\n elif isinstance(equations, (Expr, Eq)):\n equations = [equations]\n elif not is_sequence(equations):\n raise ValueError(filldedent())\n\n # construct the dictionaries\n try:\n eq, c = _linear_eq_to_dict(equations, symbols)\n except PolyNonlinearError as err:\n raise NonlinearError(str(err))\n # prepare output matrices\n n, m = shape = len(eq), len(symbols)\n ix = dict(zip(symbols, range(m)))\n dat = {(row, ix[k]): d[k] for row, d in enumerate(eq) for k in d}\n rhs = [-i for i in c]\n del c\n A = SparseMatrix(*shape, dat)\n b = SparseMatrix(n, 1, rhs)\n return A, b\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 224, "n_words": 109, "vocab_size": 79, "complexity": 11, "nloc": 109, "token_counts": 221, "n_ast_nodes": 353, "n_identifiers": 39, "d_id": 49172, "documentation": { "docstring": "\n Converts a given System of Equations into Matrix form.\n Here `equations` must be a linear system of equations in\n `symbols`. Element ``M[i, j]`` corresponds to the coefficient\n of the jth symbol in the ith equation.\n\n The Matrix form corresponds to the augmented matrix form.\n For example:\n\n .. math:: 4x + 2y + 3z = 1\n .. math:: 3x + y + z = -6\n .. 
math:: 2x + 4y + 9z = 2\n\n This system will return $A$ and $b$ as:\n\n $$ A = \\left[\\begin{array}{ccc}\n 4 & 2 & 3 \\\\\n 3 & 1 & 1 \\\\\n 2 & 4 & 9\n \\end{array}\\right] \\ \\ b = \\left[\\begin{array}{c}\n 1 \\\\ -6 \\\\ 2\n \\end{array}\\right] $$\n\n The only simplification performed is to convert\n ``Eq(a, b)`` $\\Rightarrow a - b$.\n\n Raises\n ======\n\n NonlinearError\n The equations contain a nonlinear term.\n ValueError\n The symbols are not given or are not unique.\n\n Examples\n ========\n\n >>> from sympy import linear_eq_to_matrix, symbols\n >>> c, x, y, z = symbols('c, x, y, z')\n\n The coefficients (numerical or symbolic) of the symbols will\n be returned as matrices:\n\n >>> eqns = [c*x + z - 1 - c, y + z, x - y]\n >>> A, b = linear_eq_to_matrix(eqns, [x, y, z])\n >>> A\n Matrix([\n [c, 0, 1],\n [0, 1, 1],\n [1, -1, 0]])\n >>> b\n Matrix([\n [c + 1],\n [ 0],\n [ 0]])\n\n This routine does not simplify expressions and will raise an error\n if nonlinearity is encountered:\n\n >>> eqns = [\n ... (x**2 - 3*x)/(x - 3) - 3,\n ... y**2 - 3*y - y*(y - 4) + x - 4]\n >>> linear_eq_to_matrix(eqns, [x, y])\n Traceback (most recent call last):\n ...\n NonlinearError:\n symbol-dependent term can be ignored using `strict=False`\n\n Simplifying these equations will discard the removable singularity\n in the first and reveal the linear structure of the second:\n\n >>> [e.simplify() for e in eqns]\n [x - 3, x + y - 4]\n\n Any such simplification needed to eliminate nonlinear terms must\n be done *before* calling this routine.\n \n Symbols must be given, for which coefficients\n are to be found.\n \n Equation(s) must be given as a sequence, Expr,\n Eq or Matrix.\n ", "n_words": 351, "vocab_size": 197, "n_whitespaces": 798, "language": "en" } }, { "id": 331786, "commit_id": "c6e4b7895a7dbcd9b98396cbef383dd1c72b0ad3", "repo": "pytorch-image-models", "path": "timm/models/swin_transformer_v2_cr.py", "file_name": "swin_transformer_v2_cr.py", "fun_name": "_make_attention_mask", "commit_message": "Swin V2 CR impl refactor.\n* reformat and change some naming so closer to existing timm vision transformers\n* remove typing that wasn't adding clarity (or causing torchscript issues)\n* support non-square windows\n* auto window size adjust from image size\n* post-norm + main-branch no", "code": "def _make_attention_mask(self) -> None:\n \n # Make masks for shift case\n if any(self.shift_size):\n # calculate attention mask for SW-MSA\n H, W = self.feat_size\n img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1\n cnt = 0\n for h in (\n slice(0, -self.window_size[0]),\n slice(-self.window_size[0], -self.shift_size[0]),\n slice(-self.shift_size[0], None)):\n for w in (\n slice(0, -self.window_size[1]),\n slice(-self.window_size[1], -self.shift_size[1]),\n slice(-self.shift_size[1], None)):\n img_mask[:, h, w, :] = cnt\n cnt += 1\n mask_windows = window_partition(img_mask, self.window_size) # num_windows, window_size, window_size, 1\n mask_windows = mask_windows.view(-1, self.window_area)\n attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)\n attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))\n else:\n attn_mask = None\n self.register_buffer(\"attn_mask\", attn_mask, persistent=False)\n", "url": "https://github.com/huggingface/pytorch-image-models.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 425, "n_words": 99, "vocab_size": 69, "complexity": 4, "nloc": 23, "token_counts": 244, 
"n_ast_nodes": 365, "n_identifiers": 25, "d_id": 119924, "documentation": { "docstring": "Method generates the attention mask used in shift case.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 61287, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/utils/parallel.py", "file_name": "parallel.py", "fun_name": "_map_multiprocess", "commit_message": "upd; format", "code": "def _map_multiprocess(func, iterable, chunksize=1):\n # type: (Callable[[S], T], Iterable[S], int) -> Iterator[T]\n \n with closing(ProcessPool()) as pool:\n return pool.imap_unordered(func, iterable, chunksize)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 36, "n_words": 20, "vocab_size": 19, "complexity": 1, "nloc": 3, "token_counts": 33, "n_ast_nodes": 56, "n_identifiers": 8, "d_id": 12489, "documentation": { "docstring": "Chop iterable into chunks and submit them to a process pool.\n\n For very long iterables using a large value for chunksize can make\n the job complete much faster than using the default value of 1.\n\n Return an unordered iterator of the results.\n ", "n_words": 42, "vocab_size": 36, "n_whitespaces": 54, "language": "en" } }, { "id": 321174, "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", "repo": "qutebrowser", "path": "qutebrowser/browser/webkit/tabhistory.py", "file_name": "tabhistory.py", "fun_name": "serialize", "commit_message": "Run scripts/dev/rewrite_enums.py", "code": "def serialize(items):\n \n data = QByteArray()\n stream = QDataStream(data, QIODevice.OpenModeFlag.ReadWrite)\n user_data: List[Mapping[str, Any]] = []\n\n current_idx = None\n\n for i, item in enumerate(items):\n if item.active:\n if current_idx is not None:\n raise ValueError(\"Multiple active items ({} and {}) \"\n \"found!\".format(current_idx, i))\n current_idx = i\n\n if items:\n if current_idx is None:\n raise ValueError(\"No active item found!\")\n else:\n current_idx = 0\n\n _serialize_items(items, current_idx, stream)\n\n user_data += [item.user_data for item in items]\n\n stream.device().reset()\n qtutils.check_qdatastream(stream)\n return stream, data, user_data\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 213, "n_words": 73, "vocab_size": 52, "complexity": 7, "nloc": 21, "token_counts": 135, "n_ast_nodes": 219, "n_identifiers": 26, "d_id": 117577, "documentation": { "docstring": "Serialize a list of TabHistoryItems to a data stream.\n\n Args:\n items: An iterable of TabHistoryItems.\n\n Return:\n A (stream, data, user_data) tuple.\n stream: The reset QDataStream.\n data: The QByteArray with the raw data.\n user_data: A list with each item's user data.\n\n Warning:\n If 'data' goes out of scope, reading from 'stream' will result in a\n segfault!\n ", "n_words": 55, "vocab_size": 46, "n_whitespaces": 128, "language": "en" } }, { "id": 45337, "commit_id": "27d19e7626ef80687997a6799762fa00162c1328", "repo": "airflow", "path": "tests/providers/databricks/operators/test_databricks_sql.py", "file_name": "test_databricks_sql.py", "fun_name": "test_copy_with_expression", "commit_message": "Databricks SQL operators (#21363)", "code": "def test_copy_with_expression(self):\n expression = \"col1, col2\"\n op = DatabricksCopyIntoOperator(\n 
file_location=COPY_FILE_LOCATION,\n file_format='CSV',\n table_name='test',\n task_id=TASK_ID,\n pattern='folder1/file_[a-g].csv',\n expression_list=expression,\n format_options={'header': 'true'},\n force_copy=True,\n )\n assert (\n op._create_sql_query()\n == f.strip()\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 169, "n_words": 25, "vocab_size": 23, "complexity": 1, "nloc": 22, "token_counts": 64, "n_ast_nodes": 114, "n_identifiers": 17, "d_id": 8547, "documentation": { "docstring": "COPY INTO test\nFROM (SELECT {expression} FROM '{COPY_FILE_LOCATION}')\nFILEFORMAT = CSV\nPATTERN = 'folder1/file_[a-g].csv'\nFORMAT_OPTIONS ('header' = 'true')\nCOPY_OPTIONS ('force' = 'true')\n", "n_words": 22, "vocab_size": 17, "n_whitespaces": 16, "language": "en" } }, { "id": 132908, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/client/__init__.py", "file_name": "__init__.py", "fun_name": "_register_serializers", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _register_serializers(self):\n \n import ray.serialization_addons\n from ray.util.serialization import StandaloneSerializationContext\n\n ctx = StandaloneSerializationContext()\n ray.serialization_addons.apply(ctx)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 47, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 5, "token_counts": 31, "n_ast_nodes": 52, "n_identifiers": 9, "d_id": 29864, "documentation": { "docstring": "Register the custom serializer addons at the client side.\n\n The server side should have already registered the serializers via\n regular worker's serialization_context mechanism.\n ", "n_words": 23, "vocab_size": 21, "n_whitespaces": 44, "language": "en" } }, { "id": 125607, "commit_id": "b32c784c7fbeab39f77ec47e66c18e987efb582d", "repo": "ray", "path": "rllib/utils/exploration/random_encoder.py", "file_name": "random_encoder.py", "fun_name": "_postprocess_tf", "commit_message": "[RLLib] RE3 exploration algorithm TF2 framework support (#25221)", "code": "def _postprocess_tf(self, policy, sample_batch, tf_sess):\n \n if self.framework == \"tf\":\n obs_embeds = tf_sess.run(\n self._obs_embeds,\n feed_dict={self._obs_ph: sample_batch[SampleBatch.OBS]},\n )\n else:\n obs_embeds = tf.stop_gradient(\n self._encoder_net({SampleBatch.OBS: sample_batch[SampleBatch.OBS]})[0]\n ).numpy()\n sample_batch[SampleBatch.OBS_EMBEDS] = obs_embeds\n return sample_batch\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 152, "n_words": 28, "vocab_size": 24, "complexity": 2, "nloc": 12, "token_counts": 88, "n_ast_nodes": 137, "n_identifiers": 18, "d_id": 27923, "documentation": { "docstring": "Calculate states' embeddings and add it to SampleBatch.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 54795, "commit_id": "fc9f253912945e088e48cc723af383e6a9f46faf", "repo": "prefect", "path": "tests/orion/database/test_migrations.py", "file_name": "test_migrations.py", "fun_name": "test_backfill_state_name", "commit_message": "Add run.state_name columns", "code": "async def test_backfill_state_name(db, flow):\n \n connection_url = 
PREFECT_ORION_DATABASE_CONNECTION_URL.value()\n dialect = get_dialect(connection_url)\n\n # get the proper migration revisions\n if dialect.name == \"postgresql\":\n revisions = (\"605ebb4e9155\", \"14dc68cc5853\")\n else:\n revisions = (\"7f5f335cace3\", \"db6bde582447\")\n\n flow_run_id = uuid4()\n null_state_flow_run_id = uuid4()\n flow_run_state_1_id = uuid4()\n flow_run_state_2_id = uuid4()\n\n task_run_id = uuid4()\n null_state_task_run_id = uuid4()\n task_run_state_1_id = uuid4()\n task_run_state_2_id = uuid4()\n try:\n # downgrade to the previous revision\n await run_sync_in_worker_thread(alembic_downgrade, revision=revisions[0])\n session = await db.session()", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "async def test_backfill_state_name(db, flow):\n \"\"\"\n Tests state_name is backfilled correctly for the flow_run\n and task_run tables by a specific migration\n \"\"\"\n connection_url = PREFECT_ORION_DATABASE_CONNECTION_URL.value()\n dialect = get_dialect(connection_url)\n\n # get the proper migration revisions\n if dialect.name == \"postgresql\":\n revisions = (\"605ebb4e9155\", \"14dc68cc5853\")\n else:\n revisions = (\"7f5f335cace3\", \"db6bde582447\")\n\n flow_run_id = uuid4()\n null_state_flow_run_id = uuid4()\n flow_run_state_1_id = uuid4()\n flow_run_state_2_id = uuid4()\n\n task_run_id = uuid4()\n null_state_task_run_id = uuid4()\n task_run_state_1_id = uuid4()\n task_run_state_2_id = uuid4()\n try:\n # downgrade to the previous revision\n await run_sync_in_worker_thread(alembic_downgrade, revision=revisions[0])\n session = await db.session()", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 147, "n_words": 67, "vocab_size": 43, "complexity": 3, "nloc": 99, "token_counts": 385, "n_ast_nodes": 191, "n_identifiers": 23, "d_id": 11146, "documentation": { "docstring": "\n Tests state_name is backfilled correctly for the flow_run\n and task_run tables by a specific migration\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 25, "language": "en" } }, { "id": 247323, "commit_id": "2ffaf30803f93273a4d8a65c9e6c3110c8433488", "repo": "synapse", "path": "tests/rest/client/test_third_party_rules.py", "file_name": "test_third_party_rules.py", "fun_name": "test_legacy_check_event_allowed", "commit_message": "Add type hints to `tests/rest/client` (#12108)\n\n* Add type hints to `tests/rest/client`\r\n\r\n* newsfile\r\n\r\n* fix imports\r\n\r\n* add `test_account.py`\r\n\r\n* Remove one type hint in `test_report_event.py`\r\n\r\n* change `on_create_room` to `async`\r\n\r\n* update new functions in `test_third_party_rules.py`\r\n\r\n* Add `test_filter.py`\r\n\r\n* add `test_rooms.py`\r\n\r\n* change to `assertEquals` to `assertEqual`\r\n\r\n* lint", "code": "def test_legacy_check_event_allowed(self) -> None:\n \n channel = self.make_request(\n \"PUT\",\n \"/_matrix/client/r0/rooms/%s/send/m.room.message/1\" % self.room_id,\n {\n \"msgtype\": \"m.text\",\n \"body\": \"Original body\",\n },\n access_token=self.tok,\n )\n self.assertEqual(channel.result[\"code\"], b\"200\", channel.result)\n\n event_id = channel.json_body[\"event_id\"]\n\n channel = self.make_request(\n \"GET\",\n \"/_matrix/client/r0/rooms/%s/event/%s\" % (self.room_id, event_id),\n access_token=self.tok,\n )\n self.assertEqual(channel.result[\"code\"], b\"200\", channel.result)\n\n self.assertIn(\"foo\", channel.json_body[\"content\"].keys())\n self.assertEqual(channel.json_body[\"content\"][\"foo\"], \"bar\")\n", "url": "https://github.com/matrix-org/synapse.git", 
"language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 231, "n_words": 43, "vocab_size": 33, "complexity": 1, "nloc": 23, "token_counts": 142, "n_ast_nodes": 240, "n_identifiers": 13, "d_id": 71589, "documentation": { "docstring": "Tests that the wrapper for legacy check_event_allowed callbacks works\n correctly.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 24, "language": "en" } }, { "id": 73658, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/blocks/field_block.py", "file_name": "field_block.py", "fun_name": "bulk_to_python", "commit_message": "Reformat with black", "code": "def bulk_to_python(self, values):\n \n objects = self.target_model.objects.in_bulk(values)\n return [\n objects.get(id) for id in values\n ] # Keeps the ordering the same as in values.\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 63, "n_words": 23, "vocab_size": 21, "complexity": 2, "nloc": 5, "token_counts": 33, "n_ast_nodes": 54, "n_identifiers": 8, "d_id": 16083, "documentation": { "docstring": "Return the model instances for the given list of primary keys.\n\n The instances must be returned in the same order as the values and keep None values.\n ", "n_words": 27, "vocab_size": 23, "n_whitespaces": 41, "language": "en" } }, { "id": 267309, "commit_id": "dfde4be444ee66a1a0e44751b80bcf1afd6661d7", "repo": "ansible", "path": "test/lib/ansible_test/_internal/commands/sanity/__init__.py", "file_name": "__init__.py", "fun_name": "supported_python_versions", "commit_message": "Add Python 3.11 support.\n\nci_complete\nci_coverage", "code": "def supported_python_versions(self): # type: () -> t.Optional[t.Tuple[str, ...]]\n \n versions = super().supported_python_versions\n\n if self.minimum_python_version:\n versions = tuple(version for version in versions if str_to_version(version) >= str_to_version(self.minimum_python_version))\n\n if self.maximum_python_version:\n versions = tuple(version for version in versions if str_to_version(version) <= str_to_version(self.maximum_python_version))\n\n return versions\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 97, "n_words": 39, "vocab_size": 24, "complexity": 7, "nloc": 7, "token_counts": 69, "n_ast_nodes": 113, "n_identifiers": 9, "d_id": 78848, "documentation": { "docstring": "A tuple of supported Python versions or None if the test does not depend on specific Python versions.", "n_words": 18, "vocab_size": 17, "n_whitespaces": 17, "language": "en" } }, { "id": 93732, "commit_id": "2fbf550ec05c8501cbc9eca62e73526e717dcbdf", "repo": "sentry", "path": "src/sentry/integrations/jira_server/integration.py", "file_name": "integration.py", "fun_name": "sync_status_outbound", "commit_message": "ref(Jira): Split Jira Cloud and Jira Server (#37034)\n\n* Split Jira Cloud and Jira Server", "code": "def sync_status_outbound(self, external_issue, is_resolved, project_id, **kwargs):\n \n client = self.get_client()\n jira_issue = client.get_issue(external_issue.key)\n jira_project = jira_issue[\"fields\"][\"project\"]\n\n try:\n external_project = IntegrationExternalProject.objects.get(\n external_id=jira_project[\"id\"],\n organization_integration_id__in=OrganizationIntegration.objects.filter(\n organization_id=external_issue.organization_id,\n integration_id=external_issue.integration_id,\n ),\n 
)\n except IntegrationExternalProject.DoesNotExist:\n return\n\n jira_status = (\n external_project.resolved_status if is_resolved else external_project.unresolved_status\n )\n\n # don't bother updating if it's already the status we'd change it to\n if jira_issue[\"fields\"][\"status\"][\"id\"] == jira_status:\n return\n try:\n transitions = client.get_transitions(external_issue.key)\n except ApiHostError:\n raise IntegrationError(\"Could not reach host to get transitions.\")\n\n try:\n transition = [t for t in transitions if t.get(\"to\", {}).get(\"id\") == jira_status][0]\n except IndexError:\n # TODO(jess): Email for failure\n logger.warning(\n \"jira.status-sync-fail\",\n extra={\n \"organization_id\": external_issue.organization_id,\n \"integration_id\": external_issue.integration_id,\n \"issue_key\": external_issue.key,\n },\n )\n return\n\n client.transition_issue(external_issue.key, transition[\"id\"])\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 525, "n_words": 103, "vocab_size": 81, "complexity": 8, "nloc": 36, "token_counts": 213, "n_ast_nodes": 352, "n_identifiers": 37, "d_id": 19015, "documentation": { "docstring": "\n Propagate a sentry issue's status to a linked issue's status.\n ", "n_words": 10, "vocab_size": 8, "n_whitespaces": 25, "language": "en" } }, { "id": 246360, "commit_id": "eb609c65d0794dd49efcd924bdc8743fd4253a93", "repo": "synapse", "path": "tests/storage/test_state.py", "file_name": "test_state.py", "fun_name": "test_return_expanded", "commit_message": "Fix bug in `StateFilter.return_expanded()` and add some tests. (#12016)", "code": "def test_return_expanded(self):\n \n\n self.assertEqual(StateFilter.all().return_expanded(), StateFilter.all())\n\n self.assertEqual(StateFilter.none().return_expanded(), StateFilter.none())\n\n # Concrete-only state filters stay the same\n # (Case: mixed filter)\n self.assertEqual(\n StateFilter.freeze(\n {\n EventTypes.Member: {\"@wombat:test\", \"@alicia:test\"},\n \"some.other.state.type\": {\"\"},\n },\n include_others=False,\n ).return_expanded(),\n StateFilter.freeze(\n {\n EventTypes.Member: {\"@wombat:test\", \"@alicia:test\"},\n \"some.other.state.type\": {\"\"},\n },\n include_others=False,\n ),\n )\n\n # Concrete-only state filters stay the same\n # (Case: non-member-only filter)\n self.assertEqual(\n StateFilter.freeze(\n {\"some.other.state.type\": {\"\"}}, include_others=False\n ).return_expanded(),\n StateFilter.freeze({\"some.other.state.type\": {\"\"}}, include_others=False),\n )\n\n # Concrete-only state filters stay the same\n # (Case: member-only filter)\n self.assertEqual(\n StateFilter.freeze(\n {\n EventTypes.Member: {\"@wombat:test\", \"@alicia:test\"},\n },\n include_others=False,\n ).return_expanded(),\n StateFilter.freeze(\n {\n EventTypes.Member: {\"@wombat:test\", \"@alicia:test\"},\n },\n include_others=False,\n ),\n )\n\n # Wildcard member-only state filters stay the same\n self.assertEqual(\n StateFilter.freeze(\n {EventTypes.Member: None},\n include_others=False,\n ).return_expanded(),\n StateFilter.freeze(\n {EventTypes.Member: None},\n include_others=False,\n ),\n )\n\n # If there is a wildcard in the non-member portion of the filter,\n # it's expanded to include ALL non-member events.\n # (Case: mixed filter)\n self.assertEqual(\n StateFilter.freeze(\n {\n EventTypes.Member: {\"@wombat:test\", \"@alicia:test\"},\n \"some.other.state.type\": None,\n },\n include_others=False,\n 
).return_expanded(),\n StateFilter.freeze(\n {EventTypes.Member: {\"@wombat:test\", \"@alicia:test\"}},\n include_others=True,\n ),\n )\n\n # If there is a wildcard in the non-member portion of the filter,\n # it's expanded to include ALL non-member events.\n # (Case: non-member-only filter)\n self.assertEqual(\n StateFilter.freeze(\n {\n \"some.other.state.type\": None,\n },\n include_others=False,\n ).return_expanded(),\n StateFilter.freeze({EventTypes.Member: set()}, include_others=True),\n )\n self.assertEqual(\n StateFilter.freeze(\n {\n \"some.other.state.type\": None,\n \"yet.another.state.type\": {\"wombat\"},\n },\n include_others=False,\n ).return_expanded(),\n StateFilter.freeze({EventTypes.Member: set()}, include_others=True),\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 1317, "n_words": 203, "vocab_size": 63, "complexity": 1, "nloc": 81, "token_counts": 410, "n_ast_nodes": 668, "n_identifiers": 12, "d_id": 71177, "documentation": { "docstring": "\n Tests the behaviour of the return_expanded() function that expands\n StateFilters to include more state types (for the sake of cache hit rate).\n ", "n_words": 22, "vocab_size": 19, "n_whitespaces": 44, "language": "en" } }, { "id": 181946, "commit_id": "2635f58e7c3d10b161ee69a15ebfe6499ac26daa", "repo": "textual", "path": "src/textual/dom.py", "file_name": "dom.py", "fun_name": "parent", "commit_message": "docstrings and tidy", "code": "def parent(self) -> DOMNode:\n \n if self._parent is None:\n raise NoParent(f\"{self} has no parent\")\n assert isinstance(self._parent, DOMNode)\n return self._parent\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 57, "n_words": 18, "vocab_size": 17, "complexity": 2, "nloc": 13, "token_counts": 34, "n_ast_nodes": 60, "n_identifiers": 6, "d_id": 43685, "documentation": { "docstring": "Get the parent node.\n\n Raises:\n NoParent: If this is the root node.\n\n Returns:\n DOMNode: The node which is the direct parent of this node.\n ", "n_words": 24, "vocab_size": 17, "n_whitespaces": 67, "language": "en" } }, { "id": 276301, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/utils_v1/export_utils.py", "file_name": "export_utils.py", "fun_name": "_maybe_add_default_serving_output", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _maybe_add_default_serving_output(export_outputs):\n \n if len(export_outputs) == 1:\n ((key, value),) = export_outputs.items()\n if key != tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY:\n export_outputs[\n tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY\n ] = value\n if len(export_outputs) > 1:\n if (\n tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY\n not in export_outputs\n ):\n raise ValueError(\n \"Multiple `export_outputs` were provided, but none of them are \"\n \"specified as the default. 
Use\"\n \"`tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY` to \"\n \"specify a default.\"\n )\n\n return export_outputs\n\n\n# LINT.ThenChange(//tensorflow/python/saved_model/model_utils/export_utils.py)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 238, "n_words": 58, "vocab_size": 49, "complexity": 5, "nloc": 19, "token_counts": 77, "n_ast_nodes": 131, "n_identifiers": 10, "d_id": 81623, "documentation": { "docstring": "Add a default serving output to the export_outputs if not present.\n\n Args:\n export_outputs: Describes the output signatures to be exported to\n `SavedModel` and used during serving. Should be a dict.\n\n Returns:\n export_outputs dict with default serving signature added if necessary\n\n Raises:\n ValueError: if multiple export_outputs were provided without a default\n serving key.\n ", "n_words": 52, "vocab_size": 37, "n_whitespaces": 93, "language": "en" } }, { "id": 59407, "commit_id": "295fd5d4b65dc967d8ddc99817b52d8273301063", "repo": "prefect", "path": "src/prefect/serializers.py", "file_name": "serializers.py", "fun_name": "check_compressionlib", "commit_message": "Add `CompressedSerializer` for compression of other result serializers (#7164)\n\nCo-authored-by: Terrence Dorsey ", "code": "def check_compressionlib(cls, value):\n \n try:\n compresser = from_qualified_name(value)\n except (ImportError, AttributeError) as exc:\n raise ValueError(\n f\"Failed to import requested compression library: {value!r}.\"\n ) from exc\n\n if not callable(getattr(compresser, \"compress\", None)):\n raise ValueError(\n f\"Compression library at {value!r} does not have a 'compress' method.\"\n )\n\n if not callable(getattr(compresser, \"decompress\", None)):\n raise ValueError(\n f\"Compression library at {value!r} does not have a 'decompress' method.\"\n )\n\n return value\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 226, "n_words": 62, "vocab_size": 42, "complexity": 4, "nloc": 16, "token_counts": 75, "n_ast_nodes": 139, "n_identifiers": 11, "d_id": 11900, "documentation": { "docstring": "\n Check that the given pickle library is importable and has compress/decompress\n methods.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 34, "language": "en" } }, { "id": 62018, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/locators.py", "file_name": "locators.py", "fun_name": "prefer_url", "commit_message": "upd; format", "code": "def prefer_url(self, url1, url2):\n \n result = url2\n if url1:\n s1 = self.score_url(url1)\n s2 = self.score_url(url2)\n if s1 > s2:\n result = url1\n if result != url2:\n logger.debug('Not replacing %r with %r', url1, url2)\n else:\n logger.debug('Replacing %r with %r', url1, url2)\n return result\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 170, "n_words": 42, "vocab_size": 27, "complexity": 4, "nloc": 12, "token_counts": 69, "n_ast_nodes": 113, "n_identifiers": 10, "d_id": 12828, "documentation": { "docstring": "\n Choose one of two URLs where both are candidates for distribution\n archives for the same version of a distribution (for example,\n .tar.gz vs. 
zip).\n\n The current implementation favours https:// URLs over http://, archives\n from PyPI over those from other locations, wheel compatibility (if a\n wheel) and then the archive name.\n ", "n_words": 50, "vocab_size": 41, "n_whitespaces": 100, "language": "en" } }, { "id": 69944, "commit_id": "b3c009b22ef6c47a54faa4c8bf4e10bb62caeef4", "repo": "glances", "path": "glances/compat.py", "file_name": "compat.py", "fun_name": "system_exec", "commit_message": "Correct unitary test failed", "code": "def system_exec(command):\n \n try:\n res = subprocess.run(command.split(' '), stdout=subprocess.PIPE).stdout.decode('utf-8')\n except Exception as e:\n logger.debug('Can not evaluate command {} ({})'.format(command, e))\n res = ''\n return res.rstrip()\n\n", "url": "https://github.com/nicolargo/glances.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 57, "n_words": 24, "vocab_size": 22, "complexity": 2, "nloc": 7, "token_counts": 61, "n_ast_nodes": 109, "n_identifiers": 15, "d_id": 15190, "documentation": { "docstring": "Execute a system command and return the result as a str", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 242759, "commit_id": "1997c814abcbc071fb9f289fda021e8d08cad4a7", "repo": "Pillow", "path": "src/PIL/GifImagePlugin.py", "file_name": "GifImagePlugin.py", "fun_name": "getdata", "commit_message": "Move useful comment into docstring", "code": "def getdata(im, offset=(0, 0), **params):\n \n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 8, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 24, "token_counts": 50, "n_ast_nodes": 28, "n_identifiers": 4, "d_id": 69911, "documentation": { "docstring": "\n Legacy Method\n\n Return a list of strings representing this image.\n The first string is a local image header, the rest contains\n encoded image data.\n\n To specify duration, add the time in milliseconds,\n e.g. ``getdata(im_frame, duration=1000)``\n\n :param im: Image object\n :param offset: Tuple of (x, y) pixels. Defaults to (0, 0)\n :param \\\\**params: e.g. duration or other encoder info parameters\n :returns: List of bytes containing GIF encoded frame data\n\n ", "n_words": 68, "vocab_size": 59, "n_whitespaces": 102, "language": "en" } }, { "id": 138804, "commit_id": "5c06e3f14900e3812061416759c25ff2b88c8a23", "repo": "ray", "path": "python/ray/serve/pipeline/tests/test_generate.py", "file_name": "test_generate.py", "fun_name": "test_shared_deployment_handle", "commit_message": "[DAG] add basic plotting on Ray DAGs (#24223)\n\nTo add basic plotting feature for Ray DAGs. \r\n\r\n`ray.experimental.dag.plot(dag: DAGNode, to_file=None)`\r\n\r\n### Behavior\r\n1. dump the dag plot (Dot) to file.\r\n2. also render the image whenever possible. E.g. if running in Jupyter notebook, the image will not only be saved, but also rendered in the notebook.\r\n3. when to_file is not set (i.e. None), it will be saved to a tempfile for rendering purpose only. 
This is common when users plot DAGs in notebook env to explore the DAG structure without wanting to save it to a file.", "code": "def test_shared_deployment_handle(serve_instance):\n \n ray_dag, _ = get_shared_deployment_handle_dag()\n\n with DAGNodeNameGenerator() as node_name_generator:\n serve_root_dag = ray_dag.apply_recursive(\n lambda node: transform_ray_dag_to_serve_dag(node, node_name_generator)\n )\n print(f\"Serve DAG: \\n{serve_root_dag}\")\n deployments = extract_deployments_from_serve_dag(serve_root_dag)\n assert len(deployments) == 2\n for deployment in deployments:\n deployment.deploy()\n\n _validate_consistent_python_output(\n deployments[1], ray_dag, \"Combine\", input=1, output=4\n )\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 106, "n_words": 40, "vocab_size": 36, "complexity": 2, "nloc": 14, "token_counts": 83, "n_ast_nodes": 143, "n_identifiers": 20, "d_id": 31529, "documentation": { "docstring": "\n Test we can re-use the same deployment handle multiple times or in\n multiple places, without incorrectly parsing duplicated deployments.\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 29, "language": "en" } }, { "id": 315082, "commit_id": "57fd84e20c9e98df52a6e81af1fa84ee86028aa8", "repo": "core", "path": "homeassistant/components/demo/camera.py", "file_name": "camera.py", "fun_name": "async_disable_motion_detection", "commit_message": "Improve type hints in demo (#74236)", "code": "async def async_disable_motion_detection(self) -> None:\n \n self._attr_motion_detection_enabled = False\n self.async_write_ha_state()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 4, "token_counts": 18, "n_ast_nodes": 34, "n_identifiers": 4, "d_id": 113679, "documentation": { "docstring": "Disable the motion detection in base station (Disarm).", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 177335, "commit_id": "8a325d26aa7fdd3a72580c4720fa97f971bbefcb", "repo": "networkx", "path": "networkx/linalg/modularitymatrix.py", "file_name": "modularitymatrix.py", "fun_name": "directed_modularity_matrix", "commit_message": "Use scipy.sparse array datastructure (#6037)\n\n* Use scipy.sparse array datastructure\r\n\r\n* Add reminder to rm wrapper when scipy adds creation fns.\r\n\r\n* Rm mention of np matrix from code comment.\r\n\r\n* Update networkx/algorithms/bipartite/matrix.py\r\n\r\nCo-authored-by: Stefan van der Walt \r\n\r\nCo-authored-by: Ross Barnowski \r\nCo-authored-by: Stefan van der Walt ", "code": "def directed_modularity_matrix(G, nodelist=None, weight=None):\n \n import numpy as np\n\n if nodelist is None:\n nodelist = list(G)\n A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format=\"csr\")\n k_in = A.sum(axis=0)\n k_out = A.sum(axis=1)\n m = k_in.sum()\n # Expected adjacency matrix\n X = np.outer(k_out, k_in) / m\n\n return A - X\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 81, "n_words": 44, "vocab_size": 35, "complexity": 2, "nloc": 10, "token_counts": 92, "n_ast_nodes": 147, "n_identifiers": 18, "d_id": 42354, "documentation": { "docstring": "Returns the directed modularity matrix of G.\n\n The modularity matrix is the matrix B = A - <A>, where A is the 
adjacency\n matrix and <A> is the expected adjacency matrix, assuming that the graph\n is described by the configuration model.\n\n More specifically, the element B_ij of B is defined as\n\n .. math::\n B_{ij} = A_{ij} - k_i^{out} k_j^{in} / m\n\n where :math:`k_i^{in}` is the in degree of node i, and :math:`k_j^{out}` is the out degree\n of node j, with m the number of edges in the graph. When weight is set\n to a name of an attribute edge, Aij, k_i, k_j and m are computed using\n its value.\n\n Parameters\n ----------\n G : DiGraph\n A NetworkX DiGraph\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in nodelist.\n If nodelist is None, then the ordering is produced by G.nodes().\n\n weight : string or None, optional (default=None)\n The edge attribute that holds the numerical value used for\n the edge weight. If None then all edge weights are 1.\n\n Returns\n -------\n B : Numpy array\n The modularity matrix of G.\n\n Examples\n --------\n >>> G = nx.DiGraph()\n >>> G.add_edges_from(\n ... (\n ... (1, 2),\n ... (1, 3),\n ... (3, 1),\n ... (3, 2),\n ... (3, 5),\n ... (4, 5),\n ... (4, 6),\n ... (5, 4),\n ... (5, 6),\n ... (6, 4),\n ... )\n ... )\n >>> B = nx.directed_modularity_matrix(G)\n\n\n Notes\n -----\n NetworkX defines the element A_ij of the adjacency matrix as 1 if there\n is a link going from node i to node j. Leicht and Newman use the opposite\n definition. This explains the different expression for B_ij.\n\n See Also\n --------\n to_numpy_array\n modularity_spectrum\n adjacency_matrix\n modularity_matrix\n\n References\n ----------\n .. [1] E. A. Leicht, M. E. J. Newman,\n \"Community structure in directed networks\",\n Phys. Rev Lett., vol. 100, no. 11, p. 118703, 2008.\n ", "n_words": 303, "vocab_size": 177, "n_whitespaces": 598, "language": "en" } }, { "id": 133019, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/collective/collective_group/nccl_util.py", "file_name": "nccl_util.py", "fun_name": "get_nccl_reduce_op", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def get_nccl_reduce_op(reduce_op):\n \n if reduce_op not in NCCL_REDUCE_OP_MAP:\n raise RuntimeError(\"NCCL does not support reduce op: '{}'.\".format(reduce_op))\n return NCCL_REDUCE_OP_MAP[reduce_op]\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 33, "n_words": 17, "vocab_size": 16, "complexity": 2, "nloc": 4, "token_counts": 27, "n_ast_nodes": 47, "n_identifiers": 5, "d_id": 29926, "documentation": { "docstring": "Map the reduce op to NCCL reduce op type.\n\n Args:\n reduce_op (ReduceOp): ReduceOp Enum (SUM/PRODUCT/MIN/MAX).\n Returns:\n (nccl.ncclRedOp_t): the mapped NCCL reduce op.\n ", "n_words": 22, "vocab_size": 17, "n_whitespaces": 45, "language": "en" } }, { "id": 268709, "commit_id": "cda16cc5e9aa8703fb4e1ac0a0be6b631d9076cc", "repo": "ansible", "path": "test/lib/ansible_test/_internal/completion.py", "file_name": "completion.py", "fun_name": "audit_enum", "commit_message": "ansible-test - Improve container management. 
(#78550)\n\nSee changelogs/fragments/ansible-test-container-management.yml for details.", "code": "def audit_enum(self) -> AuditMode:\n \n try:\n return AuditMode(self.audit)\n except ValueError:\n raise ValueError(f'Docker completion entry \"{self.name}\" has an invalid value \"{self.audit}\" for the \"audit\" setting.') from None\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 68, "n_words": 25, "vocab_size": 25, "complexity": 2, "nloc": 6, "token_counts": 28, "n_ast_nodes": 62, "n_identifiers": 6, "d_id": 79610, "documentation": { "docstring": "The audit requirements for the container. Raises an exception if the value is invalid.", "n_words": 14, "vocab_size": 13, "n_whitespaces": 13, "language": "en" } }, { "id": 271003, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/base_preprocessing_layer_test.py", "file_name": "base_preprocessing_layer_test.py", "fun_name": "test_post_build_adapt_update_dataset", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_post_build_adapt_update_dataset(self):\n \n input_dataset = tf.data.Dataset.from_tensor_slices(\n np.array([[1], [2], [3], [4], [5], [0]])\n )\n\n input_data = keras.Input(shape=(1,))\n layer = AddingPreprocessingLayer()\n output = layer(input_data)\n model = keras.Model(input_data, output)\n model._run_eagerly = test_utils.should_run_eagerly()\n\n layer.adapt(input_dataset)\n\n self.assertAllEqual([[16], [17], [18]], model.predict([1.0, 2.0, 3.0]))\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 116, "n_words": 35, "vocab_size": 30, "complexity": 1, "nloc": 11, "token_counts": 133, "n_ast_nodes": 193, "n_identifiers": 24, "d_id": 80650, "documentation": { "docstring": "Test that preproc layers can adapt() after build() is called.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 232063, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/mapbox/layer/_symbol.py", "file_name": "_symbol.py", "fun_name": "iconsize", "commit_message": "switch to black .22", "code": "def iconsize(self):\n \n return self[\"iconsize\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 63507, "documentation": { "docstring": "\n Sets the symbol icon size (mapbox.layer.layout.icon-size). 
Has\n an effect only when `type` is set to \"symbol\".\n\n The 'iconsize' property is a number and may be specified as:\n - An int or float\n\n Returns\n -------\n int|float\n ", "n_words": 35, "vocab_size": 34, "n_whitespaces": 94, "language": "en" } }, { "id": 217531, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/getopt.py", "file_name": "getopt.py", "fun_name": "gnu_getopt", "commit_message": "add python 3.10.4 for windows", "code": "def gnu_getopt(args, shortopts, longopts = []):\n \n\n opts = []\n prog_args = []\n if isinstance(longopts, str):\n longopts = [longopts]\n else:\n longopts = list(longopts)\n\n # Allow options after non-option arguments?\n if shortopts.startswith('+'):\n shortopts = shortopts[1:]\n all_options_first = True\n elif os.environ.get(\"POSIXLY_CORRECT\"):\n all_options_first = True\n else:\n all_options_first = False\n\n while args:\n if args[0] == '--':\n prog_args += args[1:]\n break\n\n if args[0][:2] == '--':\n opts, args = do_longs(opts, args[0][2:], longopts, args[1:])\n elif args[0][:1] == '-' and args[0] != '-':\n opts, args = do_shorts(opts, args[0][1:], shortopts, args[1:])\n else:\n if all_options_first:\n prog_args += args\n break\n else:\n prog_args.append(args[0])\n args = args[1:]\n\n return opts, prog_args\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 325, "n_words": 96, "vocab_size": 54, "complexity": 10, "nloc": 30, "token_counts": 209, "n_ast_nodes": 339, "n_identifiers": 17, "d_id": 54836, "documentation": { "docstring": "getopt(args, options[, long_options]) -> opts, args\n\n This function works like getopt(), except that GNU style scanning\n mode is used by default. This means that option and non-option\n arguments may be intermixed. 
The getopt() function stops\n processing options as soon as a non-option argument is\n encountered.\n\n If the first character of the option string is `+', or if the\n environment variable POSIXLY_CORRECT is set, then option\n processing stops as soon as a non-option argument is encountered.\n\n ", "n_words": 75, "vocab_size": 53, "n_whitespaces": 102, "language": "en" } }, { "id": 8509, "commit_id": "caaab8ba561850c1b274088f278ff2d27a6f5227", "repo": "ludwig", "path": "ludwig/utils/numerical_test_utils.py", "file_name": "numerical_test_utils.py", "fun_name": "_dict_like", "commit_message": "Check for nans before testing equality in test_training_determinism (#2687)\n\n* Adds test_numerical_test_utils\r\n\r\n* Check finite metrics before checking equality.\r\n\r\n* Catch TypeError and ValueError in _dict_like and _enumerable.\r\n\r\n* Edits comments.", "code": "def _dict_like(x):\n \n try:\n _ = dict(x)\n except (TypeError, ValueError):\n return False\n return True\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 39, "n_words": 13, "vocab_size": 12, "complexity": 2, "nloc": 6, "token_counts": 25, "n_ast_nodes": 43, "n_identifiers": 6, "d_id": 1440, "documentation": { "docstring": "Returns true if an object is a dict or convertible to one, false if not.", "n_words": 15, "vocab_size": 14, "n_whitespaces": 14, "language": "en" } }, { "id": 200685, "commit_id": "34555f1ebe2a2ed1fab2a0a2ae9a8457a75eaa26", "repo": "sympy", "path": "sympy/algebras/quaternion.py", "file_name": "quaternion.py", "fun_name": "to_rotation_matrix", "commit_message": "changed homogeneous to normal", "code": "def to_rotation_matrix(self, v=None, normal=False):\n \n\n q = self\n s = q.norm()**-2\n\n # diagonal elements are different according to parameter normal\n if normal:\n m00 = s*(q.a**2 + q.b**2 - q.c**2 - q.d**2)\n m11 = s*(q.a**2 - q.b**2 + q.c**2 - q.d**2)\n m22 = s*(q.a**2 - q.b**2 - q.c**2 + q.d**2)\n else:\n m00 = 1 - 2*s*(q.c**2 + q.d**2)\n m11 = 1 - 2*s*(q.b**2 + q.d**2)\n m22 = 1 - 2*s*(q.b**2 + q.c**2)\n\n m01 = 2*s*(q.b*q.c - q.d*q.a)\n m02 = 2*s*(q.b*q.d + q.c*q.a)\n\n m10 = 2*s*(q.b*q.c + q.d*q.a)\n m12 = 2*s*(q.c*q.d - q.b*q.a)\n\n m20 = 2*s*(q.b*q.d - q.c*q.a)\n m21 = 2*s*(q.c*q.d + q.b*q.a)\n\n if not v:\n return Matrix([[m00, m01, m02], [m10, m11, m12], [m20, m21, m22]])\n\n else:\n (x, y, z) = v\n\n m03 = x - x*m00 - y*m01 - z*m02\n m13 = y - x*m10 - y*m11 - z*m12\n m23 = z - x*m20 - y*m21 - z*m22\n m30 = m31 = m32 = 0\n m33 = 1\n\n return Matrix([[m00, m01, m02, m03], [m10, m11, m12, m13],\n [m20, m21, m22, m23], [m30, m31, m32, m33]])\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 450, "n_words": 173, "vocab_size": 92, "complexity": 3, "nloc": 28, "token_counts": 464, "n_ast_nodes": 690, "n_identifiers": 31, "d_id": 49764, "documentation": { "docstring": "Returns the equivalent rotation transformation matrix of the quaternion\n which represents rotation about the origin if v is not passed.\n\n Parameters\n ==========\n\n v : tuple or None\n Default value: None\n normal : bool\n When True, gives an expression that may be more efficient for\n symbolic calculations but less so for direct evaluation. 
Both\n formulas are mathematically equivalent.\n Default value: False\n\n Returns\n =======\n\n tuple\n Returns the equivalent rotation transformation matrix of the quaternion\n which represents rotation about the origin if v is not passed.\n\n Examples\n ========\n\n >>> from sympy import Quaternion\n >>> from sympy import symbols, trigsimp, cos, sin\n >>> x = symbols('x')\n >>> q = Quaternion(cos(x/2), 0, 0, sin(x/2))\n >>> trigsimp(q.to_rotation_matrix())\n Matrix([\n [cos(x), -sin(x), 0],\n [sin(x), cos(x), 0],\n [ 0, 0, 1]])\n\n Generates a 4x4 transformation matrix (used for rotation about a point\n other than the origin) if the point(v) is passed as an argument.\n\n Examples\n ========\n\n >>> from sympy import Quaternion\n >>> from sympy import symbols, trigsimp, cos, sin\n >>> x = symbols('x')\n >>> q = Quaternion(cos(x/2), 0, 0, sin(x/2))\n >>> trigsimp(q.to_rotation_matrix((1, 1, 1)))\n Matrix([\n [cos(x), -sin(x), 0, sin(x) - cos(x) + 1],\n [sin(x), cos(x), 0, -sin(x) - cos(x) + 1],\n [ 0, 0, 1, 0],\n [ 0, 0, 0, 1]])\n\n ", "n_words": 202, "vocab_size": 100, "n_whitespaces": 589, "language": "en" } }, { "id": 207841, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_overriding_has_module_permission", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_overriding_has_module_permission(self):\n \n articles = Article._meta.verbose_name_plural.title()\n sections = Section._meta.verbose_name_plural.title()\n index_url = reverse(\"admin7:index\")\n\n self.client.force_login(self.superuser)\n response = self.client.get(index_url)\n self.assertContains(response, sections)\n self.assertNotContains(response, articles)\n self.client.logout()\n\n self.client.force_login(self.viewuser)\n response = self.client.get(index_url)\n self.assertNotContains(response, \"admin_views\")\n self.assertNotContains(response, articles)\n self.client.logout()\n\n self.client.force_login(self.adduser)\n response = self.client.get(index_url)\n self.assertNotContains(response, \"admin_views\")\n self.assertNotContains(response, articles)\n self.client.logout()\n\n self.client.force_login(self.changeuser)\n response = self.client.get(index_url)\n self.assertNotContains(response, \"admin_views\")\n self.assertNotContains(response, articles)\n self.client.logout()\n\n self.client.force_login(self.deleteuser)\n response = self.client.get(index_url)\n self.assertNotContains(response, articles)\n\n # The app list displays Sections but not Articles as the latter has\n # ModelAdmin.has_module_permission() = False.\n self.client.force_login(self.superuser)\n response = self.client.get(reverse(\"admin7:app_list\", args=(\"admin_views\",)))\n self.assertContains(response, sections)\n self.assertNotContains(response, articles)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 310, "n_words": 79, "vocab_size": 39, "complexity": 1, "nloc": 31, "token_counts": 280, "n_ast_nodes": 459, "n_identifiers": 24, "d_id": 52128, "documentation": { "docstring": "\n If has_module_permission() always returns False, the module shouldn't\n be displayed on the admin index page for any users.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 40, "language": "en" } }, { "id": 207308, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_scripts/tests.py", "file_name": 
"tests.py", "fun_name": "test_precedence", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_precedence(self):\n \n with self.settings(\n INSTALLED_APPS=[\n \"admin_scripts.complex_app\",\n \"admin_scripts.simple_app\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n ]\n ):\n out = StringIO()\n call_command(\"duplicate\", stdout=out)\n self.assertEqual(out.getvalue().strip(), \"complex_app\")\n with self.settings(\n INSTALLED_APPS=[\n \"admin_scripts.simple_app\",\n \"admin_scripts.complex_app\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n ]\n ):\n out = StringIO()\n call_command(\"duplicate\", stdout=out)\n self.assertEqual(out.getvalue().strip(), \"simple_app\")\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 299, "n_words": 34, "vocab_size": 19, "complexity": 1, "nloc": 23, "token_counts": 102, "n_ast_nodes": 187, "n_identifiers": 11, "d_id": 51924, "documentation": { "docstring": "\n Apps listed first in INSTALLED_APPS have precedence.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 61481, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/cachecontrol/caches/redis_cache.py", "file_name": "redis_cache.py", "fun_name": "clear", "commit_message": "upd; format", "code": "def clear(self):\n \n for key in self.conn.keys():\n self.conn.delete(key)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 32, "n_words": 7, "vocab_size": 7, "complexity": 2, "nloc": 3, "token_counts": 25, "n_ast_nodes": 43, "n_identifiers": 6, "d_id": 12588, "documentation": { "docstring": "Helper for clearing all the keys in a database. Use with\n caution!", "n_words": 12, "vocab_size": 12, "n_whitespaces": 18, "language": "en" } }, { "id": 9408, "commit_id": "7375ee364e0df2a417f92593e09557f1b2a3575a", "repo": "insightface", "path": "reconstruction/ostec/external/stylegan2/dnnlib/tflib/ops/upfirdn_2d.py", "file_name": "upfirdn_2d.py", "fun_name": "upfirdn_2d", "commit_message": "initialize ostec", "code": "def upfirdn_2d(x, k, upx=1, upy=1, downx=1, downy=1, padx0=0, padx1=0, pady0=0, pady1=0, impl='cuda'):\n r\n\n impl_dict = {\n 'ref': _upfirdn_2d_ref,\n 'cuda': _upfirdn_2d_cuda,\n }\n return impl_dict[impl](x=x, k=k, upx=upx, upy=upy, downx=downx, downy=downy, padx0=padx0, padx1=padx1, pady0=pady0, pady1=pady1)\n\n#----------------------------------------------------------------------------\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 58, "n_words": 33, "vocab_size": 33, "complexity": 1, "nloc": 43, "token_counts": 103, "n_ast_nodes": 144, "n_identifiers": 15, "d_id": 1608, "documentation": { "docstring": "Pad, upsample, FIR filter, and downsample a batch of 2D images.\n\n Accepts a batch of 2D images of the shape `[majorDim, inH, inW, minorDim]`\n and performs the following operations for each image, batched across\n `majorDim` and `minorDim`:\n\n 1. Pad the image with zeros by the specified number of pixels on each side\n (`padx0`, `padx1`, `pady0`, `pady1`). Specifying a negative value\n corresponds to cropping the image.\n\n 2. 
Upsample the image by inserting the zeros after each pixel (`upx`, `upy`).\n\n 3. Convolve the image with the specified 2D FIR filter (`k`), shrinking the\n image so that the footprint of all output pixels lies within the input image.\n\n 4. Downsample the image by throwing away pixels (`downx`, `downy`).\n\n This sequence of operations bears close resemblance to scipy.signal.upfirdn().\n The fused op is considerably more efficient than performing the same calculation\n using standard TensorFlow ops. It supports gradients of arbitrary order.\n\n Args:\n x: Input tensor of the shape `[majorDim, inH, inW, minorDim]`.\n k: 2D FIR filter of the shape `[firH, firW]`.\n upx: Integer upsampling factor along the X-axis (default: 1).\n upy: Integer upsampling factor along the Y-axis (default: 1).\n downx: Integer downsampling factor along the X-axis (default: 1).\n downy: Integer downsampling factor along the Y-axis (default: 1).\n padx0: Number of pixels to pad on the left side (default: 0).\n padx1: Number of pixels to pad on the right side (default: 0).\n pady0: Number of pixels to pad on the top side (default: 0).\n pady1: Number of pixels to pad on the bottom side (default: 0).\n impl: Name of the implementation to use. Can be `\"ref\"` or `\"cuda\"` (default).\n\n Returns:\n Tensor of the shape `[majorDim, outH, outW, minorDim]`, and same datatype as `x`.\n ", "n_words": 277, "vocab_size": 153, "n_whitespaces": 442, "language": "en" } }, { "id": 9880, "commit_id": "933415bfa1f9eb89f935037014dfed816eb9815d", "repo": "jina", "path": "jina/peapods/pods/__init__.py", "file_name": "__init__.py", "fun_name": "start", "commit_message": "feat: star routing (#3900)\n\n* feat(proto): adjust proto for star routing (#3844)\r\n\r\n* feat(proto): adjust proto for star routing\r\n\r\n* feat(proto): generate proto files\r\n\r\n* feat(grpc): refactor grpclet interface (#3846)\r\n\r\n* feat: refactor connection pool for star routing (#3872)\r\n\r\n* feat(k8s): add more labels to k8s deployments\r\n\r\n* feat(network): refactor connection pool\r\n\r\n* feat(network): refactor k8s pool\r\n\r\n* feat: star routing graph gateway (#3877)\r\n\r\n* feat: star routing - refactor grpc data runtime (#3887)\r\n\r\n* feat(runtimes): refactor grpc dataruntime\r\n\r\n* fix(tests): adapt worker runtime tests\r\n\r\n* fix(import): fix import\r\n\r\n* feat(proto): enable sending multiple lists (#3891)\r\n\r\n* feat: star routing gateway (#3893)\r\n\r\n* feat: star routing gateway all protocols (#3897)\r\n\r\n* test: add streaming and prefetch tests (#3901)\r\n\r\n* feat(head): new head runtime for star routing (#3899)\r\n\r\n* feat(head): new head runtime\r\n\r\n* feat(head): new head runtime\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(network): improve proto comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(worker): merge docs in worker runtime (#3905)\r\n\r\n* feat(worker): merge docs in worker runtime\r\n\r\n* feat(tests): assert after clean up\r\n\r\n* feat(tests): star routing runtime integration tests (#3908)\r\n\r\n* fix(tests): fix integration tests\r\n\r\n* test: test runtimes fast slow request (#3910)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table (#3915)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(zmq): adapt comment in dependency list\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix type tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: add test gateway to worker connection 
(#3921)\r\n\r\n* feat(pea): adapt peas for star routing (#3918)\r\n\r\n* feat(pea): adapt peas for star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(pea): add tests\r\n\r\n* feat(tests): add failing head pea test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(tests): integration tests for peas (#3923)\r\n\r\n* feat(tests): integration tests for peas\r\n\r\n* feat(pea): remove _inner_pea function\r\n\r\n* feat: star routing container pea (#3922)\r\n\r\n* test: rescue tests (#3942)\r\n\r\n* fix: fix streaming tests (#3945)\r\n\r\n* refactor: move docker run to run (#3948)\r\n\r\n* feat: star routing pods (#3940)\r\n\r\n* feat(pod): adapt pods for star routing\r\n\r\n* feat(pods): adapt basepod to star routing\r\n\r\n* feat(pod): merge pod and compound pod\r\n\r\n* feat(tests): fix tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(test): add container pea int test\r\n\r\n* feat(ci): remove more unnecessary tests\r\n\r\n* fix(tests): remove jinad runtime\r\n\r\n* feat(ci): remove latency tracking\r\n\r\n* fix(ci): fix ci def\r\n\r\n* fix(runtime): enable runtime to be exited\r\n\r\n* fix(tests): wrap runtime test in process\r\n\r\n* fix(runtimes): remove unused runtimes\r\n\r\n* feat(runtimes): improve cancel wait\r\n\r\n* fix(ci): build test pip again in ci\r\n\r\n* fix(tests): fix a test\r\n\r\n* fix(test): run async in its own process\r\n\r\n* feat(pod): include shard in activate msg\r\n\r\n* fix(pea): dont join\r\n\r\n* feat(pod): more debug out\r\n\r\n* feat(grpc): manage channels properly\r\n\r\n* feat(pods): remove exitfifo\r\n\r\n* feat(network): add simple send retry mechanism\r\n\r\n* fix(network): await pool close\r\n\r\n* fix(test): always close grpc server in worker\r\n\r\n* fix(tests): remove container pea from tests\r\n\r\n* fix(tests): reorder tests\r\n\r\n* fix(ci): split tests\r\n\r\n* fix(ci): allow alias setting\r\n\r\n* fix(test): skip a test\r\n\r\n* feat(pods): address comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: unblock skipped test (#3957)\r\n\r\n* feat: jinad pea (#3949)\r\n\r\n* feat: jinad pea\r\n\r\n* feat: jinad pea\r\n\r\n* test: remote peas\r\n\r\n* test: toplogy tests with jinad\r\n\r\n* ci: parallel jobs\r\n\r\n* feat(tests): add pod integration tests (#3958)\r\n\r\n* feat(tests): add pod integration tests\r\n\r\n* fix(tests): make tests less flaky\r\n\r\n* fix(test): fix test\r\n\r\n* test(pea): remote pea topologies (#3961)\r\n\r\n* test(pea): remote pea simple topology\r\n\r\n* test: remote pea topologies\r\n\r\n* refactor: refactor streamer result handling (#3960)\r\n\r\n* feat(k8s): adapt K8s Pod for StarRouting (#3964)\r\n\r\n* test: optimize k8s test\r\n\r\n* test: increase timeout and use different namespace\r\n\r\n* test: optimize k8s test\r\n\r\n* test: build and load image when needed\r\n\r\n* test: refactor k8s test\r\n\r\n* test: fix image name error\r\n\r\n* test: fix k8s image load\r\n\r\n* test: fix typoe port expose\r\n\r\n* test: update tests in connection pool and handling\r\n\r\n* test: remove unused fixture\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* feat(k8s): adapt k8s pod for star routing\r\n\r\n* fix(k8s): dont overwrite add/remove function in pool\r\n\r\n* fix(k8s): some fixes\r\n\r\n* fix(k8s): some more fixes\r\n\r\n* fix(k8s): linting\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix k8s unit tests\r\n\r\n* feat(k8s): complete k8s integration test\r\n\r\n* feat(k8s): 
finish k8s tests\r\n\r\n* feat(k8s): fix test\r\n\r\n* fix(tests): fix test with no name\r\n\r\n* feat(k8s): unify create/replace interface\r\n\r\n* feat(k8s): extract k8s port constants\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): wait for runtime being ready in tests\r\n\r\n* feat(k8s): address comments\r\n\r\nCo-authored-by: bwanglzu \r\n\r\n* feat(flow): adapt Flow for StarRouting (#3986)\r\n\r\n* feat(flow): add routes\r\n\r\n* feat(flow): adapt flow to star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(flow): handle empty topologies\r\n\r\n* feat(k8s): allow k8s pool disabling\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(test): fix test with mock\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(flow): clean up tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat: add plot function (#3994)\r\n\r\n* fix(tests): avoid hanging tests\r\n\r\n* feat(flow): add type hinting\r\n\r\n* fix(test): fix duplicate exec name in test\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): enable jinad test again\r\n\r\n* fix(tests): random port fixture\r\n\r\n* fix(style): replace quotes\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(ci): bring back ci (#3997)\r\n\r\n* feat(ci): enable ci again\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(ci): add latency tracking\r\n\r\n* feat(ci): bring back some tests\r\n\r\n* fix(tests): remove invalid port test\r\n\r\n* feat(ci): disable daemon and distributed tests\r\n\r\n* fix(tests): fix entrypoint in hub test\r\n\r\n* fix(tests): wait for gateway to be ready\r\n\r\n* fix(test): fix more tests\r\n\r\n* feat(flow): do rolling update and scale sequentially\r\n\r\n* fix(tests): fix more tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat: star routing hanging pods (#4011)\r\n\r\n* fix: try to handle hanging pods better\r\n\r\n* test: hanging pods test work\r\n\r\n* fix: fix topology graph problem\r\n\r\n* test: add unit test to graph\r\n\r\n* fix(tests): fix k8s tests\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s pool test\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s connection pool setting\r\n\r\n* fix(tests): make runtime test more reliable\r\n\r\n* fix(test): fix routes test\r\n\r\n* fix(tests): make rolling update test less flaky\r\n\r\n* feat(network): gurantee unique ports\r\n\r\n* feat(network): do round robin for shards\r\n\r\n* fix(ci): increase pytest timeout to 10 min\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix(ci): fix ci file\r\n\r\n* feat(daemon): jinad pod for star routing\r\n\r\n* Revert \"feat(daemon): jinad pod for star routing\"\r\n\r\nThis reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92.\r\n\r\n* feat(daemon): remote jinad pod support (#4042)\r\n\r\n* feat(daemon): add pod tests for star routing\r\n\r\n* feat(daemon): add remote pod test\r\n\r\n* test(daemon): add remote pod arguments test\r\n\r\n* test(daemon): add async scale test\r\n\r\n* test(daemon): add rolling update test\r\n\r\n* test(daemon): fix host\r\n\r\n* feat(proto): remove message proto (#4051)\r\n\r\n* feat(proto): remove message proto\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix some more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(proto): put docs back in data\r\n\r\n* fix(proto): clean 
up\r\n\r\n* feat(proto): clean up\r\n\r\n* fix(tests): skip latency tracking\r\n\r\n* fix(test): fix hub test\r\n\r\n* fix(tests): fix k8s test\r\n\r\n* fix(test): some test clean up\r\n\r\n* fix(style): clean up style issues\r\n\r\n* feat(proto): adjust for rebase\r\n\r\n* fix(tests): bring back latency tracking\r\n\r\n* fix(tests): fix merge accident\r\n\r\n* feat(proto): skip request serialization (#4074)\r\n\r\n* feat: add reduce to star routing (#4070)\r\n\r\n* feat: add reduce on shards to head runtime\r\n\r\n* test: add reduce integration tests with fixed order\r\n\r\n* feat: add reduce on needs\r\n\r\n* chore: get_docs_matrix_from_request becomes public\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* docs: remove undeterministic results warning\r\n\r\n* fix: fix uses_after\r\n\r\n* test: assert correct num docs after reducing in test_external_pod\r\n\r\n* test: correct asserts after reduce in test_rolling_update\r\n\r\n* fix: no reduce if uses_after_address is set\r\n\r\n* fix: get_docs_from_request only if needed\r\n\r\n* fix: fix tests after merge\r\n\r\n* refactor: move reduce from data_request_handler to head\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* chore: apply suggestions\r\n\r\n* fix: fix asserts\r\n\r\n* chore: minor test fix\r\n\r\n* chore: apply suggestions\r\n\r\n* test: remove flow tests with external executor (pea)\r\n\r\n* fix: fix test_expected_messages_routing\r\n\r\n* fix: fix test_func_joiner\r\n\r\n* test: adapt k8s test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix(k8s): fix static pool config\r\n\r\n* fix: use custom protoc doc generator image (#4088)\r\n\r\n* fix: use custom protoc doc generator image\r\n\r\n* fix(docs): minor doc improvement\r\n\r\n* fix(docs): use custom image\r\n\r\n* fix(docs): copy docarray\r\n\r\n* fix: doc building local only\r\n\r\n* fix: timeout doc building\r\n\r\n* fix: use updated args when building ContainerPea\r\n\r\n* test: add container PeaFactory test\r\n\r\n* fix: force pea close on windows (#4098)\r\n\r\n* fix: dont reduce if uses exist (#4099)\r\n\r\n* fix: dont use reduce if uses exist\r\n\r\n* fix: adjust reduce tests\r\n\r\n* fix: adjust more reduce tests\r\n\r\n* fix: fix more tests\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: ignore non jina resources (#4101)\r\n\r\n* feat(executor): enable async executors (#4102)\r\n\r\n* feat(daemon): daemon flow on star routing (#4096)\r\n\r\n* test(daemon): add remote flow test\r\n\r\n* feat(daemon): call scale in daemon\r\n\r\n* feat(daemon): remove tail args and identity\r\n\r\n* test(daemon): rename scalable executor\r\n\r\n* test(daemon): add a small delay in async test\r\n\r\n* feat(daemon): scale partial flow only\r\n\r\n* feat(daemon): call scale directly in partial flow store\r\n\r\n* test(daemon): use asyncio sleep\r\n\r\n* feat(daemon): enable flow level distributed tests\r\n\r\n* test(daemon): fix jinad env workspace config\r\n\r\n* test(daemon): fix pod test use new port rolling update\r\n\r\n* feat(daemon): enable distribuetd tests\r\n\r\n* test(daemon): remove duplicate tests and zed runtime test\r\n\r\n* test(daemon): fix stores unit test\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* test: correct test paths\r\n\r\n* test(daemon): add client test for remote flows\r\n\r\n* test(daemon): send a request with jina client\r\n\r\n* test(daemon): assert async generator\r\n\r\n* test(daemon): small interval between tests\r\n\r\n* test(daemon): add flow test for 
container runtime\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): use async client fetch result\r\n\r\n* test(daemon): finish container flow test\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): correct test paths\r\n\r\n* test(daemon): add small delay for async tests\r\n\r\n* fix: star routing fixes (#4100)\r\n\r\n* docs: update docs\r\n\r\n* fix: fix Request.__repr__\r\n\r\n* docs: update flow remarks\r\n\r\n* docs: fix typo\r\n\r\n* test: add non_empty_fields test\r\n\r\n* chore: remove non_empty_fields test\r\n\r\n* feat: polling per endpoint (#4111)\r\n\r\n* feat(polling): polling per endpoint configurable\r\n\r\n* fix: adjust tests\r\n\r\n* feat(polling): extend documentation\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: clean up\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: remove repeat from flaky test\r\n\r\n* fix: k8s test\r\n\r\n* feat(polling): address pr feedback\r\n\r\n* feat: improve docs\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(grpc): support connect grpc server via ssl tunnel (#4092)\r\n\r\n* feat(grpc): support ssl grpc connect if port is 443\r\n\r\n* fix(grpc): use https option instead of detect port automatically\r\n\r\n* chore: fix typo\r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* test(networking): add test for peapods networking\r\n\r\n* fix: address comments\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(polling): unify polling args (#4113)\r\n\r\n* fix: several issues for jinad pods (#4119)\r\n\r\n* fix: activate for jinad pods\r\n\r\n* fix: dont expose worker pod in partial daemon\r\n\r\n* fix: workspace setting\r\n\r\n* fix: containerized flows\r\n\r\n* fix: hub test\r\n\r\n* feat(daemon): remote peas on star routing (#4112)\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix sync async client test\r\n\r\n* test(daemon): enable remote peas test\r\n\r\n* test(daemon): replace send message to send request\r\n\r\n* test(daemon): declare pea tests in ci\r\n\r\n* test(daemon): use pea args fixture\r\n\r\n* test(daemon): head pea use default host\r\n\r\n* test(daemon): fix peas topologies\r\n\r\n* test(daemon): fix pseudo naming\r\n\r\n* test(daemon): use default host as host\r\n\r\n* test(daemon): fix executor path\r\n\r\n* test(daemon): add remote worker back\r\n\r\n* test(daemon): skip local remote remote topology\r\n\r\n* fix: jinad pea test setup\r\n\r\n* fix: jinad pea tests\r\n\r\n* fix: remove invalid assertion\r\n\r\nCo-authored-by: jacobowitz \r\n\r\n* feat: enable daemon tests again (#4132)\r\n\r\n* feat: enable daemon tests again\r\n\r\n* fix: remove bogy empty script file\r\n\r\n* fix: more jinad test fixes\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: scale and ru in jinad\r\n\r\n* fix: fix more jinad tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix: fix flow test\r\n\r\n* fix: improve pea tests reliability (#4136)\r\n\r\nCo-authored-by: Joan Fontanals 
\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Deepankar Mahapatro \r\nCo-authored-by: bwanglzu \r\nCo-authored-by: AlaeddineAbdessalem \r\nCo-authored-by: Zhaofeng Miao <522856232@qq.com>", "code": "def start(self) -> 'BasePod':\n \n ...\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 8, "token_counts": 9, "n_ast_nodes": 20, "n_identifiers": 2, "d_id": 1750, "documentation": { "docstring": "Start to run all :class:`Pea` in this BasePod.\n\n .. note::\n If one of the :class:`Pea` fails to start, make sure that all of them\n are properly closed.\n ", "n_words": 27, "vocab_size": 23, "n_whitespaces": 63, "language": "en" } }, { "id": 206860, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/views/generic/dates.py", "file_name": "dates.py", "fun_name": "get_date_list", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_date_list(self, queryset, date_type=None, ordering=\"ASC\"):\n \n date_field = self.get_date_field()\n allow_empty = self.get_allow_empty()\n if date_type is None:\n date_type = self.get_date_list_period()\n\n if self.uses_datetime_field:\n date_list = queryset.datetimes(date_field, date_type, ordering)\n else:\n date_list = queryset.dates(date_field, date_type, ordering)\n if date_list is not None and not date_list and not allow_empty:\n raise Http404(\n _(\"No %(verbose_name_plural)s available\")\n % {\n \"verbose_name_plural\": queryset.model._meta.verbose_name_plural,\n }\n )\n\n return date_list\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 230, "n_words": 55, "vocab_size": 38, "complexity": 6, "nloc": 17, "token_counts": 108, "n_ast_nodes": 175, "n_identifiers": 19, "d_id": 51761, "documentation": { "docstring": "\n Get a date list by calling `queryset.dates/datetimes()`, checking\n along the way for empty lists that aren't allowed.\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 39, "language": "en" } }, { "id": 22163, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/utils.py", "file_name": "utils.py", "fun_name": "unquote_unreserved", "commit_message": "Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def unquote_unreserved(uri):\n \n parts = uri.split(\"%\")\n for i in range(1, len(parts)):\n h = parts[i][0:2]\n if len(h) == 2 and h.isalnum():\n try:\n c = chr(int(h, 16))\n except ValueError:\n raise InvalidURL(f\"Invalid percent-escape sequence: '{h}'\")\n\n if c in UNRESERVED_SET:\n parts[i] = c + parts[i][2:]\n else:\n parts[i] = f\"%{parts[i]}\"\n else:\n parts[i] = f\"%{parts[i]}\"\n return \"\".join(parts)\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 198, "n_words": 50, "vocab_size": 37, "complexity": 6, "nloc": 16, "token_counts": 119, "n_ast_nodes": 215, "n_identifiers": 16, "d_id": 4233, "documentation": { "docstring": "Un-escape any percent-escape sequences in a URI that are unreserved\n characters. 
This leaves all reserved, illegal and non-ASCII bytes encoded.\n\n :rtype: str\n ", "n_words": 22, "vocab_size": 22, "n_whitespaces": 31, "language": "en" } }, { "id": 109925, "commit_id": "df6f95703b60348e01603f98a439b133da2938a0", "repo": "matplotlib", "path": "lib/mpl_toolkits/mplot3d/art3d.py", "file_name": "art3d.py", "fun_name": "set_3d_properties", "commit_message": "Improve mpl_toolkit documentation", "code": "def set_3d_properties(self, path, zs=0, zdir='z'):\n \n Patch3D.set_3d_properties(self, path.vertices, zs=zs, zdir=zdir)\n self._code3d = path.codes\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 33, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 3, "token_counts": 41, "n_ast_nodes": 63, "n_identifiers": 9, "d_id": 23832, "documentation": { "docstring": "\n Set the *z* position and direction of the path patch.\n\n Parameters\n ----------\n path :\n zs : float\n The location along the *zdir* axis in 3D space to position the\n path patch.\n zdir : {'x', 'y', 'z', 3-tuple}\n Plane to plot path patch orthogonal to. Default: 'z'.\n See `.get_dir_vector` for a description of the values.\n ", "n_words": 54, "vocab_size": 41, "n_whitespaces": 148, "language": "en" } }, { "id": 5311, "commit_id": "150ab593f8ca1f1aa960a0811aece26c46ba6c75", "repo": "airbyte", "path": "airbyte-cdk/python/airbyte_cdk/sources/declarative/declarative_stream.py", "file_name": "declarative_stream.py", "fun_name": "name", "commit_message": "Low code connectors: core structure (#12850)\n\n* checkout from alex/cac\r\n\r\n* doc\r\n\r\n* doc\r\n\r\n* remove broken test\r\n\r\n* rename\r\n\r\n* rename file\r\n\r\n* delete unused file\r\n\r\n* rename\r\n\r\n* abstract property\r\n\r\n* isort\r\n\r\n* update state\r\n\r\n* Update comment\r\n\r\n* remove incremental mixin\r\n\r\n* delete comment\r\n\r\n* update comments\r\n\r\n* update comments\r\n\r\n* remove no_state\r\n\r\n* rename package\r\n\r\n* pass parameters through kwargs\r\n\r\n* update interface to pass source in interface\r\n\r\n* update interface to pass source in interface\r\n\r\n* rename to stream_slicer\r\n\r\n* Low code connectors: string interpolation with jinja (#12852)\r\n\r\n* checkout from alex/cac\r\n\r\n* Add missing tests\r\n\r\n* Add missing files\r\n\r\n* missing file\r\n\r\n* rename\r\n\r\n* jinja dependency\r\n\r\n* Add comment\r\n\r\n* comment\r\n\r\n* comment\r\n\r\n* Revert \"delete unused file\"\r\n\r\nThis reverts commit 758e939367775ddbefcd52c6e1e832976d3ba9fe.\r\n\r\n* delete unused field\r\n\r\n* delete unused field\r\n\r\n* rename\r\n\r\n* pass kwargs directly\r\n\r\n* isort\r\n\r\n* Revert \"isort\"\r\n\r\nThis reverts commit 4a792239440bc9950813ccc6ed368641ce2a96e4.\r\n\r\n* format\r\n\r\n* decoder\r\n\r\n* better error handling\r\n\r\n* remove nostate\r\n\r\n* isort\r\n\r\n* delete dead code\r\n\r\n* Update mapping type to [str, Any]\r\n\r\n* add comment\r\n\r\n* Add comment\r\n\r\n* pass parameters through kwargs\r\n\r\n* move test to right module\r\n\r\n* Add missing test\r\n\r\n* Use authbase instead of deprecated class\r\n\r\n* leverage generator\r\n\r\n* rename to declarative\r\n\r\n* rename the classes too", "code": "def name(self) -> str:\n \n return self._name\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 5, "token_counts": 12, "n_ast_nodes": 
22, "n_identifiers": 4, "d_id": 749, "documentation": { "docstring": "\n :return: Stream name. By default this is the implementing class name, but it can be overridden as needed.\n ", "n_words": 18, "vocab_size": 18, "n_whitespaces": 33, "language": "en" } }, { "id": 177545, "commit_id": "9d5e11f27033049282e2d244132b0e946df6557d", "repo": "networkx", "path": "networkx/algorithms/smallworld.py", "file_name": "smallworld.py", "fun_name": "lattice_reference", "commit_message": "bug fix in smallworld.py: random_reference and lattice_reference (#6151)\n\n* raise exception if graph has less than 2 edges in random_reference and lattice_reference and tested\r\n\r\n* Updated lattice_reference doc\r\n\r\n* Update networkx/algorithms/smallworld.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Update networkx/algorithms/tests/test_smallworld.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Added some suggestions\r\n\r\n* Added some final suggestions\r\n\r\nCo-authored-by: Ross Barnowski ", "code": "def lattice_reference(G, niter=5, D=None, connectivity=True, seed=None):\n \n import numpy as np\n\n from networkx.utils import cumulative_distribution, discrete_sequence\n\n local_conn = nx.connectivity.local_edge_connectivity\n\n if len(G) < 4:\n raise nx.NetworkXError(\"Graph has fewer than four nodes.\")\n if len(G.edges) < 2:\n raise nx.NetworkXError(\"Graph has fewer that 2 edges\")\n # Instead of choosing uniformly at random from a generated edge list,\n # this algorithm chooses nonuniformly from the set of nodes with\n # probability weighted by degree.\n G = G.copy()\n keys, degrees = zip(*G.degree()) # keys, degree\n cdf = cumulative_distribution(degrees) # cdf of degree\n\n nnodes = len(G)\n nedges = nx.number_of_edges(G)\n if D is None:\n D = np.zeros((nnodes, nnodes))\n un = np.arange(1, nnodes)\n um = np.arange(nnodes - 1, 0, -1)\n u = np.append((0,), np.where(un < um, un, um))\n\n for v in range(int(np.ceil(nnodes / 2))):\n D[nnodes - v - 1, :] = np.append(u[v + 1 :], u[: v + 1])\n D[v, :] = D[nnodes - v - 1, :][::-1]\n\n niter = niter * nedges\n # maximal number of rewiring attempts per 'niter'\n max_attempts = int(nnodes * nedges / (nnodes * (nnodes - 1) / 2))\n\n for _ in range(niter):\n n = 0\n while n < max_attempts:\n # pick two random edges without creating edge list\n # choose source node indices from discrete distribution\n (ai, ci) = discrete_sequence(2, cdistribution=cdf, seed=seed)\n if ai == ci:\n continue # same source, skip\n a = keys[ai] # convert index to label\n c = keys[ci]\n # choose target uniformly from neighbors\n b = seed.choice(list(G.neighbors(a)))\n d = seed.choice(list(G.neighbors(c)))\n bi = keys.index(b)\n di = keys.index(d)\n\n if b in [a, c, d] or d in [a, b, c]:\n continue # all vertices should be different\n\n # don't create parallel edges\n if (d not in G[a]) and (b not in G[c]):\n if D[ai, bi] + D[ci, di] >= D[ai, ci] + D[bi, di]:\n # only swap if we get closer to the diagonal\n G.add_edge(a, d)\n G.add_edge(c, b)\n G.remove_edge(a, b)\n G.remove_edge(c, d)\n\n # Check if the graph is still connected\n if connectivity and local_conn(G, a, b) == 0:\n # Not connected, revert the swap\n G.remove_edge(a, d)\n G.remove_edge(c, b)\n G.add_edge(a, b)\n G.add_edge(c, d)\n else:\n break\n n += 1\n\n return G\n\n\n@py_random_state(3)\n@not_implemented_for(\"directed\")\n@not_implemented_for(\"multigraph\")", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": 
"@py_random_state(3)\n@not_implemented_for(\"directed\")\n@not_implemented_for(\"multigraph\")", "n_ast_errors": 1, "ast_levels": 17, "n_whitespaces": 997, "n_words": 350, "vocab_size": 221, "complexity": 15, "nloc": 52, "token_counts": 533, "n_ast_nodes": 858, "n_identifiers": 58, "d_id": 42433, "documentation": { "docstring": "Latticize the given graph by swapping edges.\n\n Parameters\n ----------\n G : graph\n An undirected graph.\n\n niter : integer (optional, default=1)\n An edge is rewired approximatively niter times.\n\n D : numpy.array (optional, default=None)\n Distance to the diagonal matrix.\n\n connectivity : boolean (optional, default=True)\n Ensure connectivity for the latticized graph when set to True.\n\n seed : integer, random_state, or None (default)\n Indicator of random number generation state.\n See :ref:`Randomness`.\n\n Returns\n -------\n G : graph\n The latticized graph.\n\n Raises\n ------\n NetworkXError\n If there are fewer than 4 nodes or 2 edges in `G`\n\n Notes\n -----\n The implementation is adapted from the algorithm by Sporns et al. [1]_.\n which is inspired from the original work by Maslov and Sneppen(2002) [2]_.\n\n References\n ----------\n .. [1] Sporns, Olaf, and Jonathan D. Zwi.\n \"The small world of the cerebral cortex.\"\n Neuroinformatics 2.2 (2004): 145-162.\n .. [2] Maslov, Sergei, and Kim Sneppen.\n \"Specificity and stability in topology of protein networks.\"\n Science 296.5569 (2002): 910-913.\n ", "n_words": 156, "vocab_size": 119, "n_whitespaces": 302, "language": "en" } }, { "id": 268987, "commit_id": "119cd4655d01570a70c70879dff4461ea46161bf", "repo": "keras", "path": "keras/utils/metrics_utils.py", "file_name": "metrics_utils.py", "fun_name": "binary_matches", "commit_message": "Added util metric method for binary_matches. Decoupled from public metric binarry_acc", "code": "def binary_matches(y_true, y_pred, threshold=0.5):\n \n y_pred = tf.convert_to_tensor(y_pred)\n threshold = tf.cast(threshold, y_pred.dtype)\n y_pred = tf.cast(y_pred > threshold, y_pred.dtype)\n return tf.cast(tf.equal(y_true, y_pred), tf.int8)", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 26, "n_words": 21, "vocab_size": 17, "complexity": 1, "nloc": 5, "token_counts": 66, "n_ast_nodes": 98, "n_identifiers": 10, "d_id": 79806, "documentation": { "docstring": "Creates int Tensor, 1 for label-prediction match, 0 for mismatch.\n\n Args:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n threshold: (Optional) Float representing the threshold for deciding whether\n prediction values are 1 or 0.\n\n Returns:\n Binary matches. shape = `[batch_size, d0, .. 
dN]`\n ", "n_words": 55, "vocab_size": 40, "n_whitespaces": 75, "language": "en" } }, { "id": 161297, "commit_id": "6abdd0ebf06ddede5cdf91329143b56167492a17", "repo": "MockingBird", "path": "synthesizer/models/sublayer/cbhg.py", "file_name": "cbhg.py", "fun_name": "_flatten_parameters", "commit_message": "Refactor (#649)\n\n* Refactor model\r\n\r\n* Refactor and fix bug to save plots", "code": "def _flatten_parameters(self):\n \n [m.flatten_parameters() for m in self._to_flatten]\n\n", "url": "https://github.com/babysor/MockingBird.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 2, "nloc": 2, "token_counts": 19, "n_ast_nodes": 33, "n_identifiers": 5, "d_id": 38959, "documentation": { "docstring": "Calls `flatten_parameters` on all the rnns used by the WaveRNN. Used\n to improve efficiency and avoid PyTorch yelling at us.", "n_words": 20, "vocab_size": 19, "n_whitespaces": 26, "language": "en" } }, { "id": 62961, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pep517/build.py", "file_name": "build.py", "fun_name": "compat_system", "commit_message": "upd; format", "code": "def compat_system(source_dir):\n \n try:\n system = load_system(source_dir)\n except (FileNotFoundError, KeyError):\n system = {}\n system.setdefault(\n 'build-backend',\n 'setuptools.build_meta:__legacy__',\n )\n system.setdefault('requires', ['setuptools', 'wheel'])\n return system\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 70, "n_words": 21, "vocab_size": 18, "complexity": 2, "nloc": 11, "token_counts": 48, "n_ast_nodes": 87, "n_identifiers": 7, "d_id": 13077, "documentation": { "docstring": "\n Given a source dir, attempt to get a build system backend\n and requirements from pyproject.toml. Fallback to\n setuptools but only if the file was not found or a build\n system was not indicated.\n ", "n_words": 33, "vocab_size": 26, "n_whitespaces": 49, "language": "en" } }, { "id": 118729, "commit_id": "72703b38029f9358a0ec7ca5ed875a6b438ece19", "repo": "streamlit", "path": "lib/streamlit/elements/dataframe_selector.py", "file_name": "dataframe_selector.py", "fun_name": "table", "commit_message": "Replace static apps with live Cloud apps (#4317)\n\nCo-authored-by: kajarenc ", "code": "def table(self, data=None):\n \n if _use_arrow():\n return self.dg._arrow_table(data)\n else:\n return self.dg._legacy_table(data)\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 53, "n_words": 10, "vocab_size": 9, "complexity": 2, "nloc": 5, "token_counts": 35, "n_ast_nodes": 59, "n_identifiers": 7, "d_id": 26386, "documentation": { "docstring": "Display a static table.\n\n This differs from `st.dataframe` in that the table in this case is\n static: its entire contents are laid out directly on the page.\n\n Parameters\n ----------\n data : pandas.DataFrame, pandas.Styler, pyarrow.Table, numpy.ndarray, Iterable, dict, or None\n The table data.\n Pyarrow tables are not supported by Streamlit's legacy DataFrame serialization\n (i.e. 
with `config.dataFrameSerialization = \"legacy\"`).\n To use pyarrow tables, please enable pyarrow by changing the config setting,\n `config.dataFrameSerialization = \"arrow\"`.\n\n Example\n -------\n >>> df = pd.DataFrame(\n ... np.random.randn(10, 5),\n ... columns=('col %d' % i for i in range(5)))\n ...\n >>> st.table(df)\n\n .. output::\n https://share.streamlit.io/streamlit/docs/main/python/api-examples-source/data.table.py\n height: 480px\n\n ", "n_words": 98, "vocab_size": 83, "n_whitespaces": 277, "language": "en" } }, { "id": 195690, "commit_id": "d3c0fc825c4a80904a1fb9a2092137c3d9e0c3fe", "repo": "sympy", "path": "sympy/polys/numberfields/galoisgroups.py", "file_name": "galoisgroups.py", "fun_name": "_galois_group_degree_4_simple", "commit_message": "Add a `galois_group()` function", "code": "def _galois_group_degree_4_simple(T, max_tries=30, randomize=False):\n r\n from sympy.combinatorics.permutations import Permutation\n from sympy.combinatorics.named_groups import (\n CyclicGroup, AbelianGroup, DihedralGroup, AlternatingGroup, SymmetricGroup\n )\n # Consider the resolvent for the form\n # F = X0*X1^2 + X1*X2^2 + X2*X3^2 + X3*X0^2\n # and the group G = S4. In this case, the stabilizer H is C4 = < (0123) >,\n # and a set of representatives of G/H is {I, (01), (02), (03), (12), (23)}.\n X = symbols('X0 X1 X2 X3')\n F = X[0]*X[1]**2 + X[1]*X[2]**2 + X[2]*X[3]**2 + X[3]*X[0]**2\n s = [\n Permutation(3),\n Permutation(3)(0, 1),\n Permutation(3)(0, 2),\n Permutation(3)(0, 3),\n Permutation(3)(1, 2),\n Permutation(3)(2, 3),\n ]\n R = Resolvent(F, X, s)\n history = set()\n for i in range(max_tries):\n R_dup, _, _ = R.eval_for_poly(T)\n # If R is squarefree, we can proceed. Otherwise, apply a\n # Tschirnhausen transformation on T and try again.\n if dup_sqf_p(R_dup, ZZ):\n break\n _, T = tschirnhausen_transformation(T, max_tries=max_tries, history=history, fixed_order=not randomize)\n else:\n raise MaxTriesException\n\n # Compute list L of degrees of irreducible factors of R, in increasing order:\n fl = dup_factor_list(R_dup, ZZ)\n L = sorted(sum([\n [len(r) - 1] * e for r, e in fl[1]\n ], []))\n\n if L == [6]:\n return (AlternatingGroup(4), True) if has_square_disc(T) else (SymmetricGroup(4), False)\n\n if L == [1, 1, 4]:\n return (CyclicGroup(4), False)\n\n if L == [2, 2, 2]:\n return (AbelianGroup(2, 2), True)\n\n assert L == [2, 4]\n return (DihedralGroup(4), False)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 429, "n_words": 223, "vocab_size": 154, "complexity": 8, "nloc": 46, "token_counts": 353, "n_ast_nodes": 522, "n_identifiers": 41, "d_id": 47373, "documentation": { "docstring": "\n Compute the Galois group of a polynomial of degree 4, using Alg 6.3.6\n of Cohen.\n\n References\n ==========\n\n .. [1] Cohen, H. 
*A Course in Computational Algebraic Number Theory*.\n\n ", "n_words": 28, "vocab_size": 26, "n_whitespaces": 47, "language": "en" } }, { "id": 186652, "commit_id": "7d9e9a49005de7961e84d2a7c608db57dbab3046", "repo": "certbot", "path": "certbot-apache/certbot_apache/_internal/interfaces.py", "file_name": "interfaces.py", "fun_name": "unsaved_files", "commit_message": "Add typing to certbot.apache (#9071)\n\n* Add typing to certbot.apache\r\n\r\nCo-authored-by: Adrien Ferrand ", "code": "def unsaved_files(self) -> List[str]:\n \n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 11, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 11, "token_counts": 11, "n_ast_nodes": 20, "n_identifiers": 4, "d_id": 45560, "documentation": { "docstring": "\n Returns a list of file paths that have been changed since the last save\n (or the initial configuration parse). The intended use for this method\n is to tell the Reverter which files need to be included in a checkpoint.\n\n This is typically called for the root of the ParserNode tree.\n\n :returns: list of file paths of files that have been changed but not yet\n saved to disk.\n ", "n_words": 67, "vocab_size": 47, "n_whitespaces": 121, "language": "en" } }, { "id": 107130, "commit_id": "ec4dfbc3c83866f487ff0bc9c87b0d43a1c02b22", "repo": "matplotlib", "path": "lib/matplotlib/_tight_bbox.py", "file_name": "_tight_bbox.py", "fun_name": "adjust_bbox", "commit_message": "ENH: implement and use base layout_engine for more flexible layout.", "code": "def adjust_bbox(fig, bbox_inches, fixed_dpi=None):\n \n origBbox = fig.bbox\n origBboxInches = fig.bbox_inches\n orig_layout = fig.get_layout_engine()\n fig.set_layout_engine(None)\n _boxout = fig.transFigure._boxout\n\n old_aspect = []\n locator_list = []\n sentinel = object()\n for ax in fig.axes:\n locator_list.append(ax.get_axes_locator())\n current_pos = ax.get_position(original=False).frozen()\n ax.set_axes_locator(lambda a, r, _pos=current_pos: _pos)\n # override the method that enforces the aspect ratio on the Axes\n if 'apply_aspect' in ax.__dict__:\n old_aspect.append(ax.apply_aspect)\n else:\n old_aspect.append(sentinel)\n ax.apply_aspect = lambda pos=None: None\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 164, "n_words": 63, "vocab_size": 51, "complexity": 4, "nloc": 32, "token_counts": 274, "n_ast_nodes": 221, "n_identifiers": 31, "d_id": 22596, "documentation": { "docstring": "\n Temporarily adjust the figure so that only the specified area\n (bbox_inches) is saved.\n\n It modifies fig.bbox, fig.bbox_inches,\n fig.transFigure._boxout, and fig.patch. While the figure size\n changes, the scale of the original figure is conserved. 
A\n function which restores the original values are returned.\n ", "n_words": 42, "vocab_size": 33, "n_whitespaces": 66, "language": "en" } }, { "id": 285622, "commit_id": "92991fc4e3097fdd9ac9f4f39bdd8e46289176cd", "repo": "OpenBBTerminal", "path": "openbb_terminal/stocks/fundamental_analysis/yahoo_finance_model.py", "file_name": "yahoo_finance_model.py", "fun_name": "get_currency", "commit_message": "Get rid of option expirations in the past for Nasdaq + bugs (#2498)\n\n* Get rid of option expirations in the past for Nasdaq + clean up bug\r\n\r\n* Add in currency for yfinance financials\r\n\r\n* Added fixes\r\n\r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: colin99d ", "code": "def get_currency(symbol) -> str:\n \n ticker_info = yf.Ticker(symbol).info\n if \"financialCurrency\" in ticker_info:\n return ticker_info[\"financialCurrency\"]\n return \"Not Specified\"\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 35, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 6, "token_counts": 30, "n_ast_nodes": 56, "n_identifiers": 7, "d_id": 85330, "documentation": { "docstring": "Quick helper to get currency for financial statements", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 258768, "commit_id": "e1db2a8173ca37e561cdfa4384481501c4d50868", "repo": "scikit-learn", "path": "sklearn/discriminant_analysis.py", "file_name": "discriminant_analysis.py", "fun_name": "_solve_eigen", "commit_message": "Use check_finite=False in discriminant analysis (#18909)\n\nCo-authored-by: Guillaume Lemaitre \r\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>", "code": "def _solve_eigen(self, X, y, shrinkage, covariance_estimator):\n ", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "\"\"\"Eigenvalue solver.\n\n The eigenvalue solver computes the optimal solution of thecoefficient (basically the ratio of between class scatter to within", "n_ast_errors": 2, "ast_levels": 5, "n_whitespaces": 13, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 18, "token_counts": 187, "n_ast_nodes": 46, "n_identifiers": 23, "d_id": 75415, "documentation": { "docstring": "Eigenvalue solver.\n\n The eigenvalue solver computes the optimal solution of the Rayleigh\n coefficient (basically the ratio of between class scatter to within", "n_words": 22, "vocab_size": 19, "n_whitespaces": 35, "language": "en" } }, { "id": 276243, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/saving_utils.py", "file_name": "saving_utils.py", "fun_name": "model_call_inputs", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def model_call_inputs(model, keep_original_batch_size=False):\n \n input_specs = model.save_spec(dynamic_batch=not keep_original_batch_size)\n if input_specs is None:\n return None, None\n input_specs = _enforce_names_consistency(input_specs)\n return input_specs\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 41, "n_words": 19, "vocab_size": 14, "complexity": 2, "nloc": 6, "token_counts": 38, "n_ast_nodes": 63, "n_identifiers": 7, "d_id": 81601, "documentation": { "docstring": "Inspect model to get its input signature.\n\n The model's input 
signature is a list with a single (possibly-nested) object.\n This is due to the Keras-enforced restriction that tensor inputs must be\n passed in as the first argument.\n\n For example, a model with input {'feature1': , 'feature2': }\n will have input signature: [{'feature1': TensorSpec, 'feature2': TensorSpec}]\n\n Args:\n model: Keras Model object.\n keep_original_batch_size: A boolean indicating whether we want to keep using\n the original batch size or set it to None. Default is `False`, which means\n that the batch dim of the returned input signature will always be set to\n `None`.\n\n Returns:\n A tuple containing `(args, kwargs)` TensorSpecs of the model call function\n inputs.\n `kwargs` does not contain the `training` argument.\n ", "n_words": 119, "vocab_size": 87, "n_whitespaces": 189, "language": "en" } }, { "id": 207740, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_login_redirect_for_direct_get", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_login_redirect_for_direct_get(self):\n \n response = self.client.get(reverse(\"admin:login\"))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.context[REDIRECT_FIELD_NAME], reverse(\"admin:index\"))\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 37, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 4, "token_counts": 45, "n_ast_nodes": 77, "n_identifiers": 10, "d_id": 52079, "documentation": { "docstring": "\n Login redirect should be to the admin index page when going directly to\n /admin/login/.\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 36, "language": "en" } }, { "id": 275787, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/preprocessing/text.py", "file_name": "text.py", "fun_name": "tokenizer_from_json", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def tokenizer_from_json(json_string):\n \n tokenizer_config = json.loads(json_string)\n config = tokenizer_config.get(\"config\")\n\n word_counts = json.loads(config.pop(\"word_counts\"))\n word_docs = json.loads(config.pop(\"word_docs\"))\n index_docs = json.loads(config.pop(\"index_docs\"))\n # Integer indexing gets converted to strings with json.dumps()\n index_docs = {int(k): v for k, v in index_docs.items()}\n index_word = json.loads(config.pop(\"index_word\"))\n index_word = {int(k): v for k, v in index_word.items()}\n word_index = json.loads(config.pop(\"word_index\"))\n\n tokenizer = Tokenizer(**config)\n tokenizer.word_counts = word_counts\n tokenizer.word_docs = word_docs\n tokenizer.index_docs = index_docs\n tokenizer.word_index = word_index\n tokenizer.index_word = index_word\n return tokenizer\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 124, "n_words": 70, "vocab_size": 41, "complexity": 3, "nloc": 17, "token_counts": 161, "n_ast_nodes": 274, "n_identifiers": 19, "d_id": 81466, "documentation": { "docstring": "Parses a JSON tokenizer configuration and returns a tokenizer instance.\n\n Deprecated: `tf.keras.preprocessing.text.Tokenizer` does not operate on\n tensors and is not recommended for new code. 
Prefer\n `tf.keras.layers.TextVectorization` which provides equivalent functionality\n through a layer which accepts `tf.Tensor` input. See the\n [text loading tutorial](https://www.tensorflow.org/tutorials/load_data/text)\n for an overview of the layer and text handling in tensorflow.\n\n Args:\n json_string: JSON string encoding a tokenizer configuration.\n\n Returns:\n A Keras Tokenizer instance\n ", "n_words": 66, "vocab_size": 53, "n_whitespaces": 107, "language": "en" } }, { "id": 196368, "commit_id": "59d22b6bb7287613d598611027f640d068ca5748", "repo": "sympy", "path": "sympy/matrices/common.py", "file_name": "common.py", "fun_name": "shape", "commit_message": "Moved imports to higher level", "code": "def shape(self):\n \n return (self.rows, self.cols)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 27, "n_identifiers": 4, "d_id": 47868, "documentation": { "docstring": "The shape (dimensions) of the matrix as the 2-tuple (rows, cols).\n\n Examples\n ========\n\n >>> from sympy import zeros\n >>> M = zeros(2, 3)\n >>> M.shape\n (2, 3)\n >>> M.rows\n 2\n >>> M.cols\n 3\n ", "n_words": 33, "vocab_size": 27, "n_whitespaces": 110, "language": "en" } }, { "id": 135567, "commit_id": "d329147ae28c57b290f6b932f9f3044523f67c4e", "repo": "ray", "path": "rllib/utils/tests/test_actor_manager.py", "file_name": "test_actor_manager.py", "fun_name": "test_sync_call_healthy_only", "commit_message": "[RLlib] Introduce FaultTolerantActorManager (#29703)\n\nSigned-off-by: Jun Gong ", "code": "def test_sync_call_healthy_only(self):\n \n actors = [Actor.remote(i) for i in range(4)]\n manager = FaultTolerantActorManager(actors=actors)\n\n results = []\n for _ in range(10):\n results.extend(\n manager.foreach_actor(\n lambda w: w.call(), healthy_only=True\n ).ignore_errors()\n )\n # Wait for actors to recover.\n wait_for_restore()\n\n # Notice that since we only fire calls against healthy actors,\n # we wouldn't be aware that the actors have been recovered.\n # So once an actor is taken out of the lineup (10% chance),\n # it will not go back in, and we should have few results here.\n # Basically takes us 10 calls to kill all the actors.\n # Note that we can hardcode 10 here because we are using deterministic\n # sequences of random numbers.\n self.assertEqual(len(results), 10)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 298, "n_words": 114, "vocab_size": 86, "complexity": 3, "nloc": 12, "token_counts": 83, "n_ast_nodes": 144, "n_identifiers": 20, "d_id": 30658, "documentation": { "docstring": "Test synchronous remote calls to only healthy actors.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 167469, "commit_id": "80c005e67f96f431674a37ecd8a9e8a2808f7db4", "repo": "pandas", "path": "pandas/core/indexes/datetimes.py", "file_name": "datetimes.py", "fun_name": "_get_time_micros", "commit_message": "ENH: DatetimeIndex.indexer_between_time support non-nano (#47535)", "code": "def _get_time_micros(self) -> npt.NDArray[np.int64]:\n \n values = self._data._local_timestamps()\n\n reso = self._data._reso\n ppd = periods_per_day(reso)\n\n frac = values % ppd\n if reso == NpyDatetimeUnit.NPY_FR_ns.value:\n micros = frac // 1000\n elif reso == NpyDatetimeUnit.NPY_FR_us.value:\n micros = frac\n 
elif reso == NpyDatetimeUnit.NPY_FR_ms.value:\n micros = frac * 1000\n elif reso == NpyDatetimeUnit.NPY_FR_s.value:\n micros = frac * 1_000_000\n else: # pragma: no cover\n raise NotImplementedError(reso)\n\n micros[self._isnan] = -1\n return micros\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 204, "n_words": 64, "vocab_size": 35, "complexity": 5, "nloc": 24, "token_counts": 113, "n_ast_nodes": 185, "n_identifiers": 23, "d_id": 40025, "documentation": { "docstring": "\n Return the number of microseconds since midnight.\n\n Returns\n -------\n ndarray[int64_t]\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 46, "language": "en" } }, { "id": 131209, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tests/test_advanced_2.py", "file_name": "test_advanced_2.py", "fun_name": "test_whether_worker_leaked_when_task_finished_with_errors", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_whether_worker_leaked_when_task_finished_with_errors(ray_start_regular):\n\n driver_template = ", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "driver_template = \"\"\"\nimport ray\nimport os\nimport ray\nimport numpy as np\nimport time\n\nray.init(address=\"{address}\", namespace=\"test\")@ray.remote", "n_ast_errors": 2, "ast_levels": 5, "n_whitespaces": 7, "n_words": 4, "vocab_size": 4, "complexity": 2, "nloc": 60, "token_counts": 139, "n_ast_nodes": 22, "n_identifiers": 5, "d_id": 29485, "documentation": { "docstring": "\nimport ray\nimport os\nimport ray\nimport numpy as np\nimport time\n\nray.init(address=\"{address}\", namespace=\"test\")\n\n# The util actor to store the pid cross jobs.\n@ray.remote", "n_words": 25, "vocab_size": 20, "n_whitespaces": 17, "language": "en" } }, { "id": 12303, "commit_id": "65d6d6da50cb795499ea5e361bf14908f62a3168", "repo": "jina", "path": "jina/serve/runtimes/worker/__init__.py", "file_name": "__init__.py", "fun_name": "endpoint_discovery", "commit_message": "feat: gateway endpoint discovery (#4756)", "code": "async def endpoint_discovery(self, empty, context) -> jina_pb2.EndpointsProto:\n \n endpointsProto = jina_pb2.EndpointsProto()\n endpointsProto.endpoints.extend(\n list(self._data_request_handler._executor.requests.keys())\n )\n return endpointsProto\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 61, "n_words": 15, "vocab_size": 14, "complexity": 1, "nloc": 13, "token_counts": 44, "n_ast_nodes": 73, "n_identifiers": 14, "d_id": 2252, "documentation": { "docstring": "\n Process the the call requested and return the list of Endpoints exposed by the Executor wrapped inside this Runtime\n\n :param empty: The service expects an empty protobuf message\n :param context: grpc context\n :returns: the response request\n ", "n_words": 36, "vocab_size": 31, "n_whitespaces": 72, "language": "en" } }, { "id": 250941, "commit_id": "ea6f9727dab03b0811c180bab761d28b7e57ef50", "repo": "mitmproxy", "path": "mitmproxy/dns.py", "file_name": "dns.py", "fun_name": "to_json", "commit_message": "[dns] use snake_case in web flows", "code": "def to_json(self) -> dict:\n \n return {\n \"name\": self.name,\n \"type\": self.type.name,\n \"class\": self.class_.name,\n }\n\n\n@dataclass", "url": 
"https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "@dataclass", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 67, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 10, "token_counts": 33, "n_ast_nodes": 61, "n_identifiers": 7, "d_id": 73570, "documentation": { "docstring": "\n Converts the question into json for mitmweb.\n Sync with web/src/flow.ts.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 32, "language": "en" } }, { "id": 133740, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/agents/impala/tests/test_vtrace.py", "file_name": "test_vtrace.py", "fun_name": "test_higher_rank_inputs_for_importance_weights", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_higher_rank_inputs_for_importance_weights(self):\n \n for fw in framework_iterator(frameworks=(\"torch\", \"tf\"), session=True):\n vtrace = vtrace_tf if fw != \"torch\" else vtrace_torch\n if fw == \"tf\":\n inputs_ = {\n \"log_rhos\": tf1.placeholder(\n dtype=tf.float32, shape=[None, None, 1]\n ),\n \"discounts\": tf1.placeholder(\n dtype=tf.float32, shape=[None, None, 1]\n ),\n \"rewards\": tf1.placeholder(\n dtype=tf.float32, shape=[None, None, 42]\n ),\n \"values\": tf1.placeholder(dtype=tf.float32, shape=[None, None, 42]),\n \"bootstrap_value\": tf1.placeholder(\n dtype=tf.float32, shape=[None, 42]\n ),\n }\n else:\n inputs_ = {\n \"log_rhos\": Box(-1.0, 1.0, (8, 10, 1)).sample(),\n \"discounts\": Box(-1.0, 1.0, (8, 10, 1)).sample(),\n \"rewards\": Box(-1.0, 1.0, (8, 10, 42)).sample(),\n \"values\": Box(-1.0, 1.0, (8, 10, 42)).sample(),\n \"bootstrap_value\": Box(-1.0, 1.0, (10, 42)).sample(),\n }\n output = vtrace.from_importance_weights(**inputs_)\n check(int(output.vs.shape[-1]), 42)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 583, "n_words": 96, "vocab_size": 47, "complexity": 4, "nloc": 29, "token_counts": 315, "n_ast_nodes": 447, "n_identifiers": 23, "d_id": 30091, "documentation": { "docstring": "Checks support for additional dimensions in inputs.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 259371, "commit_id": "8a6cf1a33e80d0e4caa16205ce199a9e1bea7657", "repo": "scikit-learn", "path": "sklearn/externals/_numpy_compiler_patch.py", "file_name": "_numpy_compiler_patch.py", "fun_name": "CCompiler_spawn", "commit_message": "BLD Monkeypatch windows build to stablize build (#22693)", "code": "def CCompiler_spawn(self, cmd, display=None, env=None):\n \n env = env if env is not None else dict(os.environ)\n if display is None:\n display = cmd\n if is_sequence(display):\n display = \" \".join(list(display))\n log.info(display)\n try:\n if self.verbose:\n subprocess.check_output(cmd, env=env)\n else:\n subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)\n except subprocess.CalledProcessError as exc:\n o = exc.output\n s = exc.returncode\n except OSError as e:\n # OSError doesn't have the same hooks for the exception\n # output, but exec_command() historically would use an\n # empty string for EnvironmentError (base class for\n # OSError)\n # o = b''\n # still that would make the end-user lost in translation!\n o = f\"\\n\\n{e}\\n\\n\\n\"\n try:\n o = o.encode(sys.stdout.encoding)\n except AttributeError:\n o = o.encode(\"utf8\")\n # status previously used by exec_command() for parent\n # of 
OSError\n s = 127\n else:\n # use a convenience return here so that any kind of\n # caught exception will execute the default code after the\n # try / except block, which handles various exceptions\n return None\n\n if is_sequence(cmd):\n cmd = \" \".join(list(cmd))\n\n if self.verbose:\n forward_bytes_to_stdout(o)\n\n if re.search(b\"Too many open files\", o):\n msg = \"\\nTry rerunning setup command until build succeeds.\"\n else:\n msg = \"\"\n raise DistutilsExecError(\n 'Command \"%s\" failed with exit status %d%s' % (cmd, s, msg)\n )\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 481, "n_words": 195, "vocab_size": 126, "complexity": 11, "nloc": 35, "token_counts": 213, "n_ast_nodes": 374, "n_identifiers": 36, "d_id": 75736, "documentation": { "docstring": "\n Execute a command in a sub-process.\n\n Parameters\n ----------\n cmd : str\n The command to execute.\n display : str or sequence of str, optional\n The text to add to the log file kept by `numpy.distutils`.\n If not given, `display` is equal to `cmd`.\n env: a dictionary for environment variables, optional\n\n Returns\n -------\n None\n\n Raises\n ------\n DistutilsExecError\n If the command failed, i.e. the exit status was not 0.\n\n ", "n_words": 66, "vocab_size": 51, "n_whitespaces": 134, "language": "en" } }, { "id": 79209, "commit_id": "180d43a200163f5b7c75280f7bbf7cb4e5de1b91", "repo": "wagtail", "path": "wagtail/query.py", "file_name": "query.py", "fun_name": "not_public", "commit_message": "Fix Page queryset.not_public returning all pages when no page restrictions exist. (#9067)\n\nFixes #8952", "code": "def not_public(self):\n \n return self.filter(self.private_q())\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 17, "n_ast_nodes": 31, "n_identifiers": 4, "d_id": 16893, "documentation": { "docstring": "\n Filters the QuerySet to only contain pages that are in a private\n section and their descendants.\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 38, "language": "en" } }, { "id": 93634, "commit_id": "272d35503a2d5174dfa8cad57f94a2354e453bf3", "repo": "sentry", "path": "src/sentry/models/release.py", "file_name": "release.py", "fun_name": "get_previous_release", "commit_message": "feat(ingest): Automatically associate commits to checksum release (#36491)\n\nFeature for Workflow 2.0. If the SDK is configured to file an event with the release version matching the release commit SHA, ingest will look to see if there have been commits between the release version and the previous release on Github. 
If there has been, it will register those GH commits as sentry commits and add them to the release.\r\n\r\nThis will allow sentry to only notify developers who worked on the current release and reduce notification spam.", "code": "def get_previous_release(self, project):\n \n return (\n ReleaseProject.objects.filter(project=project, release__date_added__lt=self.date_added)\n .order_by(\"-release__date_added\")\n .first()\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 64, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 6, "token_counts": 36, "n_ast_nodes": 60, "n_identifiers": 10, "d_id": 19000, "documentation": { "docstring": "Get the release prior to this one. None if none exists", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 121807, "commit_id": "260f1d8b843483df46cf397ae5a1afc0abc9c64f", "repo": "jax", "path": "jax/_src/profiler.py", "file_name": "profiler.py", "fun_name": "trace", "commit_message": "Add option to generate perfetto trace without generating link", "code": "def trace(log_dir, create_perfetto_link=False, create_perfetto_trace=False):\n \n start_trace(log_dir, create_perfetto_link, create_perfetto_trace)\n try:\n yield\n finally:\n stop_trace()\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 21, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 6, "token_counts": 30, "n_ast_nodes": 51, "n_identifiers": 6, "d_id": 27075, "documentation": { "docstring": "Context manager to take a profiler trace.\n\n The trace will capture CPU, GPU, and/or TPU activity, including Python\n functions and JAX on-device operations.\n\n The resulting trace can be viewed with TensorBoard. Note that TensorBoard\n doesn't need to be running when collecting the trace.\n\n Only once trace may be collected a time. A RuntimeError will be raised if a\n trace is started while another trace is running.\n\n Args:\n log_dir: The directory to save the profiler trace to (usually the\n TensorBoard log directory).\n create_perfetto_link: A boolean which, if true, creates and prints link to\n the Perfetto trace viewer UI (https://ui.perfetto.dev). The program will\n block until the link is opened and Perfetto loads the trace.\n create_perfetto_trace: A boolean which, if true, additionally dumps a\n ``perfetto_trace.json.gz`` file that is compatible for upload with the\n Perfetto trace viewer UI (https://ui.perfetto.dev). The file will also be\n generated if ``create_perfetto_link`` is true. 
This could be useful if you\n want to generate a Perfetto-compatible trace without blocking the\n processs.\n ", "n_words": 161, "vocab_size": 97, "n_whitespaces": 218, "language": "en" } }, { "id": 155395, "commit_id": "704ded959541bcf55acadfb49f3fda804267b767", "repo": "modin", "path": "modin/core/storage_formats/base/query_compiler.py", "file_name": "query_compiler.py", "fun_name": "repartition", "commit_message": "FEAT-#5367: Introduce new API for repartitioning Modin objects (#5366)\n\nCo-authored-by: Iaroslav Igoshev \r\nCo-authored-by: Vasily Litvinov \r\nSigned-off-by: Anatoly Myachev ", "code": "def repartition(self, axis=None):\n \n if StorageFormat.get() == \"Hdk\":\n # Hdk uses only one partition, it makes\n # no sense for it to repartition the dataframe.\n return self\n\n axes = [0, 1] if axis is None else [axis]\n\n new_query_compiler = self\n for _ax in axes:\n new_query_compiler = new_query_compiler.__constructor__(\n new_query_compiler._modin_frame.apply_full_axis(\n _ax, lambda df: df, keep_partitioning=False\n )\n )\n return new_query_compiler\n\n # End of DataFrame methods\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 210, "n_words": 61, "vocab_size": 49, "complexity": 4, "nloc": 12, "token_counts": 70, "n_ast_nodes": 113, "n_identifiers": 13, "d_id": 36375, "documentation": { "docstring": "\n Repartitioning QueryCompiler objects to get ideal partitions inside.\n\n Allows to improve performance where the query compiler can't improve\n yet by doing implicit repartitioning.\n\n Parameters\n ----------\n axis : {0, 1, None}, optional\n The axis along which the repartitioning occurs.\n `None` is used for repartitioning along both axes.\n\n Returns\n -------\n BaseQueryCompiler\n The repartitioned BaseQueryCompiler.\n ", "n_words": 52, "vocab_size": 45, "n_whitespaces": 156, "language": "en" } }, { "id": 222587, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/ccompiler.py", "file_name": "ccompiler.py", "fun_name": "show_compilers", "commit_message": "add python 3.10.4 for windows", "code": "def show_compilers():\n \n # XXX this \"knows\" that the compiler option it's describing is\n # \"--compiler\", which just happens to be the case for the three\n # commands that use it.\n from distutils.fancy_getopt import FancyGetopt\n compilers = []\n for compiler in compiler_class.keys():\n compilers.append((\"compiler=\"+compiler, None,\n compiler_class[compiler][2]))\n compilers.sort()\n pretty_printer = FancyGetopt(compilers)\n pretty_printer.print_help(\"List of available compilers:\")\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 114, "n_words": 52, "vocab_size": 44, "complexity": 2, "nloc": 9, "token_counts": 61, "n_ast_nodes": 106, "n_identifiers": 12, "d_id": 56655, "documentation": { "docstring": "Print list of available compilers (used by the \"--help-compiler\"\n options to \"build\", \"build_ext\", \"build_clib\").\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 20, "language": "en" } }, { "id": 138061, "commit_id": "edb17fd2069844f12237c85ba6607afae536401d", "repo": "ray", "path": "python/ray/air/tests/test_resource_manager_placement_group.py", "file_name": "test_resource_manager_placement_group.py", "fun_name": "test_acquire_unavailable", "commit_message": "[air/tune] Internal resource management 1 - 
Ray AIR resource manager implementation (#30777)\n\nPrerequisite to #30016\r\n\r\nThis PR adds a new Ray AIR resource manager to replace the PlacementGroupManager of Ray Tune. Details can be found in #30016.\r\n\r\nSpecifically, this PR\r\n- Adds the main resource manager abstractions\r\n- Renames (and moves) PlacementGroupFactory to ResourceRequest\r\n- Adds implementations and tests for a placement group based manager and a budget based manager\r\n\r\nSigned-off-by: Kai Fricke \r\nSigned-off-by: Kai Fricke \r\nCo-authored-by: matthewdeng ", "code": "def test_acquire_unavailable(ray_start_4_cpus):\n \n manager = PlacementGroupResourceManager(update_interval_s=0)\n assert not manager.acquire_resources(REQUEST_2_CPU)\n\n manager.request_resources(REQUEST_2_CPU)\n ray.wait(manager.get_resource_futures(), num_returns=1)\n assert manager.acquire_resources(REQUEST_2_CPU)\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 31, "n_words": 13, "vocab_size": 11, "complexity": 1, "nloc": 6, "token_counts": 49, "n_ast_nodes": 82, "n_identifiers": 12, "d_id": 31300, "documentation": { "docstring": "Test that acquiring resources that are not available returns None.\n\n - Try to acquire\n - Assert this does not work\n - Request resources\n - Wait until ready\n - Acquire\n - Assert this did work\n ", "n_words": 34, "vocab_size": 23, "n_whitespaces": 55, "language": "en" } }, { "id": 195921, "commit_id": "25aaf2c3a6ac0d39da710d6e67f244930b56d669", "repo": "sympy", "path": "sympy/utilities/iterables.py", "file_name": "iterables.py", "fun_name": "multiset_derangements", "commit_message": "fix repeat covers all but 1", "code": "def multiset_derangements(s):\n \n ms = multiset(s)\n mx = max(ms.values())\n n = len(s)\n # special cases\n\n # 0) impossible case\n if mx*2 > n:\n return\n\n # 1) singletons\n if len(ms) == n:\n for p in generate_derangements(s):\n yield p\n return\n\n for M in ms:\n if ms[M] == mx:\n break\n inonM = [i for i in range(n) if s[i] != M]\n iM = [i for i in range(n) if s[i] == M]\n rv = [None]*n\n\n # 2) half are the same\n if 2*mx == n:\n for i in inonM:\n rv[i] = M\n for p in multiset_permutations([s[i] for i in inonM]):\n for i, pi in zip(iM, p):\n rv[i] = pi\n yield rv\n return\n\n # 3) single repeat covers all but 1 of the non-repeats\n if n - 2*mx == 1 and len(ms.values()) - 1 == n - mx:\n for i in range(len(inonM)):\n i1 = inonM[i]\n ifill = inonM[:i] + inonM[i+1:]\n for j in ifill:\n rv[j] = M\n for p in permutations([s[j] for j in ifill]):\n rv[i1] = s[i1]\n for j, pi in zip(iM, p):\n rv[j] = pi\n k = i1\n for j in iM:\n rv[j], rv[k] = rv[k], rv[j]\n yield rv\n k = j\n return\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 569, "n_words": 190, "vocab_size": 90, "complexity": 23, "nloc": 45, "token_counts": 358, "n_ast_nodes": 486, "n_identifiers": 25, "d_id": 47476, "documentation": { "docstring": "Generate derangements of the elements of s *in place*.\n\n Examples\n ========\n\n >>> from sympy.utilities.iterables import multiset_derangements, uniq\n\n Because the derangements of multisets (not sets) are generated\n in place, copies of the return value must be made if a collection\n of derangements is desired or else all values will be the same:\n\n >>> list(uniq([i for i in multiset_derangements('1233')]))\n [['3', '3', '2', '1']]\n >>> [i.copy() for i in multiset_derangements('1233')]\n 
[['3', '3', '1', '2'], ['3', '3', '2', '1']]\n ", "n_words": 75, "vocab_size": 54, "n_whitespaces": 108, "language": "en" } }, { "id": 264445, "commit_id": "7c105019d8ae9205051c302e7499b33a455f9176", "repo": "netbox", "path": "netbox/utilities/templatetags/builtins/filters.py", "file_name": "filters.py", "fun_name": "content_type", "commit_message": "Closes #8600: Document built-in template tags & filters", "code": "def content_type(model):\n \n return ContentType.objects.get_for_model(model)\n\n\n@register.filter()", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "@register.filter()", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 10, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 38, "n_identifiers": 7, "d_id": 77731, "documentation": { "docstring": "\n Return the ContentType for the given object.\n ", "n_words": 7, "vocab_size": 6, "n_whitespaces": 14, "language": "en" } }, { "id": 275342, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/optimizer_v1.py", "file_name": "optimizer_v1.py", "fun_name": "set_weights", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def set_weights(self, weights):\n \n params = self.weights\n if len(params) != len(weights):\n raise ValueError(\n \"Length of the specified weight list (\"\n + str(len(weights))\n + \") does not match the number of weights \"\n \"of the optimizer (\" + str(len(params)) + \")\"\n )\n weight_value_tuples = []\n param_values = backend.batch_get_value(params)\n for pv, p, w in zip(param_values, params, weights):\n if pv.shape != w.shape:\n raise ValueError(\n \"Optimizer weight shape \"\n + str(pv.shape)\n + \" not compatible with \"\n \"provided weight shape \" + str(w.shape)\n )\n weight_value_tuples.append((p, w))\n backend.batch_set_value(weight_value_tuples)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 341, "n_words": 82, "vocab_size": 56, "complexity": 4, "nloc": 21, "token_counts": 125, "n_ast_nodes": 212, "n_identifiers": 18, "d_id": 81383, "documentation": { "docstring": "Sets the weights of the optimizer, from Numpy arrays.\n\n Should only be called after computing the gradients\n (otherwise the optimizer has no weights).\n\n Args:\n weights: a list of Numpy arrays. The number of arrays and their shape\n must match number of the dimensions of the weights of the optimizer\n (i.e. 
it should match the output of `get_weights`).\n\n Raises:\n ValueError: in case of incompatible weight shapes.\n ", "n_words": 65, "vocab_size": 45, "n_whitespaces": 148, "language": "en" } }, { "id": 215688, "commit_id": "ba58c71c55f8d65e702525faf435c2de91aae85c", "repo": "salt", "path": "tests/pytests/unit/modules/test_cp.py", "file_name": "test_cp.py", "fun_name": "test__render_filenames_undefined_template", "commit_message": "move cp exec module tests to pytest", "code": "def test__render_filenames_undefined_template():\n \n path = \"/srv/salt/saltines\"\n dest = \"/srv/salt/cheese\"\n saltenv = \"base\"\n template = \"biscuits\"\n ret = (path, dest)\n pytest.raises(\n CommandExecutionError, cp._render_filenames, path, dest, saltenv, template\n )\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 57, "n_words": 26, "vocab_size": 21, "complexity": 1, "nloc": 9, "token_counts": 42, "n_ast_nodes": 73, "n_identifiers": 11, "d_id": 54099, "documentation": { "docstring": "\n Test if _render_filenames fails upon getting a template not in\n TEMPLATE_REGISTRY.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 21, "language": "en" } }, { "id": 218860, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/lib2to3/pytree.py", "file_name": "pytree.py", "fun_name": "match", "commit_message": "add python 3.10.4 for windows", "code": "def match(self, node, results=None):\n \n if self.type is not None and node.type != self.type:\n return False\n if self.content is not None:\n r = None\n if results is not None:\n r = {}\n if not self._submatch(node, r):\n return False\n if r:\n results.update(r)\n if results is not None and self.name:\n results[self.name] = node\n return True\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 198, "n_words": 52, "vocab_size": 29, "complexity": 9, "nloc": 14, "token_counts": 93, "n_ast_nodes": 145, "n_identifiers": 10, "d_id": 55510, "documentation": { "docstring": "\n Does this pattern exactly match a node?\n\n Returns True if it matches, False if not.\n\n If results is not None, it must be a dict which will be\n updated with the nodes matching named subpatterns.\n\n Default implementation for non-wildcard patterns.\n ", "n_words": 40, "vocab_size": 36, "n_whitespaces": 83, "language": "en" } }, { "id": 204478, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/files/base.py", "file_name": "base.py", "fun_name": "endswith_cr", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def endswith_cr(line):\n \n return line.endswith(\"\\r\" if isinstance(line, str) else b\"\\r\")\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 15, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 2, "token_counts": 23, "n_ast_nodes": 43, "n_identifiers": 5, "d_id": 50741, "documentation": { "docstring": "Return True if line (a text or bytestring) ends with '\\r'.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 60598, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/commands/configuration.py", "file_name": "configuration.py", "fun_name": "_get_n_args", 
"commit_message": "upd; format", "code": "def _get_n_args(self, args, example, n):\n # type: (List[str], str, int) -> Any\n \n if len(args) != n:\n msg = (\n 'Got unexpected number of arguments, expected {}. '\n '(example: \"{} config {}\")'\n ).format(n, get_prog(), example)\n raise PipError(msg)\n\n if n == 1:\n return args[0]\n else:\n return args\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 165, "n_words": 45, "vocab_size": 43, "complexity": 3, "nloc": 11, "token_counts": 56, "n_ast_nodes": 93, "n_identifiers": 10, "d_id": 12217, "documentation": { "docstring": "Helper to make sure the command got the right number of arguments\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 19, "language": "en" } }, { "id": 156915, "commit_id": "bf66221722cce8f09a9b09895bdb4596f14a5430", "repo": "dask", "path": "dask/utils.py", "file_name": "utils.py", "fun_name": "tmpfile", "commit_message": "`tmpfile` does not end files with period on empty extension (#9429)", "code": "def tmpfile(extension=\"\", dir=None):\n \n extension = extension.lstrip(\".\")\n if extension:\n extension = \".\" + extension\n handle, filename = tempfile.mkstemp(extension, dir=dir)\n os.close(handle)\n os.remove(filename)\n\n try:\n yield filename\n finally:\n if os.path.exists(filename):\n with suppress(OSError): # sometimes we can't remove a generated temp file\n if os.path.isdir(filename):\n shutil.rmtree(filename)\n else:\n os.remove(filename)\n\n\n@contextmanager", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "@contextmanager", "n_ast_errors": 1, "ast_levels": 17, "n_whitespaces": 167, "n_words": 43, "vocab_size": 35, "complexity": 5, "nloc": 16, "token_counts": 100, "n_ast_nodes": 179, "n_identifiers": 19, "d_id": 36805, "documentation": { "docstring": "\n Function to create and return a unique temporary file with the given extension, if provided.\n\n Parameters\n ----------\n extension : str\n The extension of the temporary file to be created\n dir : str\n If ``dir`` is not None, the file will be created in that directory; otherwise,\n Python's default temporary directory is used.\n\n Returns\n -------\n out : str\n Path to the temporary file\n\n See Also\n --------\n NamedTemporaryFile : Built-in alternative for creating temporary files\n tmp_path : pytest fixture for creating a temporary directory unique to the test invocation\n\n Notes\n -----\n This context manager is particularly useful on Windows for opening temporary files multiple times.\n ", "n_words": 103, "vocab_size": 69, "n_whitespaces": 180, "language": "en" } }, { "id": 177474, "commit_id": "4376a6f751874dceff9dadc0a6a6bfc2dfa04000", "repo": "networkx", "path": "networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py", "file_name": "test_betweenness_centrality_subset.py", "fun_name": "test_normalized_P5_directed", "commit_message": "PR for issue #6033 Improve test coverage for algorithms in betweenness_subset.py #6033 (#6083)\n\n* Updated test_betweenness_centrality_subset.py\r\n\r\n* add test of normalized in test_betweenness_centrality_subset.py\r\n\r\n* add test of normalized in test_betweenness_centrality_subset.py\r\n\r\n* update test of normalized in test_betweenness_centrality_subset.py\r\n\r\n* update weight of test_betweenness_centrality_subset.py\r\n\r\n* add docstring\r\n\r\n* add docstring in test_betweenness_centrality_subset.py\r\n\r\n* add docstring in 
test_betweenness_centrality_subset.py", "code": "def test_normalized_P5_directed(self):\n \n G = nx.DiGraph()\n nx.add_path(G, range(5))\n b_answer = {0: 0, 1: 1.0 / 12.0, 2: 1.0 / 12.0, 3: 0, 4: 0, 5: 0}\n b = nx.betweenness_centrality_subset(\n G, sources=[0], targets=[3], normalized=True, weight=None\n )\n for n in sorted(G):\n assert b[n] == pytest.approx(b_answer[n], abs=1e-7)\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 114, "n_words": 43, "vocab_size": 36, "complexity": 2, "nloc": 9, "token_counts": 120, "n_ast_nodes": 162, "n_identifiers": 19, "d_id": 42386, "documentation": { "docstring": "Betweenness Centrality Subset: Normalized Directed P5", "n_words": 6, "vocab_size": 6, "n_whitespaces": 5, "language": "en" } }, { "id": 204877, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/base/operations.py", "file_name": "operations.py", "fun_name": "window_frame_rows_start_end", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def window_frame_rows_start_end(self, start=None, end=None):\n \n if not self.connection.features.supports_over_clause:\n raise NotSupportedError(\"This backend does not support window expressions.\")\n return self.window_frame_start(start), self.window_frame_end(end)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 50, "n_words": 18, "vocab_size": 17, "complexity": 2, "nloc": 4, "token_counts": 43, "n_ast_nodes": 71, "n_identifiers": 10, "d_id": 50950, "documentation": { "docstring": "\n Return SQL for start and end points in an OVER clause window frame.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 28, "language": "en" } }, { "id": 320775, "commit_id": "a20bb67a878b2e68abf8268c1b0a27f018d01352", "repo": "qutebrowser", "path": "qutebrowser/completion/completionwidget.py", "file_name": "completionwidget.py", "fun_name": "completion_item_yank", "commit_message": "mypy: Upgrade to PyQt5-stubs 5.15.6.0\n\nFor some unknown reason, those new stubs cause a *lot* of things now to be\nchecked by mypy which formerly probably got skipped due to Any being implied\nsomewhere.\n\nThe stubs themselves mainly improved, with a couple of regressions too.\n\nIn total, there were some 337 (!) new mypy errors. This commit fixes almost all\nof them, and the next commit improves a fix to get things down to 0 errors\nagain.\n\nOverview of the changes:\n\n==== qutebrowser/app.py\n\n- Drop type ignore due to improved stubs.\n\n==== qutebrowser/browser/browsertab.py\n\n- Specify the type of _widget members more closely than just QWidget.\n This is debatable: I suppose the abstract stuff shouldn't need to know\n anything about the concrete backends at all. But it seems like we cut some\n corners when initially implementing things, and put some code in browsertab.py\n just because the APIs of both backends happened to be compatible. Perhaps\n something to reconsider once we drop QtWebKit and hopefully implement a dummy\n backend.\n\n- Add an additional assertion in AbstractAction.run_string. 
This is already\n covered by the isinstance(member, self.action_base) above it, but that's too\n dynamic for mypy to understand.\n\n- Fix the return type of AbstractScroller.pos_px, which is a QPoint (with x\n and y components), not a single int.\n\n- Fix the return type of AbstractScroller.pos_perc, which is a Tuple (with x\n and y components), not a single int.\n\n- Fix the argument types of AbstractScroller.to_perc, as it's possible to pass\n fractional percentages too.\n\n- Specify the type for AbstractHistoryPrivate._history. See above (_widget) re\n this being debatable.\n\n- Fix the return type of AbstractTabPrivate.event_target(), which can be None\n (see #3888).\n\n- Fix the return type of AbstractTabPrivate.run_js_sync, which is Any (the JS\n return value), not None.\n\n- Fix the argument type for AbstractTabPrivate.toggle_inspector: position can\n be None to use the last used position.\n\n- Declare the type of sub-objects of AbstractTab.\n\n- Fix the return value of AbstractTab.icon(), which is the QIcon, not None.\n\n==== qutebrowser/browser/commands.py\n\n- Make sure the active window is a MainWindow (with a .win_id attribute).\n\n==== qutebrowser/browser/downloadview.py\n\n- Add _model() which makes sure that self.model() is a DownloadModel, not None\n or any other model. This is needed because other methods access a variety of\n custom attributes on it, e.g. last_index().\n\n==== qutebrowser/browser/greasemonkey.py\n\n- Add an ignore for AbstractDownload.requested_url which we patch onto the\n downloads. Probably would be nicer to add it as a proper attribute which always\n gets set by the DownloadManager.\n\n==== qutebrowser/browser/hints.py\n\n- Remove type ignores for QUrl.toString().\n- Add a new type ignore for combining different URL flags (which works, but is\n not exactly type safe... still probably a regression in the stubs).\n- Make sure the things we get back from self._get_keyparser are what we actually\n expect. Probably should introduce a TypedDict (and/or overloads for\n _get_keyparser with typing.Literal) to teach mypy about the exact return value.\n See #7098.\n This is needed because we access Hint/NormalKeyParser-specific attributes such\n as .set_inhibited_timout() or .update_bindings().\n\n==== qutebrowser/browser/inspector.py\n\n- Similar changes than in browsertab.py to make some types where we share API\n (e.g. .setPage()) more concrete. Didn't work out unfortunately, see next\n commit.\n\n==== qutebrowser/browser/network/pac.py\n\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/browser/qtnetworkdownloads.py\n\n- Make sure that downloads is a qtnetworkdownloads.DownloadItem (rather than an\n AbstractDownload), so that we can call ._uses_nam() on it.\n\n==== qutebrowser/browser/qutescheme.py\n\n- Remove now unneeded type ignore for QUrl flags.\n\n==== qutebrowser/browser/urlmarks.py\n\n- Specify the type of UrlMarkManager._lineparser, as those only get initialized\n in _init_lineparser of subclasses, so mypy doesn't know it's supposed to exist.\n\n==== qutebrowser/browser/webelem.py\n\n- New casts to turn single KeyboardModifier (enum) entries into\n KeyboardModifiers (flags). 
Might not be needed anymore with Qt 6.\n- With that, casting the final value is now unneeded.\n\n==== qutebrowser/browser/webengine/notification.py\n\n- Remove now unneeded type ignore for signal.\n- Make sure the self.sender() we get in HerbeNotificationAdapter._on_finished()\n is a QProcess, not just any QObject.\n\n==== qutebrowser/browser/webengine/webenginedownloads.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/browser/webengine/webengineelem.py\n\n- Specify the type of WebEngineElement._tab.\n- Remove now unneeded type ignore for mixed flags.\n\n==== qutebrowser/browser/webengine/webengineinspector.py\n\n- See changes to inspector.py and next commit.\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/browser/webengine/webenginequtescheme.py\n\n- Remove now unneeded type ignore for mixed flags.\n\n==== qutebrowser/browser/webengine/webenginesettings.py\n\n- Ignore access of .setter attribute which we patch onto QWebEngineProfile.\n Would be nice to have a subclass or wrapper-class instead.\n\n==== qutebrowser/browser/webengine/webenginetab.py\n\n- Specified the type of _widget members more closely than just QWidget.\n See browsertab.py changes for details.\n- Remove some now-unneeded type ignores for creating FindFlags.\n- Specify more concrete types for WebEngineTab members where we actually need to\n access WebEngine-specific attributes.\n- Make sure the page we get is our custom WebEnginePage subclass, not just any\n QWebEnginePage. This is needed because we access custom attributes on it.\n\n==== qutebrowser/browser/webengine/webview.py\n\n- Make sure the page we get is our custom WebEnginePage subclass, not just any\n QWebEnginePage. This is needed because we access custom attributes on it.\n\n==== qutebrowser/browser/webkit/network/networkreply.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/browser/webkit/webkitinspector.py\n\n- See changes to inspector.py and next commit.\n\n==== qutebrowser/browser/webkit/webkittab.py\n\n- Specify the type of _widget members more closely than just QWidget.\n See browsertab.py changes for details.\n- Add a type ignore for WebKitAction because our workaround needs to\n treat them as ints (which is allowed by PyQt, even if not type-safe).\n- Add new ignores for findText calls: The text is a QString and can be None; the\n flags are valid despite mypy thinking they aren't (stubs regression?).\n- Specify the type for WebKitHistoryPrivate._history, because we access\n WebKit-specific attributes. See above (_widget) re this being debatable.\n- Make mypy aware that .currentFrame() and .frameAt() can return None (stubs\n regression?).\n- Make sure the .page() and .page().networkAccessManager() are our subclasses\n rather than the more generic QtWebKit objects, as we use custom attributes.\n- Add new type ignores for signals (stubs regression!)\n\n==== qutebrowser/browser/webkit/webpage.py\n\n- Make sure the .networkAccessManager() is our subclass rather than the more\n generic QtWebKit object, as we use custom attributes.\n- Replace a cast by a type ignore. 
The cast didn't work anymore.\n\n==== qutebrowser/browser/webkit/webview.py\n\n- Make sure the .page() is our subclass rather than the more generic QtWebKit\n object, as we use custom attributes.\n\n==== qutebrowser/commands/userscripts.py\n\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/completion/completer.py\n\n- Add a new _completion() getter (which ensures it actually gets the completion\n view) rather than accessing the .parent() directly (which could be any QObject).\n\n==== qutebrowser/completion/completiondelegate.py\n\n- Make sure self.parent() is a CompletionView (no helper method as there is only\n one instance).\n- Remove a now-unneeded type ignore for adding QSizes.\n\n==== qutebrowser/completion/completionwidget.py\n\n- Add a ._model() getter which ensures that we get a CompletionModel (with\n custom attributes) rather than Qt's .model() which can be any QAbstractItemModel\n (or None).\n- Removed a now-unneeded type ignore for OR-ing flags.\n\n==== qutebrowser/completion/models/completionmodel.py\n\n- Remove now unneeded type ignores for signals.\n- Ignore a complaint about .set_pattern() not being defined. Completion\n categories don't share any common parent class, so it would be good to introduce\n a typing.Protocol for this. See #7098.\n\n==== qutebrowser/components/misccommands.py\n\n- Removed a now-unneeded type ignore for OR-ing flags.\n\n==== qutebrowser/components/readlinecommands.py\n\n- Make sure QApplication.instance() is a QApplication (and not just a\n QCoreApplication). This includes the former \"not None\" check.\n\n==== qutebrowser/components/scrollcommands.py\n\n- Add basic annotation for \"funcs\" dict. Could have a callable protocol to\n specify it needs a count kwarg, see #7098.\n\n==== qutebrowser/config/stylesheet.py\n\n- Correctly specify that stylesheet apply to QWidgets, not any QObject.\n- Ignore an attr-defined for obj.STYLESHEET. Perhaps could somehow teach mypy\n about this with overloads and protocols (stylesheet for set_register being None\n => STYLESHEET needs to be defined, otherwise anything goes), but perhaps not\n worth the troble. See #7098.\n\n==== qutebrowser/keyinput/keyutils.py\n\n- Remove some now-unneeded type ignores and add a cast for using a single enum\n value as flags. Might need to look at this again with Qt 6 support.\n\n==== qutebrowser/keyinput/modeman.py\n\n- Add a FIXME for using a TypedDict, see comments for hints.py above.\n\n==== qutebrowser/mainwindow/mainwindow.py\n\n- Remove now-unneeded type ignores for calling with OR-ed flags.\n- Improve where we cast from WindowType to WindowFlags, no int needed\n- Use new .tab_bar() getter, see below.\n\n==== qutebrowser/mainwindow/prompt.py\n\n- Remove now-unneeded type ignores for calling with OR-ed flags.\n\n==== qutebrowser/mainwindow/statusbar/bar.py\n\n- Adjust type ignores around @pyqtProperty. The fact one is still needed seems\n like a stub regression.\n\n==== qutebrowser/mainwindow/statusbar/command.py\n\n- Fix type for setText() override (from QLineEdit): text can be None\n (QString in C++).\n\n==== qutebrowser/mainwindow/statusbar/url.py\n\n- Adjust type ignores around @pyqtProperty. The fact one is still needed seems\n like a stub regression.\n\n==== qutebrowser/mainwindow/tabbedbrowser.py\n\n- Specify that TabDeque manages browser tabs, not any QWidgets. 
It accesses\n AbstractTab-specific attributes.\n- Make sure that the .tabBar() we get is a tabwidget.TabBar, as we access\n .maybe_hide.\n- Fix the annotations for stored marks: Scroll positions are a QPoint, not int.\n- Add _current_tab() and _tab_by_idx() wrappers for .currentWidget() and\n .widget(), which ensures that the return values are valid AbstractTabs (or None\n for _tab_by_idx). This is needed because we access AbstractTab-specific\n attributes.\n- For some places, where the tab can be None, continue using .currentTab() but\n add asserts.\n- Remove some now-unneeded [unreachable] ignores, as mypy knows about the None\n possibility now.\n\n==== qutebrowser/mainwindow/tabwidget.py\n\n- Add new tab_bar() and _tab_by_idx() helpers which check that the .tabBar() and\n .widget() are of type TabBar and AbstractTab, respectively.\n- Add additional assertions where we expect ._tab_by_idx() to never be None.\n- Remove dead code in get_tab_fields for handling a None y scroll position. I\n was unable to find any place in the code where this could be set to None.\n- Remove some now-unneeded type ignores and casts, as mypy now knows that\n _type_by_idx() could be None.\n- Work around a strange instance where mypy complains about not being able to\n find the type of TabBar.drag_in_progress from TabWidget._toggle_visibility,\n despite it clearly being shown as a bool *inside* that class without any\n annotation.\n- Add a ._tab_widget() getter in TabBar which ensures that the .parent() is in\n fact a TabWidget.\n\n==== qutebrowser/misc/crashsignal.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/misc/editor.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/misc/ipc.py\n\n- Remove now unneeded type ignores for signals.\n- Add new type ignores for .error() which is both a signal and a getter\n (stub regression?). Won't be relevant for Qt 6 anymore, as the signal was\n renamed to errorOccurred in 5.15.\n\n==== qutebrowser/misc/objects.py\n\n- Make sure mypy knows that objects.app is our custom Application (with custom\n attributes) rather than any QApplication.\n\n==== qutebrowser/utils/objreg.py\n\n- Ignore attr-defined for .win_id attributes. 
Maybe could add a typing.Protocol,\n but ideally, the whole objreg stuff should die one day anyways.\n\n==== tests/unit/completion/test_completer.py\n\n- Make CompletionWidgetStub inherit from CompletionView so that it passes the\n new isinstance() asserts in completer.py (see above).", "code": "def completion_item_yank(self, sel=False):\n \n text = self._cmd.selectedText()\n if not text:\n index = self.currentIndex()\n if not index.isValid():\n raise cmdutils.CommandError(\"No item selected!\")\n text = self._model().data(index)\n\n if not utils.supports_selection():\n sel = False\n\n utils.set_clipboard(text, selection=sel)\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 124, "n_words": 30, "vocab_size": 22, "complexity": 4, "nloc": 10, "token_counts": 78, "n_ast_nodes": 133, "n_identifiers": 17, "d_id": 117342, "documentation": { "docstring": "Yank the current completion item into the clipboard.\n\n Args:\n sel: Use the primary selection instead of the clipboard.\n ", "n_words": 18, "vocab_size": 14, "n_whitespaces": 43, "language": "en" } }, { "id": 79326, "commit_id": "9a1606c809b2daee005591d98e9e2058e4823c79", "repo": "wagtail", "path": "wagtail/admin/panels.py", "file_name": "panels.py", "fun_name": "show_panel_furniture", "commit_message": "Add show_panel_furniture() in BoundPanel\n\nThis allows TabbedInterface to hide a tab but still render its children", "code": "def show_panel_furniture(self):\n \n return self.is_shown()\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 26, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 23, "n_identifiers": 3, "d_id": 16917, "documentation": { "docstring": "\n Whether this panel shows the panel furniture instead of being rendered outside of it.\n ", "n_words": 14, "vocab_size": 12, "n_whitespaces": 37, "language": "en" } }, { "id": 223870, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/quoprimime.py", "file_name": "quoprimime.py", "fun_name": "header_encode", "commit_message": "add python 3.10.4 for windows", "code": "def header_encode(header_bytes, charset='iso-8859-1'):\n \n # Return empty headers as an empty string.\n if not header_bytes:\n return ''\n # Iterate over every byte, encoding if necessary.\n encoded = header_bytes.decode('latin1').translate(_QUOPRI_HEADER_MAP)\n # Now add the RFC chrome to each encoded chunk and glue the chunks\n # together.\n return '=?%s?q?%s?=' % (charset, encoded)\n\n\n_QUOPRI_BODY_ENCODE_MAP = _QUOPRI_BODY_MAP[:]\nfor c in b'\\r\\n':\n _QUOPRI_BODY_ENCODE_MAP[c] = chr(c)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 90, "n_words": 58, "vocab_size": 48, "complexity": 2, "nloc": 5, "token_counts": 37, "n_ast_nodes": 107, "n_identifiers": 11, "d_id": 57121, "documentation": { "docstring": "Encode a single header line with quoted-printable (like) encoding.\n\n Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but\n used specifically for email header fields to allow charsets with mostly 7\n bit characters (and some 8 bit) to remain more or less readable in non-RFC\n 2045 aware mail clients.\n\n charset names the character set to use in the RFC 2046 header. 
It\n defaults to iso-8859-1.\n ", "n_words": 67, "vocab_size": 57, "n_whitespaces": 89, "language": "en" } }, { "id": 322101, "commit_id": "b0c35d5e1ff02a634fa26392b60d3885c2c78677", "repo": "PaddleNLP", "path": "paddlenlp/transformers/transformer/modeling.py", "file_name": "modeling.py", "fun_name": "forward", "commit_message": "Fix the attention mask for fp16 (#1585)", "code": "def forward(self, src_word, trg_word):\n r\n src_max_len = paddle.shape(src_word)[-1]\n trg_max_len = paddle.shape(trg_word)[-1]\n src_slf_attn_bias = paddle.cast(\n src_word == self.bos_id,\n dtype=paddle.get_default_dtype()).unsqueeze([1, 2]) * -1e4\n src_slf_attn_bias.stop_gradient = True\n trg_slf_attn_bias = self.transformer.generate_square_subsequent_mask(\n trg_max_len)\n trg_slf_attn_bias.stop_gradient = True\n trg_src_attn_bias = src_slf_attn_bias\n src_pos = paddle.cast(\n src_word != self.bos_id, dtype=src_word.dtype) * paddle.arange(\n start=0, end=src_max_len, dtype=src_word.dtype)\n trg_pos = paddle.cast(\n trg_word != self.bos_id, dtype=src_word.dtype) * paddle.arange(\n start=0, end=trg_max_len, dtype=trg_word.dtype)\n with paddle.static.amp.fp16_guard():\n src_emb = self.src_word_embedding(src_word)\n src_pos_emb = self.src_pos_embedding(src_pos)\n src_emb = src_emb + src_pos_emb\n enc_input = F.dropout(\n src_emb, p=self.dropout,\n training=self.training) if self.dropout else src_emb\n\n trg_emb = self.trg_word_embedding(trg_word)\n trg_pos_emb = self.trg_pos_embedding(trg_pos)\n trg_emb = trg_emb + trg_pos_emb\n dec_input = F.dropout(\n trg_emb, p=self.dropout,\n training=self.training) if self.dropout else trg_emb\n\n dec_output = self.transformer(\n enc_input,\n dec_input,\n src_mask=src_slf_attn_bias,\n tgt_mask=trg_slf_attn_bias,\n memory_mask=trg_src_attn_bias)\n\n predict = self.linear(dec_output)\n\n return predict\n\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 528, "n_words": 115, "vocab_size": 67, "complexity": 3, "nloc": 84, "token_counts": 301, "n_ast_nodes": 457, "n_identifiers": 47, "d_id": 118058, "documentation": { "docstring": "\n The Transformer forward methods. The input are source/target sequences, and\n returns logits.\n\n Args:\n src_word (Tensor):\n The ids of source sequences words. It is a tensor with shape\n `[batch_size, source_sequence_length]` and its data type can be\n int or int64.\n trg_word (Tensor):\n The ids of target sequences words. It is a tensor with shape\n `[batch_size, target_sequence_length]` and its data type can be\n int or int64.\n\n Returns:\n Tensor:\n Output tensor of the final layer of the model whose data\n type can be float32 or float64 with shape\n `[batch_size, sequence_length, vocab_size]`.\n\n Example:\n .. 
code-block::\n\n import paddle\n from paddlenlp.transformers import TransformerModel\n\n transformer = TransformerModel(\n src_vocab_size=30000,\n trg_vocab_size=30000,\n max_length=257,\n num_encoder_layers=6,\n num_decoder_layers=6,\n n_head=8,\n d_model=512,\n d_inner_hid=2048,\n dropout=0.1,\n weight_sharing=True,\n bos_id=0,\n eos_id=1)\n\n batch_size = 5\n seq_len = 10\n predict = transformer(\n src_word=paddle.randint(low=3, high=30000, shape=[batch_size, seq_len]),\n trg_word=paddle.randint(low=3, high=30000, shape=[batch_size, seq_len]))\n ", "n_words": 128, "vocab_size": 85, "n_whitespaces": 706, "language": "en" } }, { "id": 219742, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pydecimal.py", "file_name": "_pydecimal.py", "fun_name": "is_nan", "commit_message": "add python 3.10.4 for windows", "code": "def is_nan(self, a):\n \n a = _convert_other(a, raiseit=True)\n return a.is_nan()\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 40, "n_identifiers": 5, "d_id": 55761, "documentation": { "docstring": "Return True if the operand is a qNaN or sNaN;\n otherwise return False.\n\n >>> ExtendedContext.is_nan(Decimal('2.50'))\n False\n >>> ExtendedContext.is_nan(Decimal('NaN'))\n True\n >>> ExtendedContext.is_nan(Decimal('-sNaN'))\n True\n >>> ExtendedContext.is_nan(1)\n False\n ", "n_words": 25, "vocab_size": 19, "n_whitespaces": 95, "language": "en" } }, { "id": 8418, "commit_id": "4d2d81f9fdefc52eea6a9bf0826a6f2ffc8d681b", "repo": "ludwig", "path": "ludwig/schema/model_config.py", "file_name": "model_config.py", "fun_name": "_set_hyperopt_defaults", "commit_message": "Config Object (#2426)\n\n* Fixed loss instances across features\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed binary OneOfImplementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Flake 8\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix custom loss components\r\n\r\n* Fix gbm category\r\n\r\n* Remove config object code, out of scope\r\n\r\n* Fixed more tests\r\n\r\n* Fixed incorrect text preproc default, added clip to category feature level\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixes additional tests\r\n\r\n* Cache jsonschema validator to reduce memory pressure\r\n\r\n* Fix imports\r\n\r\n* Skip neuropod test\r\n\r\n* Added upgrade audio to default preproc back compat and cleaned up\r\n\r\n* Small nits\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Change backfill constant for audio\r\n\r\n* Add docstring to compute feature hash\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Unused import\r\n\r\n* Another backfill constant change\r\n\r\n* Unused import\r\n\r\n* remove default population functions\r\n\r\n* Added config object test\r\n\r\n* rewired build_inputs\r\n\r\n* rewired combiner in ecd, added logic to config object\r\n\r\n* Refactored ecd.py\r\n\r\n* Fixing up merge_with_defaults, 
need metadata changes in master\r\n\r\n* Refactored defaults section and mega upgraded config obj\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed some formatting\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed feature col, proc col, and render config from defaults.py\r\n\r\n* Fix duplicate import\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added config initializer to merge defaults flow\r\n\r\n* Refactored update_config_with_metadata\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added dict conversion method to config object and refactored merge config function in config_utils\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Refactored until preproc entrypoint\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed update_config_with_metadata\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Removed load config base feature method - no longer necessary\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Formatting\r\n\r\n* Fixed input size assignment\r\n\r\n* Temp fix\r\n\r\n* Fixed pretrained encoder path referencing temp until preproc refactor\r\n\r\n* Solved the WORST BUG EVER\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Switch reduce_input to None for sequence tagger\r\n\r\n* Fixed another one\r\n\r\n* Fixed typo\r\n\r\n* Various test fixes\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Flake 8\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed excess defaults params issue\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Minor fixes\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed some defaults tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed more tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed more tests\r\n\r\n* Formatting\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* More test fixes\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed defaults tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix more tests\r\n\r\n* Flake 8\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix more tests\r\n\r\n* Fixed more tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com 
hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed more tests\r\n\r\n* Fixed more tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fixing ghost tests attempt\r\n\r\n* Deep copy to smash the ghost failures\r\n\r\n* Copied top level modules now too\r\n\r\n* Started fixing hyperopt\r\n\r\n* Fixed Hyperopt Issues\r\n\r\n* Flake 8\r\n\r\n* Remove commented out code\r\n\r\n* Address Piero feedback\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Flake 8\r\n\r\n* Removed merge with defaults\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed various issues with preprocessing and splitting positioning\r\n\r\n* Fixed hyperopt issues\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Refactored api pipeline to use all config obj references\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed more tests\r\n\r\n* Flake 8\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix more tests\r\n\r\n* Fixed auto tune learning rate and batch size\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed sequence feature tests\r\n\r\n* Fixed image feature test\r\n\r\n* Fixed last test\r\n\r\n* flake 8\r\n\r\n* Marshmallowify Config object, remove manual to dict method, add Factory method constructors\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Validate config within config object\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* All Travis feedback addressed\r\n\r\n* Using all new constructors now\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* removed from class attributes\r\n\r\n* Added deep copies back and piped repr inheritance\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Format\r\n\r\n* Small error fix, moved back compat into Config Object\r\n\r\n* Flake8\r\n\r\n* Docstring for hyperopt defaults method\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Address Joppe feedback\r\n\r\n* Revert \"Address Joppe feedback\"\r\n\r\nThis reverts commit 42f1665ef917d062a010550bb960594c355285ff.\r\n\r\n* Fix tests\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Flake8\r\n\r\n* fix test\r\n\r\n* Small improvement\r\n\r\n* Changed repr for input features, added feature enabling/disabling\r\n\r\n* Added feature enabling/disabling, and better reprs for SDK dev\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Flake 8\r\n\r\n* Added rich to requirements.txt\r\n\r\n* Add some more CO tests and comment more on CO code\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see 
https://pre-commit.ci\r\n\r\n* Fix explain issue\r\n\r\n* Julian feedback\r\n\r\n* Added TODOs for future refactor PRs\r\n\r\n* Fix explain test failure, test shared state improvement and bug fix, remove unncessary code from convert_submodules\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* implement Daniel's feedback\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix residual errors\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Error fix\r\n\r\n* Using mixins now so no loose attributes on defaults, fixed height width schema restrictions\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Removed unnecessary filtering from defaults schema logic\r\n\r\n* Piero's simplification and cleanup\r\n\r\n* Flake 8\r\n\r\n* Fix test and update docstrings from Pieros change\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Address most of Justin's feedback\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fix tests and more feedback implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Address feedback\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Renamed files to correspond to ModelConfig class name\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Missing constant import\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed incorrect merge conflict resolution\r\n\r\n* Flake8\r\n\r\n* Fix remaining tests (except old models training from trainer type removal)\r\n\r\n* Fixed old models not validating trainer type\r\n\r\n* Add output_feature=False to test_hyperopt_ray.py\r\n\r\n* Implement Kabir's feedback\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Travis Addair \r\nCo-authored-by: w4nderlust ", "code": "def _set_hyperopt_defaults(self):\n \n if not self.hyperopt:\n return\n\n scheduler = self.hyperopt.get(\"executor\", {}).get(\"scheduler\")\n if not scheduler:\n return\n\n if EXECUTOR in self.hyperopt:\n set_default_value(self.hyperopt[EXECUTOR], TYPE, RAY)\n\n # Disable early stopping when using a scheduler. We achieve this by setting the parameter\n # to -1, which ensures the condition to apply early stopping is never met.\n early_stop = self.trainer.early_stop\n if early_stop is not None and early_stop != -1:\n warnings.warn(\"Can't utilize `early_stop` while using a hyperopt scheduler. 
Setting early stop to -1.\")\n self.trainer.early_stop = -1\n\n max_t = scheduler.get(\"max_t\")\n time_attr = scheduler.get(\"time_attr\")\n epochs = self.trainer.to_dict().get(\"epochs\", None)\n if max_t is not None:\n if time_attr == \"time_total_s\":\n if epochs is None:\n setattr(self.trainer, \"epochs\", sys.maxsize) # continue training until time limit hit\n # else continue training until either time or trainer epochs limit hit\n elif epochs is not None and epochs != max_t:\n raise ValueError(\n \"Cannot set trainer `epochs` when using hyperopt scheduler w/different training_iteration `max_t`. \"\n \"Unset one of these parameters in your config or make sure their values match.\"\n )\n else:\n setattr(self.trainer, \"epochs\", max_t) # run trainer until scheduler epochs limit hit\n elif epochs is not None:\n scheduler[\"max_t\"] = epochs # run scheduler until trainer epochs limit hit\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 517, "n_words": 189, "vocab_size": 106, "complexity": 12, "nloc": 28, "token_counts": 188, "n_ast_nodes": 326, "n_identifiers": 21, "d_id": 1427, "documentation": { "docstring": "This function was migrated from defaults.py with the intention of setting some hyperopt defaults while\n the hyperopt section of the config object is not fully complete.\n\n Returns:\n None -> modifies trainer and hyperopt sections\n ", "n_words": 34, "vocab_size": 29, "n_whitespaces": 66, "language": "en" } }, { "id": 108041, "commit_id": "c0a384e9f41673207eac75e276b293418bd32965", "repo": "matplotlib", "path": "lib/matplotlib/category.py", "file_name": "category.py", "fun_name": "update", "commit_message": "Fix incorrect deprecation warning", "code": "def update(self, data):\n \n data = np.atleast_1d(np.array(data, dtype=object))\n # check if convertible to number:\n convertible = True\n for val in OrderedDict.fromkeys(data):\n # OrderedDict just iterates over unique values in data.\n _api.check_isinstance((str, bytes), value=val)\n if convertible:\n # this will only be called so long as convertible is True.\n convertible = self._str_is_convertible(val)\n if val not in self._mapping:\n self._mapping[val] = next(self._counter)\n if data.size and convertible:\n _log.info('Using categorical units to plot a list of strings '\n 'that are all parsable as floats or dates. 
If these '\n 'strings should be plotted as numbers, cast to the '\n 'appropriate data type before plotting.')\n\n\n# Register the converter with Matplotlib's unit framework\nunits.registry[str] = StrCategoryConverter()\nunits.registry[np.str_] = StrCategoryConverter()\nunits.registry[bytes] = StrCategoryConverter()\nunits.registry[np.bytes_] = StrCategoryConverter()\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 317, "n_words": 117, "vocab_size": 85, "complexity": 6, "nloc": 14, "token_counts": 100, "n_ast_nodes": 238, "n_identifiers": 29, "d_id": 23035, "documentation": { "docstring": "\n Map new values to integer identifiers.\n\n Parameters\n ----------\n data : iterable of str or bytes\n\n Raises\n ------\n TypeError\n If elements in *data* are neither str nor bytes.\n ", "n_words": 27, "vocab_size": 26, "n_whitespaces": 95, "language": "en" } }, { "id": 243180, "commit_id": "a37593f004247ebf69d5582524da6dc5143cb023", "repo": "Pillow", "path": "src/PIL/Image.py", "file_name": "Image.py", "fun_name": "putpixel", "commit_message": "Allow RGB and RGBA values for PA image putpixel", "code": "def putpixel(self, xy, value):\n \n\n if self.readonly:\n self._copy()\n self.load()\n\n if self.pyaccess:\n return self.pyaccess.putpixel(xy, value)\n\n if (\n self.mode in (\"P\", \"PA\")\n and isinstance(value, (list, tuple))\n and len(value) in [3, 4]\n ):\n # RGB or RGBA value for a P or PA image\n if self.mode == \"PA\":\n alpha = value[3] if len(value) == 4 else 255\n value = value[:3]\n value = self.palette.getcolor(value, self)\n if self.mode == \"PA\":\n value = (value, alpha)\n return self.im.putpixel(xy, value)\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 264, "n_words": 71, "vocab_size": 49, "complexity": 9, "nloc": 18, "token_counts": 142, "n_ast_nodes": 225, "n_identifiers": 17, "d_id": 70002, "documentation": { "docstring": "\n Modifies the pixel at the given position. The color is given as\n a single numerical value for single-band images, and a tuple for\n multi-band images. In addition to this, RGB and RGBA tuples are\n accepted for P and PA images.\n\n Note that this method is relatively slow. For more extensive changes,\n use :py:meth:`~PIL.Image.Image.paste` or the :py:mod:`~PIL.ImageDraw`\n module instead.\n\n See:\n\n * :py:meth:`~PIL.Image.Image.paste`\n * :py:meth:`~PIL.Image.Image.putdata`\n * :py:mod:`~PIL.ImageDraw`\n\n :param xy: The pixel coordinate, given as (x, y). See\n :ref:`coordinate-system`.\n :param value: The pixel value.\n ", "n_words": 81, "vocab_size": 60, "n_whitespaces": 191, "language": "en" } }, { "id": 130855, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/serve/batching.py", "file_name": "batching.py", "fun_name": "batch", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def batch(_func=None, max_batch_size=10, batch_wait_timeout_s=0.0):\n ", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "\"\"\"Converts a function to asynchronously handle batches.\n\n The function can be a standalonea class method. Inthe function must betake a list ofits solereturn a list of the sameainvokedthe caller passes a single object. 
These will beand executed asynchronously oncea batch ofor `batch_wait_timeout_s` hasoccurs first:\n\n >>>", "n_ast_errors": 14, "ast_levels": 10, "n_whitespaces": 7, "n_words": 4, "vocab_size": 4, "complexity": 11, "nloc": 21, "token_counts": 137, "n_ast_nodes": 167, "n_identifiers": 54, "d_id": 29405, "documentation": { "docstring": "Converts a function to asynchronously handle batches.\n\n The function can be a standalone function or a class method. In both\n cases, the function must be `async def` and take a list of objects as\n its sole argument and return a list of the same length as a result.\n\n When invoked, the caller passes a single object. These will be batched\n and executed asynchronously once there is a batch of `max_batch_size`\n or `batch_wait_timeout_s` has elapsed, whichever occurs first.\n\n Example:\n\n >>> @serve.batch(max_batch_size=50, batch_wait_timeout_s=0.5)", "n_words": 81, "vocab_size": 59, "n_whitespaces": 104, "language": "en" } }, { "id": 196096, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/graycode.py", "file_name": "graycode.py", "fun_name": "n", "commit_message": "Updated import locations", "code": "def n(self):\n \n return self.args[0]\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 23, "n_identifiers": 3, "d_id": 47596, "documentation": { "docstring": "\n Returns the dimension of the Gray code.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import GrayCode\n >>> a = GrayCode(5)\n >>> a.n\n 5\n ", "n_words": 21, "vocab_size": 18, "n_whitespaces": 78, "language": "en" } }, { "id": 201203, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/auth_tests/test_context_processors.py", "file_name": "test_context_processors.py", "fun_name": "test_session_not_accessed", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_session_not_accessed(self):\n \n response = self.client.get(\"/auth_processor_no_attr_access/\")\n self.assertContains(response, \"Session not accessed\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 45, "n_identifiers": 6, "d_id": 49899, "documentation": { "docstring": "\n The session is not accessed simply by including\n the auth context processor\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 34, "language": "en" } }, { "id": 221370, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/codecs.py", "file_name": "codecs.py", "fun_name": "iterencode", "commit_message": "add python 3.10.4 for windows", "code": "def iterencode(iterator, encoding, errors='strict', **kwargs):\n \n encoder = getincrementalencoder(encoding)(errors, **kwargs)\n for input in iterator:\n output = encoder.encode(input)\n if output:\n yield output\n output = encoder.encode(\"\", True)\n if output:\n yield output\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 75, "n_words": 28, "vocab_size": 20, "complexity": 4, "nloc": 9, "token_counts": 60, "n_ast_nodes": 100, "n_identifiers": 10, "d_id": 56383, 
"documentation": { "docstring": "\n Encoding iterator.\n\n Encodes the input strings from the iterator using an IncrementalEncoder.\n\n errors and kwargs are passed through to the IncrementalEncoder\n constructor.\n ", "n_words": 22, "vocab_size": 20, "n_whitespaces": 38, "language": "en" } }, { "id": 167771, "commit_id": "f65417656ba8c59438d832b6e2a431f78d40c21c", "repo": "pandas", "path": "pandas/core/groupby/groupby.py", "file_name": "groupby.py", "fun_name": "indices", "commit_message": "TYP: more return annotations in core/ (#47618)\n\n* TYP: more return annotations in core/\r\n\r\n* from __future__ import annotations\r\n\r\n* more __future__", "code": "def indices(self) -> dict[Hashable, npt.NDArray[np.intp]]:\n \n return self.grouper.indices\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 5, "token_counts": 26, "n_ast_nodes": 41, "n_identifiers": 9, "d_id": 40114, "documentation": { "docstring": "\n Dict {group name -> group indices}.\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 21, "language": "en" } }, { "id": 265475, "commit_id": "e96620260a6c1b5cf8cff2112d40d061984a7b2c", "repo": "netbox", "path": "netbox/netbox/denormalized.py", "file_name": "denormalized.py", "fun_name": "register", "commit_message": "Closes #9903: Implement a mechanism for automatically updating denormalized fields", "code": "def register(model, field_name, mappings):\n \n logger.debug(f'Registering denormalized field {model}.{field_name}')\n\n field = model._meta.get_field(field_name)\n rel_model = field.related_model\n\n registry['denormalized_fields'][rel_model].append(\n (model, field_name, mappings)\n )\n\n\n@receiver(post_save)", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "@receiver(post_save)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 44, "n_words": 20, "vocab_size": 17, "complexity": 1, "nloc": 7, "token_counts": 50, "n_ast_nodes": 97, "n_identifiers": 15, "d_id": 78110, "documentation": { "docstring": "\n Register a denormalized model field to ensure that it is kept up-to-date with the related object.\n\n Args:\n model: The class being updated\n field_name: The name of the field related to the triggering instance\n mappings: Dictionary mapping of local to remote fields\n ", "n_words": 41, "vocab_size": 33, "n_whitespaces": 72, "language": "en" } }, { "id": 313987, "commit_id": "4bc5d7bfed07c20d6f3438ab91c734a620505a33", "repo": "core", "path": "tests/components/zha/test_switch.py", "file_name": "test_switch.py", "fun_name": "switch_platform_only", "commit_message": "Speed up zha tests (#73627)", "code": "def switch_platform_only():\n \n with patch(\n \"homeassistant.components.zha.PLATFORMS\",\n (\n Platform.DEVICE_TRACKER,\n Platform.SENSOR,\n Platform.SELECT,\n Platform.SWITCH,\n ),\n ):\n yield\n\n\n@pytest.fixture", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 94, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 11, "token_counts": 32, "n_ast_nodes": 61, "n_identifiers": 9, "d_id": 112598, "documentation": { "docstring": "Only setup the switch and required base platforms to speed up tests.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 223420, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": 
"XX-Net", "path": "python3.10.4/Lib/doctest.py", "file_name": "doctest.py", "fun_name": "report_start", "commit_message": "add python 3.10.4 for windows", "code": "def report_start(self, out, test, example):\n \n if self._verbose:\n if example.want:\n out('Trying:\\n' + _indent(example.source) +\n 'Expecting:\\n' + _indent(example.want))\n else:\n out('Trying:\\n' + _indent(example.source) +\n 'Expecting nothing\\n')\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 127, "n_words": 23, "vocab_size": 16, "complexity": 3, "nloc": 8, "token_counts": 57, "n_ast_nodes": 104, "n_identifiers": 9, "d_id": 56893, "documentation": { "docstring": "\n Report that the test runner is about to process the given\n example. (Only displays a message if verbose=True)\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 41, "language": "en" } }, { "id": 215577, "commit_id": "56045b0ee4c11b395895cb0a11279dfea8c2242f", "repo": "salt", "path": "tests/pytests/unit/utils/jinja/test_salt_cache_loader.py", "file_name": "test_salt_cache_loader.py", "fun_name": "test_cached_file_client", "commit_message": "Clean up salt.transport.(client,server) references", "code": "def test_cached_file_client(get_loader, minion_opts):\n \n with patch(\"salt.channel.client.ReqChannel.factory\", Mock()):\n loader_a = SaltCacheLoader(minion_opts)\n loader_b = SaltCacheLoader(minion_opts)\n assert loader_a._file_client is loader_b._file_client\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 39, "n_words": 16, "vocab_size": 14, "complexity": 1, "nloc": 5, "token_counts": 38, "n_ast_nodes": 67, "n_identifiers": 9, "d_id": 54037, "documentation": { "docstring": "\n Multiple instantiations of SaltCacheLoader use the cached file client\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 16, "language": "en" } }, { "id": 182785, "commit_id": "b115db9d8d4f1c9ab20a3d3bef5d5a729ea8b57a", "repo": "textual", "path": "src/textual/app.py", "file_name": "app.py", "fun_name": "get_css_variables", "commit_message": "docstring", "code": "def get_css_variables(self) -> dict[str, str]:\n \n variables = self.design.generate(self.dark)\n return variables\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 31, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 8, "token_counts": 27, "n_ast_nodes": 44, "n_identifiers": 8, "d_id": 43965, "documentation": { "docstring": "Get a mapping of variables used to pre-populate CSS.\n\n Returns:\n dict[str, str]: A mapping of variable name to value.\n ", "n_words": 19, "vocab_size": 16, "n_whitespaces": 44, "language": "en" } }, { "id": 246600, "commit_id": "64c73c6ac88a740ee480a0ad1f9afc8596bccfa4", "repo": "synapse", "path": "tests/rest/client/test_login.py", "file_name": "test_login.py", "fun_name": "test_multi_sso_redirect_to_cas", "commit_message": "Add type hints to `tests/rest/client` (#12066)", "code": "def test_multi_sso_redirect_to_cas(self) -> None:\n \n\n channel = self.make_request(\n \"GET\",\n \"/_synapse/client/pick_idp?redirectUrl=\"\n + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL)\n + \"&idp=cas\",\n shorthand=False,\n )\n self.assertEqual(channel.code, 302, channel.result)\n location_headers = channel.headers.getRawHeaders(\"Location\")\n assert location_headers\n cas_uri = location_headers[0]\n 
cas_uri_path, cas_uri_query = cas_uri.split(\"?\", 1)\n\n # it should redirect us to the login page of the cas server\n self.assertEqual(cas_uri_path, CAS_SERVER + \"/login\")\n\n # check that the redirectUrl is correctly encoded in the service param - ie, the\n # place that CAS will redirect to\n cas_uri_params = urllib.parse.parse_qs(cas_uri_query)\n service_uri = cas_uri_params[\"service\"][0]\n _, service_uri_query = service_uri.split(\"?\", 1)\n service_uri_params = urllib.parse.parse_qs(service_uri_query)\n self.assertEqual(service_uri_params[\"redirectUrl\"][0], TEST_CLIENT_REDIRECT_URL)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 260, "n_words": 86, "vocab_size": 66, "complexity": 1, "nloc": 20, "token_counts": 143, "n_ast_nodes": 239, "n_identifiers": 26, "d_id": 71290, "documentation": { "docstring": "If CAS is chosen, should redirect to the CAS server", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 22107, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/sessions.py", "file_name": "sessions.py", "fun_name": "merge_hooks", "commit_message": "Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):\n \n if session_hooks is None or session_hooks.get(\"response\") == []:\n return request_hooks\n\n if request_hooks is None or request_hooks.get(\"response\") == []:\n return session_hooks\n\n return merge_setting(request_hooks, session_hooks, dict_class)\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 54, "n_words": 28, "vocab_size": 17, "complexity": 5, "nloc": 6, "token_counts": 55, "n_ast_nodes": 90, "n_identifiers": 7, "d_id": 4184, "documentation": { "docstring": "Properly merges both requests and session hooks.\n\n This is necessary because when request_hooks == {'response': []}, the\n merge breaks Session hooks entirely.\n ", "n_words": 22, "vocab_size": 22, "n_whitespaces": 31, "language": "en" } }, { "id": 24443, "commit_id": "ddaa2c2552e19635cd6cdf38619f1f176c358f89", "repo": "PaddleOCR", "path": "ppocr/modeling/heads/table_att_head.py", "file_name": "table_att_head.py", "fun_name": "_decode", "commit_message": "add SLANet", "code": "def _decode(self, pre_chars, features, hidden):\n ", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "\"\"\"\n Predict tablecoordinates for each", "n_ast_errors": 2, "ast_levels": 7, "n_whitespaces": 12, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 7, "token_counts": 60, "n_ast_nodes": 28, "n_identifiers": 12, "d_id": 4732, "documentation": { "docstring": "\n Predict table label and coordinates for each step", "n_words": 8, "vocab_size": 8, "n_whitespaces": 15, "language": "en" } }, { "id": 128381, "commit_id": "3e7c207f02e7368e1245e2cfafd27cb0bf179ff7", "repo": "ray", "path": "rllib/models/specs/specs_base.py", "file_name": "specs_base.py", "fun_name": "_full_shape", "commit_message": "[RLlib] Introduce TensorSpec data structure for RLModule / Model definitions (#28946)\n\n* added tensor specs\r\n\r\nSigned-off-by: Kourosh Hakhamaneshi \r\n\r\n* lint\r\n\r\nSigned-off-by: Kourosh Hakhamaneshi \r\n\r\n* 1. Added numpy specs\r\n2. 
Added spec.sample()\r\n\r\nSigned-off-by: Kourosh Hakhamaneshi \r\n\r\n* added unittests for sampling\r\n\r\nSigned-off-by: Kourosh Hakhamaneshi \r\n\r\n* added tensorflow specs\r\n\r\nSigned-off-by: Kourosh Hakhamaneshi \r\n\r\n* added jax\r\n\r\nSigned-off-by: Kourosh Hakhamaneshi \r\n\r\n* lint\r\n\r\nSigned-off-by: Kourosh Hakhamaneshi \r\n\r\n* removed jax test to be able to merge this part\r\n\r\nSigned-off-by: Kourosh Hakhamaneshi \r\n\r\n* lint\r\n\r\nSigned-off-by: Kourosh Hakhamaneshi \r\n\r\n* added docs\r\n\r\nSigned-off-by: Kourosh Hakhamaneshi \r\n\r\n* fixed typo\r\n\r\nSigned-off-by: Kourosh Hakhamaneshi \r\n\r\n* use full/fill instead of sample\r\n\r\nSigned-off-by: Kourosh Hakhamaneshi \r\n\r\n* changed the input delimiter to be comma instead of whitespace. It also ignores whitespaces now.\r\n\r\nSigned-off-by: Kourosh Hakhamaneshi \r\n\r\n* simplified parser code\r\n\r\nSigned-off-by: Kourosh Hakhamaneshi \r\n\r\n* simplified parser code\r\n\r\nSigned-off-by: Kourosh Hakhamaneshi \r\n\r\nSigned-off-by: Kourosh Hakhamaneshi ", "code": "def _full_shape(self) -> Tuple[int]:\n \n sampled_shape = tuple()\n for d in self._expected_shape:\n if isinstance(d, int):\n sampled_shape += (d,)\n else:\n sampled_shape += (1,)\n return sampled_shape\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 103, "n_words": 23, "vocab_size": 19, "complexity": 3, "nloc": 10, "token_counts": 47, "n_ast_nodes": 76, "n_identifiers": 9, "d_id": 28689, "documentation": { "docstring": "Converts the expected shape to a shape by replacing the unknown dimension\n sizes with a value of 1.", "n_words": 18, "vocab_size": 15, "n_whitespaces": 24, "language": "en" } }, { "id": 177333, "commit_id": "8a325d26aa7fdd3a72580c4720fa97f971bbefcb", "repo": "networkx", "path": "networkx/linalg/laplacianmatrix.py", "file_name": "laplacianmatrix.py", "fun_name": "laplacian_matrix", "commit_message": "Use scipy.sparse array datastructure (#6037)\n\n* Use scipy.sparse array datastructure\r\n\r\n* Add reminder to rm wrapper when scipy adds creation fns.\r\n\r\n* Rm mention of np matrix from code comment.\r\n\r\n* Update networkx/algorithms/bipartite/matrix.py\r\n\r\nCo-authored-by: Stefan van der Walt \r\n\r\nCo-authored-by: Ross Barnowski \r\nCo-authored-by: Stefan van der Walt ", "code": "def laplacian_matrix(G, nodelist=None, weight=\"weight\"):\n \n import scipy as sp\n import scipy.sparse # call as sp.sparse\n\n if nodelist is None:\n nodelist = list(G)\n A = nx.to_scipy_sparse_array(G, nodelist=nodelist, weight=weight, format=\"csr\")\n n, m = A.shape\n # TODO: rm csr_array wrapper when spdiags can produce arrays\n D = sp.sparse.csr_array(sp.sparse.spdiags(A.sum(axis=1), 0, m, n, format=\"csr\"))\n return D - A\n\n\n@not_implemented_for(\"directed\")", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@not_implemented_for(\"directed\")", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 87, "n_words": 53, "vocab_size": 43, "complexity": 2, "nloc": 9, "token_counts": 98, "n_ast_nodes": 167, "n_identifiers": 21, "d_id": 42352, "documentation": { "docstring": "Returns the Laplacian matrix of G.\n\n The graph Laplacian is the matrix L = D - A, where\n A is the adjacency matrix and D is the diagonal matrix of node degrees.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in nodelist.\n If 
nodelist is None, then the ordering is produced by G.nodes().\n\n weight : string or None, optional (default='weight')\n The edge data key used to compute each value in the matrix.\n If None, then each edge has weight 1.\n\n Returns\n -------\n L : SciPy sparse array\n The Laplacian matrix of G.\n\n Notes\n -----\n For MultiGraph, the edges weights are summed.\n\n See Also\n --------\n to_numpy_array\n normalized_laplacian_matrix\n laplacian_spectrum\n ", "n_words": 121, "vocab_size": 76, "n_whitespaces": 213, "language": "en" } }, { "id": 176967, "commit_id": "435b4622d106d14a3627e162ee163b113bac9854", "repo": "networkx", "path": "networkx/algorithms/efficiency_measures.py", "file_name": "efficiency_measures.py", "fun_name": "global_efficiency", "commit_message": "added examples to efficiency_measures.py (#5643)\n\n* added example on efficiency\r\n\r\n* added example on global_efficiency\r\n\r\n* added example on local_efficiency\r\n\r\n* adjused round up", "code": "def global_efficiency(G):\n \n n = len(G)\n denom = n * (n - 1)\n if denom != 0:\n lengths = nx.all_pairs_shortest_path_length(G)\n g_eff = 0\n for source, targets in lengths:\n for target, distance in targets.items():\n if distance > 0:\n g_eff += 1 / distance\n g_eff /= denom\n # g_eff = sum(1 / d for s, tgts in lengths\n # for t, d in tgts.items() if d > 0) / denom\n else:\n g_eff = 0\n # TODO This can be made more efficient by computing all pairs shortest\n # path lengths in parallel.\n return g_eff\n\n\n@not_implemented_for(\"directed\")", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@not_implemented_for(\"directed\")", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 227, "n_words": 92, "vocab_size": 55, "complexity": 5, "nloc": 14, "token_counts": 76, "n_ast_nodes": 138, "n_identifiers": 15, "d_id": 42195, "documentation": { "docstring": "Returns the average global efficiency of the graph.\n\n The *efficiency* of a pair of nodes in a graph is the multiplicative\n inverse of the shortest path distance between the nodes. The *average\n global efficiency* of a graph is the average efficiency of all pairs of\n nodes [1]_.\n\n Parameters\n ----------\n G : :class:`networkx.Graph`\n An undirected graph for which to compute the average global efficiency.\n\n Returns\n -------\n float\n The average global efficiency of the graph.\n\n Examples\n --------\n >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])\n >>> round(nx.global_efficiency(G), 12)\n 0.916666666667\n\n Notes\n -----\n Edge weights are ignored when computing the shortest path distances.\n\n See also\n --------\n local_efficiency\n\n References\n ----------\n .. [1] Latora, Vito, and Massimo Marchiori.\n \"Efficient behavior of small-world networks.\"\n *Physical Review Letters* 87.19 (2001): 198701.\n \n\n ", "n_words": 129, "vocab_size": 86, "n_whitespaces": 248, "language": "en" } }, { "id": 101978, "commit_id": "2e8ef5e3c8f2df0f1cca9b342baa8aaa6f620650", "repo": "faceswap", "path": "lib/gui/utils/image.py", "file_name": "image.py", "fun_name": "image", "commit_message": "GUI - Preview updates\n - Training preview. 
Embed preview pop-out window\n - Bugfix - convert/extract previews", "code": "def image(self) -> ImageTk.PhotoImage:\n \n assert self._preview_image_tk is not None\n return self._preview_image_tk\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 32, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 21, "n_ast_nodes": 35, "n_identifiers": 5, "d_id": 21352, "documentation": { "docstring": ":class:`PIL.ImageTk.PhotoImage` The preview image for displaying in a tkinter canvas ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 241585, "commit_id": "650c710efacd633fa283955145342bb64063c883", "repo": "lightning", "path": "tests/strategies/test_sharded_strategy.py", "file_name": "test_sharded_strategy.py", "fun_name": "test_ddp_sharded_strategy_fit_ckpt_path_downsize_gpus", "commit_message": "Rename training plugin test files & names to strategy (#11303)", "code": "def test_ddp_sharded_strategy_fit_ckpt_path_downsize_gpus(tmpdir):\n \n model = BoringModel()\n trainer = Trainer(strategy=\"ddp_sharded_spawn\", fast_dev_run=True, gpus=2)\n\n trainer.fit(model)\n\n checkpoint_path = os.path.join(tmpdir, \"model.pt\")\n trainer.save_checkpoint(checkpoint_path)\n\n model = BoringModel()\n\n trainer = Trainer(strategy=\"ddp_sharded_spawn\", fast_dev_run=True, gpus=1)\n\n trainer.fit(model, ckpt_path=checkpoint_path)\n\n\n@RunIf(min_gpus=1, skip_windows=True, fairscale=True)", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "@RunIf(min_gpus=1, skip_windows=True, fairscale=True)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 55, "n_words": 29, "vocab_size": 20, "complexity": 1, "nloc": 9, "token_counts": 82, "n_ast_nodes": 158, "n_identifiers": 20, "d_id": 69610, "documentation": { "docstring": "Test to ensure that resuming from checkpoint works when downsizing number of GPUS.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 247588, "commit_id": "2fcf4b3f6cd2a0be6597622664636d2219957c2a", "repo": "synapse", "path": "tests/util/caches/test_descriptors.py", "file_name": "test_descriptors.py", "fun_name": "test_cancel_logcontexts", "commit_message": "Add cancellation support to `@cached` and `@cachedList` decorators (#12183)\n\nThese decorators mostly support cancellation already. 
Add cancellation\r\ntests and fix use of finished logging contexts by delaying cancellation,\r\nas suggested by @erikjohnston.\r\n\r\nSigned-off-by: Sean Quah ", "code": "def test_cancel_logcontexts(self):\n \n complete_lookup: \"Deferred[None]\" = Deferred()\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 16, "token_counts": 81, "n_ast_nodes": 27, "n_identifiers": 4, "d_id": 71762, "documentation": { "docstring": "Test that cancellation does not break logcontexts.\n\n * The `CancelledError` must be raised with the correct logcontext.\n * The inner lookup must not resume with a finished logcontext.\n * The inner lookup must not restore a finished logcontext when done.\n ", "n_words": 40, "vocab_size": 26, "n_whitespaces": 68, "language": "en" } }, { "id": 201697, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/backends/mysql/test_features.py", "file_name": "test_features.py", "fun_name": "test_supports_transactions", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_supports_transactions(self):\n \n with mock.patch(\n \"django.db.connection.features._mysql_storage_engine\", \"InnoDB\"\n ):\n self.assertTrue(connection.features.supports_transactions)\n del connection.features.supports_transactions\n with mock.patch(\n \"django.db.connection.features._mysql_storage_engine\", \"MyISAM\"\n ):\n self.assertFalse(connection.features.supports_transactions)\n del connection.features.supports_transactions\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 111, "n_words": 18, "vocab_size": 12, "complexity": 1, "nloc": 11, "token_counts": 58, "n_ast_nodes": 105, "n_identifiers": 9, "d_id": 49980, "documentation": { "docstring": "\n All storage engines except MyISAM support transactions.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 100868, "commit_id": "94c3dcff7ebd02a5a5758f33a3eb2bfc66282117", "repo": "faceswap", "path": "lib/model/losses_plaid.py", "file_name": "losses_plaid.py", "fun_name": "_apply_mask", "commit_message": "Training updates\n - Add multiple selected loss functions\n - Unlock loss as a model configuration\n - Phaze-A remove encoder scaling max xap", "code": "def _apply_mask(cls, y_true, y_pred, mask_channel, mask_prop=1.0):\n \n if mask_channel == -1:\n logger.debug(\"No mask to apply\")\n return y_true[..., :3], y_pred[..., :3]\n\n logger.debug(\"Applying mask from channel %s\", mask_channel)\n\n mask = K.tile(K.expand_dims(y_true[..., mask_channel], axis=-1), (1, 1, 1, 3))\n mask_as_k_inv_prop = 1 - mask_prop\n mask = (mask * mask_prop) + mask_as_k_inv_prop\n\n m_true = y_true[..., :3] * mask\n m_pred = y_pred[..., :3] * mask\n\n return m_true, m_pred\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 146, "n_words": 61, "vocab_size": 42, "complexity": 2, "nloc": 11, "token_counts": 127, "n_ast_nodes": 187, "n_identifiers": 16, "d_id": 20319, "documentation": { "docstring": " Apply the mask to the input y_true and y_pred. 
If a mask is not required then\n return the unmasked inputs.\n\n Parameters\n ----------\n y_true: tensor or variable\n The ground truth value\n y_pred: tensor or variable\n The predicted value\n mask_channel: int\n The channel within y_true that the required mask resides in\n mask_prop: float, optional\n The amount of mask propagation. Default: `1.0`\n\n Returns\n -------\n tuple\n (n_true, n_pred): The ground truth and predicted value tensors with the mask applied\n ", "n_words": 75, "vocab_size": 52, "n_whitespaces": 208, "language": "en" } }, { "id": 123465, "commit_id": "df4293473d2fb6e887e31522cab5aff95e201581", "repo": "sqlmap", "path": "lib/core/common.py", "file_name": "common.py", "fun_name": "wasLastResponseDelayed", "commit_message": "Fixing DeprecationWarning (logger.warn)", "code": "def wasLastResponseDelayed():\n \n\n # 99.9999999997440% of all non time-based SQL injection affected\n # response times should be inside +-7*stdev([normal response times])\n # Math reference: http://www.answers.com/topic/standard-deviation\n\n deviation = stdev(kb.responseTimes.get(kb.responseTimeMode, []))\n threadData = getCurrentThreadData()\n\n if deviation and not conf.direct and not conf.disableStats:\n if len(kb.responseTimes[kb.responseTimeMode]) < MIN_TIME_RESPONSES:\n warnMsg = \"time-based standard deviation method used on a model \"\n warnMsg += \"with less than %d response times\" % MIN_TIME_RESPONSES\n logger.warning(warnMsg)\n\n lowerStdLimit = average(kb.responseTimes[kb.responseTimeMode]) + TIME_STDEV_COEFF * deviation\n retVal = (threadData.lastQueryDuration >= max(MIN_VALID_DELAYED_RESPONSE, lowerStdLimit))\n\n if not kb.testMode and retVal:\n if kb.adjustTimeDelay is None:\n msg = \"do you want sqlmap to try to optimize value(s) \"\n msg += \"for DBMS delay responses (option '--time-sec')? 
[Y/n] \"\n\n kb.adjustTimeDelay = ADJUST_TIME_DELAY.DISABLE if not readInput(msg, default='Y', boolean=True) else ADJUST_TIME_DELAY.YES\n if kb.adjustTimeDelay is ADJUST_TIME_DELAY.YES:\n adjustTimeDelay(threadData.lastQueryDuration, lowerStdLimit)\n\n return retVal\n else:\n delta = threadData.lastQueryDuration - conf.timeSec\n if Backend.getIdentifiedDbms() in (DBMS.MYSQL,): # MySQL's SLEEP(X) lasts 0.05 seconds shorter on average\n delta += 0.05\n return delta >= 0\n", "url": "https://github.com/sqlmapproject/sqlmap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 360, "n_words": 153, "vocab_size": 110, "complexity": 11, "nloc": 23, "token_counts": 200, "n_ast_nodes": 327, "n_identifiers": 39, "d_id": 27379, "documentation": { "docstring": "\n Returns True if the last web request resulted in a time-delay\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 18, "language": "en" } }, { "id": 283199, "commit_id": "ab4de1dd70fba866930150e440a03e461a6ca6a8", "repo": "OpenBBTerminal", "path": "build/pyinstaller/user_agent/base.py", "file_name": "base.py", "fun_name": "generate_navigator", "commit_message": "Create a packaged app bundle with Pyinstaller (#1525)\n\n* Add dashboard widget assets\r\n\r\n* Add ipywidgets and ipyflex to project\r\n\r\n* Add currencies dashboard notebook\r\n\r\n* Update docs and docstrings\r\n\r\n* Add pyinstaller to project deps\r\n\r\n* Add pyinstaller artifacts to gitignore\r\n\r\n* Fix linter errors in terminal.py\r\n\r\n* Update cspell hook and action with a pyinstaller specific word\r\n\r\n* Add pyinstaller specfile and artifacts\r\n\r\n* Add splashscreen image\r\n\r\n* Add app icon\r\n\r\n* adding splash screen support to terminal.spec and terminal.py\r\n\r\n* Restore the conda env build files\r\n\r\n* Sync deps\r\n\r\n* Add border to the splashscreen image\r\n\r\n* Clean up terminal launcher\r\n\r\n* Add support for default feature flags in packages apps\r\n\r\n* Fix types and linting\r\n\r\n* Add splashscreen management to app bootup\r\n\r\n* Check prediction feature flag when entering crypto/pred\r\n\r\n* Update pyinstaller spec file\r\n\r\n* fix .spec file to work for splash and icon - removed the \"..\"\r\n\r\n* Allows to export when using installer (#1568)\r\n\r\n* fix export for packaged apps\r\n\r\n* fix filename\r\n\r\n* Git : replace commit_hash when it is set in config_terminal\r\n\r\n* Add update of the git commit hash in gtff default during build\r\n\r\n* Add packaged app name and feature flag to logs\r\n\r\n* Add platform specific icon assignment\r\n\r\n* Add macOS build assets\r\n\r\n* Add tensorflow to hidden imports\r\n\r\n* Move LOGGING_COMMIT_HASH to gtff\r\n\r\n* Adding files/folders needed to .spec and pyinstaller folder. 
This will make certain commands work again.\r\n\r\n* Linting\r\n\r\n* Workflow : ignore ./build/pyinstaller from codespell\r\n\r\n* Workflow : exclude ./build/pyinstaller from flake8\r\n\r\n* Poetry + Workflow : add types-six\r\n\r\n* Pyinstaller : remove property_cached, user_agent and vaderSentiment\r\n\r\n* Revert \"Pyinstaller : remove property_cached, user_agent and vaderSentiment\"\r\n\r\nThis reverts commit dbb3e2b81086f97819ebd21457148c7160a4d703.\r\n\r\n* Clean up local paths in specfile\r\n\r\n* Validate deps have correct Jinja version (they do)\r\n\r\n* Fix logging commit hash to be set correctly for the logger to see it\r\n\r\nCo-authored-by: Andrew \r\nCo-authored-by: didierlopes.eth \r\nCo-authored-by: Chavithra PARANA ", "code": "def generate_navigator(os=None, navigator=None, platform=None, device_type=None):\n \n\n if platform is not None:\n os = platform\n warn(\n \"The `platform` option is deprecated.\" \" Use `os` option instead.\",\n stacklevel=3,\n )\n device_type, os_id, navigator_id = pick_config_ids(device_type, os, navigator)\n system = build_system_components(device_type, os_id, navigator_id)\n app = build_app_components(os_id, navigator_id)\n ua_template = choose_ua_template(device_type, navigator_id, app)\n user_agent = ua_template.format(system=system, app=app)\n app_version = build_navigator_app_version(\n os_id, navigator_id, system[\"platform_version\"], user_agent\n )\n return {\n # ids\n \"os_id\": os_id,\n \"navigator_id\": navigator_id,\n # system components\n \"platform\": system[\"platform\"],\n \"oscpu\": system[\"oscpu\"],\n # app components\n \"build_version\": app[\"build_version\"],\n \"build_id\": app[\"build_id\"],\n \"app_version\": app_version,\n \"app_name\": app[\"name\"],\n \"app_code_name\": \"Mozilla\",\n \"product\": \"Gecko\",\n \"product_sub\": app[\"product_sub\"],\n \"vendor\": app[\"vendor\"],\n \"vendor_sub\": \"\",\n # compiled user agent\n \"user_agent\": user_agent,\n }\n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 311, "n_words": 102, "vocab_size": 79, "complexity": 2, "nloc": 31, "token_counts": 190, "n_ast_nodes": 321, "n_identifiers": 20, "d_id": 84465, "documentation": { "docstring": "\n Generates web navigator's config\n\n :param os: limit list of oses for generation\n :type os: string or list/tuple or None\n :param navigator: limit list of browser engines for generation\n :type navigator: string or list/tuple or None\n :param device_type: limit possible oses by device type\n :type device_type: list/tuple or None, possible values:\n \"desktop\", \"smartphone\", \"tablet\", \"all\"\n\n :return: User-Agent config\n :rtype: dict with keys (os, name, platform, oscpu, build_version,\n build_id, app_version, app_name, app_code_name,\n product, product_sub, vendor, vendor_sub,\n user_agent)\n :raises InvalidOption: if could not generate user-agent for\n any combination of allowed platforms and navigators\n :raise InvalidOption: if any of passed options is invalid\n ", "n_words": 99, "vocab_size": 69, "n_whitespaces": 231, "language": "en" } }, { "id": 178747, "commit_id": "3b1c76ce9d79543de81353f358b3108df91078fc", "repo": "Nuitka", "path": "nuitka/nodes/ModuleNodes.py", "file_name": "ModuleNodes.py", "fun_name": "isTechnical", "commit_message": "Standalone: Exclude more of standard library modules\n\n* This removes tkinter and many modules expected to never\n be implicit dependencies.\n\n* The real reduction will 
be achieved using Python PGO once\n it covers bytecode too.\n\n* Don't keep required extension modules as root modules,\n instead make them proper early init modules.", "code": "def isTechnical(self):\n \n return self.technical\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 19, "n_identifiers": 3, "d_id": 42811, "documentation": { "docstring": "Must be present as it's used in CPython library initialization.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 42538, "commit_id": "8a4cf5d94eb94b6427c5d1d7907ba07b119932c5", "repo": "nltk", "path": "nltk/corpus/reader/framenet.py", "file_name": "framenet.py", "fun_name": "fes", "commit_message": "Docstring tests (#3050)\n\n* fixed pytests\r\n\r\n* fixed more pytests\r\n\r\n* fixed more pytest and changed multiline pytest issues fixes for snowball.py and causal.py\r\n\r\n* fixed pytests (mainly multiline or rounding issues)\r\n\r\n* fixed treebank pytests, removed test for return_string=True (deprecated)\r\n\r\n* fixed destructive.py pytests, removed test for return_string=True (deprecated)\r\n\r\n* fixed pytest (rounding issues)\r\n\r\n* fixed pytest (initialised missing object)\r\n\r\n* fixed pytest (formatting issues)\r\n\r\n* fixed pytest (formatting issues)\r\n\r\n* fixed pytest (formatting issues)\r\n\r\n* added pytest +SKIP for deprecated module stanford\r\n\r\n* updated AUTHORS.md\r\n\r\n* changed docstring corrections by usage of ELLIPSIS and different roundings\r\n\r\n* fixed AUTHORS.md to be consistent\r\n\r\n* Fix framenet doctest formatting with pprint\r\n\r\n* Change docstring on MultiListBox.__init__\r\n\r\nI believe the original typo was misinterpreted and changed to something that was not originally intended.\r\n\r\nCo-authored-by: Jan Lennartz \r\nCo-authored-by: Tom Aarsen <37621491+tomaarsen@users.noreply.github.com>\r\nCo-authored-by: Tom Aarsen ", "code": "def fes(self, name=None, frame=None):\n \n # what frames are we searching in?\n if frame is not None:\n if isinstance(frame, int):\n frames = [self.frame(frame)]\n elif isinstance(frame, str):\n frames = self.frames(frame)\n else:\n frames = [frame]\n else:\n frames = self.frames()\n\n return PrettyList(\n fe\n for f in frames\n for fename, fe in f.FE.items()\n if name is None or re.search(name, fename, re.I)\n )\n", "url": "https://github.com/nltk/nltk.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 232, "n_words": 57, "vocab_size": 40, "complexity": 8, "nloc": 16, "token_counts": 108, "n_ast_nodes": 169, "n_identifiers": 17, "d_id": 7600, "documentation": { "docstring": "\n Lists frame element objects. If 'name' is provided, this is treated as\n a case-insensitive regular expression to filter by frame name.\n (Case-insensitivity is because casing of frame element names is not always\n consistent across frames.) 
Specify 'frame' to filter by a frame name pattern,\n ID, or object.\n\n >>> from nltk.corpus import framenet as fn\n >>> fn.fes('Noise_maker')\n []\n >>> sorted([(fe.frame.name,fe.name) for fe in fn.fes('sound')]) # doctest: +NORMALIZE_WHITESPACE\n [('Cause_to_make_noise', 'Sound_maker'), ('Make_noise', 'Sound'),\n ('Make_noise', 'Sound_source'), ('Sound_movement', 'Location_of_sound_source'),\n ('Sound_movement', 'Sound'), ('Sound_movement', 'Sound_source'),\n ('Sounds', 'Component_sound'), ('Sounds', 'Location_of_sound_source'),\n ('Sounds', 'Sound_source'), ('Vocalizations', 'Location_of_sound_source'),\n ('Vocalizations', 'Sound_source')]\n >>> sorted([(fe.frame.name,fe.name) for fe in fn.fes('sound',r'(?i)make_noise')]) # doctest: +NORMALIZE_WHITESPACE\n [('Cause_to_make_noise', 'Sound_maker'),\n ('Make_noise', 'Sound'),\n ('Make_noise', 'Sound_source')]\n >>> sorted(set(fe.name for fe in fn.fes('^sound')))\n ['Sound', 'Sound_maker', 'Sound_source']\n >>> len(fn.fes('^sound$'))\n 2\n\n :param name: A regular expression pattern used to match against\n frame element names. If 'name' is None, then a list of all\n frame elements will be returned.\n :type name: str\n :return: A list of matching frame elements\n :rtype: list(AttrDict)\n ", "n_words": 156, "vocab_size": 95, "n_whitespaces": 382, "language": "en" } }, { "id": 44001, "commit_id": "2b4bf7fe67fc656ceb7bdaad36453b7a5b83ef04", "repo": "airflow", "path": "tests/www/views/test_views.py", "file_name": "test_views.py", "fun_name": "test_mark_task_instance_state", "commit_message": "Use `DagRun.run_id` instead of `execution_date` when updating state of TIs(UI & REST API) (#18724)\n\nWe can now use run_id as well as execution_date to update states\r\nof task instances\r\n\r\nCo-authored-by: Tzu-ping Chung \r\nCo-authored-by: Ash Berlin-Taylor ", "code": "def test_mark_task_instance_state(test_app):\n \n from airflow.models import DAG, DagBag, TaskInstance\n from airflow.operators.dummy import DummyOperator\n from airflow.utils.session import create_session\n from airflow.utils.state import State\n from airflow.utils.timezone import datetime\n from airflow.utils.types import DagRunType\n from airflow.www.views import Airflow\n from tests.test_utils.db import clear_db_runs\n\n clear_db_runs()\n start_date = datetime(2020, 1, 1)\n with DAG(\"test_mark_task_instance_state\", start_date=start_date) as dag:\n task_1 = DummyOperator(task_id=\"task_1\")\n task_2 = DummyOperator(task_id=\"task_2\")\n task_3 = DummyOperator(task_id=\"task_3\")\n task_4 = DummyOperator(task_id=\"task_4\")\n task_5 = DummyOperator(task_id=\"task_5\")\n\n task_1 >> [task_2, task_3, task_4, task_5]\n\n dagrun = dag.create_dagrun(\n start_date=start_date,\n execution_date=start_date,\n data_interval=(start_date, start_date),\n state=State.FAILED,\n run_type=DagRunType.SCHEDULED,\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 197, "n_words": 78, "vocab_size": 57, "complexity": 1, "nloc": 56, "token_counts": 437, "n_ast_nodes": 285, "n_identifiers": 41, "d_id": 8118, "documentation": { "docstring": "\n Test that _mark_task_instance_state() does all three things:\n - Marks the given TaskInstance as SUCCESS;\n - Clears downstream TaskInstances in FAILED/UPSTREAM_FAILED state;\n - Set DagRun to QUEUED.\n ", "n_words": 26, "vocab_size": 24, "n_whitespaces": 42, "language": "en" } }, { "id": 207727, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", 
"path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_delete_view_uses_get_deleted_objects", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_delete_view_uses_get_deleted_objects(self):\n \n book = Book.objects.create(name=\"Test Book\")\n response = self.client.get(\n reverse(\"admin2:admin_views_book_delete\", args=(book.pk,))\n )\n # BookAdmin.get_deleted_objects() returns custom text.\n self.assertContains(response, \"a deletable object\")\n\n\n@override_settings(ROOT_URLCONF=\"admin_views.urls\")", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "@override_settings(ROOT_URLCONF=\"admin_views.urls\")", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 74, "n_words": 22, "vocab_size": 21, "complexity": 1, "nloc": 6, "token_counts": 48, "n_ast_nodes": 98, "n_identifiers": 16, "d_id": 52072, "documentation": { "docstring": "The delete view uses ModelAdmin.get_deleted_objects().", "n_words": 5, "vocab_size": 5, "n_whitespaces": 4, "language": "en" } }, { "id": 200169, "commit_id": "00ed353dda66aa068dd43d44018f6a394d1fb0a1", "repo": "sympy", "path": "sympy/physics/quantum/state.py", "file_name": "state.py", "fun_name": "_apply_from_right_to", "commit_message": "Fix the Ket*Op->Op*Ket bug", "code": "def _apply_from_right_to(self, op, **options):\n \n return dispatch_method(self, '_apply_from_right_to', op, **options)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 23, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 23, "n_ast_nodes": 37, "n_identifiers": 5, "d_id": 49559, "documentation": { "docstring": "Apply an Operator to this Ket as Operator*Ket\n\n This method will dispatch to methods having the format::\n\n ``def _apply_from_right_to_OperatorName(op, **options):``\n\n Subclasses should define these methods (one for each OperatorName) to\n teach the Ket how to implement OperatorName*Ket\n\n Parameters\n ==========\n\n op : Operator\n The Operator that is acting on the Ket as op*Ket\n options : dict\n A dict of key/value pairs that control how the operator is applied\n to the Ket.\n ", "n_words": 70, "vocab_size": 51, "n_whitespaces": 170, "language": "en" } }, { "id": 196203, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/subsets.py", "file_name": "subsets.py", "fun_name": "unrank_gray", "commit_message": "Updated import locations", "code": "def unrank_gray(self, rank, superset):\n \n graycode_bitlist = GrayCode.unrank(len(superset), rank)\n return Subset.subset_from_bitlist(superset, graycode_bitlist)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 32, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 3, "token_counts": 32, "n_ast_nodes": 50, "n_identifiers": 10, "d_id": 47703, "documentation": { "docstring": "\n Gets the Gray code ordered subset of the specified rank.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Subset\n >>> Subset.unrank_gray(4, ['a', 'b', 'c']).subset\n ['a', 'b']\n >>> Subset.unrank_gray(0, ['a', 'b', 'c']).subset\n []\n\n See Also\n ========\n\n iterate_graycode, rank_gray\n ", "n_words": 35, "vocab_size": 27, "n_whitespaces": 120, "language": "en" } }, { "id": 322456, "commit_id": "487162262196bead8d9b4c2306f313b8f64edf9b", "repo": "PaddleNLP", "path": 
"paddlenlp/transformers/prophetnet/tokenizer.py", "file_name": "tokenizer.py", "fun_name": "convert_ids_to_tokens", "commit_message": "Add model Prohetnet (#1698)\n\n* add Prohetnet model\r\n\r\n* update prohetnet\r\n\r\n* update format\r\n\r\n* pre commit\r\n\r\n* add prophetnet example\r\n\r\n* update tokenizer.py,run_train.sh,train_prophetnet.py\r\n\r\n* remove evaluate/gigaword/__init__.py\r\n\r\nCo-authored-by: smallv0221 <33639025+smallv0221@users.noreply.github.com>", "code": "def convert_ids_to_tokens(self, ids, skip_special_tokens=False):\r\n \r\n if not isinstance(ids, (list, tuple)):\r\n return self._convert_id_to_token(ids)\r\n tokens = [self._convert_id_to_token(_id) for _id in ids]\r\n if skip_special_tokens:\r\n return [\r\n token for token in tokens\r\n if token not in self.all_special_tokens\r\n ]\r\n return tokens\r\n\r", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 133, "n_words": 35, "vocab_size": 23, "complexity": 6, "nloc": 10, "token_counts": 66, "n_ast_nodes": 100, "n_identifiers": 12, "d_id": 118173, "documentation": { "docstring": "\r\n Converts a single index or a sequence of indices to a token or\r\n a sequence of tokens, using the vocabulary and added tokens.\r\n\r\n Args:\r\n ids (int or List[int]):\r\n The token id (or token ids) to be converted to token(s).\r\n skip_special_tokens (bool, optional):\r\n Whether or not to remove special tokens in the decoding.\r\n Defaults to `False` and we do not remove special tokens.\r\n\r\n Returns:\r\n str or List[str]: The decoded token(s).\r\n ", "n_words": 69, "vocab_size": 46, "n_whitespaces": 183, "language": "en" } }, { "id": 273174, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/layers/preprocessing/index_lookup.py", "file_name": "index_lookup.py", "fun_name": "vocabulary_size", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def vocabulary_size(self):\n \n if tf.executing_eagerly():\n return (\n int(self.lookup_table.size().numpy())\n + self._token_start_index()\n )\n else:\n return self.lookup_table.size() + self._token_start_index()\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 99, "n_words": 15, "vocab_size": 12, "complexity": 2, "nloc": 8, "token_counts": 52, "n_ast_nodes": 90, "n_identifiers": 9, "d_id": 81097, "documentation": { "docstring": "Gets the current size of the layer's vocabulary.\n\n Returns:\n The integer size of the vocabulary, including optional mask and oov indices.\n ", "n_words": 21, "vocab_size": 17, "n_whitespaces": 44, "language": "en" } }, { "id": 267900, "commit_id": "3eb0485dd92c88cc92152d3656d94492db44b183", "repo": "ansible", "path": "test/lib/ansible_test/_internal/commands/integration/filters.py", "file_name": "filters.py", "fun_name": "get_host_target_type_map", "commit_message": "ansible-test - Use more native type hints. 
(#78435)\n\n* ansible-test - Use more native type hints.\r\n\r\nSimple search and replace to switch from comments to native type hints for return types of functions with no arguments.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of simple single-line function annotation type comments to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of single-line function annotation type comments with default values to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nManual conversion of type annotation comments for functions which have pylint directives.", "code": "def get_host_target_type_map() -> t.Dict[t.Type[HostConfig], t.Type[TargetFilter]]:\n \n return get_type_map(TargetFilter, HostConfig)\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 14, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 31, "n_ast_nodes": 48, "n_identifiers": 7, "d_id": 79176, "documentation": { "docstring": "Create and return a mapping of HostConfig types to TargetFilter types.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 215944, "commit_id": "f2a783643de61cac1ff3288b40241e5ce6e1ddc8", "repo": "salt", "path": "salt/modules/aptpkg.py", "file_name": "aptpkg.py", "fun_name": "show", "commit_message": "Update to latest ``pyupgrade`` hook. Stop skipping it on CI.\n\nSigned-off-by: Pedro Algarvio ", "code": "def show(*names, **kwargs):\n \n kwargs = salt.utils.args.clean_kwargs(**kwargs)\n refresh = kwargs.pop(\"refresh\", False)\n filter_ = salt.utils.args.split_input(\n kwargs.pop(\"filter\", []),\n lambda x: str(x) if not isinstance(x, str) else x.lower(),\n )\n if kwargs:\n salt.utils.args.invalid_kwargs(kwargs)\n\n if refresh:\n refresh_db()\n\n if not names:\n return {}\n\n result = _call_apt([\"apt-cache\", \"show\"] + list(names), scope=False)\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 105, "n_words": 43, "vocab_size": 36, "complexity": 10, "nloc": 34, "token_counts": 225, "n_ast_nodes": 200, "n_identifiers": 21, "d_id": 54267, "documentation": { "docstring": "\n .. versionadded:: 2019.2.0\n\n Runs an ``apt-cache show`` on the passed package names, and returns the\n results in a nested dictionary. The top level of the return data will be\n the package name, with each package name mapping to a dictionary of version\n numbers to any additional information returned by ``apt-cache show``.\n\n filter\n An optional comma-separated list (or quoted Python list) of\n case-insensitive keys on which to filter. This allows one to restrict\n the information returned for each package to a smaller selection of\n pertinent items.\n\n refresh : False\n If ``True``, the apt cache will be refreshed first. By default, no\n refresh is performed.\n\n CLI Examples:\n\n .. 
code-block:: bash\n\n salt myminion pkg.show gawk\n salt myminion pkg.show 'nginx-*'\n salt myminion pkg.show 'nginx-*' filter=description,provides\n ", "n_words": 121, "vocab_size": 88, "n_whitespaces": 215, "language": "en" } }, { "id": 200006, "commit_id": "bcb817024d689b65db350a5a565c08f367b899ee", "repo": "sympy", "path": "sympy/physics/wigner.py", "file_name": "wigner.py", "fun_name": "gaunt", "commit_message": "Update wigner.py", "code": "def gaunt(l_1, l_2, l_3, m_1, m_2, m_3, prec=None):\n r\n l_1, l_2, l_3, m_1, m_2, m_3 = [\n as_int(i) for i in (l_1, l_2, l_3, m_1, m_2, m_3)]\n\n if l_1 + l_2 - l_3 < 0:\n return S.Zero\n if l_1 - l_2 + l_3 < 0:\n return S.Zero\n if -l_1 + l_2 + l_3 < 0:\n return S.Zero\n if (m_1 + m_2 + m_3) != 0:\n return S.Zero\n if (abs(m_1) > l_1) or (abs(m_2) > l_2) or (abs(m_3) > l_3):\n return S.Zero\n bigL, remL = divmod(l_1 + l_2 + l_3, 2)\n if remL % 2:\n return S.Zero\n\n imin = max(-l_3 + l_1 + m_2, -l_3 + l_2 - m_1, 0)\n imax = min(l_2 + m_2, l_1 - m_1, l_1 + l_2 - l_3)\n\n _calc_factlist(max(l_1 + l_2 + l_3 + 1, imax + 1))\n\n ressqrt = sqrt((2 * l_1 + 1) * (2 * l_2 + 1) * (2 * l_3 + 1) * \\\n _Factlist[l_1 - m_1] * _Factlist[l_1 + m_1] * _Factlist[l_2 - m_2] * \\\n _Factlist[l_2 + m_2] * _Factlist[l_3 - m_3] * _Factlist[l_3 + m_3] / \\\n (4*pi))\n\n prefac = Integer(_Factlist[bigL] * _Factlist[l_2 - l_1 + l_3] *\n _Factlist[l_1 - l_2 + l_3] * _Factlist[l_1 + l_2 - l_3])/ \\\n _Factlist[2 * bigL + 1]/ \\\n (_Factlist[bigL - l_1] *\n _Factlist[bigL - l_2] * _Factlist[bigL - l_3])\n\n sumres = 0\n for ii in range(int(imin), int(imax) + 1):\n den = _Factlist[ii] * _Factlist[ii + l_3 - l_1 - m_2] * \\\n _Factlist[l_2 + m_2 - ii] * _Factlist[l_1 - ii - m_1] * \\\n _Factlist[ii + l_3 - l_2 + m_1] * _Factlist[l_1 + l_2 - l_3 - ii]\n sumres = sumres + Integer((-1) ** ii) / den\n\n res = ressqrt * prefac * sumres * Integer((-1) ** (bigL + l_3 + m_1 - m_2))\n if prec is not None:\n res = res.n(prec)\n return res\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 517, "n_words": 306, "vocab_size": 106, "complexity": 12, "nloc": 141, "token_counts": 505, "n_ast_nodes": 736, "n_identifiers": 34, "d_id": 49490, "documentation": { "docstring": "\n Calculate the Gaunt coefficient.\n\n Explanation\n ===========\n\n The Gaunt coefficient is defined as the integral over three\n spherical harmonics:\n\n .. 
math::\n\n \\begin{aligned}\n \\operatorname{Gaunt}(l_1,l_2,l_3,m_1,m_2,m_3)\n &=\\int Y_{l_1,m_1}(\\Omega)\n Y_{l_2,m_2}(\\Omega) Y_{l_3,m_3}(\\Omega) \\,d\\Omega \\\\\n &=\\sqrt{\\frac{(2l_1+1)(2l_2+1)(2l_3+1)}{4\\pi}}\n \\operatorname{Wigner3j}(l_1,l_2,l_3,0,0,0)\n \\operatorname{Wigner3j}(l_1,l_2,l_3,m_1,m_2,m_3)\n \\end{aligned}\n\n Parameters\n ==========\n\n l_1, l_2, l_3, m_1, m_2, m_3 :\n Integer.\n prec - precision, default: ``None``.\n Providing a precision can\n drastically speed up the calculation.\n\n Returns\n =======\n\n Rational number times the square root of a rational number\n (if ``prec=None``), or real number if a precision is given.\n\n Examples\n ========\n\n >>> from sympy.physics.wigner import gaunt\n >>> gaunt(1,0,1,1,0,-1)\n -1/(2*sqrt(pi))\n >>> gaunt(1000,1000,1200,9,3,-12).n(64)\n 0.00689500421922113448...\n\n It is an error to use non-integer values for `l` and `m`::\n\n sage: gaunt(1.2,0,1.2,0,0,0)\n Traceback (most recent call last):\n ...\n ValueError: l values must be integer\n sage: gaunt(1,0,1,1.1,0,-1.1)\n Traceback (most recent call last):\n ...\n ValueError: m values must be integer\n\n Notes\n =====\n\n The Gaunt coefficient obeys the following symmetry rules:\n\n - invariant under any permutation of the columns\n\n .. math::\n \\begin{aligned}\n Y(l_1,l_2,l_3,m_1,m_2,m_3)\n &=Y(l_3,l_1,l_2,m_3,m_1,m_2) \\\\\n &=Y(l_2,l_3,l_1,m_2,m_3,m_1) \\\\\n &=Y(l_3,l_2,l_1,m_3,m_2,m_1) \\\\\n &=Y(l_1,l_3,l_2,m_1,m_3,m_2) \\\\\n &=Y(l_2,l_1,l_3,m_2,m_1,m_3)\n \\end{aligned}\n\n - invariant under space inflection, i.e.\n\n .. math::\n Y(l_1,l_2,l_3,m_1,m_2,m_3)\n =Y(l_1,l_2,l_3,-m_1,-m_2,-m_3)\n\n - symmetric with respect to the 72 Regge symmetries as inherited\n for the `3j` symbols [Regge58]_\n\n - zero for `l_1`, `l_2`, `l_3` not fulfilling triangle relation\n\n - zero for violating any one of the conditions: `l_1 \\ge |m_1|`,\n `l_2 \\ge |m_2|`, `l_3 \\ge |m_3|`\n\n - non-zero only for an even sum of the `l_i`, i.e.\n `L = l_1 + l_2 + l_3 = 2n` for `n` in `\\mathbb{N}`\n\n Algorithms\n ==========\n\n This function uses the algorithm of [Liberatodebrito82]_ to\n calculate the value of the Gaunt coefficient exactly. 
Note that\n the formula contains alternating sums over large factorials and is\n therefore unsuitable for finite precision arithmetic and only\n useful for a computer algebra system [Rasch03]_.\n\n Authors\n =======\n\n Jens Rasch (2009-03-24): initial version for Sage.\n ", "n_words": 295, "vocab_size": 202, "n_whitespaces": 669, "language": "en" } }, { "id": 243028, "commit_id": "983a6139d57b37a883344972c6b1de50bb757de0", "repo": "Pillow", "path": "Tests/test_imagecms.py", "file_name": "test_imagecms.py", "fun_name": "test_profile_typesafety", "commit_message": "Check other exception messages", "code": "def test_profile_typesafety():\n \n\n with pytest.raises(TypeError, match=\"Invalid type for Profile\"):\n ImageCms.ImageCmsProfile(0).tobytes()\n with pytest.raises(TypeError, match=\"Invalid type for Profile\"):\n ImageCms.ImageCmsProfile(1).tobytes()\n\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 39, "n_words": 16, "vocab_size": 10, "complexity": 1, "nloc": 5, "token_counts": 49, "n_ast_nodes": 90, "n_identifiers": 8, "d_id": 69956, "documentation": { "docstring": "Profile init type safety\n\n prepatch, these would segfault, postpatch they should emit a typeerror\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 20, "language": "en" } }, { "id": 100516, "commit_id": "71c20252c2e747f692289cdefe80ad0d5a456ea6", "repo": "faceswap", "path": "tools/preview/preview.py", "file_name": "preview.py", "fun_name": "_crop_source_faces", "commit_message": "bugfix: Preview Tool, ensure all config items are written", "code": "def _crop_source_faces(self):\n \n logger.debug(\"Updating source faces\")\n self._faces = {}\n for image in self.source:\n detected_face = image[\"detected_faces\"][0]\n src_img = image[\"image\"]\n detected_face.load_aligned(src_img, size=self._size, centering=self._centering)\n matrix = detected_face.aligned.matrix\n self._faces.setdefault(\"filenames\",\n []).append(os.path.splitext(image[\"filename\"])[0])\n self._faces.setdefault(\"matrix\", []).append(matrix)\n self._faces.setdefault(\"src\", []).append(transform_image(src_img,\n matrix,\n self._size,\n self._padding))\n self.update_source = False\n logger.debug(\"Updated source faces\")\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 396, "n_words": 39, "vocab_size": 33, "complexity": 2, "nloc": 17, "token_counts": 150, "n_ast_nodes": 247, "n_identifiers": 24, "d_id": 19982, "documentation": { "docstring": " Extract the source faces from the source frames, along with their filenames and the\n transformation matrix used to extract the faces. 
", "n_words": 21, "vocab_size": 17, "n_whitespaces": 29, "language": "en" } }, { "id": 13265, "commit_id": "107631e955b21db8a4ddb3bee02130de3650d032", "repo": "jina", "path": "tests/integration/instrumentation/__init__.py", "file_name": "__init__.py", "fun_name": "partition_spans_by_kind", "commit_message": "feat(instrumentation): add OpenTelemetry tracing and metrics with basic configurations (#5175)", "code": "def partition_spans_by_kind(traces):\n \n server_spans = []\n client_spans = []\n internal_spans = []\n\n for trace in traces:\n for span in trace['spans']:\n for tag in span['tags']:\n if 'span.kind' == tag.get('key', ''):\n span_kind = tag.get('value', '')\n if 'server' == span_kind:\n server_spans.append(span)\n elif 'client' == span_kind:\n client_spans.append(span)\n elif 'internal' == span_kind:\n internal_spans.append(span)\n\n return (server_spans, client_spans, internal_spans)\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 247, "n_words": 51, "vocab_size": 35, "complexity": 8, "nloc": 16, "token_counts": 102, "n_ast_nodes": 180, "n_identifiers": 11, "d_id": 2592, "documentation": { "docstring": "Returns three lists each containing spans of kind SpanKind.SERVER, SpanKind.CLIENT and SpandKind.INTERNAL", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 269281, "commit_id": "2d1086447a25d281f9428832d046c473d80ad761", "repo": "keras", "path": "keras/applications/convnext.py", "file_name": "convnext.py", "fun_name": "PreStem", "commit_message": "Corrected preprocess_input docstring in regnet.py and convnext.py", "code": "def PreStem(name=None):\n \n if name is None:\n name = \"prestem\" + str(backend.get_uid(\"prestem\"))\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 24, "n_words": 11, "vocab_size": 10, "complexity": 2, "nloc": 5, "token_counts": 30, "n_ast_nodes": 49, "n_identifiers": 5, "d_id": 80000, "documentation": { "docstring": "Normalizes inputs with ImageNet-1k mean and std.\n\n Args:\n name (str): Name prefix.\n\n Returns:\n A presemt function.\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 25, "language": "en" } }, { "id": 248020, "commit_id": "1783156dbcf4164692e66275d1c29857c434995b", "repo": "synapse", "path": "synapse/storage/databases/main/registration.py", "file_name": "registration.py", "fun_name": "get_users_expiring_soon", "commit_message": "Add some type hints to datastore (#12423)\n\n* Add some type hints to datastore\r\n\r\n* newsfile\r\n\r\n* change `Collection` to `List`\r\n\r\n* refactor return type of `select_users_txn`\r\n\r\n* correct type hint in `stream.py`\r\n\r\n* Remove `Optional` in `select_users_txn`\r\n\r\n* remove not needed return type in `__init__`\r\n\r\n* Revert change in `get_stream_id_for_event_txn`\r\n\r\n* Remove import from `Literal`", "code": "async def get_users_expiring_soon(self) -> List[Tuple[str, int]]:\n \n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 13, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 15, "token_counts": 47, "n_ast_nodes": 27, "n_identifiers": 6, "d_id": 72051, "documentation": { "docstring": "Selects users whose account will expire in the [now, now + renew_at] time\n window (see configuration for account_validity for information on what renew_at\n refers 
to).\n\n Returns:\n A list of tuples, each with a user ID and expiration time (in milliseconds).\n ", "n_words": 40, "vocab_size": 38, "n_whitespaces": 79, "language": "en" } }, { "id": 65962, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/education/utils.py", "file_name": "utils.py", "fun_name": "get_topic_progress", "commit_message": "style: format code with black", "code": "def get_topic_progress(topic, course_name, program):\n\t\n\tstudent = get_current_student()\n\tif not student:\n\t\treturn None\n\tcourse_enrollment = get_or_create_course_enrollment(course_name, program)\n\tprogress = student.get_topic_progress(course_enrollment.name, topic)\n\tif not progress:\n\t\treturn None\n\tcount = sum([activity[\"is_complete\"] for activity in progress])\n\tif count == 0:\n\t\treturn {\"completed\": False, \"started\": False}\n\telif count == len(progress):\n\t\treturn {\"completed\": True, \"started\": True}\n\telif count < len(progress):\n\t\treturn {\"completed\": False, \"started\": True}\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 44, "n_words": 59, "vocab_size": 36, "complexity": 7, "nloc": 15, "token_counts": 113, "n_ast_nodes": 188, "n_identifiers": 14, "d_id": 14072, "documentation": { "docstring": "\n\tReturn the porgress of a course in a program as well as the content to continue from.\n\t :param topic_name:\n\t :param course_name:\n\t", "n_words": 21, "vocab_size": 17, "n_whitespaces": 34, "language": "en" } }, { "id": 27099, "commit_id": "b5e414c98a1535d287721c859994424cf0eea081", "repo": "saleor", "path": "saleor/plugins/webhook/tests/subscription_webhooks/fixtures.py", "file_name": "fixtures.py", "fun_name": "subscription_app_deleted_webhook", "commit_message": "New events related to apps changes. 
(#9698)\n\n* New events related to apps changes.\r\n\r\n* Schema update after rebase\r\n\r\n* CHANGELOG.md update\r\n\r\n* New events description fix\r\n\r\n* Missing app event added to CHANGELOG.md", "code": "def subscription_app_deleted_webhook(subscription_webhook):\n return subscription_webhook(\n APP_DELETED_SUBSCRIPTION_QUERY, WebhookEventAsyncType.APP_DELETED\n )\n\n\nAPP_STATUS_CHANGED_SUBSCRIPTION_QUERY = (\n APP_DETAILS_FRAGMENT\n + \n)\n\n\n@pytest.fixture", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 30, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 4, "token_counts": 14, "n_ast_nodes": 42, "n_identifiers": 9, "d_id": 5092, "documentation": { "docstring": "\n subscription{\n event{\n ...on AppStatusChanged{\n app{\n ...AppDetails\n }\n }\n }\n }\n", "n_words": 10, "vocab_size": 7, "n_whitespaces": 69, "language": "en" } }, { "id": 291227, "commit_id": "aa02a53ac667d08c66a536baf139993bcfe4d7d6", "repo": "core", "path": "homeassistant/helpers/template.py", "file_name": "template.py", "fun_name": "entity_id", "commit_message": "Add type hints to template states (#82582)\n\n* Add type hints to template states\r\n\r\n* Undo rename\r\n\r\n* Remove invalid mypy issue link", "code": "def entity_id(self) -> str: # type: ignore[override]\n \n return self._entity_id\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 24, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 6, "token_counts": 12, "n_ast_nodes": 23, "n_identifiers": 4, "d_id": 90337, "documentation": { "docstring": "Wrap State.entity_id.\n\n Intentionally does not collect state\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 21, "language": "en" } }, { "id": 262049, "commit_id": "176b712c1a40cf630da9a77f1826836723c40fde", "repo": "TTS", "path": "TTS/tts/datasets/dataset.py", "file_name": "dataset.py", "fun_name": "compute_or_load", "commit_message": "Refactor TTSDataset ⚡️", "code": "def compute_or_load(self, wav_file):\n \n pitch_file = self.create_pitch_file_path(wav_file, self.cache_path)\n if not os.path.exists(pitch_file):\n pitch = self._compute_and_save_pitch(self.ap, wav_file, pitch_file)\n else:\n pitch = np.load(pitch_file)\n return pitch.astype(np.float32)\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 78, "n_words": 21, "vocab_size": 18, "complexity": 2, "nloc": 7, "token_counts": 64, "n_ast_nodes": 102, "n_identifiers": 16, "d_id": 77108, "documentation": { "docstring": "\n compute pitch and return a numpy array of pitch values\n ", "n_words": 10, "vocab_size": 9, "n_whitespaces": 25, "language": "en" } }, { "id": 47522, "commit_id": "49e336ae0302b386a2f47269a6d13988382d975f", "repo": "airflow", "path": "tests/jobs/test_scheduler_job.py", "file_name": "test_scheduler_job.py", "fun_name": "test_enqueue_task_instances_sets_ti_state_to_None_if_dagrun_in_finish_state", "commit_message": "Replace usage of `DummyOperator` with `EmptyOperator` (#22974)\n\n* Replace usage of `DummyOperator` with `EmptyOperator`", "code": "def test_enqueue_task_instances_sets_ti_state_to_None_if_dagrun_in_finish_state(self, state, dag_maker):\n \n dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'\n task_id_1 = 'dummy'\n session = settings.Session()\n with dag_maker(dag_id=dag_id, 
start_date=DEFAULT_DATE, session=session):\n task1 = EmptyOperator(task_id=task_id_1)\n\n self.scheduler_job = SchedulerJob(subdir=os.devnull)\n\n dr1 = dag_maker.create_dagrun(state=state)\n ti = dr1.get_task_instance(task1.task_id, session)\n ti.state = State.SCHEDULED\n session.merge(ti)\n session.commit()\n\n with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:\n self.scheduler_job._enqueue_task_instances_with_queued_state([ti])\n ti.refresh_from_db()\n assert ti.state == State.NONE\n mock_queue_command.assert_not_called()\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 174, "n_words": 47, "vocab_size": 38, "complexity": 1, "nloc": 17, "token_counts": 139, "n_ast_nodes": 233, "n_identifiers": 35, "d_id": 9144, "documentation": { "docstring": "This tests that task instances whose dagrun is in finished state are not queued", "n_words": 14, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 261005, "commit_id": "2710a9e7eefd2088ce35fd2fb6651d5f97e5ef8b", "repo": "scikit-learn", "path": "sklearn/discriminant_analysis.py", "file_name": "discriminant_analysis.py", "fun_name": "_class_means", "commit_message": "ENH Adds Array API support to LinearDiscriminantAnalysis (#22554)\n\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Julien Jerphanion ", "code": "def _class_means(X, y):\n \n xp, is_array_api = get_namespace(X)\n classes, y = xp.unique_inverse(y)\n means = xp.zeros(shape=(classes.shape[0], X.shape[1]))\n\n if is_array_api:\n for i in range(classes.shape[0]):\n means[i, :] = xp.mean(X[y == i], axis=0)\n else:\n # TODO: Explore the choice of using bincount + add.at as it seems sub optimal\n # from a performance-wise\n cnt = np.bincount(y)\n np.add.at(means, y, X)\n means /= cnt[:, None]\n return means\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 134, "n_words": 60, "vocab_size": 53, "complexity": 3, "nloc": 12, "token_counts": 121, "n_ast_nodes": 186, "n_identifiers": 20, "d_id": 76620, "documentation": { "docstring": "Compute class means.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Input data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_targets)\n Target values.\n\n Returns\n -------\n means : array-like of shape (n_classes, n_features)\n Class means.\n ", "n_words": 36, "vocab_size": 25, "n_whitespaces": 81, "language": "en" } }, { "id": 176972, "commit_id": "b8d1438e4ea3d8190c650110b3b7d7c141224842", "repo": "networkx", "path": "networkx/algorithms/centrality/degree_alg.py", "file_name": "degree_alg.py", "fun_name": "degree_centrality", "commit_message": "added examples to degree_alg.py (#5644)\n\n* added example on degree centrality\r\n\r\n* added example on in degree centrality\r\n\r\n* added example on out degree centrality\r\n\r\n* added opening braces", "code": "def degree_centrality(G):\n \n if len(G) <= 1:\n return {n: 1 for n in G}\n\n s = 1.0 / (len(G) - 1.0)\n centrality = {n: d * s for n, d in G.degree()}\n return centrality\n\n\n@not_implemented_for(\"undirected\")", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@not_implemented_for(\"undirected\")", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 55, "n_words": 34, "vocab_size": 26, "complexity": 4, "nloc": 6, "token_counts": 61, "n_ast_nodes": 102, "n_identifiers": 9, "d_id": 
42200, "documentation": { "docstring": "Compute the degree centrality for nodes.\n\n The degree centrality for a node v is the fraction of nodes it\n is connected to.\n\n Parameters\n ----------\n G : graph\n A networkx graph\n\n Returns\n -------\n nodes : dictionary\n Dictionary of nodes with degree centrality as the value.\n\n Examples\n --------\n >>> G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 2), (1, 3)])\n >>> nx.degree_centrality(G)\n {0: 1.0, 1: 1.0, 2: 0.6666666666666666, 3: 0.6666666666666666}\n\n See Also\n --------\n betweenness_centrality, load_centrality, eigenvector_centrality\n\n Notes\n -----\n The degree centrality values are normalized by dividing by the maximum\n possible degree in a simple graph n-1 where n is the number of nodes in G.\n\n For multigraphs or graphs with self loops the maximum degree might\n be higher than n-1 and values of degree centrality greater than 1\n are possible.\n ", "n_words": 129, "vocab_size": 85, "n_whitespaces": 212, "language": "en" } }, { "id": 276868, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/generic_utils.py", "file_name": "generic_utils.py", "fun_name": "get_registered_name", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def get_registered_name(obj):\n \n if obj in _GLOBAL_CUSTOM_NAMES:\n return _GLOBAL_CUSTOM_NAMES[obj]\n else:\n return obj.__name__\n\n\n@tf_contextlib.contextmanager", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@tf_contextlib.contextmanager", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 34, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 22, "n_ast_nodes": 45, "n_identifiers": 6, "d_id": 81767, "documentation": { "docstring": "Returns the name registered to an object within the Keras framework.\n\n This function is part of the Keras serialization and deserialization\n framework. 
It maps objects to the string names associated with those objects\n for serialization/deserialization.\n\n Args:\n obj: The object to look up.\n\n Returns:\n The name associated with the object, or the default Python name if the\n object is not registered.\n ", "n_words": 60, "vocab_size": 41, "n_whitespaces": 95, "language": "en" } }, { "id": 180689, "commit_id": "b1dfc9a172440e9c9736566f326ba339ff559604", "repo": "gradio", "path": "gradio/event_queue.py", "file_name": "event_queue.py", "fun_name": "notify_clients", "commit_message": "Release new queue beta (#1969)\n\n* queue-refactor-backend (#1489)\r\n\r\n* queue-refactor-backend\r\n\r\n- create a template for the new design\r\n\r\n* queue-refactor-backend\r\n\r\n- clean after the old queue\r\n\r\n* queue-refactor-backend\r\n\r\n- add basic test to websocket endpoint\r\n\r\n* queue-refactor-backend\r\n\r\n- small fix\r\n\r\n* queue-refactor-backend\r\n\r\n- debugs&fixes&finalizations\r\n- test the flow with postman\r\n\r\n* queue-refactor-backend\r\n\r\n- tweaks on websocket closing\r\n\r\n* queue-refactor-backend\r\n\r\n- cleanup\r\n\r\n* queue-refactor-backend\r\n\r\n- cleanup & tweaks\r\n\r\n* queue-refactor-backend\r\n\r\n- cleanup & tweaks\r\n\r\n* queue-refactor-backend\r\n\r\n- cleanup & tweaks\r\n- correct the exception handling\r\n\r\n* queue-refactor-backend\r\n\r\n- add websockets dependency\r\n\r\n* queue-refactor-backend\r\n- reformat\r\n\r\n* queue-refactor-backend\r\n- add single event test\r\n\r\n* queue-refactor-backend\r\n- tweaks\r\n- remove outdated tests\r\n\r\n* queue-refactor-backend\r\n- reformat\r\n\r\n* queue-refactor-backend\r\n- reformat\r\n\r\n* queue-refactor-backend\r\n- reformat\r\n\r\n* queue-refactor-backend\r\n- add Queue configurations to Blocks.launch()\r\n- add live_queue_update to send estimations whenever a job gets fetched from the Queue\r\n\r\n* queue-refactor-backend\r\n- add Queue configurations to Blocks.launch()\r\n- add live_queue_update to send estimations whenever a job gets fetched from the Queue\r\n\r\n* queue-refactor-backend\r\n- tweaks\r\n\r\n* queue-refactor-backend\r\n- make SLEEP_WHEN_FREE shorter\r\n\r\nCo-authored-by: Ali Abid \r\n\r\n* Add estimation parameters to queue (#1889)\r\n\r\n* - tweaks on Estimation\r\n\r\n* version\r\n\r\n* Revert \"version\"\r\n\r\nThis reverts commit bd1f4d7bfe3658a4967b93126859a62a511a70e2.\r\n\r\n* some fix and tweaks\r\n\r\n* implement queue frontend (#1950)\r\n\r\n* implement queue frontend\r\n\r\n* fix types\r\n\r\n* fix ws endpoint in build mode\r\n\r\n* cleanup\r\n\r\n* Queue tweaks (#1909)\r\n\r\n* tweaks on estimation payload\r\n\r\n* Queue keep ws connections open (#1910)\r\n\r\n* 1. keep ws connections open after the event process is completed\r\n2. do not send estimations periodically if live queue updates is open\r\n\r\n* fix calculation\r\n\r\n* 1. 
tweaks on event_queue\r\n\r\n* fix issue - create new ws for each request\r\n\r\n* format\r\n\r\n* fix\r\n\r\n* fix tests\r\n\r\n* fix tests\r\n\r\n* tets\r\n\r\n* test\r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* change'\r\n\r\n* wtf\r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* file perms\r\n\r\n* Release queue beta v1 (#1971)\r\n\r\n* - release the new queue\r\n\r\n* - bypass the issue in the tests\r\n- rewrite the lost part in the codebase\r\n\r\n* - add concurrent queue example (#1978)\r\n\r\n* rank_eta calc\r\n\r\n* Queue fixes (#1981)\r\n\r\n* change\r\n\r\n* format\r\n\r\n* - comment out queue tests as they dont work well\r\n\r\n* - reformat\r\n\r\n* Update gradio/event_queue.py\r\n\r\nCo-authored-by: Ömer Faruk Özdemir \r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* change\r\n\r\n* weird fix\r\n\r\nCo-authored-by: Ömer Faruk Özdemir \r\n\r\n* release-queue-v3 (#1988)\r\n\r\n* Fix frontend queuing to target secure WSS (#1996)\r\n\r\n* change\r\n\r\n* format\r\n\r\n* changes\r\n\r\n* queue-concurrency-tweaks (#2002)\r\n\r\n1. make gather_data and broadcast_estimation sequential instead of concurrent because they were deleting elements at the same time and raising expections which was lowering the performance\r\n\r\n* Update Queue API, documentation (#2026)\r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* fixes\r\n\r\n* changes\r\n\r\n* change\r\n\r\n* fix\r\n\r\nCo-authored-by: Ömer Faruk Özdemir \r\nCo-authored-by: pngwn ", "code": "async def notify_clients(cls) -> None:\n \n while not cls.STOP:\n await asyncio.sleep(cls.UPDATE_INTERVALS)\n if cls.EVENT_QUEUE:\n await cls.broadcast_estimations()\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 65, "n_words": 14, "vocab_size": 13, "complexity": 3, "nloc": 8, "token_counts": 34, "n_ast_nodes": 61, "n_identifiers": 8, "d_id": 43215, "documentation": { "docstring": "\n Notify clients about events statuses in the queue periodically.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 209602, "commit_id": "495b21f2867e48286767085c8cf2918e4092e9dc", "repo": "scapy", "path": "scapy/contrib/automotive/xcp/scanner.py", "file_name": "scanner.py", "fun_name": "scan_with_get_slave_id", "commit_message": "Add Automotive Logger for all debug outputs of the automotive layer", "code": "def scan_with_get_slave_id(self):\n # type: () -> List[XCPScannerResult]\n \n log_automotive.info(\"Start scan with GetSlaveId id in range: \" + str(\n self.id_range))\n\n for identifier in self.id_range:\n ids = self._send_get_slave_id(identifier)\n if len(ids) > 0:\n return ids\n\n return []\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 116, "n_words": 33, "vocab_size": 30, "complexity": 3, "nloc": 8, "token_counts": 47, "n_ast_nodes": 81, "n_identifiers": 10, "d_id": 52744, "documentation": { "docstring": "Starts the scan for XCP devices on CAN with the transport specific\n GetSlaveId Message", "n_words": 14, "vocab_size": 13, "n_whitespaces": 20, "language": "en" } }, { "id": 286656, "commit_id": "963ca9b2b924d0514e0e65243dc8d9d7af023ad1", "repo": "OpenBBTerminal", "path": "openbb_terminal/core/scripts/sdk_audit.py", "file_name": "sdk_audit.py", "fun_name": "all_view_models", "commit_message": "Audit SDK and View/Model functions (#3384)\n\n* Initial commit\r\n\r\n* Finalized functionality\r\n\r\n* update script\r\n\r\n* Allow using 
it without forecasting\r\n\r\n* Update gitignore\r\n\r\n* Update `sdk_audit.py`\r\n\r\n* Fixed issues, found more\r\n\r\n* Added fix for helper functions, and column for SDK type\r\n\r\n* Checked one more thing\r\n\r\n* Moved file\r\n\r\n* Move files ending with models/views\r\n\r\n* Added fix of name\r\n\r\n* Added file path fixes\r\n\r\n* Patch to fix sdk_audit for windows\r\n\r\n* fix\r\n\r\nCo-authored-by: Chavithra PARANA ", "code": "def all_view_models() -> List[Path]:\n \n\n file_list = []\n all_files = os.walk(base_path)\n for root, _, files in all_files:\n for filename in files:\n if filename.endswith(\".py\"):\n if \"view\" in filename or \"model\" in filename:\n file_list.append(f\"{root}/{filename}\")\n clean_list = set(file_list)\n return [Path(x) for x in clean_list]\n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 110, "n_words": 40, "vocab_size": 30, "complexity": 7, "nloc": 17, "token_counts": 77, "n_ast_nodes": 137, "n_identifiers": 17, "d_id": 85961, "documentation": { "docstring": "Geta all files with 'view' or 'model' in the name.\n\n Returns:\n ----------\n List[Path]\n All paths in openbb_terminal with 'view' or 'model' in the name\n ", "n_words": 24, "vocab_size": 17, "n_whitespaces": 43, "language": "en" } }, { "id": 320770, "commit_id": "a20bb67a878b2e68abf8268c1b0a27f018d01352", "repo": "qutebrowser", "path": "qutebrowser/completion/completiondelegate.py", "file_name": "completiondelegate.py", "fun_name": "_get_textdoc", "commit_message": "mypy: Upgrade to PyQt5-stubs 5.15.6.0\n\nFor some unknown reason, those new stubs cause a *lot* of things now to be\nchecked by mypy which formerly probably got skipped due to Any being implied\nsomewhere.\n\nThe stubs themselves mainly improved, with a couple of regressions too.\n\nIn total, there were some 337 (!) new mypy errors. This commit fixes almost all\nof them, and the next commit improves a fix to get things down to 0 errors\nagain.\n\nOverview of the changes:\n\n==== qutebrowser/app.py\n\n- Drop type ignore due to improved stubs.\n\n==== qutebrowser/browser/browsertab.py\n\n- Specify the type of _widget members more closely than just QWidget.\n This is debatable: I suppose the abstract stuff shouldn't need to know\n anything about the concrete backends at all. But it seems like we cut some\n corners when initially implementing things, and put some code in browsertab.py\n just because the APIs of both backends happened to be compatible. Perhaps\n something to reconsider once we drop QtWebKit and hopefully implement a dummy\n backend.\n\n- Add an additional assertion in AbstractAction.run_string. This is already\n covered by the isinstance(member, self.action_base) above it, but that's too\n dynamic for mypy to understand.\n\n- Fix the return type of AbstractScroller.pos_px, which is a QPoint (with x\n and y components), not a single int.\n\n- Fix the return type of AbstractScroller.pos_perc, which is a Tuple (with x\n and y components), not a single int.\n\n- Fix the argument types of AbstractScroller.to_perc, as it's possible to pass\n fractional percentages too.\n\n- Specify the type for AbstractHistoryPrivate._history. 
See above (_widget) re\n this being debatable.\n\n- Fix the return type of AbstractTabPrivate.event_target(), which can be None\n (see #3888).\n\n- Fix the return type of AbstractTabPrivate.run_js_sync, which is Any (the JS\n return value), not None.\n\n- Fix the argument type for AbstractTabPrivate.toggle_inspector: position can\n be None to use the last used position.\n\n- Declare the type of sub-objects of AbstractTab.\n\n- Fix the return value of AbstractTab.icon(), which is the QIcon, not None.\n\n==== qutebrowser/browser/commands.py\n\n- Make sure the active window is a MainWindow (with a .win_id attribute).\n\n==== qutebrowser/browser/downloadview.py\n\n- Add _model() which makes sure that self.model() is a DownloadModel, not None\n or any other model. This is needed because other methods access a variety of\n custom attributes on it, e.g. last_index().\n\n==== qutebrowser/browser/greasemonkey.py\n\n- Add an ignore for AbstractDownload.requested_url which we patch onto the\n downloads. Probably would be nicer to add it as a proper attribute which always\n gets set by the DownloadManager.\n\n==== qutebrowser/browser/hints.py\n\n- Remove type ignores for QUrl.toString().\n- Add a new type ignore for combining different URL flags (which works, but is\n not exactly type safe... still probably a regression in the stubs).\n- Make sure the things we get back from self._get_keyparser are what we actually\n expect. Probably should introduce a TypedDict (and/or overloads for\n _get_keyparser with typing.Literal) to teach mypy about the exact return value.\n See #7098.\n This is needed because we access Hint/NormalKeyParser-specific attributes such\n as .set_inhibited_timout() or .update_bindings().\n\n==== qutebrowser/browser/inspector.py\n\n- Similar changes than in browsertab.py to make some types where we share API\n (e.g. .setPage()) more concrete. Didn't work out unfortunately, see next\n commit.\n\n==== qutebrowser/browser/network/pac.py\n\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/browser/qtnetworkdownloads.py\n\n- Make sure that downloads is a qtnetworkdownloads.DownloadItem (rather than an\n AbstractDownload), so that we can call ._uses_nam() on it.\n\n==== qutebrowser/browser/qutescheme.py\n\n- Remove now unneeded type ignore for QUrl flags.\n\n==== qutebrowser/browser/urlmarks.py\n\n- Specify the type of UrlMarkManager._lineparser, as those only get initialized\n in _init_lineparser of subclasses, so mypy doesn't know it's supposed to exist.\n\n==== qutebrowser/browser/webelem.py\n\n- New casts to turn single KeyboardModifier (enum) entries into\n KeyboardModifiers (flags). 
Might not be needed anymore with Qt 6.\n- With that, casting the final value is now unneeded.\n\n==== qutebrowser/browser/webengine/notification.py\n\n- Remove now unneeded type ignore for signal.\n- Make sure the self.sender() we get in HerbeNotificationAdapter._on_finished()\n is a QProcess, not just any QObject.\n\n==== qutebrowser/browser/webengine/webenginedownloads.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/browser/webengine/webengineelem.py\n\n- Specify the type of WebEngineElement._tab.\n- Remove now unneeded type ignore for mixed flags.\n\n==== qutebrowser/browser/webengine/webengineinspector.py\n\n- See changes to inspector.py and next commit.\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/browser/webengine/webenginequtescheme.py\n\n- Remove now unneeded type ignore for mixed flags.\n\n==== qutebrowser/browser/webengine/webenginesettings.py\n\n- Ignore access of .setter attribute which we patch onto QWebEngineProfile.\n Would be nice to have a subclass or wrapper-class instead.\n\n==== qutebrowser/browser/webengine/webenginetab.py\n\n- Specified the type of _widget members more closely than just QWidget.\n See browsertab.py changes for details.\n- Remove some now-unneeded type ignores for creating FindFlags.\n- Specify more concrete types for WebEngineTab members where we actually need to\n access WebEngine-specific attributes.\n- Make sure the page we get is our custom WebEnginePage subclass, not just any\n QWebEnginePage. This is needed because we access custom attributes on it.\n\n==== qutebrowser/browser/webengine/webview.py\n\n- Make sure the page we get is our custom WebEnginePage subclass, not just any\n QWebEnginePage. This is needed because we access custom attributes on it.\n\n==== qutebrowser/browser/webkit/network/networkreply.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/browser/webkit/webkitinspector.py\n\n- See changes to inspector.py and next commit.\n\n==== qutebrowser/browser/webkit/webkittab.py\n\n- Specify the type of _widget members more closely than just QWidget.\n See browsertab.py changes for details.\n- Add a type ignore for WebKitAction because our workaround needs to\n treat them as ints (which is allowed by PyQt, even if not type-safe).\n- Add new ignores for findText calls: The text is a QString and can be None; the\n flags are valid despite mypy thinking they aren't (stubs regression?).\n- Specify the type for WebKitHistoryPrivate._history, because we access\n WebKit-specific attributes. See above (_widget) re this being debatable.\n- Make mypy aware that .currentFrame() and .frameAt() can return None (stubs\n regression?).\n- Make sure the .page() and .page().networkAccessManager() are our subclasses\n rather than the more generic QtWebKit objects, as we use custom attributes.\n- Add new type ignores for signals (stubs regression!)\n\n==== qutebrowser/browser/webkit/webpage.py\n\n- Make sure the .networkAccessManager() is our subclass rather than the more\n generic QtWebKit object, as we use custom attributes.\n- Replace a cast by a type ignore. 
The cast didn't work anymore.\n\n==== qutebrowser/browser/webkit/webview.py\n\n- Make sure the .page() is our subclass rather than the more generic QtWebKit\n object, as we use custom attributes.\n\n==== qutebrowser/commands/userscripts.py\n\n- Remove now unneeded type ignore for signal.\n\n==== qutebrowser/completion/completer.py\n\n- Add a new _completion() getter (which ensures it actually gets the completion\n view) rather than accessing the .parent() directly (which could be any QObject).\n\n==== qutebrowser/completion/completiondelegate.py\n\n- Make sure self.parent() is a CompletionView (no helper method as there is only\n one instance).\n- Remove a now-unneeded type ignore for adding QSizes.\n\n==== qutebrowser/completion/completionwidget.py\n\n- Add a ._model() getter which ensures that we get a CompletionModel (with\n custom attributes) rather than Qt's .model() which can be any QAbstractItemModel\n (or None).\n- Removed a now-unneeded type ignore for OR-ing flags.\n\n==== qutebrowser/completion/models/completionmodel.py\n\n- Remove now unneeded type ignores for signals.\n- Ignore a complaint about .set_pattern() not being defined. Completion\n categories don't share any common parent class, so it would be good to introduce\n a typing.Protocol for this. See #7098.\n\n==== qutebrowser/components/misccommands.py\n\n- Removed a now-unneeded type ignore for OR-ing flags.\n\n==== qutebrowser/components/readlinecommands.py\n\n- Make sure QApplication.instance() is a QApplication (and not just a\n QCoreApplication). This includes the former \"not None\" check.\n\n==== qutebrowser/components/scrollcommands.py\n\n- Add basic annotation for \"funcs\" dict. Could have a callable protocol to\n specify it needs a count kwarg, see #7098.\n\n==== qutebrowser/config/stylesheet.py\n\n- Correctly specify that stylesheet apply to QWidgets, not any QObject.\n- Ignore an attr-defined for obj.STYLESHEET. Perhaps could somehow teach mypy\n about this with overloads and protocols (stylesheet for set_register being None\n => STYLESHEET needs to be defined, otherwise anything goes), but perhaps not\n worth the troble. See #7098.\n\n==== qutebrowser/keyinput/keyutils.py\n\n- Remove some now-unneeded type ignores and add a cast for using a single enum\n value as flags. Might need to look at this again with Qt 6 support.\n\n==== qutebrowser/keyinput/modeman.py\n\n- Add a FIXME for using a TypedDict, see comments for hints.py above.\n\n==== qutebrowser/mainwindow/mainwindow.py\n\n- Remove now-unneeded type ignores for calling with OR-ed flags.\n- Improve where we cast from WindowType to WindowFlags, no int needed\n- Use new .tab_bar() getter, see below.\n\n==== qutebrowser/mainwindow/prompt.py\n\n- Remove now-unneeded type ignores for calling with OR-ed flags.\n\n==== qutebrowser/mainwindow/statusbar/bar.py\n\n- Adjust type ignores around @pyqtProperty. The fact one is still needed seems\n like a stub regression.\n\n==== qutebrowser/mainwindow/statusbar/command.py\n\n- Fix type for setText() override (from QLineEdit): text can be None\n (QString in C++).\n\n==== qutebrowser/mainwindow/statusbar/url.py\n\n- Adjust type ignores around @pyqtProperty. The fact one is still needed seems\n like a stub regression.\n\n==== qutebrowser/mainwindow/tabbedbrowser.py\n\n- Specify that TabDeque manages browser tabs, not any QWidgets. 
It accesses\n AbstractTab-specific attributes.\n- Make sure that the .tabBar() we get is a tabwidget.TabBar, as we access\n .maybe_hide.\n- Fix the annotations for stored marks: Scroll positions are a QPoint, not int.\n- Add _current_tab() and _tab_by_idx() wrappers for .currentWidget() and\n .widget(), which ensures that the return values are valid AbstractTabs (or None\n for _tab_by_idx). This is needed because we access AbstractTab-specific\n attributes.\n- For some places, where the tab can be None, continue using .currentTab() but\n add asserts.\n- Remove some now-unneeded [unreachable] ignores, as mypy knows about the None\n possibility now.\n\n==== qutebrowser/mainwindow/tabwidget.py\n\n- Add new tab_bar() and _tab_by_idx() helpers which check that the .tabBar() and\n .widget() are of type TabBar and AbstractTab, respectively.\n- Add additional assertions where we expect ._tab_by_idx() to never be None.\n- Remove dead code in get_tab_fields for handling a None y scroll position. I\n was unable to find any place in the code where this could be set to None.\n- Remove some now-unneeded type ignores and casts, as mypy now knows that\n _type_by_idx() could be None.\n- Work around a strange instance where mypy complains about not being able to\n find the type of TabBar.drag_in_progress from TabWidget._toggle_visibility,\n despite it clearly being shown as a bool *inside* that class without any\n annotation.\n- Add a ._tab_widget() getter in TabBar which ensures that the .parent() is in\n fact a TabWidget.\n\n==== qutebrowser/misc/crashsignal.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/misc/editor.py\n\n- Remove now unneeded type ignores for signals.\n\n==== qutebrowser/misc/ipc.py\n\n- Remove now unneeded type ignores for signals.\n- Add new type ignores for .error() which is both a signal and a getter\n (stub regression?). Won't be relevant for Qt 6 anymore, as the signal was\n renamed to errorOccurred in 5.15.\n\n==== qutebrowser/misc/objects.py\n\n- Make sure mypy knows that objects.app is our custom Application (with custom\n attributes) rather than any QApplication.\n\n==== qutebrowser/utils/objreg.py\n\n- Ignore attr-defined for .win_id attributes. Maybe could add a typing.Protocol,\n but ideally, the whole objreg stuff should die one day anyways.\n\n==== tests/unit/completion/test_completer.py\n\n- Make CompletionWidgetStub inherit from CompletionView so that it passes the\n new isinstance() asserts in completer.py (see above).", "code": "def _get_textdoc(self, index):\n \n assert self._opt is not None\n # FIXME we probably should do eliding here. 
See\n # qcommonstyle.cpp:viewItemDrawText\n # https://github.com/qutebrowser/qutebrowser/issues/118\n text_option = QTextOption()\n if self._opt.features & QStyleOptionViewItem.WrapText:\n text_option.setWrapMode(QTextOption.WordWrap)\n else:\n text_option.setWrapMode(QTextOption.ManualWrap)\n text_option.setTextDirection(self._opt.direction)\n text_option.setAlignment(QStyle.visualAlignment(\n self._opt.direction, self._opt.displayAlignment))\n\n if self._doc is not None:\n self._doc.deleteLater()\n self._doc = QTextDocument(self)\n self._doc.setDefaultFont(self._opt.font)\n self._doc.setDefaultTextOption(text_option)\n self._doc.setDocumentMargin(2)\n\n if index.parent().isValid():\n view = self.parent()\n assert isinstance(view, completionwidget.CompletionView), view\n pattern = view.pattern\n columns_to_filter = index.model().columns_to_filter(index)\n if index.column() in columns_to_filter and pattern:\n if self._opt.state & QStyle.State_Selected:\n color = config.val.colors.completion.item.selected.match.fg\n else:\n color = config.val.colors.completion.match.fg\n _Highlighter(self._doc, pattern, color)\n self._doc.setPlainText(self._opt.text)\n else:\n self._doc.setHtml(\n '{}'.format(\n html.escape(config.val.fonts.completion.category),\n html.escape(self._opt.text)))\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 466, "n_words": 90, "vocab_size": 68, "complexity": 7, "nloc": 33, "token_counts": 292, "n_ast_nodes": 469, "n_identifiers": 55, "d_id": 117338, "documentation": { "docstring": "Create the QTextDocument of an item.\n\n Args:\n index: The QModelIndex of the item to draw.\n ", "n_words": 15, "vocab_size": 13, "n_whitespaces": 40, "language": "en" } }, { "id": 9851, "commit_id": "933415bfa1f9eb89f935037014dfed816eb9815d", "repo": "jina", "path": "jina/peapods/peas/__init__.py", "file_name": "__init__.py", "fun_name": "role", "commit_message": "feat: star routing (#3900)\n\n* feat(proto): adjust proto for star routing (#3844)\r\n\r\n* feat(proto): adjust proto for star routing\r\n\r\n* feat(proto): generate proto files\r\n\r\n* feat(grpc): refactor grpclet interface (#3846)\r\n\r\n* feat: refactor connection pool for star routing (#3872)\r\n\r\n* feat(k8s): add more labels to k8s deployments\r\n\r\n* feat(network): refactor connection pool\r\n\r\n* feat(network): refactor k8s pool\r\n\r\n* feat: star routing graph gateway (#3877)\r\n\r\n* feat: star routing - refactor grpc data runtime (#3887)\r\n\r\n* feat(runtimes): refactor grpc dataruntime\r\n\r\n* fix(tests): adapt worker runtime tests\r\n\r\n* fix(import): fix import\r\n\r\n* feat(proto): enable sending multiple lists (#3891)\r\n\r\n* feat: star routing gateway (#3893)\r\n\r\n* feat: star routing gateway all protocols (#3897)\r\n\r\n* test: add streaming and prefetch tests (#3901)\r\n\r\n* feat(head): new head runtime for star routing (#3899)\r\n\r\n* feat(head): new head runtime\r\n\r\n* feat(head): new head runtime\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(network): improve proto comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(worker): merge docs in worker runtime (#3905)\r\n\r\n* feat(worker): merge docs in worker runtime\r\n\r\n* feat(tests): assert after clean up\r\n\r\n* feat(tests): star routing runtime integration tests (#3908)\r\n\r\n* fix(tests): fix integration tests\r\n\r\n* test: test runtimes fast slow request (#3910)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table 
(#3915)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(zmq): adapt comment in dependency list\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix type tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: add test gateway to worker connection (#3921)\r\n\r\n* feat(pea): adapt peas for star routing (#3918)\r\n\r\n* feat(pea): adapt peas for star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(pea): add tests\r\n\r\n* feat(tests): add failing head pea test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(tests): integration tests for peas (#3923)\r\n\r\n* feat(tests): integration tests for peas\r\n\r\n* feat(pea): remove _inner_pea function\r\n\r\n* feat: star routing container pea (#3922)\r\n\r\n* test: rescue tests (#3942)\r\n\r\n* fix: fix streaming tests (#3945)\r\n\r\n* refactor: move docker run to run (#3948)\r\n\r\n* feat: star routing pods (#3940)\r\n\r\n* feat(pod): adapt pods for star routing\r\n\r\n* feat(pods): adapt basepod to star routing\r\n\r\n* feat(pod): merge pod and compound pod\r\n\r\n* feat(tests): fix tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(test): add container pea int test\r\n\r\n* feat(ci): remove more unnecessary tests\r\n\r\n* fix(tests): remove jinad runtime\r\n\r\n* feat(ci): remove latency tracking\r\n\r\n* fix(ci): fix ci def\r\n\r\n* fix(runtime): enable runtime to be exited\r\n\r\n* fix(tests): wrap runtime test in process\r\n\r\n* fix(runtimes): remove unused runtimes\r\n\r\n* feat(runtimes): improve cancel wait\r\n\r\n* fix(ci): build test pip again in ci\r\n\r\n* fix(tests): fix a test\r\n\r\n* fix(test): run async in its own process\r\n\r\n* feat(pod): include shard in activate msg\r\n\r\n* fix(pea): dont join\r\n\r\n* feat(pod): more debug out\r\n\r\n* feat(grpc): manage channels properly\r\n\r\n* feat(pods): remove exitfifo\r\n\r\n* feat(network): add simple send retry mechanism\r\n\r\n* fix(network): await pool close\r\n\r\n* fix(test): always close grpc server in worker\r\n\r\n* fix(tests): remove container pea from tests\r\n\r\n* fix(tests): reorder tests\r\n\r\n* fix(ci): split tests\r\n\r\n* fix(ci): allow alias setting\r\n\r\n* fix(test): skip a test\r\n\r\n* feat(pods): address comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: unblock skipped test (#3957)\r\n\r\n* feat: jinad pea (#3949)\r\n\r\n* feat: jinad pea\r\n\r\n* feat: jinad pea\r\n\r\n* test: remote peas\r\n\r\n* test: toplogy tests with jinad\r\n\r\n* ci: parallel jobs\r\n\r\n* feat(tests): add pod integration tests (#3958)\r\n\r\n* feat(tests): add pod integration tests\r\n\r\n* fix(tests): make tests less flaky\r\n\r\n* fix(test): fix test\r\n\r\n* test(pea): remote pea topologies (#3961)\r\n\r\n* test(pea): remote pea simple topology\r\n\r\n* test: remote pea topologies\r\n\r\n* refactor: refactor streamer result handling (#3960)\r\n\r\n* feat(k8s): adapt K8s Pod for StarRouting (#3964)\r\n\r\n* test: optimize k8s test\r\n\r\n* test: increase timeout and use different namespace\r\n\r\n* test: optimize k8s test\r\n\r\n* test: build and load image when needed\r\n\r\n* test: refactor k8s test\r\n\r\n* test: fix image name error\r\n\r\n* test: fix k8s image load\r\n\r\n* test: fix typoe port expose\r\n\r\n* test: update tests in connection pool and handling\r\n\r\n* test: remove unused fixture\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* 
feat(k8s): adapt k8s pod for star routing\r\n\r\n* fix(k8s): dont overwrite add/remove function in pool\r\n\r\n* fix(k8s): some fixes\r\n\r\n* fix(k8s): some more fixes\r\n\r\n* fix(k8s): linting\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix k8s unit tests\r\n\r\n* feat(k8s): complete k8s integration test\r\n\r\n* feat(k8s): finish k8s tests\r\n\r\n* feat(k8s): fix test\r\n\r\n* fix(tests): fix test with no name\r\n\r\n* feat(k8s): unify create/replace interface\r\n\r\n* feat(k8s): extract k8s port constants\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): wait for runtime being ready in tests\r\n\r\n* feat(k8s): address comments\r\n\r\nCo-authored-by: bwanglzu \r\n\r\n* feat(flow): adapt Flow for StarRouting (#3986)\r\n\r\n* feat(flow): add routes\r\n\r\n* feat(flow): adapt flow to star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(flow): handle empty topologies\r\n\r\n* feat(k8s): allow k8s pool disabling\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(test): fix test with mock\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(flow): clean up tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat: add plot function (#3994)\r\n\r\n* fix(tests): avoid hanging tests\r\n\r\n* feat(flow): add type hinting\r\n\r\n* fix(test): fix duplicate exec name in test\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): enable jinad test again\r\n\r\n* fix(tests): random port fixture\r\n\r\n* fix(style): replace quotes\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(ci): bring back ci (#3997)\r\n\r\n* feat(ci): enable ci again\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(ci): add latency tracking\r\n\r\n* feat(ci): bring back some tests\r\n\r\n* fix(tests): remove invalid port test\r\n\r\n* feat(ci): disable daemon and distributed tests\r\n\r\n* fix(tests): fix entrypoint in hub test\r\n\r\n* fix(tests): wait for gateway to be ready\r\n\r\n* fix(test): fix more tests\r\n\r\n* feat(flow): do rolling update and scale sequentially\r\n\r\n* fix(tests): fix more tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat: star routing hanging pods (#4011)\r\n\r\n* fix: try to handle hanging pods better\r\n\r\n* test: hanging pods test work\r\n\r\n* fix: fix topology graph problem\r\n\r\n* test: add unit test to graph\r\n\r\n* fix(tests): fix k8s tests\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s pool test\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s connection pool setting\r\n\r\n* fix(tests): make runtime test more reliable\r\n\r\n* fix(test): fix routes test\r\n\r\n* fix(tests): make rolling update test less flaky\r\n\r\n* feat(network): gurantee unique ports\r\n\r\n* feat(network): do round robin for shards\r\n\r\n* fix(ci): increase pytest timeout to 10 min\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix(ci): fix ci file\r\n\r\n* feat(daemon): jinad pod for star routing\r\n\r\n* Revert \"feat(daemon): jinad pod for star routing\"\r\n\r\nThis reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92.\r\n\r\n* feat(daemon): remote jinad pod support (#4042)\r\n\r\n* feat(daemon): add pod tests for star routing\r\n\r\n* feat(daemon): add remote pod test\r\n\r\n* test(daemon): add remote pod arguments test\r\n\r\n* test(daemon): add async scale test\r\n\r\n* test(daemon): add rolling update test\r\n\r\n* test(daemon): fix host\r\n\r\n* feat(proto): remove message proto 
(#4051)\r\n\r\n* feat(proto): remove message proto\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix some more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(proto): put docs back in data\r\n\r\n* fix(proto): clean up\r\n\r\n* feat(proto): clean up\r\n\r\n* fix(tests): skip latency tracking\r\n\r\n* fix(test): fix hub test\r\n\r\n* fix(tests): fix k8s test\r\n\r\n* fix(test): some test clean up\r\n\r\n* fix(style): clean up style issues\r\n\r\n* feat(proto): adjust for rebase\r\n\r\n* fix(tests): bring back latency tracking\r\n\r\n* fix(tests): fix merge accident\r\n\r\n* feat(proto): skip request serialization (#4074)\r\n\r\n* feat: add reduce to star routing (#4070)\r\n\r\n* feat: add reduce on shards to head runtime\r\n\r\n* test: add reduce integration tests with fixed order\r\n\r\n* feat: add reduce on needs\r\n\r\n* chore: get_docs_matrix_from_request becomes public\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* docs: remove undeterministic results warning\r\n\r\n* fix: fix uses_after\r\n\r\n* test: assert correct num docs after reducing in test_external_pod\r\n\r\n* test: correct asserts after reduce in test_rolling_update\r\n\r\n* fix: no reduce if uses_after_address is set\r\n\r\n* fix: get_docs_from_request only if needed\r\n\r\n* fix: fix tests after merge\r\n\r\n* refactor: move reduce from data_request_handler to head\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* chore: apply suggestions\r\n\r\n* fix: fix asserts\r\n\r\n* chore: minor test fix\r\n\r\n* chore: apply suggestions\r\n\r\n* test: remove flow tests with external executor (pea)\r\n\r\n* fix: fix test_expected_messages_routing\r\n\r\n* fix: fix test_func_joiner\r\n\r\n* test: adapt k8s test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix(k8s): fix static pool config\r\n\r\n* fix: use custom protoc doc generator image (#4088)\r\n\r\n* fix: use custom protoc doc generator image\r\n\r\n* fix(docs): minor doc improvement\r\n\r\n* fix(docs): use custom image\r\n\r\n* fix(docs): copy docarray\r\n\r\n* fix: doc building local only\r\n\r\n* fix: timeout doc building\r\n\r\n* fix: use updated args when building ContainerPea\r\n\r\n* test: add container PeaFactory test\r\n\r\n* fix: force pea close on windows (#4098)\r\n\r\n* fix: dont reduce if uses exist (#4099)\r\n\r\n* fix: dont use reduce if uses exist\r\n\r\n* fix: adjust reduce tests\r\n\r\n* fix: adjust more reduce tests\r\n\r\n* fix: fix more tests\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: ignore non jina resources (#4101)\r\n\r\n* feat(executor): enable async executors (#4102)\r\n\r\n* feat(daemon): daemon flow on star routing (#4096)\r\n\r\n* test(daemon): add remote flow test\r\n\r\n* feat(daemon): call scale in daemon\r\n\r\n* feat(daemon): remove tail args and identity\r\n\r\n* test(daemon): rename scalable executor\r\n\r\n* test(daemon): add a small delay in async test\r\n\r\n* feat(daemon): scale partial flow only\r\n\r\n* feat(daemon): call scale directly in partial flow store\r\n\r\n* test(daemon): use asyncio sleep\r\n\r\n* feat(daemon): enable flow level distributed tests\r\n\r\n* test(daemon): fix jinad env workspace config\r\n\r\n* test(daemon): fix pod test use new port rolling update\r\n\r\n* feat(daemon): enable distribuetd tests\r\n\r\n* test(daemon): remove duplicate tests and zed runtime test\r\n\r\n* test(daemon): fix stores unit test\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* 
feat(daemon): enable part of distributed tests\r\n\r\n* test: correct test paths\r\n\r\n* test(daemon): add client test for remote flows\r\n\r\n* test(daemon): send a request with jina client\r\n\r\n* test(daemon): assert async generator\r\n\r\n* test(daemon): small interval between tests\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): use async client fetch result\r\n\r\n* test(daemon): finish container flow test\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): correct test paths\r\n\r\n* test(daemon): add small delay for async tests\r\n\r\n* fix: star routing fixes (#4100)\r\n\r\n* docs: update docs\r\n\r\n* fix: fix Request.__repr__\r\n\r\n* docs: update flow remarks\r\n\r\n* docs: fix typo\r\n\r\n* test: add non_empty_fields test\r\n\r\n* chore: remove non_empty_fields test\r\n\r\n* feat: polling per endpoint (#4111)\r\n\r\n* feat(polling): polling per endpoint configurable\r\n\r\n* fix: adjust tests\r\n\r\n* feat(polling): extend documentation\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: clean up\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: remove repeat from flaky test\r\n\r\n* fix: k8s test\r\n\r\n* feat(polling): address pr feedback\r\n\r\n* feat: improve docs\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(grpc): support connect grpc server via ssl tunnel (#4092)\r\n\r\n* feat(grpc): support ssl grpc connect if port is 443\r\n\r\n* fix(grpc): use https option instead of detect port automatically\r\n\r\n* chore: fix typo\r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* test(networking): add test for peapods networking\r\n\r\n* fix: address comments\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(polling): unify polling args (#4113)\r\n\r\n* fix: several issues for jinad pods (#4119)\r\n\r\n* fix: activate for jinad pods\r\n\r\n* fix: dont expose worker pod in partial daemon\r\n\r\n* fix: workspace setting\r\n\r\n* fix: containerized flows\r\n\r\n* fix: hub test\r\n\r\n* feat(daemon): remote peas on star routing (#4112)\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix sync async client test\r\n\r\n* test(daemon): enable remote peas test\r\n\r\n* test(daemon): replace send message to send request\r\n\r\n* test(daemon): declare pea tests in ci\r\n\r\n* test(daemon): use pea args fixture\r\n\r\n* test(daemon): head pea use default host\r\n\r\n* test(daemon): fix peas topologies\r\n\r\n* test(daemon): fix pseudo naming\r\n\r\n* test(daemon): use default host as host\r\n\r\n* test(daemon): fix executor path\r\n\r\n* test(daemon): add remote worker back\r\n\r\n* test(daemon): skip local remote remote topology\r\n\r\n* fix: jinad pea test setup\r\n\r\n* fix: jinad pea tests\r\n\r\n* fix: remove invalid assertion\r\n\r\nCo-authored-by: jacobowitz \r\n\r\n* feat: enable daemon tests again (#4132)\r\n\r\n* feat: enable daemon tests again\r\n\r\n* fix: remove bogy empty script 
file\r\n\r\n* fix: more jinad test fixes\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: scale and ru in jinad\r\n\r\n* fix: fix more jinad tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix: fix flow test\r\n\r\n* fix: improve pea tests reliability (#4136)\r\n\r\nCo-authored-by: Joan Fontanals \r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Deepankar Mahapatro \r\nCo-authored-by: bwanglzu \r\nCo-authored-by: AlaeddineAbdessalem \r\nCo-authored-by: Zhaofeng Miao <522856232@qq.com>", "code": "def role(self) -> 'PeaRoleType':\n \n return self.args.pea_role\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 4, "token_counts": 14, "n_ast_nodes": 27, "n_identifiers": 4, "d_id": 1730, "documentation": { "docstring": "Get the role of this pea in a pod\n .. #noqa: DAR201", "n_words": 12, "vocab_size": 12, "n_whitespaces": 18, "language": "en" } }, { "id": 126889, "commit_id": "f084546d41f0533c1e9e96a7249532d0eb4ff47d", "repo": "ray", "path": "python/ray/actor.py", "file_name": "actor.py", "fun_name": "__reduce__", "commit_message": "Fix out-of-band deserialization of actor handle (#27700)\n\nWhen we deserialize actor handle via pickle, we will register it with an outer object ref equaling to itself which is wrong. For out-of-band deserialization, there should be no outer object ref.\r\n\r\nSigned-off-by: Jiajun Yao ", "code": "def __reduce__(self):\n \n (serialized, _) = self._serialization_helper()\n # There is no outer object ref when the actor handle is\n # deserialized out-of-band using pickle.\n return ActorHandle._deserialization_helper, (serialized, None)\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 62, "n_words": 27, "vocab_size": 24, "complexity": 1, "nloc": 3, "token_counts": 27, "n_ast_nodes": 46, "n_identifiers": 7, "d_id": 28292, "documentation": { "docstring": "This code path is used by pickling but not by Ray forking.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 260513, "commit_id": "91f02270a8f49e3e52882dc0fa634eff4d138fc8", "repo": "scikit-learn", "path": "sklearn/utils/_param_validation.py", "file_name": "_param_validation.py", "fun_name": "validate_parameter_constraints", "commit_message": "MAINT Add one-sided set differences for clarity in param validation (#23772)\n\n\r\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>", "code": "def validate_parameter_constraints(parameter_constraints, params, caller_name):\n \n if len(set(parameter_constraints) - set(params)) != 0:\n raise ValueError(\n f\"The parameter constraints {list(parameter_constraints)}\"\n \" contain unexpected parameters\"\n f\" {set(parameter_constraints) - set(params)}\"\n )\n\n for param_name, param_val in params.items():\n # We allow parameters to not have a constraint so that third party estimators\n # can inherit from sklearn estimators without having to necessarily use the\n # validation tools.\n if param_name not in parameter_constraints:\n continue\n\n constraints = parameter_constraints[param_name]\n\n if constraints == \"no_validation\":\n continue\n\n constraints = [make_constraint(constraint) for constraint in constraints]\n\n for constraint in constraints:\n if constraint.is_satisfied_by(param_val):\n # this constraint is satisfied, no need to 
check further.\n break\n else:\n # No constraint is satisfied, raise with an informative message.\n\n # Ignore constraints that we don't want to expose in the error message,\n # i.e. options that are for internal purpose or not officially supported.\n constraints = [\n constraint for constraint in constraints if not constraint.hidden\n ]\n\n if len(constraints) == 1:\n constraints_str = f\"{constraints[0]}\"\n else:\n constraints_str = (\n f\"{', '.join([str(c) for c in constraints[:-1]])} or\"\n f\" {constraints[-1]}\"\n )\n\n raise ValueError(\n f\"The {param_name!r} parameter of {caller_name} must be\"\n f\" {constraints_str}. Got {param_val!r} instead.\"\n )\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 25, "n_whitespaces": 590, "n_words": 181, "vocab_size": 117, "complexity": 11, "nloc": 32, "token_counts": 137, "n_ast_nodes": 330, "n_identifiers": 20, "d_id": 76307, "documentation": { "docstring": "Validate types and values of given parameters.\n\n Parameters\n ----------\n parameter_constraints : dict or {\"no_validation\"}\n If \"no_validation\", validation is skipped for this parameter.\n\n If a dict, it must be a dictionary `param_name: list of constraints`.\n A parameter is valid if it satisfies one of the constraints from the list.\n Constraints can be:\n - an Interval object, representing a continuous or discrete range of numbers\n - the string \"array-like\"\n - the string \"sparse matrix\"\n - the string \"random_state\"\n - callable\n - None, meaning that None is a valid value for the parameter\n - any type, meaning that any instance of this type is valid\n - a StrOptions object, representing a set of strings\n - the string \"boolean\"\n - the string \"verbose\"\n\n params : dict\n A dictionary `param_name: param_value`. 
The parameters to validate against the\n constraints.\n\n caller_name : str\n The name of the estimator or function or method that called this function.\n ", "n_words": 149, "vocab_size": 89, "n_whitespaces": 286, "language": "en" } }, { "id": 62784, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/msgpack/ext.py", "file_name": "ext.py", "fun_name": "from_bytes", "commit_message": "upd; format", "code": "def from_bytes(b):\n \n if len(b) == 4:\n seconds = struct.unpack(\"!L\", b)[0]\n nanoseconds = 0\n elif len(b) == 8:\n data64 = struct.unpack(\"!Q\", b)[0]\n seconds = data64 & 0x00000003FFFFFFFF\n nanoseconds = data64 >> 34\n elif len(b) == 12:\n nanoseconds, seconds = struct.unpack(\"!Iq\", b)\n else:\n raise ValueError(\n \"Timestamp type can only be created from 32, 64, or 96-bit byte objects\"\n )\n return Timestamp(seconds, nanoseconds)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 205, "n_words": 60, "vocab_size": 44, "complexity": 4, "nloc": 15, "token_counts": 96, "n_ast_nodes": 159, "n_identifiers": 10, "d_id": 13038, "documentation": { "docstring": "Unpack bytes into a `Timestamp` object.\n\n Used for pure-Python msgpack unpacking.\n\n :param b: Payload from msgpack ext message with code -1\n :type b: bytes\n\n :returns: Timestamp object unpacked from msgpack ext payload\n :rtype: Timestamp\n ", "n_words": 34, "vocab_size": 27, "n_whitespaces": 76, "language": "en" } }, { "id": 248214, "commit_id": "fa0eab9c8e159b698a31fc7cfaafed643f47e284", "repo": "synapse", "path": "synapse/util/patch_inline_callbacks.py", "file_name": "patch_inline_callbacks.py", "fun_name": "do_patch", "commit_message": "Use `ParamSpec` in a few places (#12667)", "code": "def do_patch() -> None:\n \n\n from synapse.logging.context import current_context\n\n global _already_patched\n\n orig_inline_callbacks = defer.inlineCallbacks\n if _already_patched:\n return\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 38, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 12, "token_counts": 36, "n_ast_nodes": 44, "n_identifiers": 9, "d_id": 72158, "documentation": { "docstring": "\n Patch defer.inlineCallbacks so that it checks the state of the logcontext on exit\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 20, "language": "en" } }, { "id": 20553, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pyparsing/core.py", "file_name": "core.py", "fun_name": "__rsub__", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def __rsub__(self, other):\n \n if isinstance(other, str_type):\n other = self._literalStringClass(other)\n if not isinstance(other, ParserElement):\n raise 
TypeError(\n \"Cannot combine element of type {} with ParserElement\".format(\n type(other).__name__\n )\n )\n return other - self\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 140, "n_words": 30, "vocab_size": 26, "complexity": 3, "nloc": 10, "token_counts": 52, "n_ast_nodes": 86, "n_identifiers": 11, "d_id": 3422, "documentation": { "docstring": "\n Implementation of ``-`` operator when left operand is not a :class:`ParserElement`\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 26, "language": "en" } }, { "id": 41391, "commit_id": "a07ef69882ed76e09a0ed43d6f3ea33780c1b2be", "repo": "seaborn", "path": "seaborn/_core/properties.py", "file_name": "properties.py", "fun_name": "_get_categorical_mapping", "commit_message": "Transition mappings->properties, leaving a few loose ends", "code": "def _get_categorical_mapping(self, scale, data):\n \n levels = categorical_order(data, scale.order)\n n = len(levels)\n values = scale.values\n\n if isinstance(values, dict):\n self._check_dict_entries(levels, values)\n # TODO where to ensure that dict values have consistent representation?\n colors = [values[x] for x in levels]\n elif isinstance(values, list):\n colors = self._check_list_length(levels, scale.values)\n elif isinstance(values, tuple):\n colors = blend_palette(values, n)\n elif isinstance(values, str):\n colors = color_palette(values, n)\n elif values is None:\n if n <= len(get_color_cycle()):\n # Use current (global) default palette\n colors = color_palette(n_colors=n)\n else:\n colors = color_palette(\"husl\", n)\n else:\n scale_class = scale.__class__.__name__\n msg = \" \".join([\n f\"Scale values for {self.variable} with a {scale_class} mapping\",\n f\"must be string, list, tuple, or dict; not {type(scale.values)}.\"\n ])\n raise TypeError(msg)\n\n # If color specified here has alpha channel, it will override alpha property\n colors = self._standardize_colors(colors)\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 415, "n_words": 124, "vocab_size": 89, "complexity": 8, "nloc": 28, "token_counts": 184, "n_ast_nodes": 311, "n_identifiers": 32, "d_id": 7413, "documentation": { "docstring": "Define mapping as lookup in list of discrete color values.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 48117, "commit_id": "3977e1798d8294ba628b5f330f43702c1a5c79fc", "repo": "airflow", "path": "tests/system/providers/google/tasks/example_queue.py", "file_name": "example_queue.py", "fun_name": "generate_random_string", "commit_message": "CloudTasks assets & system tests migration (AIP-47) (#23282)", "code": "def generate_random_string():\n \n import random\n import string\n\n return \"\".join(random.choices(string.ascii_uppercase + string.digits, k=8))\n\n random_string = generate_random_string()\n\n # [START create_queue]\n create_queue = CloudTasksQueueCreateOperator(\n location=LOCATION,\n task_queue=Queue(stackdriver_logging_config=dict(sampling_ratio=0.5)),\n queue_name=QUEUE_ID + \"{{ task_instance.xcom_pull(task_ids='random_string') }}\",\n retry=Retry(maximum=10.0),\n timeout=5,\n task_id=\"create_queue\",\n )\n # [END create_queue]\n\n # [START delete_queue]\n delete_queue = CloudTasksQueueDeleteOperator(\n location=LOCATION,\n queue_name=QUEUE_ID + \"{{ task_instance.xcom_pull(task_ids='random_string') }}\",\n task_id=\"delete_queue\",\n )\n # [END 
delete_queue]\n delete_queue.trigger_rule = TriggerRule.ALL_DONE\n\n # [START resume_queue]\n resume_queue = CloudTasksQueueResumeOperator(\n location=LOCATION,\n queue_name=QUEUE_ID + \"{{ task_instance.xcom_pull(task_ids='random_string') }}\",\n task_id=\"resume_queue\",\n )\n # [END resume_queue]\n\n # [START pause_queue]\n pause_queue = CloudTasksQueuePauseOperator(\n location=LOCATION,\n queue_name=QUEUE_ID + \"{{ task_instance.xcom_pull(task_ids='random_string') }}\",\n task_id=\"pause_queue\",\n )\n # [END pause_queue]\n\n # [START purge_queue]\n purge_queue = CloudTasksQueuePurgeOperator(\n location=LOCATION,\n queue_name=QUEUE_ID + \"{{ task_instance.xcom_pull(task_ids='random_string') }}\",\n task_id=\"purge_queue\",\n )\n # [END purge_queue]\n\n # [START get_queue]\n get_queue = CloudTasksQueueGetOperator(\n location=LOCATION,\n queue_name=QUEUE_ID + \"{{ task_instance.xcom_pull(task_ids='random_string') }}\",\n task_id=\"get_queue\",\n )\n\n get_queue_result = BashOperator(\n task_id=\"get_queue_result\",\n bash_command=f\"echo {get_queue.output}\",\n )\n # [END get_queue]\n\n # [START update_queue]\n update_queue = CloudTasksQueueUpdateOperator(\n task_queue=Queue(stackdriver_logging_config=dict(sampling_ratio=1)),\n location=LOCATION,\n queue_name=QUEUE_ID + \"{{ task_instance.xcom_pull(task_ids='random_string') }}\",\n update_mask=FieldMask(paths=[\"stackdriver_logging_config.sampling_ratio\"]),\n task_id=\"update_queue\",\n )\n # [END update_queue]\n\n # [START list_queue]\n list_queue = CloudTasksQueuesListOperator(location=LOCATION, task_id=\"list_queue\")\n # [END list_queue]\n\n chain(\n random_string,\n create_queue,\n update_queue,\n pause_queue,\n resume_queue,\n purge_queue,\n get_queue,\n get_queue_result,\n list_queue,\n delete_queue,\n )\n\n from tests.system.utils.watcher import watcher\n\n # This test needs watcher in order to properly mark success/failure\n # when \"tearDown\" task with trigger rule is part of the DAG\n list(dag.tasks) >> watcher()\n\n\nfrom tests.system.utils import get_test_run # noqa: E402\n\n# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)\ntest_run = get_test_run(dag)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 636, "n_words": 221, "vocab_size": 115, "complexity": 1, "nloc": 4, "token_counts": 31, "n_ast_nodes": 517, "n_identifiers": 59, "d_id": 9364, "documentation": { "docstring": "\n Generate random string for queue and task names.\n Queue name cannot be repeated in preceding 7 days and\n task name in the last 1 hour.\n ", "n_words": 25, "vocab_size": 21, "n_whitespaces": 54, "language": "en" } }, { "id": 128270, "commit_id": "65d0c0aa48be8f9f7faae857d3ab71444997755a", "repo": "ray", "path": "python/ray/serve/tests/test_deployment_state.py", "file_name": "test_deployment_state.py", "fun_name": "test_resource_requirements_none", "commit_message": "[Serve] add alpha gRPC support (#28175)", "code": "def test_resource_requirements_none(mock_get_all_node_ids, mock_deployment_state):\n \n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 6, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 7, "token_counts": 52, "n_ast_nodes": 15, "n_identifiers": 3, "d_id": 28652, "documentation": { "docstring": "Ensure resource_requirements doesn't break if a requirement is None", "n_words": 9, "vocab_size": 9, 
"n_whitespaces": 8, "language": "en" } }, { "id": 122219, "commit_id": "4da72cf3988b4918f65b1401e46c40b7c4504963", "repo": "jax", "path": "jax/experimental/pjit.py", "file_name": "pjit.py", "fun_name": "host_local_array_to_global_array", "commit_message": "Add `host_local_array_to_global_array` and `global_array_to_host_local_array` for enabling transition to jax.Array.\n\nAlso support `FROM_GDA` for `jax.Array` as a backwards compatible change so that users can continue to use that until they transition to jax.Array. Its currently required because of usage like `in_axis_resources = (FROM_GDA, FROM_GDA, P('data'), None)` and changing this on users side will require input from users so we as JAX can just support it as a temporary thing since GDA and Array act similarly in pjit.\n\nPiperOrigin-RevId: 479035326", "code": "def host_local_array_to_global_array(local_inputs, global_mesh, pspecs):\n \n def _convert(arr, pspec):\n if isinstance(arr, array.ArrayImpl) and isinstance(arr.sharding, PmapSharding):\n arr = np.array(arr)\n local_sharding = MeshPspecSharding(global_mesh.local_mesh, pspec)\n arrays = [\n device_put(arr[index], d)\n for d, index in local_sharding.devices_indices_map(arr.shape).items()\n ]\n global_aval = global_mesh._local_to_global(\n pxla._get_array_mapping(pspec),\n core.ShapedArray(arr.shape, arrays[0].dtype))\n return array.ArrayImpl(global_aval, MeshPspecSharding(global_mesh, pspec),\n arrays, committed=True)\n\n flattened_inps, in_tree = tree_flatten(local_inputs)\n in_pspecs = flatten_axis_resources(\n 'input pspecs', in_tree, pspecs, tupled_args=True)\n out = tree_map(_convert, tuple(flattened_inps), in_pspecs)\n return tree_unflatten(in_tree, out)\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 151, "n_words": 63, "vocab_size": 55, "complexity": 1, "nloc": 7, "token_counts": 54, "n_ast_nodes": 262, "n_identifiers": 41, "d_id": 27124, "documentation": { "docstring": "Converts a host local value to a globally sharded `jax.Array`.\n\n You can use this function to transition to `jax.Array`. Using `jax.Array` with\n `pjit` has the same semantics of using GDA with pjit i.e. 
all `jax.Array`\n inputs to pjit should be globally shaped.\n\n If you are currently passing host local values to pjit, you can use this\n function to convert your host local values to global Arrays and then pass that\n to pjit.\n\n Example usage:\n\n ```\n global_inputs = jax.experimental.pjit.host_local_array_to_global_array(\n host_local_inputs, global_mesh, in_pspecs)\n\n with mesh:\n global_out = pjitted_fun(global_inputs)\n\n host_local_output = jax.experimental.pjit.global_array_to_host_local_array(\n global_out, mesh, out_pspecs)\n ```\n\n Args:\n local_inputs: A Pytree of host local values.\n global_mesh: The global mesh.\n pspecs: A Pytree of PartitionSpecs.\n ", "n_words": 110, "vocab_size": 76, "n_whitespaces": 142, "language": "en" } }, { "id": 159329, "commit_id": "6339856514897056716bb531acb8489c9cf05d26", "repo": "rasa", "path": "rasa/telemetry.py", "file_name": "telemetry.py", "fun_name": "telemetry_write_key", "commit_message": "Add support for different recipes (#10641)\n\n* Add support for different recipes\r\n\r\nFixes https://github.com/RasaHQ/rasa/issues/10473\r\n\r\n* Update docs/docs/graph-recipe.mdx\r\n\r\nCo-authored-by: Joe Juzl ", "code": "def telemetry_write_key() -> Optional[Text]:\n \n return _fetch_write_key(\"segment\", TELEMETRY_WRITE_KEY_ENVIRONMENT_VARIABLE)\n\n", "url": "https://github.com/RasaHQ/rasa.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 13, "token_counts": 17, "n_ast_nodes": 31, "n_identifiers": 5, "d_id": 38201, "documentation": { "docstring": "Read the Segment write key from the segment key text file.\n\n The segment key text file should be present only in wheel/sdist packaged\n versions of Rasa Open Source. 
This avoids running telemetry locally when\n developing on Rasa or when running CI builds.\n\n In local development, this should always return `None` to avoid logging telemetry.\n\n Returns:\n Segment write key, if the key file was present.\n ", "n_words": 64, "vocab_size": 50, "n_whitespaces": 89, "language": "en" } }, { "id": 127553, "commit_id": "57cdbb1769a9c32972ba0ec9e7e857eeea961869", "repo": "ray", "path": "python/ray/tune/tests/test_multinode_sync.py", "file_name": "test_multinode_sync.py", "fun_name": "testClusterAutoscaling", "commit_message": "Migrate the deprecated placement_group option to PlacementGroupSchedulingStrategy (#28437)\n\nplacement_group option is deprecated, use PlacementGroupSchedulingStrategy instead.", "code": "def testClusterAutoscaling(self):\n \n self.cluster.update_config(\n {\n \"provider\": {\"head_resources\": {\"CPU\": 4, \"GPU\": 0}},\n }\n )\n self.cluster.start()\n self.cluster.connect(client=True, timeout=120)\n\n self.assertGreater(ray.cluster_resources().get(\"CPU\", 0), 0)\n\n # Trigger autoscaling\n pg = ray.util.placement_group([{\"CPU\": 1, \"GPU\": 1}] * 2)\n timeout = time.monotonic() + 120\n while ray.cluster_resources().get(\"GPU\", 0) < 2:\n if time.monotonic() > timeout:\n raise RuntimeError(\"Autoscaling failed or too slow.\")\n time.sleep(1)\n\n # Schedule task with resources\n self.assertEquals(\n 5,\n ray.get(\n remote_task.options(\n num_cpus=1,\n num_gpus=1,\n scheduling_strategy=PlacementGroupSchedulingStrategy(\n placement_group=pg\n ),\n ).remote(5)\n ),\n )\n\n print(\"Autoscaling worked\")\n ray.util.remove_placement_group(pg)\n\n time.sleep(2) # Give some time so nodes.json is updated\n\n self.cluster.kill_node(num=2)\n print(\"Killed GPU node.\")\n pg = ray.util.placement_group([{\"CPU\": 1, \"GPU\": 1}] * 2)\n\n table = ray.util.placement_group_table(pg)\n assert table[\"state\"] == \"PENDING\"\n\n timeout = time.monotonic() + 180\n while table[\"state\"] != \"CREATED\":\n if time.monotonic() > timeout:\n raise RuntimeError(\"Re-starting killed node failed or too slow.\")\n time.sleep(1)\n table = ray.util.placement_group_table(pg)\n\n print(\"Node was restarted.\")\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 579, "n_words": 126, "vocab_size": 90, "complexity": 5, "nloc": 42, "token_counts": 300, "n_ast_nodes": 513, "n_identifiers": 33, "d_id": 28467, "documentation": { "docstring": "Sanity check that multinode tests with autoscaling are working", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 223483, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/doctest.py", "file_name": "doctest.py", "fun_name": "_from_module", "commit_message": "add python 3.10.4 for windows", "code": "def _from_module(self, module, object):\n \n if module is None:\n return True\n elif inspect.getmodule(object) is not None:\n return module is inspect.getmodule(object)\n elif inspect.isfunction(object):\n return module.__dict__ is object.__globals__\n elif inspect.ismethoddescriptor(object):\n if hasattr(object, '__objclass__'):\n obj_mod = object.__objclass__.__module__\n elif hasattr(object, '__module__'):\n obj_mod = object.__module__\n else:\n return True # [XX] no easy way to tell otherwise\n return module.__name__ == obj_mod\n elif inspect.isclass(object):\n return module.__name__ == object.__module__\n elif hasattr(object, '__module__'):\n return 
module.__name__ == object.__module__\n elif isinstance(object, property):\n return True # [XX] no way not be sure.\n else:\n raise ValueError(\"object must be a class or function\")\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 317, "n_words": 88, "vocab_size": 47, "complexity": 10, "nloc": 23, "token_counts": 148, "n_ast_nodes": 242, "n_identifiers": 19, "d_id": 56928, "documentation": { "docstring": "\n Return true if the given object is defined in the given\n module.\n ", "n_words": 12, "vocab_size": 10, "n_whitespaces": 34, "language": "en" } }, { "id": 205963, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/forms/forms.py", "file_name": "forms.py", "fun_name": "add_prefix", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def add_prefix(self, field_name):\n \n return \"%s-%s\" % (self.prefix, field_name) if self.prefix else field_name\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 26, "n_words": 12, "vocab_size": 12, "complexity": 2, "nloc": 2, "token_counts": 24, "n_ast_nodes": 39, "n_identifiers": 4, "d_id": 51300, "documentation": { "docstring": "\n Return the field name with a prefix appended, if this Form has a\n prefix set.\n\n Subclasses may wish to override.\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 49, "language": "en" } }, { "id": 300618, "commit_id": "4885331509eeffe50f42d76b234996467b06170f", "repo": "core", "path": "homeassistant/helpers/template.py", "file_name": "template.py", "fun_name": "arc_tangent", "commit_message": "Fail template functions when no default specified (#71687)", "code": "def arc_tangent(value, default=_SENTINEL):\n \n try:\n return math.atan(float(value))\n except (ValueError, TypeError):\n if default is _SENTINEL:\n raise_no_default(\"atan\", value)\n return default\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 58, "n_words": 17, "vocab_size": 15, "complexity": 3, "nloc": 7, "token_counts": 42, "n_ast_nodes": 70, "n_identifiers": 10, "d_id": 99478, "documentation": { "docstring": "Filter and function to get arc tangent of the value.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 270214, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/distribute/distribute_coordinator_utils.py", "file_name": "distribute_coordinator_utils.py", "fun_name": "master_target", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def master_target(self):\n \n return self._master_target\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 19, "n_identifiers": 3, "d_id": 80417, "documentation": { "docstring": "Returns the session master for the corresponding task to connect to.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 217657, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/hmac.py", "file_name": "hmac.py", "fun_name": "new", "commit_message": "add python 3.10.4 for windows", "code": "def 
new(key, msg=None, digestmod=''):\n \n return HMAC(key, msg, digestmod)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 14, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 23, "n_ast_nodes": 37, "n_identifiers": 5, "d_id": 54872, "documentation": { "docstring": "Create a new hashing object and return it.\n\n key: bytes or buffer, The starting key for the hash.\n msg: bytes or buffer, Initial input for the hash, or None.\n digestmod: A hash name suitable for hashlib.new(). *OR*\n A hashlib constructor returning a new hash object. *OR*\n A module supporting PEP 247.\n\n Required as of 3.8, despite its position after the optional\n msg argument. Passing it as a keyword argument is\n recommended, though not required for legacy API reasons.\n\n You can now feed arbitrary bytes into the object using its update()\n method, and can ask for the hash value at any time by calling its digest()\n or hexdigest() methods.\n ", "n_words": 108, "vocab_size": 80, "n_whitespaces": 200, "language": "en" } }, { "id": 118640, "commit_id": "704eab3478cf69847825b23dabf15813a8ac9fa2", "repo": "streamlit", "path": "lib/tests/streamlit/server_test.py", "file_name": "server_test.py", "fun_name": "test_websocket_connect", "commit_message": "Rename and refactor `Report` machinery (#4141)\n\nThis refactor renames (almost) everything related to the outdated \"report\" concept with more precise concepts that we use throughout our code, primarily \"script run\", \"session\", and \"app\".", "code": "def test_websocket_connect(self):\n \n\n with patch(\n \"streamlit.server.server.LocalSourcesWatcher\"\n ), self._patch_app_session():\n yield self.start_server_loop()\n\n self.assertFalse(self.server.browser_is_connected)\n\n # Open a websocket connection\n ws_client = yield self.ws_connect()\n self.assertTrue(self.server.browser_is_connected)\n\n # Get this client's SessionInfo object\n self.assertEqual(1, len(self.server._session_info_by_id))\n session_info = list(self.server._session_info_by_id.values())[0]\n\n # Close the connection\n ws_client.close()\n yield gen.sleep(0.1)\n self.assertFalse(self.server.browser_is_connected)\n\n # Ensure AppSession.shutdown() was called, and that our\n # SessionInfo was cleared.\n session_info.session.shutdown.assert_called_once()\n self.assertEqual(0, len(self.server._session_info_by_id))\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 262, "n_words": 54, "vocab_size": 42, "complexity": 1, "nloc": 15, "token_counts": 132, "n_ast_nodes": 224, "n_identifiers": 23, "d_id": 26342, "documentation": { "docstring": "Test that we can connect to the server via websocket.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 286398, "commit_id": "09f753da1c2a2f03c41fe6a3ca2eb79f6ea58995", "repo": "OpenBBTerminal", "path": "openbb_terminal/cryptocurrency/crypto_controller.py", "file_name": "crypto_controller.py", "fun_name": "call_find", "commit_message": "More Fixes to Crypto + key sort (#3244)\n\n* fix #3095 - autocomplete and command working + key sort\r\n\r\n* fix #3056\r\n\r\n* fix [Bug] bugs #3048\r\n\r\n* fix [Bug] bug #3017\r\n\r\n* sort -> sortby, not ascend, tests\r\n\r\n* fix my goof ups\r\n\r\nCo-authored-by: james ", "code": "def call_find(self, other_args):\n \n parser = argparse.ArgumentParser(\n prog=\"find\",\n 
add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=,\n )\n parser.add_argument(\n \"-c\",\n \"--coin\",\n help=\"Symbol Name or Id of Coin\",\n dest=\"coin\",\n required=\"-h\" not in other_args,\n type=str,\n )\n parser.add_argument(\n \"-k\",\n \"--key\",\n dest=\"key\",\n help=\"Specify by which column you would like to search: symbol, name, id\",\n type=str,\n choices=FIND_KEYS,\n default=\"symbol\",\n )\n parser.add_argument(\n \"-l\",\n \"--limit\",\n default=10,\n dest=\"limit\",\n help=\"Number of records to display\",\n type=check_positive,\n )\n parser.add_argument(\n \"-s\",\n \"--skip\",\n default=0,\n dest=\"skip\",\n help=\"Skip n of records\",\n type=check_positive,\n )\n if other_args and not other_args[0][0] == \"-\":\n other_args.insert(0, \"-c\")\n\n ns_parser = self.parse_known_args_and_warn(\n parser,\n other_args,\n EXPORT_ONLY_RAW_DATA_ALLOWED,\n )\n # TODO: merge find + display_all_coins\n if ns_parser:\n if ns_parser.coin == \"ALL\":\n display_all_coins(\n symbol=ns_parser.coin,\n source=ns_parser.source,\n limit=ns_parser.limit,\n skip=ns_parser.skip,\n show_all=True,\n export=ns_parser.export,\n )\n else:\n find(\n query=ns_parser.coin,\n source=ns_parser.source,\n key=ns_parser.key,\n limit=ns_parser.limit,\n export=ns_parser.export,\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 880, "n_words": 114, "vocab_size": 90, "complexity": 5, "nloc": 82, "token_counts": 257, "n_ast_nodes": 406, "n_identifiers": 36, "d_id": 85785, "documentation": { "docstring": "Process find command\n Find similar coin by name, symbol, or id. If you don't remember exact name or id of the Coin at CoinGecko,\n Binance, Coinbase or CoinPaprika you can use this command to display coins with similar name, symbol or id\n to your search query.\n Example of usage: coin name is something like \"polka\". So I can try: find -c polka -k name -t 25\n It will search for coin that has similar name to polka and display top 25 matches.\n -c, --coin stands for coin - you provide here your search query\n -k, --key it's a searching key. You can search by symbol, id or name of coin\n -l, --limit it displays top N number of records.\n coins: Shows list of coins available on CoinGecko, CoinPaprika and Binance.If you provide name of\n coin then in result you will see ids of coins with best match for all mentioned services.\n If you provide \"ALL\" in your coin search query, then all coins will be displayed. To move over coins you\n can use pagination mechanism with skip, top params. E.g. coins ALL --skip 100 --limit 30 then all coins\n from 100 to 130 will be displayed. By default skip = 0, limit = 10.\n If you won't provide source of the data everything will be displayed (CoinGecko, CoinPaprika, Binance).\n If you want to search only in given source then use --source flag. E.g. 
if you want to find coin with name\n uniswap on CoinPaprika then use: coins uniswap --source cp --limit 10\n ", "n_words": 252, "vocab_size": 139, "n_whitespaces": 439, "language": "en" } }, { "id": 284352, "commit_id": "34bc290dded1bd2418fc3c6b375a79f9cdd68d5a", "repo": "OpenBBTerminal", "path": "openbb_terminal/portfolio/portfolio_optimization/parameters/params_view.py", "file_name": "params_view.py", "fun_name": "load_file", "commit_message": "New portfolio optimization menu (#1642)\n\n* New-Portfolio-Optimization-Menu\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* Update _index.md\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* configure portfolio optimization parameters ini\r\n\r\n* minor improvement\r\n\r\n* Revert \"New-Portfolio-Optimization-Menu\"\r\n\r\nThis reverts commit b4b7169cfbc8f28c379eb1920307c2cdd2e47a0f.\r\n\r\n* Add in Excel functionality and improve the capabilities\r\n\r\n* Add Excel load function\r\n\r\n* Tidying up the functions\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* Re-add my code\r\n\r\n* Some spacing and details\r\n\r\n* Add folder structure for portfolio\r\n\r\n* Update terminal file loading\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* Make it possible to move from params to po with loaded file\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* Making the connection between the parameters file and the functions\r\n\r\n* Add in allocation and new params files\r\n\r\n* Improve params default settings\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* Update Portfolios and Params sheets\r\n\r\n* Update sheets\r\n\r\n* Update command to load in correct sheet\r\n\r\n* Adjust function to only read specific columns\r\n\r\n* Update portfolio\r\n\r\n* Small correction\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* Patched up show error\r\n\r\n* Add Equity portfolio\r\n\r\n* Make functions more robust\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* Add in Params documentation\r\n\r\n* Fixing Linting\r\n\r\n* Add in Requirements and Poetry Updates\r\n\r\n* Update website\r\n\r\n* linting\r\n\r\n* Update tests\r\n\r\n* Minor fix\r\n\r\n* remove unneccesary READMEs\r\n\r\n* Remove expected variable type\r\n\r\n* Improve documentation\r\n\r\n* Clean up the code\r\n\r\n* Refractoring\r\n\r\n* Adjust names to make it OS friendly\r\n\r\nCo-authored-by: Jeroen Bouma \r\nCo-authored-by: jmaslek \r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: DidierRLopes ", "code": "def load_file(file_location=None):\n \n if str(file_location).endswith(\".ini\"):\n params = configparser.RawConfigParser()\n params.read(file_location)\n params.optionxform = str # type: ignore\n params = params[\"OPENBB\"]\n\n if \"technique\" in params:\n current_model = params[\"technique\"]\n else:\n current_model = None\n\n elif str(file_location).endswith(\".xlsx\"):\n params, _ = excel_model.load_configuration(file_location)\n current_model = params[\"technique\"]\n else:\n console.print(\n \"Can not load in the file due to not being an .ini or .xlsx file.\"\n )\n return None, None\n\n max_len = max(len(k) for k in params.keys())\n help_text = \"[info]Parameters:[/info]\\n\"\n\n if current_model:\n for k, v in 
params.items():\n all_params = DEFAULT_PARAMETERS + MODEL_PARAMS[current_model]\n if k in all_params:\n help_text += f\" [param]{k}{' ' * (max_len - len(k))} :[/param] {v}\\n\"\n else:\n for k, v in params.items():\n help_text += f\" [param]{k}{' ' * (max_len - len(k))} :[/param] {v}\\n\"\n\n console.print(help_text)\n\n return params, current_model\n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 325, "n_words": 116, "vocab_size": 70, "complexity": 9, "nloc": 30, "token_counts": 176, "n_ast_nodes": 363, "n_identifiers": 26, "d_id": 84704, "documentation": { "docstring": "\n Loads in the configuration file and return the parameters in a dictionary including the model if available.\n\n Parameters\n ----------\n file_location: str\n The location of the file to be loaded in either xlsx or ini.\n\n Returns\n -------\n Return the parameters and the model, if available.\n ", "n_words": 44, "vocab_size": 32, "n_whitespaces": 76, "language": "en" } }, { "id": 228743, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/bar/marker/_colorbar.py", "file_name": "_colorbar.py", "fun_name": "showticklabels", "commit_message": "switch to black .22", "code": "def showticklabels(self):\n \n return self[\"showticklabels\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 60416, "documentation": { "docstring": "\n Determines whether or not the tick labels are drawn.\n\n The 'showticklabels' property must be specified as a bool\n (either True, or False)\n\n Returns\n -------\n bool\n ", "n_words": 25, "vocab_size": 23, "n_whitespaces": 75, "language": "en" } }, { "id": 112010, "commit_id": "553e91f4205b286ce7d71142f517c010bbcefac7", "repo": "nni", "path": "nni/trial.py", "file_name": "trial.py", "fun_name": "get_trial_id", "commit_message": "Update trial and experiment docstr (#4672)", "code": "def get_trial_id() -> str:\n \n return _trial_id\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 7, "token_counts": 9, "n_ast_nodes": 18, "n_identifiers": 3, "d_id": 24548, "documentation": { "docstring": "\n Return unique ID of the trial that is current running.\n\n This is shown as \"ID\" in the web portal's trial table.\n ", "n_words": 21, "vocab_size": 18, "n_whitespaces": 31, "language": "en" } }, { "id": 67763, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/doctype/stock_entry/stock_entry_utils.py", "file_name": "stock_entry_utils.py", "fun_name": "make_stock_entry", "commit_message": "style: format code with black", "code": "def make_stock_entry(**args):\n\t\n\n\tdef process_serial_numbers(serial_nos_list):\n\t\tserial_nos_list = [\n\t\t\t\"\\n\".join(serial_num[\"serial_no\"] for serial_num in serial_nos_list if serial_num.serial_no)\n\t\t]\n\n\t\tuniques = list(set(serial_nos_list[0].split(\"\\n\")))\n\n\t\treturn \"\\n\".join(uniques)\n\n\ts = frappe.new_doc(\"Stock Entry\")\n\targs = frappe._dict(args)\n\n\tif args.posting_date or args.posting_time:\n\t\ts.set_posting_time = 1\n\n\tif 
args.posting_date:\n\t\ts.posting_date = args.posting_date\n\tif args.posting_time:\n\t\ts.posting_time = args.posting_time\n\tif args.inspection_required:\n\t\ts.inspection_required = args.inspection_required\n\n\t# map names\n\tif args.from_warehouse:\n\t\targs.source = args.from_warehouse\n\tif args.to_warehouse:\n\t\targs.target = args.to_warehouse\n\tif args.item_code:\n\t\targs.item = args.item_code\n\tif args.apply_putaway_rule:\n\t\ts.apply_putaway_rule = args.apply_putaway_rule\n\n\tif isinstance(args.qty, str):\n\t\tif \".\" in args.qty:\n\t\t\targs.qty = flt(args.qty)\n\t\telse:\n\t\t\targs.qty = cint(args.qty)\n\n\t# purpose\n\tif not args.purpose:\n\t\tif args.source and args.target:\n\t\t\ts.purpose = \"Material Transfer\"\n\t\telif args.source:\n\t\t\ts.purpose = \"Material Issue\"\n\t\telse:\n\t\t\ts.purpose = \"Material Receipt\"\n\telse:\n\t\ts.purpose = args.purpose\n\n\t# company\n\tif not args.company:\n\t\tif args.source:\n\t\t\targs.company = frappe.db.get_value(\"Warehouse\", args.source, \"company\")\n\t\telif args.target:\n\t\t\targs.company = frappe.db.get_value(\"Warehouse\", args.target, \"company\")\n\n\t# set vales from test\n\tif frappe.flags.in_test:\n\t\tif not args.company:\n\t\t\targs.company = \"_Test Company\"\n\t\tif not args.item:\n\t\t\targs.item = \"_Test Item\"\n\n\ts.company = args.company or erpnext.get_default_company()\n\ts.purchase_receipt_no = args.purchase_receipt_no\n\ts.delivery_note_no = args.delivery_note_no\n\ts.sales_invoice_no = args.sales_invoice_no\n\ts.is_opening = args.is_opening or \"No\"\n\tif not args.cost_center:\n\t\targs.cost_center = frappe.get_value(\"Company\", s.company, \"cost_center\")\n\n\tif not args.expense_account and s.is_opening == \"No\":\n\t\targs.expense_account = frappe.get_value(\"Company\", s.company, \"stock_adjustment_account\")\n\n\t# We can find out the serial number using the batch source document\n\tserial_number = args.serial_no\n\n\tif not args.serial_no and args.qty and args.batch_no:\n\t\tserial_number_list = frappe.get_list(\n\t\t\tdoctype=\"Stock Ledger Entry\",\n\t\t\tfields=[\"serial_no\"],\n\t\t\tfilters={\"batch_no\": args.batch_no, \"warehouse\": args.from_warehouse},\n\t\t)\n\t\tserial_number = process_serial_numbers(serial_number_list)\n\n\targs.serial_no = serial_number\n\n\ts.append(\n\t\t\"items\",\n\t\t{\n\t\t\t\"item_code\": args.item,\n\t\t\t\"s_warehouse\": args.source,\n\t\t\t\"t_warehouse\": args.target,\n\t\t\t\"qty\": args.qty,\n\t\t\t\"basic_rate\": args.rate or args.basic_rate,\n\t\t\t\"conversion_factor\": args.conversion_factor or 1.0,\n\t\t\t\"transfer_qty\": flt(args.qty) * (flt(args.conversion_factor) or 1.0),\n\t\t\t\"serial_no\": args.serial_no,\n\t\t\t\"batch_no\": args.batch_no,\n\t\t\t\"cost_center\": args.cost_center,\n\t\t\t\"expense_account\": args.expense_account,\n\t\t},\n\t)\n\n\ts.set_stock_entry_type()\n\tif not args.do_not_save:\n\t\ts.insert()\n\t\tif not args.do_not_submit:\n\t\t\ts.submit()\n\treturn s\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 186, "n_words": 280, "vocab_size": 164, "complexity": 35, "nloc": 84, "token_counts": 574, "n_ast_nodes": 1040, "n_identifiers": 61, "d_id": 14615, "documentation": { "docstring": "Helper function to make a Stock Entry\n\n\t:item_code: Item to be moved\n\t:qty: Qty to be moved\n\t:company: Company Name (optional)\n\t:from_warehouse: Optional\n\t:to_warehouse: Optional\n\t:rate: Optional\n\t:serial_no: 
Optional\n\t:batch_no: Optional\n\t:posting_date: Optional\n\t:posting_time: Optional\n\t:purpose: Optional\n\t:do_not_save: Optional flag\n\t:do_not_submit: Optional flag\n\t", "n_words": 43, "vocab_size": 29, "n_whitespaces": 29, "language": "en" } }, { "id": 221800, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ctypes/_aix.py", "file_name": "_aix.py", "fun_name": "get_one_match", "commit_message": "add python 3.10.4 for windows", "code": "def get_one_match(expr, lines):\n \n # member names in the ld_headers output are between square brackets\n expr = rf'\\[({expr})\\]'\n matches = list(filter(None, (re.search(expr, line) for line in lines)))\n if len(matches) == 1:\n return matches[0].group(1)\n else:\n return None\n\n# additional processing to deal with AIX legacy names for 64-bit members", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 78, "n_words": 47, "vocab_size": 41, "complexity": 3, "nloc": 7, "token_counts": 58, "n_ast_nodes": 98, "n_identifiers": 11, "d_id": 56517, "documentation": { "docstring": "\n Must be only one match, otherwise result is None.\n When there is a match, strip leading \"[\" and trailing \"]\"\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 30, "language": "en" } }, { "id": 178285, "commit_id": "e3c87a8a709006f9064b8c32782fbd5461bd0d1d", "repo": "label-studio", "path": "label_studio/tests/test_config_validation.py", "file_name": "test_config_validation.py", "fun_name": "test_config_validation_for_choices_workaround", "commit_message": "fix: DEV-4035: Fix single choice workaround for several choices tags (#3319)\n\n* fix: DEV-3635: Fix single choice workaround for several choices tags", "code": "def test_config_validation_for_choices_workaround(business_client, project_id):\n \n payload = {\n 'label_config': ''}\n response = business_client.patch(\n f\"/api/projects/{project_id}\",\n data=json.dumps(payload),\n content_type=\"application/json\",\n )\n assert response.status_code == 200\n\n payload = {\n 'label_config': ''}\n response = business_client.patch(\n f\"/api/projects/{project_id}\",\n data=json.dumps(payload),\n content_type=\"application/json\",\n )\n assert response.status_code == 200\n", "url": "https://github.com/heartexlabs/label-studio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 138, "n_words": 55, "vocab_size": 28, "complexity": 1, "nloc": 17, "token_counts": 80, "n_ast_nodes": 143, "n_identifiers": 11, "d_id": 42647, "documentation": { "docstring": "\n Validate Choices tag for 1 choice with workaround\n Example bug DEV-3635\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 21, "language": "en" } }, { "id": 44412, "commit_id": "baf50cddd86ac07f064c8cbd95efb22d038b3832", "repo": "airflow", "path": "airflow/sensors/external_task.py", "file_name": "external_task.py", "fun_name": "get_count", "commit_message": "Fix tests for mssql after SQLA 1.4 upgrade (#21303)\n\nThe way SQLA 1.4 constructed the query then `exeuction_date.in_([])`\r\nchanged, and as a result it started failing.\r\n\r\nBut we don't even need to ask the database in this case, as we know it\r\nwon't return any rows.", "code": "def get_count(self, dttm_filter, session, states) -> int:\n \n TI = TaskInstance\n DR = DagRun\n if not dttm_filter:\n return 0\n\n if self.external_task_ids:\n count = (\n session.query(func.count()) # .count() is inefficient\n .filter(\n 
TI.dag_id == self.external_dag_id,\n TI.task_id.in_(self.external_task_ids),\n TI.state.in_(states),\n TI.execution_date.in_(dttm_filter),\n )\n .scalar()\n )\n count = count / len(self.external_task_ids)\n else:\n count = (\n session.query(func.count())\n .filter(\n DR.dag_id == self.external_dag_id,\n DR.state.in_(states),\n DR.execution_date.in_(dttm_filter),\n )\n .scalar()\n )\n return count\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 428, "n_words": 59, "vocab_size": 40, "complexity": 3, "nloc": 36, "token_counts": 152, "n_ast_nodes": 238, "n_identifiers": 23, "d_id": 8251, "documentation": { "docstring": "\n Get the count of records against dttm filter and states\n\n :param dttm_filter: date time filter for execution date\n :param session: airflow session object\n :param states: task or dag states\n :return: count of record against the filters\n ", "n_words": 36, "vocab_size": 27, "n_whitespaces": 79, "language": "en" } }, { "id": 308133, "commit_id": "7a6897c7578dffd6b67f57747ebd81b67b153e01", "repo": "core", "path": "tests/components/deconz/test_climate.py", "file_name": "test_climate.py", "fun_name": "test_boost_mode", "commit_message": "Add deconz current hvac operation to thermostate based on \"state\" (#59989)\n\n* deconz - add current hvac operation to thermostate based on \"state\"\r\n\r\n* deconz - extend current hvac operation to thermostate based on \"state\" and \"mode\"\r\n\r\n* Add tests for current hvac action\r\n\r\n* Add boost mode as special case\r\n\r\n* format using Black\r\n\r\n* sort imports\r\n\r\n* Add test for device with mode none and state none\r\n\r\n* Update homeassistant/components/deconz/climate.py\r\n\r\nCo-authored-by: Robert Svensson \r\n\r\n* Fix test_climate.py test_no_mode_no_state\r\n\r\n* Add test for boost mode\r\n\r\nCo-authored-by: Robert Svensson ", "code": "async def test_boost_mode(hass, aioclient_mock, mock_deconz_websocket):\n \n data = {\n \"sensors\": {\n \"0\": {\n \"config\": {\n \"battery\": 58,\n \"heatsetpoint\": 2200,\n \"locked\": False,\n \"mode\": \"heat\",\n \"offset\": -200,\n \"on\": True,\n \"preset\": \"manual\",\n \"reachable\": True,\n \"schedule\": {},\n \"schedule_on\": False,\n \"setvalve\": False,\n \"windowopen_set\": False,\n },\n \"ep\": 1,\n \"etag\": \"404c15db68c318ebe7832ce5aa3d1e30\",\n \"lastannounced\": \"2022-08-31T03:00:59Z\",\n \"lastseen\": \"2022-09-19T11:58Z\",\n \"manufacturername\": \"_TZE200_b6wax7g0\",\n \"modelid\": \"TS0601\",\n \"name\": \"Thermostat\",\n \"state\": {\n \"lastupdated\": \"2022-09-19T11:58:24.204\",\n \"lowbattery\": False,\n \"on\": False,\n \"temperature\": 2200,\n \"valve\": 0,\n },\n \"type\": \"ZHAThermostat\",\n \"uniqueid\": \"84:fd:27:ff:fe:8a:eb:89-01-0201\",\n }\n }\n }\n with patch.dict(DECONZ_WEB_REQUEST, data):\n config_entry = await setup_deconz_integration(hass, aioclient_mock)\n\n assert len(hass.states.async_all()) == 3\n\n climate_thermostat = hass.states.get(\"climate.thermostat\")\n\n assert climate_thermostat.state == HVACMode.HEAT\n\n assert climate_thermostat.attributes[\"preset_mode\"] is DECONZ_PRESET_MANUAL\n assert climate_thermostat.attributes[\"hvac_action\"] is HVACAction.IDLE\n\n # Event signals thermostat preset boost and valve 100 (real data)\n event_changed_sensor = {\n \"t\": \"event\",\n \"e\": \"changed\",\n \"r\": \"sensors\",\n \"id\": \"0\",\n \"config\": {\"preset\": \"boost\"},\n \"state\": {\"valve\": 100},\n }\n\n await 
mock_deconz_websocket(data=event_changed_sensor)\n await hass.async_block_till_done()\n\n climate_thermostat = hass.states.get(\"climate.thermostat\")\n assert climate_thermostat.attributes[\"preset_mode\"] is PRESET_BOOST\n assert climate_thermostat.attributes[\"hvac_action\"] is HVACAction.HEATING\n\n # Verify service calls\n mock_deconz_put_request(aioclient_mock, config_entry.data, \"/sensors/0/config\")\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 811, "n_words": 151, "vocab_size": 112, "complexity": 1, "nloc": 58, "token_counts": 297, "n_ast_nodes": 549, "n_identifiers": 27, "d_id": 106894, "documentation": { "docstring": "Test that a climate device with boost mode and different state works.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 39311, "commit_id": "4637482026d2afc5dd93e1fdce6a3c9285427062", "repo": "recommenders", "path": "recommenders/evaluation/spark_evaluation.py", "file_name": "spark_evaluation.py", "fun_name": "exp_var", "commit_message": "use numpy divide in explained variance", "code": "def exp_var(self):\n \n var1 = self.y_pred_true.selectExpr(\"variance(label - prediction)\").collect()[0][\n 0\n ]\n var2 = self.y_pred_true.selectExpr(\"variance(label)\").collect()[0][0]\n # numpy divide is more tolerant to var2 being zero\n return 1 - np.divide(var1, var2)\n\n", "url": "https://github.com/microsoft/recommenders.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 80, "n_words": 27, "vocab_size": 24, "complexity": 1, "nloc": 6, "token_counts": 57, "n_ast_nodes": 96, "n_identifiers": 9, "d_id": 7208, "documentation": { "docstring": "Calculate explained variance.\n\n .. 
note::\n Spark MLLib's implementation is buggy (can lead to values > 1), hence we use var().\n\n Returns:\n float: Explained variance (min=0, max=1).\n ", "n_words": 26, "vocab_size": 26, "n_whitespaces": 68, "language": "en" } }, { "id": 32353, "commit_id": "12d66b47012c9258f9557e6d3a0c13bcd1c72871", "repo": "transformers", "path": "src/transformers/models/owlvit/feature_extraction_owlvit.py", "file_name": "feature_extraction_owlvit.py", "fun_name": "center_to_corners_format", "commit_message": "Add OWL-ViT model for zero-shot object detection (#17938)\n\n* add owlvit model skeleton\r\n\r\n* add class and box predictor heads\r\n\r\n* convert modified flax clip to pytorch\r\n\r\n* fix box and class predictors\r\n\r\n* add OwlViTImageTextEmbedder\r\n\r\n* convert class and box head checkpoints\r\n\r\n* convert image text embedder checkpoints\r\n\r\n* add object detection head\r\n\r\n* fix bugs\r\n\r\n* update conversion script\r\n\r\n* update conversion script\r\n\r\n* fix q,v,k,out weight conversion conversion\r\n\r\n* add owlvit object detection output\r\n\r\n* fix bug in image embedder\r\n\r\n* fix bugs in text embedder\r\n\r\n* fix positional embeddings\r\n\r\n* fix bug in inference mode vision pooling\r\n\r\n* update docs, init tokenizer and processor files\r\n\r\n* support batch processing\r\n\r\n* add OwlViTProcessor\r\n\r\n* remove merge conflicts\r\n\r\n* readd owlvit imports\r\n\r\n* fix bug in OwlViTProcessor imports\r\n\r\n* fix bugs in processor\r\n\r\n* update docs\r\n\r\n* fix bugs in processor\r\n\r\n* update owlvit docs\r\n\r\n* add OwlViTFeatureExtractor\r\n\r\n* style changes, add postprocess method to feature extractor\r\n\r\n* add feature extractor and processor tests\r\n\r\n* add object detection tests\r\n\r\n* update conversion script\r\n\r\n* update config paths\r\n\r\n* update config paths\r\n\r\n* fix configuration paths and bugs\r\n\r\n* fix bugs in OwlViT tests\r\n\r\n* add import checks to processor\r\n\r\n* fix docs and minor issues\r\n\r\n* fix docs and minor issues\r\n\r\n* fix bugs and issues\r\n\r\n* fix bugs and issues\r\n\r\n* fix bugs and issues\r\n\r\n* fix bugs and issues\r\n\r\n* update docs and examples\r\n\r\n* fix bugs and issues\r\n\r\n* update conversion script, fix positional embeddings\r\n\r\n* process 2D input ids, update tests\r\n\r\n* fix style and quality issues\r\n\r\n* update docs\r\n\r\n* update docs and imports\r\n\r\n* update OWL-ViT index.md\r\n\r\n* fix bug in OwlViT feature ext tests\r\n\r\n* fix code examples, return_dict by default\r\n\r\n* return_dict by default\r\n\r\n* minor fixes, add tests to processor\r\n\r\n* small fixes\r\n\r\n* add output_attentions arg to main model\r\n\r\n* fix bugs\r\n\r\n* remove output_hidden_states arg from main model\r\n\r\n* update self.config variables\r\n\r\n* add option to return last_hidden_states\r\n\r\n* fix bug in config variables\r\n\r\n* fix copied from statements\r\n\r\n* fix small issues and bugs\r\n\r\n* fix bugs\r\n\r\n* fix bugs, support greyscale images\r\n\r\n* run fixup\r\n\r\n* update repo name\r\n\r\n* merge OwlViTImageTextEmbedder with obj detection head\r\n\r\n* fix merge conflict\r\n\r\n* fix merge conflict\r\n\r\n* make fixup\r\n\r\n* fix bugs\r\n\r\n* fix bugs\r\n\r\n* add additional processor test", "code": "def center_to_corners_format(x):\n \n x_center, y_center, width, height = x.unbind(-1)\n boxes = [(x_center - 0.5 * width), (y_center - 0.5 * height), (x_center + 0.5 * width), (y_center + 0.5 * height)]\n return torch.stack(boxes, dim=-1)\n\n", "url": 
"https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 45, "n_words": 33, "vocab_size": 22, "complexity": 1, "nloc": 4, "token_counts": 76, "n_ast_nodes": 103, "n_identifiers": 11, "d_id": 5912, "documentation": { "docstring": "\n Converts a PyTorch tensor of bounding boxes of center format (center_x, center_y, width, height) to corners format\n (left, top, right, bottom).\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 31, "language": "en" } }, { "id": 60747, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/index/package_finder.py", "file_name": "package_finder.py", "fun_name": "_sort_key", "commit_message": "upd; format", "code": "def _sort_key(self, candidate):\n # type: (InstallationCandidate) -> CandidateSortingKey\n \n valid_tags = self._supported_tags\n support_num = len(valid_tags)\n build_tag = () # type: BuildTag\n binary_preference = 0\n link = candidate.link\n if link.is_wheel:\n # can raise InvalidWheelFilename\n wheel = Wheel(link.filename)\n try:\n pri = -(wheel.find_most_preferred_tag(\n valid_tags, self._wheel_tag_preferences\n ))\n except ValueError:\n raise UnsupportedWheel(\n \"{} is not a supported wheel for this platform. It \"\n \"can't be sorted.\".format(wheel.filename)\n )\n if self._prefer_binary:\n binary_preference = 1\n if wheel.build_tag is not None:\n match = re.match(r'^(\\d+)(.*)$', wheel.build_tag)\n build_tag_groups = match.groups()\n build_tag = (int(build_tag_groups[0]), build_tag_groups[1])\n else: # sdist\n pri = -(support_num)\n has_allowed_hash = int(link.is_hash_allowed(self._hashes))\n yank_value = -1 * int(link.is_yanked) # -1 for yanked.\n return (\n has_allowed_hash, yank_value, binary_preference, candidate.version,\n pri, build_tag,\n )\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 479, "n_words": 109, "vocab_size": 79, "complexity": 5, "nloc": 31, "token_counts": 178, "n_ast_nodes": 289, "n_identifiers": 32, "d_id": 12274, "documentation": { "docstring": "\n Function to pass as the `key` argument to a call to sorted() to sort\n InstallationCandidates by preference.\n\n Returns a tuple such that tuples sorting as greater using Python's\n default comparison operator are more preferred.\n\n The preference is as follows:\n\n First and foremost, candidates with allowed (matching) hashes are\n always preferred over candidates without matching hashes. This is\n because e.g. if the only candidate with an allowed hash is yanked,\n we still want to use that candidate.\n\n Second, excepting hash considerations, candidates that have been\n yanked (in the sense of PEP 592) are always less preferred than\n candidates that haven't been yanked. Then:\n\n If not finding wheels, they are sorted by version only.\n If finding wheels, then the sort order is by version, then:\n 1. existing installs\n 2. wheels ordered via Wheel.support_index_min(self._supported_tags)\n 3. 
source archives\n If prefer_binary was set, then all wheels are sorted above sources.\n\n Note: it was considered to embed this logic into the Link\n comparison operators, but then different sdist links\n with the same version, would have to be considered equal\n ", "n_words": 173, "vocab_size": 123, "n_whitespaces": 346, "language": "en" } }, { "id": 21815, "commit_id": "8faa74cdc9da20cfdcc69f5ec29b91112c95b4c9", "repo": "pipenv", "path": "pipenv/vendor/tomlkit/parser.py", "file_name": "parser.py", "fun_name": "extract", "commit_message": "Update tomlkit==0.9.2\n\nUsed:\n\n python -m invoke vendoring.update --package=tomlkit", "code": "def extract(self) -> str:\n \n return self._src.extract()\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 5, "token_counts": 16, "n_ast_nodes": 29, "n_identifiers": 4, "d_id": 4057, "documentation": { "docstring": "\n Extracts the value between marker and index\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 269599, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "cumsum", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def cumsum(x, axis=0):\n \n return tf.cumsum(x, axis=axis)\n\n\n@keras_export(\"keras.backend.cumprod\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.backend.cumprod\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 12, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 21, "n_ast_nodes": 63, "n_identifiers": 10, "d_id": 80220, "documentation": { "docstring": "Cumulative sum of the values in a tensor, alongside the specified axis.\n\n Args:\n x: A tensor or variable.\n axis: An integer, the axis to compute the sum.\n\n Returns:\n A tensor of the cumulative sum of values of `x` along `axis`.\n ", "n_words": 40, "vocab_size": 29, "n_whitespaces": 70, "language": "en" } }, { "id": 35036, "commit_id": "7732d0fe7a759c9844215920e9f1c5540eafb1a6", "repo": "transformers", "path": "src/transformers/generation_tf_utils.py", "file_name": "generation_tf_utils.py", "fun_name": "is_done", "commit_message": "Upgrade black to version ~=22.0 (#15565)\n\n* Upgrade black to version ~=22.0\r\n\r\n* Check copies\r\n\r\n* Fix code", "code": "def is_done(self, best_sum_logprobs, cur_len):\n \n\n if len(self) < self.num_beams:\n return False\n elif self.early_stopping:\n return True\n else:\n cur_score = best_sum_logprobs / cur_len**self.length_penalty\n ret = self.worst_score >= cur_score\n return ret\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 110, "n_words": 27, "vocab_size": 22, "complexity": 3, "nloc": 9, "token_counts": 49, "n_ast_nodes": 79, "n_identifiers": 11, "d_id": 6381, "documentation": { "docstring": "\n If there are enough hypotheses and that none of the hypotheses being generated can become better than the worst\n one in the heap, then we are done with this sentence.\n ", "n_words": 30, "vocab_size": 26, "n_whitespaces": 52, "language": "en" } }, 
{ "id": 125001, "commit_id": "569fe0109629048d08e1d9e023f7769f10bd2244", "repo": "ray", "path": "rllib/offline/tests/test_dataset_reader.py", "file_name": "test_dataset_reader.py", "fun_name": "test_dataset_shard_with_only_local", "commit_message": "[RLlib] improved unittests for dataset_reader and fixed bugs (#26458)", "code": "def test_dataset_shard_with_only_local(self):\n \n config = {\n \"input\": \"dataset\",\n \"input_config\": {\"format\": \"json\", \"paths\": self.dset_path},\n }\n\n # two ways of doing this:\n\n # we have no remote workers\n _, shards = get_dataset_and_shards(config, num_workers=0)\n\n assert len(shards) == 1\n assert isinstance(shards[0], ray.data.Dataset)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 115, "n_words": 37, "vocab_size": 34, "complexity": 1, "nloc": 8, "token_counts": 61, "n_ast_nodes": 107, "n_identifiers": 13, "d_id": 27741, "documentation": { "docstring": "Tests whether the dataset_shard function works correctly for a single shard\n for the local worker.", "n_words": 15, "vocab_size": 13, "n_whitespaces": 21, "language": "en" } }, { "id": 276383, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/testing_infra/test_utils.py", "file_name": "test_utils.py", "fun_name": "for_all_test_methods", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def for_all_test_methods(decorator, *args, **kwargs):\n \n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 7, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 3, "token_counts": 16, "n_ast_nodes": 21, "n_identifiers": 4, "d_id": 81645, "documentation": { "docstring": "Generate class-level decorator from given method-level decorator.\n\n It is expected for the given decorator to take some arguments and return\n a method that is then called on the test method to produce a decorated\n method.\n\n Args:\n decorator: The decorator to apply.\n *args: Positional arguments\n **kwargs: Keyword arguments\n Returns: Function that will decorate a given classes test methods with the\n decorator.\n ", "n_words": 60, "vocab_size": 43, "n_whitespaces": 98, "language": "en" } }, { "id": 224062, "commit_id": "e7f07cc82ab2be920ab426ba07456d8b2592714d", "repo": "mkdocs", "path": "mkdocs/utils/__init__.py", "file_name": "__init__.py", "fun_name": "get_theme_dir", "commit_message": "Remove spaces at the ends of docstrings, normalize quotes", "code": "def get_theme_dir(name):\n \n\n theme = get_themes()[name]\n return os.path.dirname(os.path.abspath(theme.load().__file__))\n\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 16, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 36, "n_ast_nodes": 61, "n_identifiers": 10, "d_id": 57209, "documentation": { "docstring": "Return the directory of an installed theme by name.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 3245, "commit_id": "61f4138eeb028287425f6007d692bf7faa808e75", "repo": "PySyft", "path": "packages/syft/tests/syft/core/adp/data_subject_ledger_test.py", "file_name": "data_subject_ledger_test.py", "fun_name": "test_cache", "commit_message": "Add tests for ledger and cache", "code": "def test_cache() -> None:\n \n ledger_store = 
DictLedgerStore()\n user_key = b\"1322\"\n ledger = DataSubjectLedger.get_or_create(store=ledger_store, user_key=user_key)\n\n assert (\n ledger._cache_constant2epsilon[0] == 0.05372712063485988\n ), \"The first value in the cache is incorrect\"\n assert (\n ledger._cache_constant2epsilon[1] == 0.07773597369831031\n ), \"Has the DP cache been changed?\"\n\n rdp_700k = convert_constants_to_indices(np.array([700_000]))\n assert (\n ledger._cache_constant2epsilon.take(rdp_700k)[0] == 706213.1816144075\n ), \"Has the DP cache been changed?\"\n rdp_50 = convert_constants_to_indices(np.array([50]))\n assert (\n ledger._cache_constant2epsilon.take(rdp_50)[0] == 100.68990516105825\n ), \"Has the DP cache been changed?\"\n assert (\n len(ledger._cache_constant2epsilon) >= 1_200_000\n ), \"Has the cache been changed?\"\n\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 164, "n_words": 81, "vocab_size": 43, "complexity": 1, "nloc": 22, "token_counts": 139, "n_ast_nodes": 211, "n_identifiers": 16, "d_id": 413, "documentation": { "docstring": "Ensure the most up to date RDP-to-epsilon cache is being used.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 272375, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/layers/attention/base_dense_attention.py", "file_name": "base_dense_attention.py", "fun_name": "_apply_scores", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _apply_scores(self, scores, value, scores_mask=None, training=None):\n \n if scores_mask is not None:\n padding_mask = tf.logical_not(scores_mask)\n # Bias so padding positions do not contribute to attention distribution.\n # Note 65504. is the max float16 value.\n if scores.dtype is tf.float16:\n scores -= 65504.0 * tf.cast(padding_mask, dtype=scores.dtype)\n else:\n scores -= 1.0e9 * tf.cast(padding_mask, dtype=scores.dtype)\n if training is None:\n training = backend.learning_phase()\n weights = tf.nn.softmax(scores)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 184, "n_words": 60, "vocab_size": 44, "complexity": 4, "nloc": 15, "token_counts": 133, "n_ast_nodes": 153, "n_identifiers": 17, "d_id": 81002, "documentation": { "docstring": "Applies attention scores to the given value tensor.\n\n To use this method in your attention layer, follow the steps:\n\n * Use `query` tensor of shape `[batch_size, Tq]` and `key` tensor of shape\n `[batch_size, Tv]` to calculate the attention `scores`.\n * Pass `scores` and `value` tensors to this method. The method applies\n `scores_mask`, calculates `attention_distribution = softmax(scores)`, then\n returns `matmul(attention_distribution, value).\n * Apply `query_mask` and return the result.\n\n Args:\n scores: Scores float tensor of shape `[batch_size, Tq, Tv]`.\n value: Value tensor of shape `[batch_size, Tv, dim]`.\n scores_mask: A boolean mask `Tensor` of shape `[batch_size, 1, Tv]` or\n `[batch_size, Tq, Tv]`. If given, scores at positions where\n `scores_mask==False` do not contribute to the result. 
It must contain\n at least one `True` value in each line along the last dimension.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (no dropout).\n\n Returns:\n Tensor of shape `[batch_size, Tq, dim]`.\n Attention scores after masking and softmax with shape\n `[batch_size, Tq, Tv]`.\n ", "n_words": 165, "vocab_size": 108, "n_whitespaces": 350, "language": "en" } }, { "id": 153772, "commit_id": "3d4404e9d9a9b2a3327f8aee664a8e71ac1f18b8", "repo": "modin", "path": "modin/core/execution/ray/implementations/pandas_on_ray/partitioning/virtual_partition.py", "file_name": "virtual_partition.py", "fun_name": "drain_call_queue", "commit_message": "FEAT-#4412: Add Batch Pipeline API to Modin (#4452)\n\nCo-authored-by: Yaroslav Igoshev \r\nCo-authored-by: Mahesh Vashishtha \r\nSigned-off-by: Rehan Durrani ", "code": "def drain_call_queue(self, num_splits=None):\n \n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 10, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 7, "token_counts": 40, "n_ast_nodes": 18, "n_identifiers": 3, "d_id": 35596, "documentation": { "docstring": "\n Execute all operations stored in this partition's call queue.\n\n Parameters\n ----------\n num_splits : int, default: None\n The number of times to split the result object.\n ", "n_words": 25, "vocab_size": 25, "n_whitespaces": 72, "language": "en" } }, { "id": 95876, "commit_id": "2bad4600970d40bc799143571ab708a19e9774d1", "repo": "sentry", "path": "src/sentry/utils/auth.py", "file_name": "auth.py", "fun_name": "has_completed_sso", "commit_message": "chore(auth): remove deprecated SSO key check (#30889)\n\n* remove deprecated sso values\r\n\r\n* clean up checking logic\r\n\r\n* update metric name", "code": "def has_completed_sso(request, organization_id) -> bool:\n \n sso_session_in_request = request.session.get(\n SsoSession.django_session_key(organization_id), None\n )\n\n if not sso_session_in_request:\n metrics.incr(\"sso.no-value-in-session\")\n return False\n\n django_session_value = SsoSession.from_django_session_value(\n organization_id, sso_session_in_request\n )\n\n if not django_session_value.is_sso_authtime_fresh():\n metrics.incr(\"sso.session-timed-out\")\n return False\n\n metrics.incr(\"sso.session-verify-success\")\n\n return True\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 101, "n_words": 32, "vocab_size": 24, "complexity": 3, "nloc": 18, "token_counts": 73, "n_ast_nodes": 125, "n_identifiers": 14, "d_id": 19252, "documentation": { "docstring": "\n look for the org id under the sso session key, and check that the timestamp isn't past our expiry limit\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 27, "language": "en" } }, { "id": 100422, "commit_id": "c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "setup.py", "file_name": "setup.py", "fun_name": "process_arguments", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for 
compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def process_arguments(self):\n \n args = [arg for arg in sys.argv] # pylint:disable=unnecessary-comprehension\n if self.updater:\n from lib.utils import get_backend # pylint:disable=import-outside-toplevel\n args.append(f\"--{get_backend()}\")\n\n for arg in args:\n if arg == \"--installer\":\n self.is_installer = True\n if arg == \"--nvidia\":\n self.enable_cuda = True\n if arg == \"--amd\":\n self.enable_amd = True\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 175, "n_words": 45, "vocab_size": 28, "complexity": 7, "nloc": 12, "token_counts": 70, "n_ast_nodes": 129, "n_identifiers": 14, "d_id": 19905, "documentation": { "docstring": " Process any cli arguments and dummy in cli arguments if calling from updater. ", "n_words": 13, "vocab_size": 11, "n_whitespaces": 14, "language": "en" } }, { "id": 100336, "commit_id": "c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "lib/gui/utils.py", "file_name": "utils.py", "fun_name": "_load_icons", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. 
Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def _load_icons():\n \n size = get_config().user_config_dict.get(\"icon_size\", 16)\n size = int(round(size * get_config().scaling_factor))\n icons = {}\n pathicons = os.path.join(PATHCACHE, \"icons\")\n for fname in os.listdir(pathicons):\n name, ext = os.path.splitext(fname)\n if ext != \".png\":\n continue\n img = Image.open(os.path.join(pathicons, fname))\n img = ImageTk.PhotoImage(img.resize((size, size), resample=Image.HAMMING))\n icons[name] = img\n logger.debug(icons)\n return icons\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 172, "n_words": 46, "vocab_size": 34, "complexity": 3, "nloc": 14, "token_counts": 132, "n_ast_nodes": 216, "n_identifiers": 29, "d_id": 19832, "documentation": { "docstring": " Scan the icons cache folder and load the icons into :attr:`icons` for retrieval\n throughout the GUI.\n\n Returns\n -------\n dict:\n The icons formatted as described in :attr:`icons`\n\n ", "n_words": 26, "vocab_size": 21, "n_whitespaces": 73, "language": "en" } }, { "id": 220805, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/tasks.py", "file_name": "tasks.py", "fun_name": "run_coroutine_threadsafe", "commit_message": "add python 3.10.4 for windows", "code": "def run_coroutine_threadsafe(coro, loop):\n \n if not coroutines.iscoroutine(coro):\n raise TypeError('A coroutine object is required')\n future = concurrent.futures.Future()\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 31, "n_words": 15, "vocab_size": 15, "complexity": 2, "nloc": 7, "token_counts": 41, "n_ast_nodes": 55, "n_identifiers": 10, "d_id": 56124, "documentation": { "docstring": "Submit a coroutine object to a given event loop.\n\n Return a concurrent.futures.Future to access the result.\n ", "n_words": 16, "vocab_size": 13, "n_whitespaces": 22, "language": "en" } }, { "id": 46871, "commit_id": "bca849b4586c7446438f959b62903da4b997b9ea", "repo": "airflow", "path": "dev/breeze/src/airflow_breeze/utils/path_utils.py", "file_name": "path_utils.py", "fun_name": "find_airflow_sources_root_to_operate_on", "commit_message": "Switch to `pipx` as the only installation Breeze2 method (#22740)\n\nSwitching Breeze2 to only use `pipx` for installation of Breeze2\r\ndue to problems it might cause for autocompletion if entrypoint\r\nis not avaiable on PATH.", "code": "def find_airflow_sources_root_to_operate_on() -> Path:\n \n installation_airflow_sources = get_installation_airflow_sources()\n if installation_airflow_sources is None and not skip_upgrade_check():\n console.print(\n \"\\n[red]Breeze should only be installed with -e flag[/]\\n\\n\"\n \"[bright_yellow]Please go to Airflow sources and run[/]\\n\\n\"\n f\" {NAME} self-upgrade --force\\n\"\n )\n sys.exit(1)\n airflow_sources = get_used_airflow_sources()\n if not skip_upgrade_check():\n # only print warning and sleep if not producing complete results\n print_warning_if_different_sources(airflow_sources)\n print_warning_if_setup_changed()\n console.print(f\"[bright_blue]Airflow sources: {airflow_sources}[/]\")\n os.chdir(str(airflow_sources))\n return airflow_sources\n\n\nAIRFLOW_SOURCES_ROOT = 
find_airflow_sources_root_to_operate_on()\nBUILD_CACHE_DIR = AIRFLOW_SOURCES_ROOT / '.build'\nFILES_DIR = AIRFLOW_SOURCES_ROOT / 'files'\nMSSQL_DATA_VOLUME = AIRFLOW_SOURCES_ROOT / 'tmp_mssql_volume'\nMYPY_CACHE_DIR = AIRFLOW_SOURCES_ROOT / '.mypy_cache'\nLOGS_DIR = AIRFLOW_SOURCES_ROOT / 'logs'\nDIST_DIR = AIRFLOW_SOURCES_ROOT / 'dist'\nSCRIPTS_CI_DIR = AIRFLOW_SOURCES_ROOT / 'scripts' / 'ci'\nDOCKER_CONTEXT_DIR = AIRFLOW_SOURCES_ROOT / 'docker-context-files'\nCACHE_TMP_FILE_DIR = tempfile.TemporaryDirectory()\nOUTPUT_LOG = Path(CACHE_TMP_FILE_DIR.name, 'out.log')\nBREEZE_SOURCES_ROOT = AIRFLOW_SOURCES_ROOT / \"dev\" / \"breeze\"\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 211, "n_words": 120, "vocab_size": 77, "complexity": 4, "nloc": 38, "token_counts": 73, "n_ast_nodes": 286, "n_identifiers": 32, "d_id": 9029, "documentation": { "docstring": "\n Find the root of airflow sources we operate on. Handle the case when Breeze is installed via `pipx` from\n a different source tree, so it searches upwards of the current directory to find the right root of\n airflow directory we are actually in. This **might** be different than the sources of Airflow Breeze\n was installed from.\n\n If not found, we operate on Airflow sources that we were installed it. This handles the case when\n we run Breeze from a \"random\" directory.\n\n This method also handles the following errors and warnings:\n\n * It fails (and exits hard) if Breeze is installed in non-editable mode (in which case it will\n not find the Airflow sources when walking upwards the directory where it is installed)\n * It warns (with 2 seconds timeout) if you are using Breeze from a different airflow sources than\n the one you operate on.\n * If we are running in the same source tree as where Breeze was installed from (so no warning above),\n it warns (with 2 seconds timeout) if there is a change in setup.* files of Breeze since installation\n time. 
In such case usesr is encouraged to re-install Breeze to update dependencies.\n\n :return: Path for the found sources.\n\n ", "n_words": 202, "vocab_size": 109, "n_whitespaces": 280, "language": "en" } }, { "id": 194778, "commit_id": "81f722d29045a7a5841d0931a082ded1d1f13863", "repo": "ParlAI", "path": "parlai/scripts/generate_model_card.py", "file_name": "generate_model_card.py", "fun_name": "process_task", "commit_message": "autoformat (#4378)", "code": "def process_task(self, task):\n \n # processing tasks so that no arguments are included\n # unless it's a fromfile or jsonfile one\n if 'fromfile:' in task or 'jsonfile:' in task or 'internal:' in task:\n return None if self.ignore_task else task\n return task\n\n ##########################################\n # generation setup-related class functions\n ##########################################\n", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 102, "n_words": 47, "vocab_size": 35, "complexity": 5, "nloc": 4, "token_counts": 31, "n_ast_nodes": 60, "n_identifiers": 4, "d_id": 47071, "documentation": { "docstring": "\n tries to remap tasks to their external version, and then may ignore the tasks\n w/o ext.\n\n version depending on `ignore_task`\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 49, "language": "en" } }, { "id": 323119, "commit_id": "44a290e94d1becd1f09fddc3d873f9e19c9d6919", "repo": "PaddleNLP", "path": "paddlenlp/trainer/trainer_args.py", "file_name": "trainer_args.py", "fun_name": "should_save", "commit_message": "[Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. (#1761)\n\n* add some datasets for finetune.\r\n\r\n* support fine tune for all tastks.\r\n\r\n* add trainer prototype.\r\n\r\n* init verison for paddlenlp trainer.\r\n\r\n* refine trainer.\r\n\r\n* update for some details.\r\n\r\n* support multi-cards training evaluation.\r\n\r\n* support load from ckpt.\r\n\r\n* support for export inference model.\r\n\r\n* first version of trainer.\r\n\r\n* seq cls support clue.\r\n\r\n* trainer support for token classification and question answersing tasks.\r\n\r\n* fix as reviews.\r\n\r\nCo-authored-by: Zeyu Chen ", "code": "def should_save(self):\n \n if self.save_on_each_node:\n return self.local_process_index == 0\n else:\n return self.process_index == 0\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 56, "n_words": 13, "vocab_size": 10, "complexity": 2, "nloc": 5, "token_counts": 25, "n_ast_nodes": 43, "n_identifiers": 5, "d_id": 118366, "documentation": { "docstring": "\n Whether or not the current process should write to disk, e.g., to save models and checkpoints.\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 31, "language": "en" } }, { "id": 137403, "commit_id": "f9ec2d1ae2e14e1f1ed38d315dfd643f600dc397", "repo": "ray", "path": "rllib/algorithms/algorithm_config.py", "file_name": "algorithm_config.py", "fun_name": "get_default_rl_module_class", "commit_message": "[RLlib] Make RLModule initialization easy (#31069)\n\n1. Moved the `_enable_rl_module_api` signature into `rl_module()` api of the algorithmConfig.\r\n2. Added the ability for the user to override the entire RLModule from algorithmConfig by\r\nsimply changing the class.\r\n3. 
updated marl_module: we now have only one MARLModule base-class that can be used stand-alone, users can override it completely if they want.\r\n4. Removed test_torch_marl_module (Will add it back in a framework agnostic way)\r\n5. Updated TorchMARL and RL modules to use the new constructor format.\r\n6. Core tests now works independent of failures of PPORLModule.\r\n7. Core tests is now based on factory methods of RLModule.\r\n8. created a new isolated unittest for marl_module\r\n9. update ppo torch RL module to adhere to the new API changes.\r\n10. get_rl_module_class is now a instance method instead of classmethod\r\n11. made enabling the api more explicit from algo_config()\r\n\r\nSigned-off-by: Kourosh Hakhamaneshi ", "code": "def get_default_rl_module_class(self) -> Union[Type[\"RLModule\"], str]:\n \n raise NotImplementedError\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 11, "token_counts": 18, "n_ast_nodes": 31, "n_identifiers": 6, "d_id": 31158, "documentation": { "docstring": "Returns the RLModule class to use for this algorithm.\n\n Override this method in the sub-class to return the RLModule class type given\n the input framework.\n\n Returns:\n The RLModule class to use for this algorithm either as a class type or as\n a string (e.g. x.y.z).\n ", "n_words": 45, "vocab_size": 28, "n_whitespaces": 95, "language": "en" } }, { "id": 10837, "commit_id": "13edc16d806fb5d77a6849551178ccc75937f25f", "repo": "jina", "path": "jina/orchestrate/deployments/__init__.py", "file_name": "__init__.py", "fun_name": "host", "commit_message": "refactor: rename pod to deployment (#4230)\n\n* refactor: rename pod to deployment\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: undo daemon mistake\r\n\r\n* refactor: leftover cleanup\r\n\r\n* fix: more test fixes\r\n\r\n* fix: more fixes\r\n\r\n* fix: more fixes\r\n\r\n* fix: more fixes\r\n\r\n* fix: more tests\r\n\r\n* fix: fix more tests\r\n\r\n* refactor: fix more tests\r\n\r\n* refactor: more tests fixes\r\n\r\n* refactor: rename pea to pod\r\n\r\n* refactor: adjust docs\r\n\r\n* refactor: complete pea renaming\r\n\r\n* refactor: more fixes\r\n\r\n* fix: pea_type in k8s yamls\r\n\r\n* fix: adjust pod args name\r\n\r\n* refactor: rename peapods parser folder\r\n\r\n* fix: da init\r\n\r\nCo-authored-by: Jina Dev Bot ", "code": "def host(self) -> str:\n \n return self.first_pod_args.host\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 7, "token_counts": 14, "n_ast_nodes": 25, "n_identifiers": 4, "d_id": 1940, "documentation": { "docstring": "Get the host name of this deployment\n\n\n .. 
# noqa: DAR201\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 25, "language": "en" } }, { "id": 241434, "commit_id": "321c77230a794d9f0595038a2674c955889ed0e3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/basedatatypes.py", "file_name": "basedatatypes.py", "fun_name": "select_traces", "commit_message": "Avoid selecting all traces on selector=0", "code": "def select_traces(self, selector=None, row=None, col=None, secondary_y=None):\n \n if not selector and not isinstance(selector, int):\n selector = {}\n\n if row is not None or col is not None or secondary_y is not None:\n grid_ref = self._validate_get_grid_ref()\n filter_by_subplot = True\n\n if row is None and col is not None:\n # All rows for column\n grid_subplot_ref_tuples = [ref_row[col - 1] for ref_row in grid_ref]\n elif col is None and row is not None:\n # All columns for row\n grid_subplot_ref_tuples = grid_ref[row - 1]\n elif col is not None and row is not None:\n # Single grid cell\n grid_subplot_ref_tuples = [grid_ref[row - 1][col - 1]]\n else:\n # row and col are None, secondary_y not None\n grid_subplot_ref_tuples = [\n refs for refs_row in grid_ref for refs in refs_row\n ]\n\n # Collect list of subplot refs, taking secondary_y into account\n grid_subplot_refs = []\n for refs in grid_subplot_ref_tuples:\n if not refs:\n continue\n if secondary_y is not True:\n grid_subplot_refs.append(refs[0])\n\n if secondary_y is not False and len(refs) > 1:\n grid_subplot_refs.append(refs[1])\n\n else:\n filter_by_subplot = False\n grid_subplot_refs = None\n\n return self._perform_select_traces(\n filter_by_subplot, grid_subplot_refs, selector\n )\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 614, "n_words": 173, "vocab_size": 78, "complexity": 20, "nloc": 30, "token_counts": 217, "n_ast_nodes": 335, "n_identifiers": 19, "d_id": 69523, "documentation": { "docstring": "\n Select traces from a particular subplot cell and/or traces\n that satisfy custom selection criteria.\n\n Parameters\n ----------\n selector: dict, function, int, str or None (default None)\n Dict to use as selection criteria.\n Traces will be selected if they contain properties corresponding\n to all of the dictionary's keys, with values that exactly match\n the supplied values. If None (the default), all traces are\n selected. If a function, it must be a function accepting a single\n argument and returning a boolean. The function will be called on\n each trace and those for which the function returned True\n will be in the selection. If an int N, the Nth trace matching row\n and col will be selected (N can be negative). If a string S, the selector\n is equivalent to dict(type=S).\n row, col: int or None (default None)\n Subplot row and column index of traces to select.\n To select traces by row and column, the Figure must have been\n created using plotly.subplots.make_subplots. If None\n (the default), all traces are selected.\n secondary_y: boolean or None (default None)\n * If True, only select traces associated with the secondary\n y-axis of the subplot.\n * If False, only select traces associated with the primary\n y-axis of the subplot.\n * If None (the default), do not filter traces based on secondary\n y-axis.\n\n To select traces by secondary y-axis, the Figure must have been\n created using plotly.subplots.make_subplots. 
See the docstring\n for the specs argument to make_subplots for more info on\n creating subplots with secondary y-axes.\n Returns\n -------\n generator\n Generator that iterates through all of the traces that satisfy\n all of the specified selection criteria\n ", "n_words": 264, "vocab_size": 140, "n_whitespaces": 635, "language": "en" } }, { "id": 243, "commit_id": "f56f2b6fb96017472095a43f7d6b13bb8c21718f", "repo": "PySyft", "path": "packages/syft/src/syft/core/tensor/autodp/intermediate_gamma.py", "file_name": "intermediate_gamma.py", "fun_name": "_max_values", "commit_message": "reduced reliance on .flat_scalars for __add__ which is very slow", "code": "def _max_values(self) -> np.array:\n \n if self._max_vals_cache is not None:\n return self._max_vals_cache\n\n return np.array(list(map(lambda x: x.max_val, self.flat_scalars))).reshape(\n self.shape\n )\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 68, "n_words": 18, "vocab_size": 16, "complexity": 2, "nloc": 9, "token_counts": 51, "n_ast_nodes": 82, "n_identifiers": 12, "d_id": 74, "documentation": { "docstring": "WARNING: DO NOT MAKE THIS AVAILABLE TO THE POINTER!!!\n DO NOT ADD THIS METHOD TO THE AST!!!\n ", "n_words": 17, "vocab_size": 12, "n_whitespaces": 31, "language": "en" } }, { "id": 196850, "commit_id": "1eeb01e15f06c6692a5bfd6fd2d2a3002d864a07", "repo": "sympy", "path": "sympy/integrals/integrals.py", "file_name": "integrals.py", "fun_name": "integrate", "commit_message": "Fix a few docstring formatting issues", "code": "def integrate(*args, meijerg=None, conds='piecewise', risch=None, heurisch=None, manual=None, **kwargs):\n \n doit_flags = {\n 'deep': False,\n 'meijerg': meijerg,\n 'conds': conds,\n 'risch': risch,\n 'heurisch': heurisch,\n 'manual': manual\n }\n integral = Integral(*args, **kwargs)\n\n if isinstance(integral, Integral):\n return integral.doit(**doit_flags)\n else:\n new_args = [a.doit(**doit_flags) if isinstance(a, Integral) else a\n for a in integral.args]\n return integral.func(*new_args)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 144, "n_words": 48, "vocab_size": 43, "complexity": 4, "nloc": 16, "token_counts": 119, "n_ast_nodes": 190, "n_identifiers": 16, "d_id": 48217, "documentation": { "docstring": "integrate(f, var, ...)\n\n Explanation\n ===========\n\n Compute definite or indefinite integral of one or more variables\n using Risch-Norman algorithm and table lookup. This procedure is\n able to handle elementary algebraic and transcendental functions\n and also a huge class of special functions, including Airy,\n Bessel, Whittaker and Lambert.\n\n var can be:\n\n - a symbol -- indefinite integration\n - a tuple (symbol, a) -- indefinite integration with result\n given with ``a`` replacing ``symbol``\n - a tuple (symbol, a, b) -- definite integration\n\n Several variables can be specified, in which case the result is\n multiple integration. (If var is omitted and the integrand is\n univariate, the indefinite integral in that variable will be performed.)\n\n Indefinite integrals are returned without terms that are independent\n of the integration variables. (see examples)\n\n Definite improper integrals often entail delicate convergence\n conditions. Pass conds='piecewise', 'separate' or 'none' to have\n these returned, respectively, as a Piecewise function, as a separate\n result (i.e. 
result will be a tuple), or not at all (default is\n 'piecewise').\n\n **Strategy**\n\n SymPy uses various approaches to definite integration. One method is to\n find an antiderivative for the integrand, and then use the fundamental\n theorem of calculus. Various functions are implemented to integrate\n polynomial, rational and trigonometric functions, and integrands\n containing DiracDelta terms.\n\n SymPy also implements the part of the Risch algorithm, which is a decision\n procedure for integrating elementary functions, i.e., the algorithm can\n either find an elementary antiderivative, or prove that one does not\n exist. There is also a (very successful, albeit somewhat slow) general\n implementation of the heuristic Risch algorithm. This algorithm will\n eventually be phased out as more of the full Risch algorithm is\n implemented. See the docstring of Integral._eval_integral() for more\n details on computing the antiderivative using algebraic methods.\n\n The option risch=True can be used to use only the (full) Risch algorithm.\n This is useful if you want to know if an elementary function has an\n elementary antiderivative. If the indefinite Integral returned by this\n function is an instance of NonElementaryIntegral, that means that the\n Risch algorithm has proven that integral to be non-elementary. Note that\n by default, additional methods (such as the Meijer G method outlined\n below) are tried on these integrals, as they may be expressible in terms\n of special functions, so if you only care about elementary answers, use\n risch=True. Also note that an unevaluated Integral returned by this\n function is not necessarily a NonElementaryIntegral, even with risch=True,\n as it may just be an indication that the particular part of the Risch\n algorithm needed to integrate that function is not yet implemented.\n\n Another family of strategies comes from re-writing the integrand in\n terms of so-called Meijer G-functions. Indefinite integrals of a\n single G-function can always be computed, and the definite integral\n of a product of two G-functions can be computed from zero to\n infinity. Various strategies are implemented to rewrite integrands\n as G-functions, and use this information to compute integrals (see\n the ``meijerint`` module).\n\n The option manual=True can be used to use only an algorithm that tries\n to mimic integration by hand. This algorithm does not handle as many\n integrands as the other algorithms implemented but may return results in\n a more familiar form. The ``manualintegrate`` module has functions that\n return the steps used (see the module docstring for more information).\n\n In general, the algebraic methods work best for computing\n antiderivatives of (possibly complicated) combinations of elementary\n functions. 
The G-function methods work best for computing definite\n integrals from zero to infinity of moderately complicated\n combinations of special functions, or indefinite integrals of very\n simple combinations of special functions.\n\n The strategy employed by the integration code is as follows:\n\n - If computing a definite integral, and both limits are real,\n and at least one limit is +- oo, try the G-function method of\n definite integration first.\n\n - Try to find an antiderivative, using all available methods, ordered\n by performance (that is try fastest method first, slowest last; in\n particular polynomial integration is tried first, Meijer\n G-functions second to last, and heuristic Risch last).\n\n - If still not successful, try G-functions irrespective of the\n limits.\n\n The option meijerg=True, False, None can be used to, respectively:\n always use G-function methods and no others, never use G-function\n methods, or use all available methods (in order as described above).\n It defaults to None.\n\n Examples\n ========\n\n >>> from sympy import integrate, log, exp, oo\n >>> from sympy.abc import a, x, y\n\n >>> integrate(x*y, x)\n x**2*y/2\n\n >>> integrate(log(x), x)\n x*log(x) - x\n\n >>> integrate(log(x), (x, 1, a))\n a*log(a) - a + 1\n\n >>> integrate(x)\n x**2/2\n\n Terms that are independent of x are dropped by indefinite integration:\n\n >>> from sympy import sqrt\n >>> integrate(sqrt(1 + x), (x, 0, x))\n 2*(x + 1)**(3/2)/3 - 2/3\n >>> integrate(sqrt(1 + x), x)\n 2*(x + 1)**(3/2)/3\n\n >>> integrate(x*y)\n Traceback (most recent call last):\n ...\n ValueError: specify integration variables to integrate x*y\n\n Note that ``integrate(x)`` syntax is meant only for convenience\n in interactive sessions and should be avoided in library code.\n\n >>> integrate(x**a*exp(-x), (x, 0, oo)) # same as conds='piecewise'\n Piecewise((gamma(a + 1), re(a) > -1),\n (Integral(x**a*exp(-x), (x, 0, oo)), True))\n\n >>> integrate(x**a*exp(-x), (x, 0, oo), conds='none')\n gamma(a + 1)\n\n >>> integrate(x**a*exp(-x), (x, 0, oo), conds='separate')\n (gamma(a + 1), re(a) > -1)\n\n See Also\n ========\n\n Integral, Integral.doit\n\n ", "n_words": 865, "vocab_size": 406, "n_whitespaces": 1292, "language": "en" } }, { "id": 147021, "commit_id": "62a5404369d71a84fdd4da9c4bfd597fce33f2f6", "repo": "ray", "path": "python/ray/_private/usage/usage_lib.py", "file_name": "usage_lib.py", "fun_name": "get_cluster_status_to_report", "commit_message": "Collect more usage stats data (#23167)", "code": "def get_cluster_status_to_report(gcs_client, num_retries) -> ClusterStatusToReport:\n \n try:\n cluster_status = ray._private.utils.internal_kv_get_with_retry(\n gcs_client,\n ray.ray_constants.DEBUG_AUTOSCALING_STATUS,\n namespace=None,\n num_retries=num_retries,\n )\n if not cluster_status:\n return ClusterStatusToReport()\n\n result = ClusterStatusToReport()\n to_GiB = 1 / 2 ** 30\n cluster_status = json.loads(cluster_status.decode(\"utf-8\"))\n if (\n \"load_metrics_report\" not in cluster_status\n or \"usage\" not in cluster_status[\"load_metrics_report\"]\n ):\n return ClusterStatusToReport()\n\n usage = cluster_status[\"load_metrics_report\"][\"usage\"]\n # usage is a map from resource to (used, total) pair\n if \"CPU\" in usage:\n result.total_num_cpus = int(usage[\"CPU\"][1])\n if \"GPU\" in usage:\n result.total_num_gpus = int(usage[\"GPU\"][1])\n if \"memory\" in usage:\n result.total_memory_gb = usage[\"memory\"][1] * to_GiB\n if \"object_store_memory\" in usage:\n result.total_object_store_memory_gb 
= (\n usage[\"object_store_memory\"][1] * to_GiB\n )\n return result\n except Exception as e:\n logger.info(f\"Failed to get cluster status to report {e}\")\n return ClusterStatusToReport()\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 396, "n_words": 110, "vocab_size": 70, "complexity": 9, "nloc": 44, "token_counts": 195, "n_ast_nodes": 337, "n_identifiers": 27, "d_id": 33835, "documentation": { "docstring": "Get the current status of this cluster.\n\n It is a blocking API.\n\n Params:\n gcs_client (GCSClient): The GCS client to perform KV operation GET.\n num_retries (int): Max number of times to retry if GET fails.\n\n Returns:\n The current cluster status or empty if it fails to get that information.\n ", "n_words": 48, "vocab_size": 41, "n_whitespaces": 81, "language": "en" } }, { "id": 231602, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/_mapbox.py", "file_name": "_mapbox.py", "fun_name": "zoom", "commit_message": "switch to black .22", "code": "def zoom(self):\n \n return self[\"zoom\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 63046, "documentation": { "docstring": "\n Sets the zoom level of the map (mapbox.zoom).\n\n The 'zoom' property is a number and may be specified as:\n - An int or float\n\n Returns\n -------\n int|float\n ", "n_words": 27, "vocab_size": 26, "n_whitespaces": 79, "language": "en" } }, { "id": 186683, "commit_id": "7d9e9a49005de7961e84d2a7c608db57dbab3046", "repo": "certbot", "path": "certbot-apache/certbot_apache/_internal/parser.py", "file_name": "parser.py", "fun_name": "_set_locations", "commit_message": "Add typing to certbot.apache (#9071)\n\n* Add typing to certbot.apache\r\n\r\nCo-authored-by: Adrien Ferrand ", "code": "def _set_locations(self) -> Dict[str, str]:\n \n default: str = self.loc[\"root\"]\n\n temp: str = os.path.join(self.root, \"ports.conf\")\n if os.path.isfile(temp):\n listen = temp\n name = temp\n else:\n listen = default\n name = default\n\n return {\"default\": default, \"listen\": listen, \"name\": name}\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 122, "n_words": 36, "vocab_size": 26, "complexity": 2, "nloc": 16, "token_counts": 77, "n_ast_nodes": 130, "n_identifiers": 14, "d_id": 45590, "documentation": { "docstring": "Set default location for directives.\n\n Locations are given as file_paths\n .. 
todo:: Make sure that files are included\n\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 39, "language": "en" } }, { "id": 67881, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/report/item_prices/item_prices.py", "file_name": "item_prices.py", "fun_name": "get_valuation_rate", "commit_message": "style: format code with black", "code": "def get_valuation_rate():\n\t\n\n\titem_val_rate_map = {}\n\n\tfor d in frappe.db.sql(\n\t\t,\n\t\tas_dict=1,\n\t):\n\t\titem_val_rate_map.setdefault(d.item_code, d.val_rate)\n\n\treturn item_val_rate_map\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 8, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 10, "token_counts": 40, "n_ast_nodes": 66, "n_identifiers": 10, "d_id": 14653, "documentation": { "docstring": "Get an average valuation rate of an item from all warehousesselect item_code,\n\t\tsum(actual_qty*valuation_rate)/sum(actual_qty) as val_rate\n\t\tfrom tabBin where actual_qty > 0 group by item_code", "n_words": 24, "vocab_size": 22, "n_whitespaces": 21, "language": "en" } }, { "id": 246024, "commit_id": "3b51c763ba5601e155e3e27a46cddf0370da83eb", "repo": "synapse", "path": "tests/rest/admin/test_federation.py", "file_name": "test_federation.py", "fun_name": "test_order_by", "commit_message": "Fix get federation status of destination if no error occured (#11593)", "code": "def test_order_by(self) -> None:\n \n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 11, "n_words": 4, "vocab_size": 4, "complexity": 2, "nloc": 48, "token_counts": 645, "n_ast_nodes": 16, "n_identifiers": 2, "d_id": 70953, "documentation": { "docstring": "\n Testing order list with parameter `order_by`\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 21, "language": "en" } }, { "id": 206916, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "scripts/manage_translations.py", "file_name": "manage_translations.py", "fun_name": "_get_locale_dirs", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _get_locale_dirs(resources, include_core=True):\n \n contrib_dir = os.path.join(os.getcwd(), \"django\", \"contrib\")\n dirs = []\n\n # Collect all locale directories\n for contrib_name in os.listdir(contrib_dir):\n path = os.path.join(contrib_dir, contrib_name, \"locale\")\n if os.path.isdir(path):\n dirs.append((contrib_name, path))\n if contrib_name in HAVE_JS:\n dirs.append((\"%s-js\" % contrib_name, path))\n if include_core:\n dirs.insert(0, (\"core\", os.path.join(os.getcwd(), \"django\", \"conf\", \"locale\")))\n\n # Filter by resources, if any\n if resources is not None:\n res_names = [d[0] for d in dirs]\n dirs = [ld for ld in dirs if ld[0] in resources]\n if len(resources) > len(dirs):\n print(\n \"You have specified some unknown resources. 
\"\n \"Available resource names are: %s\" % (\", \".join(res_names),)\n )\n exit(1)\n return dirs\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 267, "n_words": 98, "vocab_size": 72, "complexity": 10, "nloc": 21, "token_counts": 191, "n_ast_nodes": 315, "n_identifiers": 21, "d_id": 51797, "documentation": { "docstring": "\n Return a tuple (contrib name, absolute path) for all locale directories,\n optionally including the django core catalog.\n If resources list is not None, filter directories matching resources content.\n ", "n_words": 28, "vocab_size": 27, "n_whitespaces": 41, "language": "en" } }, { "id": 266994, "commit_id": "5b44035983aba190791df479fa7004ce20872042", "repo": "ansible", "path": "lib/ansible/inventory/manager.py", "file_name": "manager.py", "fun_name": "parse_sources", "commit_message": "Hide \"[WARNING]: No inventory was parsed\" message (#65499)\n\n* Add config option INVENTORY_UNPARSED_WARNING to hide the warning \"No inventory was parsed, only implicit localhost is available\"", "code": "def parse_sources(self, cache=False):\n \n\n parsed = False\n # allow for multiple inventory parsing\n for source in self._sources:\n\n if source:\n if ',' not in source:\n source = unfrackpath(source, follow=False)\n parse = self.parse_source(source, cache=cache)\n if parse and not parsed:\n parsed = True\n\n if parsed:\n # do post processing\n self._inventory.reconcile_inventory()\n else:\n if C.INVENTORY_UNPARSED_IS_FAILED:\n raise AnsibleError(\"No inventory was parsed, please check your configuration and options.\")\n elif C.INVENTORY_UNPARSED_WARNING:\n display.warning(\"No inventory was parsed, only implicit localhost is available\")\n\n for group in self.groups.values():\n group.vars = combine_vars(group.vars, get_vars_from_inventory_sources(self._loader, self._sources, [group], 'inventory'))\n for host in self.hosts.values():\n host.vars = combine_vars(host.vars, get_vars_from_inventory_sources(self._loader, self._sources, [host], 'inventory'))\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 339, "n_words": 93, "vocab_size": 63, "complexity": 11, "nloc": 20, "token_counts": 169, "n_ast_nodes": 272, "n_identifiers": 27, "d_id": 78680, "documentation": { "docstring": " iterate over inventory sources and parse each one to populate it", "n_words": 11, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 248389, "commit_id": "c52abc1cfdd9e5480cdb4a03d626fe61cacc6573", "repo": "synapse", "path": "tests/federation/test_federation_sender.py", "file_name": "test_federation_sender.py", "fun_name": "test_send_receipts_with_backoff", "commit_message": "Additional constants for EDU types. 
(#12884)\n\nInstead of hard-coding strings in many places.", "code": "def test_send_receipts_with_backoff(self):\n \n mock_send_transaction = (\n self.hs.get_federation_transport_client().send_transaction\n )\n mock_send_transaction.return_value = make_awaitable({})\n\n sender = self.hs.get_federation_sender()\n receipt = ReadReceipt(\n \"room_id\", \"m.read\", \"user_id\", [\"event_id\"], {\"ts\": 1234}\n )\n self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))\n\n self.pump()\n\n # expect a call to send_transaction\n mock_send_transaction.assert_called_once()\n json_cb = mock_send_transaction.call_args[0][1]\n data = json_cb()\n self.assertEqual(\n data[\"edus\"],\n [\n {\n \"edu_type\": EduTypes.RECEIPT,\n \"content\": {\n \"room_id\": {\n \"m.read\": {\n \"user_id\": {\n \"event_ids\": [\"event_id\"],\n \"data\": {\"ts\": 1234},\n }\n }\n }\n },\n }\n ],\n )\n mock_send_transaction.reset_mock()\n\n # send the second RR\n receipt = ReadReceipt(\n \"room_id\", \"m.read\", \"user_id\", [\"other_id\"], {\"ts\": 1234}\n )\n self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt)))\n self.pump()\n mock_send_transaction.assert_not_called()\n\n self.reactor.advance(19)\n mock_send_transaction.assert_not_called()\n\n self.reactor.advance(10)\n mock_send_transaction.assert_called_once()\n json_cb = mock_send_transaction.call_args[0][1]\n data = json_cb()\n self.assertEqual(\n data[\"edus\"],\n [\n {\n \"edu_type\": EduTypes.RECEIPT,\n \"content\": {\n \"room_id\": {\n \"m.read\": {\n \"user_id\": {\n \"event_ids\": [\"other_id\"],\n \"data\": {\"ts\": 1234},\n }\n }\n }\n },\n }\n ],\n )\n\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 1066, "n_words": 119, "vocab_size": 57, "complexity": 1, "nloc": 63, "token_counts": 296, "n_ast_nodes": 519, "n_identifiers": 28, "d_id": 72260, "documentation": { "docstring": "Send two receipts in quick succession; the second should be flushed, but\n only after 20ms", "n_words": 15, "vocab_size": 15, "n_whitespaces": 21, "language": "en" } }, { "id": 108332, "commit_id": "0abe0ce2f2748d1d0383154d045da3609a4b871b", "repo": "matplotlib", "path": "lib/matplotlib/colors.py", "file_name": "colors.py", "fun_name": "register", "commit_message": "Add a registry for color sequences\n\nColor sequences are simply lists of colors, that we store by name in\na registry. The registry is modelled similar to the ColormapRegistry\nto 1) support immutable builtin color sequences and 2) to return copies\nso that one cannot mess with the global definition of the color sequence\nthrough an obtained instance.\n\nFor now, I've made the sequences used for `ListedColormap`s available\nas builtin sequences, but that's open for discussion.\n\nMore usage documentation should be added in the color examples and/or\ntutorials, but I'll wait with that till after the general approval of\nthe structure and API. 
One common use case will be\n\n```\nplt.rc_params['axes.prop_cycle'] = plt.cycler(color=plt.color_sequences['Pastel1')\n```\n\nCo-authored-by: Elliott Sales de Andrade ", "code": "def register(self, name, color_list):\n \n if name in self._BUILTIN_COLOR_SEQUENCES:\n raise ValueError(f\"{name!r} is a reserved name for a builtin \"\n \"color sequence\")\n\n color_list = list(color_list) # force copy and coerce type to list\n for color in color_list:\n try:\n to_rgba(color)\n except ValueError:\n raise ValueError(\n f\"{color!r} is not a valid color specification\")\n\n self._color_sequences[name] = color_list\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 197, "n_words": 51, "vocab_size": 41, "complexity": 4, "nloc": 12, "token_counts": 58, "n_ast_nodes": 108, "n_identifiers": 10, "d_id": 23144, "documentation": { "docstring": "\n Register a new color sequence.\n\n The color sequence registry stores a copy of the given *color_list*, so\n that future changes to the original list do not affect the registered\n color sequence. Think of this as the registry taking a snapshot\n of *color_list* at registration.\n\n Parameters\n ----------\n name : str\n The name for the color sequence.\n\n color_list : list of colors\n An iterable returning valid Matplotlib colors when iterating over.\n Note however that the returned color sequence will always be a\n list regardless of the input type.\n\n ", "n_words": 86, "vocab_size": 58, "n_whitespaces": 201, "language": "en" } }, { "id": 111927, "commit_id": "a16212368718dccf6e3e07f0d9da950a365a3f90", "repo": "nni", "path": "docs/source/tutorials/quantization_customize.py", "file_name": "quantization_customize.py", "fun_name": "quantize_output", "commit_message": "update customize compressor (#4639)", "code": "def quantize_output(self, output, config, **kwargs):\n \n\n # Put your code to generate `new_output` here\n new_output = ...\n return new_output\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 46, "n_words": 18, "vocab_size": 17, "complexity": 1, "nloc": 3, "token_counts": 18, "n_ast_nodes": 30, "n_identifiers": 6, "d_id": 24510, "documentation": { "docstring": "\n quantize should overload this method to quantize output.\n This method is effectively hooked to `:meth:`forward` of the model.\n\n Parameters\n ----------\n output : Tensor\n output that needs to be quantized\n config : dict\n the configuration for output quantization\n ", "n_words": 37, "vocab_size": 29, "n_whitespaces": 109, "language": "en" } }, { "id": 309593, "commit_id": "dee843bf6e5ca84a94f336a239f6a6138c4c28e6", "repo": "core", "path": "homeassistant/components/webostv/__init__.py", "file_name": "__init__.py", "fun_name": "connect", "commit_message": "Add LG webOS Smart TV config flow support (#64117)\n\n* Add webOS Smart TV config flow support (#53256)\r\n\r\n* Add Webostv config flow\r\n\r\n* Fix tests mocks and apply review comments\r\n\r\n* Apply review comments\r\n\r\n* Change config flow to use ssdp UDN as unique_id\r\n\r\n* Fix device info\r\n\r\n* More review comments\r\n\r\n* Fix _async_check_configured_entry\r\n\r\n* Remove turn on script\r\n\r\n* Add webOS Smart TV device triggers (#53752)\r\n\r\n* Add webOS Smart TV config flow support (#53256)\r\n\r\n* Add Webostv config flow\r\n\r\n* Fix tests mocks and apply review comments\r\n\r\n* Apply review comments\r\n\r\n* Change 
config flow to use ssdp UDN as unique_id\r\n\r\n* Fix device info\r\n\r\n* More review comments\r\n\r\n* Fix _async_check_configured_entry\r\n\r\n* Remove turn on script\r\n\r\n* Add webOS Smart TV device triggers (#53752)\r\n\r\n* Fix webOS Smart TV mypy and pylint errors (#62620)\r\n\r\n* Change webOS Smart TV PyPi aiopylgtv package to bscpylgtv (#62633)\r\n\r\n* Change webOS Smart TV PyPi aiopylgtv package to bscpylgtv\r\n\r\n* Update bscpylgtv to 0.2.8 (revised websockets requirment)\r\n\r\n* Change webOS Smart TV PyPi package to aiowebostv (#63759)\r\n\r\n* Change webOS Smart TV PyPi package to aiowebostv\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* webOS TV check UUID for user added device (#63817)\r\n\r\n* webOS TV check uuid when for user added device\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Add test for form abort and host update\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Rework webOS Smart TV device trigger to custom trigger platform (#63950)\r\n\r\n* Rework webOS Smart TV device trigger to custom trigger platform\r\n\r\n* Review comments and add tests\r\n\r\n* Fix webOS TV import from YAML (#63996)\r\n\r\n* Fix webOS TV import from YAML\r\n\r\n* Fix requirements\r\n\r\n* Migrate YAML entities unique id to UUID\r\n\r\n* Add backoff to migration task delay\r\n\r\n* Assert result data and unique_id\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Add codeowner\r\n\r\nCo-authored-by: Martin Hjelmare ", "code": "async def connect(self) -> None:\n \n self.client = WebOsClient(self.host, self.client_key)\n with suppress(*WEBOSTV_EXCEPTIONS, WebOsTvPairError):\n await self.client.connect()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 46, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 5, "token_counts": 39, "n_ast_nodes": 68, "n_identifiers": 9, "d_id": 108291, "documentation": { "docstring": "Attempt a connection, but fail gracefully if tv is off for example.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 321142, "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", "repo": "qutebrowser", "path": "qutebrowser/browser/webengine/webenginetab.py", "file_name": "webenginetab.py", "fun_name": "_inject_greasemonkey_scripts", "commit_message": "Run scripts/dev/rewrite_enums.py", "code": "def _inject_greasemonkey_scripts(self, scripts):\n \n if sip.isdeleted(self._widget):\n return\n\n # Since we are inserting scripts into a per-tab collection,\n # rather than just injecting scripts on page load, we need to\n # make sure we replace existing scripts, not just add new ones.\n # While, taking care not to remove any other scripts that might\n # have been added elsewhere, like the one for stylesheets.\n page_scripts = self._widget.page().scripts()\n self._remove_all_greasemonkey_scripts()\n\n seen_names = set()\n for script in scripts:\n while script.full_name() in seen_names:\n script.dedup_suffix += 1\n seen_names.add(script.full_name())\n\n new_script = QWebEngineScript()\n\n try:\n world = int(script.jsworld)\n if not 0 <= world <= qtutils.MAX_WORLD_ID:\n log.greasemonkey.error(\n f\"script {script.name} has invalid value for '@qute-js-world'\"\n f\": {script.jsworld}, should be between 0 and \"\n f\"{qtutils.MAX_WORLD_ID}\")\n continue\n except 
ValueError:\n try:\n world = _JS_WORLD_MAP[usertypes.JsWorld[script.jsworld.lower()]]\n except KeyError:\n log.greasemonkey.error(\n f\"script {script.name} has invalid value for '@qute-js-world'\"\n f\": {script.jsworld}\")\n continue\n new_script.setWorldId(world)\n\n # Corresponds to \"@run-at document-end\" which is the default according to\n # https://wiki.greasespot.net/Metadata_Block#.40run-at - however,\n # QtWebEngine uses QWebEngineScript.InjectionPoint.Deferred (@run-at document-idle) as\n # default.\n #\n # NOTE that this needs to be done before setSourceCode, so that\n # QtWebEngine's parsing of GreaseMonkey tags will override it if there is a\n # @run-at comment.\n new_script.setInjectionPoint(QWebEngineScript.InjectionPoint.DocumentReady)\n\n new_script.setSourceCode(script.code())\n new_script.setName(script.full_name())\n new_script.setRunsOnSubFrames(script.runs_on_sub_frames)\n\n if script.needs_document_end_workaround():\n log.greasemonkey.debug(\n f\"Forcing @run-at document-end for {script.name}\")\n new_script.setInjectionPoint(QWebEngineScript.InjectionPoint.DocumentReady)\n\n log.greasemonkey.debug(f'adding script: {new_script.name()}')\n page_scripts.insert(new_script)\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 856, "n_words": 203, "vocab_size": 145, "complexity": 8, "nloc": 38, "token_counts": 232, "n_ast_nodes": 453, "n_identifiers": 44, "d_id": 117562, "documentation": { "docstring": "Register user JavaScript files with the current tab.\n\n Args:\n scripts: A list of GreasemonkeyScripts.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 39, "language": "en" } }, { "id": 72431, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/views/mixins.py", "file_name": "mixins.py", "fun_name": "get_heading", "commit_message": "Reformat with black", "code": "def get_heading(self, queryset, field):\n \n heading_override = self.export_headings.get(field)\n if heading_override:\n return force_str(heading_override)\n try:\n return force_str(queryset.model._meta.get_field(field).verbose_name.title())\n except (AttributeError, FieldDoesNotExist):\n return force_str(field)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 87, "n_words": 19, "vocab_size": 17, "complexity": 3, "nloc": 8, "token_counts": 62, "n_ast_nodes": 100, "n_identifiers": 15, "d_id": 15894, "documentation": { "docstring": "Get the heading label for a given field for a spreadsheet generated from queryset", "n_words": 14, "vocab_size": 12, "n_whitespaces": 13, "language": "en" } }, { "id": 200702, "commit_id": "c254b28e2ba0f4cdfe9ff6523ffa6e369ab415ce", "repo": "sympy", "path": "sympy/algebras/quaternion.py", "file_name": "quaternion.py", "fun_name": "to_euler", "commit_message": "added two options to to_euler", "code": "def to_euler(self, seq, angle_addition=True, avoid_square_root=False):\n \n extrinsic = _is_extrinsic(seq)\n i, j, k = seq.lower()\n\n # get index corresponding to elementary basis vectors\n i = 'xyz'.index(i) + 1\n j = 'xyz'.index(j) + 1\n k = 'xyz'.index(k) + 1\n\n if not extrinsic:\n i, k = k, i\n\n # check if sequence is symmetric\n symmetric = i == k\n if symmetric:\n k = 6 - i - j\n\n # parity of the permutation\n sign = (i - j) * (j - k) * (k - i) // 2\n\n # permutate elements\n elements = [self.a, self.b, self.c, self.d]\n a = 
elements[0]\n b = elements[i]\n c = elements[j]\n d = elements[k] * sign\n\n if not symmetric:\n a, b, c, d = a - c, b + d, c + a, d - b\n\n if avoid_square_root:\n angle_j = acos((a*a + b*b - c*c - d*d) / self.norm()**2)\n else:\n angle_j = 2 * atan2(sqrt(c * c + d * d), sqrt(a * a + b * b))\n\n if angle_addition:\n angle_i = atan2(b, a) + atan2(d, c)\n angle_k = atan2(b, a) - atan2(d, c)\n else:\n angle_i = atan2(b*c + a*d, a*c - b*d)\n angle_k = atan2(b*c - a*d, a*c + b*d)\n\n # for Tait-Bryan angles\n if not symmetric:\n angle_j -= pi / 2\n angle_i *= sign\n\n if extrinsic:\n return angle_k, angle_j, angle_i\n else:\n return angle_i, angle_j, angle_k\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 557, "n_words": 218, "vocab_size": 105, "complexity": 8, "nloc": 36, "token_counts": 349, "n_ast_nodes": 545, "n_identifiers": 27, "d_id": 49777, "documentation": { "docstring": "Returns Euler angles representing same rotation as the quaternion,\n in the sequence given by `seq`. This implements the method described\n in [1]_.\n\n Parameters\n ==========\n\n seq : string of length 3\n Represents the sequence of rotations.\n For intrinsic rotations, seq must be all lowercase and its elements\n must be from the set `{'x', 'y', 'z'}`\n For extrinsic rotations, seq must be all uppercase and its elements\n must be from the set `{'X', 'Y', 'Z'}`\n\n angle_addition : bool\n Default : True\n When True, first and third angles are given as an addition and\n subtraction of two simpler `atan2` expressions. When False, the first\n and third angles are each given by a single more complicated\n `atan2` expression. This equivalent is given by:\n\n --math::\n \\operatorname{atan_2} (b,a) \\pm \\operatorname{atan_2} (d,c) =\n \\operatorname{atan_2} (bc\\pm ad, ac\\mp bd)\n\n avoid_square_root : bool\n Default : False\n When True, the second angle is calculated with an expression based on\n `acos`, which is slightly more complicated but avoids a square\n root. When False, second angle is calculated with `atan2`, which\n is simpler and can be better for numerical reasons (some\n numerical implementations of `acos` have problems near zero).\n\n\n Returns\n =======\n\n Tuple\n The Euler angles calculated from the quaternion\n\n Examples\n ========\n\n >>> from sympy import Quaternion\n >>> from sympy.abc import a, b, c, d\n >>> euler = Quaternion(a, b, c, d).to_euler('zyz')\n >>> euler\n (-atan2(-b, c) + atan2(d, a),\n 2*atan2(sqrt(b**2 + c**2), sqrt(a**2 + d**2)),\n atan2(-b, c) + atan2(d, a))\n\n\n References\n ==========\n\n .. [1] https://doi.org/10.1371/journal.pone.0276302\n\n ", "n_words": 242, "vocab_size": 147, "n_whitespaces": 633, "language": "en" } }, { "id": 259009, "commit_id": "b6bc5599ae063a90c27c9c1fef1940d620b5a206", "repo": "scikit-learn", "path": "sklearn/naive_bayes.py", "file_name": "naive_bayes.py", "fun_name": "_update_class_log_prior", "commit_message": "DOC Clarifies comments and docstrings in _BaseDiscreteNB (#22565)\n\nCo-authored-by: Thomas J. 
Fan ", "code": "def _update_class_log_prior(self, class_prior=None):\n \n n_classes = len(self.classes_)\n if class_prior is not None:\n if len(class_prior) != n_classes:\n raise ValueError(\"Number of priors must match number of classes.\")\n self.class_log_prior_ = np.log(class_prior)\n elif self.fit_prior:\n with warnings.catch_warnings():\n # silence the warning when count is 0 because class was not yet\n # observed\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n log_class_count = np.log(self.class_count_)\n\n # empirical prior, with sample_weight taken into account\n self.class_log_prior_ = log_class_count - np.log(self.class_count_.sum())\n else:\n self.class_log_prior_ = np.full(n_classes, -np.log(n_classes))\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 245, "n_words": 69, "vocab_size": 55, "complexity": 4, "nloc": 13, "token_counts": 115, "n_ast_nodes": 195, "n_identifiers": 19, "d_id": 75522, "documentation": { "docstring": "Update class log priors.\n\n The class log priors are based on `class_prior`, class count or the\n number of classes. This method is called each time `fit` or\n `partial_fit` update the model.\n ", "n_words": 31, "vocab_size": 26, "n_whitespaces": 59, "language": "en" } }, { "id": 198535, "commit_id": "99ede53223eafb56b2c2b4ab7b8a6764b628c9d9", "repo": "sympy", "path": "sympy/physics/continuum_mechanics/truss.py", "file_name": "truss.py", "fun_name": "change_member_label", "commit_message": "remove_load method added along with other changes", "code": "def change_member_label(self, label, new_label):\n \n if label not in self._member_labels:\n raise ValueError(\"No such member exists for the Truss\")\n\n else:\n members_duplicate = self._members.copy()\n for member in members_duplicate:\n if member[0] == label:\n self._member_labels[self.member_labels.index(member[0])] = new_label\n self._member_nodes[new_label] = [self._member_nodes[label][0], self._member_nodes[label][1]]\n self._member_nodes.pop(label)\n self._internal_forces[new_label] = self._internal_forces[label]\n self._internal_forces.pop(label)\n self._members[self._members.index([label, member[1], member[2]])] = [new_label, member[1], member[2]]\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 231, "n_words": 48, "vocab_size": 39, "complexity": 4, "nloc": 13, "token_counts": 161, "n_ast_nodes": 241, "n_identifiers": 15, "d_id": 48987, "documentation": { "docstring": "\n This method changes the label of a member.\n\n Parameters\n ==========\n label: String or Symbol\n The label of the member for which the label has\n to be changed.\n\n new_label: String or Symbol\n The new label of the member.\n\n Examples\n ========\n\n >>> from sympy.physics.continuum_mechanics.truss import Truss\n >>> t = Truss()\n >>> t.add_node('A', 0, 0)\n >>> t.add_node('B', 3, 0)\n >>> t.nodes\n [('A', 0, 0), ('B', 3, 0)]\n >>> t.change_node_label('A', 'C')\n >>> t.nodes\n [('C', 0, 0), ('B', 3, 0)]\n >>> t.add_member('BC', 'B', 'C')\n >>> t.members\n [['BC', 'B', 'C']]\n >>> t.change_member_label('BC', 'BC_new')\n >>> t.members\n [['BC_new', 'B', 'C']]\n ", "n_words": 92, "vocab_size": 55, "n_whitespaces": 287, "language": "en" } }, { "id": 233094, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/parcoords/_dimension.py", "file_name": 
"_dimension.py", "fun_name": "multiselect", "commit_message": "switch to black .22", "code": "def multiselect(self):\n \n return self[\"multiselect\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 64538, "documentation": { "docstring": "\n Do we allow multiple selection ranges or just a single range?\n\n The 'multiselect' property must be specified as a bool\n (either True, or False)\n\n Returns\n -------\n bool\n ", "n_words": 27, "vocab_size": 24, "n_whitespaces": 77, "language": "en" } }, { "id": 202903, "commit_id": "0ab58c120939093fea90822f376e1866fc714d1f", "repo": "django", "path": "django/db/migrations/questioner.py", "file_name": "questioner.py", "fun_name": "_ask_default", "commit_message": "Refs #29026 -- Allowed customizing InteractiveMigrationQuestioner's prompt destination.\n\nPreviously, the questioner did not obey the value of stdout provided\nto the command.", "code": "def _ask_default(self, default=''):\n \n self.prompt_output.write('Please enter the default value as valid Python.')\n if default:\n self.prompt_output.write(\n f\"Accept the default '{default}' by pressing 'Enter' or \"\n f\"provide another value.\"\n )\n self.prompt_output.write(\n 'The datetime and django.utils.timezone modules are available, so '\n 'it is possible to provide e.g. timezone.now as a value.'\n )\n self.prompt_output.write(\"Type 'exit' to exit this prompt\")\n while True:\n if default:\n prompt = \"[default: {}] >>> \".format(default)\n else:\n prompt = \">>> \"\n self.prompt_output.write(prompt, ending='')\n code = input()\n if not code and default:\n code = default\n if not code:\n self.prompt_output.write(\"Please enter some code, or 'exit' (without quotes) to exit.\")\n elif code == \"exit\":\n sys.exit(1)\n else:\n try:\n return eval(code, {}, {'datetime': datetime, 'timezone': timezone})\n except (SyntaxError, NameError) as e:\n self.prompt_output.write('Invalid input: %s' % e)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 473, "n_words": 119, "vocab_size": 91, "complexity": 9, "nloc": 30, "token_counts": 158, "n_ast_nodes": 288, "n_identifiers": 18, "d_id": 50164, "documentation": { "docstring": "\n Prompt for a default value.\n\n The ``default`` argument allows providing a custom default value (as a\n string) which will be shown to the user and used as the return value\n if the user doesn't provide any other input.\n ", "n_words": 38, "vocab_size": 31, "n_whitespaces": 74, "language": "en" } }, { "id": 67383, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/selling/page/point_of_sale/point_of_sale.py", "file_name": "point_of_sale.py", "fun_name": "item_group_query", "commit_message": "style: format code with black", "code": "def item_group_query(doctype, txt, searchfield, start, page_len, filters):\n\titem_groups = []\n\tcond = \"1=1\"\n\tpos_profile = filters.get(\"pos_profile\")\n\n\tif pos_profile:\n\t\titem_groups = get_item_groups(pos_profile)\n\n\t\tif item_groups:\n\t\t\tcond = \"name in (%s)\" % (\", \".join([\"%s\"] * len(item_groups)))\n\t\t\tcond = cond % tuple(item_groups)\n\n\treturn frappe.db.sql(\n\t\t.format(\n\t\t\tcondition=cond, start=start, page_len=page_len\n\t\t),\n\t\t{\"txt\": \"%%%s%%\" % 
txt},\n\t)\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 17, "n_whitespaces": 35, "n_words": 51, "vocab_size": 39, "complexity": 3, "nloc": 16, "token_counts": 102, "n_ast_nodes": 181, "n_identifiers": 21, "d_id": 14511, "documentation": { "docstring": " select distinct name from `tabItem Group`\n\t\t\twhere {condition} and (name like %(txt)s) limit {start}, {page_len}", "n_words": 15, "vocab_size": 15, "n_whitespaces": 14, "language": "en" } }, { "id": 50991, "commit_id": "6b42963d62833925ffed1cdb73400e7d528a5353", "repo": "PaddleHub", "path": "modules/image/keypoint_detection/hand_pose_localization/model.py", "file_name": "model.py", "fun_name": "load_config", "commit_message": "update hand_pose_localization (#1967)\n\n* update hand_pose_localization\r\n\r\n* add clean func", "code": "def load_config(self, modelpath, use_gpu, gpu_id, use_mkldnn, cpu_threads):\r\n \r\n # 对运行位置进行配置\r\n if use_gpu:\r\n try:\r\n int(os.environ.get('CUDA_VISIBLE_DEVICES'))\r\n except Exception:\r\n print(\r\n )\r\n use_gpu = False\r\n\r\n if os.path.isdir(modelpath):\r\n if os.path.exists(os.path.join(modelpath, \"__params__\")):\r\n # __model__ + __params__\r\n model = os.path.join(modelpath, \"__model__\")\r\n params = os.path.join(modelpath, \"__params__\")\r\n config = Config(model, params)\r\n elif os.path.exists(os.path.join(modelpath, \"params\")):\r\n # model + params\r\n model = os.path.join(modelpath, \"model\")\r\n params = os.path.join(modelpath, \"params\")\r\n config = Config(model, params)\r\n elif os.path.exists(os.path.join(modelpath, \"__model__\")):\r\n # __model__ + others\r\n config = Config(modelpath)\r\n else:\r\n raise Exception(\r\n \"Error! Can\\'t find the model in: %s. Please check your model path.\" % os.path.abspath(modelpath))\r\n elif os.path.exists(modelpath + \".pdmodel\"):\r\n # *.pdmodel + *.pdiparams\r\n model = modelpath + \".pdmodel\"\r\n params = modelpath + \".pdiparams\"\r\n config = Config(model, params)\r\n elif isinstance(modelpath, Config):\r\n config = modelpath\r\n else:\r\n raise Exception(\r\n \"Error! Can\\'t find the model in: %s. Please check your model path.\" % os.path.abspath(modelpath))\r\n\r\n # 设置参数\r\n if use_gpu:\r\n config.enable_use_gpu(100, gpu_id)\r\n else:\r\n config.disable_gpu()\r\n config.set_cpu_math_library_num_threads(cpu_threads)\r\n if use_mkldnn:\r\n config.enable_mkldnn()\r\n\r\n config.disable_glog_info()\r\n\r\n # 返回配置\r\n return config\r\n\r\n # 预测器创建函数\r", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 699, "n_words": 151, "vocab_size": 76, "complexity": 11, "nloc": 40, "token_counts": 291, "n_ast_nodes": 496, "n_identifiers": 28, "d_id": 10250, "documentation": { "docstring": "\r\n load the model config\r\n modelpath: inference model path\r\n use_gpu: use gpu or not\r\n use_mkldnn: use mkldnn or not\r\n Error! Unable to use GPU. Please set the environment variables \"CUDA_VISIBLE_DEVICES=GPU_id\" to use GPU. 
Now switch to CPU to continue...", "n_words": 38, "vocab_size": 27, "n_whitespaces": 73, "language": "en" } }, { "id": 100356, "commit_id": "c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "lib/model/losses_tf.py", "file_name": "losses_tf.py", "fun_name": "call", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def call(cls, y_true, y_pred):\n \n diff = K.abs(y_true - y_pred)\n max_loss = K.max(diff, axis=(1, 2), keepdims=True)\n loss = K.mean(max_loss, axis=-1)\n return loss\n\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 56, "n_words": 21, "vocab_size": 18, "complexity": 1, "nloc": 5, "token_counts": 55, "n_ast_nodes": 84, "n_identifiers": 13, "d_id": 19845, "documentation": { "docstring": " Call the L-inf norm loss function.\n\n Parameters\n ----------\n y_true: tensor or variable\n The ground truth value\n y_pred: tensor or variable\n The predicted value\n\n Returns\n -------\n tensor\n The loss value\n ", "n_words": 29, "vocab_size": 20, "n_whitespaces": 119, "language": "en" } }, { "id": 7805, "commit_id": "1e6dbeff57fc5065b97dd018b904b9907468676f", "repo": "ludwig", "path": "tests/integration_tests/test_preprocessing.py", "file_name": "test_preprocessing.py", "fun_name": "test_column_feature_type_mismatch_fill", "commit_message": "Treat dataset columns as object dtype during first pass of handle_missing_values (#2398)", "code": "def test_column_feature_type_mismatch_fill():\n \n cat_feat = category_feature()\n bin_feat = binary_feature()\n input_features = [cat_feat]\n output_features = [bin_feat]\n config = {\"input_features\": input_features, \"output_features\": output_features}\n\n # Construct dataframe with int-like column representing a categorical feature\n df = pd.DataFrame(\n {\n cat_feat[NAME]: pd.Series(pd.array([None] + [1] * 24, dtype=pd.Int64Dtype())),\n bin_feat[NAME]: pd.Series([True] * 25),\n }\n )\n\n # run preprocessing\n backend = LocalTestBackend()\n ludwig_model = LudwigModel(config, backend=backend)\n train_ds, val_ds, test_ds, _ = ludwig_model.preprocess(dataset=df)\n\n\n@pytest.mark.parametrize(\"format\", [\"file\", \"df\"])", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"format\", [\"file\", \"df\"])", "n_ast_errors": 1, "ast_levels": 17, "n_whitespaces": 140, "n_words": 66, "vocab_size": 56, "complexity": 1, "nloc": 15, "token_counts": 125, "n_ast_nodes": 230, "n_identifiers": 29, "d_id": 1273, "documentation": { "docstring": "Tests that we are able to fill missing values even in columns where the 
column dtype and desired feature\n dtype do not match.", "n_words": 23, "vocab_size": 22, "n_whitespaces": 25, "language": "en" } }, { "id": 156880, "commit_id": "8b95f983c232c1bd628e9cba0695d3ef229d290b", "repo": "dask", "path": "dask/array/backends.py", "file_name": "backends.py", "fun_name": "_numel_arraylike", "commit_message": "Sparse array reductions (#9342)", "code": "def _numel_arraylike(x, **kwargs):\n \n return _numel(x, coerce_np_ndarray=False, **kwargs)\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 21, "n_ast_nodes": 34, "n_identifiers": 5, "d_id": 36797, "documentation": { "docstring": "Numel implementation for arrays that want to return numel of the same type.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 323153, "commit_id": "44a290e94d1becd1f09fddc3d873f9e19c9d6919", "repo": "PaddleNLP", "path": "paddlenlp/trainer/trainer_callback.py", "file_name": "trainer_callback.py", "fun_name": "_new_step", "commit_message": "[Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. (#1761)\n\n* add some datasets for finetune.\r\n\r\n* support fine tune for all tastks.\r\n\r\n* add trainer prototype.\r\n\r\n* init verison for paddlenlp trainer.\r\n\r\n* refine trainer.\r\n\r\n* update for some details.\r\n\r\n* support multi-cards training evaluation.\r\n\r\n* support load from ckpt.\r\n\r\n* support for export inference model.\r\n\r\n* first version of trainer.\r\n\r\n* seq cls support clue.\r\n\r\n* trainer support for token classification and question answersing tasks.\r\n\r\n* fix as reviews.\r\n\r\nCo-authored-by: Zeyu Chen ", "code": "def _new_step(self):\n \n self.should_save = False\n self.should_evaluate = False\n self.should_log = False\n\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 39, "n_words": 11, "vocab_size": 7, "complexity": 1, "nloc": 4, "token_counts": 21, "n_ast_nodes": 37, "n_identifiers": 5, "d_id": 118389, "documentation": { "docstring": "Internal method that resets the variable for a new step.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 84150, "commit_id": "a142fbff85302c5e3acb2e204eca2e9c75dbc74b", "repo": "zulip", "path": "zerver/tests/test_upload.py", "file_name": "test_upload.py", "fun_name": "test_realm_quota", "commit_message": "tests: Refactor away result.json() calls with helpers.\n\nSigned-off-by: Zixuan James Li ", "code": "def test_realm_quota(self) -> None:\n \n self.login(\"hamlet\")\n\n d1 = StringIO(\"zulip!\")\n d1.name = \"dummy_1.txt\"\n result = self.client_post(\"/json/user_uploads\", {\"file\": d1})\n response_dict = self.assert_json_success(result)\n d1_path_id = re.sub(\"/user_uploads/\", \"\", response_dict[\"uri\"])\n d1_attachment = Attachment.objects.get(path_id=d1_path_id)\n\n realm = get_realm(\"zulip\")\n realm.upload_quota_gb = 1\n realm.save(update_fields=[\"upload_quota_gb\"])\n\n # The size of StringIO(\"zulip!\") is 6 bytes. 
Setting the size of\n # d1_attachment to realm.upload_quota_bytes() - 11 should allow\n # us to upload only one more attachment.\n quota = realm.upload_quota_bytes()\n assert quota is not None\n d1_attachment.size = quota - 11\n d1_attachment.save(update_fields=[\"size\"])\n\n d2 = StringIO(\"zulip!\")\n d2.name = \"dummy_2.txt\"\n result = self.client_post(\"/json/user_uploads\", {\"file\": d2})\n self.assert_json_success(result)\n\n d3 = StringIO(\"zulip!\")\n d3.name = \"dummy_3.txt\"\n result = self.client_post(\"/json/user_uploads\", {\"file\": d3})\n self.assert_json_error(result, \"Upload would exceed your organization's upload quota.\")\n\n realm.upload_quota_gb = None\n realm.save(update_fields=[\"upload_quota_gb\"])\n result = self.client_post(\"/json/user_uploads\", {\"file\": d3})\n self.assert_json_success(result)\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 327, "n_words": 117, "vocab_size": 69, "complexity": 1, "nloc": 30, "token_counts": 223, "n_ast_nodes": 402, "n_identifiers": 29, "d_id": 17789, "documentation": { "docstring": "\n Realm quota for uploading should not be exceeded.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 219890, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pyio.py", "file_name": "_pyio.py", "fun_name": "readable", "commit_message": "add python 3.10.4 for windows", "code": "def readable(self):\n \n return False\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 8, "n_ast_nodes": 16, "n_identifiers": 2, "d_id": 55883, "documentation": { "docstring": "Return a bool indicating whether object was opened for reading.\n\n If False, read() will raise OSError.\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 30, "language": "en" } }, { "id": 127340, "commit_id": "4c970cc88247f7cfa7351297b8b5050f2372742e", "repo": "ray", "path": "python/ray/dag/dag_node.py", "file_name": "dag_node.py", "fun_name": "get_object_refs_from_last_execute", "commit_message": "[serve] Visualize Deployment Graph with Gradio (#27897)", "code": "async def get_object_refs_from_last_execute(self) -> Dict[str, Any]:\n \n cache = {}\n for node_uuid, value in self.cache_from_last_execute.items():\n if isinstance(value, asyncio.Task):\n cache[node_uuid] = await value\n else:\n cache[node_uuid] = value\n\n return cache\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 107, "n_words": 27, "vocab_size": 21, "complexity": 3, "nloc": 13, "token_counts": 57, "n_ast_nodes": 92, "n_identifiers": 13, "d_id": 28418, "documentation": { "docstring": "Gets cached object refs from the last call to execute().\n\n After this DAG is executed through execute(), retrieves a map between node\n UUID to a reference to the return value of the default executor on that node.\n ", "n_words": 37, "vocab_size": 32, "n_whitespaces": 58, "language": "en" } }, { "id": 227325, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_layout.py", "file_name": "_layout.py", "fun_name": "editrevision", "commit_message": "switch to black .22", "code": "def editrevision(self):\n \n return 
self[\"editrevision\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 58998, "documentation": { "docstring": "\n Controls persistence of user-driven changes in `editable: true`\n configuration, other than trace names and axis titles. Defaults\n to `layout.uirevision`.\n\n The 'editrevision' property accepts values of any type\n\n Returns\n -------\n Any\n ", "n_words": 30, "vocab_size": 29, "n_whitespaces": 87, "language": "en" } }, { "id": 198361, "commit_id": "7d773eb18daaef3c54f34d1ac6cbc5b83a5bb16c", "repo": "sympy", "path": "sympy/combinatorics/partitions.py", "file_name": "partitions.py", "fun_name": "RGS_generalized", "commit_message": "Cleanup loops and ranges", "code": "def RGS_generalized(m):\n \n d = zeros(m + 1)\n for i in range(m + 1):\n d[0, i] = 1\n\n for i in range(1, m + 1):\n for j in range(m):\n if j <= m - i:\n d[i, j] = j * d[i - 1, j] + d[i - 1, j + 1]\n else:\n d[i, j] = 0\n return d\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 138, "n_words": 57, "vocab_size": 32, "complexity": 5, "nloc": 11, "token_counts": 99, "n_ast_nodes": 148, "n_identifiers": 7, "d_id": 48881, "documentation": { "docstring": "\n Computes the m + 1 generalized unrestricted growth strings\n and returns them as rows in matrix.\n\n Examples\n ========\n\n >>> from sympy.combinatorics.partitions import RGS_generalized\n >>> RGS_generalized(6)\n Matrix([\n [ 1, 1, 1, 1, 1, 1, 1],\n [ 1, 2, 3, 4, 5, 6, 0],\n [ 2, 5, 10, 17, 26, 0, 0],\n [ 5, 15, 37, 77, 0, 0, 0],\n [ 15, 52, 151, 0, 0, 0, 0],\n [ 52, 203, 0, 0, 0, 0, 0],\n [203, 0, 0, 0, 0, 0, 0]])\n ", "n_words": 81, "vocab_size": 46, "n_whitespaces": 162, "language": "en" } }, { "id": 110772, "commit_id": "eb52a34559bad8e86c85069e5af15d0eb3d5c6f9", "repo": "matplotlib", "path": "lib/matplotlib/axes/_base.py", "file_name": "_base.py", "fun_name": "_draw_rasterized", "commit_message": "DOC: add docstring to too-clever helper function", "code": "def _draw_rasterized(figure, artists, renderer):\n ", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 7, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 8, "token_counts": 37, "n_ast_nodes": 17, "n_identifiers": 4, "d_id": 24287, "documentation": { "docstring": "\n A helper function for rasterizing the list of artists.\n\n The bookkeeping to track if we are or are not in rasterizing mode\n with the mixed-mode backends is relatively complicated and is now\n handled in the matplotlib.artist.allow_rasterization decorator.\n\n This helper defines the absolute minimum methods and attributes on\n shim class to be compatible with that decorator and the uses it to\n rasterize the list of artists.\n\n This is maybe too-clever, but allows us to re-use the same code that is\n used on normal artists to participate in the \"are we rasterizing\"\n accounting.\n\n Please do not use this outside of the \"rasterize below a given zorder\"\n functionality of Axes.\n\n Parameters\n ----------\n figure : matplotlib.figure.Figure\n The figure all of the artists belong to (not checked). 
We need this\n because we can at the figure level suppress composition and insert each\n rasterized artist as it's own image.\n\n artists : List[matplotlib.artist.Artist]\n The list of Artists to be rasterized. These are assumed to all\n be in the same Figure.\n\n renderer : matplotlib.backendbases.RendererBase\n The currently active renderer\n\n Returns\n -------\n None\n\n ", "n_words": 173, "vocab_size": 112, "n_whitespaces": 281, "language": "en" } }, { "id": 247675, "commit_id": "dda9b7fc4d2e6ca84a1a994a7ff1943b590e71df", "repo": "synapse", "path": "tests/storage/test_account_data.py", "file_name": "test_account_data.py", "fun_name": "test_caching", "commit_message": "Use the ignored_users table to test event visibility & sync. (#12225)\n\nInstead of fetching the raw account data and re-parsing it. The\r\nignored_users table is a denormalised version of the account data\r\nfor quick searching.", "code": "def test_caching(self):\n \n # The first user ignores a user.\n self._update_ignore_list(\"@other:test\")\n self.assert_ignored(self.user, {\"@other:test\"})\n self.assert_ignorers(\"@other:test\", {self.user})\n\n # The second user ignores them.\n self._update_ignore_list(\"@other:test\", ignorer_user_id=\"@second:test\")\n self.assert_ignored(\"@second:test\", {\"@other:test\"})\n self.assert_ignorers(\"@other:test\", {self.user, \"@second:test\"})\n\n # The first user un-ignores them.\n self._update_ignore_list()\n self.assert_ignored(self.user, set())\n self.assert_ignorers(\"@other:test\", {\"@second:test\"})\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 129, "n_words": 38, "vocab_size": 25, "complexity": 1, "nloc": 10, "token_counts": 97, "n_ast_nodes": 177, "n_identifiers": 8, "d_id": 71828, "documentation": { "docstring": "Ensure that caching works properly between different users.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 42076, "commit_id": "6460a21555ba6557e1f6f06f4d677d9c19148169", "repo": "seaborn", "path": "seaborn/utils.py", "file_name": "utils.py", "fun_name": "_assign_default_kwargs", "commit_message": "Workaround for matplotlib rc_context issue (#2925)\n\n* Workaround for matplotlib rc_context issue\r\n\r\nFixes #2914\r\n\r\n* Add some additional comments about this workaround", "code": "def _assign_default_kwargs(kws, call_func, source_func):\n \n # This exists so that axes-level functions and figure-level functions can\n # both call a Plotter method while having the default kwargs be defined in\n # the signature of the axes-level function.\n # An alternative would be to have a decorator on the method that sets its\n # defaults based on those defined in the axes-level function.\n # Then the figure-level function would not need to worry about defaults.\n # I am not sure which is better.\n needed = inspect.signature(call_func).parameters\n defaults = inspect.signature(source_func).parameters\n\n for param in needed:\n if param in defaults and param not in kws:\n kws[param] = defaults[param].default\n\n return kws\n\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 159, "n_words": 105, "vocab_size": 68, "complexity": 4, "nloc": 7, "token_counts": 58, "n_ast_nodes": 97, "n_identifiers": 11, "d_id": 7476, "documentation": { "docstring": "Assign default kwargs for call_func using values from source_func.", "n_words": 9, "vocab_size": 
9, "n_whitespaces": 8, "language": "en" } }, { "id": 201183, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/auth_tests/test_checks.py", "file_name": "test_checks.py", "fun_name": "test_is_anonymous_authenticated_methods", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_is_anonymous_authenticated_methods(self):\n \n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 9, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 26, "token_counts": 99, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 49893, "documentation": { "docstring": "\n .is_anonymous/is_authenticated must not be methods.\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 21, "language": "en" } }, { "id": 111886, "commit_id": "21abc280257fb8868be61264abe42534aa09188b", "repo": "nni", "path": "nni/common/serializer.py", "file_name": "serializer.py", "fun_name": "trace_symbol", "commit_message": "Fix #4434: support pickle in serializer (#4552)", "code": "def trace_symbol(self) -> Any:\n \n raise NotImplementedError()\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 7, "token_counts": 12, "n_ast_nodes": 23, "n_identifiers": 4, "d_id": 24497, "documentation": { "docstring": "\n Symbol object. Could be a class or a function.\n ``get_hybrid_cls_or_func_name`` and ``import_cls_or_func_from_hybrid_name`` is a pair to\n convert the symbol into a string and convert the string back to symbol.\n ", "n_words": 29, "vocab_size": 21, "n_whitespaces": 58, "language": "en" } }, { "id": 100861, "commit_id": "98a65277d8c55cfcbdbfa629f790a8f8731621a8", "repo": "faceswap", "path": "tests/simple_tests.py", "file_name": "simple_tests.py", "fun_name": "print_colored", "commit_message": "Fix AMD Tests + docs", "code": "def print_colored(text, color=\"OK\", bold=False):\n \n color = _COLORS.get(color, color)\n fmt = '' if not bold else _COLORS['BOLD']\n print(f\"{color}{fmt}{text}{_COLORS['ENDC']}\")\n\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 29, "n_words": 17, "vocab_size": 16, "complexity": 2, "nloc": 4, "token_counts": 40, "n_ast_nodes": 91, "n_identifiers": 8, "d_id": 20312, "documentation": { "docstring": " Print colored text\n This might not work on windows,\n although travis runs windows stuff in git bash, so it might ?\n ", "n_words": 21, "vocab_size": 20, "n_whitespaces": 31, "language": "en" } }, { "id": 288598, "commit_id": "47d0598e75487f63901931875f69f802a477df13", "repo": "core", "path": "homeassistant/components/light/__init__.py", "file_name": "__init__.py", "fun_name": "_light_internal_color_mode", "commit_message": "Use Kelvin as the preferred color temperature unit (#79591)\n\n* Use Kelvin as the preferred white temperature unit\r\n\r\n* Update homekit\r\n\r\n* Adjust tests", "code": "def _light_internal_color_mode(self) -> str:\n \n if (color_mode := self.color_mode) is None:\n # Backwards compatibility for color_mode added in 2021.4\n # Add warning in 2021.6, remove in 2021.10\n supported = self._light_internal_supported_color_modes\n\n if ColorMode.HS in supported and self.hs_color is not None:\n return ColorMode.HS\n if ColorMode.COLOR_TEMP in supported and self.color_temp_kelvin is not None:\n return 
ColorMode.COLOR_TEMP\n if ColorMode.BRIGHTNESS in supported and self.brightness is not None:\n return ColorMode.BRIGHTNESS\n if ColorMode.ONOFF in supported:\n return ColorMode.ONOFF\n return ColorMode.UNKNOWN\n\n return color_mode\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 241, "n_words": 72, "vocab_size": 38, "complexity": 9, "nloc": 14, "token_counts": 95, "n_ast_nodes": 150, "n_identifiers": 15, "d_id": 87754, "documentation": { "docstring": "Return the color mode of the light with backwards compatibility.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 102167, "commit_id": "bb5b4cceb6f737448eaaa6817cd773b6f4b0e77d", "repo": "pytorch", "path": "tools/test/test_gen_backend_stubs.py", "file_name": "test_gen_backend_stubs.py", "fun_name": "test_backend_has_no_autograd_key_but_provides_entries", "commit_message": "Revert \"Revert D32498569: allow external backend codegen to toggle whether to generate out= and inplace kernels\" (#69950)\n\nSummary:\nPull Request resolved: https://github.com/pytorch/pytorch/pull/69950\n\nThis reverts commit f6cad53443704dfe5a20cc62bee14d91e3bffcaa.\n\nTest Plan: Imported from OSS\n\nReviewed By: albanD\n\nDifferential Revision: D33113545\n\nPulled By: bdhirsh\n\nfbshipit-source-id: d6590294662588d36c09662dea65919ad4e1e288", "code": "def test_backend_has_no_autograd_key_but_provides_entries(self) -> None:\n yaml_str = \n output_error = self.get_errors_from_gen_backend_stubs(yaml_str)\n self.assertExpectedInline(output_error, ) # noqa: B950\n\n # in an operator group, currently all operators must either be registered to the backend or autograd kernel.\n # Here, functional and out mismatch", "url": "https://github.com/pytorch/pytorch.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 66, "n_words": 38, "vocab_size": 35, "complexity": 1, "nloc": 10, "token_counts": 26, "n_ast_nodes": 50, "n_identifiers": 6, "d_id": 21482, "documentation": { "docstring": "\\\nbackend: Vulkan\ncpp_namespace: torch_vulkan\nsupported:\n- add\nautograd:\n- subFound an invalid operator name: add", "n_words": 16, "vocab_size": 14, "n_whitespaces": 9, "language": "en" } }, { "id": 187200, "commit_id": "d09112ab1f6db6aa605650fe1ff6a3028344f90d", "repo": "streamlink", "path": "tests/test_api_validate.py", "file_name": "test_api_validate.py", "fun_name": "test_dict_failure", "commit_message": "plugin.api.validate: rewrite tests\n\nCompletely rewrite tests using pytest, with full coverage", "code": "def test_dict_failure(self):\n with pytest.raises(validate.ValidationError) as cm:\n validate.validate(validate.union({\"foo\": int}), \"value\")\n assert_validationerror(cm.value, )\n", "url": "https://github.com/streamlink/streamlink.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 35, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 11, "token_counts": 42, "n_ast_nodes": 75, "n_identifiers": 11, "d_id": 45739, "documentation": { "docstring": "\n ValidationError(UnionSchema):\n Could not validate union\n Context(dict):\n Unable to validate union 'foo'\n Context(type):\n Type of 'value' should be int, but is str\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 113, "language": "en" } }, { "id": 47418, "commit_id": "bab740c0a49b828401a8baf04eb297d083605ae8", "repo": "airflow", "path": "tests/models/test_trigger.py", "file_name": 
"test_trigger.py", "fun_name": "test_submit_event", "commit_message": "Fix trigger event payload is not persisted in db (#22944)\n\nCo-authored-by: Kaxil Naik \r\nCo-authored-by: Ash Berlin-Taylor ", "code": "def test_submit_event(session, create_task_instance):\n \n # Make a trigger\n trigger = Trigger(classpath=\"airflow.triggers.testing.SuccessTrigger\", kwargs={})\n trigger.id = 1\n session.add(trigger)\n session.commit()\n # Make a TaskInstance that's deferred and waiting on it\n task_instance = create_task_instance(\n session=session, execution_date=timezone.utcnow(), state=State.DEFERRED\n )\n task_instance.trigger_id = trigger.id\n task_instance.next_kwargs = {\"cheesecake\": True}\n session.commit()\n # Call submit_event\n Trigger.submit_event(trigger.id, TriggerEvent(42), session=session)\n # commit changes made by submit event and expire all cache to read from db.\n session.flush()\n session.expunge_all()\n # Check that the task instance is now scheduled\n updated_task_instance = session.query(TaskInstance).one()\n assert updated_task_instance.state == State.SCHEDULED\n assert updated_task_instance.next_kwargs == {\"event\": 42, \"cheesecake\": True}\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 157, "n_words": 87, "vocab_size": 69, "complexity": 1, "nloc": 17, "token_counts": 141, "n_ast_nodes": 237, "n_identifiers": 28, "d_id": 9106, "documentation": { "docstring": "\n Tests that events submitted to a trigger re-wake their dependent\n task instances.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 22, "language": "en" } }, { "id": 144772, "commit_id": "85d6946c9524d8544e69262f737018151efb1567", "repo": "ray", "path": "python/ray/data/tests/test_dataset_formats.py", "file_name": "test_dataset_formats.py", "fun_name": "test_fsspec_filesystem", "commit_message": "Split test_dataset.py into two (#22303)", "code": "def test_fsspec_filesystem(ray_start_regular_shared, tmp_path):\n \n df1 = pd.DataFrame({\"one\": [1, 2, 3], \"two\": [\"a\", \"b\", \"c\"]})\n table = pa.Table.from_pandas(df1)\n path1 = os.path.join(str(tmp_path), \"test1.parquet\")\n pq.write_table(table, path1)\n df2 = pd.DataFrame({\"one\": [4, 5, 6], \"two\": [\"e\", \"f\", \"g\"]})\n table = pa.Table.from_pandas(df2)\n path2 = os.path.join(str(tmp_path), \"test2.parquet\")\n pq.write_table(table, path2)\n\n fs = LocalFileSystem()\n\n ds = ray.data.read_parquet([path1, path2], filesystem=fs)\n\n # Test metadata-only parquet ops.\n assert ds._blocks._num_computed() == 1\n assert ds.count() == 6\n\n out_path = os.path.join(tmp_path, \"out\")\n os.mkdir(out_path)\n\n ds._set_uuid(\"data\")\n ds.write_parquet(out_path)\n\n ds_df1 = pd.read_parquet(os.path.join(out_path, \"data_000000.parquet\"))\n ds_df2 = pd.read_parquet(os.path.join(out_path, \"data_000001.parquet\"))\n ds_df = pd.concat([ds_df1, ds_df2])\n df = pd.concat([df1, df2])\n assert ds_df.equals(df)\n\n\n@pytest.mark.parametrize(\n \"fs,data_path\",\n [\n (None, lazy_fixture(\"local_path\")),\n (lazy_fixture(\"local_fs\"), lazy_fixture(\"local_path\")),\n (lazy_fixture(\"s3_fs\"), lazy_fixture(\"s3_path\")),\n (\n lazy_fixture(\"s3_fs_with_space\"),\n lazy_fixture(\"s3_path_with_space\"),\n ), # Path contains space.\n ],\n)", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"fs,data_path\",\n [\n (None, lazy_fixture(\"local_path\")),\n (lazy_fixture(\"local_fs\"), 
lazy_fixture(\"local_path\")),\n (lazy_fixture(\"s3_fs\"), lazy_fixture(\"s3_path\")),\n (\n lazy_fixture(\"s3_fs_with_space\"),\n lazy_fixture(\"s3_path_with_space\"),\n ), # Path contains space.\n ],\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 240, "n_words": 106, "vocab_size": 83, "complexity": 1, "nloc": 22, "token_counts": 266, "n_ast_nodes": 538, "n_identifiers": 43, "d_id": 33307, "documentation": { "docstring": "Same as `test_parquet_write` but using a custom, fsspec filesystem.\n\n TODO (Alex): We should write a similar test with a mock PyArrow fs, but\n unfortunately pa.fs._MockFileSystem isn't serializable, so this may require\n some effort.\n ", "n_words": 33, "vocab_size": 30, "n_whitespaces": 45, "language": "en" } }, { "id": 122245, "commit_id": "6d2aaac2454117d54997243714c1a009827707ca", "repo": "jax", "path": "jax/_src/dtypes.py", "file_name": "dtypes.py", "fun_name": "dtype", "commit_message": "implement bint arrays (opaque dtypes), add padding rules\n\nCo-authored-by: Sharad Vikram ", "code": "def dtype(x, *, canonicalize=False):\n \n if x is None:\n raise ValueError(f\"Invalid argument to dtype: {x}.\")\n elif isinstance(x, type) and x in python_scalar_dtypes:\n dt = python_scalar_dtypes[x]\n elif type(x) in python_scalar_dtypes:\n dt = python_scalar_dtypes[type(x)]\n elif jax.core.is_opaque_dtype(getattr(x, 'dtype', None)):\n dt = x.dtype\n else:\n dt = np.result_type(x)\n if dt not in _jax_dtype_set:\n raise TypeError(f\"Value '{x}' with dtype {dt} is not a valid JAX array \"\n \"type. Only arrays of numeric types are supported by JAX.\")\n return canonicalize_dtype(dt) if canonicalize else dt\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 121, "n_words": 76, "vocab_size": 57, "complexity": 8, "nloc": 15, "token_counts": 112, "n_ast_nodes": 193, "n_identifiers": 17, "d_id": 27139, "documentation": { "docstring": "Return the dtype object for a value or type, optionally canonicalized based on X64 mode.", "n_words": 15, "vocab_size": 15, "n_whitespaces": 14, "language": "en" } }, { "id": 19355, "commit_id": "def289b723e9216830c2a7b2577cb31b55710167", "repo": "PythonRobotics", "path": "PathPlanning/CubicSpline/cubic_spline_planner.py", "file_name": "cubic_spline_planner.py", "fun_name": "calc_position", "commit_message": "enhance cubic spline path doc (#698)\n\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cublic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc\r\n\r\n* enhance cubic spline path doc", "code": "def calc_position(self, x):\n \n if x < self.x[0]:\n return None\n elif x > self.x[-1]:\n return None\n\n i = self.__search_index(x)\n 
dx = x - self.x[i]\n position = self.a[i] + self.b[i] * dx + \\\n self.c[i] * dx ** 2.0 + self.d[i] * dx ** 3.0\n\n return position\n", "url": "https://github.com/AtsushiSakai/PythonRobotics.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 127, "n_words": 45, "vocab_size": 29, "complexity": 3, "nloc": 10, "token_counts": 97, "n_ast_nodes": 141, "n_identifiers": 11, "d_id": 2943, "documentation": { "docstring": "\n Calc `y` position for given `x`.\n\n if `x` is outside the data point's `x` range, return None.\n\n Returns\n -------\n y : float\n y position for given x.\n ", "n_words": 27, "vocab_size": 22, "n_whitespaces": 81, "language": "en" } }, { "id": 186377, "commit_id": "eeca208c8f57304590ac1af80b496e61021aaa45", "repo": "certbot", "path": "certbot-apache/certbot_apache/_internal/configurator.py", "file_name": "configurator.py", "fun_name": "_enable_ocsp_stapling", "commit_message": "Various clean-ups in certbot-apache. Use f-strings. (#9132)\n\n* Various clean-ups in certbot-apache. Use f-strings.\r\n\r\n* Smaller tweaks", "code": "def _enable_ocsp_stapling(self, ssl_vhost, unused_options):\n \n min_apache_ver = (2, 3, 3)\n if self.get_version() < min_apache_ver:\n raise errors.PluginError(\n \"Unable to set OCSP directives.\\n\"\n \"Apache version is below 2.3.3.\")\n\n if \"socache_shmcb_module\" not in self.parser.modules:\n self.enable_mod(\"socache_shmcb\")\n\n # Check if there's an existing SSLUseStapling directive on.\n use_stapling_aug_path = self.parser.find_dir(\"SSLUseStapling\",\n \"on\", start=ssl_vhost.path)\n if not use_stapling_aug_path:\n self.parser.add_dir(ssl_vhost.path, \"SSLUseStapling\", \"on\")\n\n ssl_vhost_aug_path = self._escape(parser.get_aug_path(ssl_vhost.filep))\n\n # Check if there's an existing SSLStaplingCache directive.\n stapling_cache_aug_path = self.parser.find_dir('SSLStaplingCache',\n None, ssl_vhost_aug_path)\n\n # We'll simply delete the directive, so that we'll have a\n # consistent OCSP cache path.\n if stapling_cache_aug_path:\n self.parser.aug.remove(\n re.sub(r\"/\\w*$\", \"\", stapling_cache_aug_path[0]))\n\n self.parser.add_dir_to_ifmodssl(ssl_vhost_aug_path,\n \"SSLStaplingCache\",\n [\"shmcb:/var/run/apache2/stapling_cache(128000)\"])\n\n msg = \"OCSP Stapling was enabled on SSL Vhost: %s.\\n\"%(\n ssl_vhost.filep)\n self.save_notes += msg\n self.save()\n logger.info(msg)\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 518, "n_words": 108, "vocab_size": 89, "complexity": 5, "nloc": 26, "token_counts": 182, "n_ast_nodes": 311, "n_identifiers": 31, "d_id": 45473, "documentation": { "docstring": "Enables OCSP Stapling\n\n In OCSP, each client (e.g. browser) would have to query the\n OCSP Responder to validate that the site certificate was not revoked.\n\n Enabling OCSP Stapling, would allow the web-server to query the OCSP\n Responder, and staple its response to the offered certificate during\n TLS. i.e. clients would not have to query the OCSP responder.\n\n OCSP Stapling enablement on Apache implicitly depends on\n SSLCertificateChainFile being set by other code.\n\n .. 
note:: This function saves the configuration\n\n :param ssl_vhost: Destination of traffic, an ssl enabled vhost\n :type ssl_vhost: :class:`~certbot_apache._internal.obj.VirtualHost`\n\n :param unused_options: Not currently used\n :type unused_options: Not Available\n\n :returns: Success, general_vhost (HTTP vhost)\n :rtype: (bool, :class:`~certbot_apache._internal.obj.VirtualHost`)\n\n ", "n_words": 107, "vocab_size": 78, "n_whitespaces": 212, "language": "en" } }, { "id": 251864, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "test/mitmproxy/proxy/layers/http/test_http.py", "file_name": "test_http.py", "fun_name": "test_upgrade", "commit_message": "make it black!", "code": "def test_upgrade(tctx, proto):\n \n if proto != \"websocket\":\n tctx.options.websocket = False\n if proto != \"tcp\":\n tctx.options.rawtcp = False\n\n tctx.server.address = (\"example.com\", 80)\n tctx.server.state = ConnectionState.OPEN\n http_flow = Placeholder(HTTPFlow)\n playbook = Playbook(http.HttpLayer(tctx, HTTPMode.transparent))\n (\n playbook\n >> DataReceived(\n tctx.client,\n b\"GET / HTTP/1.1\\r\\n\"\n b\"Connection: upgrade\\r\\n\"\n b\"Upgrade: websocket\\r\\n\"\n b\"Sec-WebSocket-Version: 13\\r\\n\"\n b\"\\r\\n\",\n )\n << http.HttpRequestHeadersHook(http_flow)\n >> reply()\n << http.HttpRequestHook(http_flow)\n >> reply()\n << SendData(\n tctx.server,\n b\"GET / HTTP/1.1\\r\\n\"\n b\"Connection: upgrade\\r\\n\"\n b\"Upgrade: websocket\\r\\n\"\n b\"Sec-WebSocket-Version: 13\\r\\n\"\n b\"\\r\\n\",\n )\n >> DataReceived(\n tctx.server,\n b\"HTTP/1.1 101 Switching Protocols\\r\\n\"\n b\"Upgrade: websocket\\r\\n\"\n b\"Connection: Upgrade\\r\\n\"\n b\"\\r\\n\",\n )\n << http.HttpResponseHeadersHook(http_flow)\n >> reply()\n << http.HttpResponseHook(http_flow)\n >> reply()\n << SendData(\n tctx.client,\n b\"HTTP/1.1 101 Switching Protocols\\r\\n\"\n b\"Upgrade: websocket\\r\\n\"\n b\"Connection: Upgrade\\r\\n\"\n b\"\\r\\n\",\n )\n )\n if proto == \"websocket\":\n assert playbook << WebsocketStartHook(http_flow)\n elif proto == \"tcp\":\n assert playbook << TcpStartHook(Placeholder(TCPFlow))\n else:\n assert (\n playbook\n << Log(\n \"Sent HTTP 101 response, but no protocol is enabled to upgrade to.\",\n \"warn\",\n )\n << CloseConnection(tctx.client)\n )\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 656, "n_words": 143, "vocab_size": 70, "complexity": 5, "nloc": 63, "token_counts": 256, "n_ast_nodes": 428, "n_identifiers": 33, "d_id": 73865, "documentation": { "docstring": "Test a HTTP -> WebSocket upgrade with different protocols enabled", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 125418, "commit_id": "8553df49bba654a9edd6befce198be90d6524fca", "repo": "ray", "path": "python/ray/data/_internal/plan.py", "file_name": "plan.py", "fun_name": "copy", "commit_message": "Make execution plan/blocklist aware of the memory ownership and who runs the plan (#26650)\n\nHaving the indicator about who's running the stage and who created a blocklist will enable the eager memory releasing.\r\n\r\nThis is an alternative with better abstraction to https://github.com/ray-project/ray/pull/26196.\r\n\r\nNote: this doesn't work for Dataset.split() yet, will do in a followup PR.", "code": "def copy(self) -> \"ExecutionPlan\":\n \n plan_copy = ExecutionPlan(\n self._in_blocks, self._in_stats, run_by_consumer=self._run_by_consumer\n )\n if 
self._snapshot_blocks is not None:\n # Copy over the existing snapshot.\n plan_copy._snapshot_blocks = self._snapshot_blocks\n plan_copy._snapshot_stats = self._snapshot_stats\n plan_copy._stages_before_snapshot = self._stages_before_snapshot.copy()\n plan_copy._stages_after_snapshot = self._stages_after_snapshot.copy()\n return plan_copy\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 129, "n_words": 36, "vocab_size": 30, "complexity": 2, "nloc": 18, "token_counts": 72, "n_ast_nodes": 118, "n_identifiers": 12, "d_id": 27862, "documentation": { "docstring": "Create a shallow copy of this execution plan.\n\n This copy can be executed without mutating the original, but clearing the copy\n will also clear the original.\n\n Returns:\n A shallow copy of this execution plan.\n ", "n_words": 34, "vocab_size": 24, "n_whitespaces": 73, "language": "en" } }, { "id": 201891, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/basic/tests.py", "file_name": "tests.py", "fun_name": "test_select_on_save_lying_update", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_select_on_save_lying_update(self):\n \n # Change the manager to not return \"row matched\" for update().\n # We are going to change the Article's _base_manager class\n # dynamically. This is a bit of a hack, but it seems hard to\n # test this properly otherwise. Article's manager, because\n # proxy models use their parent model's _base_manager.\n\n orig_class = Article._base_manager._queryset_class\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 105, "n_words": 56, "vocab_size": 47, "complexity": 2, "nloc": 23, "token_counts": 125, "n_ast_nodes": 29, "n_identifiers": 6, "d_id": 50017, "documentation": { "docstring": "\n select_on_save works correctly if the database doesn't return correct\n information about matched rows from UPDATE.\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 37, "language": "en" } }, { "id": 30423, "commit_id": "deca40c2e26afed62e1f9ec4be14aff9e125929b", "repo": "spotify-downloader", "path": "spotdl/utils/console.py", "file_name": "console.py", "fun_name": "generate_config", "commit_message": "moved console actions to a new file", "code": "def generate_config():\n \n\n config_path = get_config_file()\n if config_path.exists():\n overwrite_config = input(\"Config file already exists. Overwrite? 
(y/N): \")\n\n if overwrite_config.lower() != \"y\":\n print(\"Exiting...\")\n return None\n\n with open(config_path, \"w\", encoding=\"utf-8\") as config_file:\n json.dump(DEFAULT_CONFIG, config_file, indent=4)\n\n print(f\"Config file generated at {config_path}\")\n\n return None\n\n", "url": "https://github.com/spotDL/spotify-downloader.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 100, "n_words": 39, "vocab_size": 34, "complexity": 3, "nloc": 11, "token_counts": 71, "n_ast_nodes": 134, "n_identifiers": 15, "d_id": 5567, "documentation": { "docstring": "\n Generate the config file if it doesn't exist\n This is done before the argument parser so it doesn't requires `operation`\n and `query` to be passed.\n ", "n_words": 25, "vocab_size": 22, "n_whitespaces": 38, "language": "en" } }, { "id": 204817, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/base/base.py", "file_name": "base.py", "fun_name": "timezone", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def timezone(self):\n \n if not settings.USE_TZ:\n return None\n elif self.settings_dict[\"TIME_ZONE\"] is None:\n return timezone.utc\n else:\n return timezone_constructor(self.settings_dict[\"TIME_ZONE\"])\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 77, "n_words": 16, "vocab_size": 14, "complexity": 3, "nloc": 7, "token_counts": 40, "n_ast_nodes": 70, "n_identifiers": 7, "d_id": 50902, "documentation": { "docstring": "\n Return a tzinfo of the database connection time zone.\n\n This is only used when time zone support is enabled. When a datetime is\n read from the database, it is always returned in this time zone.\n\n When the database backend supports time zones, it doesn't matter which\n time zone Django uses, as long as aware datetimes are used everywhere.\n Other users connecting to the database can choose their own time zone.\n\n When the database backend doesn't support time zones, the time zone\n Django uses may be constrained by the requirements of other users of\n the database.\n ", "n_words": 95, "vocab_size": 57, "n_whitespaces": 166, "language": "en" } }, { "id": 189828, "commit_id": "7a13f0e969e6a70af9084cdbf41cea49c7f1813c", "repo": "manim", "path": "tests/utils/video_tester.py", "file_name": "video_tester.py", "fun_name": "check_video_data", "commit_message": "Migrate from os.path to pathlib in Testing Scripts (#2685)\n\n* updated styling\r\n\r\n* final commit\r\n\r\n* fixed style\r\n\r\n* removed exist_ok=true\r\n\r\n* added parents=True\r\n\r\n* potentially .exists() is the problem\r\n\r\n* fixed style'\r\n\r\n* fixed style on revisions\r\n\r\n* style check processed\r\n\r\n* Update tests/helpers/graphical_units.py\r\n\r\nCo-authored-by: ad_chaos <90276965+Kiran-Raj-Dev@users.noreply.github.com>\r\n\r\n* fixed changes\r\n\r\n* made get_dir_layout also accept path.\r\n\r\n* removed small auto import error\r\n\r\nCo-authored-by: ad_chaos <90276965+Kiran-Raj-Dev@users.noreply.github.com>\r\nCo-authored-by: Naveen M K ", "code": "def check_video_data(path_control_data, path_video_gen):\n \n # movie file specification\n path_sec_gen = Path(path_video_gen).parent.absolute() / \"sections\"\n control_data = load_control_data(path_control_data)\n movie_meta_gen = get_video_metadata(path_video_gen)\n movie_meta_exp = control_data[\"movie_metadata\"]\n\n assert_shallow_dict_compare(\n 
movie_meta_gen, movie_meta_exp, \"Movie file metadata mismatch:\"\n )\n\n # sections directory layout\n sec_dir_layout_gen = set(get_section_dir_layout(path_sec_gen))\n sec_dir_layout_exp = set(control_data[\"section_dir_layout\"])\n\n unexp_gen = sec_dir_layout_gen - sec_dir_layout_exp\n ungen_exp = sec_dir_layout_exp - sec_dir_layout_gen\n if len(unexp_gen) or len(ungen_exp):\n dif = [f\"'{dif}' got unexpectedly generated\" for dif in unexp_gen] + [\n f\"'{dif}' didn't get generated\" for dif in ungen_exp\n ]\n mismatch = \"\\n\".join(dif)\n raise AssertionError(f\"Sections don't match:\\n{mismatch}\")\n\n # sections index file\n scene_name = Path(path_video_gen).stem\n path_sec_index_gen = path_sec_gen / f\"{scene_name}.json\"\n sec_index_gen = get_section_index(path_sec_index_gen)\n sec_index_exp = control_data[\"section_index\"]\n\n if len(sec_index_gen) != len(sec_index_exp):\n raise AssertionError(\n f\"expected {len(sec_index_exp)} sections ({', '.join([el['name'] for el in sec_index_exp])}), but {len(sec_index_gen)} ({', '.join([el['name'] for el in sec_index_gen])}) got generated (in '{path_sec_index_gen}')\"\n )\n # check individual sections\n for sec_gen, sec_exp in zip(sec_index_gen, sec_index_exp):\n assert_shallow_dict_compare(\n sec_gen,\n sec_exp,\n # using json to pretty print dicts\n f\"Section {json.dumps(sec_gen, indent=4)} (in '{path_sec_index_gen}') doesn't match expected Section (in '{json.dumps(sec_exp, indent=4)}'):\",\n )\n\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 355, "n_words": 160, "vocab_size": 107, "complexity": 7, "nloc": 32, "token_counts": 185, "n_ast_nodes": 444, "n_identifiers": 37, "d_id": 46227, "documentation": { "docstring": "Compare control data with generated output.\n Used abbreviations:\n exp -> expected\n gen -> generated\n sec -> section\n meta -> metadata\n ", "n_words": 20, "vocab_size": 16, "n_whitespaces": 57, "language": "en" } }, { "id": 168200, "commit_id": "2f8d0a36703e81e4dca52ca9fe4f58c910c1b304", "repo": "pandas", "path": "pandas/core/arrays/categorical.py", "file_name": "categorical.py", "fun_name": "remove_categories", "commit_message": "PERF cache find_stack_level (#48023)\n\ncache stacklevel", "code": "def remove_categories(self, removals, inplace=no_default):\n \n if inplace is not no_default:\n warn(\n \"The `inplace` parameter in pandas.Categorical.\"\n \"remove_categories is deprecated and will be removed in \"\n \"a future version. 
Removing unused categories will always \"\n \"return a new Categorical object.\",\n FutureWarning,\n stacklevel=find_stack_level(inspect.currentframe()),\n )\n else:\n inplace = False\n\n inplace = validate_bool_kwarg(inplace, \"inplace\")\n if not is_list_like(removals):\n removals = [removals]\n\n removal_set = set(removals)\n not_included = removal_set - set(self.dtype.categories)\n new_categories = [c for c in self.dtype.categories if c not in removal_set]\n\n # GH 10156\n if any(isna(removals)):\n not_included = {x for x in not_included if notna(x)}\n new_categories = [x for x in new_categories if notna(x)]\n\n if len(not_included) != 0:\n raise ValueError(f\"removals must all be in old categories: {not_included}\")\n\n with catch_warnings():\n simplefilter(\"ignore\")\n return self.set_categories(\n new_categories, ordered=self.ordered, rename=False, inplace=inplace\n )\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 420, "n_words": 121, "vocab_size": 84, "complexity": 11, "nloc": 28, "token_counts": 181, "n_ast_nodes": 300, "n_identifiers": 31, "d_id": 40226, "documentation": { "docstring": "\n Remove the specified categories.\n\n `removals` must be included in the old categories. Values which were in\n the removed categories will be set to NaN\n\n Parameters\n ----------\n removals : category or list of categories\n The categories which should be removed.\n inplace : bool, default False\n Whether or not to remove the categories inplace or return a copy of\n this categorical with removed categories.\n\n .. deprecated:: 1.3.0\n\n Returns\n -------\n cat : Categorical or None\n Categorical with removed categories or None if ``inplace=True``.\n\n Raises\n ------\n ValueError\n If the removals are not contained in the categories\n\n See Also\n --------\n rename_categories : Rename categories.\n reorder_categories : Reorder categories.\n add_categories : Add new categories.\n remove_unused_categories : Remove categories which are not used.\n set_categories : Set the categories to the specified ones.\n\n Examples\n --------\n >>> c = pd.Categorical(['a', 'c', 'b', 'c', 'd'])\n >>> c\n ['a', 'c', 'b', 'c', 'd']\n Categories (4, object): ['a', 'b', 'c', 'd']\n\n >>> c.remove_categories(['d', 'a'])\n [NaN, 'c', 'b', 'c', NaN]\n Categories (2, object): ['b', 'c']\n ", "n_words": 162, "vocab_size": 94, "n_whitespaces": 435, "language": "en" } }, { "id": 93031, "commit_id": "522d6f27c28dc5fd4d996ed605865c42fbda0da8", "repo": "sentry", "path": "src/sentry/utils/meta.py", "file_name": "meta.py", "fun_name": "enter", "commit_message": "ref: replace legacy compat.map with list comprehensions (#36372)", "code": "def enter(self, *path):\n \n return Meta(self._meta, path=self._path + [str(p) for p in path])\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 26, "n_words": 12, "vocab_size": 12, "complexity": 2, "nloc": 2, "token_counts": 33, "n_ast_nodes": 52, "n_identifiers": 8, "d_id": 18968, "documentation": { "docstring": "\n Enters into sub meta data at the specified path. 
This always returns a\n new ``Meta`` object, regardless whether the path already exists.\n ", "n_words": 22, "vocab_size": 21, "n_whitespaces": 44, "language": "en" } }, { "id": 167622, "commit_id": "b81f4318fc0d796760d16237a8f616dad73912eb", "repo": "pandas", "path": "pandas/tests/extension/test_arrow.py", "file_name": "test_arrow.py", "fun_name": "data_for_grouping", "commit_message": "ENH/TST: Add BaseGroupbyTests tests for ArrowExtensionArray (#47515)", "code": "def data_for_grouping(dtype):\n \n pa_dtype = dtype.pyarrow_dtype\n if pa.types.is_boolean(pa_dtype):\n A = False\n B = True\n C = True\n elif pa.types.is_floating(pa_dtype):\n A = -1.1\n B = 0.0\n C = 1.1\n elif pa.types.is_signed_integer(pa_dtype):\n A = -1\n B = 0\n C = 1\n elif pa.types.is_unsigned_integer(pa_dtype):\n A = 0\n B = 1\n C = 10\n elif pa.types.is_date(pa_dtype):\n A = date(1999, 12, 31)\n B = date(2010, 1, 1)\n C = date(2022, 1, 1)\n elif pa.types.is_timestamp(pa_dtype):\n A = datetime(1999, 1, 1, 1, 1, 1, 1)\n B = datetime(2020, 1, 1)\n C = datetime(2020, 1, 1, 1)\n elif pa.types.is_duration(pa_dtype):\n A = timedelta(-1)\n B = timedelta(0)\n C = timedelta(1, 4)\n elif pa.types.is_time(pa_dtype):\n A = time(0, 0)\n B = time(0, 12)\n C = time(12, 12)\n else:\n raise NotImplementedError\n return pd.array([B, B, None, None, A, A, B, C], dtype=dtype)\n\n\n@pytest.fixture", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 338, "n_words": 128, "vocab_size": 55, "complexity": 9, "nloc": 37, "token_counts": 281, "n_ast_nodes": 424, "n_identifiers": 26, "d_id": 40070, "documentation": { "docstring": "\n Data for factorization, grouping, and unique tests.\n\n Expected to be like [B, B, NA, NA, A, A, B, C]\n\n Where A < B < C and NA is missing\n ", "n_words": 29, "vocab_size": 24, "n_whitespaces": 42, "language": "en" } }, { "id": 222302, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/curses/textpad.py", "file_name": "textpad.py", "fun_name": "_end_of_line", "commit_message": "add python 3.10.4 for windows", "code": "def _end_of_line(self, y):\n \n self._update_max_yx()\n last = self.maxx\n while True:\n if curses.ascii.ascii(self.win.inch(y, last)) != curses.ascii.SP:\n last = min(self.maxx, last+1)\n break\n elif last == 0:\n break\n last = last - 1\n return last\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 144, "n_words": 31, "vocab_size": 23, "complexity": 4, "nloc": 11, "token_counts": 72, "n_ast_nodes": 116, "n_identifiers": 12, "d_id": 56535, "documentation": { "docstring": "Go to the location of the first blank on the given line,\n returning the index of the last non-blank character.", "n_words": 20, "vocab_size": 15, "n_whitespaces": 26, "language": "en" } }, { "id": 288026, "commit_id": "52307708c843b947a2d631f2fe7ddaa8bd9a90d7", "repo": "core", "path": "homeassistant/components/apcupsd/__init__.py", "file_name": "__init__.py", "fun_name": "hw_version", "commit_message": "Refactor apcupsd to use config flow (#64809)\n\n* Add Config Flow to APCUPSd integration and remove YAML support.\r\n\r\n* Hide the binary sensor if user does not select STATFLAG resource.\r\n\r\n* Add tests for config flows.\r\n\r\n* Simplify config flow code.\r\n\r\n* Spell fix.\r\n\r\n* Fix pylint warnings.\r\n\r\n* Simplify the code for config flow.\r\n\r\n* First attempt to 
implement import flows to suppport legacy YAML configurations.\r\n\r\n* Remove unnecessary log calls.\r\n\r\n* Wrap synchronous update call with `hass.async_add_executor_job`.\r\n\r\n* Import the YAML configurations when sensor platform is set up.\r\n\r\n* Move the logger call since the variables are not properly set up.\r\n\r\n* Add codeowner.\r\n\r\n* Fix name field of manifest.json.\r\n\r\n* Fix linting issue.\r\n\r\n* Fix incorrect dependency due to incorrect rebase.\r\n\r\n* Update codeowner and config flows via hassfest.\r\n\r\n* Postpone the deprecation warning to 2022.7.\r\n\r\n* Import future annotations for init file.\r\n\r\n* Add an newline at the end to make prettier happy.\r\n\r\n* Update github id.\r\n\r\n* Add type hints for return types of steps in config flow.\r\n\r\n* Move the deprecation date for YAML config to 2022.12.\r\n\r\n* Update according to reviews.\r\n\r\n* Use async_forward_entry_setups.\r\n\r\n* Add helper properties to `APCUPSdData` class.\r\n\r\n* Add device_info for binary sensor.\r\n\r\n* Simplify config flow.\r\n\r\n* Remove options flow strings.\r\n\r\n* update the tests according to the changes.\r\n\r\n* Add `entity_registry_enabled_default` to entities and use imported CONF_RESOURCES to disable entities instead of skipping them.\r\n\r\n* Update according to reviews.\r\n\r\n* Do not use model of the UPS as the title for the integration.\r\n\r\nInstead, simply use \"APCUPSd\" as the integration title and let the device info serve as title for each device instead.\r\n\r\n* Change schema to be a global variable.\r\n\r\n* Add more comments.\r\n\r\n* Rewrite the tests for config flows.\r\n\r\n* Fix enabled_by_default.\r\n\r\n* Show friendly titles in the integration.\r\n\r\n* Add import check in `async_setup_platform` to avoid importing in sensor platform setup.\r\n\r\n* Add import check in `async_setup_platform` to avoid importing in sensor platform setup.\r\n\r\n* Update comments in test files.\r\n\r\n* Use parametrize instead of manually iterating different test cases.\r\n\r\n* Swap the order of the platform constants.\r\n\r\n* Avoid using broad exceptions.\r\n\r\n* Set up device info via `_attr_device_info`.\r\n\r\n* Remove unrelated test in `test_config_flow`.\r\n\r\n* Use `DeviceInfo` instead of dict to assign to `_attr_device_info`.\r\n\r\n* Add english translation.\r\n\r\n* Add `async_create_issue` for deprecated YAML configuration.\r\n\r\n* Enable UPS status by default since it could show \"online, charging, on battery etc\" which is meaningful for all users.\r\n\r\n* Apply suggestions from code review\r\n\r\n* Apply suggestion\r\n\r\n* Apply suggestion\r\n\r\nCo-authored-by: Martin Hjelmare ", "code": "def hw_version(self) -> str | None:\n \n return self.status.get(\"FIRMWARE\")\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 19, "n_ast_nodes": 35, "n_identifiers": 5, "d_id": 87207, "documentation": { "docstring": "Return the firmware version of the UPS, if available.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 281554, "commit_id": "82747072c511beb1b2672846ae2ee4aec53eb562", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/stocks/options/pricing_controller.py", "file_name": "pricing_controller.py", "fun_name": "print_help", "commit_message": "Terminal Wide Rich (#1161)\n\n* My idea for how we handle Rich moving forward\r\n\r\n* 
remove independent consoles\r\n\r\n* FIxed pylint issues\r\n\r\n* add a few vars\r\n\r\n* Switched print to console\r\n\r\n* More transitions\r\n\r\n* Changed more prints\r\n\r\n* Replaced all prints\r\n\r\n* Fixing tabulate\r\n\r\n* Finished replace tabulate\r\n\r\n* Finished removing rich from Tabulate\r\n\r\n* add Panel around menu\r\n\r\n* add GST watermark under feature flag\r\n\r\n* Fixed 46 tests\r\n\r\n* Delete test_screener[False].yaml\r\n\r\n* Delete test_screener[True].yaml\r\n\r\n* Fixed the rest of the tests\r\n\r\n* add help and source color vars and use rgb\r\n\r\n* rich on stocks/options\r\n\r\n* update rich on disc, dps, sia\r\n\r\n* rich in gov, ins and scr menus\r\n\r\n* ba and ca menus with rich\r\n\r\n* Fixed import issue\r\n\r\n* Fixed some tests\r\n\r\n* removed termcolor\r\n\r\n* Removed prettytable\r\n\r\n* add rich to remaining stocks menus\r\n\r\n* FIxed linting issue\r\n\r\n* Added James' changes\r\n\r\n* Updated dependencies\r\n\r\n* Add rich to cryptocurrency menu\r\n\r\n* refactor economy and forex\r\n\r\n* refactor etf with rich\r\n\r\n* refactor mfunds\r\n\r\n* refactor rich rest\r\n\r\n* not specify style so default color works well on any background\r\n\r\n* Fixing mypy issues\r\n\r\n* Updated tests\r\n\r\n* More test fixes\r\n\r\n* James' test fixes\r\n\r\n* Updating tests : stocks/screener - fix cassettes using BR\r\n\r\n* Updating tests : crypto\r\n\r\n* Updating tests : disable DEBUG_MODE\r\n\r\n* Updating tests : stocks/fa/yfinance\r\n\r\n* minor fixes that escape\r\n\r\n* Improve the rich table function (that replaces tabulate :D )\r\n\r\n* Fixed bad code\r\n\r\n* delete rogue file + dcf fix + NoConsole\r\n\r\n* sia mypy\r\n\r\n* fuck you linter\r\n\r\n* fuck you linter pt 2\r\n\r\n* skip hehe\r\n\r\n* i hate the black linter\r\n\r\n* ubuntu mypy attempt\r\n\r\n* Update : rich_config + gtff\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : stocks\r\n\r\n* Update : rich_config\r\n\r\n* Updating : rich_config\r\n\r\n* make panel configurable for Theodore :b\r\n\r\n* colors update\r\n\r\n* Merged\r\n\r\n* Updating : rich_config + feature_flags\r\n\r\n* Updating : rich_config\r\n\r\n* Updating tests : stocks\r\n\r\n* Updating : feature_flags\r\n\r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: james \r\nCo-authored-by: jose-donato ", "code": "def print_help(self):\n \n help_text = f\n console.print(text=help_text, menu=\"Stocks - Options - Pricing\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 32, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 12, "token_counts": 22, "n_ast_nodes": 60, "n_identifiers": 9, "d_id": 83851, "documentation": { "docstring": "Print help\n[param]Ticker: [/param]{self.ticker or None}\n[param]Expiry: [/param]{self.selected_date or None}\n[cmds]\n add add an expected price to the list\n rmv remove an expected price from the list\n\n show show the listed of expected prices\n rnval risk neutral valuation for an option[/cmds]\n ", "n_words": 41, "vocab_size": 29, "n_whitespaces": 94, "language": "en" } }, { "id": 20062, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/distro.py", "file_name": "distro.py", "fun_name": "version_parts", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* 
update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def version_parts(self, best=False):\n # type: (bool) -> Tuple[str, str, str]\n \n version_str = self.version(best=best)\n if version_str:\n version_regex = re.compile(r\"(\\d+)\\.?(\\d+)?\\.?(\\d+)?\")\n matches = version_regex.match(version_str)\n if matches:\n major, minor, build_number = matches.groups()\n return major, minor or \"\", build_number or \"\"\n return \"\", \"\", \"\"\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 138, "n_words": 40, "vocab_size": 29, "complexity": 5, "nloc": 9, "token_counts": 70, "n_ast_nodes": 122, "n_identifiers": 14, "d_id": 3208, "documentation": { "docstring": "\n Return the version of the OS distribution, as a tuple of version\n numbers.\n\n For details, see :func:`distro.version_parts`.\n ", "n_words": 17, "vocab_size": 14, "n_whitespaces": 46, "language": "en" } }, { "id": 261489, "commit_id": "b1807ff8ead319a08294beeaae90c3f03b2bb8ac", "repo": "scikit-learn", "path": "sklearn/ensemble/_stacking.py", "file_name": "_stacking.py", "fun_name": "_validate_estimators", "commit_message": "ENH StackingClassifier allows regressors in its first layer (#24538)\n\nCo-authored-by: Tom Dupré la Tour \r\nCo-authored-by: Guillaume Lemaitre ", "code": "def _validate_estimators(self):\n \n if len(self.estimators) == 0:\n raise ValueError(\n \"Invalid 'estimators' attribute, 'estimators' should be a \"\n \"non-empty list of (string, estimator) tuples.\"\n )\n names, estimators = zip(*self.estimators)\n self._validate_names(names)\n\n has_estimator = any(est != \"drop\" for est in estimators)\n if not has_estimator:\n raise ValueError(\n \"All estimators are dropped. At least one is required \"\n \"to be an estimator.\"\n )\n\n return names, estimators\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 213, "n_words": 60, "vocab_size": 49, "complexity": 4, "nloc": 15, "token_counts": 65, "n_ast_nodes": 118, "n_identifiers": 11, "d_id": 76834, "documentation": { "docstring": "Overload the method of `_BaseHeterogeneousEnsemble` to be more\n lenient towards the type of `estimators`.\n\n Regressors can be accepted for some cases such as ordinal regression.\n ", "n_words": 25, "vocab_size": 22, "n_whitespaces": 46, "language": "en" } }, { "id": 22110, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/sessions.py", "file_name": "sessions.py", "fun_name": "head", "commit_message": "Rename notpip to pip. 
Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def head(self, url, **kwargs):\n r\n\n kwargs.setdefault(\"allow_redirects\", False)\n return self.request(\"HEAD\", url, **kwargs)\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 31, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 9, "token_counts": 32, "n_ast_nodes": 52, "n_identifiers": 6, "d_id": 4186, "documentation": { "docstring": "Sends a HEAD request. Returns :class:`Response` object.\n\n :param url: URL for the new :class:`Request` object.\n :param \\*\\*kwargs: Optional arguments that ``request`` takes.\n :rtype: requests.Response\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 52, "language": "en" } }, { "id": 246185, "commit_id": "901b264c0c88f39cbfb8b2229e0dc57968882658", "repo": "synapse", "path": "tests/rest/admin/test_user.py", "file_name": "test_user.py", "fun_name": "test_consent", "commit_message": "Add type hints to `tests/rest/admin` (#11851)", "code": "def test_consent(self) -> None:\n \n # Have the admin user accept the terms.\n self.get_success(self.store.user_set_consent_version(self.admin_user, \"1.0\"))\n\n # First, cheekily accept the terms and create a room\n self.get_success(self.store.user_set_consent_version(self.other_user, \"1.0\"))\n room_id = self.helper.create_room_as(self.other_user, tok=self.other_user_tok)\n self.helper.send_event(room_id, \"com.example.test\", tok=self.other_user_tok)\n\n # Now unaccept it and check that we can't send an event\n self.get_success(self.store.user_set_consent_version(self.other_user, \"0.0\"))\n self.helper.send_event(\n room_id,\n \"com.example.test\",\n tok=self.other_user_tok,\n expect_code=HTTPStatus.FORBIDDEN,\n )\n\n # Login in as the user\n puppet_token = self._get_token()\n\n # Sending an event on their behalf should work fine\n self.helper.send_event(room_id, \"com.example.test\", tok=puppet_token)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 224, "n_words": 75, "vocab_size": 56, "complexity": 1, "nloc": 15, "token_counts": 137, "n_ast_nodes": 225, "n_identifiers": 18, "d_id": 71079, "documentation": { "docstring": "Test that sending a message is not subject to the privacy policies.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 53274, "commit_id": "36e7e0838aeaffc9492b330297e4905f3ab4b11f", "repo": "prefect", "path": "src/prefect/orion/database/migrations/env.py", "file_name": "env.py", "fun_name": "dry_run_migrations", "commit_message": "code review revisions pt3", "code": "def dry_run_migrations() -> None:\n \n url = db_interface.database_config.connection_url\n context.script.version_locations = [db_interface.orm.versions_dir]\n\n context.configure(\n url=url,\n target_metadata=target_metadata,\n literal_binds=True,\n include_schemas=True,\n dialect_opts={\"paramstyle\": \"named\"},\n )\n\n with context.begin_transaction():\n context.run_migrations()\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 81, "n_words": 21, "vocab_size": 20, "complexity": 1, "nloc": 18, "token_counts": 68, "n_ast_nodes": 113, "n_identifiers": 17, "d_id": 10769, "documentation": { "docstring": "\n Perform a dry run of migrations.\n\n This will create the sql statements without actually running them against 
the\n database and output them to stdout.\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 37, "language": "en" } }, { "id": 207124, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_filters/tests.py", "file_name": "tests.py", "fun_name": "test_fk_with_to_field", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_fk_with_to_field(self):\n \n modeladmin = EmployeeAdmin(Employee, site)\n\n request = self.request_factory.get(\"/\", {})\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n\n # Make sure the correct queryset is returned\n queryset = changelist.get_queryset(request)\n self.assertEqual(list(queryset), [self.jack, self.john])\n\n filterspec = changelist.get_filters(request)[0][-1]\n self.assertEqual(filterspec.title, \"department\")\n choices = list(filterspec.choices(changelist))\n\n self.assertEqual(choices[0][\"display\"], \"All\")\n self.assertIs(choices[0][\"selected\"], True)\n self.assertEqual(choices[0][\"query_string\"], \"?\")\n\n self.assertEqual(choices[1][\"display\"], \"Development\")\n self.assertIs(choices[1][\"selected\"], False)\n self.assertEqual(choices[1][\"query_string\"], \"?department__code__exact=DEV\")\n\n self.assertEqual(choices[2][\"display\"], \"Design\")\n self.assertIs(choices[2][\"selected\"], False)\n self.assertEqual(choices[2][\"query_string\"], \"?department__code__exact=DSN\")\n\n # Filter by Department=='Development' --------------------------------\n\n request = self.request_factory.get(\"/\", {\"department__code__exact\": \"DEV\"})\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n\n # Make sure the correct queryset is returned\n queryset = changelist.get_queryset(request)\n self.assertEqual(list(queryset), [self.john])\n\n filterspec = changelist.get_filters(request)[0][-1]\n self.assertEqual(filterspec.title, \"department\")\n choices = list(filterspec.choices(changelist))\n\n self.assertEqual(choices[0][\"display\"], \"All\")\n self.assertIs(choices[0][\"selected\"], False)\n self.assertEqual(choices[0][\"query_string\"], \"?\")\n\n self.assertEqual(choices[1][\"display\"], \"Development\")\n self.assertIs(choices[1][\"selected\"], True)\n self.assertEqual(choices[1][\"query_string\"], \"?department__code__exact=DEV\")\n\n self.assertEqual(choices[2][\"display\"], \"Design\")\n self.assertIs(choices[2][\"selected\"], False)\n self.assertEqual(choices[2][\"query_string\"], \"?department__code__exact=DSN\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 384, "n_words": 111, "vocab_size": 55, "complexity": 1, "nloc": 36, "token_counts": 447, "n_ast_nodes": 741, "n_identifiers": 24, "d_id": 51871, "documentation": { "docstring": "\n A filter on a FK respects the FK's to_field attribute (#17972).\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 26, "language": "en" } }, { "id": 280806, "commit_id": "ed99e34f279a2d2d6a44af87ee64f8fc98c7e8b9", "repo": "keras", "path": "keras/mixed_precision/autocast_variable.py", "file_name": "autocast_variable.py", "fun_name": "placeholder_value", "commit_message": "Implement TraceType for AutoCastVariable to support tracing with tf.function layering efforts.\n\nPiperOrigin-RevId: 498447924", "code": "def placeholder_value(self, placeholder_context=None):\n \n return self._value\n", "url": "https://github.com/keras-team/keras.git", "language": 
"Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 14, "n_ast_nodes": 24, "n_identifiers": 4, "d_id": 83439, "documentation": { "docstring": "Use the AutoCastVariable value itself as a placeholder.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 7512, "commit_id": "ed8d9cf20843744f18593b22fb6a30eaf5f325eb", "repo": "ludwig", "path": "ludwig/utils/triton_utils.py", "file_name": "triton_utils.py", "fun_name": "save_config", "commit_message": "Triton ensemble export (#2251)", "code": "def save_config(self) -> TritonArtifact:\n \n device = self.device\n if self.inference_stage != PREDICTOR:\n device = \"cpu\"\n self.config = TritonConfig(\n self.full_model_name,\n self.input_features,\n self.output_features,\n self.max_batch_size,\n self.max_queue_delay_microseconds,\n device,\n self.model_instance_count,\n self.inference_stage,\n )\n\n config_path = os.path.join(self.base_path, \"config.pbtxt\")\n with open(config_path, \"w\") as f:\n formatted_config = remove_empty_lines(self.config.get_model_config())\n f.write(formatted_config)\n\n config_artifact = TritonArtifact(\n model_name=self.full_model_name,\n model_version=self.model_version,\n platform=\"pytorch_libtorch\",\n path=config_path,\n content_type=\"text/x-protobuf\",\n content_length=os.path.getsize(config_path),\n )\n\n return config_artifact\n\n\n@dataclass", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "@dataclass", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 308, "n_words": 52, "vocab_size": 44, "complexity": 2, "nloc": 31, "token_counts": 144, "n_ast_nodes": 231, "n_identifiers": 33, "d_id": 1224, "documentation": { "docstring": "Save the Triton config.\n\n Return the appropriate artifact.\n ", "n_words": 8, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 220732, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/sslproto.py", "file_name": "sslproto.py", "fun_name": "abort", "commit_message": "add python 3.10.4 for windows", "code": "def abort(self):\n \n self._ssl_protocol._abort()\n self._closed = True\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 27, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 18, "n_ast_nodes": 33, "n_identifiers": 5, "d_id": 56106, "documentation": { "docstring": "Close the transport immediately.\n\n Buffered data will be lost. 
No more data will be received.\n The protocol's connection_lost() method will (eventually) be\n called with None as its argument.\n ", "n_words": 28, "vocab_size": 23, "n_whitespaces": 57, "language": "en" } }, { "id": 265188, "commit_id": "f9d81fd36232e9bf3f60a215d2c6a405b9b342fb", "repo": "netbox", "path": "netbox/netbox/models/__init__.py", "file_name": "__init__.py", "fun_name": "clone", "commit_message": "Closes #9414: Add clone() method to NetBoxModel for copying instance attributes", "code": "def clone(self):\n \n attrs = {}\n\n for field_name in getattr(self, 'clone_fields', []):\n field = self._meta.get_field(field_name)\n field_value = field.value_from_object(self)\n if field_value not in (None, ''):\n attrs[field_name] = field_value\n\n # Include tags (if applicable)\n if is_taggable(self):\n attrs['tags'] = [tag.pk for tag in self.tags.all()]\n\n return attrs\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 143, "n_words": 42, "vocab_size": 31, "complexity": 5, "nloc": 10, "token_counts": 85, "n_ast_nodes": 140, "n_identifiers": 15, "d_id": 78019, "documentation": { "docstring": "\n Return a dictionary of attributes suitable for creating a copy of the current instance. This is used for pre-\n populating an object creation form in the UI.\n ", "n_words": 27, "vocab_size": 23, "n_whitespaces": 49, "language": "en" } }, { "id": 126801, "commit_id": "f15ed3836d710f655856d5cd1dbbf40b08953f86", "repo": "ray", "path": "python/ray/train/base_trainer.py", "file_name": "base_trainer.py", "fun_name": "preprocess_datasets", "commit_message": "[air] Render trainer docstring signatures (#27590)\n\nSigned-off-by: Richard Liaw ", "code": "def preprocess_datasets(self) -> None:\n \n # Evaluate all datasets.\n self.datasets = {k: d() if callable(d) else d for k, d in self.datasets.items()}\n\n if self.preprocessor:\n train_dataset = self.datasets.get(TRAIN_DATASET_KEY, None)\n if train_dataset:\n self.preprocessor.fit(train_dataset)\n\n # Execute dataset transformations serially for now.\n # Cannot execute them in remote tasks due to dataset ownership model:\n # if datasets are created on a remote node, then if that node fails,\n # we cannot recover the dataset.\n new_datasets = {}\n for key, dataset in self.datasets.items():\n new_datasets[key] = self.preprocessor.transform(dataset)\n\n self.datasets = new_datasets\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 240, "n_words": 83, "vocab_size": 61, "complexity": 6, "nloc": 26, "token_counts": 101, "n_ast_nodes": 166, "n_identifiers": 16, "d_id": 28271, "documentation": { "docstring": "Called during fit() to preprocess dataset attributes with preprocessor.\n\n .. 
note:: This method is run on a remote process.\n\n This method is called prior to entering the training_loop.\n\n If the ``Trainer`` has both a datasets dict and\n a preprocessor, the datasets dict contains a training dataset (denoted by\n the \"train\" key), and the preprocessor has not yet\n been fit, then it will be fit on the train dataset.\n\n Then, all Trainer's datasets will be transformed by the preprocessor.\n\n The transformed datasets will be set back in the ``self.datasets`` attribute\n of the Trainer to be used when overriding ``training_loop``.\n ", "n_words": 98, "vocab_size": 66, "n_whitespaces": 168, "language": "en" } }, { "id": 218885, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/lib2to3/refactor.py", "file_name": "refactor.py", "fun_name": "get_fixers", "commit_message": "add python 3.10.4 for windows", "code": "def get_fixers(self):\n \n pre_order_fixers = []\n post_order_fixers = []\n for fix_mod_path in self.fixers:\n mod = __import__(fix_mod_path, {}, {}, [\"*\"])\n fix_name = fix_mod_path.rsplit(\".\", 1)[-1]\n if fix_name.startswith(self.FILE_PREFIX):\n fix_name = fix_name[len(self.FILE_PREFIX):]\n parts = fix_name.split(\"_\")\n class_name = self.CLASS_PREFIX + \"\".join([p.title() for p in parts])\n try:\n fix_class = getattr(mod, class_name)\n except AttributeError:\n raise FixerError(\"Can't find %s.%s\" % (fix_name, class_name)) from None\n fixer = fix_class(self.options, self.fixer_log)\n if fixer.explicit and self.explicit is not True and \\\n fix_mod_path not in self.explicit:\n self.log_message(\"Skipping optional fixer: %s\", fix_name)\n continue\n\n self.log_debug(\"Adding transformation: %s\", fix_name)\n if fixer.order == \"pre\":\n pre_order_fixers.append(fixer)\n elif fixer.order == \"post\":\n post_order_fixers.append(fixer)\n else:\n raise FixerError(\"Illegal fixer order: %r\" % fixer.order)\n\n key_func = operator.attrgetter(\"run_order\")\n pre_order_fixers.sort(key=key_func)\n post_order_fixers.sort(key=key_func)\n return (pre_order_fixers, post_order_fixers)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 446, "n_words": 108, "vocab_size": 81, "complexity": 10, "nloc": 30, "token_counts": 245, "n_ast_nodes": 403, "n_identifiers": 37, "d_id": 55529, "documentation": { "docstring": "Inspects the options to load the requested patterns and handlers.\n\n Returns:\n (pre_order, post_order), where pre_order is the list of fixers that\n want a pre-order AST traversal, and post_order is the list that want\n post-order traversal.\n ", "n_words": 35, "vocab_size": 27, "n_whitespaces": 76, "language": "en" } }, { "id": 240291, "commit_id": "c95b4fa4388f29e50b6966e45c94c5980013a01d", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_figure.py", "file_name": "_figure.py", "fun_name": "for_each_coloraxis", "commit_message": "type annotations for chainable Figure methods", "code": "def for_each_coloraxis(self, fn, selector=None, row=None, col=None) -> \"Figure\":\n \n for obj in self.select_coloraxes(selector=selector, row=row, col=col):\n fn(obj)\n\n return self\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 49, "n_words": 17, "vocab_size": 17, "complexity": 2, "nloc": 32, "token_counts": 48, "n_ast_nodes": 73, "n_identifiers": 8, "d_id": 68402, "documentation": { "docstring": "\n Apply 
a function to all coloraxis objects that satisfy the\n specified selection criteria\n\n Parameters\n ----------\n fn:\n Function that inputs a single coloraxis object.\n selector: dict, function, or None (default None)\n Dict to use as selection criteria.\n coloraxis objects will be selected if they contain\n properties corresponding to all of the dictionary's keys, with\n values that exactly match the supplied values. If None\n (the default), all coloraxis objects are selected. If a\n function, it must be a function accepting a single argument and\n returning a boolean. The function will be called on each\n coloraxis and those for which the function returned True will\n be in the selection.\n row, col: int or None (default None)\n Subplot row and column index of coloraxis objects to select.\n To select coloraxis objects by row and column, the Figure\n must have been created using plotly.subplots.make_subplots.\n If None (the default), all coloraxis objects are selected.\n Returns\n -------\n self\n Returns the Figure object that the method was called on\n ", "n_words": 161, "vocab_size": 95, "n_whitespaces": 404, "language": "en" } }, { "id": 87131, "commit_id": "bf416f7ad23d7537a84c9727cfe1c0a7effd27bb", "repo": "sentry", "path": "tests/sentry/search/events/test_builder.py", "file_name": "test_builder.py", "fun_name": "test_run_query_with_multiple_groupby_orderby_null_values_in_first_entity", "commit_message": "feat(discover): Only transform when ordering project (#39468)\n\n- This updates the querybuilder with a orderby resolver so we can\r\nimplement more custom orderbys(orderbies?) in the future\r\n- This changes the project field to just select the project_id only,\r\nwhich results in needing a new post-processing capability to the\r\nquerybuilder\r\n- This is done via the `value_resolver_map` and the `meta_resolver_map`\r\n- Removed the snuba_filter param from transform_results since we no\r\nlonger use it\r\n- Removes the old discover 1 query since it shouldn't be supported and\r\nno longer is being used\r\n- Needed to update ds code too since it relied on the old project\r\nbehaviour but doesn't use `discover.query`", "code": "def test_run_query_with_multiple_groupby_orderby_null_values_in_first_entity(self):\n \n self.setup_orderby_data()\n self.store_transaction_metric(200, tags={\"transaction\": \"baz_transaction\"})\n query = MetricsQueryBuilder(\n self.params,\n f\"project:{self.project.slug}\",\n dataset=Dataset.PerformanceMetrics,\n selected_columns=[\n \"transaction\",\n \"project\",\n \"p95(transaction.duration)\",\n \"count_unique(user)\",\n ],\n orderby=\"count_unique(user)\",\n )\n result = query.run_query(\"test_query\")\n assert len(result[\"data\"]) == 3\n assert result[\"data\"][0] == {\n \"transaction\": resolve_tag_value(\n UseCaseKey.PERFORMANCE,\n self.organization.id,\n \"baz_transaction\",\n ),\n \"project\": self.project.id,\n \"p95_transaction_duration\": 200,\n }\n assert result[\"data\"][1] == {\n \"transaction\": resolve_tag_value(\n UseCaseKey.PERFORMANCE,\n self.organization.id,\n \"foo_transaction\",\n ),\n \"project\": self.project.id,\n \"p95_transaction_duration\": 100,\n \"count_unique_user\": 1,\n }\n assert result[\"data\"][2] == {\n \"transaction\": resolve_tag_value(\n UseCaseKey.PERFORMANCE,\n self.organization.id,\n \"bar_transaction\",\n ),\n \"project\": self.project.id,\n \"p95_transaction_duration\": 50,\n \"count_unique_user\": 2,\n }\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 582, "n_words": 76, "vocab_size": 48, "complexity": 1, "nloc": 46, "token_counts": 209, "n_ast_nodes": 357, "n_identifiers": 23, "d_id": 18229, "documentation": { "docstring": "But if the null value is in the first entity, it won't show up in the groupby values, which means the\n transaction will be missing", "n_words": 25, "vocab_size": 21, "n_whitespaces": 31, "language": "en" } }, { "id": 86044, "commit_id": "bdcd185bc020080da29961b9c60a5a0dabd3ab03", "repo": "sentry", "path": "tests/sentry/integrations/github/test_client.py", "file_name": "test_client.py", "fun_name": "test_get_blame_for_file", "commit_message": "feat(commit-context): Process commit context task (#38984)\n\n## Objective:\r\n\r\nWe want to use the GitHub blame API to determine who is the committer\r\nfor the first in_app stacktrace frame and assign them as a Suspect\r\nCommit GroupOwner.\r\n\r\nThis task is feature flagged for an internal release.", "code": "def test_get_blame_for_file(self, get_jwt):\n responses.add(\n method=responses.POST,\n url=\"https://api.github.com/app/installations/1/access_tokens\",\n body='{\"token\": \"12345token\", \"expires_at\": \"2030-01-01T00:00:00Z\"}',\n content_type=\"application/json\",\n )\n\n path = \"src/sentry/integrations/github/client.py\"\n ref = \"master\"\n query = f\n responses.add(\n method=responses.POST,\n url=\"https://api.github.com/graphql\",\n json={\"query\": query},\n content_type=\"application/json\",\n )\n resp = self.client.get_blame_for_file(self.repo, path, ref)\n assert (\n responses.calls[1].request.body\n == b'{\"query\": \"query {\\\\n repository(name: foo, owner: Test-Organization) {\\\\n ref(qualifiedName: master) {\\\\n target {\\\\n ... on Commit {\\\\n blame(path: src/sentry/integrations/github/client.py) {\\\\n ranges {\\\\n commit {\\\\n oid\\\\n author {\\\\n name\\\\n email\\\\n }\\\\n message\\\\n committedDate\\\\n }\\\\n startingLine\\\\n endingLine\\\\n age\\\\n }\\\\n }\\\\n }\\\\n }\\\\n }\\\\n }\\\\n }\"}'\n )\n\n assert resp == []\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 1022, "n_words": 85, "vocab_size": 59, "complexity": 1, "nloc": 47, "token_counts": 104, "n_ast_nodes": 220, "n_identifiers": 20, "d_id": 18075, "documentation": { "docstring": "query {{\n repository(name: foo, owner: Test-Organization) {{\n ref(qualifiedName: {ref}) {{\n target {{\n ... 
on Commit {{\n blame(path: {path}) {{\n ranges {{\n commit {{\n oid\n author {{\n name\n email\n }}\n message\n committedDate\n }}\n startingLine\n endingLine\n age\n }}\n }}\n }}\n }}\n }}\n }}\n }}", "n_words": 42, "vocab_size": 26, "n_whitespaces": 792, "language": "en" } }, { "id": 258993, "commit_id": "8abc6d890e8bb4be7abe2984b3f373585f8f3c57", "repo": "scikit-learn", "path": "sklearn/utils/__init__.py", "file_name": "__init__.py", "fun_name": "tosequence", "commit_message": "DOC Ensure that tosequence passes numpydoc validation (#22494)\n\nCo-authored-by: Guillaume Lemaitre ", "code": "def tosequence(x):\n \n if isinstance(x, np.ndarray):\n return np.asarray(x)\n elif isinstance(x, Sequence):\n return x\n else:\n return list(x)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 48, "n_words": 15, "vocab_size": 12, "complexity": 3, "nloc": 7, "token_counts": 40, "n_ast_nodes": 66, "n_identifiers": 8, "d_id": 75512, "documentation": { "docstring": "Cast iterable x to a Sequence, avoiding a copy if possible.\n\n Parameters\n ----------\n x : iterable\n The iterable to be converted.\n\n Returns\n -------\n x : Sequence\n If `x` is a NumPy array, it returns it as a `ndarray`. If `x`\n is a `Sequence`, `x` is returned as-is. If `x` is from any other\n type, `x` is returned casted as a list.\n ", "n_words": 61, "vocab_size": 37, "n_whitespaces": 110, "language": "en" } }, { "id": 228646, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/bar/_error_x.py", "file_name": "_error_x.py", "fun_name": "type", "commit_message": "switch to black .22", "code": "def type(self):\n \n return self[\"type\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 60319, "documentation": { "docstring": "\n Determines the rule used to generate the error bars. If\n *constant`, the bar lengths are of a constant value. Set this\n constant in `value`. If \"percent\", the bar lengths correspond\n to a percentage of underlying data. Set this percentage in\n `value`. If \"sqrt\", the bar lengths correspond to the square of\n the underlying data. 
If \"data\", the bar lengths are set with\n data set `array`.\n\n The 'type' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['percent', 'constant', 'sqrt', 'data']\n\n Returns\n -------\n Any\n ", "n_words": 90, "vocab_size": 55, "n_whitespaces": 199, "language": "en" } }, { "id": 258738, "commit_id": "5f75acdd12d77b973471961ad716367c6199d01c", "repo": "scikit-learn", "path": "sklearn/linear_model/_stochastic_gradient.py", "file_name": "_stochastic_gradient.py", "fun_name": "_fit_multiclass", "commit_message": "MNT Bump joblib version dependency to 1.0.0 (#22365)", "code": "def _fit_multiclass(self, X, y, alpha, C, learning_rate, sample_weight, max_iter):\n \n # Precompute the validation split using the multiclass labels\n # to ensure proper balancing of the classes.\n validation_mask = self._make_validation_split(y)\n\n # Use joblib to fit OvA in parallel.\n # Pick the random seed for each job outside of fit_binary to avoid\n # sharing the estimator random state between threads which could lead\n # to non-deterministic behavior\n random_state = check_random_state(self.random_state)\n seeds = random_state.randint(MAX_INT, size=len(self.classes_))\n result = Parallel(\n n_jobs=self.n_jobs, verbose=self.verbose, require=\"sharedmem\"\n )(\n delayed(fit_binary)(\n self,\n i,\n X,\n y,\n alpha,\n C,\n learning_rate,\n max_iter,\n self._expanded_class_weight[i],\n 1.0,\n sample_weight,\n validation_mask=validation_mask,\n random_state=seed,\n )\n for i, seed in enumerate(seeds)\n )\n\n # take the maximum of n_iter_ over every binary fit\n n_iter_ = 0.0\n for i, (_, intercept, n_iter_i) in enumerate(result):\n self.intercept_[i] = intercept\n n_iter_ = max(n_iter_, n_iter_i)\n\n self.t_ += n_iter_ * X.shape[0]\n self.n_iter_ = n_iter_\n\n if self.average > 0:\n if self.average <= self.t_ - 1.0:\n self.coef_ = self._average_coef\n self.intercept_ = self._average_intercept\n else:\n self.coef_ = self._standard_coef\n self._standard_intercept = np.atleast_1d(self.intercept_)\n self.intercept_ = self._standard_intercept\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 652, "n_words": 161, "vocab_size": 106, "complexity": 5, "nloc": 38, "token_counts": 243, "n_ast_nodes": 355, "n_identifiers": 46, "d_id": 75397, "documentation": { "docstring": "Fit a multi-class classifier by combining binary classifiers\n\n Each binary classifier predicts one class versus all others. 
This\n strategy is called OvA (One versus All) or OvR (One versus Rest).\n ", "n_words": 30, "vocab_size": 25, "n_whitespaces": 51, "language": "en" } }, { "id": 196814, "commit_id": "f757f3daae6e11ea0cfb7dadc133274d8d74315f", "repo": "sympy", "path": "sympy/series/approximants.py", "file_name": "approximants.py", "fun_name": "approximants", "commit_message": "Reordered imports 2", "code": "def approximants(l, X=Symbol('x'), simplify=False):\n \n from sympy.simplify import simplify as simp\n from sympy.simplify.radsimp import denom\n p1, q1 = [S.One], [S.Zero]\n p2, q2 = [S.Zero], [S.One]\n while len(l):\n b = 0\n while l[b]==0:\n b += 1\n if b == len(l):\n return\n m = [S.One/l[b]]\n for k in range(b+1, len(l)):\n s = 0\n for j in range(b, k):\n s -= l[j+1] * m[b-j-1]\n m.append(s/l[b])\n l = m\n a, l[0] = l[0], 0\n p = [0] * max(len(p2), b+len(p1))\n q = [0] * max(len(q2), b+len(q1))\n for k in range(len(p2)):\n p[k] = a*p2[k]\n for k in range(b, b+len(p1)):\n p[k] += p1[k-b]\n for k in range(len(q2)):\n q[k] = a*q2[k]\n for k in range(b, b+len(q1)):\n q[k] += q1[k-b]\n while p[-1]==0: p.pop()\n while q[-1]==0: q.pop()\n p1, p2 = p2, p\n q1, q2 = q2, q\n\n # yield result\n c = 1\n for x in p:\n c = lcm(c, denom(x))\n for x in q:\n c = lcm(c, denom(x))\n out = ( sum(c*e*X**k for k, e in enumerate(p))\n / sum(c*e*X**k for k, e in enumerate(q)) )\n if simplify:\n yield(simp(out))\n else:\n yield out\n return\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 541, "n_words": 173, "vocab_size": 96, "complexity": 17, "nloc": 96, "token_counts": 447, "n_ast_nodes": 689, "n_identifiers": 36, "d_id": 48194, "documentation": { "docstring": "\n Return a generator for consecutive Pade approximants for a series.\n It can also be used for computing the rational generating function of a\n series when possible, since the last approximant returned by the generator\n will be the generating function (if any).\n\n Explanation\n ===========\n\n The input list can contain more complex expressions than integer or rational\n numbers; symbols may also be involved in the computation. 
An example below\n show how to compute the generating function of the whole Pascal triangle.\n\n The generator can be asked to apply the sympy.simplify function on each\n generated term, which will make the computation slower; however it may be\n useful when symbols are involved in the expressions.\n\n Examples\n ========\n\n >>> from sympy.series import approximants\n >>> from sympy import lucas, fibonacci, symbols, binomial\n >>> g = [lucas(k) for k in range(16)]\n >>> [e for e in approximants(g)]\n [2, -4/(x - 2), (5*x - 2)/(3*x - 1), (x - 2)/(x**2 + x - 1)]\n\n >>> h = [fibonacci(k) for k in range(16)]\n >>> [e for e in approximants(h)]\n [x, -x/(x - 1), (x**2 - x)/(2*x - 1), -x/(x**2 + x - 1)]\n\n >>> x, t = symbols(\"x,t\")\n >>> p=[sum(binomial(k,i)*x**i for i in range(k+1)) for k in range(16)]\n >>> y = approximants(p, t)\n >>> for k in range(3): print(next(y))\n 1\n (x + 1)/((-x - 1)*(t*(x + 1) + (x + 1)/(-x - 1)))\n nan\n\n >>> y = approximants(p, t, simplify=True)\n >>> for k in range(3): print(next(y))\n 1\n -1/(t*(x + 1) - 1)\n nan\n\n See Also\n ========\n\n See function sympy.concrete.guess.guess_generating_function_rational and\n function mpmath.pade\n\n ", "n_words": 253, "vocab_size": 139, "n_whitespaces": 371, "language": "en" } }, { "id": 153841, "commit_id": "b22b93df20ad25ae7a11f0c89d32fb2f234d4641", "repo": "modin", "path": "modin/core/execution/ray/common/utils.py", "file_name": "utils.py", "fun_name": "deserialize", "commit_message": "FIX-#4464: Refactor Ray utils and quick fix groupby.count failing on virtual partitions (#4490)\n\nCo-authored-by: Devin Petersohn \r\nSigned-off-by: jeffreykennethli ", "code": "def deserialize(obj):\n \n if isinstance(obj, ObjectIDType):\n return ray.get(obj)\n elif isinstance(obj, (tuple, list)) and any(\n isinstance(o, ObjectIDType) for o in obj\n ):\n return ray.get(list(obj))\n elif isinstance(obj, dict) and any(\n isinstance(val, ObjectIDType) for val in obj.values()\n ):\n return dict(zip(obj.keys(), ray.get(list(obj.values()))))\n else:\n return obj\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 103, "n_words": 40, "vocab_size": 27, "complexity": 8, "nloc": 13, "token_counts": 113, "n_ast_nodes": 177, "n_identifiers": 15, "d_id": 35654, "documentation": { "docstring": "\n Deserialize a Ray object.\n\n Parameters\n ----------\n obj : ObjectIDType, iterable of ObjectIDType, or mapping of keys to ObjectIDTypes\n Object(s) to deserialize.\n\n Returns\n -------\n obj\n The deserialized object.\n ", "n_words": 27, "vocab_size": 22, "n_whitespaces": 66, "language": "en" } }, { "id": 291798, "commit_id": "dffdc78915ad9d25f54be90ef62659b2c68de347", "repo": "core", "path": "homeassistant/components/here_travel_time/sensor.py", "file_name": "sensor.py", "fun_name": "async_added_to_hass", "commit_message": "Make HERETravelTimeSensor extend RestoreSensor (#82400)", "code": "async def async_added_to_hass(self) -> None:\n \n await self._async_restore_state()\n await super().async_added_to_hass()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 30, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 6, "token_counts": 38, "n_ast_nodes": 43, "n_identifiers": 4, "d_id": 90902, "documentation": { "docstring": "Wait for start so origin and destination entities can be resolved.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, 
"language": "en" } }, { "id": 320554, "commit_id": "a2b7687c3b88aadc55ec38a2249c299eaefd394d", "repo": "paperless-ngx", "path": "src/paperless_tesseract/tests/test_parser.py", "file_name": "test_parser.py", "fun_name": "test_rtl_language_detection", "commit_message": "In the case of an RTL language being extracted via pdfminer.six, fall back to forced OCR, which handles RTL text better", "code": "def test_rtl_language_detection(self):\n \n parser = RasterisedDocumentParser(None)\n with mock.patch.object(\n parser,\n \"construct_ocrmypdf_parameters\",\n wraps=parser.construct_ocrmypdf_parameters,\n ) as wrapped:\n\n parser.parse(\n os.path.join(self.SAMPLE_FILES, \"rtl-test.pdf\"),\n \"application/pdf\",\n )\n\n # There isn't a good way to actually check this working, with RTL correctly return\n # as it would require tesseract-ocr-ara installed for everyone running the\n # test suite. This test does provide the coverage though and attempts to ensure\n # the force OCR happens\n self.assertIsNotNone(parser.get_text())\n\n self.assertEqual(parser.construct_ocrmypdf_parameters.call_count, 2)\n # Check the last call kwargs\n self.assertTrue(\n parser.construct_ocrmypdf_parameters.call_args.kwargs[\"safe_fallback\"],\n )\n\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 305, "n_words": 75, "vocab_size": 62, "complexity": 1, "nloc": 16, "token_counts": 91, "n_ast_nodes": 157, "n_identifiers": 22, "d_id": 117215, "documentation": { "docstring": "\n GIVEN:\n - File with text in an RTL language\n WHEN:\n - Document is parsed\n THEN:\n - Text from the document is extracted\n ", "n_words": 22, "vocab_size": 19, "n_whitespaces": 84, "language": "en" } }, { "id": 194827, "commit_id": "4f7b38e7970424e4329cb57ab65710291a50f3f7", "repo": "ParlAI", "path": "parlai/tasks/empathetic_dialogues/agents.py", "file_name": "agents.py", "fun_name": "_get_experiencer_side_only", "commit_message": "[Empathetic Dialogues] Switch to DialogTeacher (#4405)\n\n* Start revising ED teacher\r\n\r\n* Convert over ED teacher\r\n\r\n* Silly\r\n\r\n* Minor\r\n\r\n* Remove episode_done\r\n\r\n* More cleanup\r\n\r\n* Fix\r\n\r\n* Test fix\r\n\r\n* Force new CI check\r\n\r\n* Note\r\n\r\n* Cleanup\r\n\r\n* Update parlai/tasks/empathetic_dialogues/agents.py\r\n\r\nCo-authored-by: Stephen Roller \r\n\r\n* Update parlai/tasks/empathetic_dialogues/agents.py\r\n\r\nCo-authored-by: Stephen Roller \r\n\r\n* Minor\r\n\r\n* Fixes\r\n\r\n* EDPersonaTopicifierTeacher fix\r\n\r\n* Fix ID\r\n\r\n* Hopefully fix style gen teacher PR\r\n\r\n* Add back fields\r\n\r\n* Update test_blended_skill_talk.py\r\n\r\n* Update test_blended_skill_talk.py\r\n\r\n* Convert over EDPersonaTopicifierTeacher\r\n\r\n* EDPersonaTopicifierTeacher overhaul\r\n\r\n* Minor\r\n\r\n* Minor\r\n\r\n* Remove imports\r\n\r\n* Black\r\n\r\nCo-authored-by: Stephen Roller ", "code": "def _get_experiencer_side_only(self, opt):\n \n base_datatype = self._get_base_datatype(opt)\n return (\n opt.get('train_experiencer_only', DEFAULT_TRAIN_EXPERIENCER_ONLY)\n and base_datatype == 'train'\n ) or base_datatype != 'train'\n", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 69, "n_words": 19, "vocab_size": 16, "complexity": 3, "nloc": 6, "token_counts": 35, "n_ast_nodes": 62, "n_identifiers": 7, "d_id": 47102, "documentation": { "docstring": "\n Determine which side(s) of the 
conversation to use.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 23192, "commit_id": "9f62b610dea6161627200ed85d92e19b1923279a", "repo": "PaddleOCR", "path": "ppocr/data/imaug/fce_targets.py", "file_name": "fce_targets.py", "fun_name": "generate_level_targets", "commit_message": "add fcenet", "code": "def generate_level_targets(self, img_size, text_polys, ignore_polys):\n \n h, w = img_size\n lv_size_divs = self.level_size_divisors\n lv_proportion_range = self.level_proportion_range\n lv_text_polys = [[] for i in range(len(lv_size_divs))]\n lv_ignore_polys = [[] for i in range(len(lv_size_divs))]\n level_maps = []\n for poly in text_polys:\n # assert len(poly) == 1\n # text_instance = [[poly[i], poly[i + 1]]\n # for i in range(0, len(poly), 2)]\n polygon = np.array(poly, dtype=np.int).reshape((1, -1, 2))\n _, _, box_w, box_h = cv2.boundingRect(polygon)\n proportion = max(box_h, box_w) / (h + 1e-8)\n\n for ind, proportion_range in enumerate(lv_proportion_range):\n if proportion_range[0] < proportion < proportion_range[1]:\n lv_text_polys[ind].append(poly / lv_size_divs[ind])\n\n for ignore_poly in ignore_polys:\n # assert len(ignore_poly) == 1\n # text_instance = [[ignore_poly[i], ignore_poly[i + 1]]\n # for i in range(0, len(ignore_poly), 2)]\n polygon = np.array(ignore_poly, dtype=np.int).reshape((1, -1, 2))\n _, _, box_w, box_h = cv2.boundingRect(polygon)\n proportion = max(box_h, box_w) / (h + 1e-8)\n\n for ind, proportion_range in enumerate(lv_proportion_range):\n if proportion_range[0] < proportion < proportion_range[1]:\n lv_ignore_polys[ind].append(ignore_poly / lv_size_divs[ind])\n\n for ind, size_divisor in enumerate(lv_size_divs):\n current_level_maps = []\n level_img_size = (h // size_divisor, w // size_divisor)\n\n text_region = self.generate_text_region_mask(\n level_img_size, lv_text_polys[ind])[None]\n current_level_maps.append(text_region)\n\n center_region = self.generate_center_region_mask(\n level_img_size, lv_text_polys[ind])[None]\n current_level_maps.append(center_region)\n\n effective_mask = self.generate_effective_mask(\n level_img_size, lv_ignore_polys[ind])[None]\n current_level_maps.append(effective_mask)\n\n fourier_real_map, fourier_image_maps = self.generate_fourier_maps(\n level_img_size, lv_text_polys[ind])\n current_level_maps.append(fourier_real_map)\n current_level_maps.append(fourier_image_maps)\n\n level_maps.append(np.concatenate(current_level_maps))\n\n return level_maps\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 716, "n_words": 191, "vocab_size": 96, "complexity": 10, "nloc": 39, "token_counts": 384, "n_ast_nodes": 586, "n_identifiers": 49, "d_id": 4536, "documentation": { "docstring": "Generate ground truth target on each level.\n\n Args:\n img_size (list[int]): Shape of input image.\n text_polys (list[list[ndarray]]): A list of ground truth polygons.\n ignore_polys (list[list[ndarray]]): A list of ignored polygons.\n Returns:\n level_maps (list(ndarray)): A list of ground target on each level.\n ", "n_words": 40, "vocab_size": 24, "n_whitespaces": 105, "language": "en" } }, { "id": 181598, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tests/driver_tests.py", "file_name": "driver_tests.py", "fun_name": "test_driver_3", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", 
"code": "def test_driver_3():\n \n args_list = [\n 'tests/tests.csv',\n '-is', ',',\n '-target', 'class',\n '-g', '1',\n '-p', '2',\n '-cv', '3',\n '-s',' 45',\n '-config', 'TPOT light',\n '-v', '2'\n ]\n args = _get_arg_parser().parse_args(args_list)\n with captured_output() as (out, err):\n tpot_driver(args)\n ret_stdout = out.getvalue()\n assert \"TPOT settings\" in ret_stdout\n assert \"Final Pareto front testing scores\" not in ret_stdout\n try:\n ret_val = float(ret_stdout.split('\\n')[-2].split(': ')[-1])\n except Exception:\n ret_val = -float('inf')\n assert ret_val > 0.0\n\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 265, "n_words": 64, "vocab_size": 53, "complexity": 2, "nloc": 23, "token_counts": 125, "n_ast_nodes": 231, "n_identifiers": 15, "d_id": 43387, "documentation": { "docstring": "Assert that the tpot_driver() in TPOT driver outputs normal result with verbosity = 2.", "n_words": 14, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 269606, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "_has_nchw_support", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _has_nchw_support():\n \n explicitly_on_cpu = _is_current_explicit_device(\"CPU\")\n gpus_available = bool(_get_available_gpus())\n return not explicitly_on_cpu and gpus_available\n\n\n# VARIABLE MANIPULATION\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 27, "n_words": 16, "vocab_size": 13, "complexity": 2, "nloc": 4, "token_counts": 24, "n_ast_nodes": 47, "n_identifiers": 6, "d_id": 80226, "documentation": { "docstring": "Check whether the current scope supports NCHW ops.\n\n TensorFlow does not support NCHW on CPU. Therefore we check if we are not\n explicitly put on\n CPU, and have GPUs available. 
In this case there will be soft-placing on the\n GPU device.\n\n Returns:\n bool: if the current scope device placement would support nchw\n ", "n_words": 52, "vocab_size": 41, "n_whitespaces": 77, "language": "en" } }, { "id": 4247, "commit_id": "56bf982cb96f831fe04f5e44a92ee4a669b9e16a", "repo": "airbyte", "path": "octavia-cli/octavia_cli/apply/resources.py", "file_name": "resources.py", "fun_name": "_search", "commit_message": "🐙 octavia-cli: `apply` connections (#10881)", "code": "def _search(self, check_return_type=True) -> Union[SourceReadList, DestinationReadList, ConnectionReadList]:\n \n return self._search_fn(self.api_instance, self.search_payload, _check_return_type=check_return_type)\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 25, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 7, "token_counts": 36, "n_ast_nodes": 53, "n_identifiers": 11, "d_id": 641, "documentation": { "docstring": "Run search of a resources on the remote Airbyte instance.\n\n Returns:\n Union[SourceReadList, DestinationReadList, ConnectionReadList]: Search results\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 41, "language": "en" } }, { "id": 90257, "commit_id": "096b5511e244eecd8799b2a0324655207ce8985e", "repo": "sentry", "path": "tests/snuba/api/endpoints/test_organization_group_index.py", "file_name": "test_organization_group_index.py", "fun_name": "test_in_non_semver_projects_resolved_in_next_release_is_equated_to_in_release", "commit_message": "ref(tests): Remove `get_valid_response()` (#34822)", "code": "def test_in_non_semver_projects_resolved_in_next_release_is_equated_to_in_release(self):\n \n release_1 = self.create_release(\n date_added=timezone.now() - timedelta(minutes=45), version=\"foobar 1\"\n )\n release_2 = self.create_release(version=\"foobar 2\")\n self.create_release(version=\"foobar 3\")\n\n group = self.store_event(\n data={\n \"timestamp\": iso_format(before_now(seconds=12)),\n \"fingerprint\": [\"group-1\"],\n \"release\": release_1.version,\n },\n project_id=self.project.id,\n ).group\n\n self.login_as(user=self.user)\n\n response = self.get_success_response(\n qs_params={\"id\": group.id}, status=\"resolvedInNextRelease\"\n )\n assert response.data[\"status\"] == \"resolved\"\n assert response.data[\"statusDetails\"][\"inNextRelease\"]\n\n grp_resolution = GroupResolution.objects.filter(group=group)\n\n assert len(grp_resolution) == 1\n grp_resolution = grp_resolution[0]\n\n assert grp_resolution.current_release_version == release_1.version\n assert grp_resolution.release.id == release_2.id\n assert grp_resolution.type == GroupResolution.Type.in_release\n assert grp_resolution.status == GroupResolution.Status.resolved\n\n activity = Activity.objects.filter(\n group=grp_resolution.group,\n type=Activity.SET_RESOLVED_IN_RELEASE,\n ident=grp_resolution.id,\n ).first()\n assert activity.data[\"version\"] == release_2.version\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 368, "n_words": 81, "vocab_size": 59, "complexity": 1, "nloc": 33, "token_counts": 249, "n_ast_nodes": 407, "n_identifiers": 43, "d_id": 18657, "documentation": { "docstring": "\n Test that ensures that if we basically know the next release when clicking on Resolved\n In Next Release because that release exists, then we can short circuit setting\n GroupResolution to type \"inNextRelease\", and then 
having `clear_exrired_resolutions` run\n once a new release is created to convert GroupResolution to in_release and set Activity.\n Basically we treat \"ResolvedInNextRelease\" as \"ResolvedInRelease\" when there is a release\n that was created after the last release associated with the group being resolved\n ", "n_words": 75, "vocab_size": 55, "n_whitespaces": 125, "language": "en" } }, { "id": 196118, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/perm_groups.py", "file_name": "perm_groups.py", "fun_name": "schreier_vector", "commit_message": "Updated import locations", "code": "def schreier_vector(self, alpha):\n \n n = self.degree\n v = [None]*n\n v[alpha] = -1\n orb = [alpha]\n used = [False]*n\n used[alpha] = True\n gens = self.generators\n r = len(gens)\n for b in orb:\n for i in range(r):\n temp = gens[i]._array_form[b]\n if used[temp] is False:\n orb.append(temp)\n used[temp] = True\n v[temp] = i\n return v\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 226, "n_words": 51, "vocab_size": 35, "complexity": 4, "nloc": 17, "token_counts": 108, "n_ast_nodes": 169, "n_identifiers": 18, "d_id": 47618, "documentation": { "docstring": "Computes the schreier vector for ``alpha``.\n\n Explanation\n ===========\n\n The Schreier vector efficiently stores information\n about the orbit of ``alpha``. It can later be used to quickly obtain\n elements of the group that send ``alpha`` to a particular element\n in the orbit. Notice that the Schreier vector depends on the order\n in which the group generators are listed. For a definition, see [3].\n Since list indices start from zero, we adopt the convention to use\n \"None\" instead of 0 to signify that an element doesn't belong\n to the orbit.\n For the algorithm and its correctness, see [2], pp.78-80.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation, PermutationGroup\n >>> a = Permutation([2, 4, 6, 3, 1, 5, 0])\n >>> b = Permutation([0, 1, 3, 5, 4, 6, 2])\n >>> G = PermutationGroup([a, b])\n >>> G.schreier_vector(0)\n [-1, None, 0, 1, None, 1, 0]\n\n See Also\n ========\n\n orbit\n\n ", "n_words": 143, "vocab_size": 97, "n_whitespaces": 304, "language": "en" } }, { "id": 319864, "commit_id": "edaaedae36ee2bb99859b1ca22455b3b7381d0bd", "repo": "paperless-ngx", "path": "src/paperless/tests/test_settings.py", "file_name": "test_settings.py", "fun_name": "test_workers_threads", "commit_message": "Reduces webserver and task worker count to 1 by default", "code": "def test_workers_threads(self):\n \n default_workers = 1\n\n for i in range(1, 64):\n with mock.patch(\n \"paperless.settings.multiprocessing.cpu_count\",\n ) as cpu_count:\n cpu_count.return_value = i\n\n default_threads = default_threads_per_worker(default_workers)\n\n self.assertGreaterEqual(default_threads, 1)\n\n self.assertLessEqual(default_workers * default_threads, i)\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 146, "n_words": 28, "vocab_size": 25, "complexity": 2, "nloc": 10, "token_counts": 59, "n_ast_nodes": 99, "n_identifiers": 13, "d_id": 117009, "documentation": { "docstring": "\n GIVEN:\n - Certain CPU counts\n WHEN:\n - Threads per worker is calculated\n THEN:\n - Threads per worker less than or equal to CPU count\n - At least 1 thread per worker\n ", "n_words": 31, "vocab_size": 22, 
"n_whitespaces": 104, "language": "en" } }, { "id": 29297, "commit_id": "d90be220d6b687d08153934a51354011a3cb5ca1", "repo": "saleor", "path": "saleor/graphql/product/tests/queries/test_product_type_query.py", "file_name": "test_product_type_query.py", "fun_name": "test_query_product_type_for_federation", "commit_message": "Split test_product.py and test_variant.py into multiple files (#11173)\n\n* Split test_product.py into multiple files\r\n\r\n* Split test_variant.py into multiple files", "code": "def test_query_product_type_for_federation(api_client, product, channel_USD):\n product_type = product.product_type\n product_type_id = graphene.Node.to_global_id(\"ProductType\", product_type.pk)\n variables = {\n \"representations\": [\n {\n \"__typename\": \"ProductType\",\n \"id\": product_type_id,\n },\n ],\n }\n query = \n\n response = api_client.post_graphql(query, variables)\n content = get_graphql_content(response)\n assert content[\"data\"][\"_entities\"] == [\n {\n \"__typename\": \"ProductType\",\n \"id\": product_type_id,\n \"name\": product_type.name,\n }\n ]\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 186, "n_words": 46, "vocab_size": 33, "complexity": 1, "nloc": 31, "token_counts": 94, "n_ast_nodes": 161, "n_identifiers": 17, "d_id": 5212, "documentation": { "docstring": "\n query GetProductTypeInFederation($representations: [_Any]) {\n _entities(representations: $representations) {\n __typename\n ... on ProductType {\n id\n name\n }\n }\n }\n ", "n_words": 17, "vocab_size": 13, "n_whitespaces": 94, "language": "en" } }, { "id": 101547, "commit_id": "7da2cc3dd266aabebf41a31384cc2e0e7e5af6e5", "repo": "faceswap", "path": "lib/training/preview_tk.py", "file_name": "preview_tk.py", "fun_name": "_clear_combo_focus", "commit_message": "Training - Use custom preview pop-out", "code": "def _clear_combo_focus(self, *args) -> None: # pylint: disable=unused-argument\n \n logger.debug(\"Clearing scale combo focus\")\n self._scale.selection_clear()\n self._scale.winfo_toplevel().focus_set()\n logger.debug(\"Cleared scale combo focus\")\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 54, "n_words": 18, "vocab_size": 15, "complexity": 1, "nloc": 7, "token_counts": 41, "n_ast_nodes": 76, "n_identifiers": 9, "d_id": 20957, "documentation": { "docstring": " Remove the highlighting and stealing of focus that the combobox annoyingly\n implements. 
", "n_words": 12, "vocab_size": 11, "n_whitespaces": 20, "language": "en" } }, { "id": 11257, "commit_id": "2efe175c975975532f6e3fd326ed280addf20eba", "repo": "jina", "path": "tests/distributed/test_topologies/test_topologies.py", "file_name": "test_topologies.py", "fun_name": "test_remote_workspace_value", "commit_message": "fix: return responses (#4343)", "code": "def test_remote_workspace_value():\n \n HOST = __default_host__\n client = JinaDClient(host=HOST, port=8000)\n workspace_id = client.workspaces.create(paths=[os.path.join(cur_dir, 'yamls')])\n flow_id = client.flows.create(\n workspace_id=workspace_id, filename='flow_workspace_validate.yml'\n )\n args = client.flows.get(flow_id)['arguments']['object']['arguments']\n response = Client(\n host=HOST,\n port=args['port_expose'],\n protocol=args['protocol'],\n return_responses=True,\n ).post(on='/', inputs=[Document()], show_progress=True, return_results=True)\n assert (\n response[0]\n .data.docs[0]\n .text.startswith(f'{__partial_workspace__}/WorkspaceValidator/0')\n )\n assert client.flows.delete(flow_id)\n assert client.workspaces.delete(workspace_id)\n\n\n@pytest.mark.parametrize('gpus', ['all', '2'])", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize('gpus', ['all', '2'])", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 140, "n_words": 46, "vocab_size": 38, "complexity": 1, "nloc": 21, "token_counts": 168, "n_ast_nodes": 301, "n_identifiers": 39, "d_id": 2028, "documentation": { "docstring": "\n This tests the value set in `self.workspace` in a remote Flow.\n It should always be `/workspace/ExecutorName/...\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 26, "language": "en" } }, { "id": 215701, "commit_id": "1ff576163e64dac4ea1005121b32a1b3a4168f70", "repo": "salt", "path": "tests/pytests/unit/modules/test_chocolatey.py", "file_name": "test_chocolatey.py", "fun_name": "test__yes_version_less_than", "commit_message": "move chocolatey exec module tests to pytest", "code": "def test__yes_version_less_than():\n \n mock_version = MagicMock(return_value=\"0.9.0\")\n with patch(\"salt.modules.chocolatey.chocolatey_version\", mock_version):\n result = chocolatey._yes()\n expected = []\n # Did it return correctly\n assert result == expected\n # Did it populate __context__\n assert chocolatey.__context__[\"chocolatey._yes\"] == expected\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 83, "n_words": 32, "vocab_size": 22, "complexity": 1, "nloc": 7, "token_counts": 45, "n_ast_nodes": 85, "n_identifiers": 10, "d_id": 54109, "documentation": { "docstring": "\n Test _yes when Chocolatey version is less than 0.9.9\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 16, "language": "en" } }, { "id": 242434, "commit_id": "a0e1fde1eddf45f26653e2ff6080d31e177adbec", "repo": "Pillow", "path": "src/PIL/ImageFile.py", "file_name": "ImageFile.py", "fun_name": "setimage", "commit_message": "Added PyEncoder", "code": "def setimage(self, im, extents=None):\n \n\n # following c code\n self.im = im\n\n if extents:\n (x0, y0, x1, y1) = extents\n else:\n (x0, y0, x1, y1) = (0, 0, 0, 0)\n\n if x0 == 0 and x1 == 0:\n self.state.xsize, self.state.ysize = self.im.size\n else:\n self.state.xoff = x0\n self.state.yoff = y0\n self.state.xsize = x1 - x0\n self.state.ysize = y1 - y0\n\n if self.state.xsize <= 0 or self.state.ysize <= 0:\n raise ValueError(\"Size cannot be negative\")\n\n if (\n 
self.state.xsize + self.state.xoff > self.im.size[0]\n or self.state.ysize + self.state.yoff > self.im.size[1]\n ):\n raise ValueError(\"Tile cannot extend outside image\")\n\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 282, "n_words": 91, "vocab_size": 54, "complexity": 8, "nloc": 20, "token_counts": 184, "n_ast_nodes": 282, "n_identifiers": 15, "d_id": 69856, "documentation": { "docstring": "\n Called from ImageFile to set the core output image for the codec\n\n :param im: A core image object\n :param extents: a 4 tuple of (x0, y0, x1, y1) defining the rectangle\n for this tile\n :returns: None\n ", "n_words": 36, "vocab_size": 30, "n_whitespaces": 83, "language": "en" } }, { "id": 67481, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/setup/doctype/customer_group/customer_group.py", "file_name": "customer_group.py", "fun_name": "get_parent_customer_groups", "commit_message": "style: format code with black", "code": "def get_parent_customer_groups(customer_group):\n\tlft, rgt = frappe.db.get_value(\"Customer Group\", customer_group, [\"lft\", \"rgt\"])\n\n\treturn frappe.db.sql(\n\t\t,\n\t\t(lft, rgt),\n\t\tas_dict=True,\n\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 10, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 9, "token_counts": 45, "n_ast_nodes": 72, "n_identifiers": 9, "d_id": 14539, "documentation": { "docstring": "select name from `tabCustomer Group`\n\t\twhere lft <= %s and rgt >= %s\n\t\torder by lft asc", "n_words": 17, "vocab_size": 15, "n_whitespaces": 14, "language": "en" } }, { "id": 99587, "commit_id": "1730c481f1a8a71446326fa1ff72e10663016385", "repo": "sentry", "path": "tests/sentry/integrations/slack/notifications/test_unassigned.py", "file_name": "test_unassigned.py", "fun_name": "test_unassignment", "commit_message": "fix(notifications): Use `metrics_key` (#34572)", "code": "def test_unassignment(self, mock_func):\n \n notification = UnassignedActivityNotification(\n Activity(\n project=self.project,\n group=self.group,\n user=self.user,\n type=ActivityType.ASSIGNED,\n data={\"assignee\": \"\"},\n )\n )\n with self.tasks():\n notification.send()\n\n attachment, text = get_attachment()\n assert text == f\"Issue unassigned by {self.name}\"\n assert attachment[\"title\"] == self.group.title\n assert (\n attachment[\"footer\"]\n == f\"{self.project.slug} | \"\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 235, "n_words": 42, "vocab_size": 34, "complexity": 1, "nloc": 19, "token_counts": 93, "n_ast_nodes": 171, "n_identifiers": 21, "d_id": 19665, "documentation": { "docstring": "\n Test that a Slack message is sent with the expected payload when an issue is unassigned\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 31, "language": "en" } }, { "id": 176466, "commit_id": "6801db694e6a3603ab943079c3399baa0c5ff686", "repo": "networkx", "path": "networkx/utils/misc.py", "file_name": "misc.py", "fun_name": "to_tuple", "commit_message": "Deprecate `to_tuple` (#5430)\n\n* Add _to_tuple private fn to node_link.py.\r\n\r\n* Deprecate utils.misc.to_tuple.\r\n\r\n* Add deprecation note.\r\n\r\n* Add release note.", "code": "def to_tuple(x):\n \n warnings.warn(\n \"to_tuple is deprecated and will be removed 
in NetworkX 3.0.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n if not isinstance(x, (tuple, list)):\n return x\n return tuple(map(to_tuple, x))\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 69, "n_words": 26, "vocab_size": 25, "complexity": 2, "nloc": 9, "token_counts": 44, "n_ast_nodes": 70, "n_identifiers": 10, "d_id": 41921, "documentation": { "docstring": "Converts lists to tuples.\n\n .. deprecated:: 2.8\n\n to_tuple is deprecated and will be removed in NetworkX 3.0.\n\n Examples\n --------\n >>> from networkx.utils import to_tuple\n >>> a_list = [1, 2, [1, 4]]\n >>> to_tuple(a_list)\n (1, 2, (1, 4))\n ", "n_words": 37, "vocab_size": 31, "n_whitespaces": 67, "language": "en" } }, { "id": 261569, "commit_id": "2c1581c32e641e535305647eb57a1787bcf803f0", "repo": "scikit-learn", "path": "examples/ensemble/plot_gradient_boosting_oob.py", "file_name": "plot_gradient_boosting_oob.py", "fun_name": "heldout_score", "commit_message": "DOC Fix FutureWarning in ensemble/plot_gradient_boosting_oob.py (#24948)", "code": "def heldout_score(clf, X_test, y_test):\n \n score = np.zeros((n_estimators,), dtype=np.float64)\n for i, y_pred in enumerate(clf.staged_decision_function(X_test)):\n score[i] = binomial_deviance(y_test, y_pred.ravel())\n return score\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 38, "n_words": 19, "vocab_size": 17, "complexity": 2, "nloc": 5, "token_counts": 59, "n_ast_nodes": 91, "n_identifiers": 16, "d_id": 76867, "documentation": { "docstring": "compute deviance scores on ``X_test`` and ``y_test``.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 303407, "commit_id": "343508a0151378ec4958bd04fa87ca772aaf0e4e", "repo": "core", "path": "homeassistant/components/life360/device_tracker.py", "file_name": "device_tracker.py", "fun_name": "battery_level", "commit_message": "Fix Life360 recovery from server errors (#76231)", "code": "def battery_level(self) -> int | None:\n \n if not self._data:\n return None\n return self._data.battery_level\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 45, "n_words": 13, "vocab_size": 12, "complexity": 2, "nloc": 8, "token_counts": 24, "n_ast_nodes": 41, "n_identifiers": 4, "d_id": 102227, "documentation": { "docstring": "Return the battery level of the device.\n\n Percentage from 0-100.\n ", "n_words": 10, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 200943, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/aggregation_regress/tests.py", "file_name": "tests.py", "fun_name": "test_ticket_11293_q_immutable", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_ticket_11293_q_immutable(self):\n \n q1 = Q(isbn=\"\")\n q2 = Q(authors__count__gt=1)\n query = Book.objects.annotate(Count(\"authors\"))\n query.filter(q1 | q2)\n self.assertEqual(len(q2.children), 1)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 58, "n_words": 16, "vocab_size": 14, "complexity": 1, "nloc": 6, "token_counts": 56, "n_ast_nodes": 97, "n_identifiers": 16, "d_id": 49831, "documentation": { "docstring": "\n Splitting a q object to parts for 
where/having doesn't alter\n the original q-object.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 35, "language": "en" } }, { "id": 241703, "commit_id": "59a7ba760548baadf6dbb30864b54cb01c7225a3", "repo": "lightning", "path": "pytorch_lightning/loops/epoch/training_epoch_loop.py", "file_name": "training_epoch_loop.py", "fun_name": "on_advance_end", "commit_message": "Move `epoch_{start,end}` hooks from `TrainingEpochLoop` to `FitLoop` (#11201)", "code": "def on_advance_end(self) -> None:\n \n # -----------------------------------------\n # VALIDATE IF NEEDED + CHECKPOINT CALLBACK\n # -----------------------------------------\n should_check_val = self._should_check_val_fx(self.batch_idx, self.batch_progress.is_last_batch)\n if should_check_val:\n self.trainer.validating = True\n self._run_validation()\n self.trainer.training = True\n\n # -----------------------------------------\n # SAVE LOGGERS (ie: Tensorboard, etc...)\n # -----------------------------------------\n self._save_loggers_on_train_batch_end()\n\n # update plateau LR scheduler after metrics are logged\n self.update_lr_schedulers(\"step\", update_plateau_schedulers=True)\n\n if not self._should_accumulate():\n # progress global step according to grads progress\n self.global_step += 1\n\n # if training finished, defer exit to the parent. this assumes there will be enough time in between\n # which might not be the case depending on what's in the `*_epoch_end` hooks\n if not self._is_training_done:\n # if fault tolerant is enabled and process has been notified, exit.\n self.trainer._exit_gracefully_on_signal()\n", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 301, "n_words": 112, "vocab_size": 84, "complexity": 4, "nloc": 17, "token_counts": 87, "n_ast_nodes": 157, "n_identifiers": 18, "d_id": 69658, "documentation": { "docstring": "Runs validation and Checkpointing if necessary.\n\n Raises:\n StopIteration: if :attr:`done` evaluates to ``True`` to finish this epoch\n ", "n_words": 17, "vocab_size": 15, "n_whitespaces": 42, "language": "en" } }, { "id": 261047, "commit_id": "9f9f1684e91fbfffbc446f786a8c64628b752efb", "repo": "scikit-learn", "path": "sklearn/utils/tests/test_validation.py", "file_name": "test_validation.py", "fun_name": "test_get_feature_names_invalid_dtypes", "commit_message": "MAINT Clean deprecation for 1.2: validation (#24493)\n\n* cln deprecations\r\n\r\n* cln\r\n\r\n* fix tst switch to pytest.raises", "code": "def test_get_feature_names_invalid_dtypes(names, dtypes):\n \n pd = pytest.importorskip(\"pandas\")\n X = pd.DataFrame([[1, 2], [4, 5], [5, 6]], columns=names)\n\n msg = re.escape(\n \"Feature names only support names that are all strings. 
\"\n f\"Got feature names with dtypes: {dtypes}.\"\n )\n with pytest.raises(TypeError, match=msg):\n names = _get_feature_names(X)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 80, "n_words": 41, "vocab_size": 34, "complexity": 1, "nloc": 9, "token_counts": 74, "n_ast_nodes": 123, "n_identifiers": 16, "d_id": 76648, "documentation": { "docstring": "Get feature names errors when the feature names have mixed dtypes", "n_words": 11, "vocab_size": 9, "n_whitespaces": 10, "language": "en" } }, { "id": 160044, "commit_id": "40747ae50620631941e43dbbd5baaccab669922f", "repo": "numpy", "path": "numpy/linalg/linalg.py", "file_name": "linalg.py", "fun_name": "svd", "commit_message": "clarify svd documentation\n\n`u @ np.diag(s) @ vh` can only reproduce the original matrix when `full_matrices` is `False`, otherwise dimension does not match.", "code": "def svd(a, full_matrices=True, compute_uv=True, hermitian=False):\n \n import numpy as _nx\n a, wrap = _makearray(a)\n\n if hermitian:\n # note: lapack svd returns eigenvalues with s ** 2 sorted descending,\n # but eig returns s sorted ascending, so we re-order the eigenvalues\n # and related arrays to have the correct order\n if compute_uv:\n s, u = eigh(a)\n sgn = sign(s)\n s = abs(s)\n sidx = argsort(s)[..., ::-1]\n sgn = _nx.take_along_axis(sgn, sidx, axis=-1)\n s = _nx.take_along_axis(s, sidx, axis=-1)\n u = _nx.take_along_axis(u, sidx[..., None, :], axis=-1)\n # singular values are unsigned, move the sign into v\n vt = transpose(u * sgn[..., None, :]).conjugate()\n return wrap(u), s, wrap(vt)\n else:\n s = eigvalsh(a)\n s = s[..., ::-1]\n s = abs(s)\n return sort(s)[..., ::-1]\n\n _assert_stacked_2d(a)\n t, result_t = _commonType(a)\n\n extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)\n\n m, n = a.shape[-2:]\n if compute_uv:\n if full_matrices:\n if m < n:\n gufunc = _umath_linalg.svd_m_f\n else:\n gufunc = _umath_linalg.svd_n_f\n else:\n if m < n:\n gufunc = _umath_linalg.svd_m_s\n else:\n gufunc = _umath_linalg.svd_n_s\n\n signature = 'D->DdD' if isComplexType(t) else 'd->ddd'\n u, s, vh = gufunc(a, signature=signature, extobj=extobj)\n u = u.astype(result_t, copy=False)\n s = s.astype(_realType(result_t), copy=False)\n vh = vh.astype(result_t, copy=False)\n return wrap(u), s, wrap(vh)\n else:\n if m < n:\n gufunc = _umath_linalg.svd_m\n else:\n gufunc = _umath_linalg.svd_n\n\n signature = 'D->d' if isComplexType(t) else 'd->d'\n s = gufunc(a, signature=signature, extobj=extobj)\n s = s.astype(_realType(result_t), copy=False)\n return s\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 658, "n_words": 215, "vocab_size": 116, "complexity": 10, "nloc": 49, "token_counts": 399, "n_ast_nodes": 635, "n_identifiers": 48, "d_id": 38474, "documentation": { "docstring": "\n Singular Value Decomposition.\n\n When `a` is a 2D array, and when `full_matrices` is `False`,\n it is factorized as ``u @ np.diag(s) @ vh = (u * s) @ vh``,\n where `u` and `vh` are 2D unitary arrays and `s` is a 1D\n array of `a`'s singular values. 
When `a` is higher-dimensional, SVD is\n applied in stacked mode as explained below.\n\n Parameters\n ----------\n a : (..., M, N) array_like\n A real or complex array with ``a.ndim >= 2``.\n full_matrices : bool, optional\n If True (default), `u` and `vh` have the shapes ``(..., M, M)`` and\n ``(..., N, N)``, respectively. Otherwise, the shapes are\n ``(..., M, K)`` and ``(..., K, N)``, respectively, where\n ``K = min(M, N)``.\n compute_uv : bool, optional\n Whether or not to compute `u` and `vh` in addition to `s`. True\n by default.\n hermitian : bool, optional\n If True, `a` is assumed to be Hermitian (symmetric if real-valued),\n enabling a more efficient method for finding singular values.\n Defaults to False.\n\n .. versionadded:: 1.17.0\n\n Returns\n -------\n u : { (..., M, M), (..., M, K) } array\n Unitary array(s). The first ``a.ndim - 2`` dimensions have the same\n size as those of the input `a`. The size of the last two dimensions\n depends on the value of `full_matrices`. Only returned when\n `compute_uv` is True.\n s : (..., K) array\n Vector(s) with the singular values, within each vector sorted in\n descending order. The first ``a.ndim - 2`` dimensions have the same\n size as those of the input `a`.\n vh : { (..., N, N), (..., K, N) } array\n Unitary array(s). The first ``a.ndim - 2`` dimensions have the same\n size as those of the input `a`. The size of the last two dimensions\n depends on the value of `full_matrices`. Only returned when\n `compute_uv` is True.\n\n Raises\n ------\n LinAlgError\n If SVD computation does not converge.\n\n See Also\n --------\n scipy.linalg.svd : Similar function in SciPy.\n scipy.linalg.svdvals : Compute singular values of a matrix.\n\n Notes\n -----\n\n .. versionchanged:: 1.8.0\n Broadcasting rules apply, see the `numpy.linalg` documentation for\n details.\n\n The decomposition is performed using LAPACK routine ``_gesdd``.\n\n SVD is usually described for the factorization of a 2D matrix :math:`A`.\n The higher-dimensional case will be discussed below. In the 2D case, SVD is\n written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`,\n :math:`S= \\\\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D array `s`\n contains the singular values of `a` and `u` and `vh` are unitary. The rows\n of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are\n the eigenvectors of :math:`A A^H`. In both cases the corresponding\n (possibly non-zero) eigenvalues are given by ``s**2``.\n\n If `a` has more than two dimensions, then broadcasting rules apply, as\n explained in :ref:`routines.linalg-broadcasting`. This means that SVD is\n working in \"stacked\" mode: it iterates over all indices of the first\n ``a.ndim - 2`` dimensions and for each combination SVD is applied to the\n last two indices. The matrix `a` can be reconstructed from the\n decomposition with either ``(u * s[..., None, :]) @ vh`` or\n ``u @ (s[..., None] * vh)``. 
(The ``@`` operator can be replaced by the\n function ``np.matmul`` for python versions below 3.5.)\n\n If `a` is a ``matrix`` object (as opposed to an ``ndarray``), then so are\n all the return values.\n\n Examples\n --------\n >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)\n >>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3)\n\n Reconstruction based on full SVD, 2D case:\n\n >>> u, s, vh = np.linalg.svd(a, full_matrices=True)\n >>> u.shape, s.shape, vh.shape\n ((9, 9), (6,), (6, 6))\n >>> np.allclose(a, np.dot(u[:, :6] * s, vh))\n True\n >>> smat = np.zeros((9, 6), dtype=complex)\n >>> smat[:6, :6] = np.diag(s)\n >>> np.allclose(a, np.dot(u, np.dot(smat, vh)))\n True\n\n Reconstruction based on reduced SVD, 2D case:\n\n >>> u, s, vh = np.linalg.svd(a, full_matrices=False)\n >>> u.shape, s.shape, vh.shape\n ((9, 6), (6,), (6, 6))\n >>> np.allclose(a, np.dot(u * s, vh))\n True\n >>> smat = np.diag(s)\n >>> np.allclose(a, np.dot(u, np.dot(smat, vh)))\n True\n\n Reconstruction based on full SVD, 4D case:\n\n >>> u, s, vh = np.linalg.svd(b, full_matrices=True)\n >>> u.shape, s.shape, vh.shape\n ((2, 7, 8, 8), (2, 7, 3), (2, 7, 3, 3))\n >>> np.allclose(b, np.matmul(u[..., :3] * s[..., None, :], vh))\n True\n >>> np.allclose(b, np.matmul(u[..., :3], s[..., None] * vh))\n True\n\n Reconstruction based on reduced SVD, 4D case:\n\n >>> u, s, vh = np.linalg.svd(b, full_matrices=False)\n >>> u.shape, s.shape, vh.shape\n ((2, 7, 8, 3), (2, 7, 3), (2, 7, 3, 3))\n >>> np.allclose(b, np.matmul(u * s[..., None, :], vh))\n True\n >>> np.allclose(b, np.matmul(u, s[..., None] * vh))\n True\n\n ", "n_words": 746, "vocab_size": 328, "n_whitespaces": 1180, "language": "en" } }, { "id": 276110, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/saved_model/save_impl.py", "file_name": "save_impl.py", "fun_name": "trace_with_input_signature", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def trace_with_input_signature(self):\n \n if self._layer_inputs[0] is None:\n return\n\n args, kwargs = self._layer_inputs\n if self._expects_training_arg:\n args, kwargs = self._call_spec.set_arg_value(\n \"training\", False, args, kwargs, inputs_in_args=True\n )\n if None not in tf.nest.flatten([args, kwargs]):\n # Manually add traces for layers that have keyword arguments and have\n # a fully defined input signature.\n self.add_trace(*args, **kwargs)\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 165, "n_words": 49, "vocab_size": 41, "complexity": 4, "nloc": 10, "token_counts": 78, "n_ast_nodes": 124, "n_identifiers": 13, "d_id": 81562, "documentation": { "docstring": "Trace with the layer/models inferred input signature if possible.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 258465, "commit_id": "1c24595c74e0bea246737b19f8fdfc8a1ffa2282", "repo": "scikit-learn", "path": "sklearn/linear_model/tests/test_ransac.py", "file_name": "test_ransac.py", "fun_name": "test_perfect_horizontal_line", "commit_message": "MAINT rename base_estimator to estimator in RANSACRegressor (#22062)", "code": "def test_perfect_horizontal_line():\n \n X = np.arange(100)[:, None]\n y = np.zeros((100,))\n\n estimator = LinearRegression()\n ransac_estimator = RANSACRegressor(estimator, random_state=0)\n ransac_estimator.fit(X, y)\n\n assert_allclose(ransac_estimator.estimator_.coef_, 
0.0)\n assert_allclose(ransac_estimator.estimator_.intercept_, 0.0)\n\n\n# TODO: Remove in v1.2\n@pytest.mark.parametrize(\n \"old_loss, new_loss\",\n [\n (\"absolute_loss\", \"squared_error\"),\n (\"squared_loss\", \"absolute_error\"),\n ],\n)", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"old_loss, new_loss\",\n [\n (\"absolute_loss\", \"squared_error\"),\n (\"squared_loss\", \"absolute_error\"),\n ],\n)", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 81, "n_words": 37, "vocab_size": 33, "complexity": 1, "nloc": 8, "token_counts": 76, "n_ast_nodes": 160, "n_identifiers": 19, "d_id": 75246, "documentation": { "docstring": "Check that we can fit a line where all samples are inliers.\n Non-regression test for:\n https://github.com/scikit-learn/scikit-learn/issues/19497\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 25, "language": "en" } }, { "id": 66003, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/erpnext_integrations/doctype/plaid_settings/plaid_settings.py", "file_name": "plaid_settings.py", "fun_name": "sync_transactions", "commit_message": "style: format code with black", "code": "def sync_transactions(bank, bank_account):\n\t\n\tlast_transaction_date = frappe.db.get_value(\"Bank Account\", bank_account, \"last_integration_date\")\n\tif last_transaction_date:\n\t\tstart_date = formatdate(last_transaction_date, \"YYYY-MM-dd\")\n\telse:\n\t\tstart_date = formatdate(add_months(today(), -12), \"YYYY-MM-dd\")\n\tend_date = formatdate(today(), \"YYYY-MM-dd\")\n\n\ttry:\n\t\ttransactions = get_transactions(\n\t\t\tbank=bank, bank_account=bank_account, start_date=start_date, end_date=end_date\n\t\t)\n\n\t\tresult = []\n\t\tfor transaction in reversed(transactions):\n\t\t\tresult += new_bank_transaction(transaction)\n\n\t\tif result:\n\t\t\tlast_transaction_date = frappe.db.get_value(\"Bank Transaction\", result.pop(), \"date\")\n\n\t\t\tfrappe.logger().info(\n\t\t\t\t\"Plaid added {} new Bank Transactions from '{}' between {} and {}\".format(\n\t\t\t\t\tlen(result), bank_account, start_date, end_date\n\t\t\t\t)\n\t\t\t)\n\n\t\t\tfrappe.db.set_value(\n\t\t\t\t\"Bank Account\", bank_account, \"last_integration_date\", last_transaction_date\n\t\t\t)\n\texcept Exception:\n\t\tfrappe.log_error(frappe.get_traceback(), _(\"Plaid transactions sync error\"))\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 59, "n_words": 85, "vocab_size": 62, "complexity": 5, "nloc": 26, "token_counts": 178, "n_ast_nodes": 296, "n_identifiers": 28, "d_id": 14088, "documentation": { "docstring": "Sync transactions based on the last integration date as the start date, after sync is completed\n\tadd the transaction date of the oldest transaction as the last integration date.", "n_words": 29, "vocab_size": 20, "n_whitespaces": 27, "language": "en" } }, { "id": 205802, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/query_utils.py", "file_name": "query_utils.py", "fun_name": "select_related_descend", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def select_related_descend(field, restricted, requested, load_fields, reverse=False):\n \n if not field.remote_field:\n return False\n if field.remote_field.parent_link and not reverse:\n return False\n if restricted:\n if reverse and field.related_query_name() 
not in requested:\n return False\n if not reverse and field.name not in requested:\n return False\n if not restricted and field.null:\n return False\n if load_fields:\n if field.attname not in load_fields:\n if restricted and field.name in requested:\n msg = (\n \"Field %s.%s cannot be both deferred and traversed using \"\n \"select_related at the same time.\"\n ) % (field.model._meta.object_name, field.name)\n raise FieldError(msg)\n return True\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 264, "n_words": 85, "vocab_size": 48, "complexity": 15, "nloc": 21, "token_counts": 123, "n_ast_nodes": 195, "n_identifiers": 17, "d_id": 51220, "documentation": { "docstring": "\n Return True if this field should be used to descend deeper for\n select_related() purposes. Used by both the query construction code\n (sql.query.fill_related_selections()) and the model instance creation code\n (query.get_klass_info()).\n\n Arguments:\n * field - the field to be checked\n * restricted - a boolean field, indicating if the field list has been\n manually restricted using a requested clause)\n * requested - The select_related() dictionary.\n * load_fields - the set of fields to be loaded on this model\n * reverse - boolean, True if we are checking a reverse select related\n ", "n_words": 89, "vocab_size": 58, "n_whitespaces": 134, "language": "en" } }, { "id": 321330, "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", "repo": "qutebrowser", "path": "tests/unit/browser/webengine/test_webenginetab.py", "file_name": "test_webenginetab.py", "fun_name": "test_notification_permission_workaround", "commit_message": "Run scripts/dev/rewrite_enums.py", "code": "def test_notification_permission_workaround():\n \n try:\n notifications = QWebEnginePage.Feature.Notifications\n except AttributeError:\n pytest.skip(\"No Notifications member\")\n\n permissions = webenginetab._WebEnginePermissions\n assert permissions._options[notifications] == 'content.notifications.enabled'\n assert permissions._messages[notifications] == 'show notifications'\n\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 55, "n_words": 23, "vocab_size": 20, "complexity": 2, "nloc": 8, "token_counts": 46, "n_ast_nodes": 82, "n_identifiers": 13, "d_id": 117667, "documentation": { "docstring": "Make sure the value for QWebEnginePage::Notifications is correct.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 73525, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/settings/tests/test_templates.py", "file_name": "test_templates.py", "fun_name": "test_get_settings_request_context_use_default", "commit_message": "Reformat with black", "code": "def test_get_settings_request_context_use_default(self):\n \n request = self.get_request(site=self.other_site)\n context = Context({\"request\": request})\n\n # This should use the default site, ignoring the site in the request\n template = Template(\n \"{% load wagtailsettings_tags %}\"\n \"{% get_settings use_default_site=True %}\"\n \"{{ settings.tests.testsetting.title}}\"\n )\n\n self.assertEqual(template.render(context), self.default_site_settings.title)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 120, "n_words": 38, 
"vocab_size": 31, "complexity": 1, "nloc": 9, "token_counts": 53, "n_ast_nodes": 96, "n_identifiers": 14, "d_id": 16042, "documentation": { "docstring": "\n Check that the {% get_settings use_default_site=True %} option\n overrides a request in the context.\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 36, "language": "en" } }, { "id": 176417, "commit_id": "8f4c99debc9440728c5e85f8bffa5d26b232eb6f", "repo": "networkx", "path": "networkx/classes/multigraph.py", "file_name": "multigraph.py", "fun_name": "adj", "commit_message": "Multigraph docs update (#5389)\n\n* Updated MultiDiGraph documentation to include more examples of actually\r\nusing parallel edges, and fixed references to things like G[u, v] where\r\nG[u, v, k] is required for a MultiDigraph. Have not made parallel\r\nchanges in MultiGraph which should maybe also be made?\r\n\r\nDocs tests pass on my end; no code outside of comments was changed.\r\n\r\n-Peter Mawhorter\r\n\r\n* Updated docs for MultiGraph to add more multigraph-specific examples and\r\nfix a few places where untested examples were wrong.\r\n\r\n-Peter Mawhorter\r\n\r\n* [DOC] fix typo\r\n\r\n* add the right amount of separators\r\n\r\nCo-authored-by: Mridul Seth ", "code": "def adj(self):\n \n return MultiAdjacencyView(self._adj)\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 24, "n_identifiers": 4, "d_id": 41882, "documentation": { "docstring": "Graph adjacency object holding the neighbors of each node.\n\n This object is a read-only dict-like structure with node keys\n and neighbor-dict values. The neighbor-dict is keyed by neighbor\n to the edgekey-data-dict. So `G.adj[3][2][0]['color'] = 'blue'` sets\n the color of the edge `(3, 2, 0)` to `\"blue\"`.\n\n Iterating over G.adj behaves like a dict. Useful idioms include\n `for nbr, edgesdict in G.adj[n].items():`.\n\n The neighbor information is also provided by subscripting the graph.\n\n Examples\n --------\n >>> e = [(1, 2), (1, 2), (1, 3), (3, 4)] # list of edges\n >>> G = nx.MultiGraph(e)\n >>> G.edges[1, 2, 0][\"weight\"] = 3\n >>> result = set()\n >>> for edgekey, data in G[1][2].items():\n ... 
result.add(data.get('weight', 1))\n >>> result\n {1, 3}\n\n For directed graphs, `G.adj` holds outgoing (successor) info.\n ", "n_words": 123, "vocab_size": 94, "n_whitespaces": 263, "language": "en" } }, { "id": 205412, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/base.py", "file_name": "base.py", "fun_name": "_check_m2m_through_same_relationship", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _check_m2m_through_same_relationship(cls):\n \n\n errors = []\n seen_intermediary_signatures = []\n\n fields = cls._meta.local_many_to_many\n\n # Skip when the target model wasn't found.\n fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase))\n\n # Skip when the relationship model wasn't found.\n fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase))\n\n for f in fields:\n signature = (\n f.remote_field.model,\n cls,\n f.remote_field.through,\n f.remote_field.through_fields,\n )\n if signature in seen_intermediary_signatures:\n errors.append(\n checks.Error(\n \"The model has two identical many-to-many relations \"\n \"through the intermediate model '%s'.\"\n % f.remote_field.through._meta.label,\n obj=cls,\n id=\"models.E003\",\n )\n )\n else:\n seen_intermediary_signatures.append(signature)\n return errors\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 460, "n_words": 88, "vocab_size": 53, "complexity": 7, "nloc": 26, "token_counts": 136, "n_ast_nodes": 215, "n_identifiers": 21, "d_id": 51119, "documentation": { "docstring": "Check if no relationship model is used by more than one m2m field.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 161087, "commit_id": "b617a87ee40ab384767a27335313c2c65ee094ec", "repo": "MockingBird", "path": "ppg_extractor/encoder/attention.py", "file_name": "attention.py", "fun_name": "forward", "commit_message": "Init ppg extractor and ppg2mel (#375)\n\n* Init ppg extractor and ppg2mel\r\n\r\n* add preprocess and training\r\n\r\n* FIx known issues\r\n\r\n* Update __init__.py\r\n\r\nAllow to gen audio\r\n\r\n* Fix length issue\r\n\r\n* Fix bug of preparing fid\r\n\r\n* Fix sample issues\r\n\r\n* Add UI usage of PPG-vc", "code": "def forward(self, query, key, value, pos_emb, mask):\n \n q, k, v = self.forward_qkv(query, key, value)\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\n\n n_batch_pos = pos_emb.size(0)\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\n p = p.transpose(1, 2) # (batch, head, time1, d_k)\n\n # (batch, head, time1, d_k)\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\n # (batch, head, time1, d_k)\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\n\n # compute attention score\n # first compute matrix a and matrix c\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n # (batch, head, time1, time2)\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\n\n # compute matrix b and matrix d\n # (batch, head, time1, time2)\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\n matrix_bd = self.rel_shift(matrix_bd)\n\n scores = (matrix_ac + matrix_bd) / math.sqrt(\n self.d_k\n ) # (batch, head, time1, time2)\n\n return self.forward_attention(v, scores, mask)\n", "url": "https://github.com/babysor/MockingBird.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 298, "n_words": 
130, "vocab_size": 71, "complexity": 1, "nloc": 15, "token_counts": 189, "n_ast_nodes": 293, "n_identifiers": 32, "d_id": 38899, "documentation": { "docstring": "Compute 'Scaled Dot Product Attention' with rel. positional encoding.\n\n :param torch.Tensor query: (batch, time1, size)\n :param torch.Tensor key: (batch, time2, size)\n :param torch.Tensor value: (batch, time2, size)\n :param torch.Tensor pos_emb: (batch, time1, size)\n :param torch.Tensor mask: (batch, time1, time2)\n :param torch.nn.Dropout dropout:\n :return torch.Tensor: attention output (batch, time1, d_model)\n ", "n_words": 49, "vocab_size": 28, "n_whitespaces": 106, "language": "en" } }, { "id": 224449, "commit_id": "f79b34d174e41084391868e7b503f5c61b8b1bdf", "repo": "mkdocs", "path": "mkdocs/plugins.py", "file_name": "plugins.py", "fun_name": "on_page_context", "commit_message": "Move plugin events docs into source code + refactor\n\n* Create real (no-op) methods for each event in the base class.\n* Refactor event dispatcher to not check for methods' existence, instead just call them.\n* Move documentation from Markdown into docstrings of these methods.\n* Activate the 'mkdocstrings' plugin.\n* Use 'mkdocstrings' to insert documentation from those docstrings into the site.", "code": "def on_page_context(self, context, page, config, nav):\n \n return context\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 24, "n_identifiers": 6, "d_id": 57294, "documentation": { "docstring": "\n The `page_context` event is called after the context for a page is created\n and can be used to alter the context for that specific page only.\n\n Parameters:\n context: dict of template context variables\n page: `mkdocs.nav.Page` instance\n config: global configuration object\n nav: global navigation object\n\n Returns:\n dict of template context variables\n ", "n_words": 50, "vocab_size": 37, "n_whitespaces": 141, "language": "en" } }, { "id": 153549, "commit_id": "97769988a6f19e4b76f34238c97bf159ee7626a5", "repo": "modin", "path": "modin/core/io/text/json_dispatcher.py", "file_name": "json_dispatcher.py", "fun_name": "_read", "commit_message": "REFACTOR-#3853: interacting with Dask interface through 'DaskWrapper' class (#3854)\n\nCo-authored-by: Devin Petersohn \r\nCo-authored-by: Dmitry Chigarev \r\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: Anatoly Myachev ", "code": "def _read(cls, path_or_buf, **kwargs):\n \n path_or_buf = cls.get_path_or_buffer(path_or_buf)\n if isinstance(path_or_buf, str):\n if not cls.file_exists(path_or_buf):\n return cls.single_worker_read(path_or_buf, **kwargs)\n path_or_buf = cls.get_path(path_or_buf)\n elif not cls.pathlib_or_pypath(path_or_buf):\n return cls.single_worker_read(path_or_buf, **kwargs)\n if not kwargs.get(\"lines\", False):\n return cls.single_worker_read(path_or_buf, **kwargs)\n with OpenFile(path_or_buf, \"rb\") as f:\n columns = pandas.read_json(BytesIO(b\"\" + f.readline()), lines=True).columns\n kwargs[\"columns\"] = columns\n empty_pd_df = pandas.DataFrame(columns=columns)\n\n with OpenFile(path_or_buf, \"rb\", kwargs.get(\"compression\", \"infer\")) as f:\n partition_ids = []\n index_ids = []\n dtypes_ids = []\n\n column_widths, num_splits = cls._define_metadata(empty_pd_df, columns)\n\n args = {\"fname\": path_or_buf, \"num_splits\": num_splits, **kwargs}\n\n splits = cls.partitioned_file(\n 
f,\n num_partitions=NPartitions.get(),\n )\n for start, end in splits:\n args.update({\"start\": start, \"end\": end})\n partition_id = cls.deploy(cls.parse, num_returns=num_splits + 3, **args)\n partition_ids.append(partition_id[:-3])\n index_ids.append(partition_id[-3])\n dtypes_ids.append(partition_id[-2])\n\n # partition_id[-1] contains the columns for each partition, which will be useful\n # for implementing when `lines=False`.\n row_lengths = cls.materialize(index_ids)\n new_index = pandas.RangeIndex(sum(row_lengths))\n\n dtypes = cls.get_dtypes(dtypes_ids)\n partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths)\n\n if isinstance(dtypes, pandas.Series):\n dtypes.index = columns\n else:\n dtypes = pandas.Series(dtypes, index=columns)\n\n new_frame = cls.frame_cls(\n np.array(partition_ids),\n new_index,\n columns,\n row_lengths,\n column_widths,\n dtypes=dtypes,\n )\n new_frame.synchronize_labels(axis=0)\n return cls.query_compiler_cls(new_frame)\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 655, "n_words": 157, "vocab_size": 106, "complexity": 7, "nloc": 48, "token_counts": 398, "n_ast_nodes": 641, "n_identifiers": 58, "d_id": 35438, "documentation": { "docstring": "\n Read data from `path_or_buf` according to the passed `read_json` `kwargs` parameters.\n\n Parameters\n ----------\n path_or_buf : str, path object or file-like object\n `path_or_buf` parameter of `read_json` function.\n **kwargs : dict\n Parameters of `read_json` function.\n\n Returns\n -------\n BaseQueryCompiler\n Query compiler with imported data for further processing.\n ", "n_words": 44, "vocab_size": 35, "n_whitespaces": 141, "language": "en" } }, { "id": 61979, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py", "file_name": "database.py", "fun_name": "write_shared_locations", "commit_message": "upd; format", "code": "def write_shared_locations(self, paths, dry_run=False):\n \n shared_path = os.path.join(self.path, 'SHARED')\n logger.info('creating %s', shared_path)\n if dry_run:\n return None\n lines = []\n for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):\n path = paths[key]\n if os.path.isdir(paths[key]):\n lines.append('%s=%s' % (key, path))\n for ns in paths.get('namespace', ()):\n lines.append('namespace=%s' % ns)\n\n with codecs.open(shared_path, 'w', encoding='utf-8') as f:\n f.write('\\n'.join(lines))\n return shared_path\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 186, "n_words": 52, "vocab_size": 44, "complexity": 5, "nloc": 15, "token_counts": 139, "n_ast_nodes": 239, "n_identifiers": 21, "d_id": 12792, "documentation": { "docstring": "\n Write shared location information to the SHARED file in .dist-info.\n :param paths: A dictionary as described in the documentation for\n :meth:`shared_locations`.\n :param dry_run: If True, the action is logged but no file is actually\n written.\n :return: The path of the file written to.\n ", "n_words": 43, "vocab_size": 35, "n_whitespaces": 109, "language": "en" } }, { "id": 223651, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/charset.py", "file_name": "charset.py", "fun_name": "get_body_encoding", "commit_message": "add python 3.10.4 for 
windows", "code": "def get_body_encoding(self):\n \n assert self.body_encoding != SHORTEST\n if self.body_encoding == QP:\n return 'quoted-printable'\n elif self.body_encoding == BASE64:\n return 'base64'\n else:\n return encode_7or8bit\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 89, "n_words": 21, "vocab_size": 16, "complexity": 3, "nloc": 8, "token_counts": 34, "n_ast_nodes": 61, "n_identifiers": 7, "d_id": 57033, "documentation": { "docstring": "Return the content-transfer-encoding used for body encoding.\n\n This is either the string `quoted-printable' or `base64' depending on\n the encoding used, or it is a function in which case you should call\n the function with a single argument, the Message object being\n encoded. The function should then set the Content-Transfer-Encoding\n header itself to whatever is appropriate.\n\n Returns \"quoted-printable\" if self.body_encoding is QP.\n Returns \"base64\" if self.body_encoding is BASE64.\n Returns conversion function otherwise.\n ", "n_words": 71, "vocab_size": 52, "n_whitespaces": 135, "language": "en" } }, { "id": 191835, "commit_id": "11e1a8a3fa8d13513fe926b731fb907a066af2a1", "repo": "ydata-profiling", "path": "src/pandas_profiling/visualisation/context.py", "file_name": "context.py", "fun_name": "manage_matplotlib_context", "commit_message": "fix: change context managed backend (#1149)", "code": "def manage_matplotlib_context() -> Any:\n \n originalRcParams = matplotlib.rcParams.copy()\n\n # Credits for this style go to the ggplot and seaborn packages.\n # We copied the style file to remove dependencies on the Seaborn package.\n # Check it out, it's an awesome library for plotting\n customRcParams = {\n \"patch.facecolor\": \"#348ABD\", # blue\n \"patch.antialiased\": True,\n \"font.size\": 10.0,\n \"figure.edgecolor\": \"0.50\",\n # Seaborn common parameters\n \"figure.facecolor\": \"white\",\n \"text.color\": \".15\",\n \"axes.labelcolor\": \".15\",\n \"legend.numpoints\": 1,\n \"legend.scatterpoints\": 1,\n \"xtick.direction\": \"out\",\n \"ytick.direction\": \"out\",\n \"xtick.color\": \".15\",\n \"ytick.color\": \".15\",\n \"axes.axisbelow\": True,\n \"image.cmap\": \"Greys\",\n \"font.family\": [\"sans-serif\"],\n \"font.sans-serif\": [\n \"Arial\",\n \"Liberation Sans\",\n \"Bitstream Vera Sans\",\n \"sans-serif\",\n ],\n \"grid.linestyle\": \"-\",\n \"lines.solid_capstyle\": \"round\",\n # Seaborn darkgrid parameters\n # .15 = dark_gray\n # .8 = light_gray\n \"axes.grid\": True,\n \"axes.facecolor\": \"#EAEAF2\",\n \"axes.edgecolor\": \"white\",\n \"axes.linewidth\": 0,\n \"grid.color\": \"white\",\n # Seaborn notebook context\n \"figure.figsize\": [8.0, 5.5],\n \"axes.labelsize\": 11,\n \"axes.titlesize\": 12,\n \"xtick.labelsize\": 10,\n \"ytick.labelsize\": 10,\n \"legend.fontsize\": 10,\n \"grid.linewidth\": 1,\n \"lines.linewidth\": 1.75,\n \"patch.linewidth\": 0.3,\n \"lines.markersize\": 7,\n \"lines.markeredgewidth\": 0,\n \"xtick.major.width\": 1,\n \"ytick.major.width\": 1,\n \"xtick.minor.width\": 0.5,\n \"ytick.minor.width\": 0.5,\n \"xtick.major.pad\": 7,\n \"ytick.major.pad\": 7,\n \"backend\": \"agg\",\n }\n\n try:\n register_matplotlib_converters()\n matplotlib.rcParams.update(customRcParams)\n sns.set_style(style=\"white\")\n yield\n finally:\n deregister_matplotlib_converters() # revert to original unit registries\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", 
category=matplotlib.cbook.mplDeprecation)\n matplotlib.rcParams.update(originalRcParams) # revert to original rcParams\n", "url": "https://github.com/ydataai/ydata-profiling.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 662, "n_words": 184, "vocab_size": 139, "complexity": 2, "nloc": 62, "token_counts": 273, "n_ast_nodes": 503, "n_identifiers": 19, "d_id": 46847, "documentation": { "docstring": "Return a context manager for temporarily changing matplotlib unit registries and rcParams.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 306735, "commit_id": "e3fb04e1166d15f576d4b6fdec962f13871aaafe", "repo": "core", "path": "homeassistant/components/life360/device_tracker.py", "file_name": "device_tracker.py", "fun_name": "force_update", "commit_message": "Add comment to life360 device tracker (#77879)", "code": "def force_update(self) -> bool:\n \n return False\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 7, "token_counts": 10, "n_ast_nodes": 19, "n_identifiers": 3, "d_id": 105519, "documentation": { "docstring": "Return True if state updates should be forced.\n\n Overridden because CoordinatorEntity sets `should_poll` to False,\n which causes TrackerEntity to set `force_update` to True.\n ", "n_words": 23, "vocab_size": 21, "n_whitespaces": 44, "language": "en" } }, { "id": 260403, "commit_id": "9d863aba2b6dab9c9cbbcf2f7c3b7a99b6ad168f", "repo": "scikit-learn", "path": "sklearn/linear_model/_glm/tests/test_glm.py", "file_name": "test_glm.py", "fun_name": "glm_dataset", "commit_message": "TST tight tests for GLMs (#23619)\n\nCo-authored-by: Olivier Grisel ", "code": "def glm_dataset(global_random_seed, request):\n \n data_type, model = request.param\n # Make larger dim more than double as big as the smaller one.\n # This helps when constructing singular matrices like (X, X).\n if data_type == \"long\":\n n_samples, n_features = 12, 4\n else:\n n_samples, n_features = 4, 12\n k = min(n_samples, n_features)\n rng = np.random.RandomState(global_random_seed)\n X = make_low_rank_matrix(\n n_samples=n_samples,\n n_features=n_features,\n effective_rank=k,\n tail_strength=0.1,\n random_state=rng,\n )\n X[:, -1] = 1 # last columns acts as intercept\n U, s, Vt = linalg.svd(X, full_matrices=False)\n assert np.all(s > 1e-3) # to be sure\n assert np.max(s) / np.min(s) < 100 # condition number of X\n\n if data_type == \"long\":\n coef_unpenalized = rng.uniform(low=1, high=3, size=n_features)\n coef_unpenalized *= rng.choice([-1, 1], size=n_features)\n raw_prediction = X @ coef_unpenalized\n else:\n raw_prediction = rng.uniform(low=-3, high=3, size=n_samples)\n # minimum norm solution min ||w||_2 such that raw_prediction = X w:\n # w = X'(XX')^-1 raw_prediction = V s^-1 U' raw_prediction\n coef_unpenalized = Vt.T @ np.diag(1 / s) @ U.T @ raw_prediction\n\n linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=True)\n sw = np.full(shape=n_samples, fill_value=1 / n_samples)\n y = linear_loss.base_loss.link.inverse(raw_prediction)\n\n # Add penalty l2_reg_strength * ||coef||_2^2 for l2_reg_strength=1 and solve with\n # optimizer. 
Note that the problem is well conditioned such that we get accurate\n # results.\n l2_reg_strength = 1\n fun = partial(\n linear_loss.loss,\n X=X[:, :-1],\n y=y,\n sample_weight=sw,\n l2_reg_strength=l2_reg_strength,\n )\n grad = partial(\n linear_loss.gradient,\n X=X[:, :-1],\n y=y,\n sample_weight=sw,\n l2_reg_strength=l2_reg_strength,\n )\n coef_penalized_with_intercept = _special_minimize(\n fun, grad, coef_unpenalized, tol_NM=1e-6, tol=1e-14\n )\n\n linear_loss = LinearModelLoss(base_loss=model._get_loss(), fit_intercept=False)\n fun = partial(\n linear_loss.loss,\n X=X[:, :-1],\n y=y,\n sample_weight=sw,\n l2_reg_strength=l2_reg_strength,\n )\n grad = partial(\n linear_loss.gradient,\n X=X[:, :-1],\n y=y,\n sample_weight=sw,\n l2_reg_strength=l2_reg_strength,\n )\n coef_penalized_without_intercept = _special_minimize(\n fun, grad, coef_unpenalized[:-1], tol_NM=1e-6, tol=1e-14\n )\n\n # To be sure\n assert np.linalg.norm(coef_penalized_with_intercept) < np.linalg.norm(\n coef_unpenalized\n )\n\n return (\n model,\n X,\n y,\n coef_unpenalized,\n coef_penalized_with_intercept,\n coef_penalized_without_intercept,\n l2_reg_strength,\n )\n\n\n@pytest.mark.parametrize(\"solver\", SOLVERS)\n@pytest.mark.parametrize(\"fit_intercept\", [False, True])", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"solver\", SOLVERS)\n@pytest.mark.parametrize(\"fit_intercept\", [False, True])", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 716, "n_words": 284, "vocab_size": 172, "complexity": 3, "nloc": 77, "token_counts": 495, "n_ast_nodes": 766, "n_identifiers": 65, "d_id": 76228, "documentation": { "docstring": "Dataset with GLM solutions, well conditioned X.\n\n This is inspired by ols_ridge_dataset in test_ridge.py.\n\n The construction is based on the SVD decomposition of X = U S V'.\n\n Parameters\n ----------\n type : {\"long\", \"wide\"}\n If \"long\", then n_samples > n_features.\n If \"wide\", then n_features > n_samples.\n model : a GLM model\n\n For \"wide\", we return the minimum norm solution:\n\n min ||w||_2 subject to w = argmin deviance(X, y, w)\n\n Note that the deviance is always minimized if y = inverse_link(X w) is possible to\n achieve, which it is in the wide data case. Therefore, we can construct the\n solution with minimum norm like (wide) OLS:\n\n min ||w||_2 subject to link(y) = raw_prediction = X w\n\n Returns\n -------\n model : GLM model\n X : ndarray\n Last column of 1, i.e. intercept.\n y : ndarray\n coef_unpenalized : ndarray\n Minimum norm solutions, i.e. 
min sum(loss(w)) (with mininum ||w||_2 in\n case of ambiguity)\n Last coefficient is intercept.\n coef_penalized : ndarray\n GLM solution with alpha=l2_reg_strength=1, i.e.\n min 1/n * sum(loss) + ||w[:-1]||_2^2.\n Last coefficient is intercept.\n l2_reg_strength : float\n Always equal 1.\n ", "n_words": 177, "vocab_size": 111, "n_whitespaces": 318, "language": "en" } }, { "id": 159326, "commit_id": "6339856514897056716bb531acb8489c9cf05d26", "repo": "rasa", "path": "rasa/telemetry.py", "file_name": "telemetry.py", "fun_name": "get_telemetry_id", "commit_message": "Add support for different recipes (#10641)\n\n* Add support for different recipes\r\n\r\nFixes https://github.com/RasaHQ/rasa/issues/10473\r\n\r\n* Update docs/docs/graph-recipe.mdx\r\n\r\nCo-authored-by: Joe Juzl ", "code": "def get_telemetry_id() -> Optional[Text]:\n \n try:\n telemetry_config = (\n rasa_utils.read_global_config_value(CONFIG_FILE_TELEMETRY_KEY) or {}\n )\n\n return telemetry_config.get(CONFIG_TELEMETRY_ID)\n except Exception as e: # skipcq:PYL-W0703\n logger.debug(f\"Unable to retrieve telemetry ID: {e}\")\n return None\n\n", "url": "https://github.com/RasaHQ/rasa.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 84, "n_words": 28, "vocab_size": 27, "complexity": 3, "nloc": 16, "token_counts": 46, "n_ast_nodes": 84, "n_identifiers": 13, "d_id": 38198, "documentation": { "docstring": "Return the unique telemetry identifier for this Rasa Open Source install.\n\n The identifier can be any string, but it should be a UUID.\n\n Returns:\n The identifier, if it is configured correctly.\n ", "n_words": 31, "vocab_size": 27, "n_whitespaces": 47, "language": "en" } }, { "id": 203409, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admin/options.py", "file_name": "options.py", "fun_name": "get_changelist_form", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_changelist_form(self, request, **kwargs):\n \n defaults = {\n \"formfield_callback\": partial(self.formfield_for_dbfield, request=request),\n **kwargs,\n }\n if defaults.get(\"fields\") is None and not modelform_defines_fields(\n defaults.get(\"form\")\n ):\n defaults[\"fields\"] = forms.ALL_FIELDS\n\n return modelform_factory(self.model, **defaults)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 113, "n_words": 27, "vocab_size": 26, "complexity": 3, "nloc": 10, "token_counts": 70, "n_ast_nodes": 117, "n_identifiers": 13, "d_id": 50357, "documentation": { "docstring": "\n Return a Form class for use in the Formset on the changelist page.\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 28, "language": "en" } }, { "id": 67565, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/startup/leaderboard.py", "file_name": "leaderboard.py", "fun_name": "get_all_sales_person", "commit_message": "style: format code with black", "code": "def get_all_sales_person(date_range, company, field=None, limit=0):\n\tdate_condition = get_date_condition(date_range, \"sales_order.transaction_date\")\n\n\treturn frappe.db.sql(\n\t\t.format(\n\t\t\tdate_condition=date_condition\n\t\t),\n\t\t(company, cint(limit)),\n\t\tas_dict=1,\n\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 9, "n_words": 18, "vocab_size": 18, 
"complexity": 1, "nloc": 19, "token_counts": 53, "n_ast_nodes": 80, "n_identifiers": 13, "d_id": 14557, "documentation": { "docstring": "\n\t\tselect sales_team.sales_person as name, sum(sales_order.base_net_total) as value\n\t\tfrom `tabSales Order` as sales_order join `tabSales Team` as sales_team\n\t\t\ton sales_order.name = sales_team.parent and sales_team.parenttype = 'Sales Order'\n\t\twhere sales_order.docstatus = 1\n\t\t\tand sales_order.company = %s\n\t\t\t{date_condition}\n\t\tgroup by sales_team.sales_person\n\t\torder by value DESC\n\t\tlimit %s\n\t", "n_words": 44, "vocab_size": 32, "n_whitespaces": 35, "language": "en" } }, { "id": 258753, "commit_id": "39c341ad91b545c895ede9c6240a04659b82defb", "repo": "scikit-learn", "path": "sklearn/datasets/_base.py", "file_name": "_base.py", "fun_name": "load_digits", "commit_message": "DOC Ensures that load_digits passes numpydoc validation (#22392)", "code": "def load_digits(*, n_class=10, return_X_y=False, as_frame=False):\n \n\n data, fdescr = load_gzip_compressed_csv_data(\n data_file_name=\"digits.csv.gz\", descr_file_name=\"digits.rst\", delimiter=\",\"\n )\n\n target = data[:, -1].astype(int, copy=False)\n flat_data = data[:, :-1]\n images = flat_data.view()\n images.shape = (-1, 8, 8)\n\n if n_class < 10:\n idx = target < n_class\n flat_data, target = flat_data[idx], target[idx]\n images = images[idx]\n\n feature_names = [\n \"pixel_{}_{}\".format(row_idx, col_idx)\n for row_idx in range(8)\n for col_idx in range(8)\n ]\n\n frame = None\n target_columns = [\n \"target\",\n ]\n if as_frame:\n frame, flat_data, target = _convert_data_dataframe(\n \"load_digits\", flat_data, target, feature_names, target_columns\n )\n\n if return_X_y:\n return flat_data, target\n\n return Bunch(\n data=flat_data,\n target=target,\n frame=frame,\n feature_names=feature_names,\n target_names=np.arange(10),\n images=images,\n DESCR=fdescr,\n )\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 285, "n_words": 97, "vocab_size": 64, "complexity": 6, "nloc": 36, "token_counts": 212, "n_ast_nodes": 324, "n_identifiers": 32, "d_id": 75403, "documentation": { "docstring": "Load and return the digits dataset (classification).\n\n Each datapoint is a 8x8 image of a digit.\n\n ================= ==============\n Classes 10\n Samples per class ~180\n Samples total 1797\n Dimensionality 64\n Features integers 0-16\n ================= ==============\n\n This is a copy of the test set of the UCI ML hand-written digits datasets\n https://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n n_class : int, default=10\n The number of classes to return. Between 0 and 10.\n\n return_X_y : bool, default=False\n If True, returns ``(data, target)`` instead of a Bunch object.\n See below for more information about the `data` and `target` object.\n\n .. versionadded:: 0.18\n\n as_frame : bool, default=False\n If True, the data is a pandas DataFrame including columns with\n appropriate dtypes (numeric). The target is\n a pandas DataFrame or Series depending on the number of target columns.\n If `return_X_y` is True, then (`data`, `target`) will be pandas\n DataFrames or Series as described below.\n\n .. 
versionadded:: 0.23\n\n Returns\n -------\n data : :class:`~sklearn.utils.Bunch`\n Dictionary-like object, with the following attributes.\n\n data : {ndarray, dataframe} of shape (1797, 64)\n The flattened data matrix. If `as_frame=True`, `data` will be\n a pandas DataFrame.\n target: {ndarray, Series} of shape (1797,)\n The classification target. If `as_frame=True`, `target` will be\n a pandas Series.\n feature_names: list\n The names of the dataset columns.\n target_names: list\n The names of target classes.\n\n .. versionadded:: 0.20\n\n frame: DataFrame of shape (1797, 65)\n Only present when `as_frame=True`. DataFrame with `data` and\n `target`.\n\n .. versionadded:: 0.23\n images: {ndarray} of shape (1797, 8, 8)\n The raw image data.\n DESCR: str\n The full description of the dataset.\n\n (data, target) : tuple if ``return_X_y`` is True\n A tuple of two ndarrays by default. The first contains a 2D ndarray of\n shape (1797, 64) with each row representing one sample and each column\n representing the features. The second ndarray of shape (1797) contains\n the target samples. If `as_frame=True`, both arrays are pandas objects,\n i.e. `X` a dataframe and `y` a series.\n\n .. versionadded:: 0.18\n\n Examples\n --------\n To load the data and visualize the images::\n\n >>> from sklearn.datasets import load_digits\n >>> digits = load_digits()\n >>> print(digits.data.shape)\n (1797, 64)\n >>> import matplotlib.pyplot as plt\n >>> plt.gray()\n >>> plt.matshow(digits.images[0])\n <...>\n >>> plt.show()\n ", "n_words": 356, "vocab_size": 207, "n_whitespaces": 877, "language": "en" } }, { "id": 259455, "commit_id": "75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc", "repo": "scikit-learn", "path": "sklearn/linear_model/_glm/tests/test_glm.py", "file_name": "test_glm.py", "fun_name": "test_family_deprecation", "commit_message": "ENH migrate GLMs / TweedieRegressor to linear loss (#22548)\n\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Thomas J. 
Fan ", "code": "def test_family_deprecation(est, family):\n \n with pytest.warns(FutureWarning, match=\"`family` was deprecated\"):\n if isinstance(family, str):\n assert est.family == family\n else:\n assert est.family.__class__ == family.__class__\n assert est.family.power == family.power\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 77, "n_words": 24, "vocab_size": 20, "complexity": 2, "nloc": 7, "token_counts": 56, "n_ast_nodes": 92, "n_identifiers": 11, "d_id": 75785, "documentation": { "docstring": "Test backward compatibility of the family property.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 297831, "commit_id": "b0cee0bc46cbd7efe0e6421da18d91595c7a25ad", "repo": "core", "path": "homeassistant/core.py", "file_name": "core.py", "fun_name": "async_start", "commit_message": "String formatting and max line length - Part 1 (#84390)\n\nCo-authored-by: Erik Montnemery ", "code": "async def async_start(self) -> None:\n \n _LOGGER.info(\"Starting Home Assistant\")\n setattr(self.loop, \"_thread_ident\", threading.get_ident())\n\n self.state = CoreState.starting\n self.bus.async_fire(EVENT_CORE_CONFIG_UPDATE)\n self.bus.async_fire(EVENT_HOMEASSISTANT_START)\n\n try:\n # Only block for EVENT_HOMEASSISTANT_START listener\n self.async_stop_track_tasks()", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "async def async_start(self) -> None:\n \"\"\"Finalize startup from inside the event loop.\n\n This method is a coroutine.\n \"\"\"\n _LOGGER.info(\"Starting Home Assistant\")\n setattr(self.loop, \"_thread_ident\", threading.get_ident())\n\n self.state = CoreState.starting\n self.bus.async_fire(EVENT_CORE_CONFIG_UPDATE)\n self.bus.async_fire(EVENT_HOMEASSISTANT_START)\n\n try:\n # Only block for EVENT_HOMEASSISTANT_START listener\n self.async_stop_track_tasks()", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 95, "n_words": 24, "vocab_size": 24, "complexity": 3, "nloc": 34, "token_counts": 150, "n_ast_nodes": 101, "n_identifiers": 16, "d_id": 96788, "documentation": { "docstring": "Finalize startup from inside the event loop.\n\n This method is a coroutine.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 26, "language": "en" } }, { "id": 113688, "commit_id": "d68c786ff81bad19c04619d6a999ff34aaa724e7", "repo": "nni", "path": "nni/compression/pytorch/utils/pruning.py", "file_name": "pruning.py", "fun_name": "get_module_by_name", "commit_message": "[Compression] remove pruning v1 & refactor directory (#5228)", "code": "def get_module_by_name(model, module_name):\n \n name_list = module_name.split(\".\")\n for name in name_list[:-1]:\n if hasattr(model, name):\n model = getattr(model, name)\n else:\n return None, None\n if hasattr(model, name_list[-1]):\n leaf_module = getattr(model, name_list[-1])\n return model, leaf_module\n else:\n return None, None\n\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 107, "n_words": 35, "vocab_size": 24, "complexity": 4, "nloc": 12, "token_counts": 82, "n_ast_nodes": 131, "n_identifiers": 9, "d_id": 25005, "documentation": { "docstring": "\n Get a module specified by its module name\n Parameters\n ----------\n model : pytorch model\n the pytorch model from which to get its module\n module_name : str\n the name of the required module\n Returns\n -------\n module, 
module\n the parent module of the required module, the required module\n ", "n_words": 46, "vocab_size": 25, "n_whitespaces": 95, "language": "en" } }, { "id": 304531, "commit_id": "ed60611b07e38e7009c6cc266c14625a751e7b32", "repo": "core", "path": "homeassistant/components/cast/media_player.py", "file_name": "media_player.py", "fun_name": "_get_chromecast", "commit_message": "Improve type hint in cast media_player entity (#77025)\n\n* Improve type hint in cast media_player entity\r\n\r\n* Update docstring", "code": "def _get_chromecast(self) -> pychromecast.Chromecast:\n \n if self._chromecast is None:\n raise HomeAssistantError(\"Chromecast is not available.\")\n return self._chromecast\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 47, "n_words": 15, "vocab_size": 13, "complexity": 2, "nloc": 5, "token_counts": 26, "n_ast_nodes": 46, "n_identifiers": 6, "d_id": 103338, "documentation": { "docstring": "Ensure chromecast is available, to facilitate type checking.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 19993, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_internal/utils/misc.py", "file_name": "misc.py", "fun_name": "is_wheel_installed", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def is_wheel_installed() -> bool:\n \n try:\n import pipenv.vendor.wheel as wheel # noqa: F401\n except ImportError:\n return False\n\n return True\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 45, "n_words": 18, "vocab_size": 17, "complexity": 2, "nloc": 9, "token_counts": 24, "n_ast_nodes": 42, "n_identifiers": 6, "d_id": 3167, "documentation": { "docstring": "\n Return whether the wheel package is installed.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 14, "language": "en" } }, { "id": 181704, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tests/tpot_tests.py", "file_name": "tpot_tests.py", "fun_name": "test_pick_two_individuals_eligible_for_crossover", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def test_pick_two_individuals_eligible_for_crossover():\n \n\n ind1 = creator.Individual.from_string(\n 'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)',\n tpot_obj._pset\n )\n ind2 = creator.Individual.from_string(\n 'BernoulliNB(input_matrix, BernoulliNB__alpha=10.0, BernoulliNB__fit_prior=True)',\n tpot_obj._pset\n )\n ind3 = creator.Individual.from_string(\n 'GaussianNB(input_matrix)',\n tpot_obj._pset\n )\n\n pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1, ind2, ind3])\n assert ((str(pick1) == str(ind1) and str(pick2) == str(ind2)) or\n str(pick1) 
== str(ind2) and str(pick2) == str(ind1))\n\n ind4 = creator.Individual.from_string(\n 'KNeighborsClassifier('\n 'BernoulliNB(input_matrix, BernoulliNB__alpha=10.0, BernoulliNB__fit_prior=True),'\n 'KNeighborsClassifier__n_neighbors=10, '\n 'KNeighborsClassifier__p=1, '\n 'KNeighborsClassifier__weights=uniform'\n ')',\n tpot_obj._pset\n )\n\n # Eventhough ind4 does not have the same primitive at the root, the tree shares a primitive with ind1\n pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1, ind3, ind4])\n assert ((str(pick1) == str(ind1) and str(pick2) == str(ind4)) or\n str(pick1) == str(ind4) and str(pick2) == str(ind1))\n\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 259, "n_words": 102, "vocab_size": 56, "complexity": 7, "nloc": 28, "token_counts": 182, "n_ast_nodes": 301, "n_identifiers": 14, "d_id": 43491, "documentation": { "docstring": "Assert that pick_two_individuals_eligible_for_crossover() picks the correct pair of nodes to perform crossover with", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 102401, "commit_id": "32bf5e0ef9177a9f8e65cea6cdf6d17e2cc5eaff", "repo": "pytorch", "path": "test/quantization/core/test_quantized_op.py", "file_name": "test_quantized_op.py", "fun_name": "test_qgelu", "commit_message": "Add native impl of gelu for QuantizedCPU (#69968)\n\nSummary:\nAdd native implementation of gelu for quantized CPU.\n\ncc jerryzh168 jianyuh raghuramank100 jamesr66a vkuzo\n\nPull Request resolved: https://github.com/pytorch/pytorch/pull/69968\n\nReviewed By: ejguan\n\nDifferential Revision: D33187095\n\nPulled By: vkuzo\n\nfbshipit-source-id: 4c4bf0eb47d2d9c2b8827174f2ccdea41986148a", "code": "def test_qgelu(self):\n shapes = ((4,), (4, 4), (4, 4, 4), (4, 4, 4, 4))\n dtypes = (torch.quint8, torch.qint8)\n memory_formats = (torch.channels_last, torch.contiguous_format)\n test_cases = itertools.product(shapes, dtypes, memory_formats)\n for shape, dtype, memory_format in test_cases:\n if memory_format == torch.channels_last and len(shape) != 4:\n continue\n X, scale, zero_point, torch_type = \\\n torch.randn(*shape), 0.1, 0, dtype\n X = X.to(memory_format=memory_format)\n\n qX = torch.quantize_per_tensor(X, scale=scale, zero_point=zero_point,\n dtype=torch_type)\n dqX = qX.dequantize()\n\n op = torch.nn.functional.gelu\n dqY = op(dqX)\n qY = torch.quantize_per_tensor(dqY, scale=scale, zero_point=zero_point,\n dtype=torch_type)\n qY_hat = op(qX)\n self.assertEqual(qY.dequantize(), qY_hat.dequantize(),\n msg=\"F.gelu failed ({} vs {})\".format(qY, qY_hat))\n\n ", "url": "https://github.com/pytorch/pytorch.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 377, "n_words": 87, "vocab_size": 67, "complexity": 4, "nloc": 21, "token_counts": 224, "n_ast_nodes": 327, "n_identifiers": 37, "d_id": 21520, "documentation": { "docstring": "Tests the correctness of the quantized::qlayer_norm op.", "n_words": 7, "vocab_size": 6, "n_whitespaces": 6, "language": "en" } }, { "id": 335695, "commit_id": "3e2cff4da25642e964c48fa44d7c00d3314b1ce8", "repo": "diffusers", "path": "src/diffusers/models/unet_sde_score_estimation.py", "file_name": "unet_sde_score_estimation.py", "fun_name": "_upsample_conv_2d", "commit_message": "better names and more cleanup", "code": "def _upsample_conv_2d(x, w, k=None, factor=2, gain=1):\n \n\n assert isinstance(factor, int) and factor >= 1\n\n # Check weight 
shape.\n assert len(w.shape) == 4\n convH = w.shape[2]\n convW = w.shape[3]\n inC = w.shape[1]\n\n assert convW == convH\n\n # Setup filter kernel.\n if k is None:\n k = [1] * factor\n k = _setup_kernel(k) * (gain * (factor**2))\n p = (k.shape[0] - factor) - (convW - 1)\n\n stride = (factor, factor)\n\n # Determine data dimensions.\n stride = [1, 1, factor, factor]\n output_shape = ((x.shape[2] - 1) * factor + convH, (x.shape[3] - 1) * factor + convW)\n output_padding = (\n output_shape[0] - (x.shape[2] - 1) * stride[0] - convH,\n output_shape[1] - (x.shape[3] - 1) * stride[1] - convW,\n )\n assert output_padding[0] >= 0 and output_padding[1] >= 0\n num_groups = x.shape[1] // inC\n\n # Transpose weights.\n w = torch.reshape(w, (num_groups, -1, inC, convH, convW))\n w = w[..., ::-1, ::-1].permute(0, 2, 1, 3, 4)\n w = torch.reshape(w, (num_groups * inC, -1, convH, convW))\n\n x = F.conv_transpose2d(x, w, stride=stride, output_padding=output_padding, padding=0)\n\n return upfirdn2d(x, torch.tensor(k, device=x.device), pad=((p + 1) // 2 + factor - 1, p // 2 + 1))\n\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 281, "n_words": 182, "vocab_size": 102, "complexity": 4, "nloc": 25, "token_counts": 356, "n_ast_nodes": 538, "n_identifiers": 29, "d_id": 120826, "documentation": { "docstring": "Fused `upsample_2d()` followed by `Conv2d()`.\n\n Args:\n Padding is performed only once at the beginning, not between the operations. The fused op is considerably more\n efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary\n order.\n x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,\n C]`.\n w: Weight tensor of the shape `[filterH, filterW, inChannels,\n outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`.\n k: FIR filter of the shape `[firH, firW]` or `[firN]`\n (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling.\n factor: Integer upsampling factor (default: 2). 
gain: Scaling factor for signal magnitude (default: 1.0).\n\n Returns:\n Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same datatype as\n `x`.\n ", "n_words": 139, "vocab_size": 102, "n_whitespaces": 208, "language": "en" } }, { "id": 100548, "commit_id": "bdbbad4d310fb606b6f412aa81e9f57ccd994e97", "repo": "faceswap", "path": "lib/gpu_stats/amd.py", "file_name": "amd.py", "fun_name": "_get_supported_devices", "commit_message": "Refactor lib.gpu_stats (#1218)\n\n* inital gpu_stats refactor\r\n\r\n* Add dummy CPU Backend\r\n\r\n* Update Sphinx documentation", "code": "def _get_supported_devices(self) -> List[plaidml._DeviceConfig]:\n \n experimental_setting = plaidml.settings.experimental\n\n plaidml.settings.experimental = False\n devices = plaidml.devices(self._ctx, limit=100, return_all=True)[0]\n\n plaidml.settings.experimental = experimental_setting\n\n supported = [d for d in devices\n if d.details\n and json.loads(d.details.decode(\"utf-8\")).get(\"type\", \"cpu\").lower() == \"gpu\"]\n\n self._log(\"debug\", f\"Obtained supported devices: {supported}\")\n return supported\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 135, "n_words": 39, "vocab_size": 29, "complexity": 4, "nloc": 18, "token_counts": 106, "n_ast_nodes": 178, "n_identifiers": 21, "d_id": 20012, "documentation": { "docstring": " Obtain GPU devices from PlaidML that are marked as \"supported\".\n\n Returns\n -------\n list_LOGGER.\n The :class:`plaidml._DeviceConfig` objects for all supported GPUs that PlaidML has\n discovered.\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 75, "language": "en" } }, { "id": 266159, "commit_id": "80f5c96af3e78232ffe2bcce7c27995612964596", "repo": "netbox", "path": "netbox/netbox/views/generic/bulk_views.py", "file_name": "bulk_views.py", "fun_name": "save_object", "commit_message": "Document save_object() on BulkImportView", "code": "def save_object(self, object_form, request):\n \n return object_form.save()\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 27, "n_identifiers": 5, "d_id": 78320, "documentation": { "docstring": "\n Provide a hook to modify the object immediately before saving it (e.g. 
to encrypt secret data).\n\n Args:\n object_form: The model form instance\n request: The current request\n ", "n_words": 26, "vocab_size": 24, "n_whitespaces": 70, "language": "en" } }, { "id": 244142, "commit_id": "14f0e9585c15c28f0c31dcc3ea352449bbe5eb96", "repo": "mmdetection", "path": "mmdet/models/dense_heads/mask2former_head.py", "file_name": "mask2former_head.py", "fun_name": "forward", "commit_message": "[Feature] Add Mask2Former to mmdet (#6938)\n\nupdate doc\r\n\r\nupdate doc format\r\n\r\ndeepcopy pixel_decoder cfg\r\n\r\nmove mask_pseudo_sampler cfg to config file\r\n\r\nmove part of postprocess from head to detector\r\n\r\nfix bug in postprocessing\r\n\r\nmove class setting from head to config file\r\n\r\nremove if else\r\n\r\nmove mask2bbox to mask/util\r\n\r\nupdate docstring\r\n\r\nupdate docstring in result2json\r\n\r\nfix bug\r\n\r\nupdate class_weight\r\n\r\nadd maskformer_fusion_head\r\n\r\nadd maskformer fusion head\r\n\r\nupdate\r\n\r\nadd cfg for filter_low_score\r\n\r\nupdate maskformer\r\n\r\nupdate class_weight\r\n\r\nupdate config\r\n\r\nupdate unit test\r\n\r\nrename param\r\n\r\nupdate comments in config\r\n\r\nrename variable, rm arg, update unit tests\r\n\r\nupdate mask2bbox\r\n\r\nadd unit test for mask2bbox\r\n\r\nreplace unsqueeze(1) and squeeze(1)\r\n\r\nadd unit test for maskformer_fusion_head\r\n\r\nupdate docstrings\r\n\r\nupdate docstring\r\n\r\ndelete \\\r\n\r\nremove modification to ce loss\r\n\r\nupdate docstring\r\n\r\nupdate docstring\r\n\r\nupdate docstring of ce loss\r\n\r\nupdate unit test\r\n\r\nupdate docstring\r\n\r\nupdate docstring\r\n\r\nupdate docstring\r\n\r\nrename\r\n\r\nrename\r\n\r\nadd msdeformattn pixel decoder\r\n\r\nmaskformer refactor\r\n\r\nadd strides in config\r\n\r\nremove redundant code\r\n\r\nremove redundant code\r\n\r\nupdate unit test\r\n\r\nupdate config\r\n\r\nupdate", "code": "def forward(self, feats, img_metas):\n \n batch_size = len(img_metas)\n mask_features, multi_scale_memorys = self.pixel_decoder(feats)\n # multi_scale_memorys (from low resolution to high resolution)\n decoder_inputs = []\n decoder_positional_encodings = []\n for i in range(self.num_transformer_feat_level):\n decoder_input = self.decoder_input_projs[i](multi_scale_memorys[i])\n # shape (batch_size, c, h, w) -> (h*w, batch_size, c)\n decoder_input = decoder_input.flatten(2).permute(2, 0, 1)\n level_embed = self.level_embed.weight[i].view(1, 1, -1)\n decoder_input = decoder_input + level_embed\n # shape (batch_size, c, h, w) -> (h*w, batch_size, c)\n mask = decoder_input.new_zeros(\n (batch_size, ) + multi_scale_memorys[i].shape[-2:],\n dtype=torch.bool)\n decoder_positional_encoding = self.decoder_positional_encoding(\n mask)\n decoder_positional_encoding = decoder_positional_encoding.flatten(\n 2).permute(2, 0, 1)\n decoder_inputs.append(decoder_input)\n decoder_positional_encodings.append(decoder_positional_encoding)\n # shape (num_queries, c) -> (num_queries, batch_size, c)\n query_feat = self.query_feat.weight.unsqueeze(1).repeat(\n (1, batch_size, 1))\n query_embed = self.query_embed.weight.unsqueeze(1).repeat(\n (1, batch_size, 1))\n\n cls_pred_list = []\n mask_pred_list = []\n cls_pred, mask_pred, attn_mask = self.forward_head(\n query_feat, mask_features, multi_scale_memorys[0].shape[-2:])\n cls_pred_list.append(cls_pred)\n mask_pred_list.append(mask_pred)\n\n for i in range(self.num_transformer_decoder_layers):\n level_idx = i % self.num_transformer_feat_level\n # if a mask is all True(all background), then set it all False.\n 
attn_mask[torch.where(\n attn_mask.sum(-1) == attn_mask.shape[-1])] = False\n\n # cross_attn + self_attn\n layer = self.transformer_decoder.layers[i]\n attn_masks = [attn_mask, None]\n query_feat = layer(\n query=query_feat,\n key=decoder_inputs[level_idx],\n value=decoder_inputs[level_idx],\n query_pos=query_embed,\n key_pos=decoder_positional_encodings[level_idx],\n attn_masks=attn_masks,\n query_key_padding_mask=None,\n # here we do not apply masking on padded region\n key_padding_mask=None)\n cls_pred, mask_pred, attn_mask = self.forward_head(\n query_feat, mask_features, multi_scale_memorys[\n (i + 1) % self.num_transformer_feat_level].shape[-2:])\n\n cls_pred_list.append(cls_pred)\n mask_pred_list.append(mask_pred)\n\n return cls_pred_list, mask_pred_list\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 828, "n_words": 201, "vocab_size": 121, "complexity": 3, "nloc": 50, "token_counts": 412, "n_ast_nodes": 632, "n_identifiers": 54, "d_id": 70258, "documentation": { "docstring": "Forward function.\n\n Args:\n feats (list[Tensor]): Multi scale Features from the\n upstream network, each is a 4D-tensor.\n img_metas (list[dict]): List of image information.\n\n Returns:\n tuple: A tuple contains two elements.\n\n - cls_pred_list (list[Tensor)]: Classification logits \\\n for each decoder layer. Each is a 3D-tensor with shape \\\n (batch_size, num_queries, cls_out_channels). \\\n Note `cls_out_channels` should includes background.\n - mask_pred_list (list[Tensor]): Mask logits for each \\\n decoder layer. Each with shape (batch_size, num_queries, \\\n h, w).\n ", "n_words": 73, "vocab_size": 54, "n_whitespaces": 240, "language": "en" } }, { "id": 212684, "commit_id": "d363bd761fef3de10d162809199ad3c351081914", "repo": "PySimpleGUI", "path": "DemoPrograms/Demo_Emoji_Toolbar_PIL.py", "file_name": "Demo_Emoji_Toolbar_PIL.py", "fun_name": "convert_to_bytes", "commit_message": "New Demo - Emoji Toolbar", "code": "def convert_to_bytes(file_or_bytes, resize=None, fill=False):\n \n\n if isinstance(file_or_bytes, str):\n img = PIL.Image.open(file_or_bytes)\n else:\n try:\n img = PIL.Image.open(io.BytesIO(base64.b64decode(file_or_bytes)))\n except Exception as e:\n dataBytesIO = io.BytesIO(file_or_bytes)\n img = PIL.Image.open(dataBytesIO)\n\n cur_width, cur_height = img.size\n if resize:\n new_width, new_height = resize\n scale = min(new_height / cur_height, new_width / cur_width)\n img = img.resize((int(cur_width * scale), int(cur_height * scale)), PIL.Image.ANTIALIAS)\n if fill:\n if resize is not None:\n img = make_square(img, resize[0])\n with io.BytesIO() as bio:\n img.save(bio, format=\"PNG\")\n del img\n return bio.getvalue()\n\n\"\"`'", "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 208, "n_words": 74, "vocab_size": 54, "complexity": 6, "nloc": 21, "token_counts": 181, "n_ast_nodes": 295, "n_identifiers": 31, "d_id": 53340, "documentation": { "docstring": "\n Will convert into bytes and optionally resize an image that is a file or a base64 bytes object.\n Turns into PNG format in the process so that can be displayed by tkinter\n :param file_or_bytes: either a string filename or a bytes base64 image object\n :type file_or_bytes: (Union[str, bytes])\n :param resize: optional new size\n :type resize: (Tuple[int, int] or None)\n :param fill: If True then the image is filled/padded so that 
the image is not distorted\n :type fill: (bool)\n :return: (bytes) a byte-string object\n :rtype: (bytes)\n \nM`YM dP \nM mm. mm. M 88 \nM MMM MMM M .d8888b. 88 .dP .d8888b. \nM MMM MMM M 88' `88 88888\" 88ooood8 \nM MMM MMM M 88. .88 88 `8b. 88. ... \nM MMM MMM M `88888P8 dP `YP `88888P' \nMMMMMMMMMMMMMM \n \nM\"\"MMM\"\"MMM\"\"M oo dP \nM MMM MMM M 88 \nM MMP MMP M dP 88d888b. .d888b88 .d8888b. dP dP dP \nM MM' MM' .M 88 88' `88 88' `88 88' `88 88 88 88 \nM `' . '' .MM 88 88 88 88. .88 88. .88 88.88b.88' \nM .d .dMMM dP dP dP `88888P8 `88888P' 8888P Y8P \nMMMMMMMMMMMMMM\n", "n_words": 184, "vocab_size": 92, "n_whitespaces": 524, "language": "en" } }, { "id": 82002, "commit_id": "d3eb2c197595c29c4a3f7b38cd609ce953009623", "repo": "awx", "path": "awx/main/management/commands/bottleneck.py", "file_name": "bottleneck.py", "fun_name": "handle", "commit_message": "Add new flak8 rules to do some meaningful corrections", "code": "def handle(self, *args, **options):\n jt = options['jt']\n threshold = options['threshold']\n history = options['history']\n ignore = options['ignore']\n\n print('## ' + JobTemplate.objects.get(pk=jt).name + f' (last {history} runs)\\n')\n with connection.cursor() as cursor:\n cursor.execute(\n f\n )\n slowest_events = cursor.fetchall()\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 124, "n_words": 35, "vocab_size": 30, "complexity": 14, "nloc": 67, "token_counts": 398, "n_ast_nodes": 150, "n_identifiers": 19, "d_id": 17294, "documentation": { "docstring": "\n SELECT\n b.id, b.job_id, b.host_name, b.created - a.created delta,\n b.task task,\n b.event_data::json->'task_action' task_action,\n b.event_data::json->'task_path' task_path\n FROM main_jobevent a JOIN main_jobevent b\n ON b.parent_uuid = a.parent_uuid AND a.host_name = b.host_name\n WHERE\n a.event = 'runner_on_start' AND\n b.event != 'runner_on_start' AND\n b.event != 'runner_on_skipped' AND\n b.failed = false AND\n a.job_id IN (\n SELECT unifiedjob_ptr_id FROM main_job\n WHERE job_template_id={jt}\n ORDER BY unifiedjob_ptr_id DESC\n LIMIT {history}\n )\n ORDER BY delta DESC;\n ", "n_words": 65, "vocab_size": 48, "n_whitespaces": 439, "language": "en" } }, { "id": 93682, "commit_id": "2fbf550ec05c8501cbc9eca62e73526e717dcbdf", "repo": "sentry", "path": "src/sentry/integrations/jira/client.py", "file_name": "client.py", "fun_name": "user_id_get_param", "commit_message": "ref(Jira): Split Jira Cloud and Jira Server (#37034)\n\n* Split Jira Cloud and Jira Server", "code": "def user_id_get_param(self):\n \n return \"accountId\"\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 8, "n_ast_nodes": 18, "n_identifiers": 2, "d_id": 19005, "documentation": { "docstring": "\n Jira-Cloud requires GDPR compliant API usage so we have to use accountId\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 27, "language": "en" } }, { "id": 124040, "commit_id": "d83bbda2816b1781eb61342b4539578149eeb686", "repo": "ray", "path": "rllib/evaluation/rollout_worker.py", "file_name": "rollout_worker.py", "fun_name": "save", "commit_message": "[RLlib] Save serialized PolicySpec. Extract `num_gpus` related logics into a util function. 
(#25954)", "code": "def save(self) -> bytes:\n \n filters = self.get_filters(flush_after=True)\n state = {}\n policy_specs = {}\n connector_enabled = self.policy_config.get(\"enable_connectors\", False)\n for pid in self.policy_map:\n state[pid] = self.policy_map[pid].get_state()\n policy_spec = self.policy_map.policy_specs[pid]\n # If connectors are enabled, try serializing the policy spec\n # instead of picking the spec object.\n policy_specs[pid] = (\n policy_spec.serialize() if connector_enabled else policy_spec\n )\n return pickle.dumps(\n {\n \"filters\": filters,\n \"state\": state,\n \"policy_specs\": policy_specs,\n }\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 268, "n_words": 64, "vocab_size": 51, "complexity": 3, "nloc": 24, "token_counts": 106, "n_ast_nodes": 174, "n_identifiers": 18, "d_id": 27501, "documentation": { "docstring": "Serializes this RolloutWorker's current state and returns it.\n\n Returns:\n The current state of this RolloutWorker as a serialized, pickled\n byte sequence.\n ", "n_words": 21, "vocab_size": 18, "n_whitespaces": 57, "language": "en" } }, { "id": 37632, "commit_id": "1ac698744c4dbdf1495d303246d08ffacdf4f5b8", "repo": "transformers", "path": "src/transformers/models/yolos/feature_extraction_yolos.py", "file_name": "feature_extraction_yolos.py", "fun_name": "post_process_segmentation", "commit_message": "Add YOLOS (#16848)\n\n* First draft\r\n\r\n* Add YolosForObjectDetection\r\n\r\n* Make forward pass work\r\n\r\n* Add mid position embeddings\r\n\r\n* Add interpolation of position encodings\r\n\r\n* Add expected values\r\n\r\n* Add YOLOS to tests\r\n\r\n* Add integration test\r\n\r\n* Support tiny model as well\r\n\r\n* Support all models in conversion script\r\n\r\n* Remove mid_pe_size attribute\r\n\r\n* Make more tests pass\r\n\r\n* Add model to README and fix config\r\n\r\n* Add copied from statements\r\n\r\n* Rename base_model_prefix to vit\r\n\r\n* Add missing YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP\r\n\r\n* Apply suggestions from code review\r\n\r\n* Apply more suggestions from code review\r\n\r\n* Convert remaining checkpoints\r\n\r\n* Improve docstrings\r\n\r\n* Add YolosFeatureExtractor\r\n\r\n* Add feature extractor to docs\r\n\r\n* Add corresponding tests\r\n\r\n* Fix style\r\n\r\n* Fix docs\r\n\r\n* Apply suggestion from code review\r\n\r\n* Fix bad rebase\r\n\r\n* Fix some more bad rebase\r\n\r\n* Fix missing character\r\n\r\n* Improve docs and variable names\r\n\r\nCo-authored-by: Niels Rogge ", "code": "def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_threshold=0.5):\n \n out_logits, raw_masks = outputs.logits, outputs.pred_masks\n preds = []\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 35, "n_words": 14, "vocab_size": 13, "complexity": 2, "nloc": 16, "token_counts": 196, "n_ast_nodes": 51, "n_identifiers": 11, "d_id": 6842, "documentation": { "docstring": "\n Converts the output of [`DetrForSegmentation`] into image segmentation predictions. 
Only supports PyTorch.\n\n Parameters:\n outputs ([`DetrSegmentationOutput`]):\n Raw outputs of the model.\n target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`):\n Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction.\n threshold (`float`, *optional*, defaults to 0.9):\n Threshold to use to filter out queries.\n mask_threshold (`float`, *optional*, defaults to 0.5):\n Threshold to use when turning the predicted masks into binary values.\n\n Returns:\n `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, and masks for an image\n in the batch as predicted by the model.\n ", "n_words": 101, "vocab_size": 73, "n_whitespaces": 256, "language": "en" } }, { "id": 42462, "commit_id": "0fac0c0f8e4618c2bdd3d2137d5fb8a80f581246", "repo": "nltk", "path": "nltk/metrics/agreement.py", "file_name": "agreement.py", "fun_name": "pi", "commit_message": "Update black to 22.3.0\n\nThe most recent release of Click (8.1.0) was breaking Black. See psf/black#2964", "code": "def pi(self):\n \n total = 0.0\n label_freqs = FreqDist(x[\"labels\"] for x in self.data)\n for k, f in label_freqs.items():\n total += f**2\n Ae = total / ((len(self.I) * len(self.C)) ** 2)\n return (self.avg_Ao() - Ae) / (1 - Ae)\n", "url": "https://github.com/nltk/nltk.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 90, "n_words": 37, "vocab_size": 28, "complexity": 3, "nloc": 7, "token_counts": 81, "n_ast_nodes": 128, "n_identifiers": 15, "d_id": 7551, "documentation": { "docstring": "Scott 1955; here, multi-pi.\n Equivalent to K from Siegel and Castellan (1988).\n\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 26, "language": "en" } }, { "id": 294612, "commit_id": "c1a2be72fc8b76b55cfde1823c5688100e397369", "repo": "core", "path": "homeassistant/components/generic/config_flow.py", "file_name": "config_flow.py", "fun_name": "get_image_type", "commit_message": "Generic IP Camera configflow 2 (#52360)\n\nCo-authored-by: J. 
Nick Koston ", "code": "def get_image_type(image):\n \n fmt = imghdr.what(None, h=image)\n if fmt is None:\n # if imghdr can't figure it out, could be svg.\n with contextlib.suppress(UnicodeDecodeError):\n if image.decode(\"utf-8\").startswith(\".+)'\n r'\\[(?P\\d+)\\]>$')", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 144, "n_words": 26, "vocab_size": 23, "complexity": 1, "nloc": 5, "token_counts": 60, "n_ast_nodes": 112, "n_identifiers": 15, "d_id": 56919, "documentation": { "docstring": "\n Record the fact that the given DocTest (`test`) generated `f`\n failures out of `t` tried examples.\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 38, "language": "en" } }, { "id": 80933, "commit_id": "676b8f6d8ff85c10e66cebe0a471d3d97434a6c4", "repo": "awx", "path": "awx/main/tasks/system.py", "file_name": "system.py", "fun_name": "migrate_json_fields_expensive", "commit_message": "Implement an out-of-band migration to change the json fields", "code": "def migrate_json_fields_expensive(table, columns):\n batchsize = 50000\n\n ct = ContentType.objects.get_by_natural_key(*table.split('_', 1))\n model = ct.model_class()\n\n # Phase 1: add the new columns, making them nullable to avoid populating them\n with connection.schema_editor() as schema_editor:\n # See: https://docs.djangoproject.com/en/3.1/ref/schema-editor/\n\n for colname in columns:\n f = model._meta.get_field(colname)\n _, _, args, kwargs = f.deconstruct()\n kwargs['null'] = True\n new_f = f.__class__(*args, **kwargs)\n new_f.set_attributes_from_name(f'_{colname}')\n\n schema_editor.add_field(model, new_f)\n\n # Create a trigger to make sure new data automatically gets put in both fields.\n with connection.cursor() as cursor:\n # It's a little annoying, I think this trigger will re-do\n # the same work as the update query in Phase 2\n cursor.execute(\n f\n )\n cursor.execute(\n f\n )\n\n # Phase 2: copy over the data\n with connection.cursor() as cursor:\n rows = 0\n for i in itertools.count(0, batchsize):\n cursor.execute(f\"select count(1) from {table} where id >= %s;\", (i,))\n if not cursor.fetchone()[0]:\n break\n\n column_expr = ', '.join(f\"_{colname} = {colname}::jsonb\" for colname in columns)\n cursor.execute(\n f,\n (i, i + batchsize),\n )\n rows += cursor.rowcount\n logger.debug(f\"Batch {i} to {i + batchsize} copied on {table}.\")\n\n logger.warning(f\"Data copied for {rows} rows on {table}.\")\n\n # Phase 3: drop the old column and rename the new one\n with connection.schema_editor() as schema_editor:\n\n # FIXME: Grab a lock explicitly here?\n for colname in columns:\n with connection.cursor() as cursor:\n cursor.execute(f\"drop trigger {table}_{colname}_trigger;\")\n cursor.execute(f\"drop function update_{table}_{colname};\")\n\n f = model._meta.get_field(colname)\n _, _, args, kwargs = f.deconstruct()\n kwargs['null'] = True\n new_f = f.__class__(*args, **kwargs)\n new_f.set_attributes_from_name(f'_{colname}')\n\n schema_editor.remove_field(model, f)\n\n _, _, args, kwargs = new_f.deconstruct()\n f = new_f.__class__(*args, **kwargs)\n f.set_attributes_from_name(colname)\n\n schema_editor.alter_field(model, new_f, f)\n\n\n@task(queue=get_local_queuename)", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "@task(queue=get_local_queuename)", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 806, "n_words": 243, "vocab_size": 141, "complexity": 6, "nloc": 66, "token_counts": 358, 
"n_ast_nodes": 710, "n_identifiers": 43, "d_id": 17114, "documentation": { "docstring": "\n create or replace function update_{table}_{colname}()\n returns trigger as $body$\n begin\n new._{colname} = new.{colname}::jsonb\n return new;\n end\n $body$ language plpgsql;\n \n create trigger {table}_{colname}_trigger\n before insert or update\n on {table}\n for each row\n execute procedure update_{table}_{colname};\n \n update {table}\n set {column_expr}\n where id >= %s and id < %s;\n ", "n_words": 46, "vocab_size": 39, "n_whitespaces": 403, "language": "en" } }, { "id": 258375, "commit_id": "9ebf164cfdfb320503b7161493420c1b0ec577a3", "repo": "haystack", "path": "test/nodes/test_prompt_node.py", "file_name": "test_prompt_node.py", "fun_name": "test_complex_pipeline_with_shared_prompt_model_yaml", "commit_message": "feat: Expand LLM support with PromptModel, PromptNode, and PromptTemplate (#3667)\n\nCo-authored-by: ZanSara ", "code": "def test_complex_pipeline_with_shared_prompt_model_yaml(tmp_path):\n with open(tmp_path / \"tmp_config.yml\", \"w\") as tmp_file:\n tmp_file.write(\n f\n )\n pipeline = Pipeline.load_from_yaml(path=tmp_path / \"tmp_config.yml\")\n result = pipeline.run(query=\"not relevant\", documents=[Document(\"Berlin is an amazing city.\")])\n assert \"Berlin\" in result[\"results\"][0]\n assert len(result[\"meta\"][\"invocation_context\"]) > 0\n\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 73, "n_words": 34, "vocab_size": 31, "complexity": 1, "nloc": 34, "token_counts": 78, "n_ast_nodes": 141, "n_identifiers": 15, "d_id": 75230, "documentation": { "docstring": "\n version: ignore\n components:\n - name: pmodel\n type: PromptModel\n - name: p1\n params:\n model_name_or_path: pmodel\n default_prompt_template: question-generation\n output_variable: questions\n type: PromptNode\n - name: p2\n params:\n model_name_or_path: pmodel\n default_prompt_template: question-answering\n type: PromptNode\n pipelines:\n - name: query\n nodes:\n - name: p1\n inputs:\n - Query\n - name: p2\n inputs:\n - p1\n ", "n_words": 47, "vocab_size": 23, "n_whitespaces": 371, "language": "en" } }, { "id": 106140, "commit_id": "c902456677116a081f762fa2b4aad13a0aa04d6e", "repo": "datasets", "path": "src/datasets/table.py", "file_name": "table.py", "fun_name": "drop", "commit_message": "Clean up Table class docstrings (#5355)\n\n* clean table docstrings\r\n\r\n* apply review\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>", "code": "def drop(self, *args, **kwargs):\n \n raise NotImplementedError()\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 28, "n_identifiers": 5, "d_id": 22304, "documentation": { "docstring": "\n Drop one or more columns and return a new table.\n\n Args:\n columns (`List[str]`):\n List of field names referencing existing columns.\n\n Raises:\n `KeyError` : if any of the passed columns name are not existing.\n\n Returns:\n `datasets.table.Table`: New table without the columns.\n ", "n_words": 40, "vocab_size": 35, "n_whitespaces": 124, "language": "en" } }, { "id": 153457, "commit_id": "2d40797b2b700d81d4db4a4cd023d563edf6431f", "repo": "modin", "path": 
"modin/db_conn.py", "file_name": "db_conn.py", "fun_name": "_dialect_is_microsoft_sql", "commit_message": "FEAT-#979: Enable reading from SQL server. (#4279)\n\nCo-authored-by: eavidan \r\nCo-authored-by: Devin Petersohn \r\nSigned-off-by: mvashishtha ", "code": "def _dialect_is_microsoft_sql(self):\n \n if self._dialect_is_microsoft_sql_cache is None:\n self._dialect_is_microsoft_sql_cache = False\n if self.lib == _SQLALCHEMY_LIB_NAME:\n from sqlalchemy import create_engine\n\n self._dialect_is_microsoft_sql_cache = create_engine(\n *self.args, **self.kwargs\n ).driver in (\"pymssql\", \"pyodbc\")\n\n return self._dialect_is_microsoft_sql_cache\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 135, "n_words": 28, "vocab_size": 23, "complexity": 3, "nloc": 9, "token_counts": 57, "n_ast_nodes": 96, "n_identifiers": 10, "d_id": 35406, "documentation": { "docstring": "\n Tell whether this connection requires Microsoft SQL dialect.\n\n If this is a sqlalchemy connection, create an engine from args and\n kwargs. If that engine's driver is pymssql or pyodbc, this\n connection requires Microsoft SQL. Otherwise, it doesn't.\n\n Returns\n -------\n Boolean\n ", "n_words": 40, "vocab_size": 33, "n_whitespaces": 97, "language": "en" } }, { "id": 21365, "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/distlib/_backport/sysconfig.py", "file_name": "sysconfig.py", "fun_name": "get_config_var", "commit_message": "Vendor in pip 22.1.2", "code": "def get_config_var(name):\n \n return get_config_vars().get(name)\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 10, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 28, "n_identifiers": 4, "d_id": 3789, "documentation": { "docstring": "Return the value of a single variable using the dictionary returned by\n 'get_config_vars()'.\n\n Equivalent to get_config_vars().get(name)\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 25, "language": "en" } }, { "id": 136086, "commit_id": "2fe96302d962b2372b12d4d1584b43a3e953bca8", "repo": "ray", "path": "rllib/policy/tests/test_sample_batch.py", "file_name": "test_sample_batch.py", "fun_name": "test_to_device", "commit_message": "[RLlib] Enable counting of SampleBatch length by traversing nested structures (#30067)\n\nSigned-off-by: Artur Niederfahrenhorst ", "code": "def test_to_device(self):\n \n torch, _ = try_import_torch()\n\n # sample batch includes\n # a numpy array (a)\n # a nested stucture of dict, tuple and lists (b) of numpys and None\n # info dict\n # a nested structure that ends up with tensors and ints(c)\n # a tensor with float64 values (d)\n # a float64 tensor with possibly wrong device (depends on if cuda available)\n # repeated value object with np.array leaves (f)\n\n cuda_available = int(os.environ.get(\"RLLIB_NUM_GPUS\", \"0\")) > 0\n cuda_if_possible = torch.device(\"cuda:0\" if cuda_available else \"cpu\")\n s = SampleBatch(\n {\n \"a\": np.array([1, 2]),\n \"b\": {\"c\": (np.array([4, 5]), np.array([5, 6]))},\n \"c\": {\"d\": torch.Tensor([1, 2]), \"g\": (torch.Tensor([3, 4]), 1)},\n \"d\": torch.Tensor([1.0, 2.0]).double(),\n \"e\": torch.Tensor([1.0, 2.0]).double().to(cuda_if_possible),\n \"f\": RepeatedValues(np.array([[1, 2, 0, 0]]), lengths=[2], max_len=4),\n SampleBatch.SEQ_LENS: 
np.array([2, 3, 1]),\n \"state_in_0\": np.array([1.0, 3.0, 4.0]),\n # INFO can have arbitrary elements, others need to conform in size\n SampleBatch.INFOS: np.array([{\"a\": 1}, {\"b\": [1, 2]}, {\"c\": None}]),\n }\n )\n\n # inplace operation for sample_batch\n s.to_device(cuda_if_possible, framework=\"torch\")\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 453, "n_words": 155, "vocab_size": 125, "complexity": 2, "nloc": 27, "token_counts": 432, "n_ast_nodes": 439, "n_identifiers": 26, "d_id": 30823, "documentation": { "docstring": "Tests whether to_device works properly under different circumstances", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 186612, "commit_id": "16aad35d31a887dab157f9d4f5e0fe9218d06064", "repo": "certbot", "path": "certbot-nginx/certbot_nginx/_internal/parser_obj.py", "file_name": "parser_obj.py", "fun_name": "parsing_hooks", "commit_message": "Fully type certbot-nginx module (#9124)\n\n* Work in progress\r\n\r\n* Fix type\r\n\r\n* Work in progress\r\n\r\n* Work in progress\r\n\r\n* Work in progress\r\n\r\n* Work in progress\r\n\r\n* Work in progress\r\n\r\n* Oups.\r\n\r\n* Fix typing in UnspacedList\r\n\r\n* Fix logic\r\n\r\n* Finish typing\r\n\r\n* List certbot-nginx as fully typed in tox\r\n\r\n* Fix lint\r\n\r\n* Fix checks\r\n\r\n* Organize imports\r\n\r\n* Fix typing for Python 3.6\r\n\r\n* Fix checks\r\n\r\n* Fix lint\r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/configurator.py\r\n\r\nCo-authored-by: alexzorin \r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/configurator.py\r\n\r\nCo-authored-by: alexzorin \r\n\r\n* Fix signature of deploy_cert regarding the installer interface\r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/obj.py\r\n\r\nCo-authored-by: alexzorin \r\n\r\n* Fix types\r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/parser.py\r\n\r\nCo-authored-by: alexzorin \r\n\r\n* Precise type\r\n\r\n* Precise _coerce possible inputs/outputs\r\n\r\n* Fix type\r\n\r\n* Update certbot-nginx/certbot_nginx/_internal/http_01.py\r\n\r\nCo-authored-by: ohemorange \r\n\r\n* Fix type\r\n\r\n* Remove an undesirable implementation.\r\n\r\n* Fix type\r\n\r\nCo-authored-by: alexzorin \r\nCo-authored-by: ohemorange ", "code": "def parsing_hooks(cls) -> Tuple[Type[\"Block\"], Type[\"Sentence\"], Type[\"Statements\"]]:\n \n return Block, Sentence, Statements\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 24, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 8, "token_counts": 30, "n_ast_nodes": 50, "n_identifiers": 7, "d_id": 45524, "documentation": { "docstring": "Returns object types that this class should be able to `parse` recusrively.\n The order of the objects indicates the order in which the parser should\n try to parse each subitem.\n :returns: A list of Parsable classes.\n :rtype list:\n ", "n_words": 38, "vocab_size": 32, "n_whitespaces": 73, "language": "en" } }, { "id": 298338, "commit_id": "c96781a7957e3887f55cd669002b333539c834c3", "repo": "core", "path": "tests/components/habitica/conftest.py", "file_name": "conftest.py", "fun_name": "disable_plumbum", "commit_message": "Prevent plumbum from causing the testsuite to fail (#70400)", "code": "def disable_plumbum():\n \n with patch(\"plumbum.local\"), patch(\"plumbum.colors\"):\n yield\n", "url": "https://github.com/home-assistant/core.git", 
"language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 19, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 17, "n_ast_nodes": 39, "n_identifiers": 2, "d_id": 97282, "documentation": { "docstring": "Disable plumbum in tests as it can cause the test suite to fail.\n\n plumbum can leave behind PlumbumTimeoutThreads\n ", "n_words": 18, "vocab_size": 16, "n_whitespaces": 24, "language": "en" } }, { "id": 232591, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/template/_data.py", "file_name": "_data.py", "fun_name": "histogram2d", "commit_message": "switch to black .22", "code": "def histogram2d(self):\n \n return self[\"histogram2d\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 64035, "documentation": { "docstring": "\n The 'histogram2d' property is a tuple of instances of\n Histogram2d that may be specified as:\n - A list or tuple of instances of plotly.graph_objs.layout.template.data.Histogram2d\n - A list or tuple of dicts of string/value properties that\n will be passed to the Histogram2d constructor\n\n Supported dict properties:\n\n Returns\n -------\n tuple[plotly.graph_objs.layout.template.data.Histogram2d]\n ", "n_words": 48, "vocab_size": 33, "n_whitespaces": 131, "language": "en" } }, { "id": 267520, "commit_id": "34f8168afc1d7047c47adec3730c591a58f4f899", "repo": "ansible", "path": "lib/ansible/cli/console.py", "file_name": "console.py", "fun_name": "__getattr__", "commit_message": "ansible-console fixes (#78064)\n\n* list collection task actions too\r\n* dynamically add execute/help functions when module is found\r\n* handle redirection and short names", "code": "def __getattr__(self, name):\n \n attr = None\n\n if name.startswith('do_'):\n module = name.replace('do_', '')\n if module_loader.find_plugin(module):\n setattr(self, name, lambda arg, module=module: self.default(module + ' ' + arg))\n attr = object.__getattr__(self, name)\n elif name.startswith('help_'):\n module = name.replace('help_', '')\n if module_loader.find_plugin(module):\n setattr(self, name, lambda module=module: self.helpdefault(module))\n attr = object.__getattr__(self, name)\n\n if attr is None:\n raise AttributeError(f\"{self.__class__} does not have a {name} attribute\")\n\n return attr\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 217, "n_words": 60, "vocab_size": 38, "complexity": 6, "nloc": 15, "token_counts": 138, "n_ast_nodes": 240, "n_identifiers": 16, "d_id": 78939, "documentation": { "docstring": " handle not found to populate dynamically a module function if module matching name exists ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 15, "language": "en" } }, { "id": 181640, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tests/feature_transformers_tests.py", "file_name": "feature_transformers_tests.py", "fun_name": "test_CategoricalSelector_fit", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def test_CategoricalSelector_fit():\n \n op = CategoricalSelector()\n ret_op = 
op.fit(iris_data)\n\n assert ret_op==op\n\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 22, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 4, "token_counts": 22, "n_ast_nodes": 40, "n_identifiers": 6, "d_id": 43428, "documentation": { "docstring": "Assert that fit() in CategoricalSelector does nothing.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 66583, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/patches/v11_1/make_job_card_time_logs.py", "file_name": "make_job_card_time_logs.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute():\n\tfrappe.reload_doc(\"manufacturing\", \"doctype\", \"job_card_time_log\")\n\n\tif frappe.db.table_exists(\"Job Card\") and frappe.get_meta(\"Job Card\").has_field(\n\t\t\"actual_start_date\"\n\t):\n\t\ttime_logs = []\n\t\tfor d in frappe.get_all(\n\t\t\t\"Job Card\",\n\t\t\tfields=[\"actual_start_date\", \"actual_end_date\", \"time_in_mins\", \"name\", \"for_quantity\"],\n\t\t\tfilters={\"docstatus\": (\"<\", 2)},\n\t\t):\n\t\t\tif d.actual_start_date:\n\t\t\t\ttime_logs.append(\n\t\t\t\t\t[\n\t\t\t\t\t\td.actual_start_date,\n\t\t\t\t\t\td.actual_end_date,\n\t\t\t\t\t\td.time_in_mins,\n\t\t\t\t\t\td.for_quantity,\n\t\t\t\t\t\td.name,\n\t\t\t\t\t\t\"Job Card\",\n\t\t\t\t\t\t\"time_logs\",\n\t\t\t\t\t\tfrappe.generate_hash(\"\", 10),\n\t\t\t\t\t]\n\t\t\t\t)\n\n\t\tif time_logs:\n\t\t\tfrappe.db.sql(\n\t\t\t\t.format(\n\t\t\t\t\tvalues=\",\".join([\"%s\"] * len(time_logs))\n\t\t\t\t),\n\t\t\t\ttuple(time_logs),\n\t\t\t)\n\n\t\tfrappe.reload_doc(\"manufacturing\", \"doctype\", \"job_card\")\n\t\tfrappe.db.sql(\n\t\t\t\n\t\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 28, "n_words": 62, "vocab_size": 52, "complexity": 6, "nloc": 40, "token_counts": 176, "n_ast_nodes": 304, "n_identifiers": 25, "d_id": 14230, "documentation": { "docstring": " INSERT INTO\n `tabJob Card Time Log`\n (from_time, to_time, time_in_mins, completed_qty, parent, parenttype, parentfield, name)\n values {values}\n update `tabJob Card` set total_completed_qty = for_quantity,\n total_time_in_mins = time_in_mins where docstatus < 2 ", "n_words": 30, "vocab_size": 28, "n_whitespaces": 103, "language": "en" } }, { "id": 106063, "commit_id": "cd3169f3f35afcf73a36a8276113e1881d92e5e0", "repo": "datasets", "path": "src/datasets/search.py", "file_name": "search.py", "fun_name": "list_indexes", "commit_message": "Clean up Dataset and DatasetDict (#5344)\n\n* clean up docstrings\r\n\r\n* make style\r\n\r\n* apply review\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>", "code": "def list_indexes(self) -> List[str]:\n \n return list(self._indexes)\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 18, "n_ast_nodes": 31, "n_identifiers": 6, "d_id": 22272, "documentation": { "docstring": "List the `colindex_nameumns`/identifiers of all the attached indexes.", "n_words": 8, "vocab_size": 7, "n_whitespaces": 7, "language": "en" } }, { "id": 31423, "commit_id": 
"b681e12d5963490d29c2a77ba7346ee050e46def", "repo": "transformers", "path": "src/transformers/models/vit_mae/modeling_vit_mae.py", "file_name": "modeling_vit_mae.py", "fun_name": "forward_loss", "commit_message": "[ViTMAE] Fix docstrings and variable names (#17710)\n\n* Fix docstrings and variable names\r\n\r\n* Rename x to something better\r\n\r\n* Improve messages\r\n\r\n* Fix docstrings and add test for greyscale images\r\n\r\nCo-authored-by: Niels Rogge ", "code": "def forward_loss(self, pixel_values, pred, mask):\n \n target = self.patchify(pixel_values)\n if self.config.norm_pix_loss:\n mean = target.mean(dim=-1, keepdim=True)\n var = target.var(dim=-1, keepdim=True)\n target = (target - mean) / (var + 1.0e-6) ** 0.5\n\n loss = (pred - target) ** 2\n loss = loss.mean(dim=-1) # [N, L], mean loss per patch\n\n loss = (loss * mask).sum() / mask.sum() # mean loss on removed patches\n return loss\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 145, "n_words": 61, "vocab_size": 42, "complexity": 2, "nloc": 10, "token_counts": 117, "n_ast_nodes": 177, "n_identifiers": 15, "d_id": 5740, "documentation": { "docstring": "\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values.\n pred (`torch.FloatTensor` of shape `(batch_size, num_patches, patch_size**2 * num_channels)`:\n Predicted pixel values.\n mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Tensor indicating which patches are masked (1) and which are not (0).\n\n Returns:\n `torch.FloatTensor`: Pixel reconstruction loss.\n ", "n_words": 46, "vocab_size": 34, "n_whitespaces": 157, "language": "en" } }, { "id": 133506, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/workflow/workflow_storage.py", "file_name": "workflow_storage.py", "fun_name": "get_latest_progress", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def get_latest_progress(self) -> \"StepID\":\n \n return asyncio_run(self._get(self._key_workflow_progress(), True))[\"step_id\"]\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 11, "token_counts": 27, "n_ast_nodes": 49, "n_identifiers": 5, "d_id": 30039, "documentation": { "docstring": "Load the latest progress of a workflow. 
This is used by a\n virtual actor.\n\n Raises:\n DataLoadError: if we fail to load the progress.\n\n Returns:\n The step that contains the latest output.\n ", "n_words": 31, "vocab_size": 27, "n_whitespaces": 81, "language": "en" } }, { "id": 159691, "commit_id": "50d5f1af8406165128a8567b0796ce244542f70c", "repo": "numpy", "path": "numpy/core/setup.py", "file_name": "setup.py", "fun_name": "can_link_svml", "commit_message": "BLD: Add NPY_DISABLE_SVML env var to opt out of SVML", "code": "def can_link_svml():\n \n if NPY_DISABLE_SVML:\n return False\n machine = platform.machine()\n system = platform.system()\n return \"x86_64\" in machine and system == \"Linux\"\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 42, "n_words": 20, "vocab_size": 16, "complexity": 3, "nloc": 6, "token_counts": 32, "n_ast_nodes": 60, "n_identifiers": 5, "d_id": 38396, "documentation": { "docstring": "SVML library is supported only on x86_64 architecture and currently\n only on linux\n ", "n_words": 13, "vocab_size": 11, "n_whitespaces": 19, "language": "en" } }, { "id": 282069, "commit_id": "4501dfd442d371150b8785d379c5354095b6954b", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/cryptocurrency/discovery/discovery_controller.py", "file_name": "discovery_controller.py", "fun_name": "call_cglosers", "commit_message": "Crypto features: Replace coingecko scrapping (#1156)\n\n* replaced cgcategories with api\r\n\r\n* added coingecko categories\r\n\r\n* refactoring commands to use api, added coins to cryptocontroller and merged find and coins\r\n\r\n* autocompletion for coins\r\n\r\n* removed unused vars\r\n\r\n* added dappradar features\r\n\r\n* refactoring commands position\r\n\r\n* refactoring commands position\r\n\r\n* adding visual commands and fixed report\r\n\r\n* skipped tests for now\r\n\r\n* lint notebook\r\n\r\n* correct report\r\n\r\n* black formatter keeps crying because notebook\r\n\r\n* removed unused imports\r\n\r\n* Fixed black\r\n\r\n* Keep kernel metadata 'cause it's required by papermill\r\n\r\n* Change jupyter cleanup hook to one based on nbconvert\r\n\r\n* Try fix the hook I just broke\r\n\r\n* Fix trailing commas in the crypto notebook\r\n\r\n* Change the jupyter hook to a one that's featured on pre-commit's page\r\n\r\n* Format report notebook and test new notebook hook\r\n\r\n* Black the notebook\r\n\r\n* Remove deleted functions from the crypto discovery API\r\n\r\n* Remove deleted functions from the crypto overview API\r\n\r\n* replaced print for console print and removed print from table\r\n\r\n* replaced print for console print and removed print from table\r\n\r\n* auto completion + sort for all discovery commands\r\n\r\n* replacing help messages\r\n\r\n* fix linting\r\n\r\n* added docs and removed unused commands\r\n\r\n* added todos and fixed help messages\r\n\r\n* lint\r\n\r\n* pr issues fixed\r\n\r\n* updated tests\r\n\r\n* tests merge\r\n\r\n* replaced with new rich table function\r\n\r\nCo-authored-by: Colin Delahunty \r\nCo-authored-by: Theodore Aptekarev ", "code": "def call_cglosers(self, other_args):\n \n parser = argparse.ArgumentParser(\n prog=\"cglosers\",\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=,\n )\n\n parser.add_argument(\n \"-p\",\n \"--period\",\n dest=\"period\",\n type=str,\n help=\"time period, one from {14d,1h,1y,200d,24h,30d,7d}\",\n default=\"1h\",\n choices=pycoingecko_model.API_PERIODS,\n )\n\n 
parser.add_argument(\n \"-l\",\n \"--limit\",\n dest=\"limit\",\n type=check_positive,\n help=\"Number of records to display\",\n default=15,\n )\n\n parser.add_argument(\n \"-s\",\n \"--sort\",\n dest=\"sortby\",\n nargs=\"+\",\n help=\"Sort by given column. Default: Market Cap Rank\",\n default=\"Market Cap Rank\",\n )\n\n ns_parser = parse_known_args_and_warn(\n parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED\n )\n\n if ns_parser:\n pycoingecko_view.display_losers(\n period=ns_parser.period,\n top=ns_parser.limit,\n export=ns_parser.export,\n sortby=\" \".join(ns_parser.sortby),\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 499, "n_words": 69, "vocab_size": 59, "complexity": 2, "nloc": 47, "token_counts": 161, "n_ast_nodes": 261, "n_identifiers": 33, "d_id": 84032, "documentation": { "docstring": "Process losers command\n Shows Largest Losers - coins which price dropped the most in given period\n You can use parameter --period to set which timeframe are you interested in: {14d,1h,1y,200d,24h,30d,7d}\n You can look on only N number of records with --limit,\n You can sort by {Symbol,Name,Price [$],Market Cap [$],Market Cap Rank,Volume [$]} with --sort.\n ", "n_words": 54, "vocab_size": 46, "n_whitespaces": 105, "language": "en" } }, { "id": 39221, "commit_id": "2b98f1045321475f6537986af134fb53f8320268", "repo": "recommenders", "path": "contrib/sarplus/python/pysarplus/SARPlus.py", "file_name": "SARPlus.py", "fun_name": "fit", "commit_message": "Correct typos", "code": "def fit(self, df):\n \n # threshold - items below this number get set to zero in cooccurrence counts\n\n df.createOrReplaceTempView(self.f(\"{prefix}df_train_input\"))\n\n if self.timedecay_formula:\n # WARNING: previously we would take the last value in training dataframe and set it\n # as a matrix U element\n # for each user-item pair. 
Now with time decay, we compute a sum over ratings given\n # by a user in the case\n # when T=np.inf, so user gets a cumulative sum of ratings for a particular item and\n # not the last rating.\n # Time Decay\n # does a group by on user item pairs and apply the formula for time decay there\n # Time T parameter is in days and input time is in seconds,\n # so we do dt/60/(T*24*60)=dt/(T*24*3600)\n # the following is the query which we want to run\n\n query = self.f(\n \n )\n\n # replace with timedecayed version\n df = self.spark.sql(query)\n else:\n # since SQL is case-insensitive, this check needs to be performed similar\n if self.header[\"col_timestamp\"].lower() in [\n s.name.lower() for s in df.schema\n ]:\n # we need to de-duplicate items by using the latest item\n query = self.f(\n \n )\n\n df = self.spark.sql(query)\n\n df.createOrReplaceTempView(self.f(\"{prefix}df_train\"))\n\n log.info(\"sarplus.fit 1/2: compute item cooccurrences...\")\n\n # compute cooccurrence above minimum threshold\n query = self.f(\n \n )\n\n item_cooccurrence = self.spark.sql(query)\n item_cooccurrence.write.mode(\"overwrite\").saveAsTable(\n self.f(\"{prefix}item_cooccurrence\")\n )\n\n # compute the diagonal used later for Jaccard and Lift\n if self.similarity_type == SIM_LIFT or self.similarity_type == SIM_JACCARD:\n item_marginal = self.spark.sql(\n self.f(\n \"SELECT i1 i, value AS margin FROM {prefix}item_cooccurrence WHERE i1 = i2\"\n )\n )\n item_marginal.createOrReplaceTempView(self.f(\"{prefix}item_marginal\"))\n\n if self.similarity_type == SIM_COOCCUR:\n self.item_similarity = item_cooccurrence\n elif self.similarity_type == SIM_JACCARD:\n query = self.f(\n \n )\n self.item_similarity = self.spark.sql(query)\n elif self.similarity_type == SIM_LIFT:\n query = self.f(\n \n )\n self.item_similarity = self.spark.sql(query)\n else:\n raise ValueError(\n \"Unknown similarity type: {0}\".format(self.similarity_type)\n )\n\n # store upper triangular\n log.info(\n \"sarplus.fit 2/2: compute similarity metric %s...\" % self.similarity_type\n )\n self.item_similarity.write.mode(\"overwrite\").saveAsTable(\n self.f(\"{prefix}item_similarity_upper\")\n )\n\n # expand upper triangular to full matrix\n\n query = self.f(\n \n )\n\n self.item_similarity = self.spark.sql(query)\n self.item_similarity.write.mode(\"overwrite\").saveAsTable(\n self.f(\"{prefix}item_similarity\")\n )\n\n # free space\n self.spark.sql(self.f(\"DROP TABLE {prefix}item_cooccurrence\"))\n self.spark.sql(self.f(\"DROP TABLE {prefix}item_similarity_upper\"))\n\n self.item_similarity = self.spark.table(self.f(\"{prefix}item_similarity\"))\n", "url": "https://github.com/microsoft/recommenders.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 1172, "n_words": 329, "vocab_size": 175, "complexity": 9, "nloc": 109, "token_counts": 375, "n_ast_nodes": 669, "n_identifiers": 29, "d_id": 7143, "documentation": { "docstring": "Main fit method for SAR.\n\n Expects the dataframes to have row_id, col_id columns which are indexes,\n i.e. 
contain the sequential integer index of the original alphanumeric user and item IDs.\n Dataframe also contains rating and timestamp as floats; timestamp is in seconds since Epoch by default.\n\n Arguments:\n df (pySpark.DataFrame): input dataframe which contains the index of users and items.\n \n SELECT\n {col_user}, {col_item}, \n SUM({col_rating} * EXP(-log(2) * (latest_timestamp - CAST({col_timestamp} AS long)) / ({time_decay_coefficient} * 3600 * 24))) as {col_rating}\n FROM {prefix}df_train_input,\n (SELECT CAST(MAX({col_timestamp}) AS long) latest_timestamp FROM {prefix}df_train_input)\n GROUP BY {col_user}, {col_item} \n CLUSTER BY {col_user} \n \n SELECT {col_user}, {col_item}, {col_rating}\n FROM\n (\n SELECT\n {col_user}, {col_item}, {col_rating}, \n ROW_NUMBER() OVER (PARTITION BY {col_user}, {col_item} ORDER BY {col_timestamp} DESC) latest\n FROM {prefix}df_train_input\n )\n WHERE latest = 1\n \n SELECT A.{col_item} i1, B.{col_item} i2, COUNT(*) value\n FROM {prefix}df_train A INNER JOIN {prefix}df_train B\n ON A.{col_user} = B.{col_user} AND A.{col_item} <= b.{col_item} \n GROUP BY A.{col_item}, B.{col_item}\n HAVING COUNT(*) >= {threshold}\n CLUSTER BY i1, i2\n \n SELECT i1, i2, value / (M1.margin + M2.margin - value) AS value\n FROM {prefix}item_cooccurrence A \n INNER JOIN {prefix}item_marginal M1 ON A.i1 = M1.i \n INNER JOIN {prefix}item_marginal M2 ON A.i2 = M2.i\n CLUSTER BY i1, i2\n \n SELECT i1, i2, value / (M1.margin * M2.margin) AS value\n FROM {prefix}item_cooccurrence A \n INNER JOIN {prefix}item_marginal M1 ON A.i1 = M1.i \n INNER JOIN {prefix}item_marginal M2 ON A.i2 = M2.i\n CLUSTER BY i1, i2\n \n SELECT i1, i2, value\n FROM\n (\n (SELECT i1, i2, value FROM {prefix}item_similarity_upper)\n UNION ALL\n (SELECT i2 i1, i1 i2, value FROM {prefix}item_similarity_upper WHERE i1 <> i2)\n )\n CLUSTER BY i1\n ", "n_words": 255, "vocab_size": 133, "n_whitespaces": 854, "language": "en" } }, { "id": 196217, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/subsets.py", "file_name": "subsets.py", "fun_name": "iterate_binary", "commit_message": "Updated import locations", "code": "def iterate_binary(self, k):\n \n bin_list = Subset.bitlist_from_subset(self.subset, self.superset)\n n = (int(''.join(bin_list), 2) + k) % 2**self.superset_size\n bits = bin(n)[2:].rjust(self.superset_size, '0')\n return Subset.subset_from_bitlist(self.superset, bits)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 57, "n_words": 22, "vocab_size": 20, "complexity": 1, "nloc": 5, "token_counts": 75, "n_ast_nodes": 120, "n_identifiers": 16, "d_id": 47717, "documentation": { "docstring": "\n This is a helper function. It iterates over the\n binary subsets by ``k`` steps. 
This variable can be\n both positive or negative.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Subset\n >>> a = Subset(['c', 'd'], ['a', 'b', 'c', 'd'])\n >>> a.iterate_binary(-2).subset\n ['d']\n >>> a = Subset(['a', 'b', 'c'], ['a', 'b', 'c', 'd'])\n >>> a.iterate_binary(2).subset\n []\n\n See Also\n ========\n\n next_binary, prev_binary\n ", "n_words": 59, "vocab_size": 45, "n_whitespaces": 172, "language": "en" } }, { "id": 100480, "commit_id": "0189029dbaad486e623353ee4a8451af8c85f4e4", "repo": "faceswap", "path": "plugins/train/model/phaze_a.py", "file_name": "phaze_a.py", "fun_name": "_get_input_shape", "commit_message": "Phaze-A: Add MobileNetV3 encoder", "code": "def _get_input_shape(self):\n \n arch = self.config[\"enc_architecture\"]\n enforce_size = _MODEL_MAPPING[arch].get(\"enforce_for_weights\", False)\n default_size = _MODEL_MAPPING[arch][\"default_size\"]\n scaling = self.config[\"enc_scaling\"] / 100\n\n min_size = _MODEL_MAPPING[arch].get(\"min_size\", 32)\n size = int(max(min_size, min(default_size, ((default_size * scaling) // 16) * 16)))\n\n if self.config[\"enc_load_weights\"] and enforce_size and scaling != 1.0:\n logger.warning(\"%s requires input size to be %spx when loading imagenet weights. \"\n \"Adjusting input size from %spx to %spx\",\n arch, default_size, size, default_size)\n retval = (default_size, default_size, 3)\n else:\n retval = (size, size, 3)\n\n logger.debug(\"Encoder input set to: %s\", retval)\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 244, "n_words": 82, "vocab_size": 60, "complexity": 4, "nloc": 16, "token_counts": 139, "n_ast_nodes": 232, "n_identifiers": 18, "d_id": 19953, "documentation": { "docstring": " Obtain the input shape for the model.\n\n Input shape is calculated from the selected Encoder's input size, scaled to the user\n selected Input Scaling, rounded down to the nearest 16 pixels.\n\n Notes\n -----\n Some models (NasNet) require the input size to be of a certain dimension if loading\n imagenet weights. In these instances resize inputs and raise warning message\n\n Returns\n -------\n tuple\n The shape tuple for the input size to the Phaze-A model\n ", "n_words": 73, "vocab_size": 53, "n_whitespaces": 155, "language": "en" } }, { "id": 147395, "commit_id": "60054995e65304fb14e6d0ab69bdec07aa9389fe", "repo": "ray", "path": "python/ray/util/actor_pool.py", "file_name": "actor_pool.py", "fun_name": "has_free", "commit_message": "[docs] fix doctests and activate CI (#23418)", "code": "def has_free(self):\n \n return len(self._idle_actors) > 0 and len(self._pending_submits) == 0\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 24, "n_words": 10, "vocab_size": 9, "complexity": 2, "nloc": 2, "token_counts": 24, "n_ast_nodes": 41, "n_identifiers": 5, "d_id": 33944, "documentation": { "docstring": "Returns whether there are any idle actors available.\n\n Returns:\n True if there are any idle actors and no pending submits.\n\n Examples:\n >>> @ray.remote # doctest: +SKIP\n >>> class Actor: # doctest: +SKIP\n ... ... 
# doctest: +SKIP\n >>> a1 = Actor.remote() # doctest: +SKIP\n >>> pool = ActorPool(a1) # doctest: +SKIP\n >>> pool.submit(lambda a, v: a.double.remote(v), 1) # doctest: +SKIP\n >>> print(pool.has_free()) # doctest: +SKIP\n False\n >>> print(pool.get_next()) # doctest: +SKIP\n 2\n >>> print(pool.has_free()) # doctest: +SKIP\n True\n ", "n_words": 78, "vocab_size": 38, "n_whitespaces": 246, "language": "en" } }, { "id": 130358, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/autoscaler/_private/aliyun/utils.py", "file_name": "utils.py", "fun_name": "stop_instances", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def stop_instances(self, instance_ids, stopped_mode=\"StopCharging\"):\n \n request = StopInstancesRequest()\n request.set_InstanceIds(instance_ids)\n request.set_StoppedMode(stopped_mode)\n response = self._send_request(request)\n if response is None:\n logging.error(\"stop_instances failed\")\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 71, "n_words": 18, "vocab_size": 16, "complexity": 2, "nloc": 7, "token_counts": 48, "n_ast_nodes": 84, "n_identifiers": 12, "d_id": 29244, "documentation": { "docstring": "Stop one or more ECS instances that are in the Running state.\n\n :param instance_ids: The IDs of instances.\n :param stopped_mode: Specifies whether billing for the instance\n continues after the instance is stopped.\n ", "n_words": 32, "vocab_size": 28, "n_whitespaces": 81, "language": "en" } }, { "id": 269208, "commit_id": "c52c11968b096580577c75b169f51c5b39002106", "repo": "keras", "path": "keras/utils/dataset_utils.py", "file_name": "dataset_utils.py", "fun_name": "check_validation_split_arg", "commit_message": "Updated tests for subset=\"both\"", "code": "def check_validation_split_arg(validation_split, subset, shuffle, seed):\n \n if validation_split and not 0 < validation_split < 1:\n raise ValueError(\n '`validation_split` must be between 0 and 1, received: %s' %\n (validation_split,))\n if (validation_split or subset) and not (validation_split and subset):\n raise ValueError(\n 'If `subset` is set, `validation_split` must be set, and inversely.')\n if subset not in ('training', 'validation', 'both', None):\n raise ValueError('`subset` must be either \"training\", '\n '\"validation\" or \"both\", received: %s' % (subset,))\n if validation_split and shuffle and seed is None:\n raise ValueError(\n 'If using `validation_split` and shuffling the data, you must provide '\n 'a `seed` argument, to make sure that there is no overlap between the '\n 'training and validation subset.')\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 188, "n_words": 109, "vocab_size": 68, "complexity": 11, "nloc": 16, "token_counts": 92, "n_ast_nodes": 159, "n_identifiers": 6, "d_id": 79952, "documentation": { "docstring": "Raise errors in case of invalid argument values.\n\n Args:\n validation_split: float between 0 and 1, fraction of data to reserve for\n validation.\n subset: One of \"training\", \"validation\" or \"both\". Only used if `validation_split`\n is set.\n shuffle: Whether to shuffle the data. 
Either True or False.\n seed: random seed for shuffling and transformations.\n ", "n_words": 52, "vocab_size": 46, "n_whitespaces": 76, "language": "en" } }, { "id": 200579, "commit_id": "22174995eac1f437c5f4abe0232760877daf586f", "repo": "sympy", "path": "sympy/tensor/tensor.py", "file_name": "tensor.py", "fun_name": "_dedupe_indices", "commit_message": "TensMul._dedupe_indices: remove index_structure arg\n\n_get_generator_for_dummy_indices is a staticmethod, and so I can just\ncall _IndexStructure._get_generator_for_dummy_indices", "code": "def _dedupe_indices(new, exclude):\n \n exclude = set(exclude)\n dums_new = set(get_dummy_indices(new))\n\n conflicts = dums_new.intersection(exclude)\n if len(conflicts) == 0:\n return None\n\n \n exclude.update(dums_new)\n self_args_free = [(i, None) for i in exclude]\n gen = _IndexStructure._get_generator_for_dummy_indices(self_args_free)\n repl = {}\n for d in conflicts:\n if -d in repl.keys():\n continue\n newname = gen(d.tensor_index_type)\n new_d = d.func(newname, *d.args[1:])\n repl[d] = new_d\n repl[-d] = -new_d\n\n if len(repl) == 0:\n return None\n\n new_renamed = new._replace_indices(repl)\n return new_renamed\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 257, "n_words": 66, "vocab_size": 44, "complexity": 6, "nloc": 26, "token_counts": 148, "n_ast_nodes": 240, "n_identifiers": 25, "d_id": 49714, "documentation": { "docstring": "\n exclude: set\n new: TensExpr\n\n If ``new`` has any dummy indices that are in ``exclude``, return a version\n of new with those indices replaced. If no replacements are needed,\n return None\n\n \n ``self_args_free`` is to be passed to ``_IndexStructure._get_generator_for_dummy_indices()``.\n Since the latter does not use the index position for anything, we just\n set it as ``None`` here.\n ", "n_words": 55, "vocab_size": 48, "n_whitespaces": 127, "language": "en" } }, { "id": 100672, "commit_id": "a9908b46f77dc66ac7efe7100ea0eed4b1f2b460", "repo": "faceswap", "path": "tools/alignments/jobs.py", "file_name": "jobs.py", "fun_name": "process", "commit_message": "Alignments tool - Replace 'extract-large' with 'min-size'", "code": "def process(self) -> None:\n \n logger.info(\"[EXTRACT FACES]\") # Tidy up cli output\n self._check_folder()\n if self._is_legacy:\n self._legacy_check()\n self._saver = ImagesSaver(self._faces_dir, as_bytes=True)\n\n if self._min_size > 0:\n logger.info(\"Only selecting faces that have been resized from a minimum resolution \"\n \"of %spx\", self._min_size)\n\n self._export_faces()\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 134, "n_words": 39, "vocab_size": 38, "complexity": 3, "nloc": 11, "token_counts": 66, "n_ast_nodes": 117, "n_identifiers": 13, "d_id": 20130, "documentation": { "docstring": " Run the re-extraction from Alignments file process", "n_words": 7, "vocab_size": 7, "n_whitespaces": 7, "language": "en" } }, { "id": 264309, "commit_id": "54834c47f8870e7faabcd847c3270da0bd3d2884", "repo": "netbox", "path": "netbox/netbox/views/generic/object_views.py", "file_name": "object_views.py", "fun_name": "alter_object", "commit_message": "Refactor generic views; add plugins dev documentation", "code": "def alter_object(self, obj, request, url_args, url_kwargs):\n \n return obj\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 24, "n_identifiers": 6, "d_id": 77680, "documentation": { "docstring": "\n Provides a hook for views to modify an object before it is processed. For example, a parent object can be\n defined given some parameter from the request URL.\n\n Args:\n obj: The object being edited\n request: The current request\n url_args: URL path args\n url_kwargs: URL path kwargs\n ", "n_words": 46, "vocab_size": 39, "n_whitespaces": 119, "language": "en" } }, { "id": 247794, "commit_id": "9d21ecf7ceab55bc19c4457b8b07401b0b1623a7", "repo": "synapse", "path": "tests/storage/test_id_generators.py", "file_name": "test_id_generators.py", "fun_name": "test_load_existing_stream", "commit_message": "Add type hints to tests files. (#12256)", "code": "def test_load_existing_stream(self) -> None:\n \n self._insert_rows(\"foobar1\", \"first\", 3)\n self._insert_rows(\"foobar2\", \"second\", 3)\n self._insert_rows(\"foobar2\", \"second\", 1, update_stream_table=False)\n\n first_id_gen = self._create_id_generator(\"first\", writers=[\"first\", \"second\"])\n second_id_gen = self._create_id_generator(\"second\", writers=[\"first\", \"second\"])\n\n # The first ID gen will notice that it can advance its token to 7 as it\n # has no in progress writes...\n self.assertEqual(first_id_gen.get_positions(), {\"first\": 7, \"second\": 6})\n self.assertEqual(first_id_gen.get_current_token_for_writer(\"first\"), 7)\n self.assertEqual(first_id_gen.get_current_token_for_writer(\"second\"), 6)\n self.assertEqual(first_id_gen.get_persisted_upto_position(), 7)\n\n # ... but the second ID gen doesn't know that.\n self.assertEqual(second_id_gen.get_positions(), {\"first\": 3, \"second\": 7})\n self.assertEqual(second_id_gen.get_current_token_for_writer(\"first\"), 3)\n self.assertEqual(second_id_gen.get_current_token_for_writer(\"second\"), 7)\n self.assertEqual(first_id_gen.get_persisted_upto_position(), 7)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 198, "n_words": 79, "vocab_size": 61, "complexity": 1, "nloc": 17, "token_counts": 190, "n_ast_nodes": 330, "n_identifiers": 12, "d_id": 71927, "documentation": { "docstring": "Test creating ID gens with multiple tables that have rows from after\n the position in `stream_positions` table.\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 31, "language": "en" } }, { "id": 211927, "commit_id": "ada85ff1dc6dc1d5857141b3202733870de5c809", "repo": "bokeh", "path": "bokeh/colors/color.py", "file_name": "color.py", "fun_name": "to_hex", "commit_message": "Bump min sphinx version (#11973)\n\n* Bump min sphinx version\r\n\r\n* checkpoint\r\n\r\n* comment for fully qualified names", "code": "def to_hex(self) -> str:\n \n return \"#%02X%02X%02X\" % (self.r, self.g, self.b)\n", "url": "https://github.com/bokeh/bokeh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 24, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 11, "token_counts": 24, "n_ast_nodes": 40, "n_identifiers": 6, "d_id": 53164, "documentation": { "docstring": " Return a hex color string for this RGB color.\n\n Any alpha value on this color is discarded, only hex color strings for\n the RGB components are returned.\n\n Returns:\n str, ``\"#RRGGBB\"``\n\n ", "n_words": 30, "vocab_size": 24, "n_whitespaces": 70, "language": "en" 
} }, { "id": 49818, "commit_id": "f4d6e64cdc132ae868699a0ba442f4ab1d304a14", "repo": "PaddleHub", "path": "modules/image/text_to_image/disco_diffusion_cnclip_vitb16/reverse_diffusion/model/nn.py", "file_name": "nn.py", "fun_name": "scale_module", "commit_message": "add disco_diffusion_cnclip_vitb16 module", "code": "def scale_module(module, scale):\n \n for p in module.parameters():\n p.detach().mul_(scale)\n return module\n\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 26, "n_words": 10, "vocab_size": 10, "complexity": 2, "nloc": 4, "token_counts": 29, "n_ast_nodes": 49, "n_identifiers": 7, "d_id": 9929, "documentation": { "docstring": "\n Scale the parameters of a module and return it.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 16, "language": "en" } }, { "id": 322374, "commit_id": "487162262196bead8d9b4c2306f313b8f64edf9b", "repo": "PaddleNLP", "path": "examples/text_summarization/prophetnet/evaluate/cnndm/bs_pyrouge.py", "file_name": "bs_pyrouge.py", "fun_name": "settings_file", "commit_message": "Add model Prohetnet (#1698)\n\n* add Prohetnet model\r\n\r\n* update prohetnet\r\n\r\n* update format\r\n\r\n* pre commit\r\n\r\n* add prophetnet example\r\n\r\n* update tokenizer.py,run_train.sh,train_prophetnet.py\r\n\r\n* remove evaluate/gigaword/__init__.py\r\n\r\nCo-authored-by: smallv0221 <33639025+smallv0221@users.noreply.github.com>", "code": "def settings_file(self):\n \n return self._settings_file\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 19, "n_identifiers": 3, "d_id": 118150, "documentation": { "docstring": "\n Path of the setttings file, which stores the ROUGE home dir.\n\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 26, "language": "en" } }, { "id": 285633, "commit_id": "291e7d69914e9ab8b9bf9b20bb44d971bcedc247", "repo": "OpenBBTerminal", "path": "openbb_terminal/portfolio/portfolio_model.py", "file_name": "portfolio_model.py", "fun_name": "get_orderbook", "commit_message": "Add ETF support for portfolio allocation command (#2143)\n\n* fill etf and prevent different length errors\r\n\r\n* auto fill etf sectors and allow 0 value portfolios\r\n\r\n* allow 0 value portfolios\r\n\r\n* remove unused folder\r\n\r\n* allow 1 asset portfolios\r\n\r\n* split allocation calls by category in controller\r\n\r\n* comment sector allocation model\r\n\r\n* get country and region allocations for etf\r\n\r\n* add comments and black\r\n\r\n* improve comments\r\n\r\n* improve comments\r\n\r\n* allow for 1 category in sector, country and region\r\n\r\n* add progress bars\r\n\r\n* linting fix\r\n\r\n* fix mypy\r\n\r\n* set default date\r\n\r\n* fix pylint\r\n\r\n* add isin on paexport\r\n\r\n* merge main\r\n\r\n* auto pre load benchmark\r\n\r\n* fix rich table for np.float64\r\n\r\n* refactor portfolio allocs\r\n\r\n* refactor alloc command\r\n\r\n* add isins column\r\n\r\n* format output alloc\r\n\r\n* rename variable\r\n\r\n* fix nan bug\r\n\r\n* black\r\n\r\n* add ticker conversion to yf format by isin\r\n\r\n* warn and removed unsupported ISINs\r\n\r\n* solve same day trades bug\r\n\r\n* display bench loading progress\r\n\r\n* portfolio show\r\n\r\n* check if valid isins on preprocessing\r\n\r\n* black\r\n\r\n* fix bug when region empty\r\n\r\n* warn when category is 
empty\r\n\r\n* reformat preprocessing\r\n\r\n* codespell\r\n\r\n* check if ticker is valid\r\n\r\n* flake8\r\n\r\n* fix test\r\n\r\n* fix bug with trades on holidays\r\n\r\nCo-authored-by: Jeroen Bouma ", "code": "def get_orderbook(self):\n \n df = self.__orderbook[\n [\n \"Date\",\n \"Type\",\n \"Ticker\",\n \"Side\",\n \"Price\",\n \"Quantity\",\n \"Fees\",\n \"Investment\",\n \"Currency\",\n \"Sector\",\n \"Industry\",\n \"Country\",\n \"Region\",\n ]\n ]\n df = df.replace(np.nan, \"-\")\n df[\"Date\"] = df[\"Date\"].dt.strftime(\"%Y-%m-%d\")\n df.sort_values(by=\"Date\", ascending=False, inplace=True)\n return df\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 299, "n_words": 33, "vocab_size": 28, "complexity": 1, "nloc": 22, "token_counts": 87, "n_ast_nodes": 157, "n_identifiers": 13, "d_id": 85338, "documentation": { "docstring": "Get formatted transactions\n\n Returns:\n pd.DataFrame: formatted transactions\n ", "n_words": 7, "vocab_size": 5, "n_whitespaces": 32, "language": "en" } }, { "id": 69488, "commit_id": "9209ec59c2216223bc1a7618bd95ec2424434849", "repo": "erpnext", "path": "erpnext/accounts/utils.py", "file_name": "utils.py", "fun_name": "_delete_accounting_ledger_entries", "commit_message": "refactor: split delete gl utility function into two", "code": "def _delete_accounting_ledger_entries(voucher_type, voucher_no):\n\t\n\t_delete_gl_entries(voucher_type, voucher_no)\n\t_delete_pl_entries(voucher_type, voucher_no)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 4, "n_words": 7, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 33, "n_identifiers": 5, "d_id": 15053, "documentation": { "docstring": "\n\tRemove entries from both General and Payment Ledger for specified Voucher\n\t", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 108373, "commit_id": "1bc33e99efc9e4be433f99c6a74c7e3b30147dac", "repo": "matplotlib", "path": "lib/matplotlib/ticker.py", "file_name": "ticker.py", "fun_name": "base", "commit_message": "Improve consistency in LogLocator and LogFormatter API", "code": "def base(self, base):\n \n self.set_base(base)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 14, "n_ast_nodes": 25, "n_identifiers": 3, "d_id": 23162, "documentation": { "docstring": "\n Change the *base* for labeling.\n\n .. warning::\n Should always match the base used for :class:`LogLocator`\n ", "n_words": 15, "vocab_size": 13, "n_whitespaces": 47, "language": "en" } }, { "id": 289830, "commit_id": "f73fc9e3558eca0a1e74a19273a67f8d2bfa8af7", "repo": "core", "path": "tests/helpers/test_template.py", "file_name": "test_template.py", "fun_name": "test_is_state", "commit_message": "Adds states and state_attr as a filter, adds is_state and is_state_attr as a test. 
(#79473)", "code": "def test_is_state(hass):\n \n hass.states.async_set(\"test.object\", \"available\")\n tpl = template.Template(\n ,\n hass,\n )\n assert tpl.async_render() == \"yes\"\n\n tpl = template.Template(\n ,\n hass,\n )\n assert tpl.async_render() is False\n\n tpl = template.Template(\n ,\n hass,\n )\n assert tpl.async_render() == \"yes\"\n\n tpl = template.Template(\n ,\n hass,\n )\n assert tpl.async_render() == \"test.object\"\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 142, "n_words": 44, "vocab_size": 17, "complexity": 1, "nloc": 30, "token_counts": 92, "n_ast_nodes": 162, "n_identifiers": 8, "d_id": 88960, "documentation": { "docstring": "Test is_state method.\n{% if is_state(\"test.object\", \"available\") %}yes{% else %}no{% endif %}\n \n{{ is_state(\"test.noobject\", \"available\") }}\n \n{% if \"test.object\" is is_state(\"available\") %}yes{% else %}no{% endif %}\n \n{{ ['test.object'] | select(\"is_state\", \"available\") | first | default }}\n ", "n_words": 36, "vocab_size": 23, "n_whitespaces": 63, "language": "en" } }, { "id": 76983, "commit_id": "134bd19bef529f0c205a48cedb8574ee0c52d436", "repo": "wagtail", "path": "wagtail/contrib/forms/forms.py", "file_name": "forms.py", "fun_name": "get_formatted_field_choices", "commit_message": "add ability for form builder to split choices by newline\n\n- fixes #3001\n- keep support for comma separated lists if supplied", "code": "def get_formatted_field_choices(self, field):\n \n\n if \"\\n\" in field.choices:\n choices = map(\n lambda x: (\n x.strip().rstrip(\",\").strip(),\n x.strip().rstrip(\",\").strip(),\n ),\n field.choices.split(\"\\r\\n\"),\n )\n else:\n choices = map(lambda x: (x.strip(), x.strip()), field.choices.split(\",\"))\n\n return choices\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 172, "n_words": 28, "vocab_size": 23, "complexity": 2, "nloc": 12, "token_counts": 99, "n_ast_nodes": 172, "n_identifiers": 9, "d_id": 16608, "documentation": { "docstring": "\n Returns a list of choices [(string, string),] for the field.\n Split the provided choices into a list, separated by new lines.\n If no new lines in the provided choices, split by commas.\n ", "n_words": 32, "vocab_size": 25, "n_whitespaces": 61, "language": "en" } }, { "id": 22626, "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", "repo": "Python", "path": "ftp_send_receive.py", "file_name": "ftp_send_receive.py", "fun_name": "receive_file", "commit_message": "refactor: clean code\n\nSigned-off-by: slowy07 ", "code": "def receive_file(filename=\"example.txt\"):\n with open(filename, \"wb\") as out_file:\n ftp.retrbinary(\"RETR \" + filename, out_file.write, 1024)\n ftp.quit()\n\n\n\n\n", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 30, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 4, "token_counts": 36, "n_ast_nodes": 69, "n_identifiers": 8, "d_id": 4380, "documentation": { "docstring": "\n\tThe file which will be sent via the FTP server\n\tThe file send will be send to the current working directory\n", "n_words": 21, "vocab_size": 15, "n_whitespaces": 19, "language": "en" } }, { "id": 108782, "commit_id": "f7f3bb6079048506613c513231e1bd2a87ebc7d3", "repo": "matplotlib", "path": "lib/matplotlib/figure.py", "file_name": 
"figure.py", "fun_name": "set_layout_engine", "commit_message": "ENH: add ability to remove layout engine\n\nThis also adds a \"place holder\" layout engine to ensure that users can not \"go\nthrough zero\" and change to an incompatible layout engine.\n\nCo-authored-by: Jody Klymak ", "code": "def set_layout_engine(self, layout=None, **kwargs):\n \n if layout is None:\n if mpl.rcParams['figure.autolayout']:\n layout = 'tight'\n elif mpl.rcParams['figure.constrained_layout.use']:\n layout = 'constrained'\n else:\n self._layout_engine = None\n return\n if layout == 'tight':\n new_layout_engine = TightLayoutEngine(**kwargs)\n elif layout == 'constrained':\n new_layout_engine = ConstrainedLayoutEngine(**kwargs)\n elif layout == 'compressed':\n new_layout_engine = ConstrainedLayoutEngine(compress=True,\n **kwargs)\n elif layout == 'none':\n if self._layout_engine is not None:\n new_layout_engine = PlaceHolderLayoutEngine(\n self._layout_engine.adjust_compatible,\n self._layout_engine.colorbar_gridspec\n )\n else:\n new_layout_engine = None\n elif isinstance(layout, LayoutEngine):\n new_layout_engine = layout\n else:\n raise ValueError(f\"Invalid value for 'layout': {layout!r}\")\n\n if self._check_layout_engines_compat(self._layout_engine,\n new_layout_engine):\n self._layout_engine = new_layout_engine\n else:\n raise RuntimeError('Colorbar layout of new layout engine not '\n 'compatible with old engine, and a colorbar '\n 'has been created. Engine not changed.')\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 612, "n_words": 107, "vocab_size": 60, "complexity": 11, "nloc": 35, "token_counts": 167, "n_ast_nodes": 296, "n_identifiers": 19, "d_id": 23340, "documentation": { "docstring": "\n Set the layout engine for this figure.\n\n Parameters\n ----------\n layout: {'constrained', 'compressed', 'tight', 'none'} or \\\n`LayoutEngine` or None\n\n - 'constrained' will use `~.ConstrainedLayoutEngine`\n - 'compressed' will also use `~.ConstrainedLayoutEngine`, but with\n a correction that attempts to make a good layout for fixed-aspect\n ratio Axes.\n - 'tight' uses `~.TightLayoutEngine`\n - 'none' removes layout engine.\n\n If `None`, the behavior is controlled by :rc:`figure.autolayout`\n (which if `True` behaves as if 'tight' were passed) and\n :rc:`figure.constrained_layout.use` (which if true behaves as if\n 'constrained' were passed). If both are true,\n :rc:`figure.autolayout` takes priority.\n\n Users and libraries can define their own layout engines and pass\n the instance directly as well.\n\n kwargs: dict\n The keyword arguments are passed to the layout engine to set things\n like padding and margin sizes. 
Only used if *layout* is a string.\n\n ", "n_words": 131, "vocab_size": 94, "n_whitespaces": 344, "language": "en" } }, { "id": 278654, "commit_id": "3613c3defc39c236fb1592c4f7ba1a9cc887343a", "repo": "keras", "path": "keras/constraints.py", "file_name": "constraints.py", "fun_name": "_kernel_constraint", "commit_message": "Remove pylint comments.\n\nPiperOrigin-RevId: 452353044", "code": "def _kernel_constraint(self, kernel):\n \n padding = backend.constant([[1, 1], [1, 1]], dtype=\"int32\")\n\n kernel_shape = backend.shape(kernel)[0]\n start = backend.cast(kernel_shape / 2, \"int32\")\n\n kernel_new = backend.switch(\n backend.cast(tf.math.floormod(kernel_shape, 2), \"bool\"),\n lambda: kernel[start - 1 : start, start - 1 : start],\n lambda: kernel[start - 1 : start, start - 1 : start]\n + backend.zeros((2, 2), dtype=kernel.dtype),\n )\n index = backend.switch(\n backend.cast(tf.math.floormod(kernel_shape, 2), \"bool\"),\n lambda: backend.constant(0, dtype=\"int32\"),\n lambda: backend.constant(1, dtype=\"int32\"),\n )\n while_condition = lambda index, *args: backend.less(index, start)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 212, "n_words": 72, "vocab_size": 44, "complexity": 1, "nloc": 24, "token_counts": 246, "n_ast_nodes": 299, "n_identifiers": 21, "d_id": 82655, "documentation": { "docstring": "Radially constraints a kernel with shape (height, width,\n channels).", "n_words": 9, "vocab_size": 9, "n_whitespaces": 15, "language": "en" } }, { "id": 222491, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/difflib.py", "file_name": "difflib.py", "fun_name": "set_seq1", "commit_message": "add python 3.10.4 for windows", "code": "def set_seq1(self, a):\n \n\n if a is self.a:\n return\n self.a = a\n self.matching_blocks = self.opcodes = None\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 55, "n_words": 16, "vocab_size": 13, "complexity": 2, "nloc": 5, "token_counts": 30, "n_ast_nodes": 50, "n_identifiers": 5, "d_id": 56591, "documentation": { "docstring": "Set the first sequence to be compared.\n\n The second sequence to be compared is not changed.\n\n >>> s = SequenceMatcher(None, \"abcd\", \"bcde\")\n >>> s.ratio()\n 0.75\n >>> s.set_seq1(\"bcde\")\n >>> s.ratio()\n 1.0\n >>>\n\n SequenceMatcher computes and caches detailed information about the\n second sequence, so if you want to compare one sequence S against\n many sequences, use .set_seq2(S) once and call .set_seq1(x)\n repeatedly for each of the other sequences.\n\n See also set_seqs() and set_seq2().\n ", "n_words": 71, "vocab_size": 56, "n_whitespaces": 169, "language": "en" } }, { "id": 296701, "commit_id": "64381acbaf2930cda5dfa538d00bfa9f5172e690", "repo": "core", "path": "tests/common.py", "file_name": "common.py", "fun_name": "assert_lists_same", "commit_message": "Mark device actions from hidden or auxiliary entities as secondary (#70278)", "code": "def assert_lists_same(a, b):\n \n assert len(a) == len(b)\n for i in a:\n assert i in b\n for i in b:\n assert i in a\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 49, "n_words": 23, "vocab_size": 14, "complexity": 3, "nloc": 6, "token_counts": 36, "n_ast_nodes": 57, "n_identifiers": 5, "d_id": 95675, "documentation": { 
"docstring": "Compare two lists, ignoring order.\n\n Check both that all items in a are in b and that all items in b are in a,\n otherwise assert_lists_same([\"1\", \"1\"], [\"1\", \"2\"]) could be True.\n ", "n_words": 32, "vocab_size": 24, "n_whitespaces": 41, "language": "en" } }, { "id": 214689, "commit_id": "03be003d417c4d0f90ec03fbca1ba0a0b337ff44", "repo": "flair", "path": "flair/models/multitask_model.py", "file_name": "multitask_model.py", "fun_name": "_get_state_dict", "commit_message": "multitask training", "code": "def _get_state_dict(self):\n \n model_state = {}\n\n for task in self.tasks:\n model_state[task] = {\n \"state_dict\": self.__getattr__(task)._get_state_dict(),\n \"class\": self.__getattr__(task).__class__,\n }\n\n return model_state\n", "url": "https://github.com/flairNLP/flair.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 99, "n_words": 19, "vocab_size": 17, "complexity": 2, "nloc": 8, "token_counts": 50, "n_ast_nodes": 84, "n_identifiers": 7, "d_id": 53765, "documentation": { "docstring": "\n Returns the state dict of the multitask model which has multiple models underneath.\n :return model_state: model state for the multitask model\n ", "n_words": 21, "vocab_size": 15, "n_whitespaces": 43, "language": "en" } }, { "id": 263807, "commit_id": "41483cb9e6d5086416c8fea6ad6781782c091c60", "repo": "pyinstaller", "path": "PyInstaller/utils/win32/winutils.py", "file_name": "winutils.py", "fun_name": "set_exe_build_timestamp", "commit_message": "winutils: optimize PE headers fixup\n\nAttempt to optimize PE headers fix-up from both time- and memory-\nintensity perspective.\n\nFirst, avoid specifying `fast_load=False` in `pefile.PE` constructor,\nbecause that triggers the bytes statistics collection\nhttps://github.com/erocarrera/pefile/blob/v2022.5.30/pefile.py#L2862-L2876\nwhich takes a long time for large files. Instead, we can obtain\nfull headers (required for build timestamp modification) by\ncalling `pe.full_load()` ourselves.\n\nSecond, use (an equivalent of) `MapFileAndCheckSumW` to compute\nthe PE checksum. For large files, it is orders of magnitude\nfaster than its pure-python `pefile.PE.generate_checksum`\ncounterpart.\n\nThe downside is that `MapFileAndCheckSumW` requires an on-disk\nfile as opposed to a memory buffer, so we need to split the\nPE headers fixup into two separate steps, with each modifying\nthe corresponding PE headers and (re)writing the whole file.\nEven so, this brings the fix-up process for a 700MB executable\ndown to seconds instead of minutes.\n\nIn addition, as noted on MSDN, `MapFileAndCheckSumW` internally\ncalls its ASCII variant (`MapFileAndCheckSumA`), so it cannot\nhandle file paths that contain characters that are not representable\nin the current code page. Therefore, we implement our own equivalent\nusing `ctypes` and pure widechar-based win32 API functions.", "code": "def set_exe_build_timestamp(exe_path, timestamp):\n \n import pefile\n\n with pefile.PE(exe_path, fast_load=True) as pe:\n # Manually perform a full load. We need it to load all headers, but specifying it in the constructor triggers\n # byte statistics gathering that takes forever with large files. So we try to go around that...\n pe.full_load()\n\n # Set build timestamp.\n # See: https://0xc0decafe.com/malware-analyst-guide-to-pe-timestamps\n timestamp = int(timestamp)\n # Set timestamp field in FILE_HEADER\n pe.FILE_HEADER.TimeDateStamp = timestamp\n # MSVC-compiled executables contain (at least?) 
one DIRECTORY_ENTRY_DEBUG entry that also contains timestamp\n # with same value as set in FILE_HEADER. So modify that as well, as long as it is set.\n debug_entries = getattr(pe, 'DIRECTORY_ENTRY_DEBUG', [])\n for debug_entry in debug_entries:\n if debug_entry.struct.TimeDateStamp:\n debug_entry.struct.TimeDateStamp = timestamp\n\n # Generate updated EXE data\n data = pe.write()\n\n # Rewrite the exe\n with open(exe_path, 'wb') as fp:\n fp.write(data)\n\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 277, "n_words": 131, "vocab_size": 95, "complexity": 3, "nloc": 13, "token_counts": 95, "n_ast_nodes": 171, "n_identifiers": 19, "d_id": 77447, "documentation": { "docstring": "\n Modifies the executable's build timestamp by updating values in the corresponding PE headers.\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 20, "language": "en" } }, { "id": 177937, "commit_id": "eb9198e827e0fdab1e10593c7ea91a56af299e8b", "repo": "label-studio", "path": "label_studio/data_import/uploader.py", "file_name": "uploader.py", "fun_name": "load_tasks", "commit_message": "fix: DEV-2235: Fix blind SSRF on add model and import (#2450)\n\n* fix: DEV-2235: Fix blind SSRF on add model and import\r\n\r\n* Fix ip check (DEV-2235)\r\n\r\n* Disable bandit check (DEV-2235)", "code": "def load_tasks(request, project):\n \n file_upload_ids, found_formats, data_keys = [], [], set()\n could_be_tasks_lists = False\n\n # take tasks from request FILES\n if len(request.FILES):\n check_file_sizes_and_number(request.FILES)\n for filename, file in request.FILES.items():\n file_upload = create_file_upload(request, project, file)\n if file_upload.format_could_be_tasks_list:\n could_be_tasks_lists = True\n file_upload_ids.append(file_upload.id)\n tasks, found_formats, data_keys = FileUpload.load_tasks_from_uploaded_files(project, file_upload_ids)\n\n # take tasks from url address\n elif 'application/x-www-form-urlencoded' in request.content_type:\n # empty url\n url = request.data.get('url')\n if not url:\n raise ValidationError('\"url\" is not found in request data')\n\n # try to load json with task or tasks from url as string\n json_data = str_to_json(url)\n if json_data:\n file_upload = create_file_upload(request, project, SimpleUploadedFile('inplace.json', url.encode()))\n file_upload_ids.append(file_upload.id)\n tasks, found_formats, data_keys = FileUpload.load_tasks_from_uploaded_files(project, file_upload_ids)\n \n # download file using url and read tasks from it\n else:\n if settings.SSRF_PROTECTION_ENABLED and url_is_local(url):\n raise ImportFromLocalIPError\n data_keys, found_formats, tasks, file_upload_ids = tasks_from_url(\n file_upload_ids, project, request, url\n )\n\n # take one task from request DATA\n elif 'application/json' in request.content_type and isinstance(request.data, dict):\n tasks = [request.data]\n\n # take many tasks from request DATA\n elif 'application/json' in request.content_type and isinstance(request.data, list):\n tasks = request.data\n\n # incorrect data source\n else:\n raise ValidationError('load_tasks: No data found in DATA or in FILES')\n\n # check is data root is list\n if not isinstance(tasks, list):\n raise ValidationError('load_tasks: Data root must be list')\n\n # empty tasks error\n if not tasks:\n raise ValidationError('load_tasks: No tasks added')\n\n check_max_task_number(tasks)\n return tasks, file_upload_ids, could_be_tasks_lists, found_formats, 
list(data_keys)\n\n", "url": "https://github.com/heartexlabs/label-studio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 552, "n_words": 216, "vocab_size": 108, "complexity": 15, "nloc": 38, "token_counts": 292, "n_ast_nodes": 478, "n_identifiers": 40, "d_id": 42547, "documentation": { "docstring": " Load tasks from different types of request.data / request.files\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 13, "language": "en" } }, { "id": 42471, "commit_id": "e081b67f971fa478a98d5734366c602f85d9f7d9", "repo": "nltk", "path": "nltk/corpus/reader/wordnet.py", "file_name": "wordnet.py", "fun_name": "synset_from_pos_and_offset", "commit_message": "Warn about nonexistent synset offsets", "code": "def synset_from_pos_and_offset(self, pos, offset):\n \n # Check to see if the synset is in the cache\n if offset in self._synset_offset_cache[pos]:\n return self._synset_offset_cache[pos][offset]\n\n data_file = self._data_file(pos)\n data_file.seek(offset)\n data_file_line = data_file.readline()\n # If valid, the offset equals the 8-digit 0-padded integer found at the start of the line:\n line_offset = data_file_line[:8]\n if line_offset.isalnum() and offset == int(line_offset):\n synset = self._synset_from_pos_and_line(pos, data_file_line)\n assert synset._offset == offset\n self._synset_offset_cache[pos][offset] = synset\n else:\n synset = None\n warnings.warn(f\"No WordNet synset found for pos={pos} at offset={offset}.\")\n data_file.seek(0)\n return synset\n", "url": "https://github.com/nltk/nltk.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 230, "n_words": 80, "vocab_size": 53, "complexity": 4, "nloc": 16, "token_counts": 119, "n_ast_nodes": 199, "n_identifiers": 18, "d_id": 7557, "documentation": { "docstring": "\n - pos: The synset's part of speech, matching one of the module level\n attributes ADJ, ADJ_SAT, ADV, NOUN or VERB ('a', 's', 'r', 'n', or 'v').\n - offset: The byte offset of this synset in the WordNet dict file\n for this pos.\n\n >>> from nltk.corpus import wordnet as wn\n >>> print(wn.synset_from_pos_and_offset('n', 1740))\n Synset('entity.n.01')\n ", "n_words": 53, "vocab_size": 45, "n_whitespaces": 114, "language": "en" } }, { "id": 314760, "commit_id": "4bfdb1433e95dfe504e376ca082def5257c23bcb", "repo": "core", "path": "homeassistant/components/sonos/media_player.py", "file_name": "media_player.py", "fun_name": "async_unjoin_player", "commit_message": "Optimize Sonos unjoin behavior when using `media_player.unjoin` (#74086)\n\n* Coalesce Sonos unjoins to process together\r\n\r\n* Refactor for readability\r\n\r\n* Skip unjoin call if already ungrouped\r\n\r\n* Store unjoin data in a dedicated dataclass\r\n\r\n* Revert import adjustment", "code": "async def async_unjoin_player(self):\n \n sonos_data = self.hass.data[DATA_SONOS]\n household_id = self.speaker.household_id\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 9, "vocab_size": 8, "complexity": 2, "nloc": 12, "token_counts": 89, "n_ast_nodes": 40, "n_identifiers": 8, "d_id": 113364, "documentation": { "docstring": "Remove this player from any group.\n\n Coalesces all calls within 0.5s to allow use of SonosSpeaker.unjoin_multi()\n which optimizes the order in which speakers are removed from their groups.\n Removing coordinators last better preserves playqueues on the speakers.\n ", "n_words": 37, 
"vocab_size": 34, "n_whitespaces": 65, "language": "en" } }, { "id": 204309, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/sessions/backends/file.py", "file_name": "file.py", "fun_name": "_key_to_file", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _key_to_file(self, session_key=None):\n \n if session_key is None:\n session_key = self._get_or_create_session_key()\n\n # Make sure we're not vulnerable to directory traversal. Session keys\n # should always be md5s, so they should never contain directory\n # components.\n if not set(session_key).issubset(VALID_KEY_CHARS):\n raise InvalidSessionKey(\"Invalid characters in session key\")\n\n return os.path.join(self.storage_path, self.file_prefix + session_key)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 119, "n_words": 48, "vocab_size": 41, "complexity": 3, "nloc": 6, "token_counts": 56, "n_ast_nodes": 96, "n_identifiers": 13, "d_id": 50690, "documentation": { "docstring": "\n Get the file associated with this session key.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 191569, "commit_id": "a408ed3ea39dfa47e8b522a9e153b259f25df54e", "repo": "langchain", "path": "tests/unit_tests/chains/test_conversation.py", "file_name": "test_conversation.py", "fun_name": "test_conversation_chain_errors_bad_variable", "commit_message": "Samantha/add conversation chain (#166)\n\nAdd MemoryChain and ConversationChain as chains that take a docstore in\r\naddition to the prompt, and use the docstore to stuff context into the\r\nprompt. This can be used to have an ongoing conversation with a chatbot.\r\n\r\nProbably needs a bit of refactoring for code quality\r\n\r\nCo-authored-by: Harrison Chase ", "code": "def test_conversation_chain_errors_bad_variable() -> None:\n \n llm = FakeLLM()\n prompt = PromptTemplate(input_variables=[\"foo\"], template=\"{foo}\")\n memory = ConversationBufferMemory(dynamic_key=\"foo\")\n with pytest.raises(ValueError):\n ConversationChain(llm=llm, prompt=prompt, memory=memory, input_key=\"foo\")\n\n\n@pytest.mark.parametrize(\n \"memory\",\n [\n ConversationBufferMemory(dynamic_key=\"baz\"),\n ConversationSummaryMemory(llm=FakeLLM(), dynamic_key=\"baz\"),\n ],\n)", "url": "https://github.com/hwchase17/langchain.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"memory\",\n [\n ConversationBufferMemory(dynamic_key=\"baz\"),\n ConversationSummaryMemory(llm=FakeLLM(), dynamic_key=\"baz\"),\n ],\n)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 71, "n_words": 28, "vocab_size": 26, "complexity": 1, "nloc": 7, "token_counts": 60, "n_ast_nodes": 161, "n_identifiers": 18, "d_id": 46692, "documentation": { "docstring": "Test that conversation chain works in basic setting.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 12488, "commit_id": "ef662b529b2a2eecea7bb99759a9f7b9d86d3062", "repo": "jina", "path": "jina/clients/base/__init__.py", "file_name": "__init__.py", "fun_name": "_dry_run", "commit_message": "feat: add grpc health checking (#4779)", "code": "def _dry_run(self, **kwargs) -> bool:\n \n ...\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 6, "token_counts": 12, "n_ast_nodes": 22, "n_identifiers": 
4, "d_id": 2311, "documentation": { "docstring": "Sends a dry run to the Flow to validate if the Flow is ready to receive requests\n\n :param kwargs: potential kwargs received passed from the public interface\n ", "n_words": 27, "vocab_size": 22, "n_whitespaces": 41, "language": "en" } }, { "id": 215982, "commit_id": "f2a783643de61cac1ff3288b40241e5ce6e1ddc8", "repo": "salt", "path": "salt/utils/platform.py", "file_name": "platform.py", "fun_name": "is_fedora", "commit_message": "Update to latest ``pyupgrade`` hook. Stop skipping it on CI.\n\nSigned-off-by: Pedro Algarvio ", "code": "def is_fedora():\n \n (osname, osrelease, oscodename) = (\n x.strip('\"').strip(\"'\") for x in linux_distribution()\n )\n return osname == \"Fedora\"\n\n\n@real_memoize", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "@real_memoize", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 36, "n_words": 18, "vocab_size": 18, "complexity": 2, "nloc": 5, "token_counts": 36, "n_ast_nodes": 68, "n_identifiers": 8, "d_id": 54302, "documentation": { "docstring": "\n Simple function to return if host is Fedora or not\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 17, "language": "en" } }, { "id": 305791, "commit_id": "6f564e4f514b56bce281ec7e82703cfbff87b417", "repo": "core", "path": "homeassistant/components/rejseplanen/sensor.py", "file_name": "sensor.py", "fun_name": "update", "commit_message": "Improve entity type hints [r] (#77874)", "code": "def update(self) -> None:\n \n self.data.update()\n self._times = self.data.info\n\n if not self._times:\n self._state = None\n else:\n with suppress(TypeError):\n self._state = self._times[0][ATTR_DUE_IN]\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 92, "n_words": 20, "vocab_size": 17, "complexity": 2, "nloc": 9, "token_counts": 56, "n_ast_nodes": 95, "n_identifiers": 9, "d_id": 104575, "documentation": { "docstring": "Get the latest data from rejseplanen.dk and update the states.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 282590, "commit_id": "fd5821928265429d1ffb6e6d53f019915b3afbbc", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/cryptocurrency/due_diligence/pycoingecko_model.py", "file_name": "pycoingecko_model.py", "fun_name": "get_base_info", "commit_message": "adjusted format of logs (#1292)\n\nadjusted format of logs", "code": "def get_base_info(self) -> pd.DataFrame:\n \n\n regx = r'|'\n\n results = {}\n for attr in BASE_INFO:\n info_obj = self.coin.get(attr, {})\n if attr == \"description\":\n info_obj = info_obj.get(\"en\")\n info_obj = re.sub(regx, \"\", info_obj)\n info_obj = re.sub(r\"\\r\\n\\r\\n\", \" \", info_obj)\n results[attr] = info_obj\n results.update(self._get_base_market_data_info())\n df = pd.Series(results).to_frame().reset_index()\n df.columns = [\"Metric\", \"Value\"]\n df[\"Metric\"] = df[\"Metric\"].apply(\n lambda x: lambda_replace_underscores_in_column_names(x)\n if isinstance(x, str)\n else x\n )\n\n return df[df[\"Value\"].notna()]\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 243, "n_words": 62, "vocab_size": 46, "complexity": 4, "nloc": 26, "token_counts": 156, "n_ast_nodes": 262, "n_identifiers": 26, "d_id": 84179, "documentation": { "docstring": "Get all the base information about given coin. 
[Source: CoinGecko]\n\n Returns\n -------\n pandas.DataFrame\n Base information about coin\n ", "n_words": 17, "vocab_size": 15, "n_whitespaces": 56, "language": "en" } }, { "id": 68093, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/templates/pages/rfq.py", "file_name": "rfq.py", "fun_name": "check_supplier_has_docname_access", "commit_message": "style: format code with black", "code": "def check_supplier_has_docname_access(supplier):\n\tstatus = True\n\tif frappe.form_dict.name not in frappe.db.sql_list(\n\t\t,\n\t\t(supplier,),\n\t):\n\t\tstatus = False\n\treturn status\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 10, "n_words": 18, "vocab_size": 15, "complexity": 2, "nloc": 9, "token_counts": 36, "n_ast_nodes": 57, "n_identifiers": 8, "d_id": 14717, "documentation": { "docstring": "select parent from `tabRequest for Quotation Supplier`\n\t\twhere supplier = %s", "n_words": 11, "vocab_size": 11, "n_whitespaces": 9, "language": "en" } }, { "id": 118716, "commit_id": "2c153aa179a27539f856e389870161d5a58da213", "repo": "streamlit", "path": "lib/tests/streamlit/legacy_dataframe_styling_test.py", "file_name": "legacy_dataframe_styling_test.py", "fun_name": "test_format_float_precision", "commit_message": "Pandas 1.4 styler fix (#4316)\n\nChange the way we detect custom styling in a DataFrame, to account for changes in Pandas 1.4.\r\n\r\nOur DataFrame styling support is based on internal Pandas APIs, so they're always subject to change out from underneath us. In general, we'd prefer to only pass `display_value` data to the frontend when a DataFrame cell has been custom-formatted by the user, to save on bandwidth. However, Panda's Styler's internals are private, and it doesn't give us a consistent way of testing whether a cell has a custom `display_value` or not. \r\n\r\nPrior to Pandas 1.4, we could test whether a cell's `display_value` differed from its `value`, and only stick the `display_value` in the protobuf when that was the case. In 1.4, an unmodified Styler will contain `display_value` strings for all cells, regardless of whether any formatting has been applied to that cell, so we no longer have this ability (or at least I couldn't figure out a reasonable way to test for this). \r\n\r\nSo instead, as of this PR, calling `st._legacy_dataframe(df.styler)` will *always* result in `display_value` strings being written to the dataframe protobuf (even though there isn't any custom formatting). This means that styled DataFrames may result in more data being sent to the frontend now than was the case before. In practice, I don't think this is a big deal - only the legacy DataFrame code has styling support; and often, if you're styling a DataFrame, you're customizing the formatting on most or all of its cells anyway.\r\n\r\nI also made a number of small type-safety changes as I was working with the dataframe code, and those are all in the PR as well. 
(I've left a PR comment under the actual logic changes.)", "code": "def test_format_float_precision(self, st_element, get_proto):\n \n values = [3.14, 3.1]\n display_values = [\"3.14\", \"3.10\"]\n\n df = pd.DataFrame({\"test\": values})\n\n st_element(df.style.format({\"test\": \"{:.2f}\"}))\n\n proto_df = get_proto(self._get_element())\n self._assert_column_display_values(proto_df, 0, display_values)\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 73, "n_words": 24, "vocab_size": 21, "complexity": 1, "nloc": 7, "token_counts": 75, "n_ast_nodes": 121, "n_identifiers": 14, "d_id": 26374, "documentation": { "docstring": "Tests DataFrame.style.format() with floats.\n By default, the frontend will format any unstyled DataFrame float\n with 4 digits after the decimal. If we have any floating point styling\n in a DataFrame, our display_values should be filled in even for\n cells whose display_value == value.\n ", "n_words": 43, "vocab_size": 39, "n_whitespaces": 78, "language": "en" } }, { "id": 65151, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/asset_depreciations_and_balances/asset_depreciations_and_balances.py", "file_name": "asset_depreciations_and_balances.py", "fun_name": "get_asset_categories", "commit_message": "style: format code with black", "code": "def get_asset_categories(filters):\n\treturn frappe.db.sql(\n\t\t,\n\t\t{\"to_date\": filters.to_date, \"from_date\": filters.from_date, \"company\": filters.company},\n\t\tas_dict=1,\n\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 7, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 47, "token_counts": 39, "n_ast_nodes": 64, "n_identifiers": 9, "d_id": 13809, "documentation": { "docstring": "\n\t\tSELECT asset_category,\n\t\t\t ifnull(sum(case when purchase_date < %(from_date)s then\n\t\t\t\t\t\t\t case when ifnull(disposal_date, 0) = 0 or disposal_date >= %(from_date)s then\n\t\t\t\t\t\t\t\t\tgross_purchase_amount\n\t\t\t\t\t\t\t else\n\t\t\t\t\t\t\t\t\t0\n\t\t\t\t\t\t\t end\n\t\t\t\t\t\t else\n\t\t\t\t\t\t\t\t0\n\t\t\t\t\t\t end), 0) as cost_as_on_from_date,\n\t\t\t ifnull(sum(case when purchase_date >= %(from_date)s then\n\t\t\t \t\t\t\t\t\tgross_purchase_amount\n\t\t\t \t\t\t\t else\n\t\t\t \t\t\t\t \t\t0\n\t\t\t \t\t\t\t end), 0) as cost_of_new_purchase,\n\t\t\t ifnull(sum(case when ifnull(disposal_date, 0) != 0\n\t\t\t \t\t\t\t\t\tand disposal_date >= %(from_date)s\n\t\t\t \t\t\t\t\t\tand disposal_date <= %(to_date)s then\n\t\t\t\t\t\t\t case when status = \"Sold\" then\n\t\t\t\t\t\t\t \t\tgross_purchase_amount\n\t\t\t\t\t\t\t else\n\t\t\t\t\t\t\t \t\t0\n\t\t\t\t\t\t\t end\n\t\t\t\t\t\t else\n\t\t\t\t\t\t\t\t0\n\t\t\t\t\t\t end), 0) as cost_of_sold_asset,\n\t\t\t ifnull(sum(case when ifnull(disposal_date, 0) != 0\n\t\t\t \t\t\t\t\t\tand disposal_date >= %(from_date)s\n\t\t\t \t\t\t\t\t\tand disposal_date <= %(to_date)s then\n\t\t\t\t\t\t\t case when status = \"Scrapped\" then\n\t\t\t\t\t\t\t \t\tgross_purchase_amount\n\t\t\t\t\t\t\t else\n\t\t\t\t\t\t\t \t\t0\n\t\t\t\t\t\t\t end\n\t\t\t\t\t\t else\n\t\t\t\t\t\t\t\t0\n\t\t\t\t\t\t end), 0) as cost_of_scrapped_asset\n\t\tfrom `tabAsset`\n\t\twhere docstatus=1 and company=%(company)s and purchase_date <= %(to_date)s\n\t\tgroup by asset_category\n\t", "n_words": 117, "vocab_size": 40, "n_whitespaces": 179, 
"language": "en" } }, { "id": 26315, "commit_id": "41b87559118f560c223f83d405efe9b406701d17", "repo": "saleor", "path": "saleor/graphql/core/tests/test_graphql.py", "file_name": "test_graphql.py", "fun_name": "test_get_nodes_for_order_with_int_id", "commit_message": "Migrate order id from int to UUID (#9324)\n\n* Add migration to change order id from int to UUID (#9281)\r\n\r\n* Change order token to uuid\r\n\r\n* Migrate order id to uuid\r\n\r\n* Fix failing tests\r\n\r\n* Apply code review suggestions\r\n\r\n* Fix payment migration dependencies\r\n\r\n* Fix typo in order migration name\r\n\r\n* Handle old order ids for order queries\r\n\r\n* Hanlde old order ids for order mutations\r\n\r\n* Add order relation to GiftCardEvent model\r\n\r\n* Deprecate order token related queries and fields (#9295)\r\n\r\n* Deprecate order.token field\r\n\r\n* Update description of orderByToken query\r\n\r\n* Update prepare_order_search_document_value method\r\n\r\n* Update changelog\r\n\r\n* Update schema file", "code": "def test_get_nodes_for_order_with_int_id(order_list):\n \n order_models.Order.objects.update(use_old_id=True)\n\n # given\n global_ids = [to_global_id(\"Order\", order.number) for order in order_list]\n\n # Make sure function works even if duplicated ids are provided\n global_ids.append(to_global_id(\"Order\", order_list[0].number))\n\n # when\n orders = get_nodes(global_ids, Order)\n\n # then\n assert orders == order_list\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 68, "n_words": 38, "vocab_size": 33, "complexity": 2, "nloc": 6, "token_counts": 62, "n_ast_nodes": 105, "n_identifiers": 14, "d_id": 4963, "documentation": { "docstring": "Ensure that `get_nodes` returns correct nodes, when old id is used\n for orders with the `use_old_id` flag set to True.", "n_words": 20, "vocab_size": 20, "n_whitespaces": 22, "language": "en" } }, { "id": 35595, "commit_id": "84eaa6acf582206dba33135727dc3bfff05a7e9c", "repo": "transformers", "path": "src/transformers/models/convnext/modeling_tf_convnext.py", "file_name": "modeling_tf_convnext.py", "fun_name": "serving", "commit_message": "Add TFConvNextModel (#15750)\n\n* feat: initial implementation of convnext in tensorflow.\r\n\r\n* fix: sample code for the classification model.\r\n\r\n* chore: added checked for from the classification model.\r\n\r\n* chore: set bias initializer in the classification head.\r\n\r\n* chore: updated license terms.\r\n\r\n* chore: removed ununsed imports\r\n\r\n* feat: enabled argument during using drop_path.\r\n\r\n* chore: replaced tf.identity with layers.Activation(linear).\r\n\r\n* chore: edited default checkpoint.\r\n\r\n* fix: minor bugs in the initializations.\r\n\r\n* partial-fix: tf model errors for loading pretrained pt weights.\r\n\r\n* partial-fix: call method updated\r\n\r\n* partial-fix: cross loading of weights (4x3 variables to be matched)\r\n\r\n* chore: removed unneeded comment.\r\n\r\n* removed playground.py\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* fix: renaming TFConvNextStage conv and layer norm layers\r\n\r\n* chore: added initializers and other minor additions.\r\n\r\n* chore: added initializers and other minor additions.\r\n\r\n* add: tests for convnext.\r\n\r\n* fix: integration tester class.\r\n\r\n* fix: issues mentioned in pr feedback (round 1).\r\n\r\n* fix: how output_hidden_states arg is propoagated inside the network.\r\n\r\n* feat: handling of arg for pure 
cnn models.\r\n\r\n* chore: added a note on equal contribution in model docs.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* feat: encapsulation for the convnext trunk.\r\n\r\n* Fix variable naming; Test-related corrections; Run make fixup\r\n\r\n* chore: added Joao as a contributor to convnext.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* chore: corrected copyright year and added comment on NHWC.\r\n\r\n* chore: fixed the black version and ran formatting.\r\n\r\n* chore: ran make style.\r\n\r\n* chore: removed from_pt argument from test, ran make style.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* fix: tests in the convnext subclass, ran make style.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* rebasing\r\n\r\n* rebasing and removing playground.py.\r\n\r\n* chore: moved convnext test to the correct location\r\n\r\n* fix: locations for the test file of convnext.\r\n\r\n* fix: convnext tests.\r\n\r\n* chore: applied sgugger's suggestion for dealing w/ output_attentions.\r\n\r\n* chore: added comments.\r\n\r\n* chore: applied updated quality enviornment style.\r\n\r\n* chore: applied formatting with quality enviornment.\r\n\r\n* chore: revert to the previous tests/test_modeling_common.py.\r\n\r\n* chore: revert to the original test_modeling_common.py\r\n\r\n* chore: revert to previous states for test_modeling_tf_common.py and modeling_tf_utils.py\r\n\r\n* fix: tests for convnext.\r\n\r\n* chore: removed output_attentions argument from convnext config.\r\n\r\n* chore: revert to the earlier tf utils.\r\n\r\n* fix: output shapes of the hidden states\r\n\r\n* chore: removed unnecessary comment\r\n\r\n* chore: reverting to the right test_modeling_tf_common.py.\r\n\r\n* Styling nits\r\n\r\nCo-authored-by: ariG23498 \r\nCo-authored-by: Joao Gante \r\nCo-authored-by: Sylvain Gugger ", "code": "def serving(self, inputs):\n \n return self.call(inputs)\n\n\nCONVNEXT_START_DOCSTRING = r\n\nCONVNEXT_INPUTS_DOCSTRING = r\n\n\n@add_start_docstrings(\n \"The bare ConvNext model outputting raw features without any specific head on top.\",\n CONVNEXT_START_DOCSTRING,\n)", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@add_start_docstrings(\n \"The bare ConvNext model outputting raw features without any specific head on top.\",\n CONVNEXT_START_DOCSTRING,\n)", "n_ast_errors": 1, "ast_levels": 7, "n_whitespaces": 43, "n_words": 27, "vocab_size": 25, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 54, "n_identifiers": 7, "d_id": 6512, "documentation": { "docstring": "\n Method used for serving the model.\n\n Args:\n inputs (`Dict[str, tf.Tensor]`):\n The input of the saved model as a dictionary of tensors.\n \n This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads\n etc.)\n\n This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. 
Use it\n as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and\n behavior.\n\n \n\n TF 2.0 models accepts two formats as inputs:\n\n - having all inputs as keyword arguments (like PyTorch models), or\n - having all inputs as a list, tuple or dict in the first positional arguments.\n\n This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the\n tensors in the first argument of the model call function: `model(inputs)`.\n\n \n\n Parameters:\n config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n\n Args:\n pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`ConvNextFeatureExtractor`]. See\n [`ConvNextFeatureExtractor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail. This argument can be used only in eager mode, in graph mode the value in the config will be\n used instead.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used\n in eager mode, in graph mode the value will always be set to True.\n", "n_words": 298, "vocab_size": 171, "n_whitespaces": 518, "language": "en" } }, { "id": 104413, "commit_id": "e35be138148333078284b942ccc9ed7b1d826f97", "repo": "datasets", "path": "src/datasets/table.py", "file_name": "table.py", "fun_name": "to_pydict", "commit_message": "Update docs to new frontend/UI (#3690)\n\n* WIP: update docs to new UI\r\n\r\n* make style\r\n\r\n* Rm unused\r\n\r\n* inject_arrow_table_documentation __annotations__\r\n\r\n* hasattr(arrow_table_method, \"__annotations__\")\r\n\r\n* Update task_template.rst\r\n\r\n* Codeblock PT-TF-SPLIT\r\n\r\n* Convert loading scripts\r\n\r\n* Convert docs to mdx\r\n\r\n* Fix mdx\r\n\r\n* Add \r\n\r\n* Convert mdx tables\r\n\r\n* Fix codeblock\r\n\r\n* Rm unneded hashlinks\r\n\r\n* Update index.mdx\r\n\r\n* Redo dev change\r\n\r\n* Rm circle ci `build_doc` & `deploy_doc`\r\n\r\n* Rm unneeded files\r\n\r\n* Update docs reamde\r\n\r\n* Standardize to `Example::`\r\n\r\n* mdx logging levels doc\r\n\r\n* Table properties inject_arrow_table_documentation\r\n\r\n* ``` to ```py mdx\r\n\r\n* Add Tips mdx\r\n\r\n* important,None -> \r\n\r\n* More misc\r\n\r\n* Center imgs\r\n\r\n* Update instllation page\r\n\r\n* `setup.py` docs section\r\n\r\n* Rm imgs since they are in hf.co\r\n\r\n* Update docs/source/access.mdx\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\n\r\n* Update index mdx\r\n\r\n* Update docs/source/access.mdx\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\n\r\n* just `Dataset` obj\r\n\r\n* Addedversion just italics\r\n\r\n* Update ReadInstruction doc example syntax\r\n\r\n* Change docstring for `prepare_for_task`\r\n\r\n* Chore\r\n\r\n* Remove `code` syntax from headings\r\n\r\n* Rm `code` syntax from headings\r\n\r\n* Hashlink backward compatability\r\n\r\n* S3FileSystem doc\r\n\r\n* S3FileSystem doc 
updates\r\n\r\n* index.mdx updates\r\n\r\n* Add darkmode gifs\r\n\r\n* Index logo img css classes\r\n\r\n* Index mdx dataset logo img size\r\n\r\n* Docs for DownloadMode class\r\n\r\n* Doc DownloadMode table\r\n\r\n* format docstrings\r\n\r\n* style\r\n\r\n* Add doc builder scripts (#3790)\r\n\r\n* add doc builder scripts\r\n\r\n* fix docker image\r\n\r\n* Docs new UI actions no self hosted (#3793)\r\n\r\n* No self hosted\r\n\r\n* replace doc injection by actual docstrings\r\n\r\n* Docstring formatted\r\n\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Mishig Davaadorj \r\n\r\nCo-authored-by: Lysandre Debut \r\nCo-authored-by: Mishig Davaadorj \r\n\r\n* Rm notebooks from docs actions since they dont exi\r\n\r\n* Update tsting branch\r\n\r\n* More docstring\r\n\r\n* Chore\r\n\r\n* bump up node version\r\n\r\n* bump up node\r\n\r\n* ``` -> ```py for audio_process.mdx\r\n\r\n* Update .github/workflows/build_documentation.yml\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* Uodate dev doc build\r\n\r\n* remove run on PR\r\n\r\n* fix action\r\n\r\n* Fix gh doc workflow\r\n\r\n* forgot this change when merging master\r\n\r\n* Update build doc\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\nCo-authored-by: Lysandre Debut ", "code": "def to_pydict(self, *args, **kwargs):\n \n return self.table.to_pydict(*args, **kwargs)\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 25, "n_ast_nodes": 41, "n_identifiers": 5, "d_id": 21849, "documentation": { "docstring": "\n Convert the Table to a dict or OrderedDict.\n\n Returns:\n :obj:`dict`\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 43, "language": "en" } }, { "id": 71246, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/templatetags/wagtailadmin_tags.py", "file_name": "wagtailadmin_tags.py", "fun_name": "avatar_url", "commit_message": "Reformat with black", "code": "def avatar_url(user, size=50, gravatar_only=False):\n \n\n if (\n not gravatar_only\n and hasattr(user, \"wagtail_userprofile\")\n and user.wagtail_userprofile.avatar\n ):\n return user.wagtail_userprofile.avatar.url\n\n if hasattr(user, \"email\"):\n gravatar_url = get_gravatar_url(user.email, size=size)\n if gravatar_url is not None:\n return gravatar_url\n\n return versioned_static_func(\"wagtailadmin/images/default-user-avatar.png\")\n\n\n@register.simple_tag", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "@register.simple_tag", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 100, "n_words": 33, "vocab_size": 24, "complexity": 6, "nloc": 12, "token_counts": 74, "n_ast_nodes": 127, "n_identifiers": 14, "d_id": 15646, "documentation": { "docstring": "\n A template tag that receives a user and size and return\n the appropriate avatar url for that user.\n Example usage: {% avatar_url request.user 50 %}\n ", "n_words": 25, "vocab_size": 23, "n_whitespaces": 38, "language": "en" } }, { "id": 110326, "commit_id": "383de519505964ed879c40b23ef36e90c17ebe0d", "repo": "matplotlib", "path": "lib/matplotlib/axes/_base.py", "file_name": "_base.py", "fun_name": "get_position", "commit_message": "[Doc] fix more spelling and 
grammar", "code": "def get_position(self, original=False):\n \n if original:\n return self._originalPosition.frozen()\n else:\n locator = self.get_axes_locator()\n if not locator:\n self.apply_aspect()\n return self._position.frozen()\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 97, "n_words": 17, "vocab_size": 15, "complexity": 3, "nloc": 8, "token_counts": 47, "n_ast_nodes": 81, "n_identifiers": 9, "d_id": 24065, "documentation": { "docstring": "\n Return the position of the Axes within the figure as a `.Bbox`.\n\n Parameters\n ----------\n original : bool\n If ``True``, return the original position. Otherwise, return the\n active position. For an explanation of the positions see\n `.set_position`.\n\n Returns\n -------\n `.Bbox`\n\n ", "n_words": 39, "vocab_size": 30, "n_whitespaces": 129, "language": "en" } }, { "id": 222640, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/command/bdist_msi.py", "file_name": "bdist_msi.py", "fun_name": "back", "commit_message": "add python 3.10.4 for windows", "code": "def back(self, title, next, name = \"Back\", active = 1):\n \n if active:\n flags = 3 # Visible|Enabled\n else:\n flags = 1 # Visible\n return self.pushbutton(name, 180, self.h-27 , 56, 17, flags, title, next)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 83, "n_words": 33, "vocab_size": 27, "complexity": 2, "nloc": 6, "token_counts": 54, "n_ast_nodes": 81, "n_identifiers": 9, "d_id": 56682, "documentation": { "docstring": "Add a back button with a given title, the tab-next button,\n its name in the Control table, possibly initially disabled.\n\n Return the button, so that events can be associated", "n_words": 29, "vocab_size": 25, "n_whitespaces": 42, "language": "en" } }, { "id": 250111, "commit_id": "3ac412b4e2f8c5ba11dc962b8a9d871c1efdce9b", "repo": "synapse", "path": "tests/storage/test_client_ips.py", "file_name": "test_client_ips.py", "fun_name": "test_insert_new_client_ip_none_device_id", "commit_message": "Require types in tests.storage. 
(#14646)\n\nAdds missing type hints to `tests.storage` package\r\nand does not allow untyped definitions.", "code": "def test_insert_new_client_ip_none_device_id(self) -> None:\n \n self.reactor.advance(12345678)\n\n user_id = \"@user:id\"\n\n # Add & trigger the storage loop\n self.get_success(\n self.store.insert_client_ip(\n user_id, \"access_token\", \"ip\", \"user_agent\", None\n )\n )\n self.reactor.advance(200)\n self.pump(0)\n\n result = self.get_success(\n self.store.db_pool.simple_select_list(\n table=\"user_ips\",\n keyvalues={\"user_id\": user_id},\n retcols=[\"access_token\", \"ip\", \"user_agent\", \"device_id\", \"last_seen\"],\n desc=\"get_user_ip_and_agents\",\n )\n )\n\n self.assertEqual(\n result,\n [\n {\n \"access_token\": \"access_token\",\n \"ip\": \"ip\",\n \"user_agent\": \"user_agent\",\n \"device_id\": None,\n \"last_seen\": 12345678000,\n }\n ],\n )\n\n # Add another & trigger the storage loop\n self.get_success(\n self.store.insert_client_ip(\n user_id, \"access_token\", \"ip\", \"user_agent\", None\n )\n )\n self.reactor.advance(10)\n self.pump(0)\n\n result = self.get_success(\n self.store.db_pool.simple_select_list(\n table=\"user_ips\",\n keyvalues={\"user_id\": user_id},\n retcols=[\"access_token\", \"ip\", \"user_agent\", \"device_id\", \"last_seen\"],\n desc=\"get_user_ip_and_agents\",\n )\n )\n # Only one result, has been upserted.\n self.assertEqual(\n result,\n [\n {\n \"access_token\": \"access_token\",\n \"ip\": \"ip\",\n \"user_agent\": \"user_agent\",\n \"device_id\": None,\n \"last_seen\": 12345878000,\n }\n ],\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 824, "n_words": 116, "vocab_size": 55, "complexity": 1, "nloc": 61, "token_counts": 245, "n_ast_nodes": 431, "n_identifiers": 17, "d_id": 73277, "documentation": { "docstring": "\n An insert with a device ID of NULL will not create a new entry, but\n update an existing entry in the user_ips table.\n ", "n_words": 23, "vocab_size": 22, "n_whitespaces": 45, "language": "en" } }, { "id": 319783, "commit_id": "53baed03895f28f24113d376b089e3ef281b34ed", "repo": "paperless-ngx", "path": "src/documents/tests/test_api.py", "file_name": "test_api.py", "fun_name": "test_api_unset_storage_path", "commit_message": "Increases test coverage of storage paths", "code": "def test_api_unset_storage_path(self, m):\n \n m.return_value = \"OK\"\n\n response = self.client.post(\n \"/api/documents/bulk_edit/\",\n json.dumps(\n {\n \"documents\": [self.doc1.id],\n \"method\": \"set_storage_path\",\n \"parameters\": {\"storage_path\": None},\n },\n ),\n content_type=\"application/json\",\n )\n\n self.assertEqual(response.status_code, 200)\n m.assert_called_once()\n args, kwargs = m.call_args\n\n self.assertListEqual(args[0], [self.doc1.id])\n self.assertEqual(kwargs[\"storage_path\"], None)\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 228, "n_words": 34, "vocab_size": 32, "complexity": 1, "nloc": 18, "token_counts": 109, "n_ast_nodes": 182, "n_identifiers": 19, "d_id": 116996, "documentation": { "docstring": "\n GIVEN:\n - API data to clear/unset the storage path of a document\n WHEN:\n - API is called\n THEN:\n - set_storage_path is called with correct document IDs and None storage_path\n ", "n_words": 29, "vocab_size": 23, "n_whitespaces": 91, "language": "en" } }, { "id": 249078, "commit_id": 
"c97042f7eef3748e17c90e48a4122389a89c4735", "repo": "synapse", "path": "tests/rest/admin/test_device.py", "file_name": "test_device.py", "fun_name": "test_update_display_name", "commit_message": "Use literals in place of `HTTPStatus` constants in tests (#13469)", "code": "def test_update_display_name(self) -> None:\n \n # Set new display_name\n channel = self.make_request(\n \"PUT\",\n self.url,\n access_token=self.admin_user_tok,\n content={\"display_name\": \"new displayname\"},\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n\n # Check new display_name\n channel = self.make_request(\n \"GET\",\n self.url,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(\"new displayname\", channel.json_body[\"display_name\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 185, "n_words": 38, "vocab_size": 26, "complexity": 1, "nloc": 18, "token_counts": 99, "n_ast_nodes": 160, "n_identifiers": 12, "d_id": 72585, "documentation": { "docstring": "\n Tests a normal successful update of display name\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 72395, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/views/collections.py", "file_name": "collections.py", "fun_name": "_user_may_move_collection", "commit_message": "Reformat with black", "code": "def _user_may_move_collection(self, user, instance):\n \n if user.is_active and user.is_superuser:\n return True\n else:\n permissions = self.permission_policy._get_permission_objects_for_actions(\n [\"add\", \"edit\", \"delete\"]\n )\n return not GroupCollectionPermission.objects.filter(\n group__user=user,\n permission__in=permissions,\n collection=instance,\n ).exists()\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 161, "n_words": 25, "vocab_size": 24, "complexity": 3, "nloc": 12, "token_counts": 64, "n_ast_nodes": 104, "n_identifiers": 16, "d_id": 15884, "documentation": { "docstring": "\n Is this instance used for assigning GroupCollectionPermissions to the user?\n If so, this user is not allowed do move the collection to a new part of the tree\n ", "n_words": 28, "vocab_size": 24, "n_whitespaces": 50, "language": "en" } }, { "id": 259259, "commit_id": "751c5cd05ff545c20ad0b09ac491c07f31e4cd56", "repo": "scikit-learn", "path": "sklearn/_loss/tests/test_loss.py", "file_name": "test_loss.py", "fun_name": "test_predict_proba", "commit_message": "TST ensure that sklearn/_loss/tests/test_loss.py is seed insensitive (#22847)\n\nCo-authored-by: Christian Lorentzen ", "code": "def test_predict_proba(loss, global_random_seed):\n \n n_samples = 20\n y_true, raw_prediction = random_y_true_raw_prediction(\n loss=loss,\n n_samples=n_samples,\n y_bound=(-100, 100),\n raw_bound=(-5, 5),\n seed=global_random_seed,\n )\n\n if hasattr(loss, \"predict_proba\"):\n proba = loss.predict_proba(raw_prediction)\n assert proba.shape == (n_samples, loss.n_classes)\n assert np.sum(proba, axis=1) == approx(1, rel=1e-11)\n\n if hasattr(loss, \"gradient_proba\"):\n for grad, proba in (\n (None, None),\n (None, np.empty_like(raw_prediction)),\n (np.empty_like(raw_prediction), None),\n (np.empty_like(raw_prediction), np.empty_like(raw_prediction)),\n ):\n grad, proba = loss.gradient_proba(\n y_true=y_true,\n 
raw_prediction=raw_prediction,\n sample_weight=None,\n gradient_out=grad,\n proba_out=proba,\n )\n assert proba.shape == (n_samples, loss.n_classes)\n assert np.sum(proba, axis=1) == approx(1, rel=1e-11)\n assert_allclose(\n grad,\n loss.gradient(\n y_true=y_true,\n raw_prediction=raw_prediction,\n sample_weight=None,\n gradient_out=None,\n ),\n )\n\n\n@pytest.mark.parametrize(\"loss\", ALL_LOSSES)\n@pytest.mark.parametrize(\"sample_weight\", [None, \"range\"])\n@pytest.mark.parametrize(\"dtype\", (np.float32, np.float64))\n@pytest.mark.parametrize(\"order\", (\"C\", \"F\"))", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"loss\", ALL_LOSSES)\n@pytest.mark.parametrize(\"sample_weight\", [None, \"range\"])\n@pytest.mark.parametrize(\"dtype\", (np.float32, np.float64))\n@pytest.mark.parametrize(\"order\", (\"C\", \"F\"))", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 483, "n_words": 93, "vocab_size": 62, "complexity": 4, "nloc": 38, "token_counts": 248, "n_ast_nodes": 453, "n_identifiers": 34, "d_id": 75678, "documentation": { "docstring": "Test that predict_proba and gradient_proba work as expected.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 164685, "commit_id": "047137ce2619cfe2027e3999dfb92eb614d9a485", "repo": "pandas", "path": "pandas/io/excel/_base.py", "file_name": "_base.py", "fun_name": "handles", "commit_message": "DEP: Protect some ExcelWriter attributes (#45795)\n\n* DEP: Deprecate ExcelWriter attributes\r\n\r\n* DEP: Deprecate ExcelWriter attributes\r\n\r\n* Fixup for test\r\n\r\n* Move tests and restore check_extension\r\n\r\ny\r\n\r\n* Deprecate xlwt fm_date and fm_datetime; doc improvements", "code": "def handles(self):\n \n self._deprecate(\"handles\")\n return self._handles\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 26, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 3, "token_counts": 16, "n_ast_nodes": 31, "n_identifiers": 4, "d_id": 39590, "documentation": { "docstring": "\n Handles to Excel sheets.\n\n .. 
deprecated:: 1.5.0\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 29, "language": "en" } }, { "id": 197456, "commit_id": "9a3ffc6781bd44c47cf49e128ef154389c32876a", "repo": "sympy", "path": "sympy/physics/vector/vector.py", "file_name": "vector.py", "fun_name": "xreplace", "commit_message": "Some pep8 cleanup of sympy.physics.vector.", "code": "def xreplace(self, rule):\n \n\n new_args = []\n for mat, frame in self.args:\n mat = mat.xreplace(rule)\n new_args.append([mat, frame])\n return Vector(new_args)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 68, "n_words": 18, "vocab_size": 17, "complexity": 2, "nloc": 6, "token_counts": 44, "n_ast_nodes": 71, "n_identifiers": 9, "d_id": 48559, "documentation": { "docstring": "Replace occurrences of objects within the measure numbers of the\n vector.\n\n Parameters\n ==========\n\n rule : dict-like\n Expresses a replacement rule.\n\n Returns\n =======\n\n Vector\n Result of the replacement.\n\n Examples\n ========\n\n >>> from sympy import symbols, pi\n >>> from sympy.physics.vector import ReferenceFrame\n >>> A = ReferenceFrame('A')\n >>> x, y, z = symbols('x y z')\n >>> ((1 + x*y) * A.x).xreplace({x: pi})\n (pi*y + 1)*A.x\n >>> ((1 + x*y) * A.x).xreplace({x: pi, y: 2})\n (1 + 2*pi)*A.x\n\n Replacements occur only if an entire node in the expression tree is\n matched:\n\n >>> ((x*y + z) * A.x).xreplace({x*y: pi})\n (z + pi)*A.x\n >>> ((x*y*z) * A.x).xreplace({x*y: pi})\n x*y*z*A.x\n\n ", "n_words": 103, "vocab_size": 74, "n_whitespaces": 293, "language": "en" } }, { "id": 28887, "commit_id": "89786f24b5296a23c093fcfea90893292473b275", "repo": "saleor", "path": "saleor/core/utils/events.py", "file_name": "events.py", "fun_name": "call_event", "commit_message": "[Change] Change the way transactions are handled in mutations (#10606)\n\n* refactor account, app, attribute mutations\r\n\r\n* add checkout refactor\r\n\r\n* Change transactions on all mutations to context, and use call_event method to trigger webhooks\r\n\r\n* remove comments\r\n\r\n* refactor call_event and move app load outside transaction in few places\r\n\r\n* remove redundant code from merge conflicts\r\n\r\n* switch calling call_event to more readable way\r\n\r\n* fix missed event call\r\n\r\n* refactor and add transaction in permission group\r\n\r\n* move call_event function to utils, fix few event calls after review\r\n\r\n* fix one event call after review\r\n\r\n* fix transaction scope", "code": "def call_event(func_obj, *func_args):\n \n connection = transaction.get_connection()\n if connection.in_atomic_block:\n transaction.on_commit(lambda: func_obj(*func_args))\n else:\n func_obj(*func_args)\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 38, "n_words": 12, "vocab_size": 12, "complexity": 2, "nloc": 6, "token_counts": 40, "n_ast_nodes": 70, "n_identifiers": 8, "d_id": 5184, "documentation": { "docstring": "Call webhook event with given args.\n\n Ensures that in atomic transaction event is called on_commit.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 21, "language": "en" } }, { "id": 296361, "commit_id": "3b2aae5045f9f08dc8f174c5d975852588e1a132", "repo": "core", "path": "homeassistant/components/mqtt/mixins.py", "file_name": "mixins.py", "fun_name": "_cleanup_discovery_on_remove", "commit_message": "Refactor MQTT discovery (#67966)\n\n* Proof of concept\r\n\r\n* 
remove notify platform\r\n\r\n* remove loose test\r\n\r\n* Add rework from #67912 (#1)\r\n\r\n* Move notify serviceupdater to Mixins\r\n\r\n* Move tag discovery handler to Mixins\r\n\r\n* fix tests\r\n\r\n* Add typing for async_load_platform_helper\r\n\r\n* Add add entry unload support for notify platform\r\n\r\n* Simplify discovery updates\r\n\r\n* Remove not needed extra logic\r\n\r\n* Cleanup inrelevant or duplicate code\r\n\r\n* reuse update_device and move to mixins\r\n\r\n* Remove notify platform\r\n\r\n* revert changes to notify platform\r\n\r\n* Rename update class\r\n\r\n* unify tag entry setup\r\n\r\n* Use shared code for device_trigger `update_device`\r\n\r\n* PoC shared dispatcher for device_trigger\r\n\r\n* Fix bugs\r\n\r\n* Improve typing - remove async_update\r\n\r\n* Unload config_entry and tests\r\n\r\n* Release dispatcher after setup and deduplicate\r\n\r\n* closures to methods, revert `in` to `=`, updates\r\n\r\n* Re-add update support for tag platform\r\n\r\n* Re-add update support for device-trigger platform\r\n\r\n* Cleanup rediscovery code revert related changes\r\n\r\n* Undo discovery code shift\r\n\r\n* Update homeassistant/components/mqtt/mixins.py\r\n\r\nCo-authored-by: Erik Montnemery \r\n\r\n* Update homeassistant/components/mqtt/device_trigger.py\r\n\r\nCo-authored-by: Erik Montnemery \r\n\r\n* Update homeassistant/components/mqtt/mixins.py\r\n\r\nCo-authored-by: Erik Montnemery \r\n\r\n* revert doc string changes\r\n\r\n* move conditions\r\n\r\n* typing and check config_entry_id\r\n\r\n* Update homeassistant/components/mqtt/mixins.py\r\n\r\nCo-authored-by: Erik Montnemery \r\n\r\n* cleanup not used attribute\r\n\r\n* Remove entry_unload code and tests\r\n\r\n* update comment\r\n\r\n* add second comment\r\n\r\nCo-authored-by: Erik Montnemery ", "code": "def _cleanup_discovery_on_remove(self) -> None:\n \n if self._discovery_data and not self._removed_from_hass:\n stop_discovery_updates(\n self.hass, self._discovery_data, self._remove_discovery_updated\n )\n self._removed_from_hass = True\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 79, "n_words": 17, "vocab_size": 17, "complexity": 3, "nloc": 7, "token_counts": 37, "n_ast_nodes": 60, "n_identifiers": 7, "d_id": 95345, "documentation": { "docstring": "Stop listening to signal and cleanup discovery data.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 249111, "commit_id": "c97042f7eef3748e17c90e48a4122389a89c4735", "repo": "synapse", "path": "tests/rest/admin/test_media.py", "file_name": "test_media.py", "fun_name": "test_keep_media_by_date", "commit_message": "Use literals in place of `HTTPStatus` constants in tests (#13469)", "code": "def test_keep_media_by_date(self) -> None:\n \n\n # timestamp before upload\n now_ms = self.clock.time_msec()\n server_and_media_id = self._create_media()\n\n self._access_media(server_and_media_id)\n\n channel = self.make_request(\n \"POST\",\n self.url + \"?before_ts=\" + str(now_ms),\n access_token=self.admin_user_tok,\n )\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(0, channel.json_body[\"total\"])\n\n self._access_media(server_and_media_id)\n\n # timestamp after upload\n now_ms = self.clock.time_msec()\n channel = self.make_request(\n \"POST\",\n self.url + \"?before_ts=\" + str(now_ms),\n access_token=self.admin_user_tok,\n )\n self.assertEqual(200, channel.code, msg=channel.json_body)\n 
self.assertEqual(1, channel.json_body[\"total\"])\n self.assertEqual(\n server_and_media_id.split(\"/\")[1],\n channel.json_body[\"deleted_media\"][0],\n )\n\n self._access_media(server_and_media_id, False)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 282, "n_words": 61, "vocab_size": 35, "complexity": 1, "nloc": 28, "token_counts": 188, "n_ast_nodes": 304, "n_identifiers": 19, "d_id": 72618, "documentation": { "docstring": "\n Tests that media is not deleted if it is newer than `before_ts`\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 27, "language": "en" } }, { "id": 124496, "commit_id": "56716a1c1b6f9aae3967b910a799bb6af9f2c5d9", "repo": "ray", "path": "python/ray/_private/test_utils.py", "file_name": "test_utils.py", "fun_name": "external_ray_cluster_activity_hook1", "commit_message": "[dashboard] Add `RAY_CLUSTER_ACTIVITY_HOOK` to `/api/component_activities` (#26297)\n\nAdd external hook to /api/component_activities endpoint in dashboard snapshot router\r\nChange is_active field of RayActivityResponse to take an enum RayActivityStatus instead of bool. This is a backward incompatible change, but should be ok because [dashboard] Add component_activities API #25996 wasn't included in any branch cuts. RayActivityResponse now supports informing when there was an error getting the activity observation and the reason.", "code": "def external_ray_cluster_activity_hook1():\n \n global ray_cluster_activity_hook_counter\n ray_cluster_activity_hook_counter += 1\n return {\n \"test_component1\": TestRayActivityResponse(\n is_active=\"ACTIVE\",\n reason=f\"Counter: {ray_cluster_activity_hook_counter}\",\n )\n }\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 67, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 9, "token_counts": 27, "n_ast_nodes": 53, "n_identifiers": 5, "d_id": 27613, "documentation": { "docstring": "\n Example external hook for test_component_activities_hook.\n\n Returns valid response and increments counter in `reason`\n field on each call.\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 30, "language": "en" } }, { "id": 310495, "commit_id": "63f8e437ed0bf79d72286853b7c2e7c01abef91f", "repo": "core", "path": "tests/components/http/test_auth.py", "file_name": "test_auth.py", "fun_name": "test_auth_middleware_loaded_by_default", "commit_message": "Add Home Assistant Content user (#64337)", "code": "async def test_auth_middleware_loaded_by_default(hass):\n \n with patch(\"homeassistant.components.http.async_setup_auth\") as mock_setup:\n await async_setup_component(hass, \"http\", {\"http\": {}})\n\n assert len(mock_setup.mock_calls) == 1\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 32, "n_words": 16, "vocab_size": 16, "complexity": 1, "nloc": 4, "token_counts": 37, "n_ast_nodes": 71, "n_identifiers": 7, "d_id": 109180, "documentation": { "docstring": "Test accessing to server from banned IP when feature is off.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 156749, "commit_id": "2820bae493a49cb1d0a6e376985c5473b8f04fa8", "repo": "dask", "path": "dask/array/core.py", "file_name": "core.py", "fun_name": "sum", "commit_message": "Don't include docs in ``Array`` methods, just refer to module docs 
(#9244)\n\nCo-authored-by: James Bourbeau ", "code": "def sum(self, axis=None, dtype=None, keepdims=False, split_every=None, out=None):\n \n from dask.array.reductions import sum\n\n return sum(\n self,\n axis=axis,\n dtype=dtype,\n keepdims=keepdims,\n split_every=split_every,\n out=out,\n )\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 114, "n_words": 20, "vocab_size": 20, "complexity": 1, "nloc": 10, "token_counts": 60, "n_ast_nodes": 83, "n_identifiers": 10, "d_id": 36759, "documentation": { "docstring": "\n Return the sum of the array elements over the given axis.\n\n Refer to :func:`dask.array.sum` for full documentation.\n\n See Also\n --------\n dask.array.sum : equivalent function\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 67, "language": "en" } }, { "id": 86580, "commit_id": "c67c560f667e6fc7fee2c6d62ac3987ba54f89d5", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_metric_data.py", "file_name": "test_organization_metric_data.py", "fun_name": "test_limit_without_orderby_excess_groups_pruned", "commit_message": "feat(metrics): Standardize tests and fix overall flakiness [TET-437] (#39660)", "code": "def test_limit_without_orderby_excess_groups_pruned(self):\n \n for tag, tag_value in ((\"tag1\", \"group1\"), (\"tag1\", \"group2\")):\n self.store_release_health_metric(\n name=SessionMRI.SESSION.value,\n tags={tag: tag_value},\n value=10,\n minutes_before_now=4,\n )\n\n for tag, tag_value, numbers in (\n (\"tag1\", \"group2\", list(range(3))),\n (\"tag1\", \"group3\", list(range(3, 6))),\n ):\n for value in numbers:\n self.store_release_health_metric(\n name=SessionMRI.ERROR.value,\n tags={tag: tag_value},\n value=value,\n )\n\n for tag, tag_value, numbers in (\n (\"tag1\", \"group4\", list(range(3))),\n (\"tag1\", \"group5\", list(range(3, 6))),\n ):\n for value in numbers:\n self.store_release_health_metric(\n name=SessionMRI.DURATION.value,\n tags={tag: tag_value},\n value=value,\n )\n\n response = self.get_success_response(\n self.organization.slug,\n field=[\n f\"p50({SessionMetricKey.DURATION.value})\",\n SessionMetricKey.ERRORED.value,\n \"sum(sentry.sessions.session)\",\n ],\n statsPeriod=\"1h\",\n interval=\"1h\",\n groupBy=\"tag1\",\n per_page=3,\n )\n\n groups = response.data[\"groups\"]\n assert len(groups) == 3\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 600, "n_words": 86, "vocab_size": 52, "complexity": 6, "nloc": 42, "token_counts": 254, "n_ast_nodes": 401, "n_identifiers": 30, "d_id": 18131, "documentation": { "docstring": "\n Test that ensures that when requesting series data that is not ordered, if the limit of\n each query is not met, thereby a limit is not applied to the aueries and we end up with\n more groups than the limit then the excess number of groups should be pruned\n ", "n_words": 49, "vocab_size": 36, "n_whitespaces": 78, "language": "en" } }, { "id": 300696, "commit_id": "0584e84c30903aae07cf16898138ce4e1e8b6be7", "repo": "core", "path": "homeassistant/components/logbook/queries.py", "file_name": "queries.py", "fun_name": "_select_states", "commit_message": "Add MySQL index hints to logbook (#71864)\n\n* Add MySQL index hints to logbook\r\n\r\n* fix mysql query planner", "code": "def _select_states() -> Select:\n \n return select(\n literal(value=None, type_=sqlalchemy.Text).label(\"event_id\"),\n 
literal(value=EVENT_STATE_CHANGED, type_=sqlalchemy.String).label(\"event_type\"),\n literal(value=None, type_=sqlalchemy.Text).label(\"event_data\"),\n States.last_updated.label(\"time_fired\"),\n States.context_id.label(\"context_id\"),\n States.context_user_id.label(\"context_user_id\"),\n States.context_parent_id.label(\"context_parent_id\"),\n literal(value=None, type_=sqlalchemy.Text).label(\"shared_data\"),\n *STATE_COLUMNS,\n NOT_CONTEXT_ONLY,\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 100, "n_words": 21, "vocab_size": 19, "complexity": 1, "nloc": 14, "token_counts": 124, "n_ast_nodes": 200, "n_identifiers": 18, "d_id": 99556, "documentation": { "docstring": "Generate a states select that formats the states table as event rows.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 186658, "commit_id": "7d9e9a49005de7961e84d2a7c608db57dbab3046", "repo": "certbot", "path": "certbot-apache/certbot_apache/_internal/override_centos.py", "file_name": "override_centos.py", "fun_name": "_prepare_options", "commit_message": "Add typing to certbot.apache (#9071)\n\n* Add typing to certbot.apache\r\n\r\nCo-authored-by: Adrien Ferrand ", "code": "def _prepare_options(self) -> None:\n \n super()._prepare_options()\n if not self.options.restart_cmd_alt: # pragma: no cover\n raise ValueError(\"OS option restart_cmd_alt must be set for CentOS.\")\n self.options.restart_cmd_alt[0] = self.options.ctl\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 64, "n_words": 24, "vocab_size": 24, "complexity": 2, "nloc": 9, "token_counts": 42, "n_ast_nodes": 74, "n_identifiers": 7, "d_id": 45566, "documentation": { "docstring": "\n Override the options dictionary initialization in order to support\n alternative restart cmd used in CentOS.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 37, "language": "en" } }, { "id": 104423, "commit_id": "e35be138148333078284b942ccc9ed7b1d826f97", "repo": "datasets", "path": "src/datasets/table.py", "file_name": "table.py", "fun_name": "add_column", "commit_message": "Update docs to new frontend/UI (#3690)\n\n* WIP: update docs to new UI\r\n\r\n* make style\r\n\r\n* Rm unused\r\n\r\n* inject_arrow_table_documentation __annotations__\r\n\r\n* hasattr(arrow_table_method, \"__annotations__\")\r\n\r\n* Update task_template.rst\r\n\r\n* Codeblock PT-TF-SPLIT\r\n\r\n* Convert loading scripts\r\n\r\n* Convert docs to mdx\r\n\r\n* Fix mdx\r\n\r\n* Add \r\n\r\n* Convert mdx tables\r\n\r\n* Fix codeblock\r\n\r\n* Rm unneded hashlinks\r\n\r\n* Update index.mdx\r\n\r\n* Redo dev change\r\n\r\n* Rm circle ci `build_doc` & `deploy_doc`\r\n\r\n* Rm unneeded files\r\n\r\n* Update docs reamde\r\n\r\n* Standardize to `Example::`\r\n\r\n* mdx logging levels doc\r\n\r\n* Table properties inject_arrow_table_documentation\r\n\r\n* ``` to ```py mdx\r\n\r\n* Add Tips mdx\r\n\r\n* important,None -> \r\n\r\n* More misc\r\n\r\n* Center imgs\r\n\r\n* Update instllation page\r\n\r\n* `setup.py` docs section\r\n\r\n* Rm imgs since they are in hf.co\r\n\r\n* Update docs/source/access.mdx\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\n\r\n* Update index mdx\r\n\r\n* Update docs/source/access.mdx\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\n\r\n* just `Dataset` obj\r\n\r\n* Addedversion just 
italics\r\n\r\n* Update ReadInstruction doc example syntax\r\n\r\n* Change docstring for `prepare_for_task`\r\n\r\n* Chore\r\n\r\n* Remove `code` syntax from headings\r\n\r\n* Rm `code` syntax from headings\r\n\r\n* Hashlink backward compatability\r\n\r\n* S3FileSystem doc\r\n\r\n* S3FileSystem doc updates\r\n\r\n* index.mdx updates\r\n\r\n* Add darkmode gifs\r\n\r\n* Index logo img css classes\r\n\r\n* Index mdx dataset logo img size\r\n\r\n* Docs for DownloadMode class\r\n\r\n* Doc DownloadMode table\r\n\r\n* format docstrings\r\n\r\n* style\r\n\r\n* Add doc builder scripts (#3790)\r\n\r\n* add doc builder scripts\r\n\r\n* fix docker image\r\n\r\n* Docs new UI actions no self hosted (#3793)\r\n\r\n* No self hosted\r\n\r\n* replace doc injection by actual docstrings\r\n\r\n* Docstring formatted\r\n\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Mishig Davaadorj \r\n\r\nCo-authored-by: Lysandre Debut \r\nCo-authored-by: Mishig Davaadorj \r\n\r\n* Rm notebooks from docs actions since they dont exi\r\n\r\n* Update tsting branch\r\n\r\n* More docstring\r\n\r\n* Chore\r\n\r\n* bump up node version\r\n\r\n* bump up node\r\n\r\n* ``` -> ```py for audio_process.mdx\r\n\r\n* Update .github/workflows/build_documentation.yml\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* Uodate dev doc build\r\n\r\n* remove run on PR\r\n\r\n* fix action\r\n\r\n* Fix gh doc workflow\r\n\r\n* forgot this change when merging master\r\n\r\n* Update build doc\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\nCo-authored-by: Lysandre Debut ", "code": "def add_column(self, *args, **kwargs):\n \n raise NotImplementedError()\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 28, "n_identifiers": 5, "d_id": 21859, "documentation": { "docstring": "\n Add column to Table at position.\n\n A new table is returned with the column added, the original table\n object is left unchanged.\n\n Args:\n i (:obj:`int`):\n Index to place the column at.\n field_ (:obj:`Union[str, pyarrow.Field]`):\n If a string is passed then the type is deduced from the column\n data.\n column (:obj:`Union[pyarrow.Array, List[pyarrow.Array]]`):\n Column data.\n\n Returns:\n :class:`datasets.table.Table`: New table with the passed column added.\n ", "n_words": 62, "vocab_size": 43, "n_whitespaces": 209, "language": "en" } }, { "id": 195585, "commit_id": "f0194812568c83585ff09488fe7f67df300938cc", "repo": "rembg", "path": "versioneer.py", "file_name": "versioneer.py", "fun_name": "write_to_version_file", "commit_message": "add auto tag", "code": "def write_to_version_file(filename, versions):\n \n os.unlink(filename)\n contents = json.dumps(versions, sort_keys=True,\n indent=1, separators=(\",\", \": \"))\n with open(filename, \"w\") as f:\n f.write(SHORT_VERSION_PY % contents)\n\n print(\"set %s to '%s'\" % (filename, versions[\"version\"]))\n\n", "url": "https://github.com/danielgatis/rembg.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 74, "n_words": 27, "vocab_size": 26, "complexity": 1, "nloc": 7, "token_counts": 69, "n_ast_nodes": 118, "n_identifiers": 16, "d_id": 47300, "documentation": { "docstring": "Write the 
given version number to the given _version.py file.", "n_words": 10, "vocab_size": 8, "n_whitespaces": 9, "language": "en" } }, { "id": 228732, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/bar/marker/_colorbar.py", "file_name": "_colorbar.py", "fun_name": "minexponent", "commit_message": "switch to black .22", "code": "def minexponent(self):\n \n return self[\"minexponent\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 60405, "documentation": { "docstring": "\n Hide SI prefix for 10^n if |n| is below this number. This only\n has an effect when `tickformat` is \"SI\" or \"B\".\n\n The 'minexponent' property is a number and may be specified as:\n - An int or float in the interval [0, inf]\n\n Returns\n -------\n int|float\n ", "n_words": 46, "vocab_size": 43, "n_whitespaces": 105, "language": "en" } }, { "id": 120235, "commit_id": "edae0ac31f7493bbe3a7f845dd8f48fc9f5b5760", "repo": "jax", "path": "jax/experimental/sparse/bcoo.py", "file_name": "bcoo.py", "fun_name": "bcoo_sum_duplicates", "commit_message": "[sparse] make bcoo_sum_duplicates a primitive", "code": "def bcoo_sum_duplicates(mat, nse=None):\n \n data, indices = _bcoo_sum_duplicates(mat.data, mat.indices, spinfo=mat._info, nse=nse)\n return BCOO((data, indices), shape=mat.shape)\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 17, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 3, "token_counts": 49, "n_ast_nodes": 73, "n_identifiers": 10, "d_id": 26802, "documentation": { "docstring": "Sums duplicate indices within a BCOO array, returning an array with sorted indices.\n\n Args:\n mat : BCOO array\n nse : integer (optional). The number of specified elements in the output matrix. This must\n be specified for bcoo_sum_duplicates to be compatible with JIT and other JAX transformations.\n If not specified, the optimal nse will be computed based on the contents of the data and\n index arrays. If specified nse is larger than necessary, data and index arrays will be padded\n with standard fill values. 
If smaller than necessary, data elements will be dropped from the\n output matrix.\n\n Returns:\n mat_out : BCOO array with sorted indices and no duplicate indices.\n ", "n_words": 108, "vocab_size": 67, "n_whitespaces": 145, "language": "en" } }, { "id": 208718, "commit_id": "dc5bcc1c50892a5128fcf128af28887226144927", "repo": "ipython", "path": "IPython/core/history.py", "file_name": "history.py", "fun_name": "_run_sql", "commit_message": "This fixed the mixing of multiple history seen in #13631\n\nIt forces get_tail to put the current session last in the returned\nresults.", "code": "def _run_sql(self, sql, params, raw=True, output=False, latest=False):\n \n toget = 'source_raw' if raw else 'source'\n sqlfrom = \"history\"\n if output:\n sqlfrom = \"history LEFT JOIN output_history USING (session, line)\"\n toget = \"history.%s, output_history.output\" % toget\n if latest:\n toget += \", MAX(session * 128 * 1024 + line)\"\n this_querry = \"SELECT session, line, %s FROM %s \" % (toget, sqlfrom) + sql\n cur = self.db.execute(this_querry, params)\n if latest:\n cur = (row[:-1] for row in cur)\n if output: # Regroup into 3-tuples, and parse JSON\n return ((ses, lin, (inp, out)) for ses, lin, inp, out in cur)\n return cur\n", "url": "https://github.com/ipython/ipython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 224, "n_words": 96, "vocab_size": 68, "complexity": 8, "nloc": 15, "token_counts": 118, "n_ast_nodes": 188, "n_identifiers": 18, "d_id": 52477, "documentation": { "docstring": "Prepares and runs an SQL query for the history database.\n\n Parameters\n ----------\n sql : str\n Any filtering expressions to go after SELECT ... FROM ...\n params : tuple\n Parameters passed to the SQL query (to replace \"?\")\n raw, output : bool\n See :meth:`get_range`\n latest : bool\n Select rows with max (session, line)\n\n Returns\n -------\n Tuples as :meth:`get_range`\n ", "n_words": 57, "vocab_size": 46, "n_whitespaces": 171, "language": "en" } }, { "id": 294965, "commit_id": "ab0abdc988ac101217ba043909c4be8b33101ab3", "repo": "core", "path": "tests/components/subaru/test_config_flow.py", "file_name": "test_config_flow.py", "fun_name": "test_registered_no_pin_required", "commit_message": "Add 2FA support for Subaru integration setup (#68753)\n\n* Add 2FA support for Subaru integration setup\r\n\r\n* Update config flow to abort with 2FA request fail", "code": "async def test_registered_no_pin_required(hass, user_form):\n \n with patch(MOCK_API_CONNECT, return_value=True), patch(\n MOCK_API_DEVICE_REGISTERED, new_callable=PropertyMock\n ) as mock_device_registered, patch(MOCK_API_IS_PIN_REQUIRED, return_value=False):\n mock_device_registered.return_value = True\n await hass.config_entries.flow.async_configure(\n user_form[\"flow_id\"], user_input=TEST_CREDS\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 71, "n_words": 23, "vocab_size": 22, "complexity": 1, "nloc": 8, "token_counts": 61, "n_ast_nodes": 100, "n_identifiers": 16, "d_id": 93992, "documentation": { "docstring": "Test if the device is already registered and PIN not required.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 70998, "commit_id": "de3fcba9e95818e9634ab7de6bfcb1f4221f2775", "repo": "wagtail", "path": "wagtail/contrib/modeladmin/tests/test_modeladmin_edit_handlers.py", "file_name": "test_modeladmin_edit_handlers.py", "fun_name": 
"test_model_panels", "commit_message": "Fix warnings from flake8-comprehensions.", "code": "def test_model_panels(self):\n \n response = self.client.get('/admin/modeladmintest/friend/create/')\n self.assertEqual(\n list(response.context['form'].fields),\n ['first_name', 'phone_number']\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 60, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 6, "token_counts": 38, "n_ast_nodes": 69, "n_identifiers": 9, "d_id": 15597, "documentation": { "docstring": "loads the 'create' view and verifies that form fields are returned\n which have been defined via model Friend.panels", "n_words": 18, "vocab_size": 18, "n_whitespaces": 24, "language": "en" } }, { "id": 65288, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/non_billed_report.py", "file_name": "non_billed_report.py", "fun_name": "get_ordered_to_be_billed_data", "commit_message": "style: format code with black", "code": "def get_ordered_to_be_billed_data(args):\n\tdoctype, party = args.get(\"doctype\"), args.get(\"party\")\n\tchild_tab = doctype + \" Item\"\n\tprecision = (\n\t\tget_field_precision(\n\t\t\tfrappe.get_meta(child_tab).get_field(\"billed_amt\"), currency=get_default_currency()\n\t\t)\n\t\tor 2\n\t)\n\n\tproject_field = get_project_field(doctype, party)\n\n\treturn frappe.db.sql(\n\t\t.format(\n\t\t\tparent_tab=\"tab\" + doctype,\n\t\t\tchild_tab=\"tab\" + child_tab,\n\t\t\tprecision=precision,\n\t\t\tparty=party,\n\t\t\tdate_field=args.get(\"date\"),\n\t\t\tproject_field=project_field,\n\t\t\torder=args.get(\"order\"),\n\t\t\torder_by=args.get(\"order_by\"),\n\t\t)\n\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 22, "n_words": 44, "vocab_size": 35, "complexity": 2, "nloc": 46, "token_counts": 125, "n_ast_nodes": 208, "n_identifiers": 22, "d_id": 13841, "documentation": { "docstring": "\n\t\tSelect\n\t\t\t`{parent_tab}`.name, `{parent_tab}`.{date_field},\n\t\t\t`{parent_tab}`.{party}, `{parent_tab}`.{party}_name,\n\t\t\t`{child_tab}`.item_code,\n\t\t\t`{child_tab}`.base_amount,\n\t\t\t(`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1)),\n\t\t\t(`{child_tab}`.base_rate * ifnull(`{child_tab}`.returned_qty, 0)),\n\t\t\t(`{child_tab}`.base_amount -\n\t\t\t(`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1)) -\n\t\t\t(`{child_tab}`.base_rate * ifnull(`{child_tab}`.returned_qty, 0))),\n\t\t\t`{child_tab}`.item_name, `{child_tab}`.description,\n\t\t\t{project_field}, `{parent_tab}`.company\n\t\tfrom\n\t\t\t`{parent_tab}`, `{child_tab}`\n\t\twhere\n\t\t\t`{parent_tab}`.name = `{child_tab}`.parent and `{parent_tab}`.docstatus = 1\n\t\t\tand `{parent_tab}`.status not in ('Closed', 'Completed')\n\t\t\tand `{child_tab}`.amount > 0\n\t\t\tand (`{child_tab}`.base_amount -\n\t\t\tround(`{child_tab}`.billed_amt * ifnull(`{parent_tab}`.conversion_rate, 1), {precision}) -\n\t\t\t(`{child_tab}`.base_rate * ifnull(`{child_tab}`.returned_qty, 0))) > 0\n\t\torder by\n\t\t\t`{parent_tab}`.{order} {order_by}\n\t\t", "n_words": 70, "vocab_size": 48, "n_whitespaces": 47, "language": "en" } }, { "id": 62546, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/html5lib/html5parser.py", "file_name": "html5parser.py", 
"fun_name": "parseFragment", "commit_message": "upd; format", "code": "def parseFragment(doc, container=\"div\", treebuilder=\"etree\", namespaceHTMLElements=True, **kwargs):\n \n tb = treebuilders.getTreeBuilder(treebuilder)\n p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)\n return p.parseFragment(doc, container=container, **kwargs)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 29, "n_words": 17, "vocab_size": 16, "complexity": 1, "nloc": 4, "token_counts": 53, "n_ast_nodes": 84, "n_identifiers": 11, "d_id": 12986, "documentation": { "docstring": "Parse an HTML fragment as a string or file-like object into a tree\n\n :arg doc: the fragment to parse as a string or file-like object\n\n :arg container: the container context to parse the fragment in\n\n :arg treebuilder: the treebuilder to use when parsing\n\n :arg namespaceHTMLElements: whether or not to namespace HTML elements\n\n :returns: parsed tree\n\n Example:\n\n >>> from html5lib.html5libparser import parseFragment\n >>> parseFragment('this is a fragment')\n \n\n ", "n_words": 70, "vocab_size": 46, "n_whitespaces": 100, "language": "en" } }, { "id": 322190, "commit_id": "621357338437ee420eabbbf5ab19065bc85e73a5", "repo": "PaddleNLP", "path": "paddlenlp/taskflow/knowledge_mining.py", "file_name": "knowledge_mining.py", "fun_name": "_preprocess", "commit_message": "Update neural search readme and Add Paddle Serving Support (#1558)\n\n* add recall inference similarity\r\n\r\n* update examples\r\n\r\n* updatea readme\r\n\r\n* update dir name\r\n\r\n* update neural search readme\r\n\r\n* update milvus readme\r\n\r\n* update domain adaptive pretraining readme\r\n\r\n* fix the mistakes\r\n\r\n* update readme\r\n\r\n* add recall Paddle Serving Support\r\n\r\n* update readme\r\n\r\n* update readme and format the code\r\n\r\n* reformat the files\r\n\r\n* move the files\r\n\r\n* reformat the code\r\n\r\n* remove redundant code\r\n\r\nCo-authored-by: Zeyu Chen \r\nCo-authored-by: tianxin ", "code": "def _preprocess(self, inputs):\n \n inputs = self._check_input_text(inputs)\n self._max_cls_len = 5\n num_workers = self.kwargs[\n 'num_workers'] if 'num_workers' in self.kwargs else 0\n lazy_load = self.kwargs[\n 'lazy_load'] if 'lazy_load' in self.kwargs else False\n\n # Prompt template: input_text + \"是\" + \"[MASK]\" * cls_seq_length\n prompt_template = [\"是\"] + [\"[MASK]\"] * self._max_cls_len\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 117, "n_words": 46, "vocab_size": 33, "complexity": 3, "nloc": 26, "token_counts": 168, "n_ast_nodes": 115, "n_identifiers": 9, "d_id": 118085, "documentation": { "docstring": "\n Create the dataset and dataloader for the predict.\n ", "n_words": 8, "vocab_size": 7, "n_whitespaces": 23, "language": "en" } }, { "id": 58837, "commit_id": "451688c6aa1350bb3967d0d72b95e9da311de5d7", "repo": "prefect", "path": "tests/cli/test_deployment_cli.py", "file_name": "test_deployment_cli.py", "fun_name": "test_server_side_settings_are_used_if_present", "commit_message": "Further merge CLI and Python code paths", "code": "def test_server_side_settings_are_used_if_present(self, patch_import, tmp_path):\n \n d = Deployment(\n name=\"TEST\",\n flow_name=\"fn\",\n description=\"server-side value\",\n version=\"server\",\n parameters={\"key\": \"server\"},\n tags=[\"server-tag\"],\n 
work_queue_name=\"dev\",\n )\n assert d.apply()\n\n invoke_and_assert(\n [\n \"deployment\",\n \"build\",\n \"fake-path.py:fn\",\n \"-n\",\n \"TEST\",\n \"-o\",\n str(tmp_path / \"test.yaml\"),\n ],\n expected_code=0,\n temp_dir=tmp_path,\n )\n\n deployment = Deployment.load_from_yaml(tmp_path / \"test.yaml\")\n assert deployment.description == \"server-side value\"\n assert deployment.tags == [\"server-tag\"]\n assert deployment.parameters == dict(key=\"server\")\n assert deployment.work_queue_name == \"dev\"\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 359, "n_words": 56, "vocab_size": 46, "complexity": 1, "nloc": 29, "token_counts": 129, "n_ast_nodes": 225, "n_identifiers": 22, "d_id": 11821, "documentation": { "docstring": "\n This only applies to tags, work queue name, description, schedules and default parameter values\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 29, "language": "en" } }, { "id": 159098, "commit_id": "c687960f44e2ad07ccd48ddbccda26cb18a9d1c7", "repo": "rasa", "path": "tests/nlu/selectors/test_selectors.py", "file_name": "test_selectors.py", "fun_name": "test_transformer_size_gets_corrected", "commit_message": "correct transformer_size value if needed", "code": "def test_transformer_size_gets_corrected(train_persist_load_with_different_settings,):\n \n pipeline = [\n {\"component\": WhitespaceTokenizer},\n {\"component\": CountVectorsFeaturizer},\n ]\n config_params = {EPOCHS: 1, NUM_TRANSFORMER_LAYERS: 1}\n\n selector = train_persist_load_with_different_settings(\n pipeline, config_params, False,\n )\n assert selector.component_config[TRANSFORMER_SIZE] == DEFAULT_TRANSFORMER_SIZE\n\n\n@pytest.mark.timeout(120)", "url": "https://github.com/RasaHQ/rasa.git", "language": "Python", "ast_errors": "@pytest.mark.timeout(120)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 69, "n_words": 28, "vocab_size": 25, "complexity": 1, "nloc": 10, "token_counts": 54, "n_ast_nodes": 100, "n_identifiers": 15, "d_id": 38124, "documentation": { "docstring": "Tests that the default value of `transformer_size` which is `None` is\n corrected if transformer layers are enabled in `ResponseSelector`.\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 25, "language": "en" } }, { "id": 230885, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/_annotation.py", "file_name": "_annotation.py", "fun_name": "valign", "commit_message": "switch to black .22", "code": "def valign(self):\n \n return self[\"valign\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 62558, "documentation": { "docstring": "\n Sets the vertical alignment of the `text` within the box. 
Has\n an effect only if an explicit height is set to override the\n text height.\n\n The 'valign' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['top', 'middle', 'bottom']\n\n Returns\n -------\n Any\n ", "n_words": 49, "vocab_size": 40, "n_whitespaces": 130, "language": "en" } }, { "id": 26386, "commit_id": "ab45ebda5a14df6806046fd552e2c6d08f025503", "repo": "saleor", "path": "saleor/graphql/core/mutations.py", "file_name": "mutations.py", "fun_name": "check_permissions", "commit_message": "Better permissions (#9363)\n\n* Better permissions\r\n\r\n* Add OWNER permission\r\n\r\n* WIP Add enums to represent function-based permissions\r\n\r\n* Rename OWNER to IS_OWNER\r\n\r\n* Add flag to skip autogenerated permission message\r\n\r\n* Rename InternalPermissions to PermissionFunctions\r\n\r\n* Add permission descriptions for meta mutations\r\n\r\n* Better permissions validation\r\n\r\n* Reuse user checking functions\r\n\r\n* Rename permission functions enums\r\n\r\n* Update schema\r\n\r\n* Rename permission functions enums", "code": "def check_permissions(cls, context, permissions=None):\n \n all_permissions = permissions or cls._meta.permissions\n if not all_permissions:\n return True\n\n authorization_filters = [\n p for p in all_permissions if isinstance(p, AuthorizationFilters)\n ]\n permissions = [\n p for p in all_permissions if not isinstance(p, AuthorizationFilters)\n ]\n\n granted_by_permissions = False\n granted_by_authorization_filters = False\n\n app = getattr(context, \"app\", None)\n if app and permissions and AccountPermissions.MANAGE_STAFF in permissions:\n # `MANAGE_STAFF` permission for apps is not supported. If apps could use it\n # they could create a staff user with full access which would be a\n # permission leak issue.\n return False\n\n requestor = get_user_or_app_from_context(context)\n if permissions:\n granted_by_permissions = requestor.has_perms(permissions)\n\n if authorization_filters:\n internal_perm_checks = []\n for p in authorization_filters:\n perm_fn = resolve_authorization_filter_fn(p)\n if perm_fn:\n res = perm_fn(context)\n internal_perm_checks.append(bool(res))\n granted_by_authorization_filters = any(internal_perm_checks)\n\n return granted_by_permissions or granted_by_authorization_filters\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 418, "n_words": 124, "vocab_size": 68, "complexity": 15, "nloc": 27, "token_counts": 152, "n_ast_nodes": 244, "n_identifiers": 26, "d_id": 4979, "documentation": { "docstring": "Determine whether user or app has rights to perform this mutation.\n\n Default implementation assumes that account is allowed to perform any\n mutation. 
By overriding this method or defining required permissions\n in the meta-class, you can restrict access to it.\n\n The `context` parameter is the Context instance associated with the request.\n ", "n_words": 50, "vocab_size": 41, "n_whitespaces": 85, "language": "en" } }, { "id": 60460, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/_distutils_hack/__init__.py", "file_name": "__init__.py", "fun_name": "enabled", "commit_message": "upd; format", "code": "def enabled():\n \n which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')\n return which == 'local'\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 19, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 21, "n_ast_nodes": 42, "n_identifiers": 5, "d_id": 12170, "documentation": { "docstring": "\n Allow selection of distutils by environment variable.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 14, "language": "en" } }, { "id": 191437, "commit_id": "c636488fe5e144bcf41832d27d64dbed6c9f4997", "repo": "langchain", "path": "tests/unit_tests/test_dynamic_prompt.py", "file_name": "test_dynamic_prompt.py", "fun_name": "test_dynamic_prompt_valid", "commit_message": "DynamicPrompt class creation (#49)\n\nChecking that this structure looks generally ok -- going to sub in logic\r\nwhere the TODO comment is then add a test.", "code": "def test_dynamic_prompt_valid() -> None:\n \n input_variables = [\"question\"]\n example_separator = \"\\n\\n\"\n dynamic_prompt_cls = DynamicPrompt(\n examples=EXAMPLES,\n suffix=SUFFIX,\n input_variables=input_variables,\n example_separator=example_separator,\n prefix=PREFIX,\n )\n prompt_cls = Prompt(input_variables=input_variables, template=LONGER_TEMPLATE)\n dynamic_prompt_template = dynamic_prompt_cls.format(question=\"foo?\")\n prompt_template = prompt_cls.format(question=\"foo?\")\n assert dynamic_prompt_template == prompt_template\n assert dynamic_prompt_cls.input_variables == prompt_cls.input_variables\n\n", "url": "https://github.com/hwchase17/langchain.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 102, "n_words": 37, "vocab_size": 28, "complexity": 1, "nloc": 16, "token_counts": 84, "n_ast_nodes": 140, "n_identifiers": 19, "d_id": 46569, "documentation": { "docstring": "Test dynamic prompt can be successfully constructed from examples.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 101699, "commit_id": "e5356a417e7c2124e75c4a2994ed604fc0a3cc74", "repo": "faceswap", "path": "lib/align/alignments.py", "file_name": "alignments.py", "fun_name": "faces_count", "commit_message": "Alignments update:\n - Store face embeddings in PNG header when sorting\n - typing + refactor\n - Update alignments keys for 'identity' and 'video_meta' + bump to v2.3\n - General typing fixes", "code": "def faces_count(self) -> int:\n \n retval = sum(len(val[\"faces\"]) for val in self._data.values())\n logger.trace(retval) # type:ignore\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 45, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 5, "token_counts": 38, "n_ast_nodes": 66, "n_identifiers": 11, "d_id": 21103, "documentation": { "docstring": " int: The total number of faces that appear in the 
alignments :attr:`data`. ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 13, "language": "en" } }, { "id": 300810, "commit_id": "69cc6ab5f1d58adc586c3b300a4f7f0cde2cd0c2", "repo": "core", "path": "tests/auth/test_auth_store.py", "file_name": "test_auth_store.py", "fun_name": "test_loading_race_condition", "commit_message": "Clean up accessing entity_registry.async_get_registry helper via hass (#72005)", "code": "async def test_loading_race_condition(hass):\n \n store = auth_store.AuthStore(hass)\n with patch(\n \"homeassistant.helpers.entity_registry.async_get\"\n ) as mock_ent_registry, patch(\n \"homeassistant.helpers.device_registry.async_get\"\n ) as mock_dev_registry, patch(\n \"homeassistant.helpers.storage.Store.async_load\", return_value=None\n ) as mock_load:\n results = await asyncio.gather(store.async_get_users(), store.async_get_users())\n\n mock_ent_registry.assert_called_once_with(hass)\n mock_dev_registry.assert_called_once_with(hass)\n mock_load.assert_called_once_with()\n assert results[0] == results[1]\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 109, "n_words": 35, "vocab_size": 28, "complexity": 1, "nloc": 14, "token_counts": 86, "n_ast_nodes": 152, "n_identifiers": 15, "d_id": 99666, "documentation": { "docstring": "Test only one storage load called when concurrent loading occurred .", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 46201, "commit_id": "774ca085d2d2d58d4292b43c3511a145cc07154b", "repo": "airflow", "path": "tests/providers/amazon/aws/hooks/test_base_aws.py", "file_name": "test_base_aws.py", "fun_name": "test_get_resource_deprecation_warning", "commit_message": "Enable JSON serialization for connections (#19857)\n\nPreviously in general we could only store connections in the Airflow URI format. With this change we can serialize as JSON. The Airflow URI format can be very tricky to work with and although we have for some time had a convenience method Connection.get_uri, using JSON is just simpler.", "code": "def test_get_resource_deprecation_warning(self):\n hook = AwsBaseHook(aws_conn_id='aws_default', resource_type='dynamodb')\n warning_message = \n with pytest.warns(DeprecationWarning) as warnings:\n hook.get_resource_type('dynamodb')\n assert warning_message in [str(w.message) for w in warnings]\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 64, "n_words": 21, "vocab_size": 18, "complexity": 2, "nloc": 6, "token_counts": 51, "n_ast_nodes": 91, "n_identifiers": 15, "d_id": 8811, "documentation": { "docstring": "resource_type is deprecated. 
Set resource_type from class attribute.", "n_words": 8, "vocab_size": 7, "n_whitespaces": 7, "language": "en" } }, { "id": 62982, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pep517/envbuild.py", "file_name": "envbuild.py", "fun_name": "pip_install", "commit_message": "upd; format", "code": "def pip_install(self, reqs):\n \n if not reqs:\n return\n log.info('Calling pip to install %s', reqs)\n cmd = [\n sys.executable, '-m', 'pip', 'install', '--ignore-installed',\n '--prefix', self.path] + list(reqs)\n check_call(\n cmd,\n stdout=LoggerWrapper(log, logging.INFO),\n stderr=LoggerWrapper(log, logging.ERROR),\n )\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 140, "n_words": 32, "vocab_size": 32, "complexity": 2, "nloc": 12, "token_counts": 74, "n_ast_nodes": 120, "n_identifiers": 17, "d_id": 13085, "documentation": { "docstring": "Install dependencies into this env by calling pip in a subprocess", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 19198, "commit_id": "847eb6b22d03f0cffef945996cf835272870435a", "repo": "mlflow", "path": "mlflow/sklearn/utils.py", "file_name": "utils.py", "fun_name": "_get_classifier_artifacts", "commit_message": "Improve confusion matrix plot (#5273)\n\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu ", "code": "def _get_classifier_artifacts(fitted_estimator, prefix, X, y_true, sample_weight):\n \n import sklearn\n\n if not _is_plotting_supported():\n return []\n", "url": "https://github.com/mlflow/mlflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 29, "n_words": 13, "vocab_size": 13, "complexity": 3, "nloc": 48, "token_counts": 187, "n_ast_nodes": 41, "n_identifiers": 8, "d_id": 2909, "documentation": { "docstring": "\n Draw and record various common artifacts for classifier\n\n For all classifiers, we always log:\n (1) confusion matrix:\n https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_confusion_matrix.html\n\n For only binary classifiers, we will log:\n (2) precision recall curve:\n https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_precision_recall_curve.html\n (3) roc curve:\n https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html\n\n Steps:\n 1. Extract X and y_true from fit_args and fit_kwargs, and split into train & test datasets.\n 2. If the sample_weight argument exists in fit_func (accuracy_score by default\n has sample_weight), extract it from fit_args or fit_kwargs as\n (y_true, y_pred, sample_weight, multioutput), otherwise as (y_true, y_pred, multioutput)\n 3. 
return a list of artifacts path to be logged\n\n :param fitted_estimator: The already fitted regressor\n :param fit_args: Positional arguments given to fit_func.\n :param fit_kwargs: Keyword arguments given to fit_func.\n :return: List of artifacts to be logged\n ", "n_words": 117, "vocab_size": 91, "n_whitespaces": 178, "language": "en" } }, { "id": 288912, "commit_id": "6abf677092e2d45d39c515c8d4fa7e1787394766", "repo": "core", "path": "tests/components/plugwise/conftest.py", "file_name": "conftest.py", "fun_name": "mock_stretch", "commit_message": "Bump plugwise to v0.25.0 and adapt relevant plugwise code (#80129)", "code": "def mock_stretch() -> Generator[None, MagicMock, None]:\n \n chosen_env = \"stretch_v31\"\n with patch(\n \"homeassistant.components.plugwise.gateway.Smile\", autospec=True\n ) as smile_mock:\n smile = smile_mock.return_value\n\n smile.gateway_id = \"259882df3c05415b99c2d962534ce820\"\n smile.heater_id = None\n smile.smile_version = \"3.1.11\"\n smile.smile_type = \"stretch\"\n smile.smile_hostname = \"stretch98765\"\n smile.smile_model = \"Gateway\"\n smile.smile_name = \"Stretch\"\n\n smile.connect.return_value = True\n smile.async_update.return_value = _read_json(chosen_env, \"all_data\")\n\n yield smile\n\n\n@pytest.fixture", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 145, "n_words": 50, "vocab_size": 39, "complexity": 1, "nloc": 17, "token_counts": 90, "n_ast_nodes": 171, "n_identifiers": 21, "d_id": 88061, "documentation": { "docstring": "Create a Mock Stretch environment for testing exceptions.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 101008, "commit_id": "91fecc47b2157d684ab9c219a860df51543222a3", "repo": "faceswap", "path": "lib/utils.py", "file_name": "utils.py", "fun_name": "_model_version", "commit_message": "lib.Utils - add DPI detector", "code": "def _model_version(self) -> int:\n \n retval = int(self._model_full_name[self._model_full_name.rfind(\"_\") + 2:])\n self.logger.trace(retval) # type: ignore\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 44, "n_words": 15, "vocab_size": 14, "complexity": 1, "nloc": 5, "token_counts": 39, "n_ast_nodes": 68, "n_identifiers": 8, "d_id": 20451, "documentation": { "docstring": " int: The model's version number from the model full name. ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 11, "language": "en" } }, { "id": 3288, "commit_id": "e665430adcd7690a1ea7565803f34043596045fe", "repo": "prophet", "path": "python/prophet/diagnostics.py", "file_name": "diagnostics.py", "fun_name": "rolling_mean_by_h", "commit_message": "Improved execution time of rolling_mean_by_h (#2142)", "code": "def rolling_mean_by_h(x, h, w, name):\n \n # Aggregate over h\n df = pd.DataFrame({'x': x, 'h': h})\n df2 = (\n df.groupby('h').agg(['sum', 'count']).reset_index().sort_values('h')\n )\n xs = df2['x']['sum'].values\n ns = df2['x']['count'].values\n hs = df2.h.values\n\n trailing_i = len(df2) - 1\n x_sum = 0\n n_sum = 0\n # We don't know output size but it is bounded by len(df2)\n res_x = np.empty(len(df2))\n\n # Start from the right and work backwards\n for i in range(len(df2) - 1, -1, -1):\n x_sum += xs[i]\n n_sum += ns[i]\n while n_sum >= w:\n # Include points from the previous horizon. 
All of them if still\n # less than w, otherwise weight the mean by the difference\n excess_n = n_sum - w\n excess_x = excess_n * xs[i] / ns[i]\n res_x[trailing_i] = (x_sum - excess_x)/ w\n x_sum -= xs[trailing_i]\n n_sum -= ns[trailing_i]\n trailing_i -= 1\n\n res_h = hs[(trailing_i + 1):]\n res_x = res_x[(trailing_i + 1):]\n\n return pd.DataFrame({'horizon': res_h, name: res_x})\n \n\n", "url": "https://github.com/facebook/prophet.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 321, "n_words": 147, "vocab_size": 101, "complexity": 3, "nloc": 25, "token_counts": 228, "n_ast_nodes": 376, "n_identifiers": 29, "d_id": 430, "documentation": { "docstring": "Compute a rolling mean of x, after first aggregating by h.\n\n Right-aligned. Computes a single mean for each unique value of h. Each\n mean is over at least w samples.\n\n Parameters\n ----------\n x: Array.\n h: Array of horizon for each value in x.\n w: Integer window size (number of elements).\n name: Name for metric in result dataframe\n\n Returns\n -------\n Dataframe with columns horizon and name, the rolling mean of x.\n ", "n_words": 70, "vocab_size": 53, "n_whitespaces": 106, "language": "en" } }, { "id": 312884, "commit_id": "009b31941a45c3d880b69dcf91d14edeb61a78a7", "repo": "core", "path": "homeassistant/helpers/restore_state.py", "file_name": "restore_state.py", "fun_name": "as_dict", "commit_message": "Support restoring SensorEntity native_value (#66068)", "code": "def as_dict(self) -> dict[str, Any]:\n \n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 12, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 5, "token_counts": 13, "n_ast_nodes": 22, "n_identifiers": 5, "d_id": 111515, "documentation": { "docstring": "Return a dict representation of the extra data.\n\n Must be serializable by Home Assistant's JSONEncoder.\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 29, "language": "en" } }, { "id": 319336, "commit_id": "6d0fdc751027809a13e0430c16b8f248b3eb03e8", "repo": "paperless-ngx", "path": "src/documents/tests/test_tasks.py", "file_name": "test_tasks.py", "fun_name": "test_consume_barcode_unsupported_jpg_file", "commit_message": "add tests\nfix indention\nadd more documentation\n\nSigned-off-by: Florian Brandes ", "code": "def test_consume_barcode_unsupported_jpg_file(self, m):\n \n test_file = os.path.join(\n os.path.dirname(__file__),\n \"samples\",\n \"simple.jpg\",\n )\n dst = os.path.join(settings.SCRATCH_DIR, \"simple.jpg\")\n shutil.copy(test_file, dst)\n with self.assertLogs(\"paperless.tasks\", level=\"WARNING\") as cm:\n self.assertIn(\"Success\", tasks.consume_file(dst))\n self.assertEqual(\n cm.output,\n [\n \"WARNING:paperless.tasks:Unsupported file format for barcode reader: .jpg\",\n ],\n )\n m.assert_called_once()\n\n args, kwargs = m.call_args\n self.assertIsNone(kwargs[\"override_filename\"])\n self.assertIsNone(kwargs[\"override_title\"])\n self.assertIsNone(kwargs[\"override_correspondent_id\"])\n self.assertIsNone(kwargs[\"override_document_type_id\"])\n self.assertIsNone(kwargs[\"override_tag_ids\"])\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 242, "n_words": 45, "vocab_size": 42, "complexity": 1, "nloc": 23, "token_counts": 150, "n_ast_nodes": 258, "n_identifiers": 27, "d_id": 116934, "documentation": { "docstring": "\n 
This test assumes barcode and TIFF support are enabled and\n the user uploads an unsupported image file (e.g. jpg)\n\n The function shouldn't try to scan for separating barcodes\n and continue archiving the file as is.\n ", "n_words": 35, "vocab_size": 31, "n_whitespaces": 71, "language": "en" } }, { "id": 248920, "commit_id": "0f971ca68e808dd16f53f5594a6b33b7bddcc9a9", "repo": "synapse", "path": "tests/federation/test_federation_client.py", "file_name": "test_federation_client.py", "fun_name": "test_get_pdu_returns_nothing_when_event_does_not_exist", "commit_message": "Update `get_pdu` to return the original, pristine `EventBase` (#13320)\n\nUpdate `get_pdu` to return the untouched, pristine `EventBase` as it was originally seen over federation (no metadata added). Previously, we returned the same `event` reference that we stored in the cache which downstream code modified in place and added metadata like setting it as an `outlier` and essentially poisoned our cache. Now we always return a copy of the `event` so the original can stay pristine in our cache and re-used for the next cache call.\r\n\r\nSplit out from https://github.com/matrix-org/synapse/pull/13205\r\n\r\nAs discussed at:\r\n\r\n - https://github.com/matrix-org/synapse/pull/13205#discussion_r918365746\r\n - https://github.com/matrix-org/synapse/pull/13205#discussion_r918366125\r\n\r\nRelated to https://github.com/matrix-org/synapse/issues/12584. This PR doesn't fix that issue because it hits [`get_event` which exists from the local database before it tries to `get_pdu`](https://github.com/matrix-org/synapse/blob/7864f33e286dec22368dc0b11c06eebb1462a51e/synapse/federation/federation_client.py#L581-L594).", "code": "def test_get_pdu_returns_nothing_when_event_does_not_exist(self):\n \n remote_pdu = self.get_success(\n self.hs.get_federation_client().get_pdu(\n [\"yet.another.server\"],\n \"event_should_not_exist\",\n RoomVersions.V9,\n )\n )\n self.assertEqual(remote_pdu, None)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 108, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 9, "token_counts": 42, "n_ast_nodes": 71, "n_identifiers": 10, "d_id": 72511, "documentation": { "docstring": "No event should be returned when the event does not exist", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 101425, "commit_id": "1022651eb8a7741014f5d2ec7cbfe882120dfa5f", "repo": "faceswap", "path": "tools/preview/preview.py", "file_name": "preview.py", "fun_name": "config_dicts", "commit_message": "Bugfix: convert - Gif Writer\n - Fix non-launch error on Gif Writer\n - convert plugins - linting\n - convert/fs_media/preview/queue_manager - typing\n - Change convert items from dict to Dataclass", "code": "def config_dicts(self) -> Dict[str, Any]:\n \n return self._config_dicts\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 17, "n_ast_nodes": 28, "n_identifiers": 6, "d_id": 20839, "documentation": { "docstring": " dict: The convert configuration options in dictionary form.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 248370, "commit_id": "b83bc5fab57b37f75a79d02213d6032c586fd36e", "repo": "synapse", "path": "tests/storage/test_events.py", "file_name": "test_events.py", "fun_name": 
"test_prune_gap_if_old", "commit_message": "Pull out less state when handling gaps mk2 (#12852)", "code": "def test_prune_gap_if_old(self):\n \n\n # Advance the clock for many days to make the old extremity \"old\". We\n # also set the depth to \"lots\".\n self.reactor.advance(7 * 24 * 60 * 60)\n\n # Fudge a second event which points to an event we don't have. This is a\n # state event so that the state changes (otherwise we won't prune the\n # extremity as they'll have the same state group).\n remote_event_2 = event_from_pdu_json(\n {\n \"type\": EventTypes.Member,\n \"state_key\": \"@user:other2\",\n \"content\": {\"membership\": Membership.JOIN},\n \"room_id\": self.room_id,\n \"sender\": \"@user:other2\",\n \"depth\": 10000,\n \"prev_events\": [\"$some_unknown_message\"],\n \"auth_events\": [],\n \"origin_server_ts\": self.clock.time_msec(),\n },\n RoomVersions.V6,\n )\n\n state_before_gap = self.get_success(\n self.state.get_current_state_ids(self.room_id)\n )\n\n self.persist_event(remote_event_2, state=state_before_gap)\n\n # Check the new extremity is just the new remote event.\n self.assert_extremities([remote_event_2.event_id])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 390, "n_words": 113, "vocab_size": 84, "complexity": 1, "nloc": 21, "token_counts": 124, "n_ast_nodes": 218, "n_identifiers": 22, "d_id": 72247, "documentation": { "docstring": "Test that we drop extremities after a gap when the previous extremity\n is \"old\"\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 28, "language": "en" } }, { "id": 181943, "commit_id": "2635f58e7c3d10b161ee69a15ebfe6499ac26daa", "repo": "textual", "path": "src/textual/dom.py", "file_name": "dom.py", "fun_name": "tree", "commit_message": "docstrings and tidy", "code": "def tree(self) -> Tree:\n \n highlighter = ReprHighlighter()\n tree = Tree(highlighter(repr(self)))\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 31, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 11, "token_counts": 35, "n_ast_nodes": 45, "n_identifiers": 6, "d_id": 43682, "documentation": { "docstring": "Get a Rich tree object which will recursively render the structure of the node tree.\n\n Returns:\n Tree: A Rich object which may be printed.\n ", "n_words": 24, "vocab_size": 20, "n_whitespaces": 49, "language": "en" } }, { "id": 183562, "commit_id": "d14659c1a3760eade2dd3479b66eb8b2e7711db0", "repo": "textual", "path": "src/textual/_terminal_features.py", "file_name": "_terminal_features.py", "fun_name": "synchronized_update_sequences", "commit_message": "[terminal buffering] Add support for the \"mode 2026\"\n\nThat task is definitely way more complicated that it seemed to be 😅", "code": "def synchronized_update_sequences(self) -> tuple[str, str]:\n \n return (\n self._synchronized_update_start_sequence(),\n self._synchronized_update_end_sequence(),\n )\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 53, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 14, "token_counts": 28, "n_ast_nodes": 45, "n_identifiers": 6, "d_id": 44251, "documentation": { "docstring": "\n Returns the ANSI sequence that we should send to the terminal to tell it that\n it should buffer the content we're about to send, as well as the ANIS sequence to end the 
buffering.\n If the terminal doesn't seem to support synchronised updates both strings will be empty.\n\n Returns:\n tuple[str, str]: the start and end ANSI sequences, respectively. They will both be empty strings\n if the terminal emulator doesn't seem to support the \"synchronised updates\" mode.\n ", "n_words": 76, "vocab_size": 47, "n_whitespaces": 138, "language": "en" } }, { "id": 338065, "commit_id": "9fd08d79f9a72973073d2cdef6bf23f367b75d6f", "repo": "accelerate", "path": "src/accelerate/utils/launch.py", "file_name": "launch.py", "fun_name": "_filter_args", "commit_message": "Fully remove `subprocess` from the multi-gpu launcher (#623)\n\n* Remove one of the subprocesses!", "code": "def _filter_args(args):\n \n distrib_args = distrib_run.get_args_parser()\n known_args, _ = distrib_args.parse_known_args()\n for arg in list(vars(args).keys()):\n if arg not in vars(known_args).keys():\n delattr(args, arg)\n distrib_args = distrib_run.parse_args(vars(args))\n for key, value in vars(args).items():\n setattr(distrib_args, key, value)\n if is_torch_version(\"<\", \"1.9.0\"):\n setattr(distrib_args, \"use_env\", True)\n return distrib_args\n\n", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 95, "n_words": 39, "vocab_size": 28, "complexity": 5, "nloc": 12, "token_counts": 107, "n_ast_nodes": 177, "n_identifiers": 19, "d_id": 121158, "documentation": { "docstring": "\n Filters out all `accelerate` specific args\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 13, "language": "en" } }, { "id": 212784, "commit_id": "186b16e77ac2b54eb966bafc0e5f092e028e7ed8", "repo": "PySimpleGUI", "path": "DemoPrograms/Demo_Desktop_Widget_Weather.py", "file_name": "Demo_Desktop_Widget_Weather.py", "fun_name": "create_endpoint", "commit_message": "Addition of county to the Weather Desktop Widget", "code": "def create_endpoint(endpoint_type=0):\n \n if endpoint_type == 1:\n try:\n endpoint = f\"http://api.openweathermap.org/data/2.5/weather?zip={APP_DATA['Postal']},{APP_DATA['Country']}&appid={API_KEY}&units={APP_DATA['Units']}\"\n return endpoint\n except ConnectionError:\n return\n elif endpoint_type == 2:\n try:\n # endpoint = f\"http://api.openweathermap.org/data/2.5/weather?q={APP_DATA['City'].replace(' ', '%20')},us&APPID={API_KEY}&units={APP_DATA['Units']}\"\n endpoint = f\"http://api.openweathermap.org/data/2.5/weather?q={APP_DATA['City'].replace(' ', '%20')},{APP_DATA['Country']}&APPID={API_KEY}&units={APP_DATA['Units']}\"\n return endpoint\n except ConnectionError:\n return\n else:\n return\n\n", "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 162, "n_words": 38, "vocab_size": 21, "complexity": 5, "nloc": 15, "token_counts": 45, "n_ast_nodes": 163, "n_identifiers": 7, "d_id": 53400, "documentation": { "docstring": " Create the api request endpoint\n {0: default, 1: zipcode, 2: city_name}", "n_words": 11, "vocab_size": 11, "n_whitespaces": 14, "language": "en" } }, { "id": 266595, "commit_id": "43e55db20821a1341d21ffa1e4e7e6185b244105", "repo": "ansible", "path": "lib/ansible/galaxy/collection/__init__.py", "file_name": "__init__.py", "fun_name": "install", "commit_message": "ansible-galaxy - add signature verification of the MANIFEST.json (#76681)\n\n* ansible-galaxy collection install|verify:\r\n\r\n - Support verifying the origin of the MANIFEST.json when the Galaxy server has provided 
signatures.\r\n - Allow supplemental signatures to use during verification on the CLI/requirements file.\r\n\r\n* ansible-galaxy collection install:\r\n\r\n - Support disabling signature verification. This silences the warning provided by ansible-galaxy if the Galaxy server provided signatures it cannot use because no keyring is configured.\r\n - Store Galaxy server metadata alongside installed collections for provenance. This is used by 'ansible-galaxy collection verify --offline'.\r\n\r\n* Add unit tests for method that gets signatures from a Galaxy server\r\n\r\n* Add integration tests for user-provided signature sources\r\n\r\n- Test CLI option combinations\r\n- Test installing collections with valid/invalid signature sources\r\n- Test disabling GPG verification when installing collections\r\n- Test verifying collections with valid/invalid signature sources\r\n\r\n* Make signature verification advisory-by-default if signatures are provided by the Galaxy server\r\n\r\n- Make the default keyring None\r\n- Warn if the keyring is None but the Galaxy server provided signatures\r\n- Error if the keyring is None but the user supplied signatures\r\n- Error if the keyring is not None but is invalid\r\n\r\n* changelog\r\n\r\n* add ansible-galaxy user documentation for new options\r\n\r\nCo-authored-by: Matt Martz \r\nCo-authored-by: Sviatoslav Sydorenko \r\nCo-authored-by: Martin Krizek \r\nCo-authored-by: Sandra McCann \r\nCo-authored-by: Andy Mott \r\nCo-authored-by: John R Barker ", "code": "def install(collection, path, artifacts_manager): # FIXME: mv to dataclasses?\n # type: (Candidate, str, ConcreteArtifactsManager) -> None\n \n b_artifact_path = (\n artifacts_manager.get_artifact_path if collection.is_concrete_artifact\n else artifacts_manager.get_galaxy_artifact_path\n )(collection)\n\n collection_path = os.path.join(path, collection.namespace, collection.name)\n b_collection_path = to_bytes(collection_path, errors='surrogate_or_strict')\n display.display(\n u\"Installing '{coll!s}' to '{path!s}'\".\n format(coll=to_text(collection), path=collection_path),\n )\n\n if os.path.exists(b_collection_path):\n shutil.rmtree(b_collection_path)\n\n if collection.is_dir:\n install_src(collection, b_artifact_path, b_collection_path, artifacts_manager)\n else:\n install_artifact(\n b_artifact_path,\n b_collection_path,\n artifacts_manager._b_working_directory,\n collection.signatures,\n artifacts_manager.keyring\n )\n if (collection.is_online_index_pointer and isinstance(collection.src, GalaxyAPI)):\n write_source_metadata(\n collection,\n b_collection_path,\n artifacts_manager\n )\n\n display.display(\n '{coll!s} was installed successfully'.\n format(coll=to_text(collection)),\n )\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 315, "n_words": 76, "vocab_size": 62, "complexity": 6, "nloc": 33, "token_counts": 170, "n_ast_nodes": 262, "n_identifiers": 34, "d_id": 78490, "documentation": { "docstring": "Install a collection under a given path.\n\n :param collection: Collection to be installed.\n :param path: Collection dirs layout path.\n :param artifacts_manager: Artifacts manager.\n ", "n_words": 23, "vocab_size": 18, "n_whitespaces": 35, "language": "en" } }, { "id": 96505, "commit_id": "fbe987561d2b9df070f34652785294cc430b41e4", "repo": "sentry", "path": "src/sentry/utils/services.py", "file_name": "services.py", "fun_name": "validate", "commit_message": "typing: Add type hints to 
sentry/utils/services.py (#31984)\n\nWe use these service backends in a lot of places that are typed. Adding typing here adds a lot of\r\nvalue since we can now properly introspect things that inherit from `Service` and related.", "code": "def validate(self) -> None:\n \n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 11, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 7, "token_counts": 8, "n_ast_nodes": 16, "n_identifiers": 2, "d_id": 19322, "documentation": { "docstring": "\n Validates the settings for this backend (i.e. such as proper connection\n info).\n\n Raise ``InvalidConfiguration`` if there is a configuration error.\n ", "n_words": 20, "vocab_size": 20, "n_whitespaces": 49, "language": "en" } }, { "id": 63394, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", "file_name": "pyparsing.py", "fun_name": "scanString", "commit_message": "upd; format", "code": "def scanString(self, instring, maxMatches=_MAX_INT, overlap=False):\n \n if not self.streamlined:\n self.streamline()\n for e in self.ignoreExprs:\n e.streamline()\n\n if not self.keepTabs:\n instring = _ustr(instring).expandtabs()\n instrlen = len(instring)\n loc = 0\n preparseFn = self.preParse\n parseFn = self._parse\n ParserElement.resetCache()\n matches = 0\n try:\n while loc <= instrlen and matches < maxMatches:\n try:\n preloc = preparseFn(instring, loc)\n nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)\n except ParseException:\n loc = preloc + 1\n else:\n if nextLoc > loc:\n matches += 1\n yield tokens, preloc, nextLoc\n if overlap:\n nextloc = preparseFn(instring, loc)\n if nextloc > loc:\n loc = nextLoc\n else:\n loc += 1\n else:\n loc = nextLoc\n else:\n loc = preloc + 1\n except ParseBaseException as exc:\n if ParserElement.verbose_stacktrace:\n raise\n else:\n # catch and re-raise exception from here, clearing out pyparsing internal stack trace\n if getattr(exc, '__traceback__', None) is not None:\n exc.__traceback__ = self._trim_traceback(exc.__traceback__)\n raise exc\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 789, "n_words": 135, "vocab_size": 80, "complexity": 13, "nloc": 41, "token_counts": 217, "n_ast_nodes": 354, "n_identifiers": 35, "d_id": 13285, "documentation": { "docstring": "\n Scan the input string for expression matches. Each match will return the\n matching tokens, start location, and end location. May be called with optional\n ``maxMatches`` argument, to clip scanning after 'n' matches are found. If\n ``overlap`` is specified, then overlapping matches will be reported.\n\n Note that the start and end locations are reported relative to the string\n being parsed. 
See :class:`parseString` for more information on parsing\n strings with embedded tabs.\n\n Example::\n\n source = \"sldjf123lsdjjkf345sldkjf879lkjsfd987\"\n print(source)\n for tokens, start, end in Word(alphas).scanString(source):\n print(' '*start + '^'*(end-start))\n print(' '*start + tokens[0])\n\n prints::\n\n sldjf123lsdjjkf345sldkjf879lkjsfd987\n ^^^^^\n sldjf\n ^^^^^^^\n lsdjjkf\n ^^^^^^\n sldkjf\n ^^^^^^\n lkjsfd\n ", "n_words": 99, "vocab_size": 78, "n_whitespaces": 442, "language": "en" } }, { "id": 247707, "commit_id": "1da0f79d5455b594f2aa989106a672786f5b990f", "repo": "synapse", "path": "tests/rest/client/test_relations.py", "file_name": "test_relations.py", "fun_name": "_get_related_events", "commit_message": "Refactor relations tests (#12232)\n\n* Moves the relation pagination tests to a separate class.\r\n* Move the assertion of the response code into the `_send_relation` helper.\r\n* Moves some helpers into the base-class.", "code": "def _get_related_events(self) -> List[str]:\n \n # Request the relations of the event.\n channel = self.make_request(\n \"GET\",\n f\"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}\",\n access_token=self.user_token,\n )\n self.assertEquals(200, channel.code, channel.json_body)\n return [ev[\"event_id\"] for ev in channel.json_body[\"chunk\"]]\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 102, "n_words": 27, "vocab_size": 26, "complexity": 2, "nloc": 11, "token_counts": 59, "n_ast_nodes": 109, "n_identifiers": 14, "d_id": 71860, "documentation": { "docstring": "\n Requests /relations on the parent ID and returns a list of event IDs.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 28, "language": "en" } }, { "id": 199355, "commit_id": "7c199e306648513c13c9b5c5b5fad06e5f1c3020", "repo": "sympy", "path": "sympy/physics/mechanics/joint.py", "file_name": "joint.py", "fun_name": "_express_axis", "commit_message": "Fix failing test and simplify joint orient methods", "code": "def _express_axis(self, axis, frame):\n \n try:\n ax_mat = axis.to_matrix(self.parent_interframe)\n except ValueError:\n ax_mat = axis.to_matrix(self.child_interframe)\n try:\n self.parent_interframe.dcm(frame) # Check if connected\n int_frame = self.parent_interframe\n except ValueError:\n int_frame = self.child_interframe\n return self._to_vector(ax_mat, int_frame).express(frame)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 128, "n_words": 30, "vocab_size": 22, "complexity": 3, "nloc": 11, "token_counts": 72, "n_ast_nodes": 118, "n_identifiers": 13, "d_id": 49253, "documentation": { "docstring": "Helper function to get an axis expressed in a specified frame.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 196265, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/geometry/curve.py", "file_name": "curve.py", "fun_name": "limits", "commit_message": "Updated import locations", "code": "def limits(self):\n \n return self.args[1]\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 23, "n_identifiers": 3, "d_id": 47765, "documentation": { "docstring": "The limits for the curve.\n\n Returns\n 
=======\n\n limits : tuple\n Contains parameter and lower and upper limits.\n\n Examples\n ========\n\n >>> from sympy.abc import t\n >>> from sympy import Curve\n >>> C = Curve([t, t**3], (t, -2, 2))\n >>> C.limits\n (t, -2, 2)\n\n See Also\n ========\n\n plot_interval\n\n ", "n_words": 46, "vocab_size": 36, "n_whitespaces": 155, "language": "en" } }, { "id": 21475, "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py", "file_name": "tarfile.py", "fun_name": "extract", "commit_message": "Vendor in pip 22.1.2", "code": "def extract(self, member, path=\"\", set_attrs=True):\n \n self._check(\"r\")\n\n if isinstance(member, str):\n tarinfo = self.getmember(member)\n else:\n tarinfo = member\n\n # Prepare the link target for makelink().\n if tarinfo.islnk():\n tarinfo._link_target = os.path.join(path, tarinfo.linkname)\n\n try:\n self._extract_member(tarinfo, os.path.join(path, tarinfo.name),\n set_attrs=set_attrs)\n except EnvironmentError as e:\n if self.errorlevel > 0:\n raise\n else:\n if e.filename is None:\n self._dbg(1, \"tarfile: %s\" % e.strerror)\n else:\n self._dbg(1, \"tarfile: %s %r\" % (e.strerror, e.filename))\n except ExtractError as e:\n if self.errorlevel > 1:\n raise\n else:\n self._dbg(1, \"tarfile: %s\" % e)\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 372, "n_words": 76, "vocab_size": 52, "complexity": 8, "nloc": 24, "token_counts": 170, "n_ast_nodes": 279, "n_identifiers": 24, "d_id": 3863, "documentation": { "docstring": "Extract a member from the archive to the current working directory,\n using its full name. Its file information is extracted as accurately\n as possible. `member' may be a filename or a TarInfo object. You can\n specify a different directory using `path'. File attributes (owner,\n mtime, mode) are set unless `set_attrs' is False.\n ", "n_words": 52, "vocab_size": 45, "n_whitespaces": 99, "language": "en" } }, { "id": 221244, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/calendar.py", "file_name": "calendar.py", "fun_name": "formatmonthname", "commit_message": "add python 3.10.4 for windows", "code": "def formatmonthname(self, theyear, themonth, withyear=True):\n \n if withyear:\n s = '%s %s' % (month_name[themonth], theyear)\n else:\n s = '%s' % month_name[themonth]\n return '%s' % (\n self.cssclass_month_head, s)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 89, "n_words": 28, "vocab_size": 24, "complexity": 2, "nloc": 7, "token_counts": 49, "n_ast_nodes": 80, "n_identifiers": 8, "d_id": 56291, "documentation": { "docstring": "\n Return a month name as a table row.\n ", "n_words": 8, "vocab_size": 7, "n_whitespaces": 23, "language": "en" } }, { "id": 22039, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/_internal_utils.py", "file_name": "_internal_utils.py", "fun_name": "unicode_is_ascii", "commit_message": "Rename notpip to pip. 
Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def unicode_is_ascii(u_string):\n \n assert isinstance(u_string, str)\n try:\n u_string.encode(\"ascii\")\n return True\n except UnicodeEncodeError:\n return False\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 46, "n_words": 13, "vocab_size": 12, "complexity": 2, "nloc": 7, "token_counts": 28, "n_ast_nodes": 50, "n_identifiers": 6, "d_id": 4128, "documentation": { "docstring": "Determine if unicode string only contains ASCII characters.\n\n :param str u_string: unicode string to check. Must be unicode\n and not Python 2 `str`.\n :rtype: bool\n ", "n_words": 25, "vocab_size": 22, "n_whitespaces": 41, "language": "en" } }, { "id": 319727, "commit_id": "08c3d6e84b17da2acfb10250438fe357398e5e0e", "repo": "paperless-ngx", "path": "src/documents/tests/test_management_convert_thumbnail.py", "file_name": "test_management_convert_thumbnail.py", "fun_name": "test_do_nothing_if_converted", "commit_message": "Fixes existing testing, adds test coverage of new command", "code": "def test_do_nothing_if_converted(self, run_convert_mock):\n \n\n stdout, _ = self.call_command()\n run_convert_mock.assert_not_called()\n self.assertIn(\"Converting all PNG thumbnails to WebP\", stdout)\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 43, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 4, "token_counts": 30, "n_ast_nodes": 53, "n_identifiers": 8, "d_id": 116987, "documentation": { "docstring": "\n GIVEN:\n - Document exists with default WebP thumbnail path\n WHEN:\n - Thumbnail conversion is attempted\n THEN:\n - Nothing is converted\n ", "n_words": 20, "vocab_size": 17, "n_whitespaces": 82, "language": "en" } }, { "id": 204826, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/base/base.py", "file_name": "base.py", "fun_name": "set_rollback", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def set_rollback(self, rollback):\n \n if not self.in_atomic_block:\n raise TransactionManagementError(\n \"The rollback flag doesn't work outside of an 'atomic' block.\"\n )\n self.needs_rollback = rollback\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 80, "n_words": 22, "vocab_size": 21, "complexity": 2, "nloc": 6, "token_counts": 24, "n_ast_nodes": 43, "n_identifiers": 6, "d_id": 50909, "documentation": { "docstring": "\n Set or unset the \"needs rollback\" flag -- for *advanced use* only.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 27, "language": "en" } }, { "id": 207400, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_scripts/tests.py", "file_name": "tests.py", "fun_name": "test_warning_does_not_halt", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_warning_does_not_halt(self):\n \n\n self.write_settings(\n \"settings.py\",\n apps=[\n \"admin_scripts.app_raising_warning\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n ],\n sdict={\"DEBUG\": True},\n )\n args = [\"check\"]\n out, err = self.run_manage(args)\n expected_err = (\n \"System check identified some issues:\\n\" # No \"CommandError: \" part\n \"\\n\"\n \"WARNINGS:\\n\"\n \"?: A warning\\n\"\n 
\"\\n\"\n \"System check identified 1 issue (0 silenced).\\n\"\n )\n self.assertEqual(err, expected_err)\n self.assertNoOutput(out)\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 268, "n_words": 49, "vocab_size": 42, "complexity": 1, "nloc": 22, "token_counts": 71, "n_ast_nodes": 139, "n_identifiers": 12, "d_id": 51951, "documentation": { "docstring": "\n When there are only warnings or less serious messages, then Django\n should not prevent user from launching their project, so `check`\n command should not raise `CommandError` exception.\n\n In this test we also test output format.\n ", "n_words": 35, "vocab_size": 32, "n_whitespaces": 71, "language": "en" } }, { "id": 249167, "commit_id": "c97042f7eef3748e17c90e48a4122389a89c4735", "repo": "synapse", "path": "tests/rest/admin/test_statistics.py", "file_name": "test_statistics.py", "fun_name": "test_from", "commit_message": "Use literals in place of `HTTPStatus` constants in tests (#13469)", "code": "def test_from(self) -> None:\n \n self._create_users_with_media(20, 2)\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?from=5\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"users\"]), 15)\n self.assertNotIn(\"next_token\", channel.json_body)\n self._check_fields(channel.json_body[\"users\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 121, "n_words": 25, "vocab_size": 25, "complexity": 1, "nloc": 15, "token_counts": 103, "n_ast_nodes": 168, "n_identifiers": 15, "d_id": 72674, "documentation": { "docstring": "\n Testing list of media with a defined starting point (from)\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 232103, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/polar/_angularaxis.py", "file_name": "_angularaxis.py", "fun_name": "period", "commit_message": "switch to black .22", "code": "def period(self):\n \n return self[\"period\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 63547, "documentation": { "docstring": "\n Set the angular period. 
Has an effect only when\n `angularaxis.type` is \"category\".\n\n The 'period' property is a number and may be specified as:\n - An int or float in the interval [0, inf]\n\n Returns\n -------\n int|float\n ", "n_words": 36, "vocab_size": 34, "n_whitespaces": 95, "language": "en" } }, { "id": 102109, "commit_id": "d9106116aa5e399f7d63feeb7fc77f92a076dd93", "repo": "pytorch", "path": "torch/backends/_nnapi/serializer.py", "file_name": "serializer.py", "fun_name": "operand_to_template_torchscript", "commit_message": "nnapi: Add int32 type torchscript expressions (#70197)\n\nSummary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/70197\n\nTest Plan:\n* `pytest test/test_nnapi.py`\n* Testing via ops following this commit\n\nReviewed By: anshuljain1, dreiss\n\nDifferential Revision: D33237917\n\nfbshipit-source-id: f0493620f28a62ad9fe0b97b67d1e25059d50c24", "code": "def operand_to_template_torchscript(op_id, oper):\n \n shape_parts = [\"(\"]\n for d, s in enumerate(oper.shape):\n if s > 0:\n # Fixed shape dimension: just add the value.\n shape_parts.append(str(s))\n else:\n # Flexible shape dimension: it should have been computed in a variable.\n shape_parts.append(flex_name(op_id, d))\n shape_parts.append(\",\")\n shape_parts.append(\")\")\n shape_code = \"\".join(shape_parts)\n if oper.op_type == NNAPI_OperandCode.TENSOR_FLOAT32:\n return f\"torch.zeros({shape_code}, dtype=torch.float32)\"\n elif oper.op_type == NNAPI_OperandCode.TENSOR_INT32:\n return f\"torch.zeros({shape_code}, dtype=torch.int32)\"\n elif oper.op_type == NNAPI_OperandCode.TENSOR_QUANT8_ASYMM:\n return (\n f\"torch.quantize_per_tensor(\"\n f\"torch.zeros(1), scale={oper.scale}, zero_point={oper.zero_point}, dtype=torch.quint8)\"\n f\".expand({shape_code}).contiguous()\"\n )\n raise Exception(f\"Unsupported output operand type: {oper.op_type}\")\n", "url": "https://github.com/pytorch/pytorch.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 322, "n_words": 77, "vocab_size": 62, "complexity": 6, "nloc": 21, "token_counts": 120, "n_ast_nodes": 238, "n_identifiers": 21, "d_id": 21468, "documentation": { "docstring": "Return a TorchScript expression to build a template for a given operand.", "n_words": 12, "vocab_size": 10, "n_whitespaces": 11, "language": "en" } }, { "id": 48708, "commit_id": "c0d95cb9678b1693f8f1a8658d4665c51de87ddf", "repo": "django-rest-framework", "path": "tests/test_permissions.py", "file_name": "test_permissions.py", "fun_name": "test_ignore_model_permissions_with_unauthenticated_user", "commit_message": "Fix #8771 - Checking for authentication even if `_ignore_model_permissions = True` (#8772)", "code": "def test_ignore_model_permissions_with_unauthenticated_user(self):\n \n request = factory.get('/', format='json')\n request.resolver_match = ResolverMatch('get', (), {})\n response = ignored_get_queryset_list_view(request)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n", "url": "https://github.com/encode/django-rest-framework.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 51, "n_words": 16, "vocab_size": 14, "complexity": 1, "nloc": 5, "token_counts": 50, "n_ast_nodes": 86, "n_identifiers": 14, "d_id": 9572, "documentation": { "docstring": "\n We check that the ``_ignore_model_permissions`` attribute\n doesn't ignore the authentication.\n ", "n_words": 10, "vocab_size": 9, "n_whitespaces": 32, "language": "en" } }, { "id": 206470, "commit_id": 
"9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/test/testcases.py", "file_name": "testcases.py", "fun_name": "skipUnlessAnyDBFeature", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def skipUnlessAnyDBFeature(*features):\n \n return _deferredSkip(\n lambda: not any(\n getattr(connection.features, feature, False) for feature in features\n ),\n \"Database doesn't support any of the feature(s): %s\" % \", \".join(features),\n \"skipUnlessAnyDBFeature\",\n )\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 76, "n_words": 28, "vocab_size": 28, "complexity": 2, "nloc": 8, "token_counts": 43, "n_ast_nodes": 72, "n_identifiers": 8, "d_id": 51534, "documentation": { "docstring": "Skip a test unless a database has any of the named features.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 204850, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/base/introspection.py", "file_name": "introspection.py", "fun_name": "installed_models", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def installed_models(self, tables):\n \n tables = set(map(self.identifier_converter, tables))\n return {\n m\n for m in self.get_migratable_models()\n if self.identifier_converter(m._meta.db_table) in tables\n }\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 80, "n_words": 19, "vocab_size": 16, "complexity": 3, "nloc": 7, "token_counts": 46, "n_ast_nodes": 73, "n_identifiers": 10, "d_id": 50928, "documentation": { "docstring": "\n Return a set of all models represented by the provided list of table\n names.\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 36, "language": "en" } }, { "id": 223004, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/sysconfig.py", "file_name": "sysconfig.py", "fun_name": "customize_compiler", "commit_message": "add python 3.10.4 for windows", "code": "def customize_compiler(compiler):\n \n if compiler.compiler_type == \"unix\":\n if sys.platform == \"darwin\":\n # Perform first-time customization of compiler-related\n # config vars on OS X now that we know we need a compiler.\n # This is primarily to support Pythons from binary\n # installers. The kind and paths to build tools on\n # the user system may vary significantly from the system\n # that Python itself was built on. 
Also the user OS\n # version and build tools may not support the same set\n # of CPU architectures for universal builds.\n if not _config_vars.get('CUSTOMIZED_OSX_COMPILER'):\n import _osx_support\n _osx_support.customize_compiler(_config_vars)\n _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'\n\n (cc, cxx, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \\\n get_config_vars('CC', 'CXX', 'CFLAGS',\n 'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS')\n\n if 'CC' in os.environ:\n newcc = os.environ['CC']\n if (sys.platform == 'darwin'\n and 'LDSHARED' not in os.environ\n and ldshared.startswith(cc)):\n # On OS X, if CC is overridden, use that as the default\n # command for LDSHARED as well\n ldshared = newcc + ldshared[len(cc):]\n cc = newcc\n if 'CXX' in os.environ:\n cxx = os.environ['CXX']\n if 'LDSHARED' in os.environ:\n ldshared = os.environ['LDSHARED']\n if 'CPP' in os.environ:\n cpp = os.environ['CPP']\n else:\n cpp = cc + \" -E\" # not always\n if 'LDFLAGS' in os.environ:\n ldshared = ldshared + ' ' + os.environ['LDFLAGS']\n if 'CFLAGS' in os.environ:\n cflags = cflags + ' ' + os.environ['CFLAGS']\n ldshared = ldshared + ' ' + os.environ['CFLAGS']\n if 'CPPFLAGS' in os.environ:\n cpp = cpp + ' ' + os.environ['CPPFLAGS']\n cflags = cflags + ' ' + os.environ['CPPFLAGS']\n ldshared = ldshared + ' ' + os.environ['CPPFLAGS']\n if 'AR' in os.environ:\n ar = os.environ['AR']\n if 'ARFLAGS' in os.environ:\n archiver = ar + ' ' + os.environ['ARFLAGS']\n else:\n archiver = ar + ' ' + ar_flags\n\n cc_cmd = cc + ' ' + cflags\n compiler.set_executables(\n preprocessor=cpp,\n compiler=cc_cmd,\n compiler_so=cc_cmd + ' ' + ccshared,\n compiler_cxx=cxx,\n linker_so=ldshared,\n linker_exe=cc,\n archiver=archiver)\n\n compiler.shared_lib_extension = shlib_suffix\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 957, "n_words": 303, "vocab_size": 147, "complexity": 16, "nloc": 50, "token_counts": 369, "n_ast_nodes": 658, "n_identifiers": 32, "d_id": 56846, "documentation": { "docstring": "Do any platform-specific customization of a CCompiler instance.\n\n Mainly needed on Unix, so we can plug in the information that\n varies across Unices and is stored in Python's Makefile.\n ", "n_words": 29, "vocab_size": 28, "n_whitespaces": 38, "language": "en" } }, { "id": 182359, "commit_id": "18a3fb4b576f06e1e82e8a030d95bea2df1836b1", "repo": "textual", "path": "src/textual/widgets/tabs.py", "file_name": "tabs.py", "fun_name": "activate_previous_tab", "commit_message": "Tidying Tabs, adding docstrings", "code": "def activate_previous_tab(self) -> None:\n \n current_tab_index = self.find_tab_by_name(self._active_tab_name)\n previous_tab_index = current_tab_index - 1\n previous_tab_name = self.tabs[previous_tab_index].name\n self._active_tab_name = previous_tab_name\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 53, "n_words": 18, "vocab_size": 13, "complexity": 1, "nloc": 6, "token_counts": 38, "n_ast_nodes": 63, "n_identifiers": 9, "d_id": 43804, "documentation": { "docstring": "Activate the tab to the left of the currently active tab", "n_words": 11, "vocab_size": 8, "n_whitespaces": 10, "language": "en" } }, { "id": 218645, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/lib2to3/fixes/fix_dict.py", "file_name": "fix_dict.py", "fun_name": 
"transform", "commit_message": "add python 3.10.4 for windows", "code": "def transform(self, node, results):\n head = results[\"head\"]\n method = results[\"method\"][0] # Extract node for method name\n tail = results[\"tail\"]\n syms = self.syms\n method_name = method.value\n isiter = method_name.startswith(\"iter\")\n isview = method_name.startswith(\"view\")\n if isiter or isview:\n method_name = method_name[4:]\n assert method_name in (\"keys\", \"items\", \"values\"), repr(method)\n head = [n.clone() for n in head]\n tail = [n.clone() for n in tail]\n special = not tail and self.in_special_context(node, isiter)\n args = head + [pytree.Node(syms.trailer,\n [Dot(),\n Name(method_name,\n prefix=method.prefix)]),\n results[\"parens\"].clone()]\n new = pytree.Node(syms.power, args)\n if not (special or isview):\n new.prefix = \"\"\n new = Call(Name(\"iter\" if isiter else \"list\"), [new])\n if tail:\n new = pytree.Node(syms.power, [new] + tail)\n new.prefix = node.prefix\n return new\n\n P1 = \"power< func=NAME trailer< '(' node=any ')' > any* >\"\n p1 = patcomp.compile_pattern(P1)\n\n P2 = \n p2 = patcomp.compile_pattern(P2)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 440, "n_words": 127, "vocab_size": 81, "complexity": 10, "nloc": 27, "token_counts": 232, "n_ast_nodes": 416, "n_identifiers": 34, "d_id": 55443, "documentation": { "docstring": "for_stmt< 'for' any 'in' node=any ':' any* >\n | comp_for< 'for' any 'in' node=any any* >\n ", "n_words": 16, "vocab_size": 10, "n_whitespaces": 35, "language": "en" } }, { "id": 196112, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/partitions.py", "file_name": "partitions.py", "fun_name": "__lt__", "commit_message": "Updated import locations", "code": "def __lt__(self, other):\n \n return self.sort_key() < sympify(other).sort_key()\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 23, "n_ast_nodes": 40, "n_identifiers": 5, "d_id": 47612, "documentation": { "docstring": "\n Checks if a partition is less than the other.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Partition\n >>> a = Partition([1, 2], [3, 4, 5])\n >>> b = Partition([1], [2, 3], [4], [5])\n >>> a.rank, b.rank\n (9, 34)\n >>> a < b\n True\n ", "n_words": 42, "vocab_size": 34, "n_whitespaces": 120, "language": "en" } }, { "id": 154522, "commit_id": "1dc16415333bf2428ee2b1f4d31ff94e66b9a0a6", "repo": "modin", "path": "modin/core/execution/ray/implementations/cudf_on_ray/partitioning/partition.py", "file_name": "partition.py", "fun_name": "get_key", "commit_message": "REFACTOR-#5009: use RayWrapper.materialize instead of ray.get (#5010)\n\nSigned-off-by: Myachev ", "code": "def get_key(self):\n \n return (\n RayWrapper.materialize(self.key)\n if isinstance(self.key, ray.ObjectRef)\n else self.key\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 65, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 6, "token_counts": 32, "n_ast_nodes": 51, "n_identifiers": 8, "d_id": 36045, "documentation": { "docstring": "\n Get integer key of this partition in dict-storage of `self.gpu_manager`.\n\n Returns\n -------\n int\n ", "n_words": 13, 
"vocab_size": 12, "n_whitespaces": 49, "language": "en" } }, { "id": 163074, "commit_id": "aa889881863d2f47edd4580f128be4e138ae1e80", "repo": "pandas", "path": "pandas/core/dtypes/cast.py", "file_name": "cast.py", "fun_name": "find_common_type", "commit_message": "CLN: assorted follow-ups (#45184)", "code": "def find_common_type(types):\n \n if not types:\n raise ValueError(\"no types given\")\n\n first = types[0]\n\n # workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)\n # => object\n if lib.dtypes_all_equal(list(types)):\n return first\n\n # get unique types (dict.fromkeys is used as order-preserving set())\n types = list(dict.fromkeys(types).keys())\n\n if any(isinstance(t, ExtensionDtype) for t in types):\n for t in types:\n if isinstance(t, ExtensionDtype):\n res = t._get_common_dtype(types)\n if res is not None:\n return res\n return np.dtype(\"object\")\n\n # take lowest unit\n if all(is_datetime64_dtype(t) for t in types):\n return np.dtype(\"datetime64[ns]\")\n if all(is_timedelta64_dtype(t) for t in types):\n return np.dtype(\"timedelta64[ns]\")\n\n # don't mix bool / int or float or complex\n # this is different from numpy, which casts bool with float/int as int\n has_bools = any(is_bool_dtype(t) for t in types)\n if has_bools:\n for t in types:\n if is_integer_dtype(t) or is_float_dtype(t) or is_complex_dtype(t):\n return np.dtype(\"object\")\n\n return np.find_common_type(types, [])\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 317, "n_words": 131, "vocab_size": 74, "complexity": 18, "nloc": 24, "token_counts": 194, "n_ast_nodes": 322, "n_identifiers": 26, "d_id": 39366, "documentation": { "docstring": "\n Find a common data type among the given dtypes.\n\n Parameters\n ----------\n types : list of dtypes\n\n Returns\n -------\n pandas extension or numpy dtype\n\n See Also\n --------\n numpy.find_common_type\n\n ", "n_words": 27, "vocab_size": 27, "n_whitespaces": 61, "language": "en" } }, { "id": 20339, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pygments/formatters/img.py", "file_name": "img.py", "fun_name": "_get_text_color", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def _get_text_color(self, style):\n \n if style['color'] is not None:\n fill = '#' + style['color']\n else:\n fill = '#000'\n return fill\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 69, "n_words": 19, "vocab_size": 15, "complexity": 2, "nloc": 6, "token_counts": 32, "n_ast_nodes": 60, "n_identifiers": 4, "d_id": 3330, "documentation": { "docstring": "\n Get the correct color for the token from the style.\n ", "n_words": 10, "vocab_size": 8, "n_whitespaces": 25, "language": "en" } }, { "id": 291940, "commit_id": "af4e37339a39badd5596e8bc9ba86d6c1994aa1b", 
"repo": "core", "path": "homeassistant/components/sia/sia_entity_base.py", "file_name": "sia_entity_base.py", "fun_name": "async_added_to_hass", "commit_message": "Add Connectivity sensor to SIA (#64305)\n\n* implemented connectivity sensor\r\n\r\n* further cleanup off update code\r\n\r\n* cleanup and tighter behaviour for attributes\r\n\r\n* added seperate connectivity class to binary sensor\r\n\r\n* callbacks and keys\r\n\r\n* redid name and unique_id logic, non-breaking result\r\n\r\n* using entry more in inits\r\n\r\n* Fix import\r\n\r\n* fix ping_interval in sia_entity_base\r\n\r\n* added ping_interval default to next\r\n\r\n* fixed next\r\n\r\nCo-authored-by: Martin Hjelmare ", "code": "async def async_added_to_hass(self) -> None:\n \n self.async_on_remove(\n async_dispatcher_connect(\n self.hass,\n SIA_EVENT.format(self.port, self.account),\n self.async_handle_event,\n )\n )\n self.handle_last_state(await self.async_get_last_state())\n if self._attr_available:\n self.async_create_post_interval_update_cb()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 131, "n_words": 18, "vocab_size": 17, "complexity": 2, "nloc": 19, "token_counts": 58, "n_ast_nodes": 95, "n_identifiers": 14, "d_id": 91043, "documentation": { "docstring": "Run when entity about to be added to hass.\n\n Overridden from Entity.\n\n 1. register the dispatcher and add the callback to on_remove\n 2. get previous state from storage and pass to entity specific function\n 3. if available: create availability cb\n ", "n_words": 40, "vocab_size": 33, "n_whitespaces": 75, "language": "en" } }, { "id": 141909, "commit_id": "cc53a1e28bdb0dc7121f4378c651e6290b7bc84d", "repo": "ray", "path": "python/ray/air/tests/test_checkpoints.py", "file_name": "test_checkpoints.py", "fun_name": "test_metadata", "commit_message": "[air] update checkpoint.py to deal with metadata in conversion. (#25727)\n\nThis is carved out from https://github.com/ray-project/ray/pull/25558. \r\ntlrd: checkpoint.py current doesn't support the following\r\n```\r\na. from fs to dict checkpoint;\r\nb. drop some marker to dict checkpoint;\r\nc. convert back to fs checkpoint;\r\nd. convert back to dict checkpoint.\r\nAssert that the marker should still be there\r\n```", "code": "def test_metadata(self):\n \n checkpoint = self._prepare_fs_checkpoint()\n\n # Convert into dict checkpoint\n data_dict = checkpoint.to_dict()\n self.assertIsInstance(data_dict, dict)\n\n data_dict[\"my_marker\"] = \"marked\"\n\n # Create from dict\n checkpoint = Checkpoint.from_dict(data_dict)\n self.assertTrue(checkpoint._data_dict)\n\n self._assert_fs_checkpoint(checkpoint)\n\n # Convert back to dict\n data_dict_2 = Checkpoint.from_directory(checkpoint.to_directory()).to_dict()\n assert data_dict_2[\"my_marker\"] == \"marked\"\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 130, "n_words": 39, "vocab_size": 27, "complexity": 1, "nloc": 10, "token_counts": 79, "n_ast_nodes": 142, "n_identifiers": 16, "d_id": 32525, "documentation": { "docstring": "Test conversion with metadata involved.\n\n a. from fs to dict checkpoint;\n b. drop some marker to dict checkpoint;\n c. convert back to fs checkpoint;\n d. 
convert back to dict checkpoint.\n\n Assert that the marker should still be there.", "n_words": 38, "vocab_size": 27, "n_whitespaces": 72, "language": "en" } }, { "id": 67268, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/regional/report/uae_vat_201/uae_vat_201.py", "file_name": "uae_vat_201.py", "fun_name": "get_reverse_charge_recoverable_total", "commit_message": "style: format code with black", "code": "def get_reverse_charge_recoverable_total(filters):\n\t\n\tquery_filters = get_filters(filters)\n\tquery_filters.append([\"reverse_charge\", \"=\", \"Y\"])\n\tquery_filters.append([\"recoverable_reverse_charge\", \">\", \"0\"])\n\tquery_filters.append([\"docstatus\", \"=\", 1])\n\ttry:\n\t\treturn (\n\t\t\tfrappe.db.get_all(\n\t\t\t\t\"Purchase Invoice\", filters=query_filters, fields=[\"sum(total)\"], as_list=True, limit=1\n\t\t\t)[0][0]\n\t\t\tor 0\n\t\t)\n\texcept (IndexError, TypeError):\n\t\treturn 0\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 19, "n_words": 33, "vocab_size": 30, "complexity": 3, "nloc": 14, "token_counts": 96, "n_ast_nodes": 162, "n_identifiers": 13, "d_id": 14462, "documentation": { "docstring": "Returns the sum of the total of each Purchase invoice made with recoverable reverse charge.", "n_words": 15, "vocab_size": 13, "n_whitespaces": 14, "language": "en" } }, { "id": 218263, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/importlib/metadata/__init__.py", "file_name": "__init__.py", "fun_name": "find_distributions", "commit_message": "add python 3.10.4 for windows", "code": "def find_distributions(self, context=Context()):\n \n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 10, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 8, "token_counts": 12, "n_ast_nodes": 22, "n_identifiers": 4, "d_id": 55232, "documentation": { "docstring": "\n Find distributions.\n\n Return an iterable of all Distribution instances capable of\n loading the metadata for packages matching the ``context``,\n a DistributionFinder.Context instance.\n ", "n_words": 22, "vocab_size": 20, "n_whitespaces": 58, "language": "en" } }, { "id": 319621, "commit_id": "69ef26dab04d51e7e102dcb33cd98ddc6ad975fd", "repo": "paperless-ngx", "path": "src/documents/tests/test_file_handling.py", "file_name": "test_file_handling.py", "fun_name": "test_multiple_doc_paths", "commit_message": "Feature: Dynamic document storage pathes (#916)\n\n* Added devcontainer\r\n\r\n* Add feature storage pathes\r\n\r\n* Exclude tests and add versioning\r\n\r\n* Check escaping\r\n\r\n* Check escaping\r\n\r\n* Check quoting\r\n\r\n* Echo\r\n\r\n* Escape\r\n\r\n* Escape :\r\n\r\n* Double escape \\\r\n\r\n* Escaping\r\n\r\n* Remove if\r\n\r\n* Escape colon\r\n\r\n* Missing \\\r\n\r\n* Esacpe :\r\n\r\n* Escape all\r\n\r\n* test\r\n\r\n* Remove sed\r\n\r\n* Fix exclude\r\n\r\n* Remove SED command\r\n\r\n* Add LD_LIBRARY_PATH\r\n\r\n* Adjusted to v1.7\r\n\r\n* Updated test-cases\r\n\r\n* Remove devcontainer\r\n\r\n* Removed internal build-file\r\n\r\n* Run pre-commit\r\n\r\n* Corrected flak8 error\r\n\r\n* Adjusted to v1.7\r\n\r\n* Updated test-cases\r\n\r\n* Corrected flak8 error\r\n\r\n* Adjusted to new plural translations\r\n\r\n* Small adjustments due to code-review backend\r\n\r\n* Adjusted line-break\r\n\r\n* Removed 
PAPERLESS prefix from settings variables\r\n\r\n* Corrected style change due to search+replace\r\n\r\n* First documentation draft\r\n\r\n* Revert changes to Pipfile\r\n\r\n* Add sphinx-autobuild with keep-outdated\r\n\r\n* Revert merge error that results in wrong storage path is evaluated\r\n\r\n* Adjust styles of generated files ...\r\n\r\n* Adds additional testing to cover dynamic storage path functionality\r\n\r\n* Remove unnecessary condition\r\n\r\n* Add hint to edit storage path dialog\r\n\r\n* Correct spelling of pathes to paths\r\n\r\n* Minor documentation tweaks\r\n\r\n* Minor typo\r\n\r\n* improving wrapping of filter editor buttons with new storage path button\r\n\r\n* Update .gitignore\r\n\r\n* Fix select border radius in non input-groups\r\n\r\n* Better storage path edit hint\r\n\r\n* Add note to edit storage path dialog re document_renamer\r\n\r\n* Add note to bulk edit storage path re document_renamer\r\n\r\n* Rename FILTER_STORAGE_DIRECTORY to PATH\r\n\r\n* Fix broken filter rule parsing\r\n\r\n* Show default storage if unspecified\r\n\r\n* Remove note re storage path on bulk edit\r\n\r\n* Add basic validation of filename variables\r\n\r\nCo-authored-by: Markus Kling \r\nCo-authored-by: Trenton Holmes \r\nCo-authored-by: Michael Shamoon <4887959+shamoon@users.noreply.github.com>\r\nCo-authored-by: Quinn Casey ", "code": "def test_multiple_doc_paths(self):\n \n doc_a = Document.objects.create(\n title=\"does not matter\",\n created=timezone.make_aware(datetime.datetime(2020, 6, 25, 7, 36, 51, 153)),\n mime_type=\"application/pdf\",\n pk=2,\n checksum=\"2\",\n archive_serial_number=4,\n storage_path=StoragePath.objects.create(\n name=\"sp1\",\n path=\"ThisIsAFolder/{asn}/{created}\",\n ),\n )\n doc_b = Document.objects.create(\n title=\"does not matter\",\n created=timezone.make_aware(datetime.datetime(2020, 7, 25, 7, 36, 51, 153)),\n mime_type=\"application/pdf\",\n pk=5,\n checksum=\"abcde\",\n storage_path=StoragePath.objects.create(\n name=\"sp2\",\n path=\"SomeImportantNone/{created}\",\n ),\n )\n\n self.assertEqual(generate_filename(doc_a), \"ThisIsAFolder/4/2020-06-25.pdf\")\n self.assertEqual(generate_filename(doc_b), \"SomeImportantNone/2020-07-25.pdf\")\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 323, "n_words": 49, "vocab_size": 33, "complexity": 1, "nloc": 26, "token_counts": 170, "n_ast_nodes": 262, "n_identifiers": 22, "d_id": 116978, "documentation": { "docstring": "\n GIVEN:\n - Two documents, each with different storage paths\n WHEN:\n - the filename is generated for the documents\n THEN:\n - Each document generated filename uses its storage path\n ", "n_words": 28, "vocab_size": 22, "n_whitespaces": 90, "language": "en" } }, { "id": 204713, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/management/templates.py", "file_name": "templates.py", "fun_name": "download", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def download(self, url):\n \n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 10, "n_words": 3, "vocab_size": 3, "complexity": 9, "nloc": 37, "token_counts": 261, "n_ast_nodes": 15, "n_identifiers": 3, "d_id": 50847, "documentation": { "docstring": "\n Download the given URL and return the file name.\n ", "n_words": 9, "vocab_size": 8, "n_whitespaces": 24, "language": "en" 
} }, { "id": 306846, "commit_id": "5276d849ec497ccd0cecf3cb6a8dacae4fa6f845", "repo": "core", "path": "homeassistant/components/apple_tv/media_player.py", "file_name": "media_player.py", "fun_name": "media_album_name", "commit_message": "Improve type hints in apple_tv media player (#77940)", "code": "def media_album_name(self) -> str | None:\n \n if self._playing and self._is_feature_available(FeatureName.Album):\n return self._playing.album\n return None\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 46, "n_words": 14, "vocab_size": 13, "complexity": 3, "nloc": 5, "token_counts": 32, "n_ast_nodes": 53, "n_identifiers": 8, "d_id": 105629, "documentation": { "docstring": "Album name of current playing media, music track only.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 50243, "commit_id": "ffcde21305c61d950a9f93e57e6180c9a9665b87", "repo": "PaddleHub", "path": "modules/image/text_to_image/disco_diffusion_ernievil_base/vit_b_16x/ernievil2/transformers/efficientnet.py", "file_name": "efficientnet.py", "fun_name": "get_model_params", "commit_message": "add disco_diffusion_ernievil_base", "code": "def get_model_params(model_name, override_params):\n \n if model_name.startswith('efficientnet'):\n w, d, _, p = efficientnet_params(model_name)\n blocks_args, global_params = efficientnet(width_coefficient=w, depth_coefficient=d, dropout_rate=p)\n else:\n raise NotImplementedError('model name is not pre-defined: %s' % model_name)\n if override_params:\n global_params = global_params._replace(**override_params)\n return blocks_args, global_params\n\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 78, "n_words": 35, "vocab_size": 29, "complexity": 3, "nloc": 9, "token_counts": 71, "n_ast_nodes": 116, "n_identifiers": 17, "d_id": 10062, "documentation": { "docstring": " Get the block args and global params for a given model ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 12, "language": "en" } }, { "id": 63184, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py", "file_name": "__init__.py", "fun_name": "load", "commit_message": "upd; format", "code": "def load(self, path):\n \n path = os.path.normpath(path)\n mtime = os.stat(path).st_mtime\n\n if path not in self or self[path].mtime != mtime:\n manifest = self.build(path)\n self[path] = self.manifest_mod(manifest, mtime)\n\n return self[path].manifest\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 84, "n_words": 27, "vocab_size": 23, "complexity": 3, "nloc": 7, "token_counts": 71, "n_ast_nodes": 111, "n_identifiers": 11, "d_id": 13190, "documentation": { "docstring": "\n Load a manifest at path or return a suitable manifest already loaded.\n ", "n_words": 12, "vocab_size": 10, "n_whitespaces": 27, "language": "en" } }, { "id": 316440, "commit_id": "7cd68381f1d4f58930ffd631dfbfc7159d459832", "repo": "core", "path": "tests/test_config_entries.py", "file_name": "test_config_entries.py", "fun_name": "test_default_discovery_in_progress", "commit_message": "Search/replace RESULT_TYPE_* by FlowResultType enum (#74642)", "code": "async def test_default_discovery_in_progress(hass, 
manager):\n \n mock_integration(hass, MockModule(\"comp\"))\n mock_entity_platform(hass, \"config_flow.comp\", None)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 18, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 21, "token_counts": 165, "n_ast_nodes": 45, "n_identifiers": 6, "d_id": 115018, "documentation": { "docstring": "Test that a flow using default discovery can only be triggered once.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 249580, "commit_id": "be76cd8200b18f3c68b895f85ac7ef5b0ddc2466", "repo": "synapse", "path": "tests/storage/test_registration.py", "file_name": "test_registration.py", "fun_name": "test_override", "commit_message": "Allow admins to require a manual approval process before new accounts can be used (using MSC3866) (#13556)", "code": "def test_override(self) -> None:\n \n self.get_success(\n self.store.register_user(\n self.user_id,\n self.pwhash,\n approved=True,\n )\n )\n\n user = self.get_success(self.store.get_user_by_id(self.user_id))\n self.assertIsNotNone(user)\n assert user is not None\n self.assertEqual(user[\"approved\"], 1)\n\n approved = self.get_success(self.store.is_user_approved(self.user_id))\n self.assertTrue(approved)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 156, "n_words": 26, "vocab_size": 23, "complexity": 1, "nloc": 17, "token_counts": 94, "n_ast_nodes": 150, "n_identifiers": 14, "d_id": 73002, "documentation": { "docstring": "Tests that if we require approval for new accounts, but we explicitly say the\n new user should be considered approved, they're marked as approved.\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 38, "language": "en" } }, { "id": 207495, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/test_actions.py", "file_name": "test_actions.py", "fun_name": "test_action_column_class", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_action_column_class(self):\n \n response = self.client.get(reverse(\"admin:admin_views_subscriber_changelist\"))\n self.assertIsNotNone(response.context[\"action_form\"])\n self.assertContains(response, \"action-checkbox-column\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 36, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 38, "n_ast_nodes": 69, "n_identifiers": 9, "d_id": 51985, "documentation": { "docstring": "The checkbox column class is present in the response.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 290029, "commit_id": "1589c06203c0bc9f87adcc97fe34d5c52aaf403a", "repo": "core", "path": "homeassistant/components/bluetooth/util.py", "file_name": "util.py", "fun_name": "async_load_history_from_system", "commit_message": "Significantly reduce clock_gettime syscalls on platforms with broken vdso (#81257)", "code": "async def async_load_history_from_system() -> dict[str, BluetoothServiceInfoBleak]:\n \n if platform.system() != \"Linux\":\n return {}\n from bluetooth_adapters import ( # pylint: disable=import-outside-toplevel\n BlueZDBusObjects,\n )\n\n bluez_dbus = BlueZDBusObjects()\n await bluez_dbus.load()\n now = monotonic_time_coarse()\n return {\n address: 
BluetoothServiceInfoBleak(\n name=history.advertisement_data.local_name\n or history.device.name\n or history.device.address,\n address=history.device.address,\n rssi=history.device.rssi,\n manufacturer_data=history.advertisement_data.manufacturer_data,\n service_data=history.advertisement_data.service_data,\n service_uuids=history.advertisement_data.service_uuids,\n source=history.source,\n device=history.device,\n advertisement=history.advertisement_data,\n connectable=False,\n time=now,\n )\n for address, history in bluez_dbus.history.items()\n }\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 261, "n_words": 55, "vocab_size": 51, "complexity": 5, "nloc": 28, "token_counts": 153, "n_ast_nodes": 233, "n_identifiers": 27, "d_id": 89155, "documentation": { "docstring": "Load the device and advertisement_data history if available on the current system.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 223376, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/text_file.py", "file_name": "text_file.py", "fun_name": "open", "commit_message": "add python 3.10.4 for windows", "code": "def open(self, filename):\n \n self.filename = filename\n self.file = io.open(self.filename, 'r', errors=self.errors)\n self.current_line = 0\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 42, "n_words": 14, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 38, "n_ast_nodes": 62, "n_identifiers": 7, "d_id": 56871, "documentation": { "docstring": "Open a new file named 'filename'. 
This overrides both the\n 'filename' and 'file' arguments to the constructor.", "n_words": 17, "vocab_size": 16, "n_whitespaces": 27, "language": "en" } }, { "id": 204916, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/base/schema.py", "file_name": "schema.py", "fun_name": "execute", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def execute(self, sql, params=()):\n \n # Don't perform the transactional DDL check if SQL is being collected\n # as it's not going to be executed anyway.\n if (\n not self.collect_sql\n and self.connection.in_atomic_block\n and not self.connection.features.can_rollback_ddl\n ):\n raise TransactionManagementError(\n \"Executing DDL statements while in a transaction on databases \"\n \"that can't perform a rollback is prohibited.\"\n )\n # Account for non-string statement objects.\n sql = str(sql)\n # Log the command we're running, then run it\n logger.debug(\n \"%s; (params %r)\", sql, params, extra={\"params\": params, \"sql\": sql}\n )\n if self.collect_sql:\n ending = \"\" if sql.rstrip().endswith(\";\") else \";\"\n if params is not None:\n self.collected_sql.append(\n (sql % tuple(map(self.quote_value, params))) + ending\n )\n else:\n self.collected_sql.append(sql + ending)\n else:\n with self.connection.cursor() as cursor:\n cursor.execute(sql, params)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 420, "n_words": 117, "vocab_size": 91, "complexity": 7, "nloc": 26, "token_counts": 154, "n_ast_nodes": 264, "n_identifiers": 23, "d_id": 50973, "documentation": { "docstring": "Execute the given SQL statement, with optional parameters.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 115957, "commit_id": "b6a81acd4cff972d66bffa341782e76f2897e134", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/pinot_handler/pinot_handler.py", "file_name": "pinot_handler.py", "fun_name": "disconnect", "commit_message": "updated the logic of the disconnect() method", "code": "def disconnect(self):\r\n \r\n self.is_connected = False\r\n return\r\n\r", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 27, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 12, "n_ast_nodes": 23, "n_identifiers": 3, "d_id": 25613, "documentation": { "docstring": " Close any existing connections\r\n\r\n Should switch self.is_connected.\r\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 320679, "commit_id": "ab65c542a0551abf105eeb58803cd08bd040753b", "repo": "qutebrowser", "path": "tests/unit/components/test_readlinecommands.py", "file_name": "test_readlinecommands.py", "fun_name": "test_rl_backward_kill_word", "commit_message": "Add :rl-rubout and :rl-filename-rubout\n\nCloses #4561", "code": "def test_rl_backward_kill_word(lineedit, text, deleted, rest):\n \n _validate_deletion(lineedit,\n readlinecommands.rl_backward_kill_word, [],\n text, deleted, rest)\n\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 61, "n_words": 11, "vocab_size": 9, "complexity": 1, "nloc": 4, "token_counts": 29, "n_ast_nodes": 41, "n_identifiers": 8, "d_id": 117273, "documentation": { "docstring": "Delete to word beginning and see if it comes back 
with yank.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 247517, "commit_id": "32c828d0f760492711a98b11376e229d795fd1b3", "repo": "synapse", "path": "tests/rest/media/v1/test_media_storage.py", "file_name": "test_media_storage.py", "fun_name": "test_upload_ban", "commit_message": "Add type hints to `tests/rest`. (#12208)\n\nCo-authored-by: Patrick Cloke ", "code": "def test_upload_ban(self) -> None:\n \n\n data = b\"Some evil data\"\n\n self.helper.upload_media(\n self.upload_resource, data, tok=self.tok, expect_code=400\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 54, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 8, "token_counts": 34, "n_ast_nodes": 54, "n_identifiers": 8, "d_id": 71710, "documentation": { "docstring": "Attempt to upload some data that includes bytes \"evil\", which should\n get rejected by the spam checker.\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 31, "language": "en" } }, { "id": 277059, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/tf_inspect.py", "file_name": "tf_inspect.py", "fun_name": "ismethod", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def ismethod(obj):\n \n return _inspect.ismethod(tf.__internal__.decorator.unwrap(obj)[1])\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 10, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 25, "n_ast_nodes": 42, "n_identifiers": 7, "d_id": 81836, "documentation": { "docstring": "TFDecorator-aware replacement for inspect.ismethod.", "n_words": 4, "vocab_size": 4, "n_whitespaces": 3, "language": "en" } }, { "id": 67455, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/selling/report/territory_wise_sales/territory_wise_sales.py", "file_name": "territory_wise_sales.py", "fun_name": "get_sales_orders", "commit_message": "style: format code with black", "code": "def get_sales_orders(quotations):\n\tif not quotations:\n\t\treturn []\n\n\tquotation_names = [q.name for q in quotations]\n\n\treturn frappe.db.sql(\n\t\t.format(\n\t\t\t\", \".join([\"%s\"] * len(quotation_names))\n\t\t),\n\t\ttuple(quotation_names),\n\t\tas_dict=1,\n\t) # nosec\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 17, "n_words": 27, "vocab_size": 26, "complexity": 3, "nloc": 15, "token_counts": 59, "n_ast_nodes": 98, "n_identifiers": 13, "d_id": 14529, "documentation": { "docstring": "\n\tSELECT so.`name`, so.`base_grand_total`, soi.prevdoc_docname as quotation\n\tFROM `tabSales Order` so, `tabSales Order Item` soi\n\tWHERE so.docstatus=1 AND so.name = soi.parent AND soi.prevdoc_docname in ({0})\n\t", "n_words": 24, "vocab_size": 21, "n_whitespaces": 21, "language": "en" } }, { "id": 198989, "commit_id": "83a11729046d30d413b4a8b82512ff765f11e0b5", "repo": "sympy", "path": "sympy/physics/hep/tests/test_gamma_matrices.py", "file_name": "test_gamma_matrices.py", "fun_name": "test_bug_13636", "commit_message": "Fixes issue #13636 regarding handling traces of sums of products of GammaMatrix mixed with other factors.", "code": "def test_bug_13636():\n \n pi, ki, pf = tensor_heads(\"pi, ki, pf\", [LorentzIndex])\n i0, i1, i2, 
i3, i4 = tensor_indices(\"i0:5\", LorentzIndex)\n x = Symbol(\"x\")\n pis = pi(i2) * G(-i2)\n kis = ki(i3) * G(-i3)\n pfs = pf(i4) * G(-i4)\n\n a = pfs * G(i0) * kis * G(i1) * pis * G(-i1) * kis * G(-i0)\n b = pfs * G(i0) * kis * G(i1) * pis * x * G(-i0) * pi(-i1)\n ta = gamma_trace(a)\n tb = gamma_trace(b)\n t_a_plus_b = gamma_trace(a + b)\n assert ta.equals(\n -16 * ki(i0) * ki(-i0) * pf(i1) * pi(-i1)\n + 32 * ki(i0) * ki(i1) * pf(-i0) * pi(-i1)\n )\n assert tb.equals(-8 * x * ki(i0) * pf(-i0) * pi(i1) * pi(-i1))\n assert t_a_plus_b.equals(ta + tb)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 181, "n_words": 119, "vocab_size": 58, "complexity": 1, "nloc": 18, "token_counts": 254, "n_ast_nodes": 416, "n_identifiers": 25, "d_id": 49072, "documentation": { "docstring": "Test issue 13636 regarding handling traces of sums of products \n of GammaMatrix mixed with other factors.", "n_words": 16, "vocab_size": 14, "n_whitespaces": 19, "language": "en" } }, { "id": 48758, "commit_id": "1e69d066aa7f762a4b242c0519818577b7222e4c", "repo": "PaddleHub", "path": "modules/image/semantic_segmentation/ann_resnet50_cityscapes/layers.py", "file_name": "layers.py", "fun_name": "SyncBatchNorm", "commit_message": "add 10 segmentation model", "code": "def SyncBatchNorm(*args, **kwargs):\n \n if paddle.get_device() == 'cpu':\n return nn.BatchNorm2D(*args, **kwargs)\n else:\n return nn.SyncBatchNorm(*args, **kwargs)\n\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 37, "n_words": 14, "vocab_size": 12, "complexity": 2, "nloc": 5, "token_counts": 43, "n_ast_nodes": 74, "n_identifiers": 7, "d_id": 9594, "documentation": { "docstring": "In cpu environment nn.SyncBatchNorm does not have kernel so use nn.BatchNorm2D instead", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 176294, "commit_id": "b5d41847b8db0c82372faf69cd3a339d11da7ef0", "repo": "networkx", "path": "networkx/algorithms/shortest_paths/weighted.py", "file_name": "weighted.py", "fun_name": "bellman_ford_path_length", "commit_message": "DOC: Update documentation to include callables for weight argument (#5307)\n\nUpdate docs to include functions as valid input for weight argument.", "code": "def bellman_ford_path_length(G, source, target, weight=\"weight\"):\n \n if source == target:\n if source not in G:\n raise nx.NodeNotFound(f\"Node {source} not found in graph\")\n return 0\n\n weight = _weight_function(G, weight)\n\n length = _bellman_ford(G, [source], weight, target=target)\n\n try:\n return length[target]\n except KeyError as err:\n raise nx.NetworkXNoPath(f\"node {target} not reachable from {source}\") from err\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 106, "n_words": 49, "vocab_size": 40, "complexity": 4, "nloc": 11, "token_counts": 81, "n_ast_nodes": 138, "n_identifiers": 13, "d_id": 41811, "documentation": { "docstring": "Returns the shortest path length from source to target\n in a weighted graph.\n\n Parameters\n ----------\n G : NetworkX graph\n\n source : node label\n starting node for path\n\n target : node label\n ending node for path\n\n weight : string or function (default=\"weight\")\n If this is a string, then edge weights will be accessed via the\n edge attribute with this key (that is, the 
weight of the edge\n joining `u` to `v` will be ``G.edges[u, v][weight]``). If no\n such edge attribute exists, the weight of the edge is assumed to\n be one.\n\n If this is a function, the weight of an edge is the value\n returned by the function. The function must accept exactly three\n positional arguments: the two endpoints of an edge and the\n dictionary of edge attributes for that edge. The function must\n return a number.\n\n Returns\n -------\n length : number\n Shortest path length.\n\n Raises\n ------\n NodeNotFound\n If `source` is not in `G`.\n\n NetworkXNoPath\n If no path exists between source and target.\n\n Examples\n --------\n >>> G = nx.path_graph(5)\n >>> nx.bellman_ford_path_length(G, 0, 4)\n 4\n\n Notes\n -----\n Edge weight attributes must be numerical.\n Distances are calculated as sums of weighted edges traversed.\n\n See Also\n --------\n dijkstra_path_length, bellman_ford_path\n ", "n_words": 195, "vocab_size": 116, "n_whitespaces": 381, "language": "en" } }, { "id": 246370, "commit_id": "546b9c9e648f5e2b25bb7c8350570787ff9befae", "repo": "synapse", "path": "tests/storage/databases/test_state_store.py", "file_name": "test_state_store.py", "fun_name": "test_in_flight_requests_stop_being_in_flight", "commit_message": "Add more tests for in-flight state query duplication. (#12033)", "code": "def test_in_flight_requests_stop_being_in_flight(self) -> None:\n \n req1 = ensureDeferred(\n self.state_datastore._get_state_for_group_using_inflight_cache(\n 42, StateFilter.all()\n )\n )\n self.pump(by=0.1)\n\n # This should have gone to the database\n self.assertEqual(len(self.get_state_group_calls), 1)\n self.assertFalse(req1.called)\n\n # Complete the request right away.\n self._complete_request_fake(*self.get_state_group_calls[0])\n self.assertTrue(req1.called)\n\n # Send off another request\n req2 = ensureDeferred(\n self.state_datastore._get_state_for_group_using_inflight_cache(\n 42, StateFilter.all()\n )\n )\n self.pump(by=0.1)\n\n # It should have gone to the database again, because the previous request\n # isn't in-flight and therefore isn't available for deduplication.\n self.assertEqual(len(self.get_state_group_calls), 2)\n self.assertFalse(req2.called)\n\n # Complete the request right away.\n self._complete_request_fake(*self.get_state_group_calls[1])\n self.assertTrue(req2.called)\n groups, sf, d = self.get_state_group_calls[0]\n\n self.assertEqual(self.get_success(req1), FAKE_STATE)\n self.assertEqual(self.get_success(req2), FAKE_STATE)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 330, "n_words": 88, "vocab_size": 55, "complexity": 1, "nloc": 29, "token_counts": 186, "n_ast_nodes": 295, "n_identifiers": 23, "d_id": 71182, "documentation": { "docstring": "\n Tests that in-flight request deduplication doesn't somehow 'hold on'\n to completed requests: once they're done, they're taken out of the\n in-flight cache.\n ", "n_words": 22, "vocab_size": 20, "n_whitespaces": 51, "language": "en" } }, { "id": 220981, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/windows_events.py", "file_name": "windows_events.py", "fun_name": "wait_for_handle", "commit_message": "add python 3.10.4 for windows", "code": "def wait_for_handle(self, handle, timeout=None):\n \n return self._wait_for_handle(handle, timeout, False)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 23, "n_ast_nodes": 35, "n_identifiers": 5, "d_id": 56179, "documentation": { "docstring": "Wait for a handle.\n\n Return a Future object. The result of the future is True if the wait\n completed, or False if the wait did not complete (on timeout).\n ", "n_words": 29, "vocab_size": 24, "n_whitespaces": 50, "language": "en" } }, { "id": 61935, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py", "file_name": "database.py", "fun_name": "make_dist", "commit_message": "upd; format", "code": "def make_dist(name, version, **kwargs):\n \n summary = kwargs.pop('summary', 'Placeholder for summary')\n md = Metadata(**kwargs)\n md.name = name\n md.version = version\n md.summary = summary or 'Placeholder for summary'\n return Distribution(md)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 49, "n_words": 28, "vocab_size": 21, "complexity": 2, "nloc": 7, "token_counts": 50, "n_ast_nodes": 87, "n_identifiers": 9, "d_id": 12759, "documentation": { "docstring": "\n A convenience method for making a dist given just a name and version.\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 20, "language": "en" } }, { "id": 266784, "commit_id": "a06fa496d3f837cca3c437ab6e9858525633d147", "repo": "ansible", "path": "test/lib/ansible_test/_internal/inventory.py", "file_name": "inventory.py", "fun_name": "create_posix_inventory", "commit_message": "ansible-test - Code cleanup and refactoring. (#77169)\n\n* Remove unnecessary PyCharm ignores.\r\n* Ignore intentional undefined attribute usage.\r\n* Add missing type hints. 
Fix existing type hints.\r\n* Fix docstrings and comments.\r\n* Use function to register completion handler.\r\n* Pass strings to display functions.\r\n* Fix CompositeAction handling of dest argument.\r\n* Use consistent types in expressions/assignments.\r\n* Use custom function to keep linters happy.\r\n* Add missing raise for custom exception.\r\n* Clean up key/value type handling in cloud plugins.\r\n* Use dataclass instead of dict for results.\r\n* Add custom type_guard function to check lists.\r\n* Ignore return type that can't be checked (yet).\r\n* Avoid changing types on local variables.", "code": "def create_posix_inventory(args, path, target_hosts, needs_ssh=False): # type: (EnvironmentConfig, str, t.List[HostProfile], bool) -> None\n \n target_hosts = t.cast(t.List[SshTargetHostProfile], target_hosts)\n\n if len(target_hosts) != 1:\n raise Exception()\n\n target_host = target_hosts[0]\n\n if isinstance(target_host, ControllerProfile) and not needs_ssh:\n inventory = Inventory(\n host_groups=dict(\n testgroup=dict(\n testhost=dict(\n ansible_connection='local',\n ansible_pipelining='yes',\n ansible_python_interpreter=target_host.python.path,\n ),\n ),\n ),\n )\n else:\n connections = target_host.get_controller_target_connections()\n\n if len(connections) != 1:\n raise Exception()\n\n ssh = connections[0]\n\n testhost = dict(\n ansible_connection='ssh',\n ansible_pipelining='yes',\n ansible_python_interpreter=ssh.settings.python_interpreter,\n ansible_host=ssh.settings.host,\n ansible_port=ssh.settings.port,\n ansible_user=ssh.settings.user,\n ansible_ssh_private_key_file=ssh.settings.identity_file,\n ) # type: t.Dict[str, t.Optional[t.Union[str, int]]]\n\n if ssh.become:\n testhost.update(\n ansible_become='yes',\n ansible_become_method=ssh.become.method,\n )\n\n testhost = exclude_none_values(testhost)\n\n inventory = Inventory(\n host_groups=dict(\n testgroup=dict(\n testhost=testhost,\n ),\n ),\n )\n\n inventory.write(args, path)\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 571, "n_words": 94, "vocab_size": 65, "complexity": 6, "nloc": 45, "token_counts": 234, "n_ast_nodes": 365, "n_identifiers": 44, "d_id": 78587, "documentation": { "docstring": "Create and return inventory for use in POSIX integration tests.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 269108, "commit_id": "9dc9a78cc6502226775a99725c654fab3298aa5f", "repo": "keras", "path": "keras/utils/image_utils.py", "file_name": "image_utils.py", "fun_name": "smart_resize", "commit_message": "Expose all utilities in `keras.utils.__init__.py`, and resolve the hourglass import issues that led to the creation of an extraneous `all_utils.py` file / library.\n\nPiperOrigin-RevId: 435725558", "code": "def smart_resize(x, size, interpolation='bilinear'):\n \n if len(size) != 2:\n raise ValueError('Expected `size` to be a tuple of 2 integers, '\n f'but got: {size}.')\n img = tf.convert_to_tensor(x)\n if img.shape.rank is not None:\n if img.shape.rank < 3 or img.shape.rank > 4:\n raise ValueError(\n 'Expected an image array with shape `(height, width, channels)`, '\n 'or `(batch_size, height, width, channels)`, but '\n f'got input with incorrect rank, of shape {img.shape}.')\n shape = tf.shape(img)\n height, width = shape[-3], shape[-2]\n target_height, target_width = size\n if img.shape.rank is not None:\n static_num_channels = img.shape[-1]\n else:\n static_num_channels = None\n\n 
crop_height = tf.cast(\n tf.cast(width * target_height, 'float32') / target_width, 'int32')\n crop_width = tf.cast(\n tf.cast(height * target_width, 'float32') / target_height, 'int32')\n\n # Set back to input height / width if crop_height / crop_width is not smaller.\n crop_height = tf.minimum(height, crop_height)\n crop_width = tf.minimum(width, crop_width)\n\n crop_box_hstart = tf.cast(\n tf.cast(height - crop_height, 'float32') / 2, 'int32')\n crop_box_wstart = tf.cast(tf.cast(width - crop_width, 'float32') / 2, 'int32')\n\n if img.shape.rank == 4:\n crop_box_start = tf.stack([0, crop_box_hstart, crop_box_wstart, 0])\n crop_box_size = tf.stack([-1, crop_height, crop_width, -1])\n else:\n crop_box_start = tf.stack([crop_box_hstart, crop_box_wstart, 0])\n crop_box_size = tf.stack([crop_height, crop_width, -1])\n\n img = tf.slice(img, crop_box_start, crop_box_size)\n img = tf.image.resize(images=img, size=size, method=interpolation)\n # Apparent bug in resize_images_v2 may cause shape to be lost\n if img.shape.rank is not None:\n if img.shape.rank == 4:\n img.set_shape((None, None, None, static_num_channels))\n if img.shape.rank == 3:\n img.set_shape((None, None, static_num_channels))\n if isinstance(x, np.ndarray):\n return img.numpy()\n return img\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 360, "n_words": 226, "vocab_size": 122, "complexity": 11, "nloc": 43, "token_counts": 404, "n_ast_nodes": 646, "n_identifiers": 35, "d_id": 79905, "documentation": { "docstring": "Resize images to a target size without aspect ratio distortion.\n\n Warning: `tf.keras.preprocessing.image.smart_resize` is not recommended for\n new code. Prefer `tf.keras.layers.Resizing`, which provides the same\n functionality as a preprocessing layer and adds `tf.RaggedTensor` support. See\n the [preprocessing layer guide](\n https://www.tensorflow.org/guide/keras/preprocessing_layers)\n for an overview of preprocessing layers.\n\n TensorFlow image datasets typically yield images that have each a different\n size. However, these images need to be batched before they can be\n processed by Keras layers. To be batched, images need to share the same height\n and width.\n\n You could simply do:\n\n ```python\n size = (200, 200)\n ds = ds.map(lambda img: tf.image.resize(img, size))\n ```\n\n However, if you do this, you distort the aspect ratio of your images, since\n in general they do not all have the same aspect ratio as `size`. This is\n fine in many cases, but not always (e.g. for GANs this can be a problem).\n\n Note that passing the argument `preserve_aspect_ratio=True` to `resize`\n will preserve the aspect ratio, but at the cost of no longer respecting the\n provided target size. Because `tf.image.resize` doesn't crop images,\n your output images will still have different sizes.\n\n This calls for:\n\n ```python\n size = (200, 200)\n ds = ds.map(lambda img: smart_resize(img, size))\n ```\n\n Your output images will actually be `(200, 200)`, and will not be distorted.\n Instead, the parts of the image that do not fit within the target size\n get cropped out.\n\n The resizing process is:\n\n 1. Take the largest centered crop of the image that has the same aspect ratio\n as the target size. For instance, if `size=(200, 200)` and the input image has\n size `(340, 500)`, we take a crop of `(340, 340)` centered along the width.\n 2. Resize the cropped image to the target size. 
In the example above,\n we resize the `(340, 340)` crop to `(200, 200)`.\n\n Args:\n x: Input image or batch of images (as a tensor or NumPy array). Must be in\n format `(height, width, channels)` or `(batch_size, height, width,\n channels)`.\n size: Tuple of `(height, width)` integer. Target size.\n interpolation: String, interpolation to use for resizing. Defaults to\n `'bilinear'`. Supports `bilinear`, `nearest`, `bicubic`, `area`,\n `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`.\n\n Returns:\n Array with shape `(size[0], size[1], channels)`. If the input image was a\n NumPy array, the output is a NumPy array, and if it was a TF tensor,\n the output is a TF tensor.\n ", "n_words": 383, "vocab_size": 215, "n_whitespaces": 460, "language": "en" } }, { "id": 74165, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/tests/test_blocks.py", "file_name": "test_blocks.py", "fun_name": "test_get_api_representation_calls_same_method_on_fields_with_context", "commit_message": "Reformat with black", "code": "def test_get_api_representation_calls_same_method_on_fields_with_context(self):\n \n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 9, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 17, "token_counts": 85, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 16227, "documentation": { "docstring": "\n The get_api_representation method of a StructBlock should invoke\n the block's get_api_representation method on each field and the\n context should be passed on.\n ", "n_words": 22, "vocab_size": 18, "n_whitespaces": 51, "language": "en" } }, { "id": 310232, "commit_id": "3393b78e080f2e456337205d5bd1c9d2cd810625", "repo": "core", "path": "homeassistant/components/plaato/binary_sensor.py", "file_name": "binary_sensor.py", "fun_name": "device_class", "commit_message": "Remove plaato from mypy ignore list (#64516)\n\nCo-authored-by: epenet ", "code": "def device_class(self) -> BinarySensorDeviceClass | None:\n \n if self._coordinator is None:\n return None\n if self._sensor_type is PlaatoKeg.Pins.LEAK_DETECTION:\n return BinarySensorDeviceClass.PROBLEM\n if self._sensor_type is PlaatoKeg.Pins.POURING:\n return BinarySensorDeviceClass.OPENING\n return None\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 94, "n_words": 26, "vocab_size": 16, "complexity": 4, "nloc": 9, "token_counts": 51, "n_ast_nodes": 82, "n_identifiers": 11, "d_id": 108919, "documentation": { "docstring": "Return the class of this device, from BinarySensorDeviceClass.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 244031, "commit_id": "cac356380d505bf15587f07c0529218cc36b9652", "repo": "mmdetection", "path": "mmdet/core/bbox/match_costs/match_cost.py", "file_name": "match_cost.py", "fun_name": "binary_mask_dice_loss", "commit_message": "[Feature] Add Maskformer to mmdet (#7212)\n\n* first commit\r\n\r\n* add README\r\n\r\n* move model description from config to readme\r\n\r\nadd description for binary_input\r\n\r\nadd description for dice loss\r\n\r\nadd a independent panoptic gt processing function\r\n\r\nadd a independent panoptic gt processing function\r\n\r\nremove compatibility of pretrain in maskformer\r\n\r\n* update comments in maskformer_head\r\n\r\n* update docs format", "code": "def binary_mask_dice_loss(self, mask_preds, gt_masks):\n \n 
mask_preds = mask_preds.flatten(1)\n gt_masks = gt_masks.flatten(1).float()\n numerator = 2 * torch.einsum('nc,mc->nm', mask_preds, gt_masks)\n denominator = mask_preds.sum(-1)[:, None] + gt_masks.sum(-1)[None, :]\n loss = 1 - (numerator + self.eps) / (denominator + self.eps)\n return loss\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 86, "n_words": 37, "vocab_size": 28, "complexity": 1, "nloc": 7, "token_counts": 92, "n_ast_nodes": 146, "n_identifiers": 13, "d_id": 70203, "documentation": { "docstring": "\n Args:\n mask_preds (Tensor): Mask prediction in shape (num_query, *).\n gt_masks (Tensor): Ground truth in shape (num_gt, *)\n store 0 or 1, 0 for negative class and 1 for\n positive class.\n\n Returns:\n Tensor: Dice cost matrix in shape (num_query, num_gt).\n ", "n_words": 39, "vocab_size": 31, "n_whitespaces": 124, "language": "en" } }, { "id": 284435, "commit_id": "f47e10918a3193c5e0c85981fb769e1b680f5f9d", "repo": "OpenBBTerminal", "path": "openbb_terminal/jupyter/widget_helpers.py", "file_name": "widget_helpers.py", "fun_name": "tab_clickable_evt", "commit_message": "Simplify way to generate report templates (#1788)\n\n* simplify way to generate report templates\r\n\r\n* add documentation\r\n\r\nCo-authored-by: James Maslek ", "code": "def tab_clickable_evt() -> str:\n \n return \n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 12, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 33, "token_counts": 9, "n_ast_nodes": 20, "n_identifiers": 2, "d_id": 84718, "documentation": { "docstring": "Adds javascript code within HTML at the bottom that allows the interactivity with tabs.\n\n Returns\n -------\n str\n javascript code in HTML to process interactive tabs\n \n ", "n_words": 93, "vocab_size": 67, "n_whitespaces": 308, "language": "en" } }, { "id": 265888, "commit_id": "9628dead07ccef9608b32906aa8194bc948e5a09", "repo": "netbox", "path": "netbox/netbox/search/__init__.py", "file_name": "__init__.py", "fun_name": "register_search", "commit_message": "Closes #10560: New global search (#10676)\n\n* Initial work on new search backend\r\n\r\n* Clean up search backends\r\n\r\n* Return only the most relevant result per object\r\n\r\n* Clear any pre-existing cached entries on cache()\r\n\r\n* #6003: Implement global search functionality for custom field values\r\n\r\n* Tweak field weights & document guidance\r\n\r\n* Extend search() to accept a lookup type\r\n\r\n* Move get_registry() out of SearchBackend\r\n\r\n* Enforce object permissions when returning search results\r\n\r\n* Add indexers for remaining models\r\n\r\n* Avoid calling remove() on non-cacheable objects\r\n\r\n* Use new search backend by default\r\n\r\n* Extend search backend to filter by object type\r\n\r\n* Clean up search view form\r\n\r\n* Enable specifying lookup logic\r\n\r\n* Add indexes for value field\r\n\r\n* Remove object type selector from search bar\r\n\r\n* Introduce SearchTable and enable HTMX for results\r\n\r\n* Enable pagination\r\n\r\n* Remove legacy search backend\r\n\r\n* Cleanup\r\n\r\n* Use a UUID for CachedValue primary key\r\n\r\n* Refactoring search methods\r\n\r\n* Define max search results limit\r\n\r\n* Extend reindex command to support specifying particular models\r\n\r\n* Add clear() and size to SearchBackend\r\n\r\n* Optimize bulk caching performance\r\n\r\n* Highlight 
matched portion of field value\r\n\r\n* Performance improvements for reindexing\r\n\r\n* Started on search tests\r\n\r\n* Cleanup & docs\r\n\r\n* Documentation updates\r\n\r\n* Clean up SearchIndex\r\n\r\n* Flatten search registry to register by app_label.model_name\r\n\r\n* Clean up search backend classes\r\n\r\n* Clean up RestrictedGenericForeignKey and RestrictedPrefetch\r\n\r\n* Resolve migrations conflict", "code": "def register_search(cls):\n \n model = cls.model\n label = f'{model._meta.app_label}.{model._meta.model_name}'\n registry['search'][label] = cls\n\n return cls\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 28, "n_words": 13, "vocab_size": 10, "complexity": 1, "nloc": 5, "token_counts": 26, "n_ast_nodes": 66, "n_identifiers": 8, "d_id": 78228, "documentation": { "docstring": "\n Decorator for registering a SearchIndex class.\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 13, "language": "en" } }, { "id": 100841, "commit_id": "ff6b0209dd5ad57b81b0aca570df7f39a7119bfb", "repo": "faceswap", "path": "plugins/train/model/_base/model.py", "file_name": "model.py", "fun_name": "increment_iterations", "commit_message": "Refactoring and TravisCI to Github Actions (#1239)\n\n* refactor training\r\n\r\n* travis to actions", "code": "def increment_iterations(self) -> None:\n \n self._iterations += 1\n self._sessions[self._session_id][\"iterations\"] += 1\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 31, "n_words": 10, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 26, "n_ast_nodes": 45, "n_identifiers": 5, "d_id": 20292, "documentation": { "docstring": " Increment :attr:`iterations` and session iterations by 1. 
", "n_words": 7, "vocab_size": 7, "n_whitespaces": 8, "language": "en" } }, { "id": 337294, "commit_id": "fb5ed62c102c0323486b89805e1888495de3db15", "repo": "accelerate", "path": "src/accelerate/utils.py", "file_name": "utils.py", "fun_name": "slice_tensors", "commit_message": "Convert documentation to the new front (#271)\n\n* Main conversion\r\n\r\n* Doc styling\r\n\r\n* Style\r\n\r\n* New front deploy\r\n\r\n* Fixes\r\n\r\n* Fixes\r\n\r\n* Fix new docstrings\r\n\r\n* Style", "code": "def slice_tensors(data, tensor_slice):\n \n", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 6, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 3, "token_counts": 19, "n_ast_nodes": 15, "n_identifiers": 3, "d_id": 121008, "documentation": { "docstring": "\n Recursively takes a slice in a nested list/tuple/dictionary of tensors.\n\n Args:\n data (nested list/tuple/dictionary of `torch.Tensor`):\n The data to slice.\n tensor_slice (`slice`):\n The slice to take.\n\n Returns:\n The same data structure as `data` with all the tensors slices.\n ", "n_words": 38, "vocab_size": 29, "n_whitespaces": 94, "language": "en" } }, { "id": 25044, "commit_id": "b5268dc3a0847dce2668265e07ff50d54265b2d8", "repo": "PaddleOCR", "path": "ppocr/metrics/ct_metric.py", "file_name": "ct_metric.py", "fun_name": "get_metric", "commit_message": "add centripetal text model", "code": "def get_metric(self):\n \n metrics = combine_results(self.results, rec_flag=False)\n self.reset()\n return metrics\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 37, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 25, "n_ast_nodes": 44, "n_identifiers": 7, "d_id": 4849, "documentation": { "docstring": "\n Input format: y0,x0, ..... yn,xn. 
Each detection is separated by the end of line token ('\\n')'\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 31, "language": "en" } }, { "id": 205316, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/migrations/operations/base.py", "file_name": "base.py", "fun_name": "database_forwards", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def database_forwards(self, app_label, schema_editor, from_state, to_state):\n \n raise NotImplementedError(\n \"subclasses of Operation must provide a database_forwards() method\"\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 49, "n_words": 17, "vocab_size": 17, "complexity": 1, "nloc": 4, "token_counts": 19, "n_ast_nodes": 31, "n_identifiers": 7, "d_id": 51091, "documentation": { "docstring": "\n Perform the mutation on the database schema in the normal\n (forwards) direction.\n ", "n_words": 12, "vocab_size": 10, "n_whitespaces": 34, "language": "en" } }, { "id": 63303, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", "file_name": "pyparsing.py", "fun_name": "asList", "commit_message": "upd; format", "code": "def asList(self):\n \n return [res.asList() if isinstance(res, ParseResults) else res for res in self.__toklist]\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 27, "n_words": 13, "vocab_size": 12, "complexity": 3, "nloc": 2, "token_counts": 29, "n_ast_nodes": 46, "n_identifiers": 6, "d_id": 13240, "documentation": { "docstring": "\n Returns the parse results as a nested list of matching tokens, all converted to strings.\n\n Example::\n\n patt = OneOrMore(Word(alphas))\n result = patt.parseString(\"sldkj lsdkj sldkj\")\n # even though the result prints in string-like form, it is actually a pyparsing ParseResults\n print(type(result), result) # -> ['sldkj', 'lsdkj', 'sldkj']\n\n # Use asList() to create an actual list\n result_list = result.asList()\n print(type(result_list), result_list) # -> ['sldkj', 'lsdkj', 'sldkj']\n ", "n_words": 68, "vocab_size": 53, "n_whitespaces": 167, "language": "en" } }, { "id": 100383, "commit_id": "c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "plugins/train/model/_base.py", "file_name": "_base.py", "fun_name": "_load", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. 
Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def _load(self, config_changeable_items):\n \n logger.debug(\"Loading State\")\n if not os.path.exists(self._filename):\n logger.info(\"No existing state file found. Generating.\")\n return\n state = self._serializer.load(self._filename)\n self._name = state.get(\"name\", self._name)\n self._sessions = state.get(\"sessions\", {})\n self._lowest_avg_loss = state.get(\"lowest_avg_loss\", {})\n self._iterations = state.get(\"iterations\", 0)\n self._config = state.get(\"config\", {})\n logger.debug(\"Loaded state: %s\", state)\n self._replace_config(config_changeable_items)\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 142, "n_words": 43, "vocab_size": 35, "complexity": 2, "nloc": 13, "token_counts": 125, "n_ast_nodes": 214, "n_identifiers": 20, "d_id": 19869, "documentation": { "docstring": " Load a state file and set the serialized values to the class instance.\n\n Updates the model's config with the values stored in the state file.\n\n Parameters\n ----------\n config_changeable_items: dict\n Configuration options that can be altered when resuming a model, and their current\n values\n ", "n_words": 43, "vocab_size": 34, "n_whitespaces": 101, "language": "en" } }, { "id": 121009, "commit_id": "f6476f7a03f8390627c1a8e2a2ec8702d8a320e5", "repo": "jax", "path": "jax/_src/numpy/polynomial.py", "file_name": "polynomial.py", "fun_name": "_roots_with_zeros", "commit_message": "jnp.roots: better support for computation under JIT", "code": "def _roots_with_zeros(p, num_leading_zeros):\n # Avoid lapack errors when p is all zero\n p = _where(len(p) == num_leading_zeros, 1.0, p)\n # Roll any leading zeros to the end & compute the roots\n roots = _roots_no_zeros(roll(p, -num_leading_zeros))\n # Sort zero roots to the end.\n roots = lax.sort_key_val(roots == 0, roots)[1]\n # Set roots associated with num_leading_zeros to NaN\n return _where(arange(roots.size) < roots.size - num_leading_zeros, roots, complex(np.nan, np.nan))\n\n\n@_wraps(np.roots, lax_description=,\nextra_params=)", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "@_wraps(np.roots, lax_description=\"\"\"\\\nUnlike the numpy version of this function, the JAX version returns the roots in\na complex array regardless of the values of the roots. Additionally, the jax\nversion of this function adds the ``strip_zeros`` function which must be set to\nFalse for the function to be compatible with JIT and other JAX transformations.\nWith ``strip_zeros=False``, if your coefficients have leading zeros, the\nroots will be padded with NaN values:\n\n>>> coeffs = jnp.array([0, 1, 2])\n\n# The default behavior matches numpy and strips leading zeros:\n>>> jnp.roots(coeffs)\nDeviceArray([-2.+0.j], dtype=complex64)\n\n# With strip_zeros=False, extra roots are set to NaN:\n>>> jnp.roots(coeffs, strip_zeros=False)\nDeviceArray([-2. +0.j, nan+nanj], dtype=complex64)\n\"\"\",\nextra_params=\"\"\"\nstrip_zeros : bool, default=True\n If set to True, then leading zeros in the coefficients will be stripped, similar\n to :func:`numpy.roots`. 
If set to False, leading zeros will not be stripped, and\n undefined roots will be represented by NaN values in the function output.\n ``strip_zeros`` must be set to ``False`` for the function to be compatible with\n :func:`jax.jit` and other JAX transformations.\n\"\"\")", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 73, "n_words": 68, "vocab_size": 51, "complexity": 1, "nloc": 5, "token_counts": 80, "n_ast_nodes": 147, "n_identifiers": 18, "d_id": 27011, "documentation": { "docstring": "\\\nUnlike the numpy version of this function, the JAX version returns the roots in\na complex array regardless of the values of the roots. Additionally, the jax\nversion of this function adds the ``strip_zeros`` function which must be set to\nFalse for the function to be compatible with JIT and other JAX transformations.\nWith ``strip_zeros=False``, if your coefficients have leading zeros, the\nroots will be padded with NaN values:\n\n>>> coeffs = jnp.array([0, 1, 2])\n\n# The default behavior matches numpy and strips leading zeros:\n>>> jnp.roots(coeffs)\nDeviceArray([-2.+0.j], dtype=complex64)\n\n# With strip_zeros=False, extra roots are set to NaN:\n>>> jnp.roots(coeffs, strip_zeros=False)\nDeviceArray([-2. +0.j, nan+nanj], dtype=complex64)\n\nstrip_zeros : bool, default=True\n If set to True, then leading zeros in the coefficients will be stripped, similar\n to :func:`numpy.roots`. If set to False, leading zeros will not be stripped, and\n undefined roots will be represented by NaN values in the function output.\n ``strip_zeros`` must be set to ``False`` for the function to be compatible with\n :func:`jax.jit` and other JAX transformations.\n", "n_words": 167, "vocab_size": 92, "n_whitespaces": 167, "language": "en" } }, { "id": 248246, "commit_id": "d38d242411b8910dfacde1e61fd3a0ec5cbcaa66", "repo": "synapse", "path": "tests/config/test_cache.py", "file_name": "test_cache.py", "fun_name": "test_config_overrides_environ", "commit_message": "Reload cache factors from disk on SIGHUP (#12673)", "code": "def test_config_overrides_environ(self):\n \n config = {\"caches\": {\"per_cache_factors\": {\"foo\": 2, \"bar\": 3}}}\n self.config._environ = {\n \"SYNAPSE_CACHE_FACTOR_SOMETHING_OR_OTHER\": \"2\",\n \"SYNAPSE_CACHE_FACTOR_FOO\": 1,\n }\n self.config.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.config.resize_all_caches()\n\n self.assertEqual(\n dict(self.config.cache_factors),\n {\"foo\": 1.0, \"bar\": 3.0, \"something_or_other\": 2.0},\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 131, "n_words": 31, "vocab_size": 28, "complexity": 1, "nloc": 12, "token_counts": 98, "n_ast_nodes": 163, "n_identifiers": 11, "d_id": 72177, "documentation": { "docstring": "\n Individual cache factors defined in the environment will take precedence\n over those in the config.\n ", "n_words": 15, "vocab_size": 13, "n_whitespaces": 37, "language": "en" } }, { "id": 262858, "commit_id": "1a7d704ffbabb433007e3ba04750c2f13ade48e5", "repo": "pyinstaller", "path": "tests/functional/test_regression.py", "file_name": "test_regression.py", "fun_name": "test_issue_5131", "commit_message": "Fix typos (#6782) [skip ci]", "code": "def test_issue_5131(monkeypatch, tmpdir):\n ", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 6, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 25, 
"token_counts": 194, "n_ast_nodes": 15, "n_identifiers": 3, "d_id": 77409, "documentation": { "docstring": "\n While fixing the endless recursion when the package's __init__ module is an extension (see\n tests/unit/test_modulegraph_more.py::package_init_is_extension_*), another error occurred:\n PyInstaller.building._utils._load_code() tried to complete the source code for extension module - triggered by\n PYZ.assemble(), which is collecting all source files - caused by this being marked as \"PYMODULE\" in the TOC.\n ", "n_words": 49, "vocab_size": 40, "n_whitespaces": 65, "language": "en" } }, { "id": 64240, "commit_id": "b50036c04a116b2a3aa1784daf161a2f618765a8", "repo": "erpnext", "path": "erpnext/stock/doctype/delivery_note/delivery_note.py", "file_name": "delivery_note.py", "fun_name": "update_billed_amount_based_on_so", "commit_message": "fix: consider returned_qty while updating billed_amt\n\n(cherry picked from commit 63aaa1e357280b24c537a502a479f7bb7a6654e4)", "code": "def update_billed_amount_based_on_so(so_detail, update_modified=True):\n\t# Billed against Sales Order directly\n\tbilled_against_so = frappe.db.sql(, so_detail)\n\tbilled_against_so = billed_against_so and billed_against_so[0][0] or 0\n\n\t# Get all Delivery Note Item rows against the Sales Order Item row\n\tdn_details = frappe.db.sql(, so_detail, as_dict=1)\n\n\tupdated_dn = []\n\tfor dnd in dn_details:\n\t\tbilled_amt_agianst_dn = 0\n\n\t\t# If delivered against Sales Invoice\n\t\tif dnd.si_detail:\n\t\t\tbilled_amt_agianst_dn = flt(dnd.amount)\n\t\t\tbilled_against_so -= billed_amt_agianst_dn\n\t\telse:\n\t\t\t# Get billed amount directly against Delivery Note\n\t\t\tbilled_amt_agianst_dn = frappe.db.sql(, dnd.name)\n\t\t\tbilled_amt_agianst_dn = billed_amt_agianst_dn and billed_amt_agianst_dn[0][0] or 0\n\n\t\t# Distribute billed amount directly against SO between DNs based on FIFO\n\t\tif billed_against_so and billed_amt_agianst_dn < dnd.amount:\n\t\t\tif dnd.returned_qty:\n\t\t\t\tpending_to_bill = flt(dnd.amount) * (dnd.stock_qty - dnd.returned_qty) / dnd.stock_qty\n\t\t\telse:\n\t\t\t\tpending_to_bill = flt(dnd.amount)\n\t\t\tpending_to_bill -= billed_amt_agianst_dn\n\t\t\tif pending_to_bill <= billed_against_so:\n\t\t\t\tbilled_amt_agianst_dn += pending_to_bill\n\t\t\t\tbilled_against_so -= pending_to_bill\n\t\t\telse:\n\t\t\t\tbilled_amt_agianst_dn += billed_against_so\n\t\t\t\tbilled_against_so = 0\n\n\t\tfrappe.db.set_value(\"Delivery Note Item\", dnd.name, \"billed_amt\", billed_amt_agianst_dn, update_modified=update_modified)\n\n\t\tupdated_dn.append(dnd.parent)\n\n\treturn updated_dn\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 112, "n_words": 145, "vocab_size": 75, "complexity": 11, "nloc": 44, "token_counts": 205, "n_ast_nodes": 331, "n_identifiers": 22, "d_id": 13584, "documentation": { "docstring": "select sum(si_item.amount)\n\t\tfrom `tabSales Invoice Item` si_item, `tabSales Invoice` si\n\t\twhere\n\t\t\tsi_item.parent = si.name\n\t\t\tand si_item.so_detail=%s\n\t\t\tand (si_item.dn_detail is null or si_item.dn_detail = '')\n\t\t\tand si_item.docstatus=1\n\t\t\tand si.update_stock = 0\n\t\tselect dn_item.name, dn_item.amount, dn_item.si_detail, dn_item.parent, dn_item.stock_qty, dn_item.returned_qty\n\t\tfrom `tabDelivery Note Item` dn_item, `tabDelivery Note` dn\n\t\twhere\n\t\t\tdn.name = dn_item.parent\n\t\t\tand dn_item.so_detail=%s\n\t\t\tand dn.docstatus=1\n\t\t\tand dn.is_return = 
0\n\t\torder by dn.posting_date asc, dn.posting_time asc, dn.name ascselect sum(amount) from `tabSales Invoice Item`\n\t\t\t\twhere dn_detail=%s and docstatus=1", "n_words": 74, "vocab_size": 49, "n_whitespaces": 57, "language": "en" } }, { "id": 204726, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/management/utils.py", "file_name": "utils.py", "fun_name": "parse_apps_and_model_labels", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def parse_apps_and_model_labels(labels):\n \n apps = set()\n models = set()\n\n for label in labels:\n if \".\" in label:\n try:\n model = installed_apps.get_model(label)\n except LookupError:\n raise CommandError(\"Unknown model: %s\" % label)\n models.add(model)\n else:\n try:\n app_config = installed_apps.get_app_config(label)\n except LookupError as e:\n raise CommandError(str(e))\n apps.add(app_config)\n\n return models, apps\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 199, "n_words": 44, "vocab_size": 35, "complexity": 5, "nloc": 17, "token_counts": 87, "n_ast_nodes": 152, "n_identifiers": 16, "d_id": 50857, "documentation": { "docstring": "\n Parse a list of \"app_label.ModelName\" or \"app_label\" strings into actual\n objects and return a two-element tuple:\n (set of model classes, set of app_configs).\n Raise a CommandError if some specified models or apps don't exist.\n ", "n_words": 34, "vocab_size": 29, "n_whitespaces": 54, "language": "en" } }, { "id": 208123, "commit_id": "1c4ff33bd22cf94e297bd6449a06b5a30c2c1fbc", "repo": "celery", "path": "t/unit/tasks/test_canvas.py", "file_name": "test_canvas.py", "fun_name": "test_double_stamping", "commit_message": "Canvas Header Stamping (#7384)\n\n* Strip down the header-stamping PR to the basics.\r\n\r\n* Serialize groups.\r\n\r\n* Add groups to result backend meta data.\r\n\r\n* Fix spelling mistake.\r\n\r\n* Revert changes to canvas.py\r\n\r\n* Revert changes to app/base.py\r\n\r\n* Add stamping implementation to canvas.py\r\n\r\n* Send task to AMQP with groups.\r\n\r\n* Successfully pass single group to result.\r\n\r\n* _freeze_gid dict merge fixed\r\n\r\n* First draft of the visitor API.\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* OptionsVisitor created\r\n\r\n* Fixed canvas.py\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test for simple test for chord and fixed chord implementation\r\n\r\n* Changed _IMMUTABLE_OPTIONS\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed list order\r\n\r\n* Fixed tests (stamp test and chord test), fixed order in groups\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed lint and elements\r\n\r\n* Changed implementation of stamp API and fix lint\r\n\r\n* Added documentation to Stamping API. 
Added chord with groups test\r\n\r\n* Implemented stamping inside replace and added test for an implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Splitted into subtests\r\n\r\n* Group stamping rollback\r\n\r\n* group.id is None fixed\r\n\r\n* Added integration test\r\n\r\n* Added integration test\r\n\r\n* apply_async fixed\r\n\r\n* Integration test and test_chord fixed\r\n\r\n* Lint fixed\r\n\r\n* chord freeze fixed\r\n\r\n* Minor fixes.\r\n\r\n* Chain apply_async fixed and tests fixed\r\n\r\n* lint fixed\r\n\r\n* Added integration test for chord\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* type -> isinstance\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Redo header stamping (#7341)\r\n\r\n* _freeze_gid dict merge fixed\r\n\r\n* OptionsVisitor created\r\n\r\n* Fixed canvas.py\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test for simple test for chord and fixed chord implementation\r\n\r\n* Changed _IMMUTABLE_OPTIONS\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed list order\r\n\r\n* Fixed tests (stamp test and chord test), fixed order in groups\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed lint and elements\r\n\r\n* Changed implementation of stamp API and fix lint\r\n\r\n* Added documentation to Stamping API. 
Added chord with groups test\r\n\r\n* Implemented stamping inside replace and added test for an implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Splitted into subtests\r\n\r\n* Group stamping rollback\r\n\r\n* group.id is None fixed\r\n\r\n* Added integration test\r\n\r\n* Added integration test\r\n\r\n* apply_async fixed\r\n\r\n* Integration test and test_chord fixed\r\n\r\n* Lint fixed\r\n\r\n* chord freeze fixed\r\n\r\n* Minor fixes.\r\n\r\n* Chain apply_async fixed and tests fixed\r\n\r\n* lint fixed\r\n\r\n* Added integration test for chord\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* type -> isinstance\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Omer Katz \r\n\r\n* Added stamping mechanism\r\n\r\n* Manual stamping improved\r\n\r\n* flake8 fixed\r\n\r\n* Added subtests\r\n\r\n* Add comma.\r\n\r\n* Moved groups to stamps\r\n\r\n* Fixed chord and added test for that\r\n\r\n* Strip down the header-stamping PR to the basics.\r\n\r\n* Serialize groups.\r\n\r\n* Add groups to result backend meta data.\r\n\r\n* Fix spelling mistake.\r\n\r\n* Revert changes to canvas.py\r\n\r\n* Revert changes to app/base.py\r\n\r\n* Add stamping implementation to canvas.py\r\n\r\n* Send task to AMQP with groups.\r\n\r\n* Successfully pass single group to result.\r\n\r\n* _freeze_gid dict merge fixed\r\n\r\n* First draft of the visitor API.\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* OptionsVisitor created\r\n\r\n* Fixed canvas.py\r\n\r\n* Added test for simple test for chord and fixed chord implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Changed _IMMUTABLE_OPTIONS\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed list order\r\n\r\n* Fixed tests (stamp test and chord test), fixed order in groups\r\n\r\n* Fixed lint and elements\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Changed implementation of stamp API and fix lint\r\n\r\n* Added documentation to Stamping API. 
Added chord with groups test\r\n\r\n* Implemented stamping inside replace and added test for an implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Splitted into subtests\r\n\r\n* Group stamping rollback\r\n\r\n* group.id is None fixed\r\n\r\n* Added integration test\r\n\r\n* Added integration test\r\n\r\n* apply_async fixed\r\n\r\n* Integration test and test_chord fixed\r\n\r\n* Lint fixed\r\n\r\n* chord freeze fixed\r\n\r\n* Minor fixes.\r\n\r\n* Chain apply_async fixed and tests fixed\r\n\r\n* lint fixed\r\n\r\n* Added integration test for chord\r\n\r\n* type -> isinstance\r\n\r\n* Added stamping mechanism\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Manual stamping improved\r\n\r\n* fail_ci_if_error uncommented\r\n\r\n* flake8 fixed\r\n\r\n* Added subtests\r\n\r\n* Changes\r\n\r\n* Add comma.\r\n\r\n* Fixed chord and added test for that\r\n\r\n* canvas.py fixed\r\n\r\n* Test chord.py fixed\r\n\r\n* Fixed stamped_headers\r\n\r\n* collections import fixed\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* collections import fixed\r\n\r\n* Update celery/backends/base.py\r\n\r\nCo-authored-by: Omer Katz \r\n\r\n* ampq.py fixed\r\n\r\n* Refrain from using deprecated import path.\r\n\r\n* Fix test_complex_chain regression.\r\n\r\nWhenever we stamp a group we need to freeze it first if it wasn't already frozen.\r\nSomewhere along the line, the group id changed because we were freezing twice.\r\nThis commit places the stamping operation after preparing the chain's steps which fixes the problem somehow.\r\n\r\nWe don't know why yet.\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed issues with maybe_list. 
Add documentation\r\n\r\n* Fixed potential issue with integration tests\r\n\r\n* Fixed issues with _regen\r\n\r\n* Fixed issues with _regen\r\n\r\n* Fixed test_generator issues\r\n\r\n* Fixed _regen stamping\r\n\r\n* Fixed _regen stamping\r\n\r\n* Fixed TimeOut issue\r\n\r\n* Fixed TimeOut issue\r\n\r\n* Fixed TimeOut issue\r\n\r\n* Update docs/userguide/canvas.rst\r\n\r\nCo-authored-by: Omer Katz \r\n\r\n* Fixed Couchbase\r\n\r\n* Better stamping intro\r\n\r\n* New GroupVisitor example\r\n\r\n* Adjust documentation.\r\n\r\nCo-authored-by: Naomi Elstein \r\nCo-authored-by: Omer Katz \r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Asif Saif Uddin \r\nCo-authored-by: Omer Katz ", "code": "def test_double_stamping(self, subtests):\n \n self.app.conf.task_always_eager = True\n self.app.conf.task_store_eager_result = True\n self.app.conf.result_extended = True\n\n sig_1 = self.add.s(2, 2)\n sig_1.stamp(stamp1=\"stamp1\")\n sig_1.stamp(stamp2=\"stamp2\")\n sig_1_res = sig_1.freeze()\n sig_1.apply()\n\n with subtests.test(\"sig_1_res is stamped with stamp1\", stamp1=[\"stamp1\"]):\n assert sig_1_res._get_task_meta()[\"stamp1\"] == [\"stamp1\"]\n\n with subtests.test(\"sig_1_res is stamped with stamp2\", stamp2=[\"stamp2\"]):\n assert sig_1_res._get_task_meta()[\"stamp2\"] == [\"stamp2\"]\n\n with subtests.test(\"sig_1_res is stamped twice\", stamped_headers=[\"stamp2\", \"stamp1\"]):\n assert sig_1_res._get_task_meta()[\"stamped_headers\"] == [\"stamp2\", \"stamp1\", \"groups\"]\n", "url": "https://github.com/celery/celery.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 174, "n_words": 57, "vocab_size": 37, "complexity": 1, "nloc": 15, "token_counts": 162, "n_ast_nodes": 291, "n_identifiers": 20, "d_id": 52214, "documentation": { "docstring": "\n Test manual signature stamping with two different stamps.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 244173, "commit_id": "b403751bd409795cf63fcc6aa7ee280326358bac", "repo": "mmdetection", "path": "mmdet/models/dense_heads/tood_head.py", "file_name": "tood_head.py", "fun_name": "deform_sampling", "commit_message": "[Fix] Avoid invalid bbox after deform_sampling (#7567)\n\n* Avoid invalid bbox after deform_sampling\r\n\r\n* replace in-place opertion with torch.where, update docstring\r\n\r\n* Update", "code": "def deform_sampling(self, feat, offset):\n \n # it is an equivalent implementation of bilinear interpolation\n b, c, h, w = feat.shape\n weight = feat.new_ones(c, 1, 1, 1)\n y = deform_conv2d(feat, offset, weight, 1, 0, 1, c, c)\n return y\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 79, "n_words": 37, "vocab_size": 30, "complexity": 1, "nloc": 5, "token_counts": 57, "n_ast_nodes": 79, "n_identifiers": 13, "d_id": 70270, "documentation": { "docstring": "Sampling the feature x according to offset.\n\n Args:\n feat (Tensor): Feature\n offset (Tensor): Spatial offset for feature sampling\n ", "n_words": 18, "vocab_size": 15, "n_whitespaces": 54, "language": "en" } }, { "id": 248006, "commit_id": "4586119f0b0901be64f08655d3aaaef289a51bde", "repo": "synapse", "path": "tests/config/test_registration_config.py", "file_name": "test_registration_config.py", "fun_name": "test_session_lifetime_must_not_be_exceeded_by_smaller_lifetimes", "commit_message": "Add missing type hints to config classes. 
(#12402)", "code": "def test_session_lifetime_must_not_be_exceeded_by_smaller_lifetimes(self):\n \n config_dict = default_config(\"test\")\n\n # First test all the error conditions\n with self.assertRaises(ConfigError):\n HomeServerConfig().parse_config_dict(\n {\n \"session_lifetime\": \"30m\",\n \"nonrefreshable_access_token_lifetime\": \"31m\",\n **config_dict,\n },\n \"\",\n \"\",\n )\n\n with self.assertRaises(ConfigError):\n HomeServerConfig().parse_config_dict(\n {\n \"session_lifetime\": \"30m\",\n \"refreshable_access_token_lifetime\": \"31m\",\n **config_dict,\n },\n \"\",\n \"\",\n )\n\n with self.assertRaises(ConfigError):\n HomeServerConfig().parse_config_dict(\n {\n \"session_lifetime\": \"30m\",\n \"refresh_token_lifetime\": \"31m\",\n **config_dict,\n },\n \"\",\n \"\",\n )\n\n # Then test all the fine conditions\n HomeServerConfig().parse_config_dict(\n {\n \"session_lifetime\": \"31m\",\n \"nonrefreshable_access_token_lifetime\": \"31m\",\n **config_dict,\n },\n \"\",\n \"\",\n )\n\n HomeServerConfig().parse_config_dict(\n {\n \"session_lifetime\": \"31m\",\n \"refreshable_access_token_lifetime\": \"31m\",\n **config_dict,\n },\n \"\",\n \"\",\n )\n\n HomeServerConfig().parse_config_dict(\n {\"session_lifetime\": \"31m\", \"refresh_token_lifetime\": \"31m\", **config_dict},\n \"\",\n \"\",\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 808, "n_words": 89, "vocab_size": 30, "complexity": 1, "nloc": 55, "token_counts": 185, "n_ast_nodes": 353, "n_identifiers": 8, "d_id": 72044, "documentation": { "docstring": "\n session_lifetime should logically be larger than, or at least as large as,\n all the different token lifetimes.\n Test that the user is faced with configuration errors if they make it\n smaller, as that configuration doesn't make sense.\n ", "n_words": 37, "vocab_size": 32, "n_whitespaces": 73, "language": "en" } }, { "id": 63909, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/urllib3/_collections.py", "file_name": "_collections.py", "fun_name": "getlist", "commit_message": "upd; format", "code": "def getlist(self, key, default=__marker):\n \n try:\n vals = self._container[key.lower()]\n except KeyError:\n if default is self.__marker:\n return []\n return default\n else:\n return vals[1:]\n\n # Backwards compatibility for httplib\n getheaders = getlist\n getallmatchingheaders = getlist\n iget = getlist\n\n # Backwards compatibility for http.cookiejar\n get_all = getlist\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 148, "n_words": 43, "vocab_size": 29, "complexity": 3, "nloc": 9, "token_counts": 49, "n_ast_nodes": 102, "n_identifiers": 13, "d_id": 13528, "documentation": { "docstring": "Returns a list of all the values for the named field. 
Returns an\n empty list if the key doesn't exist.", "n_words": 20, "vocab_size": 16, "n_whitespaces": 26, "language": "en" } }, { "id": 151215, "commit_id": "eaa43337d2d7c13eeeb8c809d212e047f5935470", "repo": "freqtrade", "path": "freqtrade/freqai/freqai_interface.py", "file_name": "freqai_interface.py", "fun_name": "_set_train_queue", "commit_message": "improve train queue system, ensure crash resilience in train queue.", "code": "def _set_train_queue(self):\n \n current_pairlist = self.config.get(\"exchange\", {}).get(\"pair_whitelist\")\n if not self.dd.pair_dict:\n logger.info('Set fresh train queue from whitelist.')\n return deque(current_pairlist)\n\n best_queue = deque()\n\n pair_dict_sorted = sorted(self.dd.pair_dict.items(),\n key=lambda k: k[1]['trained_timestamp'])\n for pair in pair_dict_sorted:\n if pair[0] in current_pairlist:\n best_queue.appendleft(pair[0])\n logger.info('Set existing queue from trained timestamps.')\n return best_queue\n\n # Following methods which are overridden by user made prediction models.\n # See freqai/prediction_models/CatboostPredictionModel.py for an example.\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 203, "n_words": 60, "vocab_size": 49, "complexity": 4, "nloc": 13, "token_counts": 105, "n_ast_nodes": 180, "n_identifiers": 18, "d_id": 34978, "documentation": { "docstring": "\n Sets train queue from existing train timestamps if they exist\n otherwise it sets the train queue based on the provided whitelist.\n ", "n_words": 21, "vocab_size": 17, "n_whitespaces": 43, "language": "en" } }, { "id": 20522, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pygments/util.py", "file_name": "util.py", "fun_name": "duplicates_removed", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def duplicates_removed(it, already_seen=()):\n \n lst = []\n seen = set()\n for i in it:\n if i in seen or i in already_seen:\n continue\n lst.append(i)\n seen.add(i)\n return lst\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 73, "n_words": 26, "vocab_size": 19, "complexity": 4, "nloc": 14, "token_counts": 49, "n_ast_nodes": 82, "n_identifiers": 9, "d_id": 3409, "documentation": { "docstring": "\n Returns a list with duplicates removed from the iterable `it`.\n\n Order is preserved.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 23, "language": "en" } }, { "id": 249499, "commit_id": "74f60cec92c5aff87d6e74d177e95ec5f1a69f2b", "repo": "synapse", "path": "tests/rest/admin/test_user.py", "file_name": "test_user.py", "fun_name": "test_success_urlencoded", "commit_message": "Add an admin API endpoint to find a user based on its external ID in an auth provider. 
(#13810)", "code": "def test_success_urlencoded(self) -> None:\n \n url = \"/_synapse/admin/v1/auth_providers/another-auth-provider/users/a%3Acomplex%40external%2Fid\"\n\n channel = self.make_request(\n \"GET\",\n url,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(\n {\"user_id\": self.other_user},\n channel.json_body,\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 126, "n_words": 22, "vocab_size": 20, "complexity": 1, "nloc": 13, "token_counts": 61, "n_ast_nodes": 99, "n_identifiers": 12, "d_id": 72954, "documentation": { "docstring": "Tests a successful external ID lookup with an url-encoded ID", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 99100, "commit_id": "25469e1e30a717637456d6ab470bdd28864be301", "repo": "sentry", "path": "src/sentry/notifications/notifications/activity/base.py", "file_name": "base.py", "fun_name": "get_base_context", "commit_message": "ref(notifications): Remove `get_activity_name()` (#34061)", "code": "def get_base_context(self) -> MutableMapping[str, Any]:\n \n return {\n \"data\": self.activity.data,\n \"author\": self.activity.user,\n \"title\": self.title,\n \"project\": self.project,\n \"project_link\": self.get_project_link(),\n **super().get_base_context(),\n }\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 106, "n_words": 19, "vocab_size": 19, "complexity": 1, "nloc": 10, "token_counts": 61, "n_ast_nodes": 102, "n_identifiers": 12, "d_id": 19615, "documentation": { "docstring": "The most basic context shared by every notification type.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 13242, "commit_id": "107631e955b21db8a4ddb3bee02130de3650d032", "repo": "jina", "path": "jina/serve/instrumentation/__init__.py", "file_name": "__init__.py", "fun_name": "tracing_client_interceptor", "commit_message": "feat(instrumentation): add OpenTelemetry tracing and metrics with basic configurations (#5175)", "code": "def tracing_client_interceptor(self) -> Optional['OpenTelemetryClientInterceptor']:\n \n if self.tracing:\n from opentelemetry.instrumentation.grpc import (\n client_interceptor as grpc_client_interceptor,\n )\n\n return grpc_client_interceptor(self.tracer_provider)\n else:\n return None\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 99, "n_words": 19, "vocab_size": 18, "complexity": 2, "nloc": 11, "token_counts": 40, "n_ast_nodes": 65, "n_identifiers": 10, "d_id": 2578, "documentation": { "docstring": "\n :returns: a gRPC client interceptor with the global tracing provider.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 207855, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_readonly_manytomany_backwards_ref", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_readonly_manytomany_backwards_ref(self):\n \n topping = Topping.objects.create(name=\"Salami\")\n pizza = Pizza.objects.create(name=\"Americano\")\n pizza.toppings.add(topping)\n response = self.client.get(reverse(\"admin:admin_views_topping_add\"))\n 
self.assertEqual(response.status_code, 200)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 56, "n_words": 14, "vocab_size": 12, "complexity": 1, "nloc": 6, "token_counts": 61, "n_ast_nodes": 106, "n_identifiers": 17, "d_id": 52136, "documentation": { "docstring": "\n Regression test for #16433 - backwards references for related objects\n broke if the related field is read-only due to the help_text attribute\n ", "n_words": 22, "vocab_size": 19, "n_whitespaces": 44, "language": "en" } }, { "id": 167120, "commit_id": "7440fe27eef0aab0f217ca9fb434f1e3ac74836e", "repo": "pandas", "path": "pandas/core/apply.py", "file_name": "apply.py", "fun_name": "transform", "commit_message": "TYP: setter for index/columns property-like (AxisProperty) (#46565)\n\nCo-authored-by: Matthew Roeschke ", "code": "def transform(self) -> DataFrame | Series:\n \n obj = self.obj\n func = self.orig_f\n axis = self.axis\n args = self.args\n kwargs = self.kwargs\n\n is_series = obj.ndim == 1\n\n if obj._get_axis_number(axis) == 1:\n assert not is_series\n return obj.T.transform(func, 0, *args, **kwargs).T\n\n if is_list_like(func) and not is_dict_like(func):\n func = cast(List[AggFuncTypeBase], func)\n # Convert func equivalent dict\n if is_series:\n func = {com.get_callable_name(v) or v: v for v in func}\n else:\n func = {col: func for col in obj}\n\n if is_dict_like(func):\n func = cast(AggFuncTypeDict, func)\n return self.transform_dict_like(func)\n\n # func is either str or callable\n func = cast(AggFuncTypeBase, func)\n try:\n result = self.transform_str_or_callable(func)\n except TypeError:\n raise\n except Exception as err:\n raise ValueError(\"Transform function failed\") from err\n\n # Functions that transform may return empty Series/DataFrame\n # when the dtype is not appropriate\n if (\n isinstance(result, (ABCSeries, ABCDataFrame))\n and result.empty\n and not obj.empty\n ):\n raise ValueError(\"Transform function failed\")\n # error: Argument 1 to \"__get__\" of \"AxisProperty\" has incompatible type\n # \"Union[Series, DataFrame, GroupBy[Any], SeriesGroupBy,\n # DataFrameGroupBy, BaseWindow, Resampler]\"; expected \"Union[DataFrame,\n # Series]\"\n if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals(\n obj.index # type:ignore[arg-type]\n ):\n raise ValueError(\"Function did not transform\")\n\n return result\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 582, "n_words": 182, "vocab_size": 115, "complexity": 16, "nloc": 51, "token_counts": 246, "n_ast_nodes": 400, "n_identifiers": 37, "d_id": 39941, "documentation": { "docstring": "\n Transform a DataFrame or Series.\n\n Returns\n -------\n DataFrame or Series\n Result of applying ``func`` along the given axis of the\n Series or DataFrame.\n\n Raises\n ------\n ValueError\n If the transform function fails or does not transform.\n ", "n_words": 35, "vocab_size": 27, "n_whitespaces": 125, "language": "en" } }, { "id": 220550, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/locks.py", "file_name": "locks.py", "fun_name": "acquire", "commit_message": "add python 3.10.4 for windows", "code": "async def acquire(self):\n \n if (not self._locked and (self._waiters is None or\n all(w.cancelled() for w in self._waiters))):\n self._locked = True\n return True\n\n if self._waiters is 
None:\n self._waiters = collections.deque()\n fut = self._get_loop().create_future()\n self._waiters.append(fut)\n\n # Finally block should be called before the CancelledError\n # handling as we don't want CancelledError to call\n # _wake_up_first() and attempt to wake up itself.\n try:\n try:\n await fut\n finally:\n self._waiters.remove(fut)\n except exceptions.CancelledError:\n if not self._locked:\n self._wake_up_first()\n raise\n\n self._locked = True\n return True\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 297, "n_words": 76, "vocab_size": 56, "complexity": 9, "nloc": 20, "token_counts": 119, "n_ast_nodes": 203, "n_identifiers": 17, "d_id": 56049, "documentation": { "docstring": "Acquire a lock.\n\n This method blocks until the lock is unlocked, then sets it to\n locked and returns True.\n ", "n_words": 19, "vocab_size": 19, "n_whitespaces": 40, "language": "en" } }, { "id": 198366, "commit_id": "7d773eb18daaef3c54f34d1ac6cbc5b83a5bb16c", "repo": "sympy", "path": "sympy/combinatorics/tensor_can.py", "file_name": "tensor_can.py", "fun_name": "canonicalize", "commit_message": "Cleanup loops and ranges", "code": "def canonicalize(g, dummies, msym, *v):\n \n from sympy.combinatorics.testutil import canonicalize_naive\n if not isinstance(msym, list):\n if msym not in (0, 1, None):\n raise ValueError('msym must be 0, 1 or None')\n num_types = 1\n else:\n num_types = len(msym)\n if not all(msymx in (0, 1, None) for msymx in msym):\n raise ValueError('msym entries must be 0, 1 or None')\n if len(dummies) != num_types:\n raise ValueError(\n 'dummies and msym must have the same number of elements')\n size = g.size\n num_tensors = 0\n v1 = []\n for base_i, gens_i, n_i, sym_i in v:\n # check that the BSGS is minimal;\n # this property is used in double_coset_can_rep;\n # if it is not minimal use canonicalize_naive\n if not _is_minimal_bsgs(base_i, gens_i):\n mbsgs = get_minimal_bsgs(base_i, gens_i)\n if not mbsgs:\n can = canonicalize_naive(g, dummies, msym, *v)\n return can\n base_i, gens_i = mbsgs\n v1.append((base_i, gens_i, [[]] * n_i, sym_i))\n num_tensors += n_i\n\n if num_types == 1 and not isinstance(msym, list):\n dummies = [dummies]\n msym = [msym]\n flat_dummies = []\n for dumx in dummies:\n flat_dummies.extend(dumx)\n\n if flat_dummies and flat_dummies != list(range(flat_dummies[0], flat_dummies[-1] + 1)):\n raise ValueError('dummies is not valid')\n\n # slot symmetry of the tensor\n size1, sbase, sgens = gens_products(*v1)\n if size != size1:\n raise ValueError(\n 'g has size %d, generators have size %d' % (size, size1))\n free = [i for i in range(size - 2) if i not in flat_dummies]\n num_free = len(free)\n\n # g1 minimal tensor under slot symmetry\n g1 = canonical_free(sbase, sgens, g, num_free)\n if not flat_dummies:\n return g1\n # save the sign of g1\n sign = 0 if g1[-1] == size - 1 else 1\n\n # the free indices are kept fixed.\n # Determine free_i, the list of slots of tensors which are fixed\n # since they are occupied by free indices, which are fixed.\n start = 0\n for i, (base_i, gens_i, n_i, sym_i) in enumerate(v):\n free_i = []\n len_tens = gens_i[0].size - 2\n # for each component tensor get a list od fixed islots\n for j in range(n_i):\n # get the elements corresponding to the component tensor\n h = g1[start:(start + len_tens)]\n fr = []\n # get the positions of the fixed elements in h\n for k in free:\n if k in h:\n fr.append(h.index(k))\n free_i.append(fr)\n start += 
len_tens\n v1[i] = (base_i, gens_i, free_i, sym_i)\n # BSGS of the tensor with fixed free indices\n # if tensor_gens fails in gens_product, use canonicalize_naive\n size, sbase, sgens = gens_products(*v1)\n\n # reduce the permutations getting rid of the free indices\n pos_free = [g1.index(x) for x in range(num_free)]\n size_red = size - num_free\n g1_red = [x - num_free for x in g1 if x in flat_dummies]\n if sign:\n g1_red.extend([size_red - 1, size_red - 2])\n else:\n g1_red.extend([size_red - 2, size_red - 1])\n map_slots = _get_map_slots(size, pos_free)\n sbase_red = [map_slots[i] for i in sbase if i not in pos_free]\n sgens_red = [_af_new([map_slots[i] for i in y._array_form if i not in pos_free]) for y in sgens]\n dummies_red = [[x - num_free for x in y] for y in dummies]\n transv_red = get_transversals(sbase_red, sgens_red)\n g1_red = _af_new(g1_red)\n g2 = double_coset_can_rep(\n dummies_red, msym, sbase_red, sgens_red, transv_red, g1_red)\n if g2 == 0:\n return 0\n # lift to the case with the free indices\n g3 = _lift_sgens(size, pos_free, free, g2)\n return g3\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 1079, "n_words": 523, "vocab_size": 243, "complexity": 35, "nloc": 76, "token_counts": 638, "n_ast_nodes": 980, "n_identifiers": 69, "d_id": 48885, "documentation": { "docstring": "\n canonicalize tensor formed by tensors\n\n Parameters\n ==========\n\n g : permutation representing the tensor\n\n dummies : list representing the dummy indices\n it can be a list of dummy indices of the same type\n or a list of lists of dummy indices, one list for each\n type of index;\n the dummy indices must come after the free indices,\n and put in order contravariant, covariant\n [d0, -d0, d1,-d1,...]\n\n msym : symmetry of the metric(s)\n it can be an integer or a list;\n in the first case it is the symmetry of the dummy index metric;\n in the second case it is the list of the symmetries of the\n index metric for each type\n\n v : list, (base_i, gens_i, n_i, sym_i) for tensors of type `i`\n\n base_i, gens_i : BSGS for tensors of this type.\n The BSGS should have minimal base under lexicographic ordering;\n if not, an attempt is made do get the minimal BSGS;\n in case of failure,\n canonicalize_naive is used, which is much slower.\n\n n_i : number of tensors of type `i`.\n\n sym_i : symmetry under exchange of component tensors of type `i`.\n\n Both for msym and sym_i the cases are\n * None no symmetry\n * 0 commuting\n * 1 anticommuting\n\n Returns\n =======\n\n 0 if the tensor is zero, else return the array form of\n the permutation representing the canonical form of the tensor.\n\n Algorithm\n =========\n\n First one uses canonical_free to get the minimum tensor under\n lexicographic order, using only the slot symmetries.\n If the component tensors have not minimal BSGS, it is attempted\n to find it; if the attempt fails canonicalize_naive\n is used instead.\n\n Compute the residual slot symmetry keeping fixed the free indices\n using tensor_gens(base, gens, list_free_indices, sym).\n\n Reduce the problem eliminating the free indices.\n\n Then use double_coset_can_rep and lift back the result reintroducing\n the free indices.\n\n Examples\n ========\n\n one type of index with commuting metric;\n\n `A_{a b}` and `B_{a b}` antisymmetric and commuting\n\n `T = A_{d0 d1} * B^{d0}{}_{d2} * B^{d2 d1}`\n\n `ord = [d0,-d0,d1,-d1,d2,-d2]` order of the indices\n\n g = [1, 3, 0, 5, 4, 2, 6, 7]\n\n `T_c = 
0`\n\n >>> from sympy.combinatorics.tensor_can import get_symmetric_group_sgs, canonicalize, bsgs_direct_product\n >>> from sympy.combinatorics import Permutation\n >>> base2a, gens2a = get_symmetric_group_sgs(2, 1)\n >>> t0 = (base2a, gens2a, 1, 0)\n >>> t1 = (base2a, gens2a, 2, 0)\n >>> g = Permutation([1, 3, 0, 5, 4, 2, 6, 7])\n >>> canonicalize(g, range(6), 0, t0, t1)\n 0\n\n same as above, but with `B_{a b}` anticommuting\n\n `T_c = -A^{d0 d1} * B_{d0}{}^{d2} * B_{d1 d2}`\n\n can = [0,2,1,4,3,5,7,6]\n\n >>> t1 = (base2a, gens2a, 2, 1)\n >>> canonicalize(g, range(6), 0, t0, t1)\n [0, 2, 1, 4, 3, 5, 7, 6]\n\n two types of indices `[a,b,c,d,e,f]` and `[m,n]`, in this order,\n both with commuting metric\n\n `f^{a b c}` antisymmetric, commuting\n\n `A_{m a}` no symmetry, commuting\n\n `T = f^c{}_{d a} * f^f{}_{e b} * A_m{}^d * A^{m b} * A_n{}^a * A^{n e}`\n\n ord = [c,f,a,-a,b,-b,d,-d,e,-e,m,-m,n,-n]\n\n g = [0,7,3, 1,9,5, 11,6, 10,4, 13,2, 12,8, 14,15]\n\n The canonical tensor is\n `T_c = -f^{c a b} * f^{f d e} * A^m{}_a * A_{m d} * A^n{}_b * A_{n e}`\n\n can = [0,2,4, 1,6,8, 10,3, 11,7, 12,5, 13,9, 15,14]\n\n >>> base_f, gens_f = get_symmetric_group_sgs(3, 1)\n >>> base1, gens1 = get_symmetric_group_sgs(1)\n >>> base_A, gens_A = bsgs_direct_product(base1, gens1, base1, gens1)\n >>> t0 = (base_f, gens_f, 2, 0)\n >>> t1 = (base_A, gens_A, 4, 0)\n >>> dummies = [range(2, 10), range(10, 14)]\n >>> g = Permutation([0, 7, 3, 1, 9, 5, 11, 6, 10, 4, 13, 2, 12, 8, 14, 15])\n >>> canonicalize(g, dummies, [0, 0], t0, t1)\n [0, 2, 4, 1, 6, 8, 10, 3, 11, 7, 12, 5, 13, 9, 15, 14]\n ", "n_words": 601, "vocab_size": 295, "n_whitespaces": 946, "language": "en" } }, { "id": 250369, "commit_id": "652d1669c5a103b1c20478770c4aaf18849c09a3", "repo": "synapse", "path": "tests/handlers/test_presence.py", "file_name": "test_presence.py", "fun_name": "test_set_presence_with_status_msg_none", "commit_message": "Add missing type hints to tests.handlers. 
(#14680)\n\nAnd do not allow untyped defs in tests.handlers.", "code": "def test_set_presence_with_status_msg_none(self) -> None:\n \n user_id = \"@test:server\"\n status_msg = \"I'm here!\"\n\n # Mark user as online\n self._set_presencestate_with_status_msg(\n user_id, PresenceState.ONLINE, status_msg\n )\n\n # Mark user as online and `status_msg = None`\n self._set_presencestate_with_status_msg(user_id, PresenceState.ONLINE, None)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 100, "n_words": 33, "vocab_size": 24, "complexity": 1, "nloc": 10, "token_counts": 38, "n_ast_nodes": 66, "n_identifiers": 7, "d_id": 73405, "documentation": { "docstring": "Test that if a user set again the presence manually\n and status is `None`, that `status_msg` is `None`.\n ", "n_words": 18, "vocab_size": 16, "n_whitespaces": 32, "language": "en" } }, { "id": 228753, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/bar/marker/_colorbar.py", "file_name": "_colorbar.py", "fun_name": "tickangle", "commit_message": "switch to black .22", "code": "def tickangle(self):\n \n return self[\"tickangle\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 60426, "documentation": { "docstring": "\n Sets the angle of the tick labels with respect to the\n horizontal. For example, a `tickangle` of -90 draws the tick\n labels vertically.\n\n The 'tickangle' property is a angle (in degrees) that may be\n specified as a number between -180 and 180. Numeric values outside this\n range are converted to the equivalent value\n (e.g. 270 is converted to -90).\n\n Returns\n -------\n int|float\n ", "n_words": 62, "vocab_size": 48, "n_whitespaces": 140, "language": "en" } }, { "id": 126370, "commit_id": "545c51609f0f55b41cf99cec95a9c21bee6846de", "repo": "ray", "path": "python/ray/serve/_private/router.py", "file_name": "router.py", "fun_name": "_reset_replica_iterator", "commit_message": "[Serve] ServeHandle detects ActorError and drop replicas from target group (#26685)", "code": "def _reset_replica_iterator(self):\n \n replicas = list(self.in_flight_queries.keys())\n random.shuffle(replicas)\n self.replica_iterator = itertools.cycle(replicas)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 37, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 34, "n_ast_nodes": 59, "n_identifiers": 11, "d_id": 28152, "documentation": { "docstring": "Reset the iterator used to load balance replicas.\n\n This call is expected to be called after the replica membership has\n been updated. 
It will shuffle the replicas randomly to avoid multiple\n handle sending requests in the same order.\n ", "n_words": 38, "vocab_size": 33, "n_whitespaces": 66, "language": "en" } }, { "id": 81880, "commit_id": "7de5f772626a00d31026270865276365287cbe37", "repo": "awx", "path": "awx/main/tests/functional/test_copy.py", "file_name": "test_copy.py", "fun_name": "test_workflow_job_template_copy", "commit_message": "adding test coverage to ensure that FIELDS_TO_PRESERVE_AT_COPY is behaving as expected for WFJTs", "code": "def test_workflow_job_template_copy(workflow_job_template, post, get, admin, organization):\n \n workflow_job_template.organization = organization\n\n label = Label.objects.create(name=\"foobar\", organization=organization)\n workflow_job_template.labels.add(label)\n\n ee = ExecutionEnvironment.objects.create(name=\"barfoo\", organization=organization)\n workflow_job_template.execution_environment = ee\n\n ig = InstanceGroup.objects.create(name=\"bazbar\", organization=organization)\n workflow_job_template.instance_groups.add(ig)\n\n workflow_job_template.save()\n\n jts = [JobTemplate.objects.create(name='test-jt-{}'.format(i)) for i in range(0, 5)]\n nodes = [WorkflowJobTemplateNode.objects.create(workflow_job_template=workflow_job_template, unified_job_template=jts[i]) for i in range(0, 5)]\n nodes[0].success_nodes.add(nodes[1])\n nodes[1].success_nodes.add(nodes[2])\n nodes[0].failure_nodes.add(nodes[3])\n nodes[3].failure_nodes.add(nodes[4])\n with mock.patch('awx.api.generics.trigger_delayed_deep_copy') as deep_copy_mock:\n wfjt_copy_id = post(\n reverse('api:workflow_job_template_copy', kwargs={'pk': workflow_job_template.pk}), {'name': 'new wfjt name'}, admin, expect=201\n ).data['id']\n wfjt_copy = type(workflow_job_template).objects.get(pk=wfjt_copy_id)\n args, kwargs = deep_copy_mock.call_args\n deep_copy_model_obj(*args, **kwargs)\n\n assert wfjt_copy.organization == organization\n assert wfjt_copy.created_by == admin\n assert wfjt_copy.name == 'new wfjt name'\n assert wfjt_copy.labels.count() != 0\n assert wfjt_copy.labels.get(pk=label.pk) == label\n assert wfjt_copy.execution_environment == ee\n assert wfjt_copy.instance_groups.count() != 0\n assert wfjt_copy.instance_groups.get(pk=ig.pk) == ig\n\n copied_node_list = [x for x in wfjt_copy.workflow_job_template_nodes.all()]\n copied_node_list.sort(key=lambda x: int(x.unified_job_template.name[-1]))\n for node, success_count, failure_count, always_count in zip(copied_node_list, [1, 1, 0, 0, 0], [1, 0, 0, 1, 0], [0, 0, 0, 0, 0]):\n assert node.success_nodes.count() == success_count\n assert node.failure_nodes.count() == failure_count\n assert node.always_nodes.count() == always_count\n assert copied_node_list[1] in copied_node_list[0].success_nodes.all()\n assert copied_node_list[2] in copied_node_list[1].success_nodes.all()\n assert copied_node_list[3] in copied_node_list[0].failure_nodes.all()\n assert copied_node_list[4] in copied_node_list[3].failure_nodes.all()\n\n\n@pytest.mark.django_db", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "@pytest.mark.django_db", "n_ast_errors": 1, "ast_levels": 18, "n_whitespaces": 328, "n_words": 169, "vocab_size": 102, "complexity": 5, "nloc": 40, "token_counts": 538, "n_ast_nodes": 837, "n_identifiers": 62, "d_id": 17273, "documentation": { "docstring": "\n Tests the FIELDS_TO_PRESERVE_AT_COPY attribute on WFJTs\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 13, "language": "en" } }, { "id": 109916, "commit_id": 
"df6f95703b60348e01603f98a439b133da2938a0", "repo": "matplotlib", "path": "lib/mpl_toolkits/mplot3d/art3d.py", "file_name": "art3d.py", "fun_name": "line_collection_2d_to_3d", "commit_message": "Improve mpl_toolkit documentation", "code": "def line_collection_2d_to_3d(col, zs=0, zdir='z'):\n \n segments3d = _paths_to_3d_segments(col.get_paths(), zs, zdir)\n col.__class__ = Line3DCollection\n col.set_segments(segments3d)\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 25, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 39, "n_ast_nodes": 64, "n_identifiers": 10, "d_id": 23823, "documentation": { "docstring": "Convert a `.LineCollection` to a `.Line3DCollection` object.", "n_words": 7, "vocab_size": 6, "n_whitespaces": 6, "language": "en" } }, { "id": 198001, "commit_id": "4a7c0c31501685f9d8e6572fe735b592a1fa3c33", "repo": "sympy", "path": "sympy/solvers/recurr.py", "file_name": "recurr.py", "fun_name": "rsolve_hyper", "commit_message": "rsolve_hyper: take into account degenerate solutions\n\nThis fixes sympy/sympy#8697:\n\n In [2]: rsolve(a(n + 3) - a(n + 2) - a(n + 1) + a(n), a(n))\n Out[2]:\n n\n (-1) ⋅C₁ + C₀ + C₂⋅n\n\nAdded also test from issue thread, which is not related\nto the problem. And from PR request diofant/diofant#442.\n\nTest for sympy/sympy#6844 was adapted.", "code": "def rsolve_hyper(coeffs, f, n, **hints):\n r\n coeffs = list(map(sympify, coeffs))\n\n f = sympify(f)\n\n r, kernel, symbols = len(coeffs) - 1, [], set()\n\n if not f.is_zero:\n if f.is_Add:\n similar = {}\n\n for g in f.expand().args:\n if not g.is_hypergeometric(n):\n return None\n\n for h in similar.keys():\n if hypersimilar(g, h, n):\n similar[h] += g\n break\n else:\n similar[g] = S.Zero\n\n inhomogeneous = []\n\n for g, h in similar.items():\n inhomogeneous.append(g + h)\n elif f.is_hypergeometric(n):\n inhomogeneous = [f]\n else:\n return None\n\n for i, g in enumerate(inhomogeneous):\n coeff, polys = S.One, coeffs[:]\n denoms = [S.One]*(r + 1)\n\n s = hypersimp(g, n)\n\n for j in range(1, r + 1):\n coeff *= s.subs(n, n + j - 1)\n\n p, q = coeff.as_numer_denom()\n\n polys[j] *= p\n denoms[j] = q\n\n for j in range(r + 1):\n polys[j] *= Mul(*(denoms[:j] + denoms[j + 1:]))\n\n R = rsolve_poly(polys, Mul(*denoms), n)\n\n if not (R is None or R is S.Zero):\n inhomogeneous[i] *= R\n else:\n return None\n\n result = Add(*inhomogeneous)\n else:\n result = S.Zero\n\n Z = Dummy('Z')\n\n p, q = coeffs[0], coeffs[r].subs(n, n - r + 1)\n\n p_factors = [z for z in roots(p, n).keys()]\n q_factors = [z for z in roots(q, n).keys()]\n\n factors = [(S.One, S.One)]\n\n for p in p_factors:\n for q in q_factors:\n if p.is_integer and q.is_integer and p <= q:\n continue\n else:\n factors += [(n - p, n - q)]\n\n p = [(n - p, S.One) for p in p_factors]\n q = [(S.One, n - q) for q in q_factors]\n\n factors = p + factors + q\n\n for A, B in factors:\n polys, degrees = [], []\n D = A*B.subs(n, n + r - 1)\n\n for i in range(r + 1):\n a = Mul(*[A.subs(n, n + j) for j in range(i)])\n b = Mul(*[B.subs(n, n + j) for j in range(i, r)])\n\n poly = quo(coeffs[i]*a*b, D, n)\n polys.append(poly.as_poly(n))\n\n if not poly.is_zero:\n degrees.append(polys[i].degree())\n\n if degrees:\n d, poly = max(degrees), S.Zero\n else:\n return None\n\n for i in range(r + 1):\n coeff = polys[i].nth(d)\n\n if coeff is not S.Zero:\n poly += coeff * Z**i\n\n for z in roots(poly, Z).keys():\n if z.is_zero:\n continue\n\n 
recurr_coeffs = [polys[i].as_expr()*z**i for i in range(r + 1)]\n if d == 0 and 0 != Add(*[recurr_coeffs[j]*j for j in range(1, r + 1)]):\n # faster inline check (than calling rsolve_poly) for a\n # constant solution to a constant coefficient recurrence.\n sol = [Symbol(\"C\" + str(len(symbols)))]\n else:\n sol, syms = rsolve_poly(recurr_coeffs, 0, n, len(symbols), symbols=True)\n sol = sol.collect(syms)\n sol = [sol.coeff(s) for s in syms]\n\n for C in sol:\n ratio = z * A * C.subs(n, n + 1) / B / C\n ratio = simplify(ratio)\n # If there is a nonnegative root in the denominator of the ratio,\n # this indicates that the term y(n_root) is zero, and one should\n # start the product with the term y(n_root + 1).\n n0 = 0\n for n_root in roots(ratio.as_numer_denom()[1], n).keys():\n if n_root.has(I):\n return None\n elif (n0 < (n_root + 1)) == True:\n n0 = n_root + 1\n K = product(ratio, (n, n0, n - 1))\n if K.has(factorial, FallingFactorial, RisingFactorial):\n K = simplify(K)\n\n if casoratian(kernel + [K], n, zero=False) != 0:\n kernel.append(K)\n\n kernel.sort(key=default_sort_key)\n sk = list(zip(numbered_symbols('C'), kernel))\n\n if sk:\n for C, ker in sk:\n result += C * ker\n else:\n return None\n\n if hints.get('symbols', False):\n # XXX: This returns the symbols in a non-deterministic order\n symbols |= {s for s, k in sk}\n return (result, list(symbols))\n else:\n return result\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 1808, "n_words": 553, "vocab_size": 259, "complexity": 48, "nloc": 164, "token_counts": 1051, "n_ast_nodes": 1617, "n_identifiers": 98, "d_id": 48766, "documentation": { "docstring": "\n Given linear recurrence operator `\\operatorname{L}` of order `k`\n with polynomial coefficients and inhomogeneous equation\n `\\operatorname{L} y = f` we seek for all hypergeometric solutions\n over field `K` of characteristic zero.\n\n The inhomogeneous part can be either hypergeometric or a sum\n of a fixed number of pairwise dissimilar hypergeometric terms.\n\n The algorithm performs three basic steps:\n\n (1) Group together similar hypergeometric terms in the\n inhomogeneous part of `\\operatorname{L} y = f`, and find\n particular solution using Abramov's algorithm.\n\n (2) Compute generating set of `\\operatorname{L}` and find basis\n in it, so that all solutions are linearly independent.\n\n (3) Form final solution with the number of arbitrary\n constants equal to dimension of basis of `\\operatorname{L}`.\n\n Term `a(n)` is hypergeometric if it is annihilated by first order\n linear difference equations with polynomial coefficients or, in\n simpler words, if consecutive term ratio is a rational function.\n\n The output of this procedure is a linear combination of fixed\n number of hypergeometric terms. However the underlying method\n can generate larger class of solutions - D'Alembertian terms.\n\n Note also that this method not only computes the kernel of the\n inhomogeneous equation, but also reduces in to a basis so that\n solutions generated by this procedure are linearly independent\n\n Examples\n ========\n\n >>> from sympy.solvers import rsolve_hyper\n >>> from sympy.abc import x\n\n >>> rsolve_hyper([-1, -1, 1], 0, x)\n C0*(1/2 - sqrt(5)/2)**x + C1*(1/2 + sqrt(5)/2)**x\n\n >>> rsolve_hyper([-1, 1], 1 + x, x)\n C0 + x*(x + 1)/2\n\n References\n ==========\n\n .. [1] M. Petkovsek, Hypergeometric solutions of linear recurrences\n with polynomial coefficients, J. 
Symbolic Computation,\n 14 (1992), 243-264.\n\n .. [2] M. Petkovsek, H. S. Wilf, D. Zeilberger, A = B, 1996.\n ", "n_words": 270, "vocab_size": 169, "n_whitespaces": 443, "language": "en" } }, { "id": 160174, "commit_id": "f404e9e92e87a3990712d723d5c562a89300ac01", "repo": "numpy", "path": "numpy/distutils/ccompiler_opt.py", "file_name": "ccompiler_opt.py", "fun_name": "parse_targets", "commit_message": "Add space after argument name", "code": "def parse_targets(self, source):\n \n self.dist_log(\"looking for '@targets' inside -> \", source)\n # get lines between /*@targets and */\n with open(source) as fd:\n tokens = \"\"\n max_to_reach = 1000 # good enough, isn't?\n start_with = \"@targets\"\n start_pos = -1\n end_with = \"*/\"\n end_pos = -1\n for current_line, line in enumerate(fd):\n if current_line == max_to_reach:\n self.dist_fatal(\"reached the max of lines\")\n break\n if start_pos == -1:\n start_pos = line.find(start_with)\n if start_pos == -1:\n continue\n start_pos += len(start_with)\n tokens += line\n end_pos = line.find(end_with)\n if end_pos != -1:\n end_pos += len(tokens) - len(line)\n break\n\n if start_pos == -1:\n self.dist_fatal(\"expected to find '%s' within a C comment\" % start_with)\n if end_pos == -1:\n self.dist_fatal(\"expected to end with '%s'\" % end_with)\n\n tokens = tokens[start_pos:end_pos]\n return self._parse_target_tokens(tokens)\n\n _parse_regex_arg = re.compile(r'\\s|,|([+-])')", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 511, "n_words": 122, "vocab_size": 78, "complexity": 8, "nloc": 29, "token_counts": 165, "n_ast_nodes": 305, "n_identifiers": 22, "d_id": 38546, "documentation": { "docstring": "\n Fetch and parse configuration statements that required for\n defining the targeted CPU features, statements should be declared\n in the top of source in between **C** comment and start\n with a special mark **@targets**.\n\n Configuration statements are sort of keywords representing\n CPU features names, group of statements and policies, combined\n together to determine the required optimization.\n\n Parameters\n ----------\n source : str\n the path of **C** source file.\n\n Returns\n -------\n - bool, True if group has the 'baseline' option\n - list, list of CPU features\n - list, list of extra compiler flags\n ", "n_words": 90, "vocab_size": 63, "n_whitespaces": 214, "language": "en" } }, { "id": 310457, "commit_id": "6bbe38578c74e5ecd8aadcd2cf39cddca8a59a52", "repo": "core", "path": "tests/components/guardian/conftest.py", "file_name": "conftest.py", "fun_name": "data_system_ping_fixture", "commit_message": "Add diagnostics to Elexa Guardian (#64599)", "code": "def data_system_ping_fixture():\n \n return json.loads(load_fixture(\"system_ping_data.json\", \"guardian\"))\n\n\n@pytest.fixture(name=\"data_valve_status\", scope=\"session\")", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture(name=\"data_valve_status\", scope=\"session\")", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 12, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 17, "n_ast_nodes": 58, "n_identifiers": 8, "d_id": 109142, "documentation": { "docstring": "Define data from a successful system_ping response.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 119870, "commit_id": "69969ef8031e424b19dd020a396b3fbdc25b703e", "repo": "jax", "path": "jax/_src/random.py", 
"file_name": "random.py", "fun_name": "_softmax", "commit_message": "add random.loggamma and improve dirichlet & beta implementation", "code": "def _softmax(x, axis):\n \n if not dtypes.issubdtype(x.dtype, np.floating):\n raise TypeError(f\"_softmax only accepts floating dtypes, got {x.dtype}\")\n x_max = jnp.max(x, axis, keepdims=True)\n unnormalized = jnp.exp(x - lax.stop_gradient(x_max))\n return unnormalized / unnormalized.sum(axis, keepdims=True)\n\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 38, "n_words": 30, "vocab_size": 27, "complexity": 2, "nloc": 6, "token_counts": 71, "n_ast_nodes": 118, "n_identifiers": 18, "d_id": 26701, "documentation": { "docstring": "Utility to compute the softmax of x along a given axis.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 259122, "commit_id": "d616e43947340e152e4a901931e954d699368fa9", "repo": "scikit-learn", "path": "sklearn/kernel_approximation.py", "file_name": "kernel_approximation.py", "fun_name": "fit", "commit_message": "ENH Adds feature_names_out for most of kernel_approximation (#22694)", "code": "def fit(self, X, y=None):\n \n if not self.degree >= 1:\n raise ValueError(f\"degree={self.degree} should be >=1.\")\n\n X = self._validate_data(X, accept_sparse=\"csc\")\n random_state = check_random_state(self.random_state)\n\n n_features = X.shape[1]\n if self.coef0 != 0:\n n_features += 1\n\n self.indexHash_ = random_state.randint(\n 0, high=self.n_components, size=(self.degree, n_features)\n )\n\n self.bitHash_ = random_state.choice(a=[-1, 1], size=(self.degree, n_features))\n self._n_features_out = self.n_components\n return self\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 160, "n_words": 50, "vocab_size": 42, "complexity": 3, "nloc": 14, "token_counts": 126, "n_ast_nodes": 202, "n_identifiers": 22, "d_id": 75581, "documentation": { "docstring": "Fit the model with X.\n\n Initializes the internal variables. 
The method needs no information\n about the distribution of data, so we only care about n_features in X.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training data, where `n_samples` is the number of samples\n and `n_features` is the number of features.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs), \\\n default=None\n Target values (None for unsupervised transformations).\n\n Returns\n -------\n self : object\n Returns the instance itself.\n ", "n_words": 80, "vocab_size": 61, "n_whitespaces": 209, "language": "en" } }, { "id": 44110, "commit_id": "602abe8394fafe7de54df7e73af56de848cdf617", "repo": "airflow", "path": "airflow/providers/google/cloud/hooks/mlengine.py", "file_name": "mlengine.py", "fun_name": "_poll_with_exponential_delay", "commit_message": "Remove `:type` lines now sphinx-autoapi supports typehints (#20951)\n\n* Remove `:type` lines now sphinx-autoapi supports typehints\r\n\r\nSince we have no updated sphinx-autoapi to a more recent version it\r\nsupports showing type hints in the documentation, so we don't need to\r\nhave the type hints _and_ the `:type` lines -- which is good, as the\r\nones in the doc strings are easy to get out of date!\r\n\r\nThe following settings have been set:\r\n\r\n`autodoc_typehints = 'description'` -- show types in description (where\r\nprevious `:type` used to show up)\r\n\r\n`autodoc_typehints_description_target = 'documented'` -- only link to\r\ntypes that are documented. (Without this we have some missing return\r\ntypes that aren't documented, and aren't linked to in our current python\r\nAPI docs, so this caused a build failure)\r\n\r\n`autodoc_typehints_format = 'short'` -- Shorten type hints where\r\npossible, i.e. `StringIO` instead of `io.StringIO`\r\n\r\n* Add argument type names to local spelling dictionary\r\n\r\nNow that we are using the type hints in the docs, sphinxcontrib-spelling\r\npicks them up as words to be checked, so we have to ignore them.\r\n\r\nI've chosen to add the provider specific ones to local dictionary files\r\nrather than the global, as for example, `mgmt` is an error in most\r\nplaces, but not in some of the Azure provider.", "code": "def _poll_with_exponential_delay(request, execute_num_retries, max_n, is_done_func, is_error_func):\n \n for i in range(0, max_n):\n try:\n response = request.execute(num_retries=execute_num_retries)\n if is_error_func(response):\n raise ValueError(f'The response contained an error: {response}')\n if is_done_func(response):\n log.info('Operation is done: %s', response)\n return response\n\n time.sleep((2 ** i) + (random.randint(0, 1000) / 1000))\n except HttpError as e:\n if e.resp.status != 429:\n log.info('Something went wrong. 
Not retrying: %s', format(e))\n raise\n else:\n time.sleep((2 ** i) + (random.randint(0, 1000) / 1000))\n\n raise ValueError(f'Connection could not be established after {max_n} retries.')\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 254, "n_words": 75, "vocab_size": 60, "complexity": 6, "nloc": 17, "token_counts": 144, "n_ast_nodes": 238, "n_identifiers": 23, "d_id": 8160, "documentation": { "docstring": "\n Execute request with exponential delay.\n\n This method is intended to handle and retry in case of api-specific errors,\n such as 429 \"Too Many Requests\", unlike the `request.execute` which handles\n lower level errors like `ConnectionError`/`socket.timeout`/`ssl.SSLError`.\n\n :param request: request to be executed.\n :param execute_num_retries: num_retries for `request.execute` method.\n :param max_n: number of times to retry request in this method.\n :param is_done_func: callable to determine if operation is done.\n :param is_error_func: callable to determine if operation is failed.\n :return: response\n :rtype: httplib2.Response\n ", "n_words": 79, "vocab_size": 58, "n_whitespaces": 116, "language": "en" } }, { "id": 203402, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admin/models.py", "file_name": "models.py", "fun_name": "get_change_message", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_change_message(self):\n \n if self.change_message and self.change_message[0] == \"[\":\n try:\n change_message = json.loads(self.change_message)\n except json.JSONDecodeError:\n return self.change_message\n messages = []\n for sub_message in change_message:\n if \"added\" in sub_message:\n if sub_message[\"added\"]:\n sub_message[\"added\"][\"name\"] = gettext(\n sub_message[\"added\"][\"name\"]\n )\n messages.append(\n gettext(\"Added {name} "{object}".\").format(\n **sub_message[\"added\"]\n )\n )\n else:\n messages.append(gettext(\"Added.\"))\n\n elif \"changed\" in sub_message:\n sub_message[\"changed\"][\"fields\"] = get_text_list(\n [\n gettext(field_name)\n for field_name in sub_message[\"changed\"][\"fields\"]\n ],\n gettext(\"and\"),\n )\n if \"name\" in sub_message[\"changed\"]:\n sub_message[\"changed\"][\"name\"] = gettext(\n sub_message[\"changed\"][\"name\"]\n )\n messages.append(\n gettext(\"Changed {fields} for {name} "{object}".\").format(\n **sub_message[\"changed\"]\n )\n )\n else:\n messages.append(\n gettext(\"Changed {fields}.\").format(\n **sub_message[\"changed\"]\n )\n )\n\n elif \"deleted\" in sub_message:\n sub_message[\"deleted\"][\"name\"] = gettext(\n sub_message[\"deleted\"][\"name\"]\n )\n messages.append(\n gettext(\"Deleted {name} "{object}".\").format(\n **sub_message[\"deleted\"]\n )\n )\n\n change_message = \" \".join(msg[0].upper() + msg[1:] for msg in messages)\n return change_message or gettext(\"No fields changed.\")\n else:\n return self.change_message\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 1263, "n_words": 119, "vocab_size": 64, "complexity": 13, "nloc": 56, "token_counts": 289, "n_ast_nodes": 520, "n_identifiers": 16, "d_id": 50352, "documentation": { "docstring": "\n If self.change_message is a JSON structure, interpret it as a change\n string, properly translated.\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 36, "language": "en" } }, { "id": 
284355, "commit_id": "34bc290dded1bd2418fc3c6b375a79f9cdd68d5a", "repo": "OpenBBTerminal", "path": "openbb_terminal/portfolio/portfolio_optimization/po_controller.py", "file_name": "po_controller.py", "fun_name": "call_ef", "commit_message": "New portfolio optimization menu (#1642)\n\n* New-Portfolio-Optimization-Menu\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* Update _index.md\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* configure portfolio optimization parameters ini\r\n\r\n* minor improvement\r\n\r\n* Revert \"New-Portfolio-Optimization-Menu\"\r\n\r\nThis reverts commit b4b7169cfbc8f28c379eb1920307c2cdd2e47a0f.\r\n\r\n* Add in Excel functionality and improve the capabilities\r\n\r\n* Add Excel load function\r\n\r\n* Tidying up the functions\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* Re-add my code\r\n\r\n* Some spacing and details\r\n\r\n* Add folder structure for portfolio\r\n\r\n* Update terminal file loading\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* Make it possible to move from params to po with loaded file\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* Making the connection between the parameters file and the functions\r\n\r\n* Add in allocation and new params files\r\n\r\n* Improve params default settings\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* Update Portfolios and Params sheets\r\n\r\n* Update sheets\r\n\r\n* Update command to load in correct sheet\r\n\r\n* Adjust function to only read specific columns\r\n\r\n* Update portfolio\r\n\r\n* Small correction\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* Patched up show error\r\n\r\n* Add Equity portfolio\r\n\r\n* Make functions more robust\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* New-Portfolio-Optimization-Menu\r\n\r\n* Add in Params documentation\r\n\r\n* Fixing Linting\r\n\r\n* Add in Requirements and Poetry Updates\r\n\r\n* Update website\r\n\r\n* linting\r\n\r\n* Update tests\r\n\r\n* Minor fix\r\n\r\n* remove unneccesary READMEs\r\n\r\n* Remove expected variable type\r\n\r\n* Improve documentation\r\n\r\n* Clean up the code\r\n\r\n* Refractoring\r\n\r\n* Adjust names to make it OS friendly\r\n\r\nCo-authored-by: Jeroen Bouma \r\nCo-authored-by: jmaslek \r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: DidierRLopes ", "code": "def call_ef(self, other_args):\n \n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"ef\",\n description=,\n )\n parser.add_argument(\n \"-p\",\n \"--period\",\n default=self.params[\"historic_period\"]\n if \"historic_period\" in self.params\n else \"3y\",\n dest=\"historic_period\",\n help=,\n )\n parser.add_argument(\n \"-s\",\n \"--start\",\n default=self.params[\"start_period\"]\n if \"start_period\" in self.params\n else \"\",\n dest=\"start_period\",\n help=,\n )\n parser.add_argument(\n \"-e\",\n \"--end\",\n default=self.params[\"end_period\"] if \"end_period\" in self.params else \"\",\n dest=\"end_period\",\n help=,\n )\n parser.add_argument(\n \"-lr\",\n \"--log-returns\",\n action=\"store_true\",\n default=self.params[\"log_returns\"]\n if \"log_returns\" in self.params\n else False,\n dest=\"log_returns\",\n help=\"If use logarithmic or 
arithmetic returns to calculate returns\",\n )\n parser.add_argument(\n \"-f\",\n \"--freq\",\n default=self.params[\"return_frequency\"]\n if \"return_frequency\" in self.params\n else \"d\",\n dest=\"return_frequency\",\n help=,\n choices=self.FREQ_CHOICES,\n )\n parser.add_argument(\n \"-mn\",\n \"--maxnan\",\n type=float,\n default=self.params[\"max_nan\"] if \"max_nan\" in self.params else 0.05,\n dest=\"max_nan\",\n help=,\n )\n parser.add_argument(\n \"-th\",\n \"--threshold\",\n type=float,\n default=self.params[\"threshold_value\"]\n if \"threshold_value\" in self.params\n else 0.30,\n dest=\"threshold_value\",\n help=,\n )\n parser.add_argument(\n \"-mt\",\n \"--method\",\n default=self.params[\"nan_fill_method\"]\n if \"nan_fill_method\" in self.params\n else \"time\",\n dest=\"nan_fill_method\",\n help=,\n )\n parser.add_argument(\n \"-rm\",\n \"--risk-measure\",\n default=self.params[\"risk_measure\"]\n if \"risk_measure\" in self.params\n else \"MV\",\n dest=\"risk_measure\",\n help=,\n choices=self.MEAN_RISK_CHOICES,\n )\n parser.add_argument(\n \"-r\",\n \"--risk-free-rate\",\n type=float,\n dest=\"risk_free\",\n default=self.params[\"risk_free\"]\n if \"risk_free\" in self.params\n else get_rf(),\n help=,\n )\n parser.add_argument(\n \"-a\",\n \"--alpha\",\n type=float,\n default=self.params[\"significance_level\"]\n if \"significance_level\" in self.params\n else 0.05,\n dest=\"significance_level\",\n help=\"Significance level of CVaR, EVaR, CDaR and EDaR\",\n )\n parser.add_argument(\n \"-v\",\n \"--value\",\n dest=\"long_allocation\",\n help=\"Amount to allocate to portfolio in long positions\",\n type=float,\n default=self.params[\"long_allocation\"]\n if \"long_allocation\" in self.params\n else 1,\n )\n parser.add_argument(\n \"-vs\",\n \"--value-short\",\n dest=\"short_allocation\",\n help=\"Amount to allocate to portfolio in short positions\",\n type=float,\n default=self.params[\"short_allocation\"]\n if \"short_allocation\" in self.params\n else 0.0,\n )\n if other_args and \"-\" not in other_args[0][0]:\n other_args.insert(0, \"-n\")\n parser.add_argument(\n \"-n\",\n \"--number-portfolios\",\n default=self.params[\"amount_portfolios\"]\n if \"amount_portfolios\" in self.params\n else 100,\n type=check_non_negative,\n dest=\"amount_portfolios\",\n help=\"Number of portfolios to simulate\",\n )\n parser.add_argument(\n \"-se\",\n \"--seed\",\n default=self.params[\"random_seed\"] if \"random_seed\" in self.params else 123,\n type=check_non_negative,\n dest=\"random_seed\",\n help=\"Seed used to generate random portfolios\",\n )\n parser.add_argument(\n \"-t\",\n \"--tangency\",\n action=\"store_true\",\n dest=\"tangency\",\n default=self.params[\"tangency\"] if \"tangency\" in self.params else False,\n help=\"Adds the optimal line with the risk-free asset\",\n )\n ns_parser = parse_known_args_and_warn(parser, other_args)\n\n if ns_parser:\n if len(self.tickers) < 2:\n console.print(\n \"Please have at least 2 loaded tickers to calculate weights.\\n\"\n )\n return\n\n optimizer_view.display_ef(\n stocks=self.tickers,\n period=ns_parser.historic_period,\n start=ns_parser.start_period,\n end=ns_parser.end_period,\n log_returns=ns_parser.log_returns,\n freq=ns_parser.return_frequency,\n maxnan=ns_parser.max_nan,\n threshold=ns_parser.threshold_value,\n method=ns_parser.nan_fill_method,\n risk_measure=ns_parser.risk_measure.lower(),\n risk_free_rate=ns_parser.risk_free,\n alpha=ns_parser.significance_level,\n value=ns_parser.long_allocation,\n 
value_short=ns_parser.short_allocation,\n n_portfolios=ns_parser.amount_portfolios,\n seed=ns_parser.random_seed,\n tangency=ns_parser.tangency,\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 2276, "n_words": 327, "vocab_size": 194, "complexity": 21, "nloc": 223, "token_counts": 800, "n_ast_nodes": 1304, "n_identifiers": 64, "d_id": 84706, "documentation": { "docstring": "Process ef commandThis function plots random portfolios based on their\n risk and returns and shows the efficient frontier.Period to get yfinance data from.\n Possible frequency strings are:\n 'd': means days, for example '252d' means 252 days\n 'w': means weeks, for example '52w' means 52 weeks\n 'mo': means months, for example '12mo' means 12 months\n 'y': means years, for example '1y' means 1 year\n 'ytd': downloads data from beginning of year to today\n 'max': downloads all data available for each assetStart date to get yfinance data from. Must be in\n 'YYYY-MM-DD' formatEnd date to get yfinance data from. Must be in\n 'YYYY-MM-DD' formatFrequency used to calculate returns. Possible values are:\n 'd': for daily returns\n 'w': for weekly returns\n 'm': for monthly returns\n Max percentage of nan values accepted per asset to be\n considered in the optimization processValue used to replace outliers that are higher to threshold\n in absolute valueMethod used to fill nan values in time series, by default time.\n Possible values are:\n 'linear': linear interpolation\n 'time': linear interpolation based on time index\n 'nearest': use nearest value to replace nan values\n 'zero': spline of zeroth order\n 'slinear': spline of first order\n 'quadratic': spline of second order\n 'cubic': spline of third order\n 'barycentric': builds a polynomial that pass for all pointsRisk measure used to optimize the portfolio. Possible values are:\n 'MV' : Variance\n 'MAD' : Mean Absolute Deviation\n 'MSV' : Semi Variance (Variance of negative returns)\n 'FLPM' : First Lower Partial Moment\n 'SLPM' : Second Lower Partial Moment\n 'CVaR' : Conditional Value at Risk\n 'EVaR' : Entropic Value at Risk\n 'WR' : Worst Realization\n 'ADD' : Average Drawdown of uncompounded returns\n 'UCI' : Ulcer Index of uncompounded returns\n 'CDaR' : Conditional Drawdown at Risk of uncompounded returns\n 'EDaR' : Entropic Drawdown at Risk of uncompounded returns\n 'MDD' : Maximum Drawdown of uncompounded returns\n Risk-free rate of borrowing/lending. 
The period of the\n risk-free rate must be annual", "n_words": 314, "vocab_size": 174, "n_whitespaces": 1057, "language": "en" } }, { "id": 152965, "commit_id": "0bdc482d6f1682e103b4c4d7ee7c4d505d2d3b1c", "repo": "modin", "path": "modin/config/envvars.py", "file_name": "envvars.py", "fun_name": "get", "commit_message": "REFACTOR-#3768: change 'compute_chunksize' signature (#3769)\n\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: Anatoly Myachev ", "code": "def get(cls):\n \n min_partition_size = super().get()\n assert min_partition_size > 0, \"`min_partition_size` should be > 0\"\n return min_partition_size\n\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 44, "n_words": 16, "vocab_size": 13, "complexity": 1, "nloc": 4, "token_counts": 23, "n_ast_nodes": 42, "n_identifiers": 4, "d_id": 35209, "documentation": { "docstring": "\n Get ``MinPartitionSize`` with extra checks.\n\n Returns\n -------\n int\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 44, "language": "en" } }, { "id": 259436, "commit_id": "75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc", "repo": "scikit-learn", "path": "sklearn/linear_model/_glm/glm.py", "file_name": "glm.py", "fun_name": "_get_loss", "commit_message": "ENH migrate GLMs / TweedieRegressor to linear loss (#22548)\n\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Thomas J. Fan ", "code": "def _get_loss(self):\n \n return HalfSquaredError()\n\n # TODO(1.3): remove", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 24, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 21, "n_identifiers": 3, "d_id": 75770, "documentation": { "docstring": "This is only necessary because of the link and power arguments of the\n TweedieRegressor.\n\n Note that we do not need to pass sample_weight to the loss class as this is\n only needed to set loss.constant_hessian on which GLMs do not rely.\n ", "n_words": 41, "vocab_size": 32, "n_whitespaces": 69, "language": "en" } }, { "id": 65607, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/buying/utils.py", "file_name": "utils.py", "fun_name": "update_last_purchase_rate", "commit_message": "style: format code with black", "code": "def update_last_purchase_rate(doc, is_submit):\n\t\n\timport frappe.utils\n\n\tthis_purchase_date = frappe.utils.getdate(doc.get(\"posting_date\") or doc.get(\"transaction_date\"))\n\n\tfor d in doc.get(\"items\"):\n\t\t# get last purchase details\n\t\tlast_purchase_details = get_last_purchase_details(d.item_code, doc.name)\n\n\t\t# compare last purchase date and this transaction's date\n\t\tlast_purchase_rate = None\n\t\tif last_purchase_details and (\n\t\t\tdoc.get(\"docstatus\") == 2 or last_purchase_details.purchase_date > this_purchase_date\n\t\t):\n\t\t\tlast_purchase_rate = last_purchase_details[\"base_net_rate\"]\n\t\telif is_submit == 1:\n\t\t\t# even if this transaction is the latest one, it should be submitted\n\t\t\t# for it to be considered for latest purchase rate\n\t\t\tif flt(d.conversion_factor):\n\t\t\t\tlast_purchase_rate = flt(d.base_net_rate) / flt(d.conversion_factor)\n\t\t\t# Check if item code is present\n\t\t\t# Conversion factor should not be mandatory for non itemized items\n\t\t\telif d.item_code:\n\t\t\t\tfrappe.throw(_(\"UOM Conversion factor is required in row 
{0}\").format(d.idx))\n\n\t\t# update last purchsae rate\n\t\tfrappe.db.set_value(\"Item\", d.item_code, \"last_purchase_rate\", flt(last_purchase_rate))\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 98, "n_words": 121, "vocab_size": 80, "complexity": 9, "nloc": 16, "token_counts": 153, "n_ast_nodes": 263, "n_identifiers": 24, "d_id": 13953, "documentation": { "docstring": "updates last_purchase_rate in item table for each item", "n_words": 8, "vocab_size": 7, "n_whitespaces": 7, "language": "en" } }, { "id": 73792, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/models/__init__.py", "file_name": "__init__.py", "fun_name": "clean_subpage_models", "commit_message": "Reformat with black", "code": "def clean_subpage_models(cls):\n \n if cls._clean_subpage_models is None:\n subpage_types = getattr(cls, \"subpage_types\", None)\n if subpage_types is None:\n # if subpage_types is not specified on the Page class, allow all page types as subpages\n cls._clean_subpage_models = get_page_models()\n else:\n cls._clean_subpage_models = [\n resolve_model_string(model_string, cls._meta.app_label)\n for model_string in subpage_types\n ]\n\n for model in cls._clean_subpage_models:\n if not issubclass(model, Page):\n raise LookupError(\"%s is not a Page subclass\" % model)\n\n return cls._clean_subpage_models\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 273, "n_words": 64, "vocab_size": 44, "complexity": 6, "nloc": 14, "token_counts": 84, "n_ast_nodes": 137, "n_identifiers": 14, "d_id": 16113, "documentation": { "docstring": "\n Returns the list of subpage types, normalised as model classes.\n Throws ValueError if any entry in subpage_types cannot be recognised as a model name,\n or LookupError if a model does not exist (or is not a Page subclass).\n ", "n_words": 38, "vocab_size": 31, "n_whitespaces": 67, "language": "en" } }, { "id": 157273, "commit_id": "80dd84d46ef6b7befa1b416c4597c83ef81ef972", "repo": "dask", "path": "dask/tests/test_sizeof.py", "file_name": "test_sizeof.py", "fun_name": "test_pandas_contiguous_dtypes", "commit_message": "Deflate sizeof() of duplicate references to pandas object types (#9776)", "code": "def test_pandas_contiguous_dtypes():\n \n pd = pytest.importorskip(\"pandas\")\n df1 = pd.DataFrame([[1, 2.2], [3, 4.4]])\n df2 = pd.DataFrame([[1.1, 2.2], [3.3, 4.4]])\n assert sizeof(df2) < sizeof(df1)\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 36, "n_words": 21, "vocab_size": 17, "complexity": 1, "nloc": 5, "token_counts": 75, "n_ast_nodes": 99, "n_identifiers": 8, "d_id": 36896, "documentation": { "docstring": "2+ contiguous columns of the same dtype in the same DataFrame share the same\n surface thus have lower overhead\n ", "n_words": 19, "vocab_size": 15, "n_whitespaces": 25, "language": "en" } }, { "id": 228667, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/bar/_hoverlabel.py", "file_name": "_hoverlabel.py", "fun_name": "namelengthsrc", "commit_message": "switch to black .22", "code": "def namelengthsrc(self):\n \n return self[\"namelengthsrc\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 60340, "documentation": { "docstring": "\n Sets the source reference on Chart Studio Cloud for\n `namelength`.\n\n The 'namelengthsrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n ", "n_words": 27, "vocab_size": 25, "n_whitespaces": 84, "language": "en" } }, { "id": 217896, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/imaplib.py", "file_name": "imaplib.py", "fun_name": "list", "commit_message": "add python 3.10.4 for windows", "code": "def list(self, directory='\"\"', pattern='*'):\n \n name = 'LIST'\n typ, dat = self._simple_command(name, directory, pattern)\n return self._untagged_response(typ, dat, name)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 45, "n_words": 17, "vocab_size": 16, "complexity": 1, "nloc": 4, "token_counts": 42, "n_ast_nodes": 69, "n_identifiers": 9, "d_id": 54999, "documentation": { "docstring": "List mailbox names in directory matching pattern.\n\n (typ, [data]) = .list(directory='\"\"', pattern='*')\n\n 'data' is list of LIST responses.\n ", "n_words": 18, "vocab_size": 18, "n_whitespaces": 39, "language": "en" } }, { "id": 130920, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/serve/pipeline/node.py", "file_name": "node.py", "fun_name": "deploy", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def deploy(self) -> Pipeline:\n \n [node.deploy() for node in self._incoming_edges]\n self._executor = create_executor_from_step_config(\n self._serialized_callable_factory, self._config\n )\n\n return Pipeline(self)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 63, "n_words": 17, "vocab_size": 17, "complexity": 2, "nloc": 10, "token_counts": 40, "n_ast_nodes": 65, "n_identifiers": 9, "d_id": 29427, "documentation": { "docstring": "Instantiates executors for this and all dependent nodes.\n\n After the pipeline is deployed, .call() and .call_async() can be used.\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 33, "language": "en" } }, { "id": 288895, "commit_id": "f23b1750e85f07091eb896a0b12b8f95e5646338", "repo": "core", "path": "tests/components/homekit_controller/test_light.py", "file_name": "test_light.py", "fun_name": "test_only_migrate_once", "commit_message": "Migrate HomeKit Controller to use stable identifiers (#80064)", "code": "async def test_only_migrate_once(hass, utcnow):\n \n entity_registry = er.async_get(hass)\n aid = get_next_aid()\n old_light_entry = entity_registry.async_get_or_create(\n \"light\",\n \"homekit_controller\",\n f\"homekit-00:00:00:00:00:00-{aid}-8\",\n )\n new_light_entry = entity_registry.async_get_or_create(\n \"light\",\n \"homekit_controller\",\n f\"00:00:00:00:00:00_{aid}_8\",\n )\n await setup_test_component(hass, create_lightbulb_service_with_color_temp)\n\n assert (\n entity_registry.async_get(old_light_entry.entity_id).unique_id\n == f\"homekit-00:00:00:00:00:00-{aid}-8\"\n )\n\n assert (\n entity_registry.async_get(new_light_entry.entity_id).unique_id\n == f\"00:00:00:00:00:00_{aid}_8\"\n )\n", 
"url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 145, "n_words": 39, "vocab_size": 27, "complexity": 1, "nloc": 22, "token_counts": 88, "n_ast_nodes": 163, "n_identifiers": 15, "d_id": 88044, "documentation": { "docstring": "Test a we handle migration happening after an upgrade and than a downgrade and then an upgrade.", "n_words": 17, "vocab_size": 14, "n_whitespaces": 16, "language": "en" } }, { "id": 197542, "commit_id": "7fe8e027ae1d7f683243c0229b961671a6cbb4c5", "repo": "sympy", "path": "sympy/stats/stochastic_process_types.py", "file_name": "stochastic_process_types.py", "fun_name": "expectation", "commit_message": "Improved some documentation in the stats module", "code": "def expectation(self, expr, condition=None, evaluate=True, **kwargs):\n \n\n return _SubstituteRV._expectation(expr, condition, evaluate, **kwargs)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 25, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 2, "token_counts": 33, "n_ast_nodes": 48, "n_identifiers": 8, "d_id": 48620, "documentation": { "docstring": "\n Computes expectation.\n\n Parameters\n ==========\n\n expr : RandomIndexedSymbol, Relational, Logic\n Condition for which expectation has to be computed. Must\n contain a RandomIndexedSymbol of the process.\n condition : Relational, Logic\n The given conditions under which computations should be done.\n\n Returns\n =======\n\n Expectation of the RandomIndexedSymbol.\n\n ", "n_words": 43, "vocab_size": 36, "n_whitespaces": 140, "language": "en" } }, { "id": 271113, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/data_adapter.py", "file_name": "data_adapter.py", "fun_name": "slice_inputs", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def slice_inputs(self, indices_dataset, inputs):\n \n dataset = tf.data.Dataset.zip(\n (indices_dataset, tf.data.Dataset.from_tensors(inputs).repeat())\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 42, "n_words": 10, "vocab_size": 10, "complexity": 2, "nloc": 14, "token_counts": 103, "n_ast_nodes": 62, "n_identifiers": 11, "d_id": 80693, "documentation": { "docstring": "Slice inputs into a Dataset of batches.\n\n Given a Dataset of batch indices and the unsliced inputs,\n this step slices the inputs in a parallelized fashion\n and produces a dataset of input batches.\n\n Args:\n indices_dataset: A Dataset of batched indices\n inputs: A python data structure that contains the inputs, targets,\n and possibly sample weights.\n\n Returns:\n A Dataset of input batches matching the batch indices.\n ", "n_words": 64, "vocab_size": 41, "n_whitespaces": 144, "language": "en" } }, { "id": 229518, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/contour/_contours.py", "file_name": "_contours.py", "fun_name": "operation", "commit_message": "switch to black .22", "code": "def operation(self):\n \n return self[\"operation\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 
22, "n_identifiers": 2, "d_id": 61191, "documentation": { "docstring": "\n Sets the constraint operation. \"=\" keeps regions equal to\n `value` \"<\" and \"<=\" keep regions less than `value` \">\" and\n \">=\" keep regions greater than `value` \"[]\", \"()\", \"[)\", and\n \"(]\" keep regions inside `value[0]` to `value[1]` \"][\", \")(\",\n \"](\", \")[\" keep regions outside `value[0]` to value[1]` Open\n vs. closed intervals make no difference to constraint display,\n but all versions are allowed for consistency with filter\n transforms.\n\n The 'operation' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['=', '<', '>=', '>', '<=', '[]', '()', '[)', '(]', '][',\n ')(', '](', ')[']\n\n Returns\n -------\n Any\n ", "n_words": 101, "vocab_size": 82, "n_whitespaces": 232, "language": "en" } }, { "id": 207036, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_checks/tests.py", "file_name": "tests.py", "fun_name": "test_generic_inline_model_admin_bad_fk_field", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_generic_inline_model_admin_bad_fk_field(self):\n \n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 9, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 15, "token_counts": 72, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 51842, "documentation": { "docstring": "\n A GenericInlineModelAdmin errors if the ct_fk_field points to a\n nonexistent field.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 33, "language": "en" } }, { "id": 20032, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/distlib/markers.py", "file_name": "markers.py", "fun_name": "evaluate", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def evaluate(self, expr, context):\n \n if isinstance(expr, string_types):\n if expr[0] in '\\'\"':\n result = expr[1:-1]\n else:\n if expr not in context:\n raise SyntaxError('unknown variable: %s' % expr)\n result = context[expr]\n else:\n assert isinstance(expr, dict)\n op = expr['op']\n if op not in self.operations:\n raise NotImplementedError('op not implemented: %s' % op)\n elhs = expr['lhs']\n erhs = expr['rhs']\n if _is_literal(expr['lhs']) and _is_literal(expr['rhs']):\n raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs))\n\n lhs = self.evaluate(elhs, context)\n rhs = self.evaluate(erhs, context)\n if ((elhs == 'python_version' or erhs == 'python_version') and\n op in ('<', '<=', '>', '>=', '===', '==', '!=', '~=')):\n lhs = NV(lhs)\n rhs = NV(rhs)\n elif elhs == 'python_version' and op in ('in', 'not in'):\n lhs = NV(lhs)\n rhs = _get_versions(rhs)\n result = self.operations[op](lhs, rhs)\n return result\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 463, "n_words": 123, "vocab_size": 73, "complexity": 12, "nloc": 28, "token_counts": 233, "n_ast_nodes": 395, "n_identifiers": 19, "d_id": 3185, "documentation": { "docstring": "\n Evaluate a marker expression returned by the :func:`parse_requirement`\n function in the specified context.\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 35, "language": "en" } }, { "id": 197067, "commit_id": "e0dc14eca132f37c5f49369eb4051eae37c9b119", "repo": "sympy", "path": "sympy/solvers/solveset.py", "file_name": "solveset.py", "fun_name": "_is_function_class_equation", "commit_message": "Refactored import ordering in functions", "code": "def _is_function_class_equation(func_class, f, symbol):\n \n if f.is_Mul or f.is_Add:\n return all(_is_function_class_equation(func_class, arg, symbol)\n for arg in f.args)\n\n if f.is_Pow:\n if not f.exp.has(symbol):\n return _is_function_class_equation(func_class, f.base, symbol)\n else:\n return False\n\n if not f.has(symbol):\n return True\n\n if isinstance(f, func_class):\n try:\n g = Poly(f.args[0], symbol)\n return g.degree() <= 1\n except PolynomialError:\n return False\n else:\n return False\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 192, "n_words": 52, "vocab_size": 35, "complexity": 9, "nloc": 19, "token_counts": 119, "n_ast_nodes": 185, "n_identifiers": 18, "d_id": 48321, "documentation": { "docstring": " Tests whether the equation is an equation of the given function class.\n\n The given equation belongs to the given function class if it is\n comprised of functions of the function class which are multiplied by\n or added to expressions independent of the symbol. 
In addition, the\n arguments of all such functions must be linear in the symbol as well.\n\n Examples\n ========\n\n >>> from sympy.solvers.solveset import _is_function_class_equation\n >>> from sympy import tan, sin, tanh, sinh, exp\n >>> from sympy.abc import x\n >>> from sympy.functions.elementary.trigonometric import TrigonometricFunction\n >>> from sympy.functions.elementary.hyperbolic import HyperbolicFunction\n >>> _is_function_class_equation(TrigonometricFunction, exp(x) + tan(x), x)\n False\n >>> _is_function_class_equation(TrigonometricFunction, tan(x) + sin(x), x)\n True\n >>> _is_function_class_equation(TrigonometricFunction, tan(x**2), x)\n False\n >>> _is_function_class_equation(TrigonometricFunction, tan(x + 2), x)\n True\n >>> _is_function_class_equation(HyperbolicFunction, tanh(x) + sinh(x), x)\n True\n ", "n_words": 123, "vocab_size": 73, "n_whitespaces": 190, "language": "en" } }, { "id": 264921, "commit_id": "951627093c11584ffb73ad2be2aef40a91a90934", "repo": "netbox", "path": "netbox/wireless/signals.py", "file_name": "signals.py", "fun_name": "update_connected_interfaces", "commit_message": "Test cleanup", "code": "def update_connected_interfaces(instance, created, raw=False, **kwargs):\n \n logger = logging.getLogger('netbox.wireless.wirelesslink')\n if raw:\n logger.debug(f\"Skipping endpoint updates for imported wireless link {instance}\")\n return\n\n if instance.interface_a.wireless_link != instance:\n logger.debug(f\"Updating interface A for wireless link {instance}\")\n instance.interface_a.wireless_link = instance\n instance.interface_a._link_peer = instance.interface_b\n instance.interface_a.save()\n if instance.interface_b.cable != instance:\n logger.debug(f\"Updating interface B for wireless link {instance}\")\n instance.interface_b.wireless_link = instance\n instance.interface_b._link_peer = instance.interface_a\n instance.interface_b.save()\n\n # Create/update cable paths\n if created:\n for interface in (instance.interface_a, instance.interface_b):\n create_cablepath([interface])\n\n\n@receiver(post_delete, sender=WirelessLink)", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "@receiver(post_delete, sender=WirelessLink)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 177, "n_words": 69, "vocab_size": 46, "complexity": 6, "nloc": 18, "token_counts": 134, "n_ast_nodes": 244, "n_identifiers": 21, "d_id": 77914, "documentation": { "docstring": "\n When a WirelessLink is saved, save a reference to it on each connected interface.\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 21, "language": "en" } }, { "id": 223818, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/message.py", "file_name": "message.py", "fun_name": "get_payload", "commit_message": "add python 3.10.4 for windows", "code": "def get_payload(self, i=None, decode=False):\n \n # Here is the logic table for this code, based on the email5.0.0 code:\n # i decode is_multipart result\n # ------ ------ ------------ ------------------------------\n # None True True None\n # i True True None\n # None False True _payload (a list)\n # i False True _payload element i (a Message)\n # i False False error (not a list)\n # i True False error (not a list)\n # None False False _payload\n # None True False _payload decoded (bytes)\n # Note that Barry planned to factor out the 'decode' case, but that\n # isn't so easy now that we handle the 8 bit data, which needs to be\n # converted in both the 
decode and non-decode path.\n if self.is_multipart():\n if decode:\n return None\n if i is None:\n return self._payload\n else:\n return self._payload[i]\n # For backward compatibility, Use isinstance and this error message\n # instead of the more logical is_multipart test.\n if i is not None and not isinstance(self._payload, list):\n raise TypeError('Expected list, got %s' % type(self._payload))\n payload = self._payload\n # cte might be a Header, so for now stringify it.\n cte = str(self.get('content-transfer-encoding', '')).lower()\n # payload may be bytes here.\n if isinstance(payload, str):\n if utils._has_surrogates(payload):\n bpayload = payload.encode('ascii', 'surrogateescape')\n if not decode:\n try:\n payload = bpayload.decode(self.get_param('charset', 'ascii'), 'replace')\n except LookupError:\n payload = bpayload.decode('ascii', 'replace')\n elif decode:\n try:\n bpayload = payload.encode('ascii')\n except UnicodeError:\n # This won't happen for RFC compliant messages (messages\n # containing only ASCII code points in the unicode input).\n # If it does happen, turn the string into bytes in a way\n # guaranteed not to fail.\n bpayload = payload.encode('raw-unicode-escape')\n if not decode:\n return payload\n if cte == 'quoted-printable':\n return quopri.decodestring(bpayload)\n elif cte == 'base64':\n # XXX: this is a bit of a hack; decode_b should probably be factored\n # out somewhere, but I haven't figured out where yet.\n value, defects = decode_b(b''.join(bpayload.splitlines()))\n for defect in defects:\n self.policy.handle_defect(self, defect)\n return value\n elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):\n in_file = BytesIO(bpayload)\n out_file = BytesIO()\n try:\n uu.decode(in_file, out_file, quiet=True)\n return out_file.getvalue()\n except uu.Error:\n # Some decoding problem\n return bpayload\n if isinstance(payload, str):\n return bpayload\n return payload\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 1271, "n_words": 350, "vocab_size": 187, "complexity": 19, "nloc": 45, "token_counts": 301, "n_ast_nodes": 534, "n_identifiers": 39, "d_id": 57083, "documentation": { "docstring": "Return a reference to the payload.\n\n The payload will either be a list object or a string. If you mutate\n the list object, you modify the message's payload in place. Optional\n i returns that index into the payload.\n\n Optional decode is a flag indicating whether the payload should be\n decoded or not, according to the Content-Transfer-Encoding header\n (default is False).\n\n When True and the message is not a multipart, the payload will be\n decoded if this header's value is `quoted-printable' or `base64'. If\n some other encoding is used, or the header is missing, or if the\n payload has bogus data (i.e. 
bogus base64 or uuencoded data), the\n payload is returned as-is.\n\n If the message is a multipart and the decode flag is True, then None\n is returned.\n ", "n_words": 127, "vocab_size": 73, "n_whitespaces": 228, "language": "en" } }, { "id": 226302, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_box.py", "file_name": "_box.py", "fun_name": "notched", "commit_message": "switch to black .22", "code": "def notched(self):\n \n return self[\"notched\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 57975, "documentation": { "docstring": "\n Determines whether or not notches are drawn. Notches displays a\n confidence interval around the median. We compute the\n confidence interval as median +/- 1.57 * IQR / sqrt(N), where\n IQR is the interquartile range and N is the sample size. If two\n boxes' notches do not overlap there is 95% confidence their\n medians differ. See\n https://sites.google.com/site/davidsstatistics/home/notched-\n box-plots for more info. Defaults to False unless `notchwidth`\n or `notchspan` is set.\n\n The 'notched' property must be specified as a bool\n (either True, or False)\n\n Returns\n -------\n bool\n ", "n_words": 85, "vocab_size": 68, "n_whitespaces": 191, "language": "en" } }, { "id": 196425, "commit_id": "59d22b6bb7287613d598611027f640d068ca5748", "repo": "sympy", "path": "sympy/solvers/polysys.py", "file_name": "polysys.py", "fun_name": "solve_biquadratic", "commit_message": "Moved imports to higher level", "code": "def solve_biquadratic(f, g, opt):\n \n G = groebner([f, g])\n\n if len(G) == 1 and G[0].is_ground:\n return None\n\n if len(G) != 2:\n raise SolveFailed\n\n x, y = opt.gens\n p, q = G\n if not p.gcd(q).is_ground:\n # not 0-dimensional\n raise SolveFailed\n\n p = Poly(p, x, expand=False)\n p_roots = [rcollect(expr, y) for expr in roots(p).keys()]\n\n q = q.ltrim(-1)\n q_roots = list(roots(q).keys())\n\n solutions = []\n\n for q_root in q_roots:\n for p_root in p_roots:\n solution = (p_root.subs(y, q_root), q_root)\n solutions.append(solution)\n\n return sorted(solutions, key=default_sort_key)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 176, "n_words": 77, "vocab_size": 55, "complexity": 8, "nloc": 20, "token_counts": 170, "n_ast_nodes": 266, "n_identifiers": 34, "d_id": 47925, "documentation": { "docstring": "Solve a system of two bivariate quadratic polynomial equations.\n\n Parameters\n ==========\n\n f: a single Expr or Poly\n First equation\n g: a single Expr or Poly\n Second Equation\n opt: an Options object\n For specifying keyword arguments and generators\n\n Returns\n =======\n\n List[Tuple]\n A List of tuples. 
Solutions for symbols that satisfy the\n equations listed in seq.\n\n Examples\n ========\n\n >>> from sympy import Options, Poly\n >>> from sympy.abc import x, y\n >>> from sympy.solvers.polysys import solve_biquadratic\n >>> NewOption = Options((x, y), {'domain': 'ZZ'})\n\n >>> a = Poly(y**2 - 4 + x, y, x, domain='ZZ')\n >>> b = Poly(y*2 + 3*x - 7, y, x, domain='ZZ')\n >>> solve_biquadratic(a, b, NewOption)\n [(1/3, 3), (41/27, 11/9)]\n\n >>> a = Poly(y + x**2 - 3, y, x, domain='ZZ')\n >>> b = Poly(-y + x - 4, y, x, domain='ZZ')\n >>> solve_biquadratic(a, b, NewOption)\n [(7/2 - sqrt(29)/2, -sqrt(29)/2 - 1/2), (sqrt(29)/2 + 7/2, -1/2 + \\\n sqrt(29)/2)]\n ", "n_words": 149, "vocab_size": 97, "n_whitespaces": 258, "language": "en" } }, { "id": 271042, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/compile_utils.py", "file_name": "compile_utils.py", "fun_name": "_get_metric_object", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _get_metric_object(self, metric, y_t, y_p):\n \n if metric is None:\n return None # Ok to have no metric for an output.\n\n # Convenience feature for selecting b/t binary, categorical,\n # and sparse categorical.\n if str(metric).lower() not in [\"accuracy\", \"acc\", \"crossentropy\", \"ce\"]:\n metric_obj = metrics_mod.get(metric)\n else:\n y_t_rank = len(y_t.shape.as_list())\n y_p_rank = len(y_p.shape.as_list())\n y_t_last_dim = y_t.shape.as_list()[-1]\n y_p_last_dim = y_p.shape.as_list()[-1]\n\n is_binary = y_p_last_dim == 1\n is_sparse_categorical = (\n y_t_rank < y_p_rank or y_t_last_dim == 1 and y_p_last_dim > 1\n )\n\n if str(metric).lower() in [\"accuracy\", \"acc\"]:\n if is_binary:\n metric_obj = metrics_mod.binary_accuracy\n elif is_sparse_categorical:\n metric_obj = metrics_mod.sparse_categorical_accuracy\n else:\n metric_obj = metrics_mod.categorical_accuracy\n else:\n if is_binary:\n metric_obj = metrics_mod.binary_crossentropy\n elif is_sparse_categorical:\n metric_obj = metrics_mod.sparse_categorical_crossentropy\n else:\n metric_obj = metrics_mod.categorical_crossentropy\n\n if isinstance(metric_obj, losses_mod.Loss):\n metric_obj._allow_sum_over_batch_size = (\n True # pylint: disable=protected-access\n )\n\n if not isinstance(metric_obj, metrics_mod.Metric):\n if isinstance(metric, str):\n metric_name = metric\n else:\n metric_name = get_custom_object_name(metric)\n if metric_name is None:\n raise ValueError(\n f\"Metric should be a callable, received: {metric}\"\n )\n\n metric_obj = metrics_mod.MeanMetricWrapper(\n metric_obj, name=metric_name\n )\n\n return metric_obj\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 764, "n_words": 157, "vocab_size": 89, "complexity": 14, "nloc": 45, "token_counts": 256, "n_ast_nodes": 428, "n_identifiers": 35, "d_id": 80679, "documentation": { "docstring": "Converts user-supplied metric to a `Metric` object.\n\n Args:\n metric: A string, function, or `Metric` object.\n y_t: Sample of label.\n y_p: Sample of output.\n\n Returns:\n A `Metric` object.\n ", "n_words": 27, "vocab_size": 20, "n_whitespaces": 84, "language": "en" } }, { "id": 248358, "commit_id": "4cc4229cd7a55d2556c798fecbb1c9660dc821c8", "repo": "synapse", "path": "tests/rest/client/test_retention.py", "file_name": "test_retention.py", "fun_name": "test_visibility_when_disabled", "commit_message": "Prevent expired events from being filtered out 
when retention is disabled (#12611)\n\nCo-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>\r\nCo-authored-by: Patrick Cloke ", "code": "def test_visibility_when_disabled(self) -> None:\n \n room_id = self.helper.create_room_as(self.user_id, tok=self.token)\n\n self.helper.send_state(\n room_id=room_id,\n event_type=EventTypes.Retention,\n body={\"max_lifetime\": one_day_ms},\n tok=self.token,\n )\n\n resp = self.helper.send(room_id=room_id, body=\"test\", tok=self.token)\n\n self.reactor.advance(one_day_ms * 2 / 1000)\n\n self.get_event(room_id, resp[\"event_id\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 120, "n_words": 27, "vocab_size": 25, "complexity": 1, "nloc": 12, "token_counts": 102, "n_ast_nodes": 160, "n_identifiers": 19, "d_id": 72235, "documentation": { "docstring": "Retention policies should be ignored when the retention feature is disabled.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 195887, "commit_id": "f5ef4e62e5cb5637f2bf2af0ee73e43c58c33c25", "repo": "sympy", "path": "sympy/core/basic.py", "file_name": "basic.py", "fun_name": "__eq__", "commit_message": "core/basic: Basic.__eq__ only performs user defined conversions\n\ncore/evalf: no longer create unneeded Tuples with None arguments\nFixes #22581\n\nonly use _sympify in __eq__ when needed\n\ndefined _converter and updated Boolean comparisons\n\nremoved try-except for sympify because it should always be possible at that point\n\ncompletely split sympy and external converters\n\nchecking entire mro\n\nuse the relevant part of sympify directly\n\ntype from copy paste\n\nremoved ambiguous try-except blocks\n\nchanged resolve order for sympy/user converters and mro\n\nupdated documentation\n\ntypo", "code": "def __eq__(self, other):\n \n if self is other:\n return True\n\n if not isinstance(other, Basic):\n return self._do_eq_sympify(other)\n\n # check for pure number expr\n if not (self.is_Number and other.is_Number) and (\n type(self) != type(other)):\n return False\n a, b = self._hashable_content(), other._hashable_content()\n if a != b:\n return False\n # check number *in* an expression\n for a, b in zip(a, b):\n if not isinstance(a, Basic):\n continue\n if a.is_Number and type(a) != type(b):\n return False\n return True\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 253, "n_words": 71, "vocab_size": 45, "complexity": 11, "nloc": 17, "token_counts": 120, "n_ast_nodes": 193, "n_identifiers": 12, "d_id": 47468, "documentation": { "docstring": "Return a boolean indicating whether a == b on the basis of\n their symbolic trees.\n\n This is the same as a.compare(b) == 0 but faster.\n\n Notes\n =====\n\n If a class that overrides __eq__() needs to retain the\n implementation of __hash__() from a parent class, the\n interpreter must be told this explicitly by setting\n __hash__ : Callable[[object], int] = .__hash__.\n Otherwise the inheritance of __hash__() will be blocked,\n just as if __hash__ had been explicitly set to None.\n\n References\n ==========\n\n from http://docs.python.org/dev/reference/datamodel.html#object.__hash__\n ", "n_words": 81, "vocab_size": 64, "n_whitespaces": 179, "language": "en" } }, { "id": 30421, "commit_id": "deca40c2e26afed62e1f9ec4be14aff9e125929b", "repo": "spotify-downloader", "path": "spotdl/utils/console.py", "file_name": 
"console.py", "fun_name": "check_for_updates", "commit_message": "moved console actions to a new file", "code": "def check_for_updates():\n \n\n version_message = get_update_status()\n\n print(version_message)\n\n", "url": "https://github.com/spotDL/spotify-downloader.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 15, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 14, "n_ast_nodes": 28, "n_identifiers": 4, "d_id": 5565, "documentation": { "docstring": "\n Check for updates to the current version.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 14, "language": "en" } }, { "id": 286154, "commit_id": "eb244a1d8d01e1ad93c5dc349656aa4170397f90", "repo": "OpenBBTerminal", "path": "openbb_terminal/terminal_helper.py", "file_name": "terminal_helper.py", "fun_name": "is_packaged_application", "commit_message": "Docker : building + publishing (#2904)\n\n* fixed integrated test test_stocks_ba.openbb\r\n\r\n* fixed integrated test test_stocks_dd.openbb\r\n\r\n* improved and centralised the check\r\n\r\n* fix lint\r\n\r\n* Docker : update ci + build files\r\n\r\n* Docker : update build and CD\r\n\r\n* Docker : update CD\r\n\r\n* Docker : test\r\n\r\n* Docker : test CD\r\n\r\n* Docker : test CD\r\n\r\n* Docker : rename `build.sh`\r\n\r\n* Docker : tests CD\r\n\r\n* Docker : test CD\r\n\r\n* Docker : update CD + build\r\n\r\n* Docker : fix CD\r\n\r\n* Docker : fix CD\r\n\r\n* Docker : build\r\n\r\n* Docker : test CD\r\n\r\n* Docker : CD\r\n\r\n* Docker : CD\r\n\r\n* Docker : test\r\n\r\n* Docker : test CD\r\n\r\n* Docker : test CD\r\n\r\n* Docker : test CD\r\n\r\n* Docker : test CD\r\n\r\n* Docker : test CD\r\n\r\n* Docker : test CD\r\n\r\n* Docker : test CD\r\n\r\n* Docker : test CD\r\n\r\n* Docker : build + CD\r\n\r\n* Docker : test CD\r\n\r\n* Docker : test CD\r\n\r\n* Docker : test CD\r\n\r\n* Docker : test CD\r\n\r\n* Docker : build\r\n\r\n* Docker : test CD\r\n\r\n* Docker : build + cd\r\n\r\n* Moving `scripts`\r\n\r\n* Checkout `helper_funcs.py` from main\r\n\r\n* Docker : remove test file with alpine\r\n\r\n* fixing readme errors\r\n\r\n* fixing missed readme errors\r\n\r\n* Docker : build\r\n\r\n* Logging : handle docker app name\r\n\r\n* Docker : test CD\r\n\r\n* Docker : cd\r\n\r\n* Doc\r\n\r\n* Doc\r\n\r\n* Doc : linting\r\n\r\n* Doc\r\n\r\n* Docker\r\n\r\n* Doc\r\n\r\n* Fixing `terminal_controller`\r\n\r\n* Linting\r\n\r\n* Doc : fixing links\r\n\r\n* Version 1.9.1\r\n\r\n* Docker : fix name\r\n\r\n* Doc : add volumes in command\r\n\r\nCo-authored-by: hjoaquim \r\nCo-authored-by: James Simmons \r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>", "code": "def is_packaged_application() -> bool:\n \n\n return cfg.LOGGING_APP_NAME == \"gst_packaged\"\n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 14, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 8, "token_counts": 13, "n_ast_nodes": 26, "n_identifiers": 4, "d_id": 85600, "documentation": { "docstring": "Tell whether or not it is a packaged version (Windows or Mac installer).\n\n\n Returns:\n bool: If the application is packaged\n ", "n_words": 20, "vocab_size": 17, "n_whitespaces": 33, "language": "en" } }, { "id": 124671, "commit_id": "45ba0e3cacbf4f38b9724437798c75341c2ddc7c", "repo": "ray", "path": "python/ray/data/dataset_pipeline.py", "file_name": "dataset_pipeline.py", "fun_name": "_optimize_stages", 
"commit_message": "Object GC for block splitting inside the dataset splitting (#26196)\n\nThe pipeline will spill objects when splitting the dataset into multiple equal parts.\r\n\r\nCo-authored-by: Ubuntu ", "code": "def _optimize_stages(self):\n \n context = DatasetContext.get_current()\n\n if not context.optimize_fuse_stages:\n self._optimized_stages = self._stages\n return\n\n # This dummy dataset will be used to get a set of optimized stages.\n dummy_ds = Dataset(\n ExecutionPlan(BlockList([], []), DatasetStats(stages={}, parent=None)),\n 0,\n True,\n used_from_dataset_pipeline=True,\n )\n # Apply all pipeline operations to the dummy dataset.\n for stage in self._stages:\n dummy_ds = stage(dummy_ds)\n # Get the optimized stages.\n _, _, stages = dummy_ds._plan._optimize()\n # Apply these optimized stages to the datasets underlying the pipeline.\n # These optimized stages will be executed by the PipelineExecutor.\n optimized_stages = []\n for stage in stages:\n optimized_stages.append(\n lambda ds, stage=stage: Dataset(\n ds._plan.with_stage(stage),\n ds._epoch,\n True,\n used_from_dataset_pipeline=True,\n )\n )\n self._optimized_stages = optimized_stages\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 415, "n_words": 105, "vocab_size": 67, "complexity": 4, "nloc": 25, "token_counts": 138, "n_ast_nodes": 217, "n_identifiers": 25, "d_id": 27652, "documentation": { "docstring": "Optimize this pipeline, fusing stages together as possible.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 211434, "commit_id": "d4e34fe165c09db65fd00113708be1b711ac957c", "repo": "PaddleDetection", "path": "ppdet/modeling/losses/pose3d_loss.py", "file_name": "pose3d_loss.py", "fun_name": "vertices_loss", "commit_message": "pose3d metro modeling (#6612)\n\n* pose3d metro modeling\r\n\r\n* delete extra comments", "code": "def vertices_loss(criterion_vertices, pred_vertices, gt_vertices, has_smpl):\n \n pred_vertices_with_shape = pred_vertices[has_smpl == 1]\n gt_vertices_with_shape = gt_vertices[has_smpl == 1]\n if len(gt_vertices_with_shape) > 0:\n return criterion_vertices(pred_vertices_with_shape,\n gt_vertices_with_shape)\n else:\n return paddle.to_tensor([1.]).fill_(0.)\n\n\n@register\n@serializable", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "@register\n@serializable", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 87, "n_words": 27, "vocab_size": 23, "complexity": 2, "nloc": 8, "token_counts": 61, "n_ast_nodes": 99, "n_identifiers": 13, "d_id": 53098, "documentation": { "docstring": "\n Compute per-vertex loss if vertex annotations are available.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 15, "language": "en" } }, { "id": 132815, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tune/trainable.py", "file_name": "trainable.py", "fun_name": "step", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def step(self):\n \n if self._implements_method(\"_train\") and log_once(\"_train\"):\n raise DeprecationWarning(\n \"Trainable._train is deprecated and is now removed. 
Override \"\n \"Trainable.step instead.\"\n )\n raise NotImplementedError\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 95, "n_words": 22, "vocab_size": 19, "complexity": 3, "nloc": 7, "token_counts": 27, "n_ast_nodes": 55, "n_identifiers": 6, "d_id": 29807, "documentation": { "docstring": "Subclasses should override this to implement train().\n\n The return value will be automatically passed to the loggers. Users\n can also return `tune.result.DONE` or `tune.result.SHOULD_CHECKPOINT`\n as a key to manually trigger termination or checkpointing of this\n trial. Note that manual checkpointing only works when subclassing\n Trainables.\n\n .. versionadded:: 0.8.7\n\n Returns:\n A dict that describes training progress.\n\n ", "n_words": 55, "vocab_size": 48, "n_whitespaces": 122, "language": "en" } }, { "id": 281450, "commit_id": "82747072c511beb1b2672846ae2ee4aec53eb562", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/cryptocurrency/discovery/discovery_controller.py", "file_name": "discovery_controller.py", "fun_name": "print_help", "commit_message": "Terminal Wide Rich (#1161)\n\n* My idea for how we handle Rich moving forward\r\n\r\n* remove independent consoles\r\n\r\n* FIxed pylint issues\r\n\r\n* add a few vars\r\n\r\n* Switched print to console\r\n\r\n* More transitions\r\n\r\n* Changed more prints\r\n\r\n* Replaced all prints\r\n\r\n* Fixing tabulate\r\n\r\n* Finished replace tabulate\r\n\r\n* Finished removing rich from Tabulate\r\n\r\n* add Panel around menu\r\n\r\n* add GST watermark under feature flag\r\n\r\n* Fixed 46 tests\r\n\r\n* Delete test_screener[False].yaml\r\n\r\n* Delete test_screener[True].yaml\r\n\r\n* Fixed the rest of the tests\r\n\r\n* add help and source color vars and use rgb\r\n\r\n* rich on stocks/options\r\n\r\n* update rich on disc, dps, sia\r\n\r\n* rich in gov, ins and scr menus\r\n\r\n* ba and ca menus with rich\r\n\r\n* Fixed import issue\r\n\r\n* Fixed some tests\r\n\r\n* removed termcolor\r\n\r\n* Removed prettytable\r\n\r\n* add rich to remaining stocks menus\r\n\r\n* FIxed linting issue\r\n\r\n* Added James' changes\r\n\r\n* Updated dependencies\r\n\r\n* Add rich to cryptocurrency menu\r\n\r\n* refactor economy and forex\r\n\r\n* refactor etf with rich\r\n\r\n* refactor mfunds\r\n\r\n* refactor rich rest\r\n\r\n* not specify style so default color works well on any background\r\n\r\n* Fixing mypy issues\r\n\r\n* Updated tests\r\n\r\n* More test fixes\r\n\r\n* James' test fixes\r\n\r\n* Updating tests : stocks/screener - fix cassettes using BR\r\n\r\n* Updating tests : crypto\r\n\r\n* Updating tests : disable DEBUG_MODE\r\n\r\n* Updating tests : stocks/fa/yfinance\r\n\r\n* minor fixes that escape\r\n\r\n* Improve the rich table function (that replaces tabulate :D )\r\n\r\n* Fixed bad code\r\n\r\n* delete rogue file + dcf fix + NoConsole\r\n\r\n* sia mypy\r\n\r\n* fuck you linter\r\n\r\n* fuck you linter pt 2\r\n\r\n* skip hehe\r\n\r\n* i hate the black linter\r\n\r\n* ubuntu mypy attempt\r\n\r\n* Update : rich_config + gtff\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : stocks\r\n\r\n* Update : rich_config\r\n\r\n* Updating : rich_config\r\n\r\n* make panel configurable for Theodore :b\r\n\r\n* colors update\r\n\r\n* Merged\r\n\r\n* Updating : rich_config + feature_flags\r\n\r\n* Updating : rich_config\r\n\r\n* Updating tests : stocks\r\n\r\n* Updating : feature_flags\r\n\r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra 
PARANA \r\nCo-authored-by: james \r\nCo-authored-by: jose-donato ", "code": "def print_help(self):\n \n help_text = \n console.print(text=help_text, menu=\"Cryptocurrency - Discovery\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 21, "token_counts": 21, "n_ast_nodes": 40, "n_identifiers": 7, "d_id": 83767, "documentation": { "docstring": "Print help[cmds]\n[src][CoinGecko][/src]\n cgtrending trending coins\n cgvoted most voted coins\n cgvisited most visited coins\n cgvolume coins with highest volume\n cgrecently recently added\n cgsentiment coins with most positive sentiment\n cggainers top gainers - coins which price gained the most in given period\n cglosers top losers - coins which price dropped the most in given period\n cgyfarms top yield farms\n cgdefi top defi protocols\n cgdex top decentralized exchanges\n cgnft top non fungible tokens\n[src][CoinPaprika][/src]\n cpsearch search for coins\n[src][CoinMarketCap][/src]\n cmctop top coins[/cmds]\n", "n_words": 80, "vocab_size": 55, "n_whitespaces": 246, "language": "en" } }, { "id": 148776, "commit_id": "be84a028c18bdbfd58dea8a51b6d59b77b672a8c", "repo": "freqtrade", "path": "freqtrade/rpc/rpc.py", "file_name": "rpc.py", "fun_name": "_rpc_stats", "commit_message": "Avoid mixed types in the api for /stats", "code": "def _rpc_stats(self) -> Dict[str, Any]:\n ", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 12, "n_words": 5, "vocab_size": 5, "complexity": 9, "nloc": 21, "token_counts": 272, "n_ast_nodes": 22, "n_identifiers": 5, "d_id": 34332, "documentation": { "docstring": "\n Generate generic stats for trades in database\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 262050, "commit_id": "176b712c1a40cf630da9a77f1826836723c40fde", "repo": "TTS", "path": "TTS/tts/datasets/dataset.py", "file_name": "dataset.py", "fun_name": "preprocess_samples", "commit_message": "Refactor TTSDataset ⚡️", "code": "def preprocess_samples(self):\n r\n\n # sort items based on the sequence length in ascending order\n text_ignore_idx, text_keep_idx = self.sort_and_filter_by_length(self.text_lengths, self.min_text_len, self.max_text_len)\n audio_ignore_idx, audio_keep_idx = self.sort_and_filter_by_length(self.audio_lengths, self.min_audio_len, self.max_audio_len)\n keep_idx = list(set(audio_keep_idx) | set(text_keep_idx))\n ignore_idx = list(set(audio_ignore_idx) | set(text_ignore_idx))\n\n samples = []\n for idx in keep_idx:\n samples.append(self.samples[idx])\n\n if len(samples) == 0:\n raise RuntimeError(\" [!] 
No samples left\")\n\n # shuffle batch groups\n # create batches with similar length items\n # the larger the `batch_group_size`, the higher the length variety in a batch.\n samples = self.create_buckets(samples, self.batch_group_size)\n\n # update items to the new sorted items\n self.samples = samples\n\n if self.verbose:\n print(\" | > Preprocessing samples\")\n print(\" | > Max text length: {}\".format(np.max(self.text_lengths)))\n print(\" | > Min text length: {}\".format(np.min(self.text_lengths)))\n print(\" | > Avg text length: {}\".format(np.mean(self.text_lengths)))\n print(\" | \")\n print(\" | > Max audio length: {}\".format(np.max(self.audio_lengths)))\n print(\" | > Min audio length: {}\".format(np.min(self.audio_lengths)))\n print(\" | > Avg audio length: {}\".format(np.mean(self.audio_lengths)))\n print(f\" | > Num. instances discarded samples: {len(ignore_idx)}\")\n print(\" | > Batch group size: {}.\".format(self.batch_group_size))\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 403, "n_words": 160, "vocab_size": 95, "complexity": 4, "nloc": 26, "token_counts": 250, "n_ast_nodes": 434, "n_identifiers": 31, "d_id": 77109, "documentation": { "docstring": "Sort `items` based on text length or audio length in ascending order. Filter out samples out or the length\n range.\n ", "n_words": 20, "vocab_size": 16, "n_whitespaces": 34, "language": "en" } }, { "id": 226180, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_bar.py", "file_name": "_bar.py", "fun_name": "selectedpoints", "commit_message": "switch to black .22", "code": "def selectedpoints(self):\n \n return self[\"selectedpoints\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 57853, "documentation": { "docstring": "\n Array containing integer indices of selected points. Has an\n effect only for traces that support selections. Note that an\n empty array means an empty selection where the `unselected` are\n turned on for all points, whereas, any other non-array values\n means no selection all where the `selected` and `unselected`\n styles have no effect.\n\n The 'selectedpoints' property accepts values of any type\n\n Returns\n -------\n Any\n ", "n_words": 63, "vocab_size": 48, "n_whitespaces": 141, "language": "en" } }, { "id": 222813, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/command/sdist.py", "file_name": "sdist.py", "fun_name": "make_release_tree", "commit_message": "add python 3.10.4 for windows", "code": "def make_release_tree(self, base_dir, files):\n \n # Create all the directories under 'base_dir' necessary to\n # put 'files' there; the 'mkpath()' is just so we don't die\n # if the manifest happens to be empty.\n self.mkpath(base_dir)\n dir_util.create_tree(base_dir, files, dry_run=self.dry_run)\n\n # And walk over the list of files, either making a hard link (if\n # os.link exists) to each one that doesn't already exist in its\n # corresponding location under 'base_dir', or copying each file\n # that's out-of-date in 'base_dir'. 
(Usually, all files will be\n # out-of-date, because by default we blow away 'base_dir' when\n # we're done making the distribution archives.)\n\n if hasattr(os, 'link'): # can make hard links on this system\n link = 'hard'\n msg = \"making hard links in %s...\" % base_dir\n else: # nope, have to copy\n link = None\n msg = \"copying files to %s...\" % base_dir\n\n if not files:\n log.warn(\"no files to distribute -- empty manifest?\")\n else:\n log.info(msg)\n for file in files:\n if not os.path.isfile(file):\n log.warn(\"'%s' not a regular file -- skipping\", file)\n else:\n dest = os.path.join(base_dir, file)\n self.copy_file(file, dest, link=link)\n\n self.distribution.metadata.write_pkg_info(base_dir)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 468, "n_words": 175, "vocab_size": 117, "complexity": 5, "nloc": 20, "token_counts": 134, "n_ast_nodes": 235, "n_identifiers": 24, "d_id": 56754, "documentation": { "docstring": "Create the directory tree that will become the source\n distribution archive. All directories implied by the filenames in\n 'files' are created under 'base_dir', and then we hard link or copy\n (if hard linking is unavailable) those files into place.\n Essentially, this duplicates the developer's source tree, but in a\n directory named after the distribution, containing only the files\n to be distributed.\n ", "n_words": 61, "vocab_size": 51, "n_whitespaces": 111, "language": "en" } }, { "id": 77224, "commit_id": "bc1a2ab1148b0f27cfd1435f8cb0e44c2721102d", "repo": "wagtail", "path": "wagtail/admin/views/generic/mixins.py", "file_name": "mixins.py", "fun_name": "run_after_hook", "commit_message": "Extract mixins from Snippet views and use it in generic create/edit/delete views (#8361)", "code": "def run_after_hook(self):\n \n return None\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 8, "n_ast_nodes": 16, "n_identifiers": 2, "d_id": 16643, "documentation": { "docstring": "\n Define how to run the hooks after the operation is executed.\n The `self.run_hook(hook_name, *args, **kwargs)` from HookResponseMixin\n can be utilised to call the hooks.\n\n If this method returns a response, it will be returned as the view\n response immediately after the operation finishes, skipping the default\n response.\n ", "n_words": 47, "vocab_size": 38, "n_whitespaces": 97, "language": "en" } }, { "id": 223837, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/message.py", "file_name": "message.py", "fun_name": "__setitem__", "commit_message": "add python 3.10.4 for windows", "code": "def __setitem__(self, name, val):\n \n max_count = self.policy.header_max_count(name)\n if max_count:\n lname = name.lower()\n found = 0\n for k, v in self._headers:\n if k.lower() == lname:\n found += 1\n if found >= max_count:\n raise ValueError(\"There may be at most {} {} headers \"\n \"in a message\".format(max_count, name))\n self._headers.append(self.policy.header_store_parse(name, val))\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 224, "n_words": 47, "vocab_size": 39, "complexity": 5, "nloc": 12, "token_counts": 89, "n_ast_nodes": 146, "n_identifiers": 17, "d_id": 57096, 
"documentation": { "docstring": "Set the value of a header.\n\n Note: this does not overwrite an existing header with the same field\n name. Use __delitem__() first to delete any existing headers.\n ", "n_words": 27, "vocab_size": 25, "n_whitespaces": 49, "language": "en" } }, { "id": 196174, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/permutations.py", "file_name": "permutations.py", "fun_name": "__call__", "commit_message": "Updated import locations", "code": "def __call__(self, *i):\n \n # list indices can be Integer or int; leave this\n # as it is (don't test or convert it) because this\n # gets called a lot and should be fast\n if len(i) == 1:\n i = i[0]\n if not isinstance(i, Iterable):\n i = as_int(i)\n if i < 0 or i > self.size:\n raise TypeError(\n \"{} should be an integer between 0 and {}\"\n .format(i, self.size-1))\n return self._array_form[i]\n # P([a, b, c])\n if len(i) != self.size:\n raise TypeError(\n \"{} should have the length {}.\".format(i, self.size))\n return [i[j] for j in self._array_form]\n # P(1, 2, 3)\n return self*Permutation(Cycle(*i), size=self.size)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 348, "n_words": 100, "vocab_size": 73, "complexity": 7, "nloc": 15, "token_counts": 126, "n_ast_nodes": 204, "n_identifiers": 14, "d_id": 47674, "documentation": { "docstring": "\n Allows applying a permutation instance as a bijective function.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation\n >>> p = Permutation([[2, 0], [3, 1]])\n >>> p.array_form\n [2, 3, 0, 1]\n >>> [p(i) for i in range(4)]\n [2, 3, 0, 1]\n\n If an array is given then the permutation selects the items\n from the array (i.e. 
the permutation is applied to the array):\n\n >>> from sympy.abc import x\n >>> p([x, 1, 0, x**2])\n [0, x**2, x, 1]\n ", "n_words": 75, "vocab_size": 52, "n_whitespaces": 181, "language": "en" } }, { "id": 261576, "commit_id": "d8fa96c29828e3ca79ddd5d7466521ac4d95213c", "repo": "scikit-learn", "path": "sklearn/impute/_base.py", "file_name": "_base.py", "fun_name": "transform", "commit_message": "ENH keep features with all missing values during imputation (#24770)\n\nCo-authored-by: Chiara Marmo \r\nCo-authored-by: Julien Jerphanion \r\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>\r\nCo-authored-by: Vitor SRG \r\nFixes https://github.com/scikit-learn/scikit-learn/pull/16695\r\nFixes https://github.com/scikit-learn/scikit-learn/issues/16426\r\nFixes https://github.com/scikit-learn/scikit-learn/issues/16977", "code": "def transform(self, X):\n \n check_is_fitted(self)\n\n X = self._validate_input(X, in_fit=False)\n statistics = self.statistics_\n\n if X.shape[1] != statistics.shape[0]:\n raise ValueError(\n \"X has %d features per sample, expected %d\"\n % (X.shape[1], self.statistics_.shape[0])\n )\n\n # compute mask before eliminating invalid features\n missing_mask = _get_mask(X, self.missing_values)\n\n # Decide whether to keep missing features\n if self.strategy == \"constant\" or self.keep_empty_features:\n valid_statistics = statistics\n valid_statistics_indexes = None\n else:\n # same as np.isnan but also works for object dtypes\n invalid_mask = _get_mask(statistics, np.nan)\n valid_mask = np.logical_not(invalid_mask)\n valid_statistics = statistics[valid_mask]\n valid_statistics_indexes = np.flatnonzero(valid_mask)\n\n if invalid_mask.any():\n invalid_features = np.arange(X.shape[1])[invalid_mask]\n if self.verbose != \"deprecated\" and self.verbose:\n # use feature names warning if features are provided\n if hasattr(self, \"feature_names_in_\"):\n invalid_features = self.feature_names_in_[invalid_features]\n warnings.warn(\n \"Skipping features without any observed values:\"\n f\" {invalid_features}. At least one non-missing value is needed\"\n f\" for imputation with strategy='{self.strategy}'.\"\n )\n X = X[:, valid_statistics_indexes]\n\n # Do actual imputation\n if sp.issparse(X):\n if self.missing_values == 0:\n raise ValueError(\n \"Imputation not possible when missing_values \"\n \"== 0 and input is sparse. 
Provide a dense \"\n \"array instead.\"\n )\n else:\n # if no invalid statistics are found, use the mask computed\n # before, else recompute mask\n if valid_statistics_indexes is None:\n mask = missing_mask.data\n else:\n mask = _get_mask(X.data, self.missing_values)\n indexes = np.repeat(\n np.arange(len(X.indptr) - 1, dtype=int), np.diff(X.indptr)\n )[mask]\n\n X.data[mask] = valid_statistics[indexes].astype(X.dtype, copy=False)\n else:\n # use mask computed before eliminating invalid mask\n if valid_statistics_indexes is None:\n mask_valid_features = missing_mask\n else:\n mask_valid_features = missing_mask[:, valid_statistics_indexes]\n n_missing = np.sum(mask_valid_features, axis=0)\n values = np.repeat(valid_statistics, n_missing)\n coordinates = np.where(mask_valid_features.transpose())[::-1]\n\n X[coordinates] = values\n\n X_indicator = super()._transform_indicator(missing_mask)\n\n return super()._concatenate_indicator(X, X_indicator)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 1085, "n_words": 249, "vocab_size": 160, "complexity": 12, "nloc": 56, "token_counts": 388, "n_ast_nodes": 642, "n_identifiers": 56, "d_id": 76872, "documentation": { "docstring": "Impute all missing values in `X`.\n\n Parameters\n ----------\n X : {array-like, sparse matrix}, shape (n_samples, n_features)\n The input data to complete.\n\n Returns\n -------\n X_imputed : {ndarray, sparse matrix} of shape \\\n (n_samples, n_features_out)\n `X` with imputed values.\n ", "n_words": 37, "vocab_size": 33, "n_whitespaces": 123, "language": "en" } }, { "id": 220442, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/events.py", "file_name": "events.py", "fun_name": "get_running_loop", "commit_message": "add python 3.10.4 for windows", "code": "def get_running_loop():\n \n # NOTE: this function is implemented in C (see _asynciomodule.c)\n loop = _get_running_loop()\n if loop is None:\n raise RuntimeError('no running event loop')\n return loop\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 48, "n_words": 26, "vocab_size": 23, "complexity": 2, "nloc": 5, "token_counts": 22, "n_ast_nodes": 43, "n_identifiers": 4, "d_id": 55997, "documentation": { "docstring": "Return the running event loop. Raise a RuntimeError if there is none.\n\n This function is thread-specific.\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 23, "language": "en" } }, { "id": 101368, "commit_id": "1022651eb8a7741014f5d2ec7cbfe882120dfa5f", "repo": "faceswap", "path": "scripts/convert.py", "file_name": "convert.py", "fun_name": "_predict_faces", "commit_message": "Bugfix: convert - Gif Writer\n - Fix non-launch error on Gif Writer\n - convert plugins - linting\n - convert/fs_media/preview/queue_manager - typing\n - Change convert items from dict to Dataclass", "code": "def _predict_faces(self) -> None:\n \n faces_seen = 0\n consecutive_no_faces = 0\n batch: List[ConvertItem] = []\n is_amd = get_backend() == \"amd\"\n while True:\n item: Union[Literal[\"EOF\"], ConvertItem] = self._in_queue.get()\n if item == \"EOF\":\n logger.debug(\"EOF Received\")\n break\n logger.trace(\"Got from queue: '%s'\", item.inbound.filename) # type:ignore\n faces_count = len(item.inbound.detected_faces)\n\n # Safety measure. 
If a large stream of frames appear that do not have faces,\n # these will stack up into RAM. Keep a count of consecutive frames with no faces.\n # If self._batchsize number of frames appear, force the current batch through\n # to clear RAM.\n consecutive_no_faces = consecutive_no_faces + 1 if faces_count == 0 else 0\n self._faces_count += faces_count\n if faces_count > 1:\n self._verify_output = True\n logger.verbose(\"Found more than one face in an image! '%s'\", # type:ignore\n os.path.basename(item.inbound.filename))\n\n self.load_aligned(item)\n faces_seen += faces_count\n\n batch.append(item)\n\n if faces_seen < self._batchsize and consecutive_no_faces < self._batchsize:\n logger.trace(\"Continuing. Current batchsize: %s, \" # type:ignore\n \"consecutive_no_faces: %s\", faces_seen, consecutive_no_faces)\n continue\n\n if batch:\n logger.trace(\"Batching to predictor. Frames: %s, Faces: %s\", # type:ignore\n len(batch), faces_seen)\n feed_batch = [feed_face for item in batch\n for feed_face in item.feed_faces]\n if faces_seen != 0:\n feed_faces = self._compile_feed_faces(feed_batch)\n batch_size = None\n if is_amd and feed_faces.shape[0] != self._batchsize:\n logger.verbose(\"Fallback to BS=1\") # type:ignore\n batch_size = 1\n predicted = self._predict(feed_faces, batch_size)\n else:\n predicted = np.array([])\n\n self._queue_out_frames(batch, predicted)\n\n consecutive_no_faces = 0\n faces_seen = 0\n batch = []\n\n logger.debug(\"Putting EOF\")\n self._out_queue.put(\"EOF\")\n logger.debug(\"Load queue complete\")\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 919, "n_words": 221, "vocab_size": 138, "complexity": 13, "nloc": 51, "token_counts": 300, "n_ast_nodes": 509, "n_identifiers": 44, "d_id": 20783, "documentation": { "docstring": " Run Prediction on the Faceswap model in a background thread.\n\n Reads from the :attr:`self._in_queue`, prepares images for prediction\n then puts the predictions back to the :attr:`self.out_queue`\n ", "n_words": 26, "vocab_size": 23, "n_whitespaces": 48, "language": "en" } }, { "id": 85595, "commit_id": "9731253cf103cfdced62c36753a0e957ab29d705", "repo": "sentry", "path": "src/sentry/tasks/reports.py", "file_name": "reports.py", "fun_name": "prepare", "commit_message": "feat: Add instrumentation to Celery tasks for weekly reports (#38561)\n\nIt seems that if we don't include the parent celery task, it will not trace any of the children tasks.\r\n\r\nThis enables further investigation as to why the building of the report is slow.\r\n\r\nFixes WOR-2159.", "code": "def prepare(self, timestamp, duration, organization):\n \n reports = {}\n for project in organization.project_set.all():\n reports[project.id] = self.__encode(self.build(timestamp, duration, project))\n\n if not reports:\n # XXX: HMSET requires at least one key/value pair, so we need to\n # protect ourselves here against organizations that were created\n # but haven't set up any projects yet.\n return\n\n with self.cluster.map() as client:\n key = self.__make_key(timestamp, duration, organization)\n client.hmset(key, reports)\n client.expire(key, self.ttl)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 187, "n_words": 64, "vocab_size": 58, "complexity": 3, "nloc": 10, "token_counts": 95, "n_ast_nodes": 152, "n_identifiers": 20, "d_id": 18014, "documentation": { "docstring": "\n For every 
project belonging to the organization, serially build a report and zlib compress it\n After this completes, store it in Redis with an expiration\n ", "n_words": 25, "vocab_size": 24, "n_whitespaces": 47, "language": "en" } }, { "id": 88596, "commit_id": "2e0d2c856eb17a842c67d88363bed92c99578c20", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_project_stacktrace_link.py", "file_name": "test_project_stacktrace_link.py", "fun_name": "test_file_not_found_error", "commit_message": "ref(stacktrace_link): Add more than one code mapping in the tests (#41409)\n\nInclude more than one code mapping in the setup code. Cleaning up a bit how we tag the transactions.\r\n\r\nThis makes the PR for WOR-2395 a little easier to read.", "code": "def test_file_not_found_error(self):\n \n response = self.get_success_response(\n self.organization.slug, self.project.slug, qs_params={\"file\": self.filepath}\n )\n assert response.data[\"config\"] == self.expected_configurations(self.code_mapping1)\n assert not response.data[\"sourceUrl\"]\n # XXX: This depends on what was the last attempted code mapping\n assert response.data[\"error\"] == \"stack_root_mismatch\"\n assert response.data[\"integrations\"] == [serialized_integration(self.integration)]\n # XXX: This depends on what was the last attempted code mapping\n assert (\n response.data[\"attemptedUrl\"]\n == f\"https://example.com/{self.repo.name}/blob/master/src/sentry/src/sentry/utils/safe.py\"\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 165, "n_words": 55, "vocab_size": 35, "complexity": 1, "nloc": 12, "token_counts": 95, "n_ast_nodes": 171, "n_identifiers": 16, "d_id": 18415, "documentation": { "docstring": "File matches code mapping but it cannot be found in the source repository.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 331645, "commit_id": "abc9ba254430ef971ea3dbd12f2b4f1969da55be", "repo": "pytorch-image-models", "path": "timm/models/registry.py", "file_name": "registry.py", "fun_name": "has_pretrained_cfg_key", "commit_message": "Transitioning default_cfg -> pretrained_cfg. Improving handling of pretrained_cfg source (HF-Hub, files, timm config, etc). 
Checkpoint handling tweaks.", "code": "def has_pretrained_cfg_key(model_name, cfg_key):\n \n if model_name in _model_pretrained_cfgs and cfg_key in _model_pretrained_cfgs[model_name]:\n return True\n return False\n\n", "url": "https://github.com/huggingface/pytorch-image-models.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 31, "n_words": 15, "vocab_size": 13, "complexity": 3, "nloc": 4, "token_counts": 24, "n_ast_nodes": 39, "n_identifiers": 4, "d_id": 119879, "documentation": { "docstring": " Query model default_cfgs for existence of a specific key.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 13, "language": "en" } }, { "id": 32535, "commit_id": "2b81f72be9fa6d69734ae27cfcbfd72b04988fe4", "repo": "transformers", "path": "src/transformers/utils/import_utils.py", "file_name": "import_utils.py", "fun_name": "is_ccl_available", "commit_message": "start from 1.12, torch_ccl is renamed as oneccl_bindings_for_pytorch … (#18229)\n\n* start from 1.12, torch_ccl is renamed as oneccl_bindings_for_pytorch and should import it before use\r\n\r\nSigned-off-by: Wang, Yi A \r\n\r\n* add doc for perf_train_cpu_many\r\n\r\nSigned-off-by: Wang, Yi A \r\n\r\n* update doc\r\n\r\nSigned-off-by: Wang, Yi A ", "code": "def is_ccl_available():\n return _is_ccl_available\n\n\n# docstyle-ignore\nDATASETS_IMPORT_ERROR = \n\n\n# docstyle-ignore\nTOKENIZERS_IMPORT_ERROR = \n\n\n# docstyle-ignore\nSENTENCEPIECE_IMPORT_ERROR = \n\n\n# docstyle-ignore\nPROTOBUF_IMPORT_ERROR = \n\n\n# docstyle-ignore\nFAISS_IMPORT_ERROR = \n\n\n# docstyle-ignore\nPYTORCH_IMPORT_ERROR = \n\n# docstyle-ignore\nPYTORCH_IMPORT_ERROR_WITH_TF = \n\n# docstyle-ignore\nTF_IMPORT_ERROR_WITH_PYTORCH = \n\n\n# docstyle-ignore\nSKLEARN_IMPORT_ERROR = \n\n\n# docstyle-ignore\nTENSORFLOW_IMPORT_ERROR = \n\n\n# docstyle-ignore\nDETECTRON2_IMPORT_ERROR = \n\n\n# docstyle-ignore\nFLAX_IMPORT_ERROR = \n\n# docstyle-ignore\nFTFY_IMPORT_ERROR = \n\n\n# docstyle-ignore\nSCATTER_IMPORT_ERROR = \n\n# docstyle-ignore\nPYTORCH_QUANTIZATION_IMPORT_ERROR = \n\n# docstyle-ignore\nTENSORFLOW_PROBABILITY_IMPORT_ERROR = \n\n# docstyle-ignore\nTENSORFLOW_TEXT_IMPORT_ERROR = \n\n\n# docstyle-ignore\nPANDAS_IMPORT_ERROR = \n\n\n# docstyle-ignore\nPHONEMIZER_IMPORT_ERROR = \n\n\n# docstyle-ignore\nSACREMOSES_IMPORT_ERROR = \n\n\n# docstyle-ignore\nSCIPY_IMPORT_ERROR = \n\n\n# docstyle-ignore\nSPEECH_IMPORT_ERROR = \n\n# docstyle-ignore\nTIMM_IMPORT_ERROR = \n\n# docstyle-ignore\nVISION_IMPORT_ERROR = \n\n\n# docstyle-ignore\nPYTESSERACT_IMPORT_ERROR = \n\n# docstyle-ignore\nPYCTCDECODE_IMPORT_ERROR = \n\n# docstyle-ignore\nACCELERATE_IMPORT_ERROR = \n\n# docstyle-ignore\nCCL_IMPORT_ERROR = \n\nBACKENDS_MAPPING = OrderedDict(\n [\n (\"datasets\", (is_datasets_available, DATASETS_IMPORT_ERROR)),\n (\"detectron2\", (is_detectron2_available, DETECTRON2_IMPORT_ERROR)),\n (\"faiss\", (is_faiss_available, FAISS_IMPORT_ERROR)),\n (\"flax\", (is_flax_available, FLAX_IMPORT_ERROR)),\n (\"ftfy\", (is_ftfy_available, FTFY_IMPORT_ERROR)),\n (\"pandas\", (is_pandas_available, PANDAS_IMPORT_ERROR)),\n (\"phonemizer\", (is_phonemizer_available, PHONEMIZER_IMPORT_ERROR)),\n (\"protobuf\", (is_protobuf_available, PROTOBUF_IMPORT_ERROR)),\n (\"pyctcdecode\", (is_pyctcdecode_available, PYCTCDECODE_IMPORT_ERROR)),\n (\"pytesseract\", (is_pytesseract_available, PYTESSERACT_IMPORT_ERROR)),\n (\"sacremoses\", (is_sacremoses_available, SACREMOSES_IMPORT_ERROR)),\n (\"scatter\", (is_scatter_available, SCATTER_IMPORT_ERROR)),\n 
(\"pytorch_quantization\", (is_pytorch_quantization_available, PYTORCH_QUANTIZATION_IMPORT_ERROR)),\n (\"sentencepiece\", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)),\n (\"sklearn\", (is_sklearn_available, SKLEARN_IMPORT_ERROR)),\n (\"speech\", (is_speech_available, SPEECH_IMPORT_ERROR)),\n (\"tensorflow_probability\", (is_tensorflow_probability_available, TENSORFLOW_PROBABILITY_IMPORT_ERROR)),\n (\"tf\", (is_tf_available, TENSORFLOW_IMPORT_ERROR)),\n (\"tensorflow_text\", (is_tensorflow_text_available, TENSORFLOW_TEXT_IMPORT_ERROR)),\n (\"timm\", (is_timm_available, TIMM_IMPORT_ERROR)),\n (\"tokenizers\", (is_tokenizers_available, TOKENIZERS_IMPORT_ERROR)),\n (\"torch\", (is_torch_available, PYTORCH_IMPORT_ERROR)),\n (\"vision\", (is_vision_available, VISION_IMPORT_ERROR)),\n (\"scipy\", (is_scipy_available, SCIPY_IMPORT_ERROR)),\n (\"accelerate\", (is_accelerate_available, ACCELERATE_IMPORT_ERROR)),\n (\"oneccl_bind_pt\", (is_ccl_available, CCL_IMPORT_ERROR)),\n ]\n)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 360, "n_words": 200, "vocab_size": 118, "complexity": 1, "nloc": 2, "token_counts": 6, "n_ast_nodes": 611, "n_identifiers": 57, "d_id": 5947, "documentation": { "docstring": "\n{0} requires the 🤗 Datasets library but it was not found in your environment. You can install it with:\n```\npip install datasets\n```\nIn a notebook or a colab, you can install it by executing a cell with\n```\n!pip install datasets\n```\nthen restarting your kernel.\n\nNote that if you have a local folder named `datasets` or a local python file named `datasets.py` in your current\nworking directory, python may try to import this instead of the 🤗 Datasets library. You should rename this folder or\nthat python file if that's the case.\n\n{0} requires the 🤗 Tokenizers library but it was not found in your environment. You can install it with:\n```\npip install tokenizers\n```\nIn a notebook or a colab, you can install it by executing a cell with\n```\n!pip install tokenizers\n```\n\n{0} requires the SentencePiece library but it was not found in your environment. Checkout the instructions on the\ninstallation page of its repo: https://github.com/google/sentencepiece#installation and follow the ones\nthat match your environment.\n\n{0} requires the protobuf library but it was not found in your environment. Checkout the instructions on the\ninstallation page of its repo: https://github.com/protocolbuffers/protobuf/tree/master/python#installation and follow the ones\nthat match your environment.\n\n{0} requires the faiss library but it was not found in your environment. Checkout the instructions on the\ninstallation page of its repo: https://github.com/facebookresearch/faiss/blob/master/INSTALL.md and follow the ones\nthat match your environment.\n\n{0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the\ninstallation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment.\n\n{0} requires the PyTorch library but it was not found in your environment.\nHowever, we were able to find a TensorFlow installation. TensorFlow classes begin\nwith \"TF\", but are otherwise identically named to our PyTorch classes. 
This\nmeans that the TF equivalent of the class you tried to import would be \"TF{0}\".\nIf you want to use TensorFlow, please use TF classes instead!\n\nIf you really do want to use PyTorch please go to\nhttps://pytorch.org/get-started/locally/ and follow the instructions that\nmatch your environment.\n\n{0} requires the TensorFlow library but it was not found in your environment.\nHowever, we were able to find a PyTorch installation. PyTorch classes do not begin\nwith \"TF\", but are otherwise identically named to our TF classes.\nIf you want to use PyTorch, please use those classes instead!\n\nIf you really do want to use TensorFlow, please follow the instructions on the\ninstallation page https://www.tensorflow.org/install that match your environment.\n\n{0} requires the scikit-learn library but it was not found in your environment. You can install it with:\n```\npip install -U scikit-learn\n```\nIn a notebook or a colab, you can install it by executing a cell with\n```\n!pip install -U scikit-learn\n```\n\n{0} requires the TensorFlow library but it was not found in your environment. Checkout the instructions on the\ninstallation page: https://www.tensorflow.org/install and follow the ones that match your environment.\n\n{0} requires the detectron2 library but it was not found in your environment. Checkout the instructions on the\ninstallation page: https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md and follow the ones\nthat match your environment.\n\n{0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the\ninstallation page: https://github.com/google/flax and follow the ones that match your environment.\n\n{0} requires the ftfy library but it was not found in your environment. Checkout the instructions on the\ninstallation section: https://github.com/rspeer/python-ftfy/tree/master#installing and follow the ones\nthat match your environment.\n\n{0} requires the torch-scatter library but it was not found in your environment. You can install it with pip as\nexplained here: https://github.com/rusty1s/pytorch_scatter.\n\n{0} requires the pytorch-quantization library but it was not found in your environment. You can install it with pip:\n`pip install pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com`\n\n{0} requires the tensorflow_probability library but it was not found in your environment. You can install it with pip as\nexplained here: https://github.com/tensorflow/probability.\n\n{0} requires the tensorflow_text library but it was not found in your environment. You can install it with pip as\nexplained here: https://www.tensorflow.org/text/guide/tf_text_intro.\n\n{0} requires the pandas library but it was not found in your environment. You can install it with pip as\nexplained here: https://pandas.pydata.org/pandas-docs/stable/getting_started/install.html.\n\n{0} requires the phonemizer library but it was not found in your environment. You can install it with pip:\n`pip install phonemizer`\n\n{0} requires the sacremoses library but it was not found in your environment. You can install it with pip:\n`pip install sacremoses`\n\n{0} requires the scipy library but it was not found in your environment. You can install it with pip:\n`pip install scipy`\n\n{0} requires the torchaudio library but it was not found in your environment. You can install it with pip:\n`pip install torchaudio`\n\n{0} requires the timm library but it was not found in your environment. 
You can install it with pip:\n`pip install timm`\n\n{0} requires the PIL library but it was not found in your environment. You can install it with pip:\n`pip install pillow`\n\n{0} requires the PyTesseract library but it was not found in your environment. You can install it with pip:\n`pip install pytesseract`\n\n{0} requires the pyctcdecode library but it was not found in your environment. You can install it with pip:\n`pip install pyctcdecode`\n\n{0} requires the accelerate library but it was not found in your environment. You can install it with pip:\n`pip install accelerate`\n\n{0} requires the torch ccl library but it was not found in your environment. You can install it with pip:\n`pip install oneccl_bind_pt -f https://developer.intel.com/ipex-whl-stable`\n", "n_words": 917, "vocab_size": 167, "n_whitespaces": 824, "language": "en" } }, { "id": 84134, "commit_id": "a142fbff85302c5e3acb2e204eca2e9c75dbc74b", "repo": "zulip", "path": "zerver/tests/test_subs.py", "file_name": "test_subs.py", "fun_name": "test_json_get_subscribers", "commit_message": "tests: Refactor away result.json() calls with helpers.\n\nSigned-off-by: Zixuan James Li ", "code": "def test_json_get_subscribers(self) -> None:\n \n stream_name = gather_subscriptions(self.user_profile)[0][0][\"name\"]\n stream_id = get_stream(stream_name, self.user_profile.realm).id\n expected_subscribers = gather_subscriptions(self.user_profile, include_subscribers=True)[0][\n 0\n ][\"subscribers\"]\n result = self.client_get(f\"/json/streams/{stream_id}/members\")\n result_dict = self.assert_json_success(result)\n self.assertIn(\"subscribers\", result_dict)\n self.assertIsInstance(result_dict[\"subscribers\"], list)\n subscribers: List[int] = []\n for subscriber in result_dict[\"subscribers\"]:\n self.assertIsInstance(subscriber, int)\n subscribers.append(subscriber)\n self.assertEqual(set(subscribers), set(expected_subscribers))\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 157, "n_words": 40, "vocab_size": 35, "complexity": 2, "nloc": 19, "token_counts": 141, "n_ast_nodes": 231, "n_identifiers": 25, "d_id": 17779, "documentation": { "docstring": "\n json_get_subscribers in zerver/views/streams.py\n also returns the list of subscribers for a stream, when requested.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 36, "language": "en" } }, { "id": 222586, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/ccompiler.py", "file_name": "ccompiler.py", "fun_name": "_fix_compile_args", "commit_message": "add python 3.10.4 for windows", "code": "def _fix_compile_args(self, output_dir, macros, include_dirs):\n \n if output_dir is None:\n output_dir = self.output_dir\n elif not isinstance(output_dir, str):\n raise TypeError(\"'output_dir' must be a string or None\")\n\n if macros is None:\n macros = self.macros\n elif isinstance(macros, list):\n macros = macros + (self.macros or [])\n else:\n raise TypeError(\"'macros' (if supplied) must be a list of tuples\")\n\n if include_dirs is None:\n include_dirs = self.include_dirs\n elif isinstance(include_dirs, (list, tuple)):\n include_dirs = list(include_dirs) + (self.include_dirs or [])\n else:\n raise TypeError(\n \"'include_dirs' (if supplied) must be a list of strings\")\n\n return output_dir, macros, include_dirs\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, 
"ast_levels": 13, "n_whitespaces": 261, "n_words": 86, "vocab_size": 48, "complexity": 9, "nloc": 19, "token_counts": 123, "n_ast_nodes": 199, "n_identifiers": 10, "d_id": 56654, "documentation": { "docstring": "Typecheck and fix-up some of the arguments to the 'compile()'\n method, and return fixed-up values. Specifically: if 'output_dir'\n is None, replaces it with 'self.output_dir'; ensures that 'macros'\n is a list, and augments it with 'self.macros'; ensures that\n 'include_dirs' is a list, and augments it with 'self.include_dirs'.\n Guarantees that the returned values are of the correct type,\n i.e. for 'output_dir' either string or None, and for 'macros' and\n 'include_dirs' either list or None.\n ", "n_words": 72, "vocab_size": 44, "n_whitespaces": 129, "language": "en" } }, { "id": 206882, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/views/generic/list.py", "file_name": "list.py", "fun_name": "get_template_names", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_template_names(self):\n \n try:\n names = super().get_template_names()\n except ImproperlyConfigured:\n # If template_name isn't specified, it's not a problem --\n # we just start with an empty list.\n names = []\n\n # If the list is a queryset, we'll invent a template name based on the\n # app and model name. This name gets put at the end of the template\n # name list so that user-supplied names override the automatically-\n # generated ones.\n if hasattr(self.object_list, \"model\"):\n opts = self.object_list.model._meta\n names.append(\n \"%s/%s%s.html\"\n % (opts.app_label, opts.model_name, self.template_name_suffix)\n )\n elif not names:\n raise ImproperlyConfigured(\n \"%(cls)s requires either a 'template_name' attribute \"\n \"or a get_queryset() method that returns a QuerySet.\"\n % {\n \"cls\": self.__class__.__name__,\n }\n )\n return names\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 391, "n_words": 113, "vocab_size": 85, "complexity": 4, "nloc": 20, "token_counts": 86, "n_ast_nodes": 155, "n_identifiers": 16, "d_id": 51781, "documentation": { "docstring": "\n Return a list of template names to be used for the request. Must return\n a list. May not be called if render_to_response is overridden.\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 46, "language": "en" } }, { "id": 212675, "commit_id": "40757180b5d0ac66d44958e4ab13329c7b03ea36", "repo": "PySimpleGUI", "path": "PySimpleGUI.py", "file_name": "PySimpleGUI.py", "fun_name": "_SpinboxSelectHandler", "commit_message": "Fix for enable_events for Spin element. Changed how the event is generated. 
Need to determine manual entry of value still", "code": "def _SpinboxSelectHandler(self):\n        \n        self._generic_callback_handler('')\n", "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 17, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 25, "n_identifiers": 3, "d_id": 53336, "documentation": { "docstring": "\n        Internal callback function for when an entry is selected in a Spinbox.\n\n        ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 27, "language": "en" } }, { "id": 32078, "commit_id": "5ae087cf8ec080b121c9cdc9bafdc2b35b6e110e", "repo": "transformers", "path": "tests/models/mt5/test_modeling_tf_mt5.py", "file_name": "test_modeling_tf_mt5.py", "fun_name": "test_small_integration_test", "commit_message": "Fix T5/mT5 tests (#18029)", "code": "def test_small_integration_test(self):\n        \n\n        model = TFAutoModelForSeq2SeqLM.from_pretrained(\"google/mt5-small\")\n        tokenizer = AutoTokenizer.from_pretrained(\"google/mt5-small\")\n\n        input_ids = tokenizer(\"Hello there\", return_tensors=\"tf\").input_ids\n        labels = tokenizer(\"Hi I am\", return_tensors=\"tf\").input_ids\n\n        loss = model(input_ids, labels=labels).loss\n        mtf_score = -tf.math.reduce_mean(loss).numpy()\n\n        EXPECTED_SCORE = -21.210594\n        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 97, "n_words": 34, "vocab_size": 27, "complexity": 1, "nloc": 9, "token_counts": 94, "n_ast_nodes": 158, "n_identifiers": 19, "d_id": 5847, "documentation": { "docstring": "\n        For comparison run:\n        >>> import t5  # pip install t5==0.7.1\n        >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary\n\n        >>> path_to_mtf_small_mt5_checkpoint = ''\n        >>> path_to_mtf_small_mt5_spm_model_path = ''\n        >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_mt5_checkpoint, batch_size=1, tpu=None)\n        >>> vocab = SentencePieceVocabulary(path_to_mtf_small_mt5_spm_model_path, extra_ids=100)\n        >>> score = t5_model.score(inputs=[\"Hello there\"], targets=[\"Hi I am\"], vocabulary=vocab)\n        ", "n_words": 43, "vocab_size": 31, "n_whitespaces": 108, "language": "en" } }, { "id": 181302, "commit_id": "d79039beb1c3eab597de4871f7eb6522196d1a00", "repo": "gradio", "path": "test/test_components.py", "file_name": "test_components.py", "fun_name": "test_in_interface", "commit_message": "Latex support (#2696)\n\n* initial use of dollarmath plugin\r\n\r\n* add frontend support\r\n\r\n* chnages\r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* changes\r\n\r\n* fix\r\n\r\n* added latex to kinematics blocks\r\n\r\n* changes\r\n\r\n* Update CHANGELOG.md\r\n\r\nCo-authored-by: Abubakar Abid \r\n\r\n* added example to changelog\r\n\r\n* remove param\r\n\r\n* doc fix\r\n\r\n* fixes\r\n\r\n* latex noteboox fix\r\n\r\n* fix\r\n\r\n* changes\r\n\r\nCo-authored-by: Ali Abid \r\nCo-authored-by: Abubakar Abid ", "code": "async def test_in_interface(self):\n        \n        iface = gr.Interface(lambda x: x, \"text\", \"markdown\")\n        input_data = \"Here's an [image](https://gradio.app/images/gradio_logo.png)\"\n        output_data = iface(input_data)\n        assert (\n            output_data\n            == \n        )\n\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 88, "n_words": 23, "vocab_size": 20, "complexity": 1,
"nloc": 8, "token_counts": 36, "n_ast_nodes": 69, "n_identifiers": 8, "d_id": 43297, "documentation": { "docstring": "\n Interface, process\n

    Here's an image

    \\n", "n_words": 6, "vocab_size": 6, "n_whitespaces": 20, "language": "en" } }, { "id": 49740, "commit_id": "f4d6e64cdc132ae868699a0ba442f4ab1d304a14", "repo": "PaddleHub", "path": "modules/image/text_to_image/disco_diffusion_cnclip_vitb16/cn_clip/clip/modeling_bert.py", "file_name": "modeling_bert.py", "fun_name": "gelu", "commit_message": "add disco_diffusion_cnclip_vitb16 module", "code": "def gelu(x):\n \n return x * 0.5 * (1.0 + paddle.erf(x / math.sqrt(2.0)))\n\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 18, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 2, "token_counts": 34, "n_ast_nodes": 47, "n_identifiers": 6, "d_id": 9899, "documentation": { "docstring": " Original Implementation of the gelu activation function in Google Bert repo when initially created.\n For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):\n 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n Also see https://arxiv.org/abs/1606.08415\n ", "n_words": 46, "vocab_size": 39, "n_whitespaces": 71, "language": "en" } }, { "id": 100389, "commit_id": "c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "plugins/train/trainer/_base.py", "file_name": "_base.py", "fun_name": "_set_preview_feed", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def _set_preview_feed(self):\n \n retval = {}\n for idx, side in enumerate((\"a\", \"b\")):\n logger.debug(\"Setting preview feed: (side: '%s')\", side)\n preview_images = self._config.get(\"preview_images\", 14)\n preview_images = min(max(preview_images, 2), 16)\n batchsize = min(len(self._images[side]), preview_images)\n retval[side] = self._load_generator(idx).minibatch_ab(self._images[side],\n batchsize,\n side,\n do_shuffle=True,\n is_preview=True)\n logger.debug(\"Set preview feed. 
Batchsize: %s\", batchsize)\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 395, "n_words": 45, "vocab_size": 38, "complexity": 2, "nloc": 14, "token_counts": 116, "n_ast_nodes": 184, "n_identifiers": 20, "d_id": 19874, "documentation": { "docstring": " Set the preview feed for this feeder.\n\n Creates a generator from :class:`lib.training_data.TrainingDataGenerator` specifically\n for previews for the feeder.\n\n Returns\n -------\n dict\n The side (\"a\" or \"b\") as key, :class:`~lib.training_data.TrainingDataGenerator` as\n value.\n ", "n_words": 31, "vocab_size": 26, "n_whitespaces": 96, "language": "en" } }, { "id": 251307, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "mitmproxy/addons/script.py", "file_name": "script.py", "fun_name": "script_error_handler", "commit_message": "make it black!", "code": "def script_error_handler(path, exc, msg=\"\", tb=False):\n \n exception = type(exc).__name__\n if msg:\n exception = msg\n lineno = \"\"\n if hasattr(exc, \"lineno\"):\n lineno = str(exc.lineno)\n log_msg = f\"in script {path}:{lineno} {exception}\"\n if tb:\n etype, value, tback = sys.exc_info()\n tback = addonmanager.cut_traceback(tback, \"invoke_addon_sync\")\n log_msg = (\n log_msg + \"\\n\" + \"\".join(traceback.format_exception(etype, value, tback))\n )\n ctx.log.error(log_msg)\n\n\nReloadInterval = 1\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 130, "n_words": 54, "vocab_size": 37, "complexity": 4, "nloc": 15, "token_counts": 108, "n_ast_nodes": 199, "n_identifiers": 26, "d_id": 73672, "documentation": { "docstring": "\n Handles all the user's script errors with\n an optional traceback\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 20, "language": "en" } }, { "id": 28243, "commit_id": "8201efcde2d7aacccf3512c544cceea6780a0598", "repo": "saleor", "path": "saleor/graphql/checkout/utils.py", "file_name": "utils.py", "fun_name": "prevent_sync_event_circular_query", "commit_message": "GraphQL subscription support for synchronous webhook events (#9763)\n\n* WIP add sync webhooks subscription payload handling\r\n\r\n* add tests, fix minor things\r\n\r\n* update schema\r\n\r\n* remove unneeded code\r\n\r\n* add fix for circular field resolve\r\n\r\n* fix-filter-shipping-methods-payload\r\n\r\n* added_in added to desription\r\n\r\n* add missing types\r\n\r\n* revert refactor, precommit issues\r\n\r\n* fixes after review\r\n\r\n* cosmetix fixes post-review\r\n\r\n* subscription types description fixes\r\n\r\n* remove unneeded description from PaymentBase\r\n\r\n* add validation for creating webhook with two top level fields, add tests for shippingListMethodsForCheckout\r\n\r\n* add docstring, refactor prevent_sync_event_circular_wuery wrapper\r\n\r\n* fix docstring of revent_sync_event_circular_query\r\n\r\n* fix linters", "code": "def prevent_sync_event_circular_query(func):\n \n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 3, "token_counts": 10, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 5164, "documentation": { "docstring": "Prevent circular dependencies in synchronous events resolvers.\n\n Synchronous events are not allowed to request fields that are 
resolved using other\n synchronous events, which would lead to circular calls of the webhook.\n Using this decorator prevents such circular events resolution.\n\n :raises CircularSubscriptionSyncEvent: When a field being resolved from a\n synchronous webhook's payload uses another synchronous webhook internally.\n ", "n_words": 56, "vocab_size": 45, "n_whitespaces": 74, "language": "en" } }, { "id": 275290, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/optimizer_experimental/optimizer.py", "file_name": "optimizer.py", "fun_name": "iterations", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def iterations(self):\n \n return self._iterations\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 19, "n_identifiers": 3, "d_id": 81374, "documentation": { "docstring": "The number of training steps this `optimizer` has run.\n\n By default, iterations would be incremented by one every time\n `apply_gradients()` is called.\n ", "n_words": 22, "vocab_size": 22, "n_whitespaces": 43, "language": "en" } }, { "id": 186216, "commit_id": "32b7308ac83c20c49ca422726be149fdc5b8fc2d", "repo": "textual", "path": "tests/snapshot_tests/test_snapshots.py", "file_name": "test_snapshots.py", "fun_name": "test_nested_auto_heights", "commit_message": "fox for nested heights", "code": "def test_nested_auto_heights(snap_compare):\n \n assert snap_compare(\"snapshot_apps/nested_auto_heights.py\", press=[\"1\", \"2\"])\n\n\n# --- Other ---\n\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 15, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 19, "n_ast_nodes": 38, "n_identifiers": 3, "d_id": 45406, "documentation": { "docstring": "Test refreshing widget within a auto sized container", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 213609, "commit_id": "d743336b1f3654cd0315f380f43eed4116997c1d", "repo": "ivy", "path": "ivy/core/device.py", "file_name": "device.py", "fun_name": "num_arrays_on_dev", "commit_message": "renamed dev_str arg to dev for all methods.", "code": "def num_arrays_on_dev(dev):\n \n return len(get_all_arrays_on_dev(dev))\n\n\n# noinspection PyShadowingNames", "url": "https://github.com/unifyai/ivy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 12, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 14, "n_ast_nodes": 27, "n_identifiers": 4, "d_id": 53674, "documentation": { "docstring": "\n Returns the number of arrays which are currently alive on the specified device.\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 20, "language": "en" } }, { "id": 291305, "commit_id": "003e4224c89a6da381960dc5347750d1521d85c9", "repo": "core", "path": "homeassistant/components/text/__init__.py", "file_name": "__init__.py", "fun_name": "pattern", "commit_message": "Add `text` platform (#79454)\n\nCo-authored-by: Franck Nijhof \r\nCo-authored-by: Franck Nijhof ", "code": "def pattern(self) -> str | None:\n \n if hasattr(self, \"_attr_pattern\"):\n return self._attr_pattern\n if hasattr(self, \"entity_description\"):\n return self.entity_description.pattern\n return None\n", "url": 
"https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 68, "n_words": 18, "vocab_size": 14, "complexity": 3, "nloc": 7, "token_counts": 38, "n_ast_nodes": 65, "n_identifiers": 6, "d_id": 90415, "documentation": { "docstring": "Return the regex pattern that the value must match.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 171627, "commit_id": "e2df99823758210fb2b7c4aba39e23f3445f7cd3", "repo": "pandas", "path": "pandas/_version.py", "file_name": "_version.py", "fun_name": "render_pep440", "commit_message": "BLD: use nonvendor versioneer (#49924)\n\n* BLD: remove vendored versioneer\r\n\r\n* run vis\r\n\r\n* move config to pyproject.toml\r\n\r\n* add versioneer to deps\r\n\r\n* run pyupgrade\r\n\r\n* fix isort and pylint\r\n\r\n* fix ci\r\n\r\n* fix env", "code": "def render_pep440(pieces):\n \n if pieces[\"closest-tag\"]:\n rendered = pieces[\"closest-tag\"]\n if pieces[\"distance\"] or pieces[\"dirty\"]:\n rendered += plus_or_dot(pieces)\n rendered += f\"{pieces['distance']}.g{pieces['short']}\"\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n else:\n # exception #1\n rendered = f\"0+untagged.{pieces['distance']}.g{pieces['short']}\"\n if pieces[\"dirty\"]:\n rendered += \".dirty\"\n return rendered\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 142, "n_words": 36, "vocab_size": 20, "complexity": 6, "nloc": 13, "token_counts": 65, "n_ast_nodes": 163, "n_identifiers": 4, "d_id": 40694, "documentation": { "docstring": "Build up version string, with post-release \"local version identifier\".\n\n Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you\n get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty\n\n Exceptions:\n 1: no tags. git_describe was just HEX. 
0+untagged.DISTANCE.gHEX[.dirty]\n ", "n_words": 37, "vocab_size": 35, "n_whitespaces": 52, "language": "en" } }, { "id": 20624, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pyparsing/results.py", "file_name": "results.py", "fun_name": "clear", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def clear(self):\n \n del self._toklist[:]\n self._tokdict.clear()\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 26, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 36, "n_identifiers": 4, "d_id": 3461, "documentation": { "docstring": "\n Clear all elements and results names.\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 21, "language": "en" } }, { "id": 298014, "commit_id": "8819634b613f6bfd55885283bab86c3852ae40c4", "repo": "core", "path": "homeassistant/components/ubus/device_tracker.py", "file_name": "device_tracker.py", "fun_name": "_refresh_on_access_denied", "commit_message": "String formatting and max line length - Part 6 (#84525)", "code": "def _refresh_on_access_denied(func):\n \n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 3, "token_counts": 10, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 96962, "documentation": { "docstring": "If remove rebooted, it lost our session so rebuild one and try again.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 20262, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pygments/__init__.py", "file_name": "__init__.py", "fun_name": "format", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def format(tokens, formatter, outfile=None): # pylint: disable=redefined-builtin\n \n try:\n if not outfile:\n realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()\n formatter.format(tokens, realoutfile)\n return realoutfile.getvalue()\n else:\n formatter.format(tokens, outfile)\n except TypeError as err:\n if (isinstance(err.args[0], str) and\n ('unbound method format' in err.args[0] or\n 'missing 1 required positional argument' in 
err.args[0])):\n raise TypeError('format() argument must be a formatter instance, '\n 'not a class')\n raise\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 204, "n_words": 61, "vocab_size": 54, "complexity": 8, "nloc": 15, "token_counts": 107, "n_ast_nodes": 180, "n_identifiers": 14, "d_id": 3301, "documentation": { "docstring": "\n Format a tokenlist ``tokens`` with the formatter ``formatter``.\n\n If ``outfile`` is given and a valid file object (an object\n with a ``write`` method), the result will be written to it, otherwise\n it is returned as a string.\n ", "n_words": 37, "vocab_size": 30, "n_whitespaces": 53, "language": "en" } }, { "id": 267630, "commit_id": "b56d73796e85f162d50b4fcd5930035183032d4a", "repo": "ansible", "path": "lib/ansible/module_utils/common/process.py", "file_name": "process.py", "fun_name": "get_bin_path", "commit_message": "Clarify that sbin directories are always looked at in get_bin_path (#78171)", "code": "def get_bin_path(arg, opt_dirs=None, required=None):\n \n opt_dirs = [] if opt_dirs is None else opt_dirs\n\n sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']\n paths = []\n for d in opt_dirs:\n if d is not None and os.path.exists(d):\n paths.append(d)\n paths += os.environ.get('PATH', '').split(os.pathsep)\n bin_path = None\n # mangle PATH to include /sbin dirs\n for p in sbin_paths:\n if p not in paths and os.path.exists(p):\n paths.append(p)\n for d in paths:\n if not d:\n continue\n path = os.path.join(d, arg)\n if os.path.exists(path) and not os.path.isdir(path) and is_executable(path):\n bin_path = path\n break\n if bin_path is None:\n raise ValueError('Failed to find required executable \"%s\" in paths: %s' % (arg, os.pathsep.join(paths)))\n\n return bin_path\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 234, "n_words": 101, "vocab_size": 61, "complexity": 14, "nloc": 22, "token_counts": 187, "n_ast_nodes": 303, "n_identifiers": 21, "d_id": 78989, "documentation": { "docstring": "\n Find system executable in PATH. Raises ValueError if executable is not found.\n Optional arguments:\n - required: [Deprecated] Prior to 2.10, if executable is not found and required is true it raises an Exception.\n In 2.10 and later, an Exception is always raised. This parameter will be removed in 2.14.\n - opt_dirs: optional list of directories to search in addition to PATH\n In addition to PATH and opt_dirs, this function also looks through /sbin, /usr/sbin and /usr/local/sbin. 
A lot of\n modules, especially for gathering facts, depend on this behaviour.\n If found return full path, otherwise raise ValueError.\n ", "n_words": 96, "vocab_size": 73, "n_whitespaces": 148, "language": "en" } }, { "id": 205775, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/query.py", "file_name": "query.py", "fun_name": "aggregate", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def aggregate(self, *args, **kwargs):\n \n if self.query.distinct_fields:\n raise NotImplementedError(\"aggregate() + distinct(fields) not implemented.\")\n self._validate_values_are_expressions(\n (*args, *kwargs.values()), method_name=\"aggregate\"\n )\n for arg in args:\n # The default_alias property raises TypeError if default_alias\n # can't be set automatically or AttributeError if it isn't an\n # attribute.\n try:\n arg.default_alias\n except (AttributeError, TypeError):\n raise TypeError(\"Complex aggregates require an alias\")\n kwargs[arg.default_alias] = arg\n\n query = self.query.chain()\n for (alias, aggregate_expr) in kwargs.items():\n query.add_annotation(aggregate_expr, alias, is_summary=True)\n annotation = query.annotations[alias]\n if not annotation.contains_aggregate:\n raise TypeError(\"%s is not an aggregate expression\" % alias)\n for expr in annotation.get_source_expressions():\n if (\n expr.contains_aggregate\n and isinstance(expr, Ref)\n and expr.refs in kwargs\n ):\n name = expr.refs\n raise exceptions.FieldError(\n \"Cannot compute %s('%s'): '%s' is an aggregate\"\n % (annotation.name, name, name)\n )\n return query.get_aggregation(self.db, kwargs)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 540, "n_words": 117, "vocab_size": 88, "complexity": 10, "nloc": 30, "token_counts": 191, "n_ast_nodes": 306, "n_identifiers": 33, "d_id": 51208, "documentation": { "docstring": "\n Return a dictionary containing the calculations (aggregation)\n over the current queryset.\n\n If args is present the expression is passed as a kwarg using\n the Aggregate object's default alias.\n ", "n_words": 28, "vocab_size": 23, "n_whitespaces": 64, "language": "en" } }, { "id": 967, "commit_id": "8fdb37e3227eb40d431c32ae8f5bfb44866e4490", "repo": "PySyft", "path": "packages/syft/src/syft/core/tensor/autodp/ndim_entity_phi.py", "file_name": "ndim_entity_phi.py", "fun_name": "to_local_object_without_private_data_child", "commit_message": "working ndept addition", "code": "def to_local_object_without_private_data_child(self) -> NDimEntityPhiTensor:\n \n # relative\n from ..tensor import Tensor\n\n public_shape = getattr(self, \"public_shape\", None)\n public_dtype = getattr(self, \"public_dtype\", None)\n return Tensor(\n child=NDimEntityPhiTensor(\n child=FixedPrecisionTensor(value=None),\n entities=self.entities,\n min_vals=self.min_vals, # type: ignore\n max_vals=self.max_vals, # type: ignore\n ),\n public_shape=public_shape,\n public_dtype=public_dtype,\n )\n\n\n@serializable(capnp_bytes=True)", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "@serializable(capnp_bytes=True)", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 192, "n_words": 38, "vocab_size": 31, "complexity": 1, "nloc": 16, "token_counts": 79, "n_ast_nodes": 137, "n_identifiers": 16, "d_id": 147, "documentation": { "docstring": "Convert this pointer into a partial version of the NDimEntityPhiTensor but without\n any of 
the private data therein.", "n_words": 18, "vocab_size": 16, "n_whitespaces": 24, "language": "en" } }, { "id": 20003, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_internal/utils/unpacking.py", "file_name": "unpacking.py", "fun_name": "current_umask", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def current_umask() -> int:\n \n mask = os.umask(0)\n os.umask(mask)\n return mask\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 22, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 5, "token_counts": 23, "n_ast_nodes": 41, "n_identifiers": 5, "d_id": 3171, "documentation": { "docstring": "Get the current umask which involves having to set it temporarily.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 126657, "commit_id": "326b5bd1acc6d3d00ab0546e4ae45da6bed501f7", "repo": "ray", "path": "dashboard/modules/job/job_manager.py", "file_name": "job_manager.py", "fun_name": "_recover_running_jobs", "commit_message": "Convert job_manager to be async (#27123)\n\nUpdates jobs api\r\nUpdates snapshot api\r\nUpdates state api\r\n\r\nIncreases jobs api version to 2\r\n\r\nSigned-off-by: Alan Guo aguo@anyscale.com\r\n\r\nWhy are these changes needed?\r\nfollow-up for #25902 (comment)", "code": "async def _recover_running_jobs(self):\n \n all_jobs = await self._job_info_client.get_all_jobs()\n for job_id, job_info in all_jobs.items():\n if not job_info.status.is_terminal():\n create_task(self._monitor_job(job_id))\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 63, "n_words": 16, "vocab_size": 16, "complexity": 3, "nloc": 5, "token_counts": 46, "n_ast_nodes": 80, "n_identifiers": 12, "d_id": 28217, "documentation": { "docstring": "Recovers all running jobs from the status client.\n\n For each job, we will spawn a coroutine to monitor it.\n Each will be added to self._running_jobs and reconciled.\n ", "n_words": 27, "vocab_size": 25, "n_whitespaces": 48, "language": "en" } }, { "id": 262565, "commit_id": "2c9f00a808e0aa76a82af2e8b325abb71f50d1df", "repo": "TTS", "path": "TTS/vocoder/datasets/wavegrad_dataset.py", "file_name": "wavegrad_dataset.py", "fun_name": "collate_full_clips", "commit_message": "Fix tune wavegrad (#1844)\n\n* fix imports in tune_wavegrad\r\n\r\n* load_config returns Coqpit object instead None\r\n\r\n* set action (store true) for flag \"--use_cuda\"; start to tune if module is running as the main program\r\n\r\n* fix var order in the result of batch collating\r\n\r\n* make style\r\n\r\n* make style with black and isort", "code": "def collate_full_clips(batch):\n \n max_mel_length = max([b[0].shape[1] for b in batch]) if len(batch) > 1 else batch[0][0].shape[1]\n max_audio_length = max([b[1].shape[0] for b in batch]) if 
len(batch) > 1 else batch[0][1].shape[0]\n\n mels = torch.zeros([len(batch), batch[0][0].shape[0], max_mel_length])\n audios = torch.zeros([len(batch), max_audio_length])\n\n for idx, b in enumerate(batch):\n mel = b[0]\n audio = b[1]\n mels[idx, :, : mel.shape[1]] = mel\n audios[idx, : audio.shape[0]] = audio\n\n return mels, audios\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 155, "n_words": 62, "vocab_size": 38, "complexity": 6, "nloc": 11, "token_counts": 185, "n_ast_nodes": 272, "n_identifiers": 16, "d_id": 77276, "documentation": { "docstring": "This is used in tune_wavegrad.py.\n It pads sequences to the max length.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 18, "language": "en" } }, { "id": 196508, "commit_id": "c32aa66c02befb7a12915e6ae4ae953a1a81c8f7", "repo": "sympy", "path": "sympy/physics/optics/gaussopt.py", "file_name": "gaussopt.py", "fun_name": "waist2rayleigh", "commit_message": "Refractive_Index_Parameter_Considered", "code": "def waist2rayleigh(w, wavelen, n=1):\n \n w, wavelen = map(sympify, (w, wavelen))\n return w**2*n*pi/wavelen\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 21, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 3, "token_counts": 36, "n_ast_nodes": 55, "n_identifiers": 7, "d_id": 47949, "documentation": { "docstring": "\n Calculate the rayleigh range from the waist of a gaussian beam.\n\n See Also\n ========\n\n rayleigh2waist, BeamParameter\n\n Examples\n ========\n\n >>> from sympy.physics.optics import waist2rayleigh\n >>> from sympy import symbols\n >>> w, wavelen = symbols('w wavelen')\n >>> waist2rayleigh(w, wavelen)\n pi*w**2/wavelen\n ", "n_words": 38, "vocab_size": 30, "n_whitespaces": 75, "language": "en" } }, { "id": 155358, "commit_id": "2bb9a1fab7b0092974853e616dfd5e7ed98f085d", "repo": "modin", "path": "modin/core/execution/ray/implementations/cudf_on_ray/partitioning/partition.py", "file_name": "partition.py", "fun_name": "__copy__", "commit_message": "REFACTOR-#5363: introduce partition constructor; move `add_to_apply_calls` impl in base class (#5354)\n\nSigned-off-by: Myachev ", "code": "def __copy__(self):\n \n # Shallow copy.\n return self.__constructor__(\n self.gpu_manager, self.key, self._length_cache, self._width_cache\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 51, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 27, "n_ast_nodes": 43, "n_identifiers": 7, "d_id": 36353, "documentation": { "docstring": "\n Create a copy of this object.\n\n Returns\n -------\n cuDFOnRayDataframePartition\n A copy of this object.\n ", "n_words": 14, "vocab_size": 10, "n_whitespaces": 61, "language": "en" } }, { "id": 45476, "commit_id": "69f6f9e01b6df76c3c8fa266d460324163957887", "repo": "airflow", "path": "airflow/migrations/versions/8d48763f6d53_add_unique_constraint_to_conn_id.py", "file_name": "8d48763f6d53_add_unique_constraint_to_conn_id.py", "fun_name": "upgrade", "commit_message": "Autogenerate migration reference doc (#21601)\n\n* document airflow version in each alembic migration module and use this to autogen the doc\r\n* update each migration module to have the same description used in migration ref (so it can be used in autogen)", "code": "def upgrade():\n \n try:\n with 
op.batch_alter_table('connection') as batch_op:\n batch_op.alter_column(\"conn_id\", nullable=False, existing_type=sa.String(250, **COLLATION_ARGS))\n batch_op.create_unique_constraint(constraint_name=\"unique_conn_id\", columns=[\"conn_id\"])\n\n except sa.exc.IntegrityError:\n raise Exception(\"Make sure there are no duplicate connections with the same conn_id or null values\")\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 75, "n_words": 30, "vocab_size": 29, "complexity": 2, "nloc": 7, "token_counts": 65, "n_ast_nodes": 117, "n_identifiers": 16, "d_id": 8603, "documentation": { "docstring": "Apply Add unique constraint to ``conn_id`` and set it as non-nullable", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 322100, "commit_id": "b0c35d5e1ff02a634fa26392b60d3885c2c78677", "repo": "PaddleNLP", "path": "paddlenlp/transformers/tinybert/modeling.py", "file_name": "modeling.py", "fun_name": "forward", "commit_message": "Fix the attention mask for fp16 (#1585)", "code": "def forward(self, input_ids, token_type_ids=None, attention_mask=None):\n r\n\n if attention_mask is None:\n attention_mask = paddle.unsqueeze(\n (input_ids == self.pad_token_id\n ).astype(self.pooler.dense.weight.dtype) * -1e4,\n axis=[1, 2])\n embedding_output = self.embeddings(input_ids, token_type_ids)\n encoded_layer = self.encoder(embedding_output, attention_mask)\n pooled_output = self.pooler(encoded_layer)\n\n return encoded_layer, pooled_output\n\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 133, "n_words": 35, "vocab_size": 30, "complexity": 2, "nloc": 68, "token_counts": 92, "n_ast_nodes": 139, "n_identifiers": 19, "d_id": 118057, "documentation": { "docstring": "\n The TinyBertModel forward method, overrides the `__call__()` special method.\n\n Args:\n input_ids (Tensor):\n Indices of input sequence tokens in the vocabulary. 
They are\n numerical representations of tokens that build the input sequence.\n Its data type should be `int64` and it has a shape of [batch_size, sequence_length].\n token_type_ids (Tensor, optional):\n Segment token indices to indicate different portions of the inputs.\n Selected in the range ``[0, type_vocab_size - 1]``.\n If `type_vocab_size` is 2, which means the inputs have two portions.\n Indices can either be 0 or 1:\n\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n\n Its data type should be `int64` and it has a shape of [batch_size, sequence_length].\n Defaults to `None`, which means we don't add segment embeddings.\n attention_mask (Tensor, optional):\n Mask used in multi-head attention to avoid performing attention to some unwanted positions,\n usually the paddings or the subsequent positions.\n Its data type can be int, float and bool.\n When the data type is bool, the `masked` tokens have `False` values and the others have `True` values.\n When the data type is int, the `masked` tokens have `0` values and the others have `1` values.\n When the data type is float, the `masked` tokens have `-INF` values and the others have `0` values.\n It is a tensor with shape broadcasted to `[batch_size, num_attention_heads, sequence_length, sequence_length]`.\n For example, its shape can be [batch_size, sequence_length], [batch_size, sequence_length, sequence_length],\n [batch_size, num_attention_heads, sequence_length, sequence_length].\n Defaults to `None`, which means nothing needed to be prevented attention to.\n\n Returns:\n tuple: Returns tuple (`encoder_output`, `pooled_output`).\n\n With the fields:\n\n - `encoder_output` (Tensor):\n Sequence of hidden-states at the last layer of the model.\n It's data type should be float32 and its shape is [batch_size, sequence_length, hidden_size].\n\n - `pooled_output` (Tensor):\n The output of first token (`[CLS]`) in sequence.\n We \"pool\" the model by simply taking the hidden state corresponding to the first token.\n Its data type should be float32 and its shape is [batch_size, hidden_size].\n\n Example:\n .. code-block::\n\n import paddle\n from paddlenlp.transformers import TinyBertModel, TinyBertTokenizer\n\n tokenizer = TinyBertTokenizer.from_pretrained('tinybert-4l-312d')\n model = TinyBertModel.from_pretrained('tinybert-4l-312d')\n\n inputs = tokenizer(\"Welcome to use PaddlePaddle and PaddleNLP! 
\")\n inputs = {k:paddle.to_tensor([v]) for (k, v) in inputs.items()}\n output = model(**inputs)\n ", "n_words": 358, "vocab_size": 185, "n_whitespaces": 978, "language": "en" } }, { "id": 305670, "commit_id": "420733a064286cfe6fc5cf11483835d15ff83462", "repo": "core", "path": "homeassistant/components/netdata/sensor.py", "file_name": "sensor.py", "fun_name": "available", "commit_message": "Improve entity type hints [n] (#77824)", "code": "def available(self) -> bool:\n \n return self.netdata.available\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 14, "n_ast_nodes": 25, "n_identifiers": 4, "d_id": 104454, "documentation": { "docstring": "Could the resource be accessed during the last update call.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 197452, "commit_id": "9a3ffc6781bd44c47cf49e128ef154389c32876a", "repo": "sympy", "path": "sympy/physics/vector/printing.py", "file_name": "printing.py", "fun_name": "vlatex", "commit_message": "Some pep8 cleanup of sympy.physics.vector.", "code": "def vlatex(expr, **settings):\n r\n latex_printer = VectorLatexPrinter(settings)\n\n return latex_printer.doprint(expr)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 17, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 38, "token_counts": 23, "n_ast_nodes": 38, "n_identifiers": 6, "d_id": 48558, "documentation": { "docstring": "Function for printing latex representation of sympy.physics.vector\n objects.\n\n For latex representation of Vectors, Dyadics, and dynamicsymbols. 
Takes the\n same options as SymPy's :func:`~.latex`; see that function for more\n information;\n\n Parameters\n ==========\n\n expr : valid SymPy object\n SymPy expression to represent in LaTeX form\n settings : args\n Same as latex()\n\n Examples\n ========\n\n >>> from sympy.physics.vector import vlatex, ReferenceFrame, dynamicsymbols\n >>> N = ReferenceFrame('N')\n >>> q1, q2 = dynamicsymbols('q1 q2')\n >>> q1d, q2d = dynamicsymbols('q1 q2', 1)\n >>> q1dd, q2dd = dynamicsymbols('q1 q2', 2)\n >>> vlatex(N.x + N.y)\n '\\\\mathbf{\\\\hat{n}_x} + \\\\mathbf{\\\\hat{n}_y}'\n >>> vlatex(q1 + q2)\n 'q_{1} + q_{2}'\n >>> vlatex(q1d)\n '\\\\dot{q}_{1}'\n >>> vlatex(q1 * q2d)\n 'q_{1} \\\\dot{q}_{2}'\n >>> vlatex(q1dd * q1 / q1d)\n '\\\\frac{q_{1} \\\\ddot{q}_{1}}{\\\\dot{q}_{1}}'\n\n ", "n_words": 113, "vocab_size": 84, "n_whitespaces": 205, "language": "en" } }, { "id": 181690, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tests/tpot_tests.py", "file_name": "tpot_tests.py", "fun_name": "test_pick_two_individuals_eligible_for_crossover_bad", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def test_pick_two_individuals_eligible_for_crossover_bad():\n \n\n ind1 = creator.Individual.from_string(\n 'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)',\n tpot_obj._pset\n )\n ind2 = creator.Individual.from_string(\n 'BernoulliNB(input_matrix, BernoulliNB__alpha=1.0, BernoulliNB__fit_prior=True)',\n tpot_obj._pset\n )\n ind3 = creator.Individual.from_string(\n 'GaussianNB(input_matrix)',\n tpot_obj._pset\n )\n\n # Ind1 and ind2 are not a pair because they are the same, ind3 shares no primitive\n pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1, ind2, ind3])\n assert pick1 is None and pick2 is None\n\n # You can not do crossover with a population of only 1.\n pick1, pick2 = pick_two_individuals_eligible_for_crossover([ind1])\n assert pick1 is None and pick2 is None\n\n # You can not do crossover with a population of 0.\n pick1, pick2 = pick_two_individuals_eligible_for_crossover([])\n assert pick1 is None and pick2 is None\n\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 192, "n_words": 102, "vocab_size": 48, "complexity": 4, "nloc": 19, "token_counts": 104, "n_ast_nodes": 171, "n_identifiers": 12, "d_id": 43477, "documentation": { "docstring": "Assert that pick_two_individuals_eligible_for_crossover() returns the right output when no pair is eligible", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 156931, "commit_id": "b016998fa931f644df4d266a3ed5e7604c20d2a9", "repo": "dask", "path": "dask/array/core.py", "file_name": "core.py", "fun_name": "_vindex", "commit_message": "Removed unused loop control variables (`B007`) (#9458)\n\nCo-authored-by: James Bourbeau ", "code": "def _vindex(x, *indexes):\n \n indexes = replace_ellipsis(x.ndim, indexes)\n\n nonfancy_indexes = []\n reduced_indexes = []\n for ind in indexes:\n if isinstance(ind, Number):\n nonfancy_indexes.append(ind)\n elif isinstance(ind, slice):\n nonfancy_indexes.append(ind)\n reduced_indexes.append(slice(None))\n else:\n nonfancy_indexes.append(slice(None))\n reduced_indexes.append(ind)\n\n nonfancy_indexes = tuple(nonfancy_indexes)\n reduced_indexes = tuple(reduced_indexes)\n\n x = x[nonfancy_indexes]\n\n array_indexes = {}\n 
for i, (ind, size) in enumerate(zip(reduced_indexes, x.shape)):\n if not isinstance(ind, slice):\n ind = np.array(ind, copy=True)\n if ind.dtype.kind == \"b\":\n raise IndexError(\"vindex does not support indexing with boolean arrays\")\n if ((ind >= size) | (ind < -size)).any():\n raise IndexError(\n \"vindex key has entries out of bounds for \"\n \"indexing along axis %s of size %s: %r\" % (i, size, ind)\n )\n ind %= size\n array_indexes[i] = ind\n\n if array_indexes:\n x = _vindex_array(x, array_indexes)\n\n return x\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 379, "n_words": 115, "vocab_size": 82, "complexity": 9, "nloc": 32, "token_counts": 221, "n_ast_nodes": 355, "n_identifiers": 27, "d_id": 36811, "documentation": { "docstring": "Point wise indexing with broadcasting.\n\n >>> x = np.arange(56).reshape((7, 8))\n >>> x\n array([[ 0, 1, 2, 3, 4, 5, 6, 7],\n [ 8, 9, 10, 11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20, 21, 22, 23],\n [24, 25, 26, 27, 28, 29, 30, 31],\n [32, 33, 34, 35, 36, 37, 38, 39],\n [40, 41, 42, 43, 44, 45, 46, 47],\n [48, 49, 50, 51, 52, 53, 54, 55]])\n\n >>> d = from_array(x, chunks=(3, 4))\n >>> result = _vindex(d, [0, 1, 6, 0], [0, 1, 0, 7])\n >>> result.compute()\n array([ 0, 9, 48, 7])\n ", "n_words": 95, "vocab_size": 80, "n_whitespaces": 189, "language": "en" } }, { "id": 128341, "commit_id": "0e8eb8aedb3e158da8c3e7378e818ce87ca7813e", "repo": "ray", "path": "python/ray/train/tests/test_session.py", "file_name": "test_session.py", "fun_name": "test_warn_once", "commit_message": "[AIR] More Train and Tune session deprecations (#28856)\n\nSigned-off-by: Amog Kamsetty amogkamsetty@yahoo.com\r\n\r\nFinish marking train. and tune. 
session APIs as deprecated", "code": "def test_warn_once():\n \n\n with warnings.catch_warnings(record=True) as record:\n # Ignore Deprecation warnings.\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n assert not load_checkpoint()\n assert not load_checkpoint()\n assert not save_checkpoint(x=2)\n assert not report(x=2)\n assert not report(x=3)\n assert not get_dataset_shard()\n\n # Should only warn once.\n assert len(record) == 4\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 107, "n_words": 39, "vocab_size": 26, "complexity": 1, "nloc": 10, "token_counts": 73, "n_ast_nodes": 130, "n_identifiers": 13, "d_id": 28675, "documentation": { "docstring": "Checks if session misuse warning is only shown once per function.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 34690, "commit_id": "e09473a817c5e5871e11cc81004355ef30250502", "repo": "transformers", "path": "src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py", "file_name": "modeling_xlm_roberta_xl.py", "fun_name": "_tie_weights", "commit_message": "Add support for XLM-R XL and XXL models by modeling_xlm_roberta_xl.py (#13727)\n\n* add xlm roberta xl\r\n\r\n* add convert xlm xl fairseq checkpoint to pytorch\r\n\r\n* fix init and documents for xlm-roberta-xl\r\n\r\n* fix indention\r\n\r\n* add test for XLM-R xl,xxl\r\n\r\n* fix model hub name\r\n\r\n* fix some stuff\r\n\r\n* up\r\n\r\n* correct init\r\n\r\n* fix more\r\n\r\n* fix as suggestions\r\n\r\n* add torch_device\r\n\r\n* fix default values of doc strings\r\n\r\n* fix leftovers\r\n\r\n* merge to master\r\n\r\n* up\r\n\r\n* correct hub names\r\n\r\n* fix docs\r\n\r\n* fix model\r\n\r\n* up\r\n\r\n* finalize\r\n\r\n* last fix\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* add copied from\r\n\r\n* make style\r\n\r\nCo-authored-by: Patrick von Platen \r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def _tie_weights(self):\n # To tie those two weights if they get disconnected (on TPU or when the bias is resized)\n self.bias = self.decoder.bias\n\n\n@add_start_docstrings(\n ,\n XLM_ROBERTA_XL_START_DOCSTRING,\n)", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@add_start_docstrings(\n \"\"\"\n XLM-RoBERTa-xlarge Model transformer with a sequence classification/regression head on top (a linear layer on top\n of the pooled output) e.g. for GLUE tasks.\n \"\"\",\n XLM_ROBERTA_XL_START_DOCSTRING,\n)", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 44, "n_words": 27, "vocab_size": 27, "complexity": 1, "nloc": 2, "token_counts": 14, "n_ast_nodes": 38, "n_identifiers": 6, "d_id": 6311, "documentation": { "docstring": "\n XLM-RoBERTa-xlarge Model transformer with a sequence classification/regression head on top (a linear layer on top\n of the pooled output) e.g. 
for GLUE tasks.\n ", "n_words": 23, "vocab_size": 21, "n_whitespaces": 33, "language": "en" } }, { "id": 109611, "commit_id": "4a5d09cba5f4a20e14553cebd8f70c1f34d20d35", "repo": "matplotlib", "path": "lib/matplotlib/collections.py", "file_name": "collections.py", "fun_name": "_convert_mesh_to_triangles", "commit_message": "Deprecate draw_gouraud_triangle (#23824)\n\n* Deprecate draw_gouraud_triangle\r\n\r\n* DOC: minor rewording\r\n\r\nCo-authored-by: Elliott Sales de Andrade \r\n\r\nCo-authored-by: Thomas A Caswell \r\nCo-authored-by: Elliott Sales de Andrade ", "code": "def _convert_mesh_to_triangles(self, coordinates):\n \n if isinstance(coordinates, np.ma.MaskedArray):\n p = coordinates.data\n else:\n p = coordinates\n\n p_a = p[:-1, :-1]\n p_b = p[:-1, 1:]\n p_c = p[1:, 1:]\n p_d = p[1:, :-1]\n p_center = (p_a + p_b + p_c + p_d) / 4.0\n triangles = np.concatenate([\n p_a, p_b, p_center,\n p_b, p_c, p_center,\n p_c, p_d, p_center,\n p_d, p_a, p_center,\n ], axis=2).reshape((-1, 3, 2))\n\n c = self.get_facecolor().reshape((*coordinates.shape[:2], 4))\n c_a = c[:-1, :-1]\n c_b = c[:-1, 1:]\n c_c = c[1:, 1:]\n c_d = c[1:, :-1]\n c_center = (c_a + c_b + c_c + c_d) / 4.0\n colors = np.concatenate([\n c_a, c_b, c_center,\n c_b, c_c, c_center,\n c_c, c_d, c_center,\n c_d, c_a, c_center,\n ], axis=2).reshape((-1, 3, 4))\n\n return triangles, colors\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 355, "n_words": 112, "vocab_size": 56, "complexity": 2, "nloc": 29, "token_counts": 273, "n_ast_nodes": 390, "n_identifiers": 27, "d_id": 23670, "documentation": { "docstring": "\n Convert a given mesh into a sequence of triangles, each point\n with its own color. 
The result can be used to construct a call to\n `~.RendererBase.draw_gouraud_triangles`.\n ", "n_words": 26, "vocab_size": 23, "n_whitespaces": 56, "language": "en" } }, { "id": 282485, "commit_id": "e1b6022b9cf156ffc0697d0d25a5ed2772ea8d68", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/cryptocurrency/due_diligence/binance_model.py", "file_name": "binance_model.py", "fun_name": "get_binance_available_quotes_for_each_coin", "commit_message": "Global plot styles (#1228)\n\n* Add default stylesheets\r\n\r\n* Add terminal style helper class and global style initialization in cfg\r\n\r\n* Style comments and docstrings\r\n\r\n* Load rich terminal theme from config file\r\n\r\n* Add application chart styles to candle charts\r\n\r\n* Add todos\r\n\r\n* Remove explicit color setting for some ta charts\r\n\r\n* Add user styles folder to gitignore\r\n\r\n* Update default stylesheets\r\n\r\n* Add matplotlib font manager support\r\n\r\n* Add matplotlib font manager support\r\n\r\n* Update docstrings and default style\r\n\r\n* Update stocks candle chart formatting (return fig to style title)\r\n\r\n* Style common ta overlap view\r\n\r\n* Make up and down market colors a part of the style helper\r\n\r\n* Update stylesheets\r\n\r\n* Style common ta volume view\r\n\r\n* Style common ta momentum view\r\n\r\n* Style common ta trend indicators view\r\n\r\n* Style common ta volatility view\r\n\r\n* Style common ta volume view\r\n\r\n* Style common ta custom indicators view\r\n\r\n* Fix styling bugs and remove the obvious time x lablel\r\n\r\n* Style charts in the covid menu\r\n\r\n* Set legend position to upper left in the mpl stylesheet\r\n\r\n* Add mpl_rcparams configs for parameters not covered by stylesheets\r\n\r\n* Remove font configuration files\r\n\r\n* Update style class utility functions\r\n\r\n* Implement passing external axes and style utility usage in ema & stoch\r\n\r\n* Add theme watermark and output helpers\r\n\r\n* Rename style to theme\r\n\r\n* Update helper usage in ta/ma and ta/stoch\r\n\r\n* Update style to theme in sample menus\r\n\r\n* Style forex (#1305)\r\n\r\n* Make tight layout optional 'cause mplfinance doesn't support it\r\n\r\n* Apply global style to the forex menu\r\n\r\n* Update code layout in oanda view and black\r\n\r\n* Style common TA (#1315)\r\n\r\n* Make tight layout optional 'cause mplfinance doesn't support it\r\n\r\n* Apply global style to the forex menu\r\n\r\n* Add linewidth to theme for use in mpf's addplots\r\n\r\n* Add vwap to the stocks notebook api\r\n\r\n* Update common/ta overlap to follow charting style\r\n\r\n* Apply style on TerminalStyle init\r\n\r\n* Enable infrastructure for excluding non-trading days from plots\r\n\r\n* Update notebook api to include there and resolve bandit warning\r\n\r\n* Update ta/common/overlap to exclude non-trading days\r\n\r\n* Enable external ax, style and non-trading days in common/ta/momentum\r\n\r\n* Enable external ax, style and non-trading days in common/ta/trend\r\n\r\n* Update vwap to the argument naming convention\r\n\r\n* Enable external ax, style and non-trading days in common/ta/volatility\r\n\r\n* Enable external ax, style and non-trading days in common/ta/volume\r\n\r\n* Enable external ax, style and non-trading days in common/ta/custom\r\n\r\n* Fix controller tests\r\n\r\n* Forgot to disable rewriting of the cassettes ...\r\n\r\n* Fix controller errors that came up because a merge conflict\r\n\r\n* Fix price label position on fib\r\n\r\n* Fix line having wrong x values in fib\r\n\r\nCo-authored-by: Colin 
Delahunty <72827203+colin99d@users.noreply.github.com>\r\n\r\n* Style economy (#1308)\r\n\r\n* Began converting\r\n\r\n* Added alphavan_view\r\n\r\n* Added CNN View\r\n\r\n* Updated nasdaq view, fixed glitch\r\n\r\n* Added fred\r\n\r\n* Refactored URL\r\n\r\n* Theo's requested changes\r\n\r\n* Updated docstrings\r\n\r\n* Updated tests\r\n\r\n* Fixed pylint\r\n\r\n* Fixed tests\r\n\r\n* Theo changes\r\n\r\n* Econ Fix\r\n\r\n* Refactor chart style for Crypto context (#1306)\r\n\r\n* Remove mock for gff\r\n\r\n* Mock visualize_output helper function\r\n\r\n* Refactor\r\n\r\n* Fix plot helper\r\n\r\n* Update legend loc\r\n\r\n* Refactor mplfinance candle plot\r\n\r\n* Fix errors in the helper function\r\n\r\n* Fix binbook having the wrong call_ function name\r\n\r\n* Remove hardcoded style params\r\n\r\n* Resolve kwargs future warning from pandas\r\n\r\n* Remove warnings import\r\n\r\nCo-authored-by: Theodore Aptekarev \r\n\r\n* funds + custom (#1311)\r\n\r\n* funds + custom\r\n\r\n* cleanup cleanup everybody everywhere\r\n\r\n* Fix external axes conditional and a typo\r\n\r\nCo-authored-by: Theodore Aptekarev \r\n\r\n* Add external axes mode to covid charts (#1328)\r\n\r\n* Add portfolio menu plots (#1318)\r\n\r\n* Portfolio view plots (commenting out report stuff)\r\n\r\n* PA Menu broken. Commenting out and fix tests\r\n\r\n* portfolio optimization\r\n\r\n* comment out commented api line\r\n\r\n* Add notes on disabling the pa submenu\r\n\r\nCo-authored-by: Theodore Aptekarev \r\n\r\n* Plot updates in common BA (#1335)\r\n\r\n* Add external axes support to common/ba/finbrain\r\n\r\n* Add external axes support to common/ba/twitter\r\n\r\n* Add external axes support to common/ba/google\r\n\r\n* Add external axes support to common/ba/sentimentinvestor\r\n\r\n* Add sentimentinvestor to the notebooks API\r\n\r\n* Fix tests\r\n\r\n* Etf refactor (#1323)\r\n\r\n* Refactored no ETF\r\n\r\n* Fixed gtff import\r\n\r\n* Fixed tests\r\n\r\n* Fix pie chart style\r\n\r\n* Refactored etf/candle\r\n\r\n* Added pylint fix\r\n\r\n* Fixed tests\r\n\r\n* Update candle chart layout\r\n\r\n* Update etf controller test\r\n\r\n* Remove strange binary file\r\n\r\nCo-authored-by: Theodore Aptekarev \r\n\r\n* Expose ETF candle function in the notebooks API\r\n\r\n* Common BA and Common QA charts update (#1342)\r\n\r\n* Add external axes support to common/ba/finbrain\r\n\r\n* Add external axes support to common/ba/twitter\r\n\r\n* Add external axes support to common/ba/google\r\n\r\n* Add external axes support to common/ba/sentimentinvestor\r\n\r\n* Add sentimentinvestor to the notebooks API\r\n\r\n* Fix tests\r\n\r\n* Update stylesheet files\r\n\r\n* Refactor charts for common/qa\r\n\r\n* Update the forgotten line plot\r\n\r\n* Update tests\r\n\r\n* Add missing arg to a docstring\r\n\r\n* Remove scientific notation\r\n\r\n* Black imports\r\n\r\nCo-authored-by: Minh Hoang \r\n\r\n* Options refactor (#1324)\r\n\r\n* Fixed alphaquery_view\r\n\r\n* finished options\r\n\r\n* Fixed pylint\r\n\r\n* Fixed tests\r\n\r\n* Fixed tests\r\n\r\n* Fixed tests\r\n\r\n* update yfinance\r\n\r\n* Tradier + Chartexchange\r\n\r\n* change mocks from gtff to theme.visualize output\r\n\r\n* tests\r\n\r\nCo-authored-by: Theodore Aptekarev \r\nCo-authored-by: james \r\n\r\n* Refactor Stocks menu (#1325)\r\n\r\n* Fix backtesting menu\r\n\r\n* Refactor comparison analysis\r\n\r\n* Refactor Dark pool shorts\r\n\r\n* Refactor rest of menu\r\n\r\n* Fix test\r\n\r\n* Fix tests failing\r\n\r\n* Fix tests fail\r\n\r\n* Fix test failing\r\n\r\n* Remove 
record mode=none to record new output\r\n\r\n* Rewrite test output\r\n\r\n* Rewrite test outputs\r\n\r\n* Adding more rewritten test output\r\n\r\n* Mock plt.show\r\n\r\n* Mock missing plt.show\r\n\r\n* Missing @pytest.mark.vcr\r\n\r\n* Updating tests : common/behavioural_analysis/finbrain\r\n\r\n* Improve notebooks API coverage for CA and DPS\r\n\r\n* Silence annoying flake8 warning\r\n\r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: Theodore Aptekarev \r\n\r\n* Charts update for common/pred (#1344)\r\n\r\n* Add external axes support to common/ba/finbrain\r\n\r\n* Add external axes support to common/ba/twitter\r\n\r\n* Add external axes support to common/ba/google\r\n\r\n* Add external axes support to common/ba/sentimentinvestor\r\n\r\n* Add sentimentinvestor to the notebooks API\r\n\r\n* Fix tests\r\n\r\n* Update stylesheet files\r\n\r\n* Refactor charts for common/qa\r\n\r\n* Update the forgotten line plot\r\n\r\n* Update tests\r\n\r\n* Add missing arg to a docstring\r\n\r\n* Style pred helper and controllers\r\n\r\n* Update ETS plot\r\n\r\n* Update plots in KNN and pred helper\r\n\r\n* Update plot and pretty table for arima\r\n\r\n* Update plot for common/pred/regression\r\n\r\n* Refactor mc_view\r\n\r\n* Fix linting\r\n\r\n* Fix mypy\r\n\r\n* Move plot title to the axis level to make more vertical space\r\n\r\nCo-authored-by: Minh Hoang \r\nCo-authored-by: jmaslek \r\n\r\n* linter\r\n\r\n* Update common/ba test data\r\n\r\n* Change etf candle to match stock candle\r\n\r\n* try updating sia test\r\n\r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: jmaslek \r\nCo-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com>\r\nCo-authored-by: Minh Hoang \r\nCo-authored-by: Chavithra PARANA ", "code": "def get_binance_available_quotes_for_each_coin() -> dict:\n \n trading_pairs = _get_trading_pairs()\n results = defaultdict(list)\n for pair in trading_pairs:\n results[pair[\"baseAsset\"]].append(pair[\"quoteAsset\"])\n return results\n\n\n@log_start_end(log=logger)", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@log_start_end(log=logger)", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 39, "n_words": 18, "vocab_size": 16, "complexity": 2, "nloc": 15, "token_counts": 40, "n_ast_nodes": 82, "n_identifiers": 12, "d_id": 84165, "documentation": { "docstring": "Helper methods that for every coin available on Binance add all quote assets. 
[Source: Binance]\n\n Returns\n -------\n dict:\n All quote assets for given coin\n {'ETH' : ['BTC', 'USDT' ...], 'UNI' : ['ETH', 'BTC','BUSD', ...]\n\n ", "n_words": 34, "vocab_size": 30, "n_whitespaces": 60, "language": "en" } }, { "id": 66198, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/hr/doctype/shift_assignment/shift_assignment.py", "file_name": "shift_assignment.py", "fun_name": "get_actual_start_end_datetime_of_shift", "commit_message": "style: format code with black", "code": "def get_actual_start_end_datetime_of_shift(employee, for_datetime, consider_default_shift=False):\n\t\n\tactual_shift_start = actual_shift_end = shift_details = None\n\tshift_timings_as_per_timestamp = get_employee_shift_timings(\n\t\temployee, for_datetime, consider_default_shift\n\t)\n\ttimestamp_list = []\n\tfor shift in shift_timings_as_per_timestamp:\n\t\tif shift:\n\t\t\ttimestamp_list.extend([shift.actual_start, shift.actual_end])\n\t\telse:\n\t\t\ttimestamp_list.extend([None, None])\n\ttimestamp_index = None\n\tfor index, timestamp in enumerate(timestamp_list):\n\t\tif timestamp and for_datetime <= timestamp:\n\t\t\ttimestamp_index = index\n\t\t\tbreak\n\tif timestamp_index and timestamp_index % 2 == 1:\n\t\tshift_details = shift_timings_as_per_timestamp[int((timestamp_index - 1) / 2)]\n\t\tactual_shift_start = shift_details.actual_start\n\t\tactual_shift_end = shift_details.actual_end\n\telif timestamp_index:\n\t\tshift_details = shift_timings_as_per_timestamp[int(timestamp_index / 2)]\n\n\treturn actual_shift_start, actual_shift_end, shift_details\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 59, "n_words": 82, "vocab_size": 54, "complexity": 9, "nloc": 23, "token_counts": 145, "n_ast_nodes": 225, "n_identifiers": 19, "d_id": 14136, "documentation": { "docstring": "Takes a datetime and returns the 'actual' start datetime and end datetime of the shift in which the timestamp belongs.\n\tHere 'actual' means - taking in to account the \"begin_check_in_before_shift_start_time\" and \"allow_check_out_after_shift_end_time\".\n\tNone is returned if the timestamp is outside any actual shift timings.\n\tShift Details is also returned(current/upcoming i.e. 
if timestamp not in any actual shift then details of next shift returned)\n\t", "n_words": 63, "vocab_size": 41, "n_whitespaces": 59, "language": "en" } }, { "id": 152959, "commit_id": "3c740dbfcdd69ddc3ab45a42be996e5c61104342", "repo": "modin", "path": "modin/core/dataframe/pandas/dataframe/dataframe.py", "file_name": "dataframe.py", "fun_name": "synchronize_labels", "commit_message": "FEAT-#3111: Ensure relabeling Modin Frame does not lose partition shape (#3662)\n\nCo-authored-by: Devin Petersohn \r\nSigned-off-by: Naren Krishna ", "code": "def synchronize_labels(self, axis=None):\n \n if axis is None:\n self._deferred_index = True\n self._deferred_column = True\n elif axis == 0:\n self._deferred_index = True\n else:\n self._deferred_column = True\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 96, "n_words": 24, "vocab_size": 15, "complexity": 3, "nloc": 8, "token_counts": 42, "n_ast_nodes": 70, "n_identifiers": 5, "d_id": 35205, "documentation": { "docstring": "\n Set the deferred axes variables for the ``PandasDataframe``.\n\n Parameters\n ----------\n axis : int, default: None\n The deferred axis.\n 0 for the index, 1 for the columns.\n ", "n_words": 26, "vocab_size": 20, "n_whitespaces": 84, "language": "en" } }, { "id": 129037, "commit_id": "976ece4bc43abdb628cf4cbffc8546abab723a6d", "repo": "ray", "path": "python/ray/tune/tests/test_trial_runner_pg.py", "file_name": "test_trial_runner_pg.py", "fun_name": "testResourceDeadlock", "commit_message": "[tune] Add test for heterogeneous resource request deadlocks (#21397)\n\nThis adds a test for potential resource deadlocks in experiments with heterogeneous PGFs. If the PGF of a later trial becomes ready before that of a previous trial, we could run into a deadlock. This is currently avoided, but untested, flagging the code path for removal in #21387.", "code": "def testResourceDeadlock(self):\n \n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 9, "n_words": 2, "vocab_size": 2, "complexity": 2, "nloc": 24, "token_counts": 190, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 28880, "documentation": { "docstring": "Tests that resource deadlock is avoided for heterogeneous PGFs.\n\n We start 4 trials in a cluster with 2 CPUs. The first two trials\n require 1 CPU each, the third trial 2 CPUs, the fourth trial 1 CPU.\n\n The second trial needs a bit more time to finish. This means that the\n resources from the first trial will be freed, and the PG of the\n _fourth_ trial becomes ready (not that of the third trial, because that\n requires 2 CPUs - however, one is still occupied by trial 2).\n\n After the first two trials finished, the FIFOScheduler tries to start\n the third trial. However, it can't be started because its placement\n group is not ready. Instead, the placement group of the fourth\n trial is ready. 
Thus, we opt to run the fourth trial instead.\n ", "n_words": 133, "vocab_size": 84, "n_whitespaces": 210, "language": "en" } }, { "id": 288885, "commit_id": "f23b1750e85f07091eb896a0b12b8f95e5646338", "repo": "core", "path": "tests/components/homekit_controller/specific_devices/test_vocolinc_flowerbud.py", "file_name": "test_vocolinc_flowerbud.py", "fun_name": "test_vocolinc_flowerbud_setup", "commit_message": "Migrate HomeKit Controller to use stable identifiers (#80064)", "code": "async def test_vocolinc_flowerbud_setup(hass):\n \n accessories = await setup_accessories_from_file(hass, \"vocolinc_flowerbud.json\")\n await setup_test_accessories(hass, accessories)\n\n await assert_devices_and_entities_created(\n hass,\n DeviceTestInfo(\n unique_id=HUB_TEST_ACCESSORY_ID,\n name=\"VOCOlinc-Flowerbud-0d324b\",\n model=\"Flowerbud\",\n manufacturer=\"VOCOlinc\",\n sw_version=\"3.121.2\",\n hw_version=\"0.1\",\n serial_number=\"AM01121849000327\",\n devices=[],\n entities=[\n EntityTestInfo(\n entity_id=\"humidifier.vocolinc_flowerbud_0d324b\",\n friendly_name=\"VOCOlinc-Flowerbud-0d324b\",\n unique_id=\"00:00:00:00:00:00_1_30\",\n supported_features=HumidifierEntityFeature.MODES,\n capabilities={\n \"available_modes\": [\"normal\", \"auto\"],\n \"max_humidity\": 100.0,\n \"min_humidity\": 0.0,\n },\n state=\"off\",\n ),\n EntityTestInfo(\n entity_id=\"light.vocolinc_flowerbud_0d324b_mood_light\",\n friendly_name=\"VOCOlinc-Flowerbud-0d324b Mood Light\",\n unique_id=\"00:00:00:00:00:00_1_9\",\n supported_features=0,\n capabilities={\"supported_color_modes\": [\"hs\"]},\n state=\"on\",\n ),\n EntityTestInfo(\n entity_id=\"number.vocolinc_flowerbud_0d324b_spray_quantity\",\n friendly_name=\"VOCOlinc-Flowerbud-0d324b Spray Quantity\",\n unique_id=\"00:00:00:00:00:00_1_30_38\",\n capabilities={\n \"max\": 5,\n \"min\": 1,\n \"mode\": NumberMode.AUTO,\n \"step\": 1,\n },\n state=\"5\",\n entity_category=EntityCategory.CONFIG,\n ),\n EntityTestInfo(\n entity_id=\"sensor.vocolinc_flowerbud_0d324b_current_humidity\",\n friendly_name=\"VOCOlinc-Flowerbud-0d324b Current Humidity\",\n unique_id=\"00:00:00:00:00:00_1_30_33\",\n capabilities={\"state_class\": SensorStateClass.MEASUREMENT},\n unit_of_measurement=PERCENTAGE,\n state=\"45.0\",\n ),\n ],\n ),\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 1005, "n_words": 84, "vocab_size": 70, "complexity": 1, "nloc": 59, "token_counts": 238, "n_ast_nodes": 389, "n_identifiers": 34, "d_id": 88034, "documentation": { "docstring": "Test that a Vocolinc Flowerbud can be correctly setup in HA.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 42481, "commit_id": "692adaff901dd9daf29400fdf3385130aefbfb2a", "repo": "nltk", "path": "nltk/corpus/reader/wordnet.py", "file_name": "wordnet.py", "fun_name": "closure", "commit_message": "Fix some tests in Wordnet-related DocStrings", "code": "def closure(self, rel, depth=-1):\n \n\n from nltk.util import acyclic_breadth_first\n\n for synset in acyclic_breadth_first(self, rel, depth):\n if synset != self:\n yield synset\n\n from nltk.util import acyclic_depth_first as acyclic_tree\n from nltk.util import unweighted_minimum_spanning_tree as mst\n\n # Also add this shortcut?\n # from nltk.util import unweighted_minimum_spanning_digraph as umsd\n", "url": "https://github.com/nltk/nltk.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 
106, "n_words": 44, "vocab_size": 29, "complexity": 3, "nloc": 5, "token_counts": 38, "n_ast_nodes": 89, "n_identifiers": 12, "d_id": 7566, "documentation": { "docstring": "\n Return the transitive closure of source under the rel\n relationship, breadth-first, discarding cycles:\n\n >>> from nltk.corpus import wordnet as wn\n >>> computer = wn.synset('computer.n.01')\n >>> topic = lambda s:s.topic_domains()\n >>> print(list(computer.closure(topic)))\n [Synset('computer_science.n.01')]\n\n UserWarning: Discarded redundant search for Synset('computer.n.01') at depth 2\n\n\n Include redundant paths (but only once), avoiding duplicate searches\n (from 'animal.n.01' to 'entity.n.01'):\n\n >>> dog = wn.synset('dog.n.01')\n >>> hyp = lambda s:s.hypernyms()\n >>> print(list(dog.closure(hyp)))\n [Synset('canine.n.02'), Synset('domestic_animal.n.01'), Synset('carnivore.n.01'),\\\n Synset('animal.n.01'), Synset('placental.n.01'), Synset('organism.n.01'),\\\n Synset('mammal.n.01'), Synset('living_thing.n.01'), Synset('vertebrate.n.01'),\\\n Synset('whole.n.02'), Synset('chordate.n.01'), Synset('object.n.01'),\\\n Synset('physical_entity.n.01'), Synset('entity.n.01')]\n\n UserWarning: Discarded redundant search for Synset('animal.n.01') at depth 7\n ", "n_words": 88, "vocab_size": 69, "n_whitespaces": 201, "language": "en" } }, { "id": 290673, "commit_id": "c7dfd6b15a3fc9fa81d260b3dfa8a3d836f9afa8", "repo": "core", "path": "homeassistant/components/flipr/binary_sensor.py", "file_name": "binary_sensor.py", "fun_name": "is_on", "commit_message": "Add flipr battery level sensor (#81389)\n\n* Addition of battery level sensor. Correction of pylint errors\r\n\r\n* Review improvement for typing\r\n\r\n* Review improvement for typing\r\n\r\n* Correction following review", "code": "def is_on(self) -> bool:\n \n return (\n self.coordinator.data[self.entity_description.key] == \"TooLow\"\n or self.coordinator.data[self.entity_description.key] == \"TooHigh\"\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 57, "n_words": 14, "vocab_size": 12, "complexity": 2, "nloc": 6, "token_counts": 40, "n_ast_nodes": 67, "n_identifiers": 7, "d_id": 89787, "documentation": { "docstring": "Return true if the binary sensor is on in case of a Problem is detected.", "n_words": 15, "vocab_size": 14, "n_whitespaces": 14, "language": "en" } }, { "id": 90778, "commit_id": "8cdaa4e86e8296cdbc145f2a53d3eb38cb7a1c2b", "repo": "sentry", "path": "tests/sentry/lang/javascript/test_processor.py", "file_name": "test_processor.py", "fun_name": "test_archive_too_large_for_mem_cache", "commit_message": "ref: close files explicitly in tests.sentry.lang.javascript.test_processor (#35262)", "code": "def test_archive_too_large_for_mem_cache(self, cache_set):\n \n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 10, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 8, "token_counts": 74, "n_ast_nodes": 15, "n_identifiers": 3, "d_id": 18689, "documentation": { "docstring": "cache.set is never called if the archive is too large", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 87360, "commit_id": "ce3e457ef18fe0046d6aca0b545eac55eae8f17c", "repo": "sentry", "path": "src/sentry/testutils/helpers/task_runner.py", "file_name": "task_runner.py", "fun_name": "BurstTaskRunner", "commit_message": "feat(perf-issues): Move 
queue info for post_process into headers (ISP… (#40239)\n\nRe-do of https://github.com/getsentry/sentry/pull/39946 as merge\r\nconflict didn't mesh right.\r\n\r\nSends dedicated issue category data to post_process_group call so we can\r\nroute to the appropriate celery queue\r\n\r\nWill need to include changes from\r\nhttps://github.com/getsentry/sentry/pull/40283 to be merged first and an\r\nensuing PR to remove the old queue.", "code": "def BurstTaskRunner():\n \n\n job_queue = []\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 11, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 7, "token_counts": 28, "n_ast_nodes": 19, "n_identifiers": 2, "d_id": 18288, "documentation": { "docstring": "\n A fixture for queueing up Celery tasks and working them off in bursts.\n\n The main interesting property is that one can run tasks at a later point in\n the future, testing \"concurrency\" without actually spawning any kind of\n worker.\n ", "n_words": 39, "vocab_size": 37, "n_whitespaces": 55, "language": "en" } }, { "id": 203669, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/auth/migrations/0011_update_proxy_permissions.py", "file_name": "0011_update_proxy_permissions.py", "fun_name": "update_proxy_model_permissions", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def update_proxy_model_permissions(apps, schema_editor, reverse=False):\n \n style = color_style()\n Permission = apps.get_model(\"auth\", \"Permission\")\n ContentType = apps.get_model(\"contenttypes\", \"ContentType\")\n alias = schema_editor.connection.alias\n for Model in apps.get_models():\n opts = Model._meta\n if not opts.proxy:\n continue\n proxy_default_permissions_codenames = [\n \"%s_%s\" % (action, opts.model_name) for action in opts.default_permissions\n ]\n permissions_query = Q(codename__in=proxy_default_permissions_codenames)\n for codename, name in opts.permissions:\n permissions_query = permissions_query | Q(codename=codename, name=name)\n content_type_manager = ContentType.objects.db_manager(alias)\n concrete_content_type = content_type_manager.get_for_model(\n Model, for_concrete_model=True\n )\n proxy_content_type = content_type_manager.get_for_model(\n Model, for_concrete_model=False\n )\n old_content_type = proxy_content_type if reverse else concrete_content_type\n new_content_type = concrete_content_type if reverse else proxy_content_type\n try:\n with transaction.atomic(using=alias):\n Permission.objects.using(alias).filter(\n permissions_query,\n content_type=old_content_type,\n ).update(content_type=new_content_type)\n except IntegrityError:\n old = \"{}_{}\".format(old_content_type.app_label, old_content_type.model)\n new = \"{}_{}\".format(new_content_type.app_label, new_content_type.model)\n sys.stdout.write(\n style.WARNING(WARNING.format(old=old, new=new, query=permissions_query))\n )\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 422, "n_words": 106, "vocab_size": 74, "complexity": 8, "nloc": 36, "token_counts": 259, "n_ast_nodes": 413, "n_identifiers": 52, "d_id": 50502, "documentation": { "docstring": "\n Update the content_type of proxy model permissions to use the ContentType\n of the proxy model.\n ", "n_words": 15, "vocab_size": 11, "n_whitespaces": 25, "language": "en" } }, { "id": 100308, "commit_id": 
"c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "lib/gui/analysis/event_reader.py", "file_name": "event_reader.py", "fun_name": "get_loss", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def get_loss(self, session_id=None):\n \n logger.debug(\"Getting loss: (session_id: %s)\", session_id)\n retval = {}\n for idx in [session_id] if session_id else self.session_ids:\n self._check_cache(idx)\n data = self._cache.get_data(idx, \"loss\")\n if not data:\n continue\n data = data[idx]\n retval[idx] = {title: data[\"loss\"][:, idx] for idx, title in enumerate(data[\"labels\"])}\n logger.debug({key: {k: v.shape for k, v in val.items()}\n for key, val in retval.items()})\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 189, "n_words": 56, "vocab_size": 44, "complexity": 7, "nloc": 13, "token_counts": 132, "n_ast_nodes": 210, "n_identifiers": 20, "d_id": 19805, "documentation": { "docstring": " Read the loss from the TensorBoard event logs\n\n Parameters\n ----------\n session_id: int, optional\n The Session ID to return the loss for. Set to ``None`` to return all session\n losses. 
Default ``None``\n\n Returns\n -------\n dict\n The session id(s) as key, with a further dictionary as value containing the loss name\n and list of loss values for each step\n ", "n_words": 57, "vocab_size": 44, "n_whitespaces": 151, "language": "en" } }, { "id": 241709, "commit_id": "4710a8128b52179be2b1fa46b17677eda7b849ea", "repo": "lightning", "path": "tests/callbacks/test_gpu_stats_monitor.py", "file_name": "test_gpu_stats_monitor.py", "fun_name": "test_gpu_stats_monitor_no_queries", "commit_message": "Update test_gpu_stats_monitor.py to use `devices` instead of `gpus` or `ipus` (#11340)", "code": "def test_gpu_stats_monitor_no_queries(tmpdir):\n \n model = BoringModel()\n with pytest.deprecated_call(match=\"GPUStatsMonitor` callback was deprecated in v1.5\"):\n gpu_stats = GPUStatsMonitor(\n memory_utilization=False,\n gpu_utilization=False,\n intra_step_time=True,\n inter_step_time=True,\n )\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_train_batches=2,\n limit_val_batches=0,\n log_every_n_steps=1,\n accelerator=\"gpu\",\n devices=1,\n callbacks=[gpu_stats],\n )\n with mock.patch(\"pytorch_lightning.loggers.tensorboard.TensorBoardLogger.log_metrics\") as log_metrics_mock:\n trainer.fit(model)\n\n assert log_metrics_mock.mock_calls[1:] == [\n mock.call({\"batch_time/intra_step (ms)\": mock.ANY}, step=0),\n mock.call({\"batch_time/inter_step (ms)\": mock.ANY}, step=1),\n mock.call({\"batch_time/intra_step (ms)\": mock.ANY}, step=1),\n ]\n\n\n@pytest.mark.skipif(torch.cuda.is_available(), reason=\"test requires CPU machine\")", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "@pytest.mark.skipif(torch.cuda.is_available(), reason=\"test requires CPU machine\")", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 224, "n_words": 59, "vocab_size": 49, "complexity": 1, "nloc": 26, "token_counts": 159, "n_ast_nodes": 283, "n_identifiers": 37, "d_id": 69662, "documentation": { "docstring": "Test GPU logger doesn't fail if no \"nvidia-smi\" queries are to be performed.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 133808, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/agents/qmix/qmix_policy.py", "file_name": "qmix_policy.py", "fun_name": "_unpack_observation", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _unpack_observation(self, obs_batch):\n \n\n unpacked = _unpack_obs(\n np.array(obs_batch, dtype=np.float32),\n self.observation_space.original_space,\n tensorlib=np,\n )\n\n if isinstance(unpacked[0], dict):\n assert \"obs\" in unpacked[0]\n unpacked_obs = [np.concatenate(tree.flatten(u[\"obs\"]), 1) for u in unpacked]\n else:\n unpacked_obs = unpacked\n\n obs = np.concatenate(unpacked_obs, axis=1).reshape(\n [len(obs_batch), self.n_agents, self.obs_size]\n )\n\n if self.has_action_mask:\n action_mask = np.concatenate(\n [o[\"action_mask\"] for o in unpacked], axis=1\n ).reshape([len(obs_batch), self.n_agents, self.n_actions])\n else:\n action_mask = np.ones(\n [len(obs_batch), self.n_agents, self.n_actions], dtype=np.float32\n )\n\n if self.has_env_global_state:\n state = np.concatenate(tree.flatten(unpacked[0][ENV_STATE]), 1)\n else:\n state = None\n return obs, action_mask, state\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 332, 
"n_words": 75, "vocab_size": 50, "complexity": 6, "nloc": 27, "token_counts": 223, "n_ast_nodes": 338, "n_identifiers": 33, "d_id": 30117, "documentation": { "docstring": "Unpacks the observation, action mask, and state (if present)\n from agent grouping.\n\n Returns:\n obs (np.ndarray): obs tensor of shape [B, n_agents, obs_size]\n mask (np.ndarray): action mask, if any\n state (np.ndarray or None): state tensor of shape [B, state_size]\n or None if it is not in the batch\n ", "n_words": 47, "vocab_size": 34, "n_whitespaces": 116, "language": "en" } }, { "id": 71575, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/tests/pages/test_edit_page.py", "file_name": "test_edit_page.py", "fun_name": "test_create_accessible", "commit_message": "Reformat with black", "code": "def test_create_accessible(self):\n \n response, page = self._create_page(Page.objects.get(pk=2))\n self.assertIsNotNone(page.url)\n self.assertTrue(\n any(\n \"View live\" in message.message and page.url in message.message\n for message in response.context[\"messages\"]\n )\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 110, "n_words": 23, "vocab_size": 19, "complexity": 3, "nloc": 9, "token_counts": 63, "n_ast_nodes": 105, "n_identifiers": 15, "d_id": 15690, "documentation": { "docstring": "\n Create a page under the site root, check the flash message has a valid\n \"View live\" button.\n ", "n_words": 17, "vocab_size": 15, "n_whitespaces": 39, "language": "en" } }, { "id": 49786, "commit_id": "f4d6e64cdc132ae868699a0ba442f4ab1d304a14", "repo": "PaddleHub", "path": "modules/image/text_to_image/disco_diffusion_cnclip_vitb16/reverse_diffusion/model/gaussian_diffusion.py", "file_name": "gaussian_diffusion.py", "fun_name": "q_sample", "commit_message": "add disco_diffusion_cnclip_vitb16 module", "code": "def q_sample(self, x_start, t, noise=None):\n \n if noise is None:\n # noise = th.randn_like(x_start)\n noise = paddle.randn(x_start.shape, x_start.dtype)\n assert noise.shape == x_start.shape\n return (_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +\n _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 98, "n_words": 33, "vocab_size": 26, "complexity": 2, "nloc": 6, "token_counts": 73, "n_ast_nodes": 109, "n_identifiers": 12, "d_id": 9909, "documentation": { "docstring": "\n Diffuse the data for a given number of diffusion steps.\n\n In other words, sample from q(x_t | x_0).\n\n :param x_start: the initial data batch.\n :param t: the number of diffusion steps (minus 1). 
Here, 0 means one step.\n :param noise: if specified, the split-out normal noise.\n :return: A noisy version of x_start.\n ", "n_words": 52, "vocab_size": 42, "n_whitespaces": 102, "language": "en" } }, { "id": 75890, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/search/utils.py", "file_name": "utils.py", "fun_name": "parse_query_string", "commit_message": "Reformat with black", "code": "def parse_query_string(query_string, operator=None, zero_terms=MATCH_NONE):\n \n filters, query_string = separate_filters_from_query(query_string)\n\n is_phrase = False\n tokens = []\n for part in query_string.split('\"'):\n part = part.strip()\n\n if part:\n if is_phrase:\n tokens.append(Phrase(part))\n else:\n tokens.append(\n PlainText(part, operator=operator or PlainText.DEFAULT_OPERATOR)\n )\n\n is_phrase = not is_phrase\n\n if tokens:\n if operator == \"or\":\n search_query = OR(tokens)\n else:\n search_query = AND(tokens)\n else:\n search_query = zero_terms\n\n return filters, search_query\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 231, "n_words": 57, "vocab_size": 38, "complexity": 7, "nloc": 22, "token_counts": 115, "n_ast_nodes": 193, "n_identifiers": 19, "d_id": 16438, "documentation": { "docstring": "\n This takes a query string typed in by a user and extracts the following:\n\n - Quoted terms (for phrase search)\n - Filters\n\n For example, the following query:\n\n `hello \"this is a phrase\" live:true` would be parsed into:\n\n filters: {'live': 'true'}\n tokens: And([PlainText('hello'), Phrase('this is a phrase')])\n ", "n_words": 46, "vocab_size": 40, "n_whitespaces": 75, "language": "en" } }, { "id": 251123, "commit_id": "035b3bf37d9785fef45e81eb9c0c47fc53ab24d2", "repo": "mitmproxy", "path": "test/mitmproxy/proxy/layers/http/test_http.py", "file_name": "test_http.py", "fun_name": "test_memory_usage_completed_flows", "commit_message": "drop HTTP streams that are completed, fix #4456", "code": "def test_memory_usage_completed_flows(tctx):\n \n gc.collect()\n flow_count = flows_tracked()\n\n server = Placeholder(Server)\n assert (\n Playbook(http.HttpLayer(tctx, HTTPMode.regular), hooks=False)\n >> DataReceived(tctx.client, b\"GET http://example.com/ HTTP/1.1\\r\\nHost: example.com\\r\\n\\r\\n\")\n << OpenConnection(server)\n >> reply(None)\n << SendData(server, b\"GET / HTTP/1.1\\r\\nHost: example.com\\r\\n\\r\\n\")\n >> DataReceived(server, b\"HTTP/1.1 204 No Content\\r\\n\\r\\n\")\n << SendData(tctx.client, b\"HTTP/1.1 204 No Content\\r\\n\\r\\n\")\n )\n\n gc.collect()\n assert flows_tracked() == flow_count\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 149, "n_words": 48, "vocab_size": 32, "complexity": 1, "nloc": 15, "token_counts": 105, "n_ast_nodes": 179, "n_identifiers": 20, "d_id": 73601, "documentation": { "docstring": "Make sure that flows are not kept in memory after they are completed.", "n_words": 13, "vocab_size": 12, "n_whitespaces": 12, "language": "en" } }, { "id": 202430, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/csrf_tests/tests.py", "file_name": "tests.py", "fun_name": "test_bad_csrf_cookie_length", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_bad_csrf_cookie_length(self):\n \n self._check_bad_or_missing_cookie(16 * \"a\", \"CSRF 
cookie has incorrect length.\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 24, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 32, "n_identifiers": 3, "d_id": 50129, "documentation": { "docstring": "\n If the CSRF cookie has an incorrect length in a POST request, the\n middleware rejects the incoming request.\n ", "n_words": 18, "vocab_size": 16, "n_whitespaces": 40, "language": "en" } }, { "id": 319622, "commit_id": "69ef26dab04d51e7e102dcb33cd98ddc6ad975fd", "repo": "paperless-ngx", "path": "src/documents/tests/test_file_handling.py", "file_name": "test_file_handling.py", "fun_name": "test_dynamic_path", "commit_message": "Feature: Dynamic document storage pathes (#916)\n\n* Added devcontainer\r\n\r\n* Add feature storage pathes\r\n\r\n* Exclude tests and add versioning\r\n\r\n* Check escaping\r\n\r\n* Check escaping\r\n\r\n* Check quoting\r\n\r\n* Echo\r\n\r\n* Escape\r\n\r\n* Escape :\r\n\r\n* Double escape \\\r\n\r\n* Escaping\r\n\r\n* Remove if\r\n\r\n* Escape colon\r\n\r\n* Missing \\\r\n\r\n* Esacpe :\r\n\r\n* Escape all\r\n\r\n* test\r\n\r\n* Remove sed\r\n\r\n* Fix exclude\r\n\r\n* Remove SED command\r\n\r\n* Add LD_LIBRARY_PATH\r\n\r\n* Adjusted to v1.7\r\n\r\n* Updated test-cases\r\n\r\n* Remove devcontainer\r\n\r\n* Removed internal build-file\r\n\r\n* Run pre-commit\r\n\r\n* Corrected flak8 error\r\n\r\n* Adjusted to v1.7\r\n\r\n* Updated test-cases\r\n\r\n* Corrected flak8 error\r\n\r\n* Adjusted to new plural translations\r\n\r\n* Small adjustments due to code-review backend\r\n\r\n* Adjusted line-break\r\n\r\n* Removed PAPERLESS prefix from settings variables\r\n\r\n* Corrected style change due to search+replace\r\n\r\n* First documentation draft\r\n\r\n* Revert changes to Pipfile\r\n\r\n* Add sphinx-autobuild with keep-outdated\r\n\r\n* Revert merge error that results in wrong storage path is evaluated\r\n\r\n* Adjust styles of generated files ...\r\n\r\n* Adds additional testing to cover dynamic storage path functionality\r\n\r\n* Remove unnecessary condition\r\n\r\n* Add hint to edit storage path dialog\r\n\r\n* Correct spelling of pathes to paths\r\n\r\n* Minor documentation tweaks\r\n\r\n* Minor typo\r\n\r\n* improving wrapping of filter editor buttons with new storage path button\r\n\r\n* Update .gitignore\r\n\r\n* Fix select border radius in non input-groups\r\n\r\n* Better storage path edit hint\r\n\r\n* Add note to edit storage path dialog re document_renamer\r\n\r\n* Add note to bulk edit storage path re document_renamer\r\n\r\n* Rename FILTER_STORAGE_DIRECTORY to PATH\r\n\r\n* Fix broken filter rule parsing\r\n\r\n* Show default storage if unspecified\r\n\r\n* Remove note re storage path on bulk edit\r\n\r\n* Add basic validation of filename variables\r\n\r\nCo-authored-by: Markus Kling \r\nCo-authored-by: Trenton Holmes \r\nCo-authored-by: Michael Shamoon <4887959+shamoon@users.noreply.github.com>\r\nCo-authored-by: Quinn Casey ", "code": "def test_dynamic_path(self):\n \n doc = Document.objects.create(\n title=\"does not matter\",\n created=timezone.make_aware(datetime.datetime(2020, 6, 25, 7, 36, 51, 153)),\n mime_type=\"application/pdf\",\n pk=2,\n checksum=\"2\",\n storage_path=StoragePath.objects.create(path=\"TestFolder/{created}\"),\n )\n self.assertEqual(generate_filename(doc), \"TestFolder/2020-06-25.pdf\")\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": 
"", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 116, "n_words": 22, "vocab_size": 22, "complexity": 1, "nloc": 10, "token_counts": 81, "n_ast_nodes": 127, "n_identifiers": 19, "d_id": 116979, "documentation": { "docstring": "\n GIVEN:\n - A document with a defined storage path\n WHEN:\n - the filename is generated for the document\n THEN:\n - the generated filename uses the defined storage path for the document\n ", "n_words": 31, "vocab_size": 17, "n_whitespaces": 93, "language": "en" } }, { "id": 199617, "commit_id": "e875bdb804b0285e4a9bd8de0158436e792c03cb", "repo": "sympy", "path": "sympy/polys/appellseqs.py", "file_name": "appellseqs.py", "fun_name": "appell_poly", "commit_message": "Initial definition of Appell sequences", "code": "def appell_poly(n, seq, v, f, K, x=None, polys=False):\n \n if n < 0:\n raise ValueError(\n \"Cannot generate Appell sequence polynomial of order %s\" % n)\n poly = DMP(dup_appell(int(n), seq, v, f, K), K)\n if x is not None:\n poly = Poly.new(poly, x)\n else:\n poly = PurePoly.new(poly, Dummy('x'))\n return poly if polys else poly.as_expr()\n\n\n@public", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "@public", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 102, "n_words": 53, "vocab_size": 43, "complexity": 4, "nloc": 10, "token_counts": 97, "n_ast_nodes": 151, "n_identifiers": 19, "d_id": 49295, "documentation": { "docstring": "Generates the nth polynomial in `x` of the Appell sequence with\n parameters `seq`, `v` and `f`.\n\n Parameters\n ==========\n\n n : int\n Order of the polynomial.\n seq : iterable\n v : Expr\n f : callable\n K : Domain\n Domain in which to perform computations and in which the coefficients\n of the specified sequence's polynomials lie in.\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n ", "n_words": 72, "vocab_size": 53, "n_whitespaces": 133, "language": "en" } }, { "id": 274086, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/layers/rnn/rnn_utils.py", "file_name": "rnn_utils.py", "fun_name": "config_for_enable_caching_device", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def config_for_enable_caching_device(rnn_cell):\n \n default_enable_caching_device = (\n tf.compat.v1.executing_eagerly_outside_functions()\n )\n if rnn_cell._enable_caching_device != default_enable_caching_device:\n return {\"enable_caching_device\": rnn_cell._enable_caching_device}\n return {}\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 45, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 7, "token_counts": 37, "n_ast_nodes": 64, "n_identifiers": 8, "d_id": 81176, "documentation": { "docstring": "Return the dict config for RNN cell wrt to enable_caching_device field.\n\n Since enable_caching_device is a internal implementation detail for speed up\n the RNN variable read when running on the multi remote worker setting, we\n don't want this config to be serialized constantly in the JSON. 
We will only\n serialize this field when a none default value is used to create the cell.\n Args:\n rnn_cell: the RNN cell for serialize.\n\n Returns:\n A dict which contains the JSON config for enable_caching_device value or\n empty dict if the enable_caching_device value is same as the default value.\n ", "n_words": 93, "vocab_size": 62, "n_whitespaces": 129, "language": "en" } }, { "id": 228787, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/bar/marker/_pattern.py", "file_name": "_pattern.py", "fun_name": "size", "commit_message": "switch to black .22", "code": "def size(self):\n \n return self[\"size\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 60460, "documentation": { "docstring": "\n Sets the size of unit squares of the pattern fill in pixels,\n which corresponds to the interval of repetition of the pattern.\n\n The 'size' property is a number and may be specified as:\n - An int or float in the interval [0, inf]\n - A tuple, list, or one-dimensional numpy array of the above\n\n Returns\n -------\n int|float|numpy.ndarray\n ", "n_words": 57, "vocab_size": 44, "n_whitespaces": 125, "language": "en" } }, { "id": 154585, "commit_id": "e5b1888cd932909e49194d58035da34b210b91c4", "repo": "modin", "path": "modin/experimental/core/execution/native/implementations/hdk_on_native/expr.py", "file_name": "expr.py", "fun_name": "_cmp_op", "commit_message": "FEAT-#4946: Replace OmniSci with HDK (#4947)\n\nCo-authored-by: Iaroslav Igoshev \r\nSigned-off-by: Andrey Pavlenko ", "code": "def _cmp_op(self, other, op_name):\n \n lhs_dtype_class = self._get_dtype_cmp_class(self._dtype)\n rhs_dtype_class = self._get_dtype_cmp_class(other._dtype)\n res_dtype = get_dtype(bool)\n # In HDK comparison with NULL always results in NULL,\n # but in pandas it is True for 'ne' comparison and False\n # for others.\n # Also pandas allows 'eq' and 'ne' comparison for values\n # of incompatible types which doesn't work in HDK.\n if lhs_dtype_class != rhs_dtype_class:\n if op_name == \"eq\" or op_name == \"ne\":\n return LiteralExpr(op_name == \"ne\")\n else:\n raise TypeError(\n f\"Invalid comparison between {self._dtype} and {other._dtype}\"\n )\n else:\n cmp = OpExpr(self.binary_operations[op_name], [self, other], res_dtype)\n return build_if_then_else(\n self.is_null(), LiteralExpr(op_name == \"ne\"), cmp, res_dtype\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 310, "n_words": 99, "vocab_size": 70, "complexity": 4, "nloc": 16, "token_counts": 106, "n_ast_nodes": 192, "n_identifiers": 18, "d_id": 36095, "documentation": { "docstring": "\n Build a comparison expression.\n\n Parameters\n ----------\n other : BaseExpr\n A value to compare with.\n op_name : str\n The comparison operation name.\n\n Returns\n -------\n BaseExpr\n The resulting comparison expression.\n ", "n_words": 28, "vocab_size": 22, "n_whitespaces": 125, "language": "en" } }, { "id": 292225, "commit_id": "beb30a1ff199596163c655e8ae745a0f1649b78a", "repo": "core", "path": "tests/components/google_travel_time/test_sensor.py", "file_name": "test_sensor.py", "fun_name": "mock_update_empty_fixture", "commit_message": "Add google_travel_time 
sensor tests (#66568)\n\nCo-authored-by: Paulus Schoutsen ", "code": "def mock_update_empty_fixture(mock_update):\n \n mock_update.return_value = None\n yield mock_update\n\n\n@pytest.mark.parametrize(\n \"data,options\",\n [(MOCK_CONFIG, {})],\n)\n@pytest.mark.usefixtures(\"mock_update\", \"mock_config\")", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\n \"data,options\",\n [(MOCK_CONFIG, {})],\n)\n@pytest.mark.usefixtures(\"mock_update\", \"mock_config\")", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 26, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 3, "token_counts": 13, "n_ast_nodes": 75, "n_identifiers": 8, "d_id": 91325, "documentation": { "docstring": "Mock an update to the sensor with an empty response.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 248017, "commit_id": "1783156dbcf4164692e66275d1c29857c434995b", "repo": "synapse", "path": "synapse/storage/databases/main/registration.py", "file_name": "registration.py", "fun_name": "count_real_users", "commit_message": "Add some type hints to datastore (#12423)\n\n* Add some type hints to datastore\r\n\r\n* newsfile\r\n\r\n* change `Collection` to `List`\r\n\r\n* refactor return type of `select_users_txn`\r\n\r\n* correct type hint in `stream.py`\r\n\r\n* Remove `Optional` in `select_users_txn`\r\n\r\n* remove not needed return type in `__init__`\r\n\r\n* Revert change in `get_stream_id_for_event_txn`\r\n\r\n* Remove import from `Literal`", "code": "async def count_real_users(self) -> int:\n \n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 12, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 4, "token_counts": 22, "n_ast_nodes": 17, "n_identifiers": 3, "d_id": 72048, "documentation": { "docstring": "Counts all users without a special user_type registered on the homeserver.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 196818, "commit_id": "f757f3daae6e11ea0cfb7dadc133274d8d74315f", "repo": "sympy", "path": "sympy/series/gruntz.py", "file_name": "gruntz.py", "fun_name": "sign", "commit_message": "Reordered imports 2", "code": "def sign(e, x):\n \n if not isinstance(e, Basic):\n raise TypeError(\"e should be an instance of Basic\")\n\n if e.is_positive:\n return 1\n elif e.is_negative:\n return -1\n elif e.is_zero:\n return 0\n\n elif not e.has(x):\n from sympy.simplify import logcombine\n e = logcombine(e)\n return _sign(e)\n elif e == x:\n return 1\n elif e.is_Mul:\n a, b = e.as_two_terms()\n sa = sign(a, x)\n if not sa:\n return 0\n return sa * sign(b, x)\n elif isinstance(e, exp):\n return 1\n elif e.is_Pow:\n if e.base == S.Exp1:\n return 1\n s = sign(e.base, x)\n if s == 1:\n return 1\n if e.exp.is_Integer:\n return s**e.exp\n elif isinstance(e, log):\n return sign(e.args[0] - 1, x)\n\n # if all else fails, do it the hard way\n c0, e0 = mrv_leadterm(e, x)\n return sign(c0, x)\n\n\n@debug\n@timeit\n@cacheit", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "@debug\n@timeit\n@cacheit", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 330, "n_words": 121, "vocab_size": 73, "complexity": 15, "nloc": 35, "token_counts": 209, "n_ast_nodes": 339, "n_identifiers": 34, "d_id": 48196, "documentation": { "docstring": "\n Returns a sign of an expression e(x) for x->oo.\n\n ::\n\n e > 0 for x sufficiently large ... 
1\n e == 0 for x sufficiently large ... 0\n e < 0 for x sufficiently large ... -1\n\n The result of this function is currently undefined if e changes sign\n arbitrarily often for arbitrarily large x (e.g. sin(x)).\n\n Note that this returns zero only if e is *constantly* zero\n for x sufficiently large. [If e is constant, of course, this is just\n the same thing as the sign of e.]\n ", "n_words": 89, "vocab_size": 50, "n_whitespaces": 139, "language": "en" } }, { "id": 204002, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/gis/gdal/raster/band.py", "file_name": "band.py", "fun_name": "statistics", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def statistics(self, refresh=False, approximate=False):\n \n # Prepare array with arguments for capi function\n smin, smax, smean, sstd = c_double(), c_double(), c_double(), c_double()\n stats_args = [\n self._ptr,\n c_int(approximate),\n byref(smin),\n byref(smax),\n byref(smean),\n byref(sstd),\n c_void_p(),\n c_void_p(),\n ]\n\n if refresh or self._stats_refresh:\n func = capi.compute_band_statistics\n else:\n # Add additional argument to force computation if there is no\n # existing PAM file to take the values from.\n force = True\n stats_args.insert(2, c_int(force))\n func = capi.get_band_statistics\n\n # Computation of statistics fails for empty bands.\n try:\n func(*stats_args)\n result = smin.value, smax.value, smean.value, sstd.value\n except GDALException:\n result = (None, None, None, None)\n\n self._stats_refresh = False\n\n return result\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 369, "n_words": 98, "vocab_size": 77, "complexity": 4, "nloc": 25, "token_counts": 156, "n_ast_nodes": 241, "n_identifiers": 24, "d_id": 50606, "documentation": { "docstring": "\n Compute statistics on the pixel values of this band.\n\n The return value is a tuple with the following structure:\n (minimum, maximum, mean, standard deviation).\n\n If approximate=True, the statistics may be computed based on overviews\n or a subset of image tiles.\n\n If refresh=True, the statistics will be computed from the data directly,\n and the cache will be updated where applicable.\n\n For empty bands (where all pixel values are nodata), all statistics\n values are returned as None.\n\n For raster formats using Persistent Auxiliary Metadata (PAM) services,\n the statistics might be cached in an auxiliary file.\n ", "n_words": 93, "vocab_size": 68, "n_whitespaces": 178, "language": "en" } }, { "id": 219615, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pydecimal.py", "file_name": "_pydecimal.py", "fun_name": "logical_or", "commit_message": "add python 3.10.4 for windows", "code": "def logical_or(self, other, context=None):\n \n if context is None:\n context = getcontext()\n\n other = _convert_other(other, raiseit=True)\n\n if not self._islogical() or not other._islogical():\n return context._raise_error(InvalidOperation)\n\n # fill to context.prec\n (opa, opb) = self._fill_logical(context, self._int, other._int)\n\n # make the operation, and clean starting zeroes\n result = \"\".join([str(int(a)|int(b)) for a,b in zip(opa,opb)])\n return _dec_from_triple(0, result.lstrip('0') or '0', 0)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 139, "n_words": 
54, "vocab_size": 45, "complexity": 6, "nloc": 9, "token_counts": 122, "n_ast_nodes": 197, "n_identifiers": 23, "d_id": 55652, "documentation": { "docstring": "Applies an 'or' operation between self and other's digits.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 207245, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_inlines/tests.py", "file_name": "tests.py", "fun_name": "test_localize_pk_shortcut", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_localize_pk_shortcut(self):\n \n holder = Holder.objects.create(pk=123456789, dummy=42)\n inner = Inner.objects.create(pk=987654321, holder=holder, dummy=42, readonly=\"\")\n response = self.client.get(\n reverse(\"admin:admin_inlines_holder_change\", args=(holder.id,))\n )\n inner_shortcut = \"r/%s/%s/\" % (\n ContentType.objects.get_for_model(inner).pk,\n inner.pk,\n )\n self.assertContains(response, inner_shortcut)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 117, "n_words": 28, "vocab_size": 24, "complexity": 1, "nloc": 11, "token_counts": 97, "n_ast_nodes": 153, "n_identifiers": 21, "d_id": 51910, "documentation": { "docstring": "\n The \"View on Site\" link is correct for locales that use thousand\n separators.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 35, "language": "en" } }, { "id": 265857, "commit_id": "99cf1b16718ca8bc037f546c41a9258bcc89b495", "repo": "netbox", "path": "netbox/netbox/graphql/__init__.py", "file_name": "__init__.py", "fun_name": "convert_field_to_list_or_connection", "commit_message": "8245 add graphql filtering at all levels (#10618)\n\n* 8245 monkey-patch graphene-django to support filtering at all levels\r\n\r\n* 8245 fix tests\r\n\r\n* 8245 fix tests", "code": "def convert_field_to_list_or_connection(field, registry=None):\n \n model = field.related_model\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 4, "token_counts": 22, "n_ast_nodes": 26, "n_identifiers": 5, "d_id": 78216, "documentation": { "docstring": "\n From graphene_django.converter.py we need to monkey-patch this to return\n our ObjectListField with filtering support instead of DjangoListField\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 27, "language": "en" } }, { "id": 196330, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/integrals/intpoly.py", "file_name": "intpoly.py", "fun_name": "integration_reduction", "commit_message": "Updated import locations", "code": "def integration_reduction(facets, index, a, b, expr, dims, degree):\n \n expr = _sympify(expr)\n if expr.is_zero:\n return expr\n\n value = S.Zero\n x0 = facets[index].points[0]\n m = len(facets)\n gens = (x, y)\n\n inner_product = diff(expr, gens[0]) * x0[0] + diff(expr, gens[1]) * x0[1]\n\n if inner_product != 0:\n value += integration_reduction(facets, index, a, b,\n inner_product, dims, degree - 1)\n\n value += left_integral2D(m, index, facets, x0, expr, gens)\n\n return value/(len(dims) + degree - 1)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 153, "n_words": 68, "vocab_size": 43, "complexity": 3, "nloc": 14, "token_counts": 145, "n_ast_nodes": 
208, "n_identifiers": 23, "d_id": 47830, "documentation": { "docstring": "Helper method for main_integrate. Returns the value of the input\n expression evaluated over the polytope facet referenced by a given index.\n\n Parameters\n ===========\n\n facets :\n List of facets of the polytope.\n index :\n Index referencing the facet to integrate the expression over.\n a :\n Hyperplane parameter denoting direction.\n b :\n Hyperplane parameter denoting distance.\n expr :\n The expression to integrate over the facet.\n dims :\n List of symbols denoting axes.\n degree :\n Degree of the homogeneous polynomial.\n\n Examples\n ========\n\n >>> from sympy.abc import x, y\n >>> from sympy.integrals.intpoly import integration_reduction,\\\n hyperplane_parameters\n >>> from sympy import Point, Polygon\n >>> triangle = Polygon(Point(0, 3), Point(5, 3), Point(1, 1))\n >>> facets = triangle.sides\n >>> a, b = hyperplane_parameters(triangle)[0]\n >>> integration_reduction(facets, 0, a, b, 1, (x, y), 0)\n 5\n ", "n_words": 125, "vocab_size": 79, "n_whitespaces": 240, "language": "en" } }, { "id": 244367, "commit_id": "9c5b3331ac8edbfa328922fbab45c382380da540", "repo": "mmdetection", "path": "mmdet/models/detectors/base.py", "file_name": "base.py", "fun_name": "forward_test", "commit_message": "Simplify api of one-stage detector", "code": "def forward_test(self, aug_batch_imgs, aug_batch_data_samples, **kwargs):\n \n num_augs = len(aug_batch_data_samples)\n batch_size = len(aug_batch_data_samples[0])\n\n aug_batch_img_metas = []\n for aug_index in range(num_augs):\n batch_img_metas = []\n for batch_index in range(batch_size):\n single_data_sample = aug_batch_data_samples[aug_index][\n batch_index]\n batch_img_metas.append(single_data_sample.meta)\n\n aug_batch_img_metas.append(batch_img_metas)\n\n for var, name in [(aug_batch_imgs, 'imgs'),\n (aug_batch_img_metas, 'img_metas')]:\n if not isinstance(var, list):\n raise TypeError('{} must be a list, but got {}'.format(\n name, type(var)))\n\n num_augs = len(aug_batch_imgs)\n if num_augs != len(aug_batch_img_metas):\n raise ValueError(\n 'num of augmentations ({}) != num of image meta ({})'.format(\n len(aug_batch_imgs), len(aug_batch_img_metas)))\n\n # NOTE the batched image size information may be useful, e.g.\n # in DETR, this is needed for the construction of masks, which is\n # then used for the transformer_head.\n for batch_img, batch_img_metas in zip(aug_batch_imgs,\n aug_batch_img_metas):\n batch_size = len(batch_img_metas)\n for img_id in range(batch_size):\n batch_img_metas[img_id]['batch_input_shape'] = \\\n tuple(batch_img.size()[-2:])\n\n if num_augs == 1:\n return self.simple_test(aug_batch_imgs[0], aug_batch_img_metas[0],\n **kwargs)\n else:\n assert 'proposals' not in kwargs, '`self.aug_test` do not ' \\\n 'support pre-difined proposals'\n aug_results = self.aug_test(aug_batch_imgs, aug_batch_img_metas,\n **kwargs)\n return aug_results\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 710, "n_words": 151, "vocab_size": 105, "complexity": 9, "nloc": 36, "token_counts": 247, "n_ast_nodes": 394, "n_identifiers": 32, "d_id": 70356, "documentation": { "docstring": "\n Args:\n aug_batch_imgs (List[Tensor]): the outer list indicates test-time\n augmentations, the Tensor should have a shape NxCxHxW.\n We only support batch size = 1 when do the augtest.\n aug_batch_data_samples (List[List[:obj:`GeneralData`]]): the\n outer list indicates test-time 
augmentations and inner list\n indicates batch dimension. We only support batch size = 1 when\n do the augtest.\n\n Returns:\n list(obj:`InstanceData`): Detection results of the\n input images. Each item usually contains\\\n following keys.\n\n - scores (Tensor): Classification scores, has a shape\n (num_instance,)\n - labels (Tensor): Labels of bboxes, has a shape\n (num_instances,).\n - bboxes (Tensor): Has a shape (num_instances, 4),\n the last dimension 4 arrange as (x1, y1, x2, y2).\n ", "n_words": 103, "vocab_size": 68, "n_whitespaces": 327, "language": "en" } }, { "id": 37154, "commit_id": "5da33f872913255d64717efe745a053975bbc28e", "repo": "transformers", "path": "src/transformers/modeling_utils.py", "file_name": "modeling_utils.py", "fun_name": "_move_model_to_meta", "commit_message": "[modeling utils] revamp `from_pretrained(..., low_cpu_mem_usage=True)` + tests (#16657)\n\n* add low_cpu_mem_usage tests\r\n\r\n* wip: revamping\r\n\r\n* wip\r\n\r\n* install /usr/bin/time\r\n\r\n* wip\r\n\r\n* cleanup\r\n\r\n* cleanup\r\n\r\n* cleanup\r\n\r\n* cleanup\r\n\r\n* cleanup\r\n\r\n* fix assert\r\n\r\n* put the wrapper back\r\n\r\n* cleanup; switch to bert-base-cased\r\n\r\n* Trigger CI\r\n\r\n* Trigger CI", "code": "def _move_model_to_meta(model, loaded_state_dict_keys, start_prefix):\n \n\n # meta device was added in pt=1.9\n require_version_core(\"torch>=1.9\")\n\n # dematerialize param storage for keys that are going to be replaced by state_dict, by\n # putting those on the meta device\n for k in loaded_state_dict_keys:\n submodule, param_name = find_submodule_and_param_name(model, k, start_prefix)\n if submodule is not None:\n # selectively switch to the meta device only those params/buffers that will\n # be next replaced from state_dict. This a complex way to do p.to_(\"meta\")\n # since we have no in-place to_ for tensors.\n new_val = getattr(submodule, param_name)\n if isinstance(new_val, torch.nn.Parameter):\n # isinstance returns False for Params on meta device, so switch after the check\n new_val = torch.nn.Parameter(new_val.to(\"meta\"))\n else:\n new_val = new_val.to(\"meta\")\n setattr(submodule, param_name, new_val)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 268, "n_words": 114, "vocab_size": 82, "complexity": 4, "nloc": 11, "token_counts": 90, "n_ast_nodes": 152, "n_identifiers": 17, "d_id": 6745, "documentation": { "docstring": "\n Moves `loaded_state_dict_keys` in model to meta device which frees up the memory taken by those params.\n\n `start_prefix` is used for models which insert their name into model keys, e.g. `bert` in\n `bert.pooler.dense.weight`\n\n ", "n_words": 32, "vocab_size": 29, "n_whitespaces": 45, "language": "en" } }, { "id": 215965, "commit_id": "f2a783643de61cac1ff3288b40241e5ce6e1ddc8", "repo": "salt", "path": "salt/modules/win_pkg.py", "file_name": "win_pkg.py", "fun_name": "_get_source_sum", "commit_message": "Update to latest ``pyupgrade`` hook. Stop skipping it on CI.\n\nSigned-off-by: Pedro Algarvio ", "code": "def _get_source_sum(source_hash, file_path, saltenv):\n \n ret = dict()\n schemes = (\"salt\", \"http\", \"https\", \"ftp\", \"swift\", \"s3\", \"file\")\n invalid_hash_msg = (\n \"Source hash '{}' format is invalid. 
It must be in \"\n \"the format =\".format(source_hash)\n )\n source_hash = str(source_hash)\n source_hash_scheme = urllib.parse.urlparse(source_hash).scheme\n\n if source_hash_scheme in schemes:\n # The source_hash is a file on a server\n try:\n cached_hash_file = __salt__[\"cp.cache_file\"](source_hash, saltenv)\n except MinionError as exc:\n log.exception(\"Failed to cache %s\", source_hash, exc_info=exc)\n raise\n\n if not cached_hash_file:\n raise CommandExecutionError(\n \"Source hash file {} not found\".format(source_hash)\n )\n\n ret = __salt__[\"file.extract_hash\"](cached_hash_file, \"\", file_path)\n if ret is None:\n raise SaltInvocationError(invalid_hash_msg)\n else:\n # The source_hash is a hash string\n items = source_hash.split(\"=\", 1)\n\n if len(items) != 2:\n invalid_hash_msg = \"{}, or it must be a supported protocol: {}\".format(\n invalid_hash_msg, \", \".join(schemes)\n )\n raise SaltInvocationError(invalid_hash_msg)\n\n ret[\"hash_type\"], ret[\"hsum\"] = (item.strip().lower() for item in items)\n\n return ret\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 379, "n_words": 136, "vocab_size": 93, "complexity": 7, "nloc": 31, "token_counts": 201, "n_ast_nodes": 350, "n_identifiers": 31, "d_id": 54287, "documentation": { "docstring": "\n Extract the hash sum, whether it is in a remote hash file, or just a string.\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 23, "language": "en" } }, { "id": 189410, "commit_id": "d8dc0b462d973f0c1ddd62e557d2da89e45f6265", "repo": "manim", "path": "manim/mobject/types/vectorized_mobject.py", "file_name": "vectorized_mobject.py", "fun_name": "generate_rgbas_array", "commit_message": "Cleanup `simple_functions.py` (#2437)\n\n* Remove fdiv\n\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\n\nfor more information, see https://pre-commit.ci\n\n* actually remove fdiv\n\n* Use lru cache and scipy's func\n\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\n\nfor more information, see https://pre-commit.ci\n\n* set maxsize\n\nshould be enough for how it's used\n\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\n\nfor more information, see https://pre-commit.ci\n\n* Remove get_num_args\n\n* Remove one instance of clip_in_place\n\n* Readd clip_in_place, it has a use\n\n* rm unnecessary line\n\n* Properly clip color\n\n* Revert \"Properly clip color\"\n\nThis reverts commit 0591c7833457930b399f4125958f81d038c96e69.\n\n* remove clip in place\n\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\n\nfor more information, see https://pre-commit.ci\n\n* actually remove\n\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def generate_rgbas_array(self, color, opacity):\n \n colors = list(tuplify(color))\n opacities = list(tuplify(opacity))\n rgbas = np.array(\n [color_to_rgba(c, o) for c, o in zip(*make_even(colors, opacities))],\n )\n\n sheen_factor = self.get_sheen_factor()\n if sheen_factor != 0 and len(rgbas) == 1:\n light_rgbas = np.array(rgbas)\n light_rgbas[:, :3] += sheen_factor\n np.clip(light_rgbas, 0, 1, out=light_rgbas)\n rgbas = np.append(rgbas, light_rgbas, axis=0)\n return rgbas\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 162, "n_words": 51, "vocab_size": 42, "complexity": 4, "nloc": 13, "token_counts": 125, "n_ast_nodes": 193, "n_identifiers": 24, 
"d_id": 46046, "documentation": { "docstring": "\n First arg can be either a color, or a tuple/list of colors.\n Likewise, opacity can either be a float, or a tuple of floats.\n If self.sheen_factor is not zero, and only\n one color was passed in, a second slightly light color\n will automatically be added for the gradient\n ", "n_words": 48, "vocab_size": 37, "n_whitespaces": 91, "language": "en" } }, { "id": 178812, "commit_id": "5251e9561d7d1527fb99068e7b3e33592394cc16", "repo": "Nuitka", "path": "nuitka/plugins/PluginBase.py", "file_name": "PluginBase.py", "fun_name": "getExtraIncludeDirectories", "commit_message": "Plugins: Add interface for adding include directories for C", "code": "def getExtraIncludeDirectories(self):\n \n\n # Virtual method, pylint: disable=no-self-use\n return None\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 8, "n_ast_nodes": 17, "n_identifiers": 2, "d_id": 42830, "documentation": { "docstring": "Decide which extra directories to use for C includes in compilation.\n\n Returns:\n List of directories or None by default\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 44, "language": "en" } }, { "id": 8063, "commit_id": "e4fc06f986e03919d9aef3ab55c05fee5a6b9d3a", "repo": "ludwig", "path": "ludwig/datasets/__init__.py", "file_name": "__init__.py", "fun_name": "list_datasets", "commit_message": "Config-first Datasets API (ludwig.datasets refactor) (#2479)\n\n* Adds README and stub for reading dataset configs.\r\n\r\n* Adds __init__.py for configs, moves circular import into function scope in ludwig/datasets/__init__.py\r\n\r\n* Print config files in datasets folder.\r\n\r\n* First pass at automatic archive extraction.\r\n\r\n* Implemented downloading and extract.\r\n\r\n* Refactor DatasetConfig into its own file.\r\n\r\n* Fixed bugs downloading kaggle dataset.\r\n\r\n* Makes registry store dataset instances, not classes. Also comments out import_submodules for testing.\r\n\r\n* Typo fix.\r\n\r\n* Only pass data files on to load_unprocessed_dataframe, symlink directories.\r\n\r\n* Downloading dataset files into existing directory if exists.\r\n\r\n* Refactor: make datasets fully config-first, lazy load dataset loaders.\r\n\r\n* Implemented agnews custom loader.\r\n\r\n* Implements train/validation/test split by files, and globbing support\r\n\r\n* Adds _glob_multiple\r\n\r\n* Adds adult_census_income, agnews, allstate_claims_severity.\r\n\r\n* Implements sha256 verification, adds more datasets up to creditcard_fraud.\r\n\r\n* Adds checksums, dbpedia, electricity\r\n\r\n* Fixes gzip file name returned as string not list, adds up to forest_cover dataset.\r\n\r\n* Adds datasets up to reuters_r8\r\n\r\n* Adds all datasets which don't require a custom class.\r\n\r\n* Restore dataset import behavior by implementing module __getattr__\r\n\r\n* Adds KDD datasets.\r\n\r\n* Adds ieee_fraud.\r\n\r\n* Adds imbalanced_insurance, insurance_lite.\r\n\r\n* Adds mnist.\r\n\r\n* Completes implementation of all of the built-in datasets.\r\n\r\n* Made cache_dir optional, read from environment variable if set.\r\n\r\n* Upgrades datasets tests.\r\n\r\n* Adds test for new dataset config API. 
Also adds scripts for dataset link checking.\r\n\r\n* Fixes loading allstate claims severity dataset.\r\n\r\n* Use @lru_cache(1), @cache not supported in python < 3.9\r\n\r\n* Deletes dataset registry, updates automl test utils\r\n\r\n* Fix imports of datasets API.\r\n\r\n* Adds more detail to sha256: docstring and basic README\r\n\r\n* Copy-paste link oops.\r\n\r\n* Fixes handling of nested archive types like .tar.bz Also adds a LUDWIG_CACHE and export to the README\r\n\r\n* Adds link for twitter bots.\r\n\r\n* Fix order of splits in README.md\r\n\r\n* typo\r\n\r\n* Adds verify as a phase in doc string.\r\n\r\n* Support .pqt, .pq extensions for parquet.\r\n\r\n* Handle nested archives with longer file extensions like .csv.zip\r\n\r\n* Handle nested .gz types properly too. Check all extensions with .endswith\r\n\r\n* Handle all archive types with .endswith\r\n\r\n* Update ludwig/datasets/loaders/split_loaders.py\r\n\r\nCo-authored-by: Joppe Geluykens \r\n\r\n* Adds explanation for export, fixes preserve_paths (should be relative to processed_dataset_dir)\r\n\r\n* Resolve preserved paths relative to raw dataset dir before move.\r\n\r\n* Catch runtime exception from extracting sub-archives.\r\n\r\nCo-authored-by: Daniel Treiman \r\nCo-authored-by: Joppe Geluykens ", "code": "def list_datasets() -> List[str]:\n \n return sorted(_get_dataset_configs().keys())\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 21, "n_ast_nodes": 38, "n_identifiers": 6, "d_id": 1318, "documentation": { "docstring": "Returns a list of the names of all available datasets.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 264887, "commit_id": "3a461d02793e6f9d41c2b1a92647e691de1abaac", "repo": "netbox", "path": "netbox/dcim/tests/test_models.py", "file_name": "test_models.py", "fun_name": "test_cable_cannot_terminate_to_an_existing_connection", "commit_message": "Update Cable instantiations to match new signature", "code": "def test_cable_cannot_terminate_to_an_existing_connection(self):\n \n # Try to create a cable with the same interface terminations\n cable = Cable(a_terminations=[self.interface2], b_terminations=[self.interface1])\n with self.assertRaises(ValidationError):\n cable.clean()\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 59, "n_words": 20, "vocab_size": 18, "complexity": 1, "nloc": 4, "token_counts": 39, "n_ast_nodes": 68, "n_identifiers": 11, "d_id": 77898, "documentation": { "docstring": "\n Either side of a cable cannot be terminated when that side already has a connection\n ", "n_words": 15, "vocab_size": 13, "n_whitespaces": 30, "language": "en" } }, { "id": 320660, "commit_id": "4094e15bcbe71311685cb8c57abb6bfb4deadbdc", "repo": "qutebrowser", "path": "qutebrowser/utils/version.py", "file_name": "version.py", "fun_name": "_get_pyqt_webengine_qt_version", "commit_message": "version: Always prefer builtin importlib.metadata\n\nIf we have a builtin importlib.metadata (Python 3.8+) and the importlib_metadata\nbackport installed, we preferred the backport. However, the version.py tests do\nthe opposite: They only mock the builtin if it is available. 
This did lead to\nfailing tests if the backport was installed in an environment where the builtin\nwas available too.\n\nSince we don't need any specialized functionality (only reading the version), we\ncan prefer the builtin no matter whether a backport is available or not.", "code": "def _get_pyqt_webengine_qt_version() -> Optional[str]:\n \n try:\n import importlib.metadata as importlib_metadata # type: ignore[import]\n except ImportError:\n try:\n import importlib_metadata # type: ignore[no-redef]\n except ImportError:\n log.misc.debug(\"Neither importlib.metadata nor backport available\")\n return None\n\n for suffix in ['Qt5', 'Qt']:\n try:\n return importlib_metadata.version(f'PyQtWebEngine-{suffix}')\n except importlib_metadata.PackageNotFoundError:\n log.misc.debug(f\"PyQtWebEngine-{suffix} not found\")\n\n return None\n\n\n@dataclasses.dataclass", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "@dataclasses.dataclass", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 151, "n_words": 45, "vocab_size": 32, "complexity": 5, "nloc": 30, "token_counts": 73, "n_ast_nodes": 144, "n_identifiers": 15, "d_id": 117258, "documentation": { "docstring": "Get the version of the PyQtWebEngine-Qt package.\n\n With PyQtWebEngine 5.15.3, the QtWebEngine binary got split into its own\n PyQtWebEngine-Qt PyPI package:\n\n https://www.riverbankcomputing.com/pipermail/pyqt/2021-February/043591.html\n https://www.riverbankcomputing.com/pipermail/pyqt/2021-February/043638.html\n\n PyQtWebEngine 5.15.4 renamed it to PyQtWebEngine-Qt5...:\n https://www.riverbankcomputing.com/pipermail/pyqt/2021-March/043699.html\n\n Here, we try to use importlib.metadata or its backport (optional dependency) to\n figure out that version number. If PyQtWebEngine is installed via pip, this will\n give us an accurate answer.\n ", "n_words": 60, "vocab_size": 51, "n_whitespaces": 90, "language": "en" } }, { "id": 259640, "commit_id": "ade90145c9c660a1a7baf2315185995899b0f356", "repo": "scikit-learn", "path": "sklearn/manifold/_t_sne.py", "file_name": "_t_sne.py", "fun_name": "trustworthiness", "commit_message": "FIX Raise error when n_neighbors >= n_samples / 2 in manifold.trustworthiness (#23033)\n\nCo-authored-by: Shao Yang Hong \r\nCo-authored-by: Thomas J. 
Fan \r\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>", "code": "def trustworthiness(X, X_embedded, *, n_neighbors=5, metric=\"euclidean\"):\n r\n n_samples = X.shape[0]\n if n_neighbors >= n_samples / 2:\n raise ValueError(\n f\"n_neighbors ({n_neighbors}) should be less than n_samples / 2\"\n f\" ({n_samples / 2})\"\n )\n dist_X = pairwise_distances(X, metric=metric)\n if metric == \"precomputed\":\n dist_X = dist_X.copy()\n # we set the diagonal to np.inf to exclude the points themselves from\n # their own neighborhood\n np.fill_diagonal(dist_X, np.inf)\n ind_X = np.argsort(dist_X, axis=1)\n # `ind_X[i]` is the index of sorted distances between i and other samples\n ind_X_embedded = (\n NearestNeighbors(n_neighbors=n_neighbors)\n .fit(X_embedded)\n .kneighbors(return_distance=False)\n )\n\n # We build an inverted index of neighbors in the input space: For sample i,\n # we define `inverted_index[i]` as the inverted index of sorted distances:\n # inverted_index[i][ind_X[i]] = np.arange(1, n_sample + 1)\n inverted_index = np.zeros((n_samples, n_samples), dtype=int)\n ordered_indices = np.arange(n_samples + 1)\n inverted_index[ordered_indices[:-1, np.newaxis], ind_X] = ordered_indices[1:]\n ranks = (\n inverted_index[ordered_indices[:-1, np.newaxis], ind_X_embedded] - n_neighbors\n )\n t = np.sum(ranks[ranks > 0])\n t = 1.0 - t * (\n 2.0 / (n_samples * n_neighbors * (2.0 * n_samples - 3.0 * n_neighbors - 1.0))\n )\n return t\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 322, "n_words": 173, "vocab_size": 115, "complexity": 3, "nloc": 84, "token_counts": 228, "n_ast_nodes": 352, "n_identifiers": 32, "d_id": 75842, "documentation": { "docstring": "Expresses to what extent the local structure is retained.\n\n The trustworthiness is within [0, 1]. It is defined as\n\n .. math::\n\n T(k) = 1 - \\frac{2}{nk (2n - 3k - 1)} \\sum^n_{i=1}\n \\sum_{j \\in \\mathcal{N}_{i}^{k}} \\max(0, (r(i, j) - k))\n\n where for each sample i, :math:`\\mathcal{N}_{i}^{k}` are its k nearest\n neighbors in the output space, and every sample j is its :math:`r(i, j)`-th\n nearest neighbor in the input space. In other words, any unexpected nearest\n neighbors in the output space are penalised in proportion to their rank in\n the input space.\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_features) or (n_samples, n_samples)\n If the metric is 'precomputed' X must be a square distance\n matrix. Otherwise it contains a sample per row.\n\n X_embedded : ndarray of shape (n_samples, n_components)\n Embedding of the training data in low-dimensional space.\n\n n_neighbors : int, default=5\n The number of neighbors that will be considered. Should be fewer than\n `n_samples / 2` to ensure the trustworthiness to lies within [0, 1], as\n mentioned in [1]_. An error will be raised otherwise.\n\n metric : str or callable, default='euclidean'\n Which metric to use for computing pairwise distances between samples\n from the original input space. If metric is 'precomputed', X must be a\n matrix of pairwise distances or squared distances. Otherwise, for a list\n of available metrics, see the documentation of argument metric in\n `sklearn.pairwise.pairwise_distances` and metrics listed in\n `sklearn.metrics.pairwise.PAIRWISE_DISTANCE_FUNCTIONS`. Note that the\n \"cosine\" metric uses :func:`~sklearn.metrics.pairwise.cosine_distances`.\n\n .. 
versionadded:: 0.20\n\n Returns\n -------\n trustworthiness : float\n Trustworthiness of the low-dimensional embedding.\n\n References\n ----------\n .. [1] Jarkko Venna and Samuel Kaski. 2001. Neighborhood\n Preservation in Nonlinear Projection Methods: An Experimental Study.\n In Proceedings of the International Conference on Artificial Neural Networks\n (ICANN '01). Springer-Verlag, Berlin, Heidelberg, 485-491.\n\n .. [2] Laurens van der Maaten. Learning a Parametric Embedding by Preserving\n Local Structure. Proceedings of the Twelth International Conference on\n Artificial Intelligence and Statistics, PMLR 5:384-391, 2009.\n ", "n_words": 314, "vocab_size": 202, "n_whitespaces": 550, "language": "en" } }, { "id": 221197, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/bz2.py", "file_name": "bz2.py", "fun_name": "writelines", "commit_message": "add python 3.10.4 for windows", "code": "def writelines(self, seq):\n \n return _compression.BaseStream.writelines(self, seq)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 19, "n_ast_nodes": 31, "n_identifiers": 5, "d_id": 56267, "documentation": { "docstring": "Write a sequence of byte strings to the file.\n\n Returns the number of uncompressed bytes written.\n seq can be any iterable yielding byte strings.\n\n Line separators are not added between the written byte strings.\n ", "n_words": 34, "vocab_size": 28, "n_whitespaces": 62, "language": "en" } }, { "id": 170653, "commit_id": "76923d7b58d8f25329e779a40b87e2b6959f9cea", "repo": "pandas", "path": "pandas/tests/indexes/interval/test_constructors.py", "file_name": "test_constructors.py", "fun_name": "test_generic_errors", "commit_message": "issue 48855 enable pylint unnecessary-pass (#49418)\n\nissue 48855 enable unnecessary-pass", "code": "def test_generic_errors(self, constructor):\n \n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 10, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 2, "token_counts": 9, "n_ast_nodes": 15, "n_identifiers": 3, "d_id": 40592, "documentation": { "docstring": "\n override the base class implementation since errors are handled\n differently; checks unnecessary since caught at the Interval level\n ", "n_words": 18, "vocab_size": 16, "n_whitespaces": 40, "language": "en" } }, { "id": 69222, "commit_id": "58d430fe3ee62e93ad8d16a08bb42156a25b7d41", "repo": "erpnext", "path": "erpnext/assets/doctype/asset_capitalization/test_asset_capitalization.py", "file_name": "test_asset_capitalization.py", "fun_name": "get_actual_gle_dict", "commit_message": "feat: Asset Capitalization\n- manual selection of entry type\n- GLE cleanup with smaller functions\n- GLE considering periodical inventory\n- test cases", "code": "def get_actual_gle_dict(name):\n\treturn dict(\n\t\tfrappe.db.sql(\n\t\t\t,\n\t\t\tname,\n\t\t)\n\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 2, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 13, "token_counts": 20, "n_ast_nodes": 33, "n_identifiers": 6, "d_id": 14997, "documentation": { "docstring": "\n\t\tselect account, sum(debit-credit) as diff\n\t\tfrom `tabGL Entry`\n\t\twhere voucher_type = 'Asset 
Capitalization' and voucher_no = %s\n\t\tgroup by account\n\t\thaving diff != 0\n\t", "n_words": 24, "vocab_size": 22, "n_whitespaces": 19, "language": "en" } }, { "id": 191181, "commit_id": "0e845259cd3d49b39889ae15df19922af0ef7269", "repo": "thumbor", "path": "tests/conftest.py", "file_name": "conftest.py", "fun_name": "doctor_output_no_config", "commit_message": "Remove snapshottest to reduce number of dependencies (#1433)\n\nHaving an extra package that can be replaced with something already\r\nincluded makes packaging easier. For instance, in Debian, one would have\r\nto either be fortunate to find an existing package or go over the\r\ntrouble of creating such package and all its dependencies.\r\n\r\nI believe this CL is a good small compromise considering the benefit it\r\nbrings.", "code": "def doctor_output_no_config():\n return \n", "url": "https://github.com/thumbor/thumbor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 6, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 38, "token_counts": 6, "n_ast_nodes": 13, "n_identifiers": 1, "d_id": 46464, "documentation": { "docstring": "\nThumbor doctor will analyze your install and verify if everything is working as expected.\n\nVerifying libraries support...\n\n✅ pycurl is installed correctly.\n✅ cairosvg is installed correctly.\n\nVerifying thumbor compiled extensions...\n\n✅ _alpha\n✅ _bounding_box\n✅ _brightness\n✅ _colorize\n✅ _composite\n✅ _contrast\n✅ _convolution\n✅ _curve\n✅ _equalize\n✅ _fill\n✅ _nine_patch\n✅ _noise\n✅ _rgb\n✅ _round_corner\n✅ _saturation\n✅ _sharpen\n\nVerifying extension programs...\n\n✅ jpegtran is installed correctly.\n✅ ffmpeg is installed correctly.\n✅ gifsicle is installed correctly.\nVerifying security...\n\n\n🎉 Congratulations! No errors found! 🎉\n", "n_words": 89, "vocab_size": 52, "n_whitespaces": 62, "language": "en" } }, { "id": 177150, "commit_id": "7f3ec2c5906b709733a5c26285032bf24134bcf0", "repo": "networkx", "path": "networkx/drawing/nx_pylab.py", "file_name": "nx_pylab.py", "fun_name": "draw", "commit_message": "See matplotlb 3.6rc1 failure (#5937)\n\n* See matplotlb 3.6rc1 failure\r\n\r\n* replace use of private class method to allow mpl v3.6 to work.\r\n\r\n* ensure ax exists before calling colorbar\r\n\r\n* Undo matplotlib pin\r\n\r\nCo-authored-by: Dan Schult ", "code": "def draw(G, pos=None, ax=None, **kwds):\n \n import matplotlib.pyplot as plt\n\n if ax is None:\n cf = plt.gcf()\n else:\n cf = ax.get_figure()\n cf.set_facecolor(\"w\")\n if ax is None:\n if cf.axes:\n ax = cf.gca()\n else:\n ax = cf.add_axes((0, 0, 1, 1))\n\n if \"with_labels\" not in kwds:\n kwds[\"with_labels\"] = \"labels\" in kwds\n\n draw_networkx(G, pos=pos, ax=ax, **kwds)\n ax.set_axis_off()\n plt.draw_if_interactive()\n return\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 144, "n_words": 54, "vocab_size": 39, "complexity": 5, "nloc": 18, "token_counts": 125, "n_ast_nodes": 206, "n_identifiers": 18, "d_id": 42290, "documentation": { "docstring": "Draw the graph G with Matplotlib.\n\n Draw the graph as a simple representation with no node\n labels or edge labels and using the full Matplotlib figure area\n and no axis labels by default. 
See draw_networkx() for more\n full-featured drawing that allows title, axis labels etc.\n\n Parameters\n ----------\n G : graph\n A networkx graph\n\n pos : dictionary, optional\n A dictionary with nodes as keys and positions as values.\n If not specified a spring layout positioning will be computed.\n See :py:mod:`networkx.drawing.layout` for functions that\n compute node positions.\n\n ax : Matplotlib Axes object, optional\n Draw the graph in specified Matplotlib axes.\n\n kwds : optional keywords\n See networkx.draw_networkx() for a description of optional keywords.\n\n Examples\n --------\n >>> G = nx.dodecahedral_graph()\n >>> nx.draw(G)\n >>> nx.draw(G, pos=nx.spring_layout(G)) # use spring layout\n\n See Also\n --------\n draw_networkx\n draw_networkx_nodes\n draw_networkx_edges\n draw_networkx_labels\n draw_networkx_edge_labels\n\n Notes\n -----\n This function has the same name as pylab.draw and pyplot.draw\n so beware when using `from networkx import *`\n\n since you might overwrite the pylab.draw function.\n\n With pyplot use\n\n >>> import matplotlib.pyplot as plt\n >>> G = nx.dodecahedral_graph()\n >>> nx.draw(G) # networkx draw()\n >>> plt.draw() # pyplot draw()\n\n Also see the NetworkX drawing examples at\n https://networkx.org/documentation/latest/auto_examples/index.html\n ", "n_words": 190, "vocab_size": 118, "n_whitespaces": 348, "language": "en" } }, { "id": 176190, "commit_id": "5dfd57af2a141a013ae3753e160180b82bec9469", "repo": "networkx", "path": "networkx/linalg/graphmatrix.py", "file_name": "graphmatrix.py", "fun_name": "adjacency_matrix", "commit_message": "Use scipy.sparse array datastructure (#5139)\n\n* Step 1: use sparse arrays in nx.to_scipy_sparse_matrix.\r\n\r\nSeems like a reasonable place to start.\r\nnx.to_scipy_sparse_matrix is one of the primary interfaces to\r\nscipy.sparse from within NetworkX.\r\n\r\n* 1: Use np.outer instead of mult col/row vectors\r\n\r\nFix two instances in modularitymatrix where a new 2D array was being\r\ncreated via an outer product of two \\\"vectors\\\".\r\n\r\nIn the matrix case, this was a row vector \\* a column vector. In the\r\narray case this can be disambiguated by being explicit with np.outer.\r\n\r\n* Update _transition_matrix in laplacianmatrix module\r\n\r\n - A few instances of matrix multiplication operator\r\n - Add np.newaxis + transpose to get shape right for broadcasting\r\n - Explicitly convert e.g. 
sp.sparse.spdiags to a csr_array.\r\n\r\n* Update directed_combinitorial_laplacian w/ sparse array.\r\n\r\n - Wrap spdiags in csr_array and update matmul operators.\r\n\r\n* Rm matrix-specific code from lgc and hmn modules\r\n\r\n - Replace .A call with appropriate array semantics\r\n - wrap sparse.diags in csr_array.\r\n\r\n* Change hits to use sparse array semantics.\r\n\r\n - Replace * with @\r\n - Remove superfluous calls to flatten.\r\n\r\n* Update sparse matrix usage in layout module.\r\n - Simplify lil.getrowview call\r\n - Wrap spdiags in csr_array.\r\n\r\n* lil_matrix -> lil_array in graphmatrix.py.\r\n\r\n* WIP: Start working on algebraic connectivity module.\r\n\r\n* Incorporate auth mat varname feedback.\r\n\r\n* Revert 1D slice and comment for 1D sparse future.\r\n\r\n* Add TODOs: rm csr_array wrapper around spdiags etc.\r\n\r\n* WIP: cleanup algebraicconn: tracemin_fiedler.\r\n\r\n* Typo.\r\n\r\n* Finish reviewing algebraicconnectivity.\r\n\r\n* Convert bethe_hessian matrix to use sparse arrays.\r\n\r\n* WIP: update laplacian.\r\n\r\nUpdate undirected laplacian functions.\r\n\r\n* WIP: laplacian - add comment about _transition_matrix return types.\r\n\r\n* Finish laplacianmatrix review.\r\n\r\n* Update attrmatrix.\r\n\r\n* Switch to official laplacian function.\r\n\r\n* Update pagerank to use sparse array.\r\n\r\n* Switch bipartite matrix to sparse arrays.\r\n\r\n* Check from_scipy_sparse_matrix works with arrays.\r\n\r\nModifies test suite.\r\n\r\n* Apply changes from review.\r\n\r\n* Fix failing docstring tests.\r\n\r\n* Fix missing axis for in-place multiplication.\r\n\r\n* Use scipy==1.8rc2\r\n\r\n* Use matrix multiplication\r\n\r\n* Fix PyPy CI\r\n\r\n* [MRG] Create plot_subgraphs.py example (#5165)\r\n\r\n* Create plot_subgraphs.py\r\n\r\nhttps://github.com/networkx/networkx/issues/4220\r\n\r\n* Update plot_subgraphs.py\r\n\r\nblack\r\n\r\n* Update plot_subgraphs.py\r\n\r\nlint plus font_size\r\n\r\n* Update plot_subgraphs.py\r\n\r\nadded more plots\r\n\r\n* Update plot_subgraphs.py\r\n\r\nremoved plots from the unit test and added comments\r\n\r\n* Update plot_subgraphs.py\r\n\r\nlint\r\n\r\n* Update plot_subgraphs.py\r\n\r\ntypos fixed\r\n\r\n* Update plot_subgraphs.py\r\n\r\nadded nodes to the plot of the edges removed that was commented out for whatever reason\r\n\r\n* Update plot_subgraphs.py\r\n\r\nrevert the latest commit - the line was commented out for a reason - it's broken\r\n\r\n* Update plot_subgraphs.py\r\n\r\nfixed node color issue\r\n\r\n* Update plot_subgraphs.py\r\n\r\nformat fix\r\n\r\n* Update plot_subgraphs.py\r\n\r\nforgot to draw the nodes...
now fixed\r\n\r\n* Fix sphinx warnings about heading length.\r\n\r\n* Update examples/algorithms/plot_subgraphs.py\r\n\r\n* Update examples/algorithms/plot_subgraphs.py\r\n\r\nCo-authored-by: Ross Barnowski \r\nCo-authored-by: Dan Schult \r\n\r\n* Add traveling salesman problem to example gallery (#4874)\r\n\r\nAdds an example of the using Christofides to solve the TSP problem to the example galery.\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Fixed inconsistent documentation for nbunch parameter in DiGraph.edges() (#5037)\r\n\r\n* Fixed inconsistent documentation for nbunch parameter in DiGraph.edges()\r\n\r\n* Resolved Requested Changes\r\n\r\n* Revert changes to degree docstrings.\r\n\r\n* Update comments in example.\r\n\r\n* Apply wording to edges method in all graph classes.\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Compatibility updates from testing with numpy/scipy/pytest rc's (#5226)\r\n\r\n* Rm deprecated scipy subpkg access.\r\n\r\n* Use recwarn fixture in place of deprecated pytest pattern.\r\n\r\n* Rm unnecessary try/except from tests.\r\n\r\n* Replace internal `close` fn with `math.isclose`. (#5224)\r\n\r\n* Replace internal close fn with math.isclose.\r\n\r\n* Fix lines in docstring examples.\r\n\r\n* Fix Python 3.10 deprecation warning w/ int div. (#5231)\r\n\r\n* Touchups and suggestions for subgraph gallery example (#5225)\r\n\r\n* Simplify construction of G with edges rm'd\r\n\r\n* Rm unused graph attribute.\r\n\r\n* Shorten categorization by node type.\r\n\r\n* Simplify node coloring.\r\n\r\n* Simplify isomorphism check.\r\n\r\n* Rm unit test.\r\n\r\n* Rm redundant plotting of each subgraph.\r\n\r\n* Use new package name (#5234)\r\n\r\n* Allowing None edges in weight function of bidirectional Dijkstra (#5232)\r\n\r\n* added following feature also to bidirectional dijkstra: The weight function can be used to hide edges by returning None.\r\n\r\n* changed syntax for better readability and code duplicate avoidance\r\n\r\nCo-authored-by: Hohmann, Nikolas \r\n\r\n* Add an FAQ about assigning issues. (#5182)\r\n\r\n* Add FAQ about assigning issues.\r\n\r\n* Add note about linking issues from new PRs.\r\n\r\n* Update dev deps (#5243)\r\n\r\n* Update minor doc issues with tex notation (#5244)\r\n\r\n* Add FutureWarnings to fns that return sparse matrices\r\n\r\n - biadjacency_matrix.\r\n - bethe_hessian_matrix.\r\n - incidence_matrix.\r\n - laplacian functions.\r\n - modularity_matrix functions.\r\n - adjacency_matrix.\r\n\r\n* Add to_scipy_sparse_array and use it everywhere.\r\n\r\nAdd a new conversion function to preserve array semantics internally\r\nwhile not altering behavior for users.\r\n\r\nAlso adds FutureWarning to to_scipy_sparse_matrix.\r\n\r\n* Add from_scipy_sparse_array.
Supercedes from_scipy_sparse_matrix.\r\n\r\n* Handle deprecations in separate PR.\r\n\r\n* Fix docstring examples.\r\n\r\nCo-authored-by: Mridul Seth \r\n\r\nCo-authored-by: Jarrod Millman \r\nCo-authored-by: Andrew Knyazev \r\nCo-authored-by: Dan Schult \r\nCo-authored-by: eskountis <56514439+eskountis@users.noreply.github.com>\r\nCo-authored-by: Anutosh Bhat <87052487+anutosh491@users.noreply.github.com>\r\nCo-authored-by: NikHoh \r\nCo-authored-by: Hohmann, Nikolas \r\nCo-authored-by: Sultan Orazbayev \r\nCo-authored-by: Mridul Seth ", "code": "def adjacency_matrix(G, nodelist=None, dtype=None, weight=\"weight\"):\n \n import warnings\n\n warnings.warn(\n \"adjacency_matrix will return a scipy.sparse array instead of a matrix in Networkx 3.0.\",\n FutureWarning,\n stacklevel=2,\n )\n # TODO: Change to `to_scipy_sparse_array` for networkx 3.0\n return nx.to_scipy_sparse_matrix(G, nodelist=nodelist, dtype=dtype, weight=weight)\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 76, "n_words": 37, "vocab_size": 35, "complexity": 1, "nloc": 8, "token_counts": 52, "n_ast_nodes": 81, "n_identifiers": 11, "d_id": 41756, "documentation": { "docstring": "Returns adjacency matrix of G.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in nodelist.\n If nodelist is None, then the ordering is produced by G.nodes().\n\n dtype : NumPy data-type, optional\n The desired data-type for the array.\n If None, then the NumPy default is used.\n\n weight : string or None, optional (default='weight')\n The edge data key used to provide each value in the matrix.\n If None, then each edge has weight 1.\n\n Returns\n -------\n A : SciPy sparse matrix\n Adjacency matrix representation of G.\n\n Notes\n -----\n For directed graphs, entry i,j corresponds to an edge from i to j.\n\n If you want a pure Python adjacency matrix representation try\n networkx.convert.to_dict_of_dicts which will return a\n dictionary-of-dictionaries format that can be addressed as a\n sparse matrix.\n\n For MultiGraph/MultiDiGraph with parallel edges the weights are summed.\n See `to_numpy_array` for other options.\n\n The convention used for self-loop edges in graphs is to assign the\n diagonal matrix entry value to the edge weight attribute\n (or the number 1 if the edge has no weight attribute). If the\n alternate convention of doubling the edge weight is desired the\n resulting Scipy sparse matrix can be modified as follows:\n\n >>> G = nx.Graph([(1, 1)])\n >>> A = nx.adjacency_matrix(G)\n >>> print(A.todense())\n [[1]]\n >>> A.setdiag(A.diagonal() * 2)\n >>> print(A.todense())\n [[2]]\n\n See Also\n --------\n to_numpy_array\n to_scipy_sparse_array\n to_dict_of_dicts\n adjacency_spectrum\n ", "n_words": 231, "vocab_size": 137, "n_whitespaces": 392, "language": "en" } }, { "id": 247584, "commit_id": "605d161d7d585847fd1bb98d14d5281daeac8e86", "repo": "synapse", "path": "tests/util/test_rwlock.py", "file_name": "test_rwlock.py", "fun_name": "test_cancellation_while_holding_read_lock", "commit_message": "Add cancellation support to `ReadWriteLock` (#12120)\n\nAlso convert `ReadWriteLock` to use async context managers.\r\n\r\nSigned-off-by: Sean Quah ", "code": "def test_cancellation_while_holding_read_lock(self):\n \n rwlock = ReadWriteLock()\n key = \"key\"\n\n # 1. 
A reader takes the lock and blocks.\n reader_d, _, _ = self._start_blocking_reader(rwlock, key, \"read completed\")\n\n # 2. A writer waits for the reader to complete.\n writer_d, _ = self._start_nonblocking_writer(rwlock, key, \"write completed\")\n self.assertFalse(writer_d.called)\n\n # 3. The reader is cancelled.\n reader_d.cancel()\n self.failureResultOf(reader_d, CancelledError)\n\n # 4. The writer should take the lock and complete.\n self.assertTrue(\n writer_d.called, \"Writer is stuck waiting for a cancelled reader\"\n )\n self.assertEqual(\"write completed\", self.successResultOf(writer_d))\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 192, "n_words": 76, "vocab_size": 55, "complexity": 1, "nloc": 12, "token_counts": 88, "n_ast_nodes": 152, "n_identifiers": 18, "d_id": 71759, "documentation": { "docstring": "Test cancellation while holding a read lock.\n\n A waiting writer should be given the lock when the reader holding the lock is\n cancelled.\n ", "n_words": 23, "vocab_size": 19, "n_whitespaces": 44, "language": "en" } }, { "id": 172101, "commit_id": "bce995817caf00ab5e82cb4cf1b540f1530cf4ea", "repo": "pandas", "path": "pandas/core/dtypes/inference.py", "file_name": "inference.py", "fun_name": "is_re", "commit_message": "Fix some dosctring RT02 error (#50197)", "code": "def is_re(obj) -> bool:\n \n return isinstance(obj, Pattern)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 21, "token_counts": 15, "n_ast_nodes": 26, "n_identifiers": 5, "d_id": 40755, "documentation": { "docstring": "\n Check if the object is a regex pattern instance.\n\n Parameters\n ----------\n obj : The object to check\n\n Returns\n -------\n bool\n Whether `obj` is a regex pattern.\n\n Examples\n --------\n >>> is_re(re.compile(\".*\"))\n True\n >>> is_re(\"foo\")\n False\n ", "n_words": 34, "vocab_size": 29, "n_whitespaces": 84, "language": "en" } }, { "id": 260558, "commit_id": "d942600e1f1979c431c24f59933a95155789f324", "repo": "scikit-learn", "path": "sklearn/multioutput.py", "file_name": "multioutput.py", "fun_name": "partial_fit", "commit_message": "MAINT add parameter_constraints for MultiOutputClassifier and MultiOutputRegressor (#23902)\n\nCo-authored-by: jeremiedbb ", "code": "def partial_fit(self, X, y, classes=None, sample_weight=None):\n \n first_time = not hasattr(self, \"estimators_\")\n\n if first_time:\n self._validate_params()\n\n y = self._validate_data(X=\"no_validation\", y=y, multi_output=True)\n\n if y.ndim == 1:\n raise ValueError(\n \"y must have at least two dimensions for \"\n \"multi-output regression but has only one.\"\n )\n\n if sample_weight is not None and not has_fit_parameter(\n self.estimator, \"sample_weight\"\n ):\n raise ValueError(\"Underlying estimator does not support sample weights.\")\n\n self.estimators_ = Parallel(n_jobs=self.n_jobs)(\n delayed(_partial_fit_estimator)(\n self.estimators_[i] if not first_time else self.estimator,\n X,\n y[:, i],\n classes[i] if classes is not None else None,\n sample_weight,\n first_time,\n )\n for i in range(y.shape[1])\n )\n\n if first_time and hasattr(self.estimators_[0], \"n_features_in_\"):\n self.n_features_in_ = self.estimators_[0].n_features_in_\n if first_time and hasattr(self.estimators_[0], \"feature_names_in_\"):\n self.feature_names_in_ = 
self.estimators_[0].feature_names_in_\n\n return self\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 421, "n_words": 107, "vocab_size": 77, "complexity": 12, "nloc": 30, "token_counts": 214, "n_ast_nodes": 332, "n_identifiers": 25, "d_id": 76339, "documentation": { "docstring": "Incrementally fit a separate model for each class output.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input data.\n\n y : {array-like, sparse matrix} of shape (n_samples, n_outputs)\n Multi-output targets.\n\n classes : list of ndarray of shape (n_outputs,), default=None\n Each array is unique classes for one output in str/int.\n Can be obtained via\n ``[np.unique(y[:, i]) for i in range(y.shape[1])]``, where `y`\n is the target matrix of the entire dataset.\n This argument is required for the first call to partial_fit\n and can be omitted in the subsequent calls.\n Note that `y` doesn't need to contain all labels in `classes`.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights. If `None`, then samples are equally weighted.\n Only supported if the underlying regressor supports sample\n weights.\n\n Returns\n -------\n self : object\n Returns a fitted instance.\n ", "n_words": 136, "vocab_size": 100, "n_whitespaces": 349, "language": "en" } }, { "id": 281541, "commit_id": "82747072c511beb1b2672846ae2ee4aec53eb562", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/stocks/due_diligence/dd_controller.py", "file_name": "dd_controller.py", "fun_name": "print_help", "commit_message": "Terminal Wide Rich (#1161)\n\n* My idea for how we handle Rich moving forward\r\n\r\n* remove independent consoles\r\n\r\n* FIxed pylint issues\r\n\r\n* add a few vars\r\n\r\n* Switched print to console\r\n\r\n* More transitions\r\n\r\n* Changed more prints\r\n\r\n* Replaced all prints\r\n\r\n* Fixing tabulate\r\n\r\n* Finished replace tabulate\r\n\r\n* Finished removing rich from Tabulate\r\n\r\n* add Panel around menu\r\n\r\n* add GST watermark under feature flag\r\n\r\n* Fixed 46 tests\r\n\r\n* Delete test_screener[False].yaml\r\n\r\n* Delete test_screener[True].yaml\r\n\r\n* Fixed the rest of the tests\r\n\r\n* add help and source color vars and use rgb\r\n\r\n* rich on stocks/options\r\n\r\n* update rich on disc, dps, sia\r\n\r\n* rich in gov, ins and scr menus\r\n\r\n* ba and ca menus with rich\r\n\r\n* Fixed import issue\r\n\r\n* Fixed some tests\r\n\r\n* removed termcolor\r\n\r\n* Removed prettytable\r\n\r\n* add rich to remaining stocks menus\r\n\r\n* FIxed linting issue\r\n\r\n* Added James' changes\r\n\r\n* Updated dependencies\r\n\r\n* Add rich to cryptocurrency menu\r\n\r\n* refactor economy and forex\r\n\r\n* refactor etf with rich\r\n\r\n* refactor mfunds\r\n\r\n* refactor rich rest\r\n\r\n* not specify style so default color works well on any background\r\n\r\n* Fixing mypy issues\r\n\r\n* Updated tests\r\n\r\n* More test fixes\r\n\r\n* James' test fixes\r\n\r\n* Updating tests : stocks/screener - fix cassettes using BR\r\n\r\n* Updating tests : crypto\r\n\r\n* Updating tests : disable DEBUG_MODE\r\n\r\n* Updating tests : stocks/fa/yfinance\r\n\r\n* minor fixes that escape\r\n\r\n* Improve the rich table function (that replaces tabulate :D )\r\n\r\n* Fixed bad code\r\n\r\n* delete rogue file + dcf fix + NoConsole\r\n\r\n* sia mypy\r\n\r\n* fuck you linter\r\n\r\n* fuck you linter pt 2\r\n\r\n* skip hehe\r\n\r\n* i hate the 
black linter\r\n\r\n* ubuntu mypy attempt\r\n\r\n* Update : rich_config + gtff\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : stocks\r\n\r\n* Update : rich_config\r\n\r\n* Updating : rich_config\r\n\r\n* make panel configurable for Theodore :b\r\n\r\n* colors update\r\n\r\n* Merged\r\n\r\n* Updating : rich_config + feature_flags\r\n\r\n* Updating : rich_config\r\n\r\n* Updating tests : stocks\r\n\r\n* Updating : feature_flags\r\n\r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: james \r\nCo-authored-by: jose-donato ", "code": "def print_help(self):\n \n help_text = f\n console.print(text=help_text, menu=\"Stocks - Due Diligence\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 22, "token_counts": 22, "n_ast_nodes": 47, "n_identifiers": 8, "d_id": 83839, "documentation": { "docstring": "Print help\n[param]Ticker: [/param]{self.ticker}[cmds]\n\n[src][Finviz][/src]\n analyst analyst prices and ratings of the company\n[src][FMP][/src]\n rating rating over time (daily)\n[src][Finnhub][/src]\n rot number of analysts ratings over time (monthly)\n[src][Business Insider][/src]\n pt price targets over time\n est quarter and year analysts earnings estimates\n[src][Market Watch][/src]\n sec SEC filings\n[src][Csimarket][/src]\n supplier list of suppliers\n customer list of customers\n[src][Cathiesark.com][/src]\n arktrades get ARK trades for ticker[/cmds]\n ", "n_words": 63, "vocab_size": 50, "n_whitespaces": 157, "language": "en" } }, { "id": 72424, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/views/mixins.py", "file_name": "mixins.py", "fun_name": "get_preprocess_function", "commit_message": "Reformat with black", "code": "def get_preprocess_function(self, field, value, export_format):\n \n\n # Try to find a field specific function and return it\n format_dict = self.custom_field_preprocess.get(field, {})\n if export_format in format_dict:\n return format_dict[export_format]\n\n # Otherwise check for a value class specific function\n for value_classes, format_dict in self.custom_value_preprocess.items():\n if isinstance(value, value_classes) and export_format in format_dict:\n return format_dict[export_format]\n\n # Finally resort to force_str to prevent encoding errors\n return force_str\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 153, "n_words": 60, "vocab_size": 40, "complexity": 5, "nloc": 8, "token_counts": 67, "n_ast_nodes": 105, "n_identifiers": 13, "d_id": 15891, "documentation": { "docstring": "Returns the preprocessing function for a given field name, field value, and export format", "n_words": 14, "vocab_size": 13, "n_whitespaces": 13, "language": "en" } }, { "id": 80659, "commit_id": "799968460d4794bcd9959f57a2b97846b9a00bb7", "repo": "awx", "path": "awx/main/utils/common.py", "file_name": "common.py", "fun_name": "get_corrected_cpu", "commit_message": "Fixup conversion of memory and cpu settings to support k8s resource request format (#11725)\n\nfix memory and cpu settings to suport k8s resource request format\r\n\r\n* fix conversion of memory setting to bytes\r\n\r\nThis setting has not been getting set by default, and needed some fixing\r\nup to be compatible with setting the memory in the same way as we 
set it\r\nin the operator, as well as with other changes from last year which\r\nassume that ansible runner is returning memory in bytes.\r\n\r\nThis way we can start setting this setting in the operator, and get a\r\nmore accurate reflection of how much memory is available to the control\r\npod in k8s.\r\n\r\nOn platforms where services are all sharing memory, we deduct a\r\npenalty from the memory available. On k8s we don't need to do this\r\nbecause the web, redis, and task containers each have memory\r\nallocated to them.\r\n\r\n* Support CPU setting expressed in units used by k8s\r\n\r\nThis setting has not been getting set by default, and needed some fixing\r\nup to be compatible with setting the CPU resource request/limits in the\r\nsame way as we set it in the resource requests/limits.\r\n\r\nThis way we can start setting this setting in the\r\noperator, and get a more accurate reflection of how much cpu is\r\navailable to the control pod in k8s.\r\n\r\nBecause cpu on k8s can be partial cores, migrate cpu field to decimal.\r\n\r\nk8s does not allow granularity of less than 100m (equivalent to 0.1 cores), so only\r\nstore up to 1 decimal place.\r\n\r\nfix analytics to deal with decimal cpu\r\n\r\nneed to use DjangoJSONEncoder when Decimal fields in data passed to\r\njson.dumps", "code": "def get_corrected_cpu(cpu_count): # formerlly get_cpu_capacity\n \n from django.conf import settings\n\n settings_abscpu = getattr(settings, 'SYSTEM_TASK_ABS_CPU', None)\n env_abscpu = os.getenv('SYSTEM_TASK_ABS_CPU', None)\n\n if env_abscpu is not None:\n return convert_cpu_str_to_decimal_cpu(env_abscpu)\n elif settings_abscpu is not None:\n return convert_cpu_str_to_decimal_cpu(settings_abscpu)\n\n return cpu_count # no correction\n\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 74, "n_words": 37, "vocab_size": 27, "complexity": 3, "nloc": 9, "token_counts": 56, "n_ast_nodes": 94, "n_identifiers": 11, "d_id": 17088, "documentation": { "docstring": "Some environments will do a correction to the reported CPU number\n because the given OpenShift value is a lie\n ", "n_words": 19, "vocab_size": 17, "n_whitespaces": 25, "language": "en" } }, { "id": 138043, "commit_id": "edb17fd2069844f12237c85ba6607afae536401d", "repo": "ray", "path": "python/ray/air/execution/resources/resource_manager.py", "file_name": "resource_manager.py", "fun_name": "clear", "commit_message": "[air/tune] Internal resource management 1 - Ray AIR resource manager implementation (#30777)\n\nPrerequisite to #30016\r\n\r\nThis PR adds a new Ray AIR resource manager to replace the PlacementGroupManager of Ray Tune. 
Details can be found in #30016.\r\n\r\nSpecifically, this PR\r\n- Adds the main resource manager abstractions\r\n- Renames (and moves) PlacementGroupFactory to ResourceRequest\r\n- Adds implementations and tests for a placement group based manager and a budget based manager\r\n\r\nSigned-off-by: Kai Fricke \r\nSigned-off-by: Kai Fricke \r\nCo-authored-by: matthewdeng ", "code": "def clear(self):\n \n raise NotImplementedError\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 8, "n_ast_nodes": 16, "n_identifiers": 3, "d_id": 31286, "documentation": { "docstring": "Reset internal state and clear all resources.\n\n Calling this method will reset the resource manager to its initialization state.\n All resources will be removed.\n\n Clearing the state will remove tracked resources from the manager, but there are\n no guarantees about the tasks and actors scheduled on the resources. The caller\n should make sure that any references to tasks or actors scheduled on the\n resources have been removed before calling ``clear()``.\n ", "n_words": 70, "vocab_size": 53, "n_whitespaces": 119, "language": "en" } }, { "id": 219598, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_osx_support.py", "file_name": "_osx_support.py", "fun_name": "_supports_universal_builds", "commit_message": "add python 3.10.4 for windows", "code": "def _supports_universal_builds():\n \n # As an approximation, we assume that if we are running on 10.4 or above,\n # then we are running with an Xcode environment that supports universal\n # builds, in particular -isysroot and -arch arguments to the compiler. 
This\n # is in support of allowing 10.4 universal builds to run on 10.3.x systems.\n\n osx_version = _get_system_version_tuple()\n return bool(osx_version >= (10, 4)) if osx_version else False\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 88, "n_words": 67, "vocab_size": 51, "complexity": 2, "nloc": 3, "token_counts": 25, "n_ast_nodes": 46, "n_identifiers": 4, "d_id": 55636, "documentation": { "docstring": "Returns True if universal builds are supported on this system", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 195838, "commit_id": "337e5c51b1ae7e202b7d7c62107fab6d5ea58d93", "repo": "sympy", "path": "sympy/polys/specialpolys.py", "file_name": "specialpolys.py", "fun_name": "symmetric_poly", "commit_message": "Removed even more Python 2-support", "code": "def symmetric_poly(n, *gens, **args):\n \n # TODO: use an explicit keyword argument\n gens = _analyze_gens(gens)\n\n if n < 0 or n > len(gens) or not gens:\n raise ValueError(\"Cannot generate symmetric polynomial of order %s for %s\" % (n, gens))\n elif not n:\n poly = S.One\n else:\n poly = Add(*[Mul(*s) for s in subsets(gens, int(n))])\n\n if not args.get('polys', False):\n return poly\n else:\n return Poly(poly, *gens)\n\n\n@public", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "@public", "n_ast_errors": 1, "ast_levels": 18, "n_whitespaces": 122, "n_words": 64, "vocab_size": 52, "complexity": 7, "nloc": 12, "token_counts": 103, "n_ast_nodes": 174, "n_identifiers": 18, "d_id": 47432, "documentation": { "docstring": "Generates symmetric polynomial of order `n`.\n\n Returns a Poly object when ``polys=True``, otherwise\n (default) returns an expression.\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 26, "language": "en" } }, { "id": 111324, "commit_id": "1d34aa2b3dd1ba0931dcb1863dfbeba6ae5b912d", "repo": "spaCy", "path": "spacy/tests/test_cli.py", "file_name": "test_cli.py", "fun_name": "test_ensure_print_span_characteristics_wont_fail", "commit_message": "Add spacy-span-analyzer to debug data (#10668)\n\n* Rename to spans_key for consistency\r\n\r\n* Implement spans length in debug data\r\n\r\n* Implement how span bounds and spans are obtained\r\n\r\nIn this commit, I implemented how span boundaries (the tokens) around a\r\ngiven span and spans are obtained. I've put them in the compile_gold()\r\nfunction so that it's accessible later on. 
I will do the actual\r\ncomputation of the span and boundary distinctiveness in the main\r\nfunction above.\r\n\r\n* Compute for p_spans and p_bounds\r\n\r\n* Add computation for SD and BD\r\n\r\n* Fix mypy issues\r\n\r\n* Add weighted average computation\r\n\r\n* Fix compile_gold conditional logic\r\n\r\n* Add test for frequency distribution computation\r\n\r\n* Add tests for kl-divergence computation\r\n\r\n* Fix weighted average computation\r\n\r\n* Make tables more compact by rounding them\r\n\r\n* Add more descriptive checks for spans\r\n\r\n* Modularize span computation methods\r\n\r\nIn this commit, I added the _get_span_characteristics and\r\n_print_span_characteristics functions so that they can be reusable\r\nanywhere.\r\n\r\n* Remove unnecessary arguments and make fxs more compact\r\n\r\n* Update a few parameter arguments\r\n\r\n* Add tests for print_span and get_span methods\r\n\r\n* Update API to talk about span characteristics in brief\r\n\r\n* Add better reporting of spans_length\r\n\r\n* Add test for span length reporting\r\n\r\n* Update formatting of span length report\r\n\r\nRemoved '' to indicate that it's not a string, then\r\nsort the n-grams by their length, not by their frequency.\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Show all frequency distribution when -V\r\n\r\nIn this commit, I displayed the full frequency distribution of the\r\nspan lengths when --verbose is passed. To make things simpler, I\r\nrewrote some of the formatter functions so that I can call them\r\nwhenever.\r\n\r\nAnother notable change is that instead of showing percentages as\r\nIntegers, I showed them as floats (max 2-decimal places). I did this\r\nbecause it looks weird when it displays (0%).\r\n\r\n* Update logic on how total is computed\r\n\r\nThe way the 90% thresholding is computed now is that we keep\r\nadding the percentages until we reach >= 90%.
I also updated the wording\r\nand used the term \"At least\" to denote that >= 90% of your spans have\r\nthese distributions.\r\n\r\n* Fix display when showing the threshold percentage\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Add better phrasing for span information\r\n\r\n* Update spacy/cli/debug_data.py\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\n* Add minor edits for whitespaces etc.\r\n\r\nCo-authored-by: Adriane Boyd \r\n\r\nCo-authored-by: Adriane Boyd ", "code": "def test_ensure_print_span_characteristics_wont_fail():\n \n nlp = English()\n spans_key = \"sc\"\n\n pred = Doc(nlp.vocab, words=[\"Welcome\", \"to\", \"the\", \"Bank\", \"of\", \"China\", \".\"])\n pred.spans[spans_key] = [Span(pred, 3, 6, \"ORG\"), Span(pred, 5, 6, \"GPE\")]\n ref = Doc(nlp.vocab, words=[\"Welcome\", \"to\", \"the\", \"Bank\", \"of\", \"China\", \".\"])\n ref.spans[spans_key] = [Span(ref, 3, 6, \"ORG\"), Span(ref, 5, 6, \"GPE\")]\n eg = Example(pred, ref)\n\n examples = [eg]\n data = _compile_gold(examples, [\"spancat\"], nlp, True)\n span_characteristics = _get_span_characteristics(\n examples=examples, compiled_gold=data, spans_key=spans_key\n )\n _print_span_characteristics(span_characteristics)\n\n\n@pytest.mark.parametrize(\"threshold\", [70, 80, 85, 90, 95])", "url": "https://github.com/explosion/spaCy.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"threshold\", [70, 80, 85, 90, 95])", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 120, "n_words": 75, "vocab_size": 51, "complexity": 1, "nloc": 14, "token_counts": 172, "n_ast_nodes": 309, "n_identifiers": 23, "d_id": 24375, "documentation": { "docstring": "Test if interface between two methods aren't destroyed if refactored", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 61395, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/vcs/versioncontrol.py", "file_name": "versioncontrol.py", "fun_name": "find_path_to_setup_from_repo_root", "commit_message": "upd; format", "code": "def find_path_to_setup_from_repo_root(location, repo_root):\n # type: (str, str) -> Optional[str]\n \n # find setup.py\n orig_location = location\n while not os.path.exists(os.path.join(location, 'setup.py')):\n last_location = location\n location = os.path.dirname(location)\n if location == last_location:\n # We've traversed up to the root of the filesystem without\n # finding setup.py\n logger.warning(\n \"Could not find setup.py for directory %s (tried all \"\n \"parent directories)\",\n orig_location,\n )\n return None\n\n if os.path.samefile(repo_root, location):\n return None\n\n return os.path.relpath(location, repo_root)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 217, "n_words": 68, "vocab_size": 51, "complexity": 4, "nloc": 15, "token_counts": 86, "n_ast_nodes": 145, "n_identifiers": 14, "d_id": 12543, "documentation": { "docstring": "\n Find the path to `setup.py` by searching up the filesystem from `location`.\n Return the path to `setup.py` relative to `repo_root`.\n Return None if `setup.py` is in `repo_root` or cannot be found.\n ", "n_words": 31, "vocab_size": 23, "n_whitespaces": 44, "language": "en" } }, { "id": 227305, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": 
"packages/python/plotly/plotly/graph_objs/_isosurface.py", "file_name": "_isosurface.py", "fun_name": "isomin", "commit_message": "switch to black .22", "code": "def isomin(self):\n \n return self[\"isomin\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 58978, "documentation": { "docstring": "\n Sets the minimum boundary for iso-surface plot.\n\n The 'isomin' property is a number and may be specified as:\n - An int or float\n\n Returns\n -------\n int|float\n ", "n_words": 26, "vocab_size": 26, "n_whitespaces": 78, "language": "en" } }, { "id": 101053, "commit_id": "c8122bc499afba4fcb99030e42e08bfb8d3a75e1", "repo": "faceswap", "path": "scripts/train.py", "file_name": "train.py", "fun_name": "_configure_matplotlib", "commit_message": "bugfix: Stop preview window from stealing focus", "code": "def _configure_matplotlib(cls):\n \n rcParams[\"keymap.fullscreen\"] = [k for k in rcParams[\"keymap.fullscreen\"] if k != \"f\"]\n rcParams[\"keymap.save\"] = [k for k in rcParams[\"keymap.save\"] if k != \"s\"]\n rcParams[\"keymap.home\"] = [k for k in rcParams[\"keymap.home\"] if k != \"r\"]\n rcParams[\"figure.raise_window\"] = False\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 73, "n_words": 38, "vocab_size": 17, "complexity": 7, "nloc": 5, "token_counts": 69, "n_ast_nodes": 123, "n_identifiers": 4, "d_id": 20490, "documentation": { "docstring": " Remove `F`, 'S' and 'R' from their default bindings and stop Matplotlib from stealing\n focus ", "n_words": 15, "vocab_size": 13, "n_whitespaces": 23, "language": "en" } }, { "id": 270352, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/distribute/distributed_training_utils_v1.py", "file_name": "distributed_training_utils_v1.py", "fun_name": "_per_replica_aggregate_batch", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _per_replica_aggregate_batch(strategy, batch_outs, model, mode):\n \n if strategy is not None and mode == ModeKeys.PREDICT:\n total_batch_outs = []\n for i in range(len(model.outputs)):\n num_replicas = strategy.num_replicas_in_sync\n nested_outs = batch_outs[\n i * num_replicas : i * num_replicas + num_replicas\n ]\n total_batch_outs.append(\n concat_along_batch_dimension(tf.nest.flatten(nested_outs))\n )\n return total_batch_outs\n return batch_outs\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 159, "n_words": 44, "vocab_size": 34, "complexity": 4, "nloc": 13, "token_counts": 80, "n_ast_nodes": 125, "n_identifiers": 20, "d_id": 80449, "documentation": { "docstring": "Aggregates the per-replica batch-level outputs from a distributed step.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 75173, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/images/tests/test_admin_views.py", "file_name": "test_admin_views.py", "fun_name": "test_get_bad_image", "commit_message": "Reformat with black", "code": "def test_get_bad_image(self):\n \n # Get\n response = self.client.get(\n reverse(\n \"wagtailimages:generate_url\", args=(self.image.id + 1, 
\"fill-800x600\")\n )\n )\n\n # Check response\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response[\"Content-Type\"], \"application/json\")\n\n # Check JSON\n self.assertJSONEqual(\n response.content.decode(),\n json.dumps(\n {\n \"error\": \"Cannot find image.\",\n }\n ),\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 225, "n_words": 36, "vocab_size": 30, "complexity": 1, "nloc": 16, "token_counts": 79, "n_ast_nodes": 137, "n_identifiers": 16, "d_id": 16373, "documentation": { "docstring": "\n This tests that the view gives a 404 response if a user attempts to use it with an image which doesn't exist\n ", "n_words": 22, "vocab_size": 21, "n_whitespaces": 37, "language": "en" } }, { "id": 101226, "commit_id": "5e73437be47f2410439a3c6716de96354e6a0c94", "repo": "faceswap", "path": "lib/align/detected_face.py", "file_name": "detected_face.py", "fun_name": "to_dict", "commit_message": "lib.align updates:\n - alignments.py\n - Add typed dicts for imported alignments\n - Explicitly check for presence of thumb value in alignments dict\n - linting\n - detected_face.py\n - Typing\n - Linting\n - Legacy support for pre-aligned face\n - Update dependencies to new property names", "code": "def to_dict(self, is_png=False) -> MaskAlignmentsFileDict:\n \n assert self._mask is not None\n affine_matrix = self.affine_matrix.tolist() if is_png else self.affine_matrix\n retval = MaskAlignmentsFileDict(mask=self._mask,\n affine_matrix=affine_matrix,\n interpolator=self.interpolator,\n stored_size=self.stored_size,\n stored_centering=self.stored_centering)\n logger.trace({k: v if k != \"mask\" else type(v) for k, v in retval.items()}) # type: ignore\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 241, "n_words": 42, "vocab_size": 37, "complexity": 4, "nloc": 24, "token_counts": 97, "n_ast_nodes": 149, "n_identifiers": 18, "d_id": 20646, "documentation": { "docstring": " Convert the mask to a dictionary for saving to an alignments file\n\n Parameters\n ----------\n is_png: bool\n ``True`` if the dictionary is being created for storage in a png header otherwise\n ``False``. Default: ``False``\n\n Returns\n -------\n dict:\n The :class:`Mask` for saving to an alignments file. 
Contains the keys ``mask``,\n ``affine_matrix``, ``interpolator``, ``stored_size``, ``stored_centering``\n ", "n_words": 52, "vocab_size": 41, "n_whitespaces": 146, "language": "en" } }, { "id": 218447, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/inspect.py", "file_name": "inspect.py", "fun_name": "getgeneratorlocals", "commit_message": "add python 3.10.4 for windows", "code": "def getgeneratorlocals(generator):\n \n\n if not isgenerator(generator):\n raise TypeError(\"{!r} is not a Python generator\".format(generator))\n\n frame = getattr(generator, \"gi_frame\", None)\n if frame is not None:\n return generator.gi_frame.f_locals\n else:\n return {}\n\n\n# ------------------------------------------------ coroutine introspection\n\nCORO_CREATED = 'CORO_CREATED'\nCORO_RUNNING = 'CORO_RUNNING'\nCORO_SUSPENDED = 'CORO_SUSPENDED'\nCORO_CLOSED = 'CORO_CLOSED'\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 74, "n_words": 43, "vocab_size": 33, "complexity": 3, "nloc": 8, "token_counts": 50, "n_ast_nodes": 115, "n_identifiers": 13, "d_id": 55315, "documentation": { "docstring": "\n Get the mapping of generator local variables to their current values.\n\n A dict is returned, with the keys the local variable names and values the\n bound values.", "n_words": 27, "vocab_size": 22, "n_whitespaces": 36, "language": "en" } }, { "id": 249451, "commit_id": "898fef2789c9b1a20ef53c7d588f536f51f0fe2f", "repo": "synapse", "path": "synapse/metrics/common_usage_metrics.py", "file_name": "common_usage_metrics.py", "fun_name": "setup", "commit_message": "Share some metrics between the Prometheus exporter and the phone home stats (#13671)", "code": "async def setup(self) -> None:\n \n await self._update_gauges()\n self._clock.looping_call(\n run_as_background_process,\n 5 * 60 * 1000,\n desc=\"common_usage_metrics_update_gauges\",\n func=self._update_gauges,\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 89, "n_words": 17, "vocab_size": 16, "complexity": 1, "nloc": 9, "token_counts": 39, "n_ast_nodes": 65, "n_identifiers": 8, "d_id": 72923, "documentation": { "docstring": "Keep the gauges for common usage metrics up to date.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 132073, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tune/automlboard/frontend/query.py", "file_name": "query.py", "fun_name": "query_trial", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def query_trial(request):\n \n trial_id = request.GET.get(\"trial_id\")\n trials = TrialRecord.objects.filter(trial_id=trial_id).order_by(\"-start_time\")\n if len(trials) == 0:\n resp = \"Unkonwn trial id %s.\\n\" % trials\n else:\n trial = trials[0]\n result = {\n \"trial_id\": trial.trial_id,\n \"job_id\": trial.job_id,\n \"trial_status\": trial.trial_status,\n \"start_time\": trial.start_time,\n \"end_time\": trial.end_time,\n \"params\": trial.params,\n }\n resp = json.dumps(result)\n return HttpResponse(resp, content_type=\"application/json;charset=utf-8\")\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 165, "n_words": 46, 
"vocab_size": 38, "complexity": 2, "nloc": 17, "token_counts": 111, "n_ast_nodes": 192, "n_identifiers": 23, "d_id": 29666, "documentation": { "docstring": "Rest API to query the trial info, with the given trial_id.\n\n The url pattern should be like this:\n\n curl http://:/query_trial?trial_id=\n\n The response may be:\n\n {\n \"app_url\": \"None\",\n \"trial_status\": \"TERMINATED\",\n \"params\": {'a': 1, 'b': 2},\n \"job_id\": \"asynchyperband_test\",\n \"end_time\": \"2018-07-19 20:49:44\",\n \"start_time\": \"2018-07-19 20:49:40\",\n \"trial_id\": \"2067R2ZD\",\n }\n ", "n_words": 45, "vocab_size": 42, "n_whitespaces": 112, "language": "en" } }, { "id": 88863, "commit_id": "abcccb3fe46fb8479687b77e8bce07dc5df13c90", "repo": "sentry", "path": "tests/sentry/event_manager/test_event_manager.py", "file_name": "test_event_manager.py", "fun_name": "test_transaction_outcome_accepted", "commit_message": "fix(event_manager): Emit TRANSACTION outcomes if metrics are disabled (#41607)\n\nIn #40507 we started to count transaction metrics in the `transaction`\r\ndata category and transaction events in the `transaction_indexed` data\r\ncategory. That PR missed that metrics extraction can be disabled, in\r\nwhich case the old behavior of counting events as `transaction` should\r\nremain. Relay already implemented this logic since getsentry/relay#1537\r\nbased on the metrics extraction flag.\r\n\r\nThis PR adds a feature check to the\r\n`organizations:transaction-metrics-extraction` feature, which is the\r\nsame feature flag used to control Relay's behavior. We also remove the\r\npreviously used option to sample a percentage of organizations into\r\nmetrics extraction.\r\n\r\nThe default for this feature remains off (`false`) until metrics\r\ncomponents have been added to all deployment targets including\r\nself-hosted.\r\n\r\nCo-authored-by: Matej Minar ", "code": "def test_transaction_outcome_accepted(self):\n \n\n manager = EventManager(\n make_event(\n transaction=\"wait\",\n contexts={\n \"trace\": {\n \"parent_span_id\": \"bce14471e0e9654d\",\n \"op\": \"foobar\",\n \"trace_id\": \"a0fa8803753e40fd8124b21eeb2986b5\",\n \"span_id\": \"bf5be759039ede9a\",\n }\n },\n spans=[],\n timestamp=iso_format(before_now(minutes=5)),\n start_timestamp=iso_format(before_now(minutes=5)),\n type=\"transaction\",\n platform=\"python\",\n )\n )\n manager.normalize()\n\n mock_track_outcome = mock.Mock()\n with mock.patch(\"sentry.event_manager.track_outcome\", mock_track_outcome):\n with self.feature({\"organizations:transaction-metrics-extraction\": False}):\n manager.save(self.project.id)\n\n assert_mock_called_once_with_partial(\n mock_track_outcome, outcome=Outcome.ACCEPTED, category=DataCategory.TRANSACTION\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 408, "n_words": 43, "vocab_size": 39, "complexity": 1, "nloc": 27, "token_counts": 140, "n_ast_nodes": 244, "n_identifiers": 31, "d_id": 18459, "documentation": { "docstring": "\n Without metrics extraction, we count the number of accepted transaction\n events in the TRANSACTION data category. 
This maintains compatibility\n with Sentry installations that do not have a metrics pipeline.\n ", "n_words": 29, "vocab_size": 27, "n_whitespaces": 58, "language": "en" } }, { "id": 275511, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/optimizer_v2/optimizer_v2.py", "file_name": "optimizer_v2.py", "fun_name": "set_weights", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def set_weights(self, weights):\n \n params = self.weights\n if len(params) != len(weights):\n raise ValueError(\n f\"You called `set_weights(weights)` on optimizer {self._name} \"\n f\"with a weight list of length {str(len(weights))}, \"\n f\"but the optimizer was expecting {str(len(params))} \"\n f\"weights. Provided weights: {str(weights)[:50]}...\"\n )\n if not params:\n return\n weight_value_tuples = []\n param_values = backend.batch_get_value(params)\n for pv, p, w in zip(param_values, params, weights):\n if pv.shape != w.shape:\n raise ValueError(\n f\"Optimizer weight shape {str(pv.shape)} \"\n \"not compatible with \"\n f\"provided weight shape {str(w.shape)}.\"\n )\n weight_value_tuples.append((p, w))\n backend.batch_set_value(weight_value_tuples)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 339, "n_words": 80, "vocab_size": 63, "complexity": 5, "nloc": 22, "token_counts": 103, "n_ast_nodes": 241, "n_identifiers": 19, "d_id": 81406, "documentation": { "docstring": "Set the weights of the optimizer.\n\n The weights of an optimizer are its state (ie, variables).\n This function takes the weight values associated with this\n optimizer as a list of Numpy arrays. The first value is always the\n iterations count of the optimizer, followed by the optimizer's state\n variables in the order they are created. 
The passed values are used to set\n the new state of the optimizer.\n\n For example, the RMSprop optimizer for this simple model takes a list of\n three values-- the iteration count, followed by the root-mean-square value\n of the kernel and bias of the single Dense layer:\n\n >>> opt = tf.keras.optimizers.RMSprop()\n >>> m = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])\n >>> m.compile(opt, loss='mse')\n >>> data = np.arange(100).reshape(5, 20)\n >>> labels = np.zeros(5)\n >>> results = m.fit(data, labels) # Training.\n >>> new_weights = [np.array(10), np.ones([20, 10]), np.zeros([10])]\n >>> opt.set_weights(new_weights)\n >>> opt.iterations\n \n\n Args:\n weights: weight values as a list of numpy arrays.\n ", "n_words": 154, "vocab_size": 96, "n_whitespaces": 313, "language": "en" } }, { "id": 9815, "commit_id": "933415bfa1f9eb89f935037014dfed816eb9815d", "repo": "jina", "path": "daemon/stores/partial.py", "file_name": "partial.py", "fun_name": "delete", "commit_message": "feat: star routing (#3900)\n\n* feat(proto): adjust proto for star routing (#3844)\r\n\r\n* feat(proto): adjust proto for star routing\r\n\r\n* feat(proto): generate proto files\r\n\r\n* feat(grpc): refactor grpclet interface (#3846)\r\n\r\n* feat: refactor connection pool for star routing (#3872)\r\n\r\n* feat(k8s): add more labels to k8s deployments\r\n\r\n* feat(network): refactor connection pool\r\n\r\n* feat(network): refactor k8s pool\r\n\r\n* feat: star routing graph gateway (#3877)\r\n\r\n* feat: star routing - refactor grpc data runtime (#3887)\r\n\r\n* feat(runtimes): refactor grpc dataruntime\r\n\r\n* fix(tests): adapt worker runtime tests\r\n\r\n* fix(import): fix import\r\n\r\n* feat(proto): enable sending multiple lists (#3891)\r\n\r\n* feat: star routing gateway (#3893)\r\n\r\n* feat: star routing gateway all protocols (#3897)\r\n\r\n* test: add streaming and prefetch tests (#3901)\r\n\r\n* feat(head): new head runtime for star routing (#3899)\r\n\r\n* feat(head): new head runtime\r\n\r\n* feat(head): new head runtime\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(network): improve proto comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(worker): merge docs in worker runtime (#3905)\r\n\r\n* feat(worker): merge docs in worker runtime\r\n\r\n* feat(tests): assert after clean up\r\n\r\n* feat(tests): star routing runtime integration tests (#3908)\r\n\r\n* fix(tests): fix integration tests\r\n\r\n* test: test runtimes fast slow request (#3910)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table (#3915)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(zmq): adapt comment in dependency list\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix type tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: add test gateway to worker connection (#3921)\r\n\r\n* feat(pea): adapt peas for star routing (#3918)\r\n\r\n* feat(pea): adapt peas for star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(pea): add tests\r\n\r\n* feat(tests): add failing head pea test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(tests): integration tests for peas (#3923)\r\n\r\n* feat(tests): integration tests for peas\r\n\r\n* feat(pea): remove _inner_pea function\r\n\r\n* feat: star routing container pea (#3922)\r\n\r\n* test: rescue tests (#3942)\r\n\r\n* fix: fix streaming tests (#3945)\r\n\r\n* refactor: move docker run to run (#3948)\r\n\r\n* feat: star routing pods (#3940)\r\n\r\n* 
feat(pod): adapt pods for star routing\r\n\r\n* feat(pods): adapt basepod to star routing\r\n\r\n* feat(pod): merge pod and compound pod\r\n\r\n* feat(tests): fix tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(test): add container pea int test\r\n\r\n* feat(ci): remove more unnecessary tests\r\n\r\n* fix(tests): remove jinad runtime\r\n\r\n* feat(ci): remove latency tracking\r\n\r\n* fix(ci): fix ci def\r\n\r\n* fix(runtime): enable runtime to be exited\r\n\r\n* fix(tests): wrap runtime test in process\r\n\r\n* fix(runtimes): remove unused runtimes\r\n\r\n* feat(runtimes): improve cancel wait\r\n\r\n* fix(ci): build test pip again in ci\r\n\r\n* fix(tests): fix a test\r\n\r\n* fix(test): run async in its own process\r\n\r\n* feat(pod): include shard in activate msg\r\n\r\n* fix(pea): dont join\r\n\r\n* feat(pod): more debug out\r\n\r\n* feat(grpc): manage channels properly\r\n\r\n* feat(pods): remove exitfifo\r\n\r\n* feat(network): add simple send retry mechanism\r\n\r\n* fix(network): await pool close\r\n\r\n* fix(test): always close grpc server in worker\r\n\r\n* fix(tests): remove container pea from tests\r\n\r\n* fix(tests): reorder tests\r\n\r\n* fix(ci): split tests\r\n\r\n* fix(ci): allow alias setting\r\n\r\n* fix(test): skip a test\r\n\r\n* feat(pods): address comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: unblock skipped test (#3957)\r\n\r\n* feat: jinad pea (#3949)\r\n\r\n* feat: jinad pea\r\n\r\n* feat: jinad pea\r\n\r\n* test: remote peas\r\n\r\n* test: toplogy tests with jinad\r\n\r\n* ci: parallel jobs\r\n\r\n* feat(tests): add pod integration tests (#3958)\r\n\r\n* feat(tests): add pod integration tests\r\n\r\n* fix(tests): make tests less flaky\r\n\r\n* fix(test): fix test\r\n\r\n* test(pea): remote pea topologies (#3961)\r\n\r\n* test(pea): remote pea simple topology\r\n\r\n* test: remote pea topologies\r\n\r\n* refactor: refactor streamer result handling (#3960)\r\n\r\n* feat(k8s): adapt K8s Pod for StarRouting (#3964)\r\n\r\n* test: optimize k8s test\r\n\r\n* test: increase timeout and use different namespace\r\n\r\n* test: optimize k8s test\r\n\r\n* test: build and load image when needed\r\n\r\n* test: refactor k8s test\r\n\r\n* test: fix image name error\r\n\r\n* test: fix k8s image load\r\n\r\n* test: fix typoe port expose\r\n\r\n* test: update tests in connection pool and handling\r\n\r\n* test: remove unused fixture\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* feat(k8s): adapt k8s pod for star routing\r\n\r\n* fix(k8s): dont overwrite add/remove function in pool\r\n\r\n* fix(k8s): some fixes\r\n\r\n* fix(k8s): some more fixes\r\n\r\n* fix(k8s): linting\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix k8s unit tests\r\n\r\n* feat(k8s): complete k8s integration test\r\n\r\n* feat(k8s): finish k8s tests\r\n\r\n* feat(k8s): fix test\r\n\r\n* fix(tests): fix test with no name\r\n\r\n* feat(k8s): unify create/replace interface\r\n\r\n* feat(k8s): extract k8s port constants\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): wait for runtime being ready in tests\r\n\r\n* feat(k8s): address comments\r\n\r\nCo-authored-by: bwanglzu \r\n\r\n* feat(flow): adapt Flow for StarRouting (#3986)\r\n\r\n* feat(flow): add routes\r\n\r\n* feat(flow): adapt flow to star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(flow): handle empty topologies\r\n\r\n* feat(k8s): allow k8s pool disabling\r\n\r\n* style: fix overload and cli 
autocomplete\r\n\r\n* fix(test): fix test with mock\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(flow): clean up tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat: add plot function (#3994)\r\n\r\n* fix(tests): avoid hanging tests\r\n\r\n* feat(flow): add type hinting\r\n\r\n* fix(test): fix duplicate exec name in test\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): enable jinad test again\r\n\r\n* fix(tests): random port fixture\r\n\r\n* fix(style): replace quotes\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(ci): bring back ci (#3997)\r\n\r\n* feat(ci): enable ci again\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(ci): add latency tracking\r\n\r\n* feat(ci): bring back some tests\r\n\r\n* fix(tests): remove invalid port test\r\n\r\n* feat(ci): disable daemon and distributed tests\r\n\r\n* fix(tests): fix entrypoint in hub test\r\n\r\n* fix(tests): wait for gateway to be ready\r\n\r\n* fix(test): fix more tests\r\n\r\n* feat(flow): do rolling update and scale sequentially\r\n\r\n* fix(tests): fix more tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat: star routing hanging pods (#4011)\r\n\r\n* fix: try to handle hanging pods better\r\n\r\n* test: hanging pods test work\r\n\r\n* fix: fix topology graph problem\r\n\r\n* test: add unit test to graph\r\n\r\n* fix(tests): fix k8s tests\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s pool test\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s connection pool setting\r\n\r\n* fix(tests): make runtime test more reliable\r\n\r\n* fix(test): fix routes test\r\n\r\n* fix(tests): make rolling update test less flaky\r\n\r\n* feat(network): gurantee unique ports\r\n\r\n* feat(network): do round robin for shards\r\n\r\n* fix(ci): increase pytest timeout to 10 min\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix(ci): fix ci file\r\n\r\n* feat(daemon): jinad pod for star routing\r\n\r\n* Revert \"feat(daemon): jinad pod for star routing\"\r\n\r\nThis reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92.\r\n\r\n* feat(daemon): remote jinad pod support (#4042)\r\n\r\n* feat(daemon): add pod tests for star routing\r\n\r\n* feat(daemon): add remote pod test\r\n\r\n* test(daemon): add remote pod arguments test\r\n\r\n* test(daemon): add async scale test\r\n\r\n* test(daemon): add rolling update test\r\n\r\n* test(daemon): fix host\r\n\r\n* feat(proto): remove message proto (#4051)\r\n\r\n* feat(proto): remove message proto\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix some more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(proto): put docs back in data\r\n\r\n* fix(proto): clean up\r\n\r\n* feat(proto): clean up\r\n\r\n* fix(tests): skip latency tracking\r\n\r\n* fix(test): fix hub test\r\n\r\n* fix(tests): fix k8s test\r\n\r\n* fix(test): some test clean up\r\n\r\n* fix(style): clean up style issues\r\n\r\n* feat(proto): adjust for rebase\r\n\r\n* fix(tests): bring back latency tracking\r\n\r\n* fix(tests): fix merge accident\r\n\r\n* feat(proto): skip request serialization (#4074)\r\n\r\n* feat: add reduce to star routing (#4070)\r\n\r\n* feat: add reduce on shards to head runtime\r\n\r\n* test: add reduce integration tests with fixed order\r\n\r\n* feat: add reduce on needs\r\n\r\n* chore: get_docs_matrix_from_request 
becomes public\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* docs: remove undeterministic results warning\r\n\r\n* fix: fix uses_after\r\n\r\n* test: assert correct num docs after reducing in test_external_pod\r\n\r\n* test: correct asserts after reduce in test_rolling_update\r\n\r\n* fix: no reduce if uses_after_address is set\r\n\r\n* fix: get_docs_from_request only if needed\r\n\r\n* fix: fix tests after merge\r\n\r\n* refactor: move reduce from data_request_handler to head\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* chore: apply suggestions\r\n\r\n* fix: fix asserts\r\n\r\n* chore: minor test fix\r\n\r\n* chore: apply suggestions\r\n\r\n* test: remove flow tests with external executor (pea)\r\n\r\n* fix: fix test_expected_messages_routing\r\n\r\n* fix: fix test_func_joiner\r\n\r\n* test: adapt k8s test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix(k8s): fix static pool config\r\n\r\n* fix: use custom protoc doc generator image (#4088)\r\n\r\n* fix: use custom protoc doc generator image\r\n\r\n* fix(docs): minor doc improvement\r\n\r\n* fix(docs): use custom image\r\n\r\n* fix(docs): copy docarray\r\n\r\n* fix: doc building local only\r\n\r\n* fix: timeout doc building\r\n\r\n* fix: use updated args when building ContainerPea\r\n\r\n* test: add container PeaFactory test\r\n\r\n* fix: force pea close on windows (#4098)\r\n\r\n* fix: dont reduce if uses exist (#4099)\r\n\r\n* fix: dont use reduce if uses exist\r\n\r\n* fix: adjust reduce tests\r\n\r\n* fix: adjust more reduce tests\r\n\r\n* fix: fix more tests\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: ignore non jina resources (#4101)\r\n\r\n* feat(executor): enable async executors (#4102)\r\n\r\n* feat(daemon): daemon flow on star routing (#4096)\r\n\r\n* test(daemon): add remote flow test\r\n\r\n* feat(daemon): call scale in daemon\r\n\r\n* feat(daemon): remove tail args and identity\r\n\r\n* test(daemon): rename scalable executor\r\n\r\n* test(daemon): add a small delay in async test\r\n\r\n* feat(daemon): scale partial flow only\r\n\r\n* feat(daemon): call scale directly in partial flow store\r\n\r\n* test(daemon): use asyncio sleep\r\n\r\n* feat(daemon): enable flow level distributed tests\r\n\r\n* test(daemon): fix jinad env workspace config\r\n\r\n* test(daemon): fix pod test use new port rolling update\r\n\r\n* feat(daemon): enable distribuetd tests\r\n\r\n* test(daemon): remove duplicate tests and zed runtime test\r\n\r\n* test(daemon): fix stores unit test\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* test: correct test paths\r\n\r\n* test(daemon): add client test for remote flows\r\n\r\n* test(daemon): send a request with jina client\r\n\r\n* test(daemon): assert async generator\r\n\r\n* test(daemon): small interval between tests\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): use async client fetch result\r\n\r\n* test(daemon): finish container flow test\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): correct test paths\r\n\r\n* test(daemon): add small delay for async tests\r\n\r\n* fix: star routing fixes 
(#4100)\r\n\r\n* docs: update docs\r\n\r\n* fix: fix Request.__repr__\r\n\r\n* docs: update flow remarks\r\n\r\n* docs: fix typo\r\n\r\n* test: add non_empty_fields test\r\n\r\n* chore: remove non_empty_fields test\r\n\r\n* feat: polling per endpoint (#4111)\r\n\r\n* feat(polling): polling per endpoint configurable\r\n\r\n* fix: adjust tests\r\n\r\n* feat(polling): extend documentation\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: clean up\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: remove repeat from flaky test\r\n\r\n* fix: k8s test\r\n\r\n* feat(polling): address pr feedback\r\n\r\n* feat: improve docs\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(grpc): support connect grpc server via ssl tunnel (#4092)\r\n\r\n* feat(grpc): support ssl grpc connect if port is 443\r\n\r\n* fix(grpc): use https option instead of detect port automatically\r\n\r\n* chore: fix typo\r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* test(networking): add test for peapods networking\r\n\r\n* fix: address comments\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(polling): unify polling args (#4113)\r\n\r\n* fix: several issues for jinad pods (#4119)\r\n\r\n* fix: activate for jinad pods\r\n\r\n* fix: dont expose worker pod in partial daemon\r\n\r\n* fix: workspace setting\r\n\r\n* fix: containerized flows\r\n\r\n* fix: hub test\r\n\r\n* feat(daemon): remote peas on star routing (#4112)\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix sync async client test\r\n\r\n* test(daemon): enable remote peas test\r\n\r\n* test(daemon): replace send message to send request\r\n\r\n* test(daemon): declare pea tests in ci\r\n\r\n* test(daemon): use pea args fixture\r\n\r\n* test(daemon): head pea use default host\r\n\r\n* test(daemon): fix peas topologies\r\n\r\n* test(daemon): fix pseudo naming\r\n\r\n* test(daemon): use default host as host\r\n\r\n* test(daemon): fix executor path\r\n\r\n* test(daemon): add remote worker back\r\n\r\n* test(daemon): skip local remote remote topology\r\n\r\n* fix: jinad pea test setup\r\n\r\n* fix: jinad pea tests\r\n\r\n* fix: remove invalid assertion\r\n\r\nCo-authored-by: jacobowitz \r\n\r\n* feat: enable daemon tests again (#4132)\r\n\r\n* feat: enable daemon tests again\r\n\r\n* fix: remove bogy empty script file\r\n\r\n* fix: more jinad test fixes\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: scale and ru in jinad\r\n\r\n* fix: fix more jinad tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix: fix flow test\r\n\r\n* fix: improve pea tests reliability (#4136)\r\n\r\nCo-authored-by: Joan Fontanals \r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Deepankar Mahapatro \r\nCo-authored-by: bwanglzu \r\nCo-authored-by: AlaeddineAbdessalem \r\nCo-authored-by: Zhaofeng Miao <522856232@qq.com>", "code": "def delete(self) -> None:\n \n try:\n if hasattr(self.object, 'close'):\n self.object.close()\n self._logger.info(self.item.arguments)\n if self.item.arguments.get('identity'):\n self._logger.success(\n f'{colored(self.item.arguments[\"identity\"], \"cyan\")} is removed!'\n )\n else:\n self._logger.success('object is removed!')\n else:\n self._logger.warning(f'nothing to close. 
exiting')\n except Exception as e:\n self._logger.error(f'{e!r}')\n raise\n else:\n self.item = PartialStoreItem()\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 275, "n_words": 37, "vocab_size": 33, "complexity": 4, "nloc": 19, "token_counts": 105, "n_ast_nodes": 214, "n_identifiers": 17, "d_id": 1707, "documentation": { "docstring": "Terminates the object in the store & stops the server", "n_words": 10, "vocab_size": 8, "n_whitespaces": 9, "language": "en" } }, { "id": 63453, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", "file_name": "pyparsing.py", "fun_name": "markInputline", "commit_message": "upd; format", "code": "def markInputline(self, markerString=\">!<\"):\n \n line_str = self.line\n line_column = self.column - 1\n if markerString:\n line_str = \"\".join((line_str[:line_column],\n markerString, line_str[line_column:]))\n return line_str.strip()", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 97, "n_words": 20, "vocab_size": 17, "complexity": 2, "nloc": 7, "token_counts": 53, "n_ast_nodes": 88, "n_identifiers": 9, "d_id": 13311, "documentation": { "docstring": "Extracts the exception line from the input string, and marks\n the location of the exception with a special symbol.\n ", "n_words": 19, "vocab_size": 15, "n_whitespaces": 36, "language": "en" } }, { "id": 196010, "commit_id": "0aabd1d7b8c3cb521f713ea925a0bf019ba1f3ca", "repo": "sympy", "path": "sympy/tensor/array/expressions/conv_indexed_to_array.py", "file_name": "conv_indexed_to_array.py", "fun_name": "convert_indexed_to_array", "commit_message": "Extend conversion function of indexed expression to arrays to support broadcasting and addition of different indices", "code": "def convert_indexed_to_array(expr, first_indices=None):\n r\n\n result, indices = _convert_indexed_to_array(expr)\n\n if any(isinstance(i, (int, Integer)) for i in indices):\n result = ArrayElement(result, indices)\n indices = []\n\n if not first_indices:\n return result\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 60, "n_words": 28, "vocab_size": 23, "complexity": 15, "nloc": 62, "token_counts": 191, "n_ast_nodes": 87, "n_identifiers": 12, "d_id": 47511, "documentation": { "docstring": "\n Parse indexed expression into a form useful for code generation.\n\n Examples\n ========\n\n >>> from sympy.tensor.array.expressions.conv_indexed_to_array import convert_indexed_to_array\n >>> from sympy import MatrixSymbol, Sum, symbols\n\n >>> i, j, k, d = symbols(\"i j k d\")\n >>> M = MatrixSymbol(\"M\", d, d)\n >>> N = MatrixSymbol(\"N\", d, d)\n\n Recognize the trace in summation form:\n\n >>> expr = Sum(M[i, i], (i, 0, d-1))\n >>> convert_indexed_to_array(expr)\n ArrayContraction(M, (0, 1))\n\n Recognize the extraction of the diagonal by using the same index `i` on\n both axes of the matrix:\n\n >>> expr = M[i, i]\n >>> convert_indexed_to_array(expr)\n ArrayDiagonal(M, (0, 1))\n\n This function can help perform the transformation expressed in two\n different mathematical notations as:\n\n `\\sum_{j=0}^{N-1} A_{i,j} B_{j,k} \\Longrightarrow \\mathbf{A}\\cdot \\mathbf{B}`\n\n Recognize the matrix multiplication in summation form:\n\n >>> expr = Sum(M[i, 
j]*N[j, k], (j, 0, d-1))\n >>> convert_indexed_to_array(expr)\n ArrayContraction(ArrayTensorProduct(M, N), (1, 2))\n\n Specify that ``k`` has to be the starting index:\n\n >>> convert_indexed_to_array(expr, first_indices=[k])\n ArrayContraction(ArrayTensorProduct(N, M), (0, 3))\n ", "n_words": 151, "vocab_size": 107, "n_whitespaces": 236, "language": "en" } }, { "id": 277089, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/tf_utils.py", "file_name": "tf_utils.py", "fun_name": "is_symbolic_tensor", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def is_symbolic_tensor(tensor):\n \n if isinstance(tensor, tf.Tensor):\n return hasattr(tensor, \"graph\")\n elif is_extension_type(tensor):\n component_tensors = tf.nest.flatten(tensor, expand_composites=True)\n return any(hasattr(t, \"graph\") for t in component_tensors)\n elif isinstance(tensor, tf.Variable):\n # Variables that are output of a Keras Layer in Functional API mode\n # should be considered symbolic.\n # TODO(omalleyt): We need a better way to check this in order to\n # enable `run_eagerly=True` for Models containing Layers that\n # return Variables as outputs.\n return (\n getattr(tensor, \"_keras_history\", False)\n or not tf.executing_eagerly()\n )\n elif isinstance(tensor, tuple(_user_convertible_tensor_types)):\n tensor = ops.convert_to_tensor_or_composite(tensor)\n return is_symbolic_tensor(tensor)\n else:\n return False\n\n\n@keras_export(\"keras.__internal__.utils.register_symbolic_tensor_type\", v1=[])", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.__internal__.utils.register_symbolic_tensor_type\", v1=[])", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 220, "n_words": 90, "vocab_size": 68, "complexity": 7, "nloc": 16, "token_counts": 113, "n_ast_nodes": 205, "n_identifiers": 22, "d_id": 81861, "documentation": { "docstring": "Returns whether a tensor is symbolic (from a TF graph) or an eager tensor.\n\n A Variable can be seen as either: it is considered symbolic\n when we are in a graph scope, and eager when we are in an eager scope.\n\n Args:\n tensor: A tensor instance to test.\n\n Returns:\n True for symbolic tensors, False for eager tensors.\n ", "n_words": 57, "vocab_size": 41, "n_whitespaces": 82, "language": "en" } }, { "id": 108945, "commit_id": "4e21912d2938b0e8812c4d1f7cd902c080062ff2", "repo": "matplotlib", "path": "lib/matplotlib/backends/backend_wx.py", "file_name": "backend_wx.py", "fun_name": "_on_size", "commit_message": "Make it easier to improve UI event metadata.\n\nCurrently, UI events (MouseEvent, KeyEvent, etc.) are generated by\nletting the GUI-specific backends massage the native event objects into\na list of args/kwargs and then call\n`FigureCanvasBase.motion_notify_event`/`.key_press_event`/etc. This\nmakes it a bit tricky to improve the metadata on the events, because one\nneeds to change the signature on both the `FigureCanvasBase` method and\nthe event class. Moreover, the `motion_notify_event`/etc. methods are\ndirectly bound as event handlers in the gtk3 and tk backends, and thus\nhave incompatible signatures there.\n\nInstead, the native GUI handlers can directly construct the relevant\nevent objects and trigger the events themselves; a new `Event._process`\nhelper method makes this even shorter (and allows to keep factoring some\ncommon functionality e.g. 
for tracking the last pressed button or key).\n\nAs an example, this PR also updates figure_leave_event to always\ncorrectly set the event location based on the *current* cursor position,\ninstead of the last triggered location event (which may be outdated);\nthis can now easily be done on a backend-by-backend basis, instead of\ncoordinating the change with FigureCanvasBase.figure_leave_event.\n\nThis also exposed another (minor) issue, in that resize events\noften trigger *two* calls to draw_idle -- one in the GUI-specific\nhandler, and one in FigureCanvasBase.draw_idle (now moved to\nResizeEvent._process, but should perhaps instead be a callback\nautoconnected to \"resize_event\") -- could probably be fixed later.", "code": "def _on_size(self, event):\n \n\n _log.debug(\"%s - _on_size()\", type(self))\n sz = self.GetParent().GetSizer()\n if sz:\n si = sz.GetItem(self)\n if sz and si and not si.Proportion and not si.Flag & wx.EXPAND:\n # managed by a sizer, but with a fixed size\n size = self.GetMinSize()\n else:\n # variable size\n size = self.GetClientSize()\n # Do not allow size to become smaller than MinSize\n size.IncTo(self.GetMinSize())\n if getattr(self, \"_width\", None):\n if size == (self._width, self._height):\n # no change in size\n return\n self._width, self._height = size\n self._isDrawn = False\n\n if self._width <= 1 or self._height <= 1:\n return # Empty figure\n\n # Create a new, correctly sized bitmap\n self.bitmap = wx.Bitmap(self._width, self._height)\n\n dpival = self.figure.dpi\n winch = self._width / dpival\n hinch = self._height / dpival\n self.figure.set_size_inches(winch, hinch, forward=False)\n\n # Rendering will happen on the associated paint event\n # so no need to do anything here except to make sure\n # the whole background is repainted.\n self.Refresh(eraseBackground=False)\n ResizeEvent(\"resize_event\", self)._process()\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 426, "n_words": 149, "vocab_size": 101, "complexity": 10, "nloc": 24, "token_counts": 207, "n_ast_nodes": 344, "n_identifiers": 36, "d_id": 23396, "documentation": { "docstring": "\n Called when wxEventSize is generated.\n\n In this application we attempt to resize to fit the window, so it\n is better to take the performance hit and redraw the whole window.\n ", "n_words": 30, "vocab_size": 25, "n_whitespaces": 59, "language": "en" } }, { "id": 47649, "commit_id": "49e336ae0302b386a2f47269a6d13988382d975f", "repo": "airflow", "path": "tests/operators/test_subdag_operator.py", "file_name": "test_subdag_operator.py", "fun_name": "test_subdag_pools_no_possible_conflict", "commit_message": "Replace usage of `DummyOperator` with `EmptyOperator` (#22974)\n\n* Replace usage of `DummyOperator` with `EmptyOperator`", "code": "def test_subdag_pools_no_possible_conflict(self):\n \n dag = DAG('parent', default_args=default_args)\n subdag = DAG('parent.child', default_args=default_args)\n\n session = airflow.settings.Session()\n pool_1 = airflow.models.Pool(pool='test_pool_1', slots=1)\n pool_10 = airflow.models.Pool(pool='test_pool_10', slots=10)\n session.add(pool_1)\n session.add(pool_10)\n session.commit()\n\n EmptyOperator(task_id='dummy', dag=subdag, pool='test_pool_10')\n\n mock_session = Mock()\n SubDagOperator(task_id='child', dag=dag, subdag=subdag, pool='test_pool_1', session=mock_session)\n assert not mock_session.query.called\n\n session.delete(pool_1)\n session.delete(pool_10)\n 
session.commit()\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 153, "n_words": 41, "vocab_size": 34, "complexity": 1, "nloc": 16, "token_counts": 149, "n_ast_nodes": 250, "n_identifiers": 26, "d_id": 9190, "documentation": { "docstring": "\n Subdags and subdag tasks with no pool overlap, should not to query\n pools\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 35, "language": "en" } }, { "id": 176977, "commit_id": "abaa68779ccb4cce8d1a5ecade622ab96d01edeb", "repo": "networkx", "path": "networkx/algorithms/lowest_common_ancestors.py", "file_name": "lowest_common_ancestors.py", "fun_name": "tree_all_pairs_lowest_common_ancestor", "commit_message": "Add examples to lowest common ancestors algorithms (#5531)\n\n* Add examples to lowest common ancestors documentation\r\n\r\n* Fix output style of examples\r\n\r\n* Fix output style of example\r\n\r\n* Update pre-commit\r\n\r\n* Update networkx/algorithms/lowest_common_ancestors.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Update networkx/algorithms/lowest_common_ancestors.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Indentation fix & pprint dictionary\r\n\r\n* Update networkx/algorithms/lowest_common_ancestors.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Update networkx/algorithms/lowest_common_ancestors.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Update networkx/algorithms/lowest_common_ancestors.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\n* Move \"import pprint\" to the example\r\n\r\nCo-authored-by: dtuncturk \r\nCo-authored-by: Ross Barnowski ", "code": "def tree_all_pairs_lowest_common_ancestor(G, root=None, pairs=None):\n r\n if len(G) == 0:\n raise nx.NetworkXPointlessConcept(\"LCA meaningless on null graphs.\")\n elif None in G:\n raise nx.NetworkXError(\"None is not a valid node.\")\n\n # Index pairs of interest for efficient lookup from either side.\n if pairs is not None:\n pair_dict = defaultdict(set)\n # See note on all_pairs_lowest_common_ancestor.\n if not isinstance(pairs, (Mapping, Set)):\n pairs = set(pairs)\n for u, v in pairs:\n for n in (u, v):\n if n not in G:\n msg = f\"The node {str(n)} is not in the digraph.\"\n raise nx.NodeNotFound(msg)\n pair_dict[u].add(v)\n pair_dict[v].add(u)\n\n # If root is not specified, find the exactly one node with in degree 0 and\n # use it. Raise an error if none are found, or more than one is. 
Also check\n # for any nodes with in degree larger than 1, which would imply G is not a\n # tree.\n if root is None:\n for n, deg in G.in_degree:\n if deg == 0:\n if root is not None:\n msg = \"No root specified and tree has multiple sources.\"\n raise nx.NetworkXError(msg)\n root = n\n elif deg > 1:\n msg = \"Tree LCA only defined on trees; use DAG routine.\"\n raise nx.NetworkXError(msg)\n if root is None:\n raise nx.NetworkXError(\"Graph contains a cycle.\")\n\n # Iterative implementation of Tarjan's offline lca algorithm\n # as described in CLRS on page 521 (2nd edition)/page 584 (3rd edition)\n uf = UnionFind()\n ancestors = {}\n for node in G:\n ancestors[node] = uf[node]\n\n colors = defaultdict(bool)\n for node in nx.dfs_postorder_nodes(G, root):\n colors[node] = True\n for v in pair_dict[node] if pairs is not None else G:\n if colors[v]:\n # If the user requested both directions of a pair, give it.\n # Otherwise, just give one.\n if pairs is not None and (node, v) in pairs:\n yield (node, v), ancestors[uf[v]]\n if pairs is None or (v, node) in pairs:\n yield (v, node), ancestors[uf[v]]\n if node != root:\n parent = arbitrary_element(G.pred[node])\n uf.union(parent, node)\n ancestors[uf[parent]] = parent\n\n\n@not_implemented_for(\"undirected\")\n@not_implemented_for(\"multigraph\")", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@not_implemented_for(\"undirected\")\n@not_implemented_for(\"multigraph\")", "n_ast_errors": 1, "ast_levels": 18, "n_whitespaces": 808, "n_words": 314, "vocab_size": 173, "complexity": 24, "nloc": 102, "token_counts": 345, "n_ast_nodes": 573, "n_identifiers": 35, "d_id": 42205, "documentation": { "docstring": "Yield the lowest common ancestor for sets of pairs in a tree.\n\n Parameters\n ----------\n G : NetworkX directed graph (must be a tree)\n\n root : node, optional (default: None)\n The root of the subtree to operate on.\n If None, assume the entire graph has exactly one source and use that.\n\n pairs : iterable or iterator of pairs of nodes, optional (default: None)\n The pairs of interest. If None, Defaults to all pairs of nodes\n under `root` that have a lowest common ancestor.\n\n Returns\n -------\n lcas : generator of tuples `((u, v), lca)` where `u` and `v` are nodes\n in `pairs` and `lca` is their lowest common ancestor.\n\n Examples\n --------\n >>> import pprint\n >>> G = nx.DiGraph([(1, 3), (2, 4), (1, 2)])\n >>> pprint.pprint(dict(nx.tree_all_pairs_lowest_common_ancestor(G)))\n {(1, 1): 1,\n (2, 1): 1,\n (2, 2): 2,\n (3, 1): 1,\n (3, 2): 1,\n (3, 3): 3,\n (3, 4): 1,\n (4, 1): 1,\n (4, 2): 2,\n (4, 4): 4}\n\n We can also use `pairs` argument to specify the pairs of nodes for which we\n want to compute lowest common ancestors. Here is an example:\n\n >>> dict(nx.tree_all_pairs_lowest_common_ancestor(G, pairs=[(1, 4), (2, 3)]))\n {(2, 3): 1, (1, 4): 1}\n\n Notes\n -----\n Only defined on non-null trees represented with directed edges from\n parents to children. Uses Tarjan's off-line lowest-common-ancestors\n algorithm. Runs in time $O(4 \\times (V + E + P))$ time, where 4 is the largest\n value of the inverse Ackermann function likely to ever come up in actual\n use, and $P$ is the number of pairs requested (or $V^2$ if all are needed).\n\n Tarjan, R. E. 
(1979), \"Applications of path compression on balanced trees\",\n Journal of the ACM 26 (4): 690-715, doi:10.1145/322154.322161.\n\n See Also\n --------\n all_pairs_lowest_common_ancestor: similar routine for general DAGs\n lowest_common_ancestor: just a single pair for general DAGs\n ", "n_words": 290, "vocab_size": 186, "n_whitespaces": 457, "language": "en" } }, { "id": 109756, "commit_id": "4896ec1a2cfb8c454e385632d8df213c915ced52", "repo": "matplotlib", "path": "lib/mpl_toolkits/mplot3d/proj3d.py", "file_name": "proj3d.py", "fun_name": "view_transformation", "commit_message": "Add pan and zoom toolbar handling to 3D Axes (Replaces PR#22614) (#23449)\n\n* ENH: Add pan and zoom toolbar handling to 3D Axes\r\n\r\n1) This moves the pan logic that was already in the mouse move handler\r\ninto the \"drag_pan\" method to make it available from the toolbar.\r\n\r\n2) This expands upon the panning logic to enable a zoom-to-box feature.\r\nThe zoom-to-box is done relative to the Axes, so it shrinks/expands\r\nthe box as a fraction of each delta, from lower-left Axes to lower-left\r\nzoom-box. Thus, it tries to handle non-centered zooms, which adds more\r\ncases to handle versus the current right-click zoom only scaling from\r\nthe center of the projection.\r\n\r\n* Rewrite zooming with bounding box\r\n\r\n* Rewrite 3d panning to work with a roll angle\r\n\r\n* Whats new for zoom and pan buttons\r\n\r\n* Make pan button configurable\r\n\r\n* Do not jump when zooming and mouse goes over other subplot\r\n\r\n* Rework zooming for 3d plots\r\n\r\n* Handle x/y lock when zooming and panning\r\n\r\n* Update tests\r\n\r\n* Docstrings\r\n\r\n* Dont assume a scale_z\r\n\r\n* Limit zoom box\r\n\r\n* Test zoom pan key modifiers\r\n\r\n* Save some calculation by saving view axes\r\n\r\n* Deprecation warnings for Axes3D.eye, .vvec\r\n\r\n* Remove Axes3D._prepare_view_from_bbox for now\r\n\r\n* Comments and docstrings\r\n\r\n* Switch from uvn to uvw\r\n\r\n* Save aspect to axes\r\n\r\n* Constrain zooming with mouse when one of the equal aspect ratios is set\r\n\r\n* Cleanup\r\n\r\n* Cleanup\r\n\r\n* Consolidate finding equal aspect axis indices\r\n\r\n* linting\r\n\r\n* More intuitive scaling\r\n\r\n* Box zoom keeps existing aspect ratios\r\n\r\n* Linting\r\n\r\n* Code review comments\r\n\r\n* Revert parameters for view_transformation\r\n\r\n* Fix new 3d pan/zoom view going on view stack twice\r\n\r\n* Better clipping\r\n\r\n* Test 3d toolbar navigation\r\n\r\n* Privatize helper functions\r\n\r\n* Deprecations\r\n\r\n* Code review changes\r\n\r\n* Deprecation note\r\n\r\n* Undeprecate proj3d.view_transformation\r\n\r\n* Undeprecate proj3d.view_transformation\r\n\r\n* Update doc/api/next_api_changes/deprecations/23449-SS.rst\r\n\r\n\r\nCo-authored-by: Greg Lucas \r\nCo-authored-by: Scott Shambaugh \r\nCo-authored-by: Oscar Gustafsson ", "code": "def view_transformation(E, R, V, roll):\n \n u, v, w = _view_axes(E, R, V, roll)\n M = _view_transformation_uvw(u, v, w, E)\n return M\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 33, "n_words": 21, "vocab_size": 16, "complexity": 1, "nloc": 4, "token_counts": 42, "n_ast_nodes": 59, "n_identifiers": 11, "d_id": 23737, "documentation": { "docstring": "\n Return the view transformation matrix.\n\n Parameters\n ----------\n E : 3-element numpy array\n The coordinates of the eye/camera.\n R : 3-element numpy array\n The coordinates of the center of the view box.\n V 
: 3-element numpy array\n Unit vector in the direction of the vertical axis.\n roll : float\n The roll angle in radians.\n ", "n_words": 53, "vocab_size": 30, "n_whitespaces": 106, "language": "en" } }, { "id": 101717, "commit_id": "e2a77e7c6e84e81f642cb22f528e25e3f2d2dbc1", "repo": "faceswap", "path": "tools/alignments/jobs.py", "file_name": "jobs.py", "fun_name": "_get_missing_alignments", "commit_message": "Alignments Tool - Typing, Documentation + Re-org", "code": "def _get_missing_alignments(self) -> Generator[str, None, None]:\n \n self.output_message = \"Frames missing from alignments file\"\n exclude_filetypes = set([\"yaml\", \"yml\", \"p\", \"json\", \"txt\"])\n for frame in tqdm(cast(Dict[str, str], self._items),\n desc=self.output_message,\n leave=False):\n frame_name = frame[\"frame_fullname\"]\n if (frame[\"frame_extension\"] not in exclude_filetypes\n and not self._alignments.frame_exists(frame_name)):\n logger.debug(\"Returning: '%s'\", frame_name)\n yield frame_name\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 193, "n_words": 44, "vocab_size": 38, "complexity": 4, "nloc": 18, "token_counts": 103, "n_ast_nodes": 169, "n_identifiers": 19, "d_id": 21121, "documentation": { "docstring": " yield each frame that does not exist in alignments file\n\n Yields\n ------\n str\n The frame name of any frames missing alignments\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 61, "language": "en" } }, { "id": 153177, "commit_id": "8d1004fdbdaa05700613c8e6287641a732acf606", "repo": "modin", "path": "modin/core/dataframe/pandas/partitioning/partition_manager.py", "file_name": "partition_manager.py", "fun_name": "rebalance_partitions", "commit_message": "FIX-#3675: Expand virtual partitioning utility (#3886)\n\nCo-authored-by: mvashishtha \r\nCo-authored-by: jeffreykennethli \r\nCo-authored-by: Anatoly Myachev \r\nCo-authored-by: Vasily Litvinov \r\nCo-authored-by: Alexey Prutskov \r\nCo-authored-by: Mahesh Vashishtha \r\nCo-authored-by: Naren Krishna <92325366+naren-ponder@users.noreply.github.com>\r\nCo-authored-by: Yaroslav Igoshev \r\nCo-authored-by: Dmitry Chigarev <62142979+dchigarev@users.noreply.github.com>\r\nCo-authored-by: Yaroslav Igoshev \r\nCo-authored-by: Doris Lee \r\nCo-authored-by: Aditya Parameswaran \r\nCo-authored-by: Rehan Sohail Durrani \r\nCo-authored-by: Susmit Vengurlekar \r\nSigned-off-by: Devin Petersohn ", "code": "def rebalance_partitions(cls, partitions):\n \n return partitions\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 18, "n_identifiers": 3, "d_id": 35280, "documentation": { "docstring": "\n Return the provided array of partitions without rebalancing it.\n\n Parameters\n ----------\n partitions : np.ndarray\n The 2-d array of partitions to rebalance.\n\n Returns\n -------\n np.ndarray\n The same 2-d array.\n ", "n_words": 28, "vocab_size": 21, "n_whitespaces": 107, "language": "en" } }, { "id": 101076, "commit_id": "2ea05623bd684b2d1dd75679ad00441a5c751e7e", "repo": "faceswap", "path": "scripts/train.py", "file_name": "train.py", "fun_name": "_handle_deprecations", "commit_message": "Update Distibution Strategies:\n - Add Central Storage Stategy\n - Deprecate 'distributed' cli argument", "code": "def _handle_deprecations(self) -> None:\n \n if 
self._args.distributed:\n deprecation_warning(\"`-d`, `--distributed`\",\n \"Please use `-D`, `--distribution-strategy`\")\n logger.warning(\"Setting 'distribution-strategy' to 'mirrored'\")\n setattr(self._args, \"distribution_strategy\", \"mirrored\")\n del self._args.distributed\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 110, "n_words": 21, "vocab_size": 21, "complexity": 2, "nloc": 8, "token_counts": 43, "n_ast_nodes": 79, "n_identifiers": 8, "d_id": 20513, "documentation": { "docstring": " Handle the update of deprecated arguments and output warnings. ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 10, "language": "en" } }, { "id": 256248, "commit_id": "a59bca366174d9c692fa19750c24d65f47660ef7", "repo": "haystack", "path": "haystack/modeling/model/prediction_head.py", "file_name": "prediction_head.py", "fun_name": "to_qa_preds", "commit_message": "Apply black formatting (#2115)\n\n* Testing black on ui/\r\n\r\n* Applying black on docstores\r\n\r\n* Add latest docstring and tutorial changes\r\n\r\n* Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too\r\n\r\n* Remove comments\r\n\r\n* Relax constraints on pydoc-markdown\r\n\r\n* Split temporary black from the docs. Pydoc-markdown was obsolete and needs a separate PR to upgrade\r\n\r\n* Fix a couple of bugs\r\n\r\n* Add a type: ignore that was missing somehow\r\n\r\n* Give path to black\r\n\r\n* Apply Black\r\n\r\n* Apply Black\r\n\r\n* Relocate a couple of type: ignore\r\n\r\n* Update documentation\r\n\r\n* Make Linux CI run after applying Black\r\n\r\n* Triggering Black\r\n\r\n* Apply Black\r\n\r\n* Remove dependency, does not work well\r\n\r\n* Remove manually double trailing commas\r\n\r\n* Update documentation\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def to_qa_preds(self, top_preds, no_ans_gaps, baskets):\n \n ret = []\n\n # Iterate over each set of document level prediction\n for pred_d, no_ans_gap, basket in zip(top_preds, no_ans_gaps, baskets):\n\n # Unpack document offsets, clear text and id\n token_offsets = basket.raw[\"document_offsets\"]\n pred_id = basket.id_external if basket.id_external else basket.id_internal\n\n # These options reflect the different input dicts that can be assigned to the basket\n # before any kind of normalization or preprocessing can happen\n question_names = [\"question_text\", \"qas\", \"questions\"]\n doc_names = [\"document_text\", \"context\", \"text\"]\n\n document_text = try_get(doc_names, basket.raw)\n question = self.get_question(question_names, basket.raw)\n ground_truth = self.get_ground_truth(basket)\n\n curr_doc_pred = QAPred(\n id=pred_id,\n prediction=pred_d,\n context=document_text,\n question=question,\n token_offsets=token_offsets,\n context_window_size=self.context_window_size,\n aggregation_level=\"document\",\n ground_truth_answer=ground_truth,\n no_answer_gap=no_ans_gap,\n )\n\n ret.append(curr_doc_pred)\n return ret\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 418, "n_words": 105, "vocab_size": 84, "complexity": 3, "nloc": 23, "token_counts": 152, "n_ast_nodes": 238, "n_identifiers": 33, "d_id": 74828, "documentation": { "docstring": "\n Groups Span objects together in a QAPred object\n ", "n_words": 8, "vocab_size": 8, 
"n_whitespaces": 23, "language": "en" } }, { "id": 93937, "commit_id": "f31b57cbc5ec359c8ef9c6459d3d9d8ffcd6e8d9", "repo": "sentry", "path": "tests/sentry/sentry_metrics/test_batch.py", "file_name": "test_batch.py", "fun_name": "_deconstruct_messages", "commit_message": "ref(metrics_indexer): Improve typing, introduce more dataclasses, fix org_id namespacing bug in metadata [INGEST-1380] (#37170)", "code": "def _deconstruct_messages(snuba_messages):\n \n return [\n (json.loads(msg.payload.value.decode(\"utf-8\")), msg.payload.headers)\n for msg in snuba_messages\n ]\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 34, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 36, "n_ast_nodes": 59, "n_identifiers": 9, "d_id": 19028, "documentation": { "docstring": "\n Convert a list of messages returned by `reconstruct_messages` into python\n primitives, to run assertions on:\n\n assert _deconstruct_messages(batch.reconstruct_messages(...)) == [ ... ]\n\n This is slightly nicer to work with than:\n\n assert batch.reconstruct_messages(...) == _construct_messages([ ... ])\n\n ...because pytest's assertion diffs work better with python primitives.\n ", "n_words": 44, "vocab_size": 37, "n_whitespaces": 74, "language": "en" } }, { "id": 22139, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/utils.py", "file_name": "utils.py", "fun_name": "from_key_val_list", "commit_message": "Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def from_key_val_list(value):\n \n if value is None:\n return None\n\n if isinstance(value, (str, bytes, bool, int)):\n raise ValueError(\"cannot encode objects that are not 2-tuples\")\n\n return OrderedDict(value)\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 50, "n_words": 24, "vocab_size": 22, "complexity": 3, "nloc": 6, "token_counts": 39, "n_ast_nodes": 63, "n_identifiers": 9, "d_id": 4211, "documentation": { "docstring": "Take an object and test to see if it can be represented as a\n dictionary. 
Unless it can not be represented as such, return an\n OrderedDict, e.g.,\n\n ::\n\n >>> from_key_val_list([('key', 'val')])\n OrderedDict([('key', 'val')])\n >>> from_key_val_list('string')\n Traceback (most recent call last):\n ...\n ValueError: cannot encode objects that are not 2-tuples\n >>> from_key_val_list({'key': 'val'})\n OrderedDict([('key', 'val')])\n\n :rtype: OrderedDict\n ", "n_words": 56, "vocab_size": 44, "n_whitespaces": 127, "language": "en" } }, { "id": 31156, "commit_id": "ca2a55e9dfb245527b5e1c954fec6ffbb7aef07b", "repo": "transformers", "path": "src/transformers/models/bloom/modeling_bloom.py", "file_name": "modeling_bloom.py", "fun_name": "_set_gradient_checkpointing", "commit_message": "BLOOM (#17474)\n\n* adding template\r\n\r\n* update model\r\n\r\n* model update\r\n\r\n* update conf for debug model\r\n\r\n* update conversion\r\n\r\n* update conversion script\r\n\r\n* update conversion script\r\n\r\n* fix missing keys check\r\n\r\n* add tests to test the tokenizer in the local machine\r\n\r\n* Change variable name\r\n\r\n* add tests on xnli dataset\r\n\r\n* add more description\r\n\r\n* add descriptions + clearer code\r\n\r\n* clearer code\r\n\r\n* adding new tests + skipping few tests because of env problems\r\n\r\n* change comment\r\n\r\n* add dtype on the configuration\r\n\r\n* add test embeddings\r\n\r\n* add hardcoded test\r\n\r\n* fix dtype issue\r\n\r\n* adding torch.float16 to config\r\n\r\n* adding more metrics (min, max, mean)\r\n\r\n* add sum\r\n\r\n* now the test passes with almost equal\r\n\r\n* add files for conversion - test passes on cpu gpu\r\n\r\n* add final changes\r\n\r\n* cleaning code\r\n\r\n* add new args in the docstring\r\n\r\n* fix one liner function\r\n\r\n* remove macros\r\n\r\n* remove forward attention\r\n\r\n* clean up init funtion\r\n\r\n* add comments on the issue\r\n\r\n* rm scale mask softmax\r\n\r\n* do make style\r\n\r\n* fix dtype in init\r\n\r\n* fixing for loop on att probs\r\n\r\n* fix style with black\r\n\r\n* fix style + doc error\r\n\r\n* fix and debug CI errors (docs + style)\r\n\r\n* some updates\r\n\r\n- change new operations\r\n- finally add scaled softmax\r\n- added new args in the config\r\n\r\n* make use cache working\r\n\r\n* add changes\r\n\r\n- save sharded models\r\n- final changes on the modeling script\r\n\r\n* add changes\r\n\r\n- comment on alibi\r\n- add TODO on seq length\r\n\r\n* test commit\r\n\r\n- added a text to test the commit\r\n\r\nCo-authored-by: thomasw21 <24695242+thomasw21@users.noreply.github.com>\r\n\r\n* final changes\r\n\r\n- attention mask change\r\n- generation works on BS176b\r\n\r\nCo-authored-by: thomasw21 <24695242+thomasw21@users.noreply.github.com>\r\n\r\n* changes - model + conversion\r\n\r\n* move to correct dir\r\n\r\n* put ,\r\n\r\n* fex fixes\r\n\r\n* fix tokenizer autodoc\r\n\r\n* fix minor CI issues\r\n\r\n* fix minor CI issues\r\n\r\n* fix minor CI issues\r\n\r\n* fix style issue\r\n\r\n* fix minor import issues\r\n\r\n* fix few issues\r\n\r\n* remove def main on the test\r\n\r\n* add require torch\r\n\r\n* replace decorator with 'with'\r\n\r\n* fix style\r\n\r\n* change to bloom\r\n\r\n* add quick fix tokenizer\r\n\r\n* fix tokenizer file\r\n\r\n* fix tokenizer\r\n\r\n- merge tests\r\n- small fixes\r\n\r\n* fix import issue\r\n\r\n* add bloom to readme\r\n\r\n* fix consistency\r\n\r\n* Update docs/source/en/model_doc/bloom.mdx\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Apply suggestions from code review\r\n\r\nfix comment 
issues on file headers\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* fix doc issue\r\n\r\n* small fix - modeling test\r\n\r\n* some changes\r\n\r\n- refactor some code\r\n- taking into account reviews\r\n- more tests should pass\r\n- removed pruning tests\r\n\r\n* remove useless division\r\n\r\n* more tests should pass\r\n\r\n* more tests should pass\r\n\r\n* more tests should pass\r\n\r\n* let's try this one\r\n\r\n-add alibi offset\r\n- remove all permutes to make the grad operations work\r\n- finger crossed\r\n\r\n* refactor\r\n\r\n- refactor code\r\n- style changes\r\n- add new threshold for test\r\n\r\n* major changes\r\n\r\n- change BLOOM to Bloom\r\n- add quick doc on bloom.mdx\r\n- move embeddings test on modeling test\r\n\r\n* modify readme\r\n\r\n* small fixes\r\n\r\n* small fix\r\n\r\n- better threshold for a test\r\n\r\n* remove old test file from fetcher\r\n\r\n* fix small typo\r\n\r\n* major change\r\n\r\n- change BloomLMHead to BloomForCausalLM\r\n\r\n* remove onnx config\r\n\r\n* major changes\r\n\r\n- refactor the code\r\n- remove asserts\r\n- change tol for test\r\n\r\n* make style\r\n\r\n* small change\r\n\r\n* adding a slow test + commenting old ones for now\r\n\r\n* make style\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* make style\r\n\r\n* fix duplicates\r\n\r\n* cleaning comments on config\r\n\r\n* clean a bit conversion file\r\n\r\n* refacor a bit modeling file\r\n\r\n* refactor tokenizer file\r\n\r\n* fix tokenization test issue\r\n\r\n* fix tokenization issue #2\r\n\r\n* fix tokenization issue second try\r\n\r\n* fix test issue\r\n\r\n* make style + add suggestions\r\n\r\n* change test fetcher\r\n\r\n* try this one\r\n\r\n- slow tests should pass\r\n- finger crossed\r\n\r\n* possible final changes\r\n\r\n* make style\r\n\r\n* try fix padding side issue\r\n\r\n* fix side\r\n\r\n* fix padding issue\r\n\r\n* fix ko-readme\r\n\r\n* fix config auto\r\n\r\n* cleaning modeling file\r\n\r\n* keep bloom in caps in ko\r\n\r\n* update config docs\r\n\r\n* remove pretraining_pp\r\n\r\n* remove model parallel\r\n\r\n* update config\r\n\r\n- add correct config files\r\n\r\n* fix duplicates\r\n\r\n* fix fetcher\r\n\r\n* fix refactor issue\r\n\r\n- remove divide function\r\n\r\n* try to remove alibi\r\n\r\n* small fixes\r\n\r\n- fix alibi\r\n- remove seq length\r\n- refactor a bit the code\r\n\r\n* put correct values\r\n\r\n- fix bos and eos token ids\r\n\r\n* fix attention mask loop\r\n\r\nCo-authored-by: thomasw21 <24695242+thomasw21@users.noreply.github.com>\r\n\r\n* small fixes:\r\n\r\n- remove skip bias add\r\n\r\n* small fixes\r\n\r\n- fix typo in readme\r\n- fix typos in config\r\n\r\n* small changes\r\n\r\n- remove a test\r\n- add reconstruction test\r\n- change config\r\n\r\n* small changes\r\n\r\n- change Scaled Softmax to BloomScaledSoftmax\r\n\r\n* small fixes\r\n\r\n- fix alibi dtype\r\n\r\n* major changes\r\n\r\n- removing explicit dtype when loading modules\r\n- fixing test args (torch_dtype=auto)\r\n- add dosctring\r\n\r\n* fix readmes\r\n\r\n* major changes\r\n\r\n- now bloom supports alibi shifting\r\n- refactor a bit the code\r\n- better test tolerance now\r\n\r\n* refactor a bit\r\n\r\n* refactor a bit\r\n\r\n* put correct name on test\r\n\r\n* change docstring\r\n\r\n* small changes\r\n\r\n- fix docstring modeling\r\n- fix test tolerance\r\n\r\n* fix small nit\r\n\r\n- take dtype from tensors in the conversion script\r\n\r\n* 
minor fix\r\n\r\n- fix mdx issue\r\n\r\n* minor fix\r\n\r\n- change config docstring\r\n\r\n* forward contrib credits from PR14084\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Stas Bekman \r\n\r\n* apply modifications\r\n\r\nCo-authored-by: Stas Bekman \r\n\r\n* resolve softmax upcast\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Stas Bekman \r\n\r\n* Update src/transformers/models/bloom/modeling_bloom.py\r\n\r\nCo-authored-by: Niklas Muennighoff \r\n\r\n* final changes modeling\r\n\r\nCo-authored-by: Stas Bekman \r\n\r\n* Merge commit 'd156898f3b9b2c990e5963f5030a7143d57921a2'\r\n\r\n* merge commit\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Stas Bekman \r\n\r\n* apply suggestions\r\n\r\nApply suggestions from Stas comments\r\nCo-authored-by: Stas Bekman \r\n\r\n* Fix gradient checkpointing\r\n\r\nCo-authored-by: Stas Bekman \r\n\r\n* add slow but exact\r\n\r\n* add accelerate compatibility\r\n\r\nCo-authored-by: Nicolas Patry \r\n\r\n* forward contrib credits\r\n\r\nCo-authored-by: thomasw21 \r\nCo-authored-by: sgugger \r\nCo-authored-by: patrickvonplaten \r\nCo-authored-by: Niklas Muennighoff \r\nCo-authored-by: LysandreJik \r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* fix torch device on tests\r\n\r\n* make style\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* fix nits\r\n\r\nCo-authored-by: patrickvonplaten\r\n\r\n* remove final nits\r\n\r\n* fix doc\r\n\r\n- add more details on the doc\r\n- add links to checkpoints\r\n\r\n* Update src/transformers/__init__.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/models/bloom/modeling_bloom.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* apply suggestions\r\n\r\nCo-authored-by: sgugger \r\n\r\n* put test torchscript to false\r\n\r\n* Update src/transformers/models/bloom/modeling_bloom.py\r\n\r\nCo-authored-by: justheuristic \r\n\r\n* fix alibi\r\n\r\n- create alibi only once\r\n\r\n* add small doc\r\n\r\n* make quality\r\n\r\n* replace torch.nn\r\n\r\n* remove token type emb\r\n\r\n* fix fused op + output bias\r\n\r\n* add fused op\r\n\r\n- now can control fused operation from config\r\n\r\n* remove fused op\r\n\r\n* make quality\r\n\r\n* small changes\r\n\r\n- remove unsed args on config\r\n- removed bias gelu file\r\n- make the model torchscriptable\r\n- add torchscript slow tests\r\n\r\n* Update src/transformers/models/bloom/modeling_bloom.py\r\n\r\n* fix slow\r\n\r\n* make style\r\n\r\n* add accelerate support\r\n\r\n* add bloom to deepspeed tests\r\n\r\n* minor changes\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* minor change\r\n\r\n* slow tests pass\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update docs/source/en/model_doc/bloom.mdx\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* minor changes:\r\n\r\n- change docstring\r\n- add link to paper\r\n\r\nCo-authored-by: Thomwolf \r\nCo-authored-by: Thomas Wolf \r\nCo-authored-by: thomasw21 <24695242+thomasw21@users.noreply.github.com>\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\nCo-authored-by: sIncerass \r\nCo-authored-by: Stas Bekman \r\nCo-authored-by: Niklas Muennighoff \r\nCo-authored-by: 
Nicolas Patry \r\nCo-authored-by: thomasw21 \r\nCo-authored-by: sgugger \r\nCo-authored-by: patrickvonplaten \r\nCo-authored-by: LysandreJik \r\nCo-authored-by: Patrick von Platen \r\nCo-authored-by: justheuristic \r\nCo-authored-by: Stas Bekman ", "code": "def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, BloomModel):\n module.gradient_checkpointing = value\n\n\nBLOOM_START_DOCSTRING = r\n\nBLOOM_INPUTS_DOCSTRING = r\n\n\n@add_start_docstrings(\n \"The bare Bloom Model transformer outputting raw hidden-states without any specific head on top.\",\n BLOOM_START_DOCSTRING,\n)", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@add_start_docstrings(\n \"The bare Bloom Model transformer outputting raw hidden-states without any specific head on top.\",\n BLOOM_START_DOCSTRING,\n)", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 52, "n_words": 33, "vocab_size": 30, "complexity": 2, "nloc": 3, "token_counts": 24, "n_ast_nodes": 64, "n_identifiers": 10, "d_id": 5691, "documentation": { "docstring": "\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving, resizing the input embeddings etc.)\n\n This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.\n Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage\n and behavior.\n\n Parameters:\n config ([`BloomConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):\n `input_ids_length` = `sequence_length` if `past_key_values` is `None` else\n `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input\n sequence tokens in the vocabulary.\n\n If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as\n `input_ids`.\n\n Indices can be obtained using [`BloomTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`):\n Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see\n `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have\n their past given to this model should not be passed as `input_ids` as they have already been computed.\n attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range `[0,\n config.max_position_embeddings - 1]`.\n\n [What are position IDs?](../glossary#position-ids)\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n\n If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see\n `past_key_values`).\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n", "n_words": 474, "vocab_size": 241, "n_whitespaces": 957, "language": "en" } }, { "id": 218271, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/importlib/metadata/__init__.py", "file_name": "__init__.py", "fun_name": "__getitem__", "commit_message": "add python 3.10.4 for windows", "code": "def __getitem__(self, name): # -> EntryPoint:\n \n if isinstance(name, int):\n warnings.warn(\n \"Accessing entry points by index is deprecated. 
\"\n \"Cast to tuple if needed.\",\n DeprecationWarning,\n stacklevel=2,\n )\n return super().__getitem__(name)\n try:\n return next(iter(self.select(name=name)))\n except StopIteration:\n raise KeyError(name)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 179, "n_words": 35, "vocab_size": 33, "complexity": 3, "nloc": 13, "token_counts": 64, "n_ast_nodes": 108, "n_identifiers": 15, "d_id": 55236, "documentation": { "docstring": "\n Get the EntryPoint in self matching name.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 277066, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/tf_inspect.py", "file_name": "tf_inspect.py", "fun_name": "isgeneratorfunction", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def isgeneratorfunction(obj):\n \n return _inspect.isgeneratorfunction(\n tf.__internal__.decorator.unwrap(obj)[1]\n )\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 22, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 4, "token_counts": 25, "n_ast_nodes": 42, "n_identifiers": 7, "d_id": 81843, "documentation": { "docstring": "TFDecorator-aware replacement for inspect.isgeneratorfunction.", "n_words": 4, "vocab_size": 4, "n_whitespaces": 3, "language": "en" } }, { "id": 288948, "commit_id": "503434e538af4b708f01cee9ca20bfa8426cec94", "repo": "core", "path": "homeassistant/components/geonetnz_volcano/sensor.py", "file_name": "sensor.py", "fun_name": "_update_from_feed", "commit_message": "Use DistanceConverter in components (#80182)\n\n* Use DistanceConverter in components\r\n\r\n* Adjust for METRIC_SYSTEM", "code": "def _update_from_feed(self, feed_entry, last_update, last_update_successful):\n \n self._title = feed_entry.title\n # Convert distance if not metric system.\n if self._unit_system == CONF_UNIT_SYSTEM_IMPERIAL:\n self._distance = round(\n DistanceConverter.convert(\n feed_entry.distance_to_home, LENGTH_KILOMETERS, LENGTH_MILES\n ),\n 1,\n )\n else:\n self._distance = round(feed_entry.distance_to_home, 1)\n self._latitude = round(feed_entry.coordinates[0], 5)\n self._longitude = round(feed_entry.coordinates[1], 5)\n self._attribution = feed_entry.attribution\n self._alert_level = feed_entry.alert_level\n self._activity = feed_entry.activity\n self._hazards = feed_entry.hazards\n self._feed_last_update = dt.as_utc(last_update) if last_update else None\n self._feed_last_update_successful = (\n dt.as_utc(last_update_successful) if last_update_successful else None\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 276, "n_words": 70, "vocab_size": 52, "complexity": 4, "nloc": 21, "token_counts": 150, "n_ast_nodes": 228, "n_identifiers": 31, "d_id": 88097, "documentation": { "docstring": "Update the internal state from the provided feed entry.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 70443, "commit_id": "d964675ee8fcb7ea58681ac8869733a86d58e4ec", "repo": "wagtail", "path": "wagtail/search/tests/test_indexed_class.py", "file_name": "test_indexed_class.py", "fun_name": "test_checking_core_page_fields_are_indexed", "commit_message": "add check for correct search_fields on pages\n\n- fixes #4940", "code": 
"def test_checking_core_page_fields_are_indexed(self):\n \n\n # first confirm that errors show as EventPage (in test models) has no Page.search_fields\n errors = [error for error in checks.run_checks() if error.id == 'wagtailsearch.W001']\n\n # should only ever get this warning on the sub-classes of the page model\n self.assertEqual([EventPage, SingleEventPage], [error.obj for error in errors])\n\n for error in errors:\n self.assertEqual(error.msg, 'Core Page fields missing in `search_fields`', )\n self.assertIn(\n 'Page model search fields `search_fields = Page.search_fields + [...]`',\n error.hint)\n\n # second check that we get no errors when setting up the models correctly\n with patch_search_fields(EventPage, Page.search_fields + EventPage.search_fields):\n errors = [error for error in checks.run_checks() if error.id == 'wagtailsearch.W001']\n self.assertEqual([], errors)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 233, "n_words": 103, "vocab_size": 70, "complexity": 7, "nloc": 11, "token_counts": 113, "n_ast_nodes": 185, "n_identifiers": 17, "d_id": 15509, "documentation": { "docstring": "Run checks to ensure that when core page fields are missing we get a warning", "n_words": 15, "vocab_size": 15, "n_whitespaces": 14, "language": "en" } }, { "id": 261789, "commit_id": "ecb9a70e82d4ee352e2958c555536a395b53d2bd", "repo": "scikit-learn", "path": "sklearn/preprocessing/tests/test_encoders.py", "file_name": "test_encoders.py", "fun_name": "test_mixed_string_bytes_categoricals", "commit_message": "FIX Ensure dtype of categories is `object` for strings in `OneHotEncoder` (#25174)\n\nCo-authored-by: Guillaume Lemaitre \r\nCo-authored-by: Thomas J. Fan ", "code": "def test_mixed_string_bytes_categoricals():\n \n # data as unicode\n X = np.array([[\"b\"], [\"a\"]], dtype=\"U\")\n # predefined categories as bytes\n categories = [np.array([\"b\", \"a\"], dtype=\"S\")]\n ohe = OneHotEncoder(categories=categories, sparse_output=False)\n\n msg = re.escape(\n \"In column 0, the predefined categories have type 'bytes' which is incompatible\"\n \" with values of type 'str_'.\"\n )\n\n with pytest.raises(ValueError, match=msg):\n ohe.fit(X)\n\n\n@pytest.mark.parametrize(\"missing_value\", [np.nan, None])", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"missing_value\", [np.nan, None])", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 101, "n_words": 54, "vocab_size": 44, "complexity": 1, "nloc": 10, "token_counts": 82, "n_ast_nodes": 175, "n_identifiers": 20, "d_id": 76996, "documentation": { "docstring": "Check that this mixture of predefined categories and X raises an error.\n\n Categories defined as bytes can not easily be compared to data that is\n a string.\n ", "n_words": 27, "vocab_size": 26, "n_whitespaces": 36, "language": "en" } }, { "id": 66793, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/patches/v13_0/set_company_in_leave_ledger_entry.py", "file_name": "set_company_in_leave_ledger_entry.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute():\n\tfrappe.reload_doc(\"HR\", \"doctype\", \"Leave Allocation\")\n\tfrappe.reload_doc(\"HR\", \"doctype\", \"Leave Ledger Entry\")\n\tfrappe.db.sql(\n\t\t\n\t)\n\tfrappe.db.sql(\n\t\t\n\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": 
"", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 8, "n_words": 15, "vocab_size": 10, "complexity": 1, "nloc": 9, "token_counts": 40, "n_ast_nodes": 78, "n_identifiers": 5, "d_id": 14336, "documentation": { "docstring": "update `tabLeave Ledger Entry` as lle set company = (select company from `tabEmployee` where employee = lle.employee)update `tabLeave Allocation` as la set company = (select company from `tabEmployee` where employee = la.employee)", "n_words": 32, "vocab_size": 18, "n_whitespaces": 31, "language": "en" } }, { "id": 42549, "commit_id": "8a4cf5d94eb94b6427c5d1d7907ba07b119932c5", "repo": "nltk", "path": "nltk/text.py", "file_name": "text.py", "fun_name": "collocations", "commit_message": "Docstring tests (#3050)\n\n* fixed pytests\r\n\r\n* fixed more pytests\r\n\r\n* fixed more pytest and changed multiline pytest issues fixes for snowball.py and causal.py\r\n\r\n* fixed pytests (mainly multiline or rounding issues)\r\n\r\n* fixed treebank pytests, removed test for return_string=True (deprecated)\r\n\r\n* fixed destructive.py pytests, removed test for return_string=True (deprecated)\r\n\r\n* fixed pytest (rounding issues)\r\n\r\n* fixed pytest (initialised missing object)\r\n\r\n* fixed pytest (formatting issues)\r\n\r\n* fixed pytest (formatting issues)\r\n\r\n* fixed pytest (formatting issues)\r\n\r\n* added pytest +SKIP for deprecated module stanford\r\n\r\n* updated AUTHORS.md\r\n\r\n* changed docstring corrections by usage of ELLIPSIS and different roundings\r\n\r\n* fixed AUTHORS.md to be consistent\r\n\r\n* Fix framenet doctest formatting with pprint\r\n\r\n* Change docstring on MultiListBox.__init__\r\n\r\nI believe the original typo was misinterpreted and changed to something that was not originally intended.\r\n\r\nCo-authored-by: Jan Lennartz \r\nCo-authored-by: Tom Aarsen <37621491+tomaarsen@users.noreply.github.com>\r\nCo-authored-by: Tom Aarsen ", "code": "def collocations(self, num=20, window_size=2):\n \n\n collocation_strings = [\n w1 + \" \" + w2 for w1, w2 in self.collocation_list(num, window_size)\n ]\n print(tokenwrap(collocation_strings, separator=\"; \"))\n", "url": "https://github.com/nltk/nltk.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 62, "n_words": 23, "vocab_size": 20, "complexity": 2, "nloc": 5, "token_counts": 47, "n_ast_nodes": 76, "n_identifiers": 11, "d_id": 7611, "documentation": { "docstring": "\n Print collocations derived from the text, ignoring stopwords.\n\n >>> from nltk.book import text4\n >>> text4.collocations() # doctest: +NORMALIZE_WHITESPACE\n United States; fellow citizens; years ago; four years; Federal\n Government; General Government; American people; Vice President; God\n bless; Chief Justice; one another; fellow Americans; Old World;\n Almighty God; Fellow citizens; Chief Magistrate; every citizen; Indian\n tribes; public debt; foreign nations\n\n\n :param num: The maximum number of collocations to print.\n :type num: int\n :param window_size: The number of tokens spanned by a collocation (default=2)\n :type window_size: int\n ", "n_words": 84, "vocab_size": 69, "n_whitespaces": 204, "language": "en" } }, { "id": 65407, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/utils.py", "file_name": "utils.py", "fun_name": "get_held_invoices", "commit_message": "style: format code with black", "code": "def get_held_invoices(party_type, party):\n\t\n\theld_invoices = None\n\n\tif party_type == 
\"Supplier\":\n\t\theld_invoices = frappe.db.sql(\n\t\t\t\"select name from `tabPurchase Invoice` where release_date IS NOT NULL and release_date > CURDATE()\",\n\t\t\tas_dict=1,\n\t\t)\n\t\theld_invoices = set(d[\"name\"] for d in held_invoices)\n\n\treturn held_invoices\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 29, "n_words": 38, "vocab_size": 32, "complexity": 3, "nloc": 9, "token_counts": 46, "n_ast_nodes": 78, "n_identifiers": 10, "d_id": 13888, "documentation": { "docstring": "\n\tReturns a list of names Purchase Invoices for the given party that are on hold\n\t", "n_words": 15, "vocab_size": 15, "n_whitespaces": 14, "language": "en" } }, { "id": 218555, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ipaddress.py", "file_name": "ipaddress.py", "fun_name": "is_reserved", "commit_message": "add python 3.10.4 for windows", "code": "def is_reserved(self):\n \n return (self.network_address.is_reserved and\n self.broadcast_address.is_reserved)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 35, "n_words": 6, "vocab_size": 6, "complexity": 2, "nloc": 3, "token_counts": 20, "n_ast_nodes": 34, "n_identifiers": 4, "d_id": 55386, "documentation": { "docstring": "Test if the address is otherwise IETF reserved.\n\n Returns:\n A boolean, True if the address is within one of the\n reserved IPv6 Network ranges.\n\n ", "n_words": 24, "vocab_size": 19, "n_whitespaces": 60, "language": "en" } }, { "id": 199144, "commit_id": "645539ed9a65eec4a7bfc4571bdf2135cfb68cfb", "repo": "sympy", "path": "sympy/tensor/array/dense_ndim_array.py", "file_name": "dense_ndim_array.py", "fun_name": "reshape", "commit_message": "Fix bug in error message (cast tuple to str)\n\n```python\r\nfrom sympy.abc import x, y, z \r\nfrom sympy import Array \r\n \r\na2 = Array([[[x, y], [z, x*z]], [[1, x*y], [1/x, x/y]]])\r\na2.reshape(1) \r\n```\r\nOut:\r\n```text\r\nTypeError: can only concatenate str (not \"tuple\") to str\r\n```\r\n\r\nThis casts `newshape` to a string to the error message makes sense.", "code": "def reshape(self, *newshape):\n \n new_total_size = functools.reduce(lambda x,y: x*y, newshape)\n if new_total_size != self._loop_size:\n raise ValueError(\"Invalid reshape parameters \" + str(newshape))\n\n # there is no `.func` as this class does not subtype `Basic`:\n return type(self)(self._array, newshape)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 81, "n_words": 35, "vocab_size": 33, "complexity": 2, "nloc": 5, "token_counts": 55, "n_ast_nodes": 90, "n_identifiers": 13, "d_id": 49163, "documentation": { "docstring": "\n Returns MutableDenseNDimArray instance with new shape. Elements number\n must be suitable to new shape. 
The only argument of method sets\n new shape.\n\n Examples\n ========\n\n >>> from sympy import MutableDenseNDimArray\n >>> a = MutableDenseNDimArray([1, 2, 3, 4, 5, 6], (2, 3))\n >>> a.shape\n (2, 3)\n >>> a\n [[1, 2, 3], [4, 5, 6]]\n >>> b = a.reshape(3, 2)\n >>> b.shape\n (3, 2)\n >>> b\n [[1, 2], [3, 4], [5, 6]]\n\n ", "n_words": 69, "vocab_size": 49, "n_whitespaces": 196, "language": "en" } }, { "id": 19466, "commit_id": "7e33fcae4384563b4c927fd44318c29dd524a097", "repo": "pipenv", "path": "pipenv/patched/notpip/_internal/locations/__init__.py", "file_name": "__init__.py", "fun_name": "_looks_like_red_hat_scheme", "commit_message": "Vendor in pip 21.2.4 release (from pip 21.2.2 prior). (#5009)\n\n* Vendor in pip 21.2.4 release (from pip 21.2.2 prior).\r\n\r\n* Add news fragment for pip 21.2.4 vendor update.\r\n\r\n* Add potentially missing LICENSE files", "code": "def _looks_like_red_hat_scheme() -> bool:\n \n from distutils.command.install import install\n from distutils.dist import Distribution\n\n cmd: Any = install(Distribution())\n cmd.finalize_options()\n return (\n cmd.exec_prefix == f\"{os.path.normpath(sys.exec_prefix)}/local\"\n and cmd.prefix == f\"{os.path.normpath(sys.prefix)}/local\"\n )\n\n\n@functools.lru_cache(maxsize=None)", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "@functools.lru_cache(maxsize=None)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 62, "n_words": 28, "vocab_size": 25, "complexity": 2, "nloc": 16, "token_counts": 52, "n_ast_nodes": 137, "n_identifiers": 19, "d_id": 2983, "documentation": { "docstring": "Red Hat patches ``sys.prefix`` and ``sys.exec_prefix``.\n\n Red Hat's ``00251-change-user-install-location.patch`` changes the install\n command's ``prefix`` and ``exec_prefix`` to append ``\"/local\"``. This is\n (fortunately?) 
done quite unconditionally, so we create a default command\n object without any configuration to detect this.\n ", "n_words": 38, "vocab_size": 35, "n_whitespaces": 53, "language": "en" } }, { "id": 63355, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", "file_name": "pyparsing.py", "fun_name": "__call__", "commit_message": "upd; format", "code": "def __call__(self, name=None):\n \n if name is not None:\n return self._setResultsName(name)\n else:\n return self.copy()\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 56, "n_words": 13, "vocab_size": 12, "complexity": 2, "nloc": 5, "token_counts": 31, "n_ast_nodes": 52, "n_identifiers": 5, "d_id": 13264, "documentation": { "docstring": "\n Shortcut for :class:`setResultsName`, with ``listAllMatches=False``.\n\n If ``name`` is given with a trailing ``'*'`` character, then ``listAllMatches`` will be\n passed as ``True``.\n\n If ``name` is omitted, same as calling :class:`copy`.\n\n Example::\n\n # these are equivalent\n userdata = Word(alphas).setResultsName(\"name\") + Word(nums + \"-\").setResultsName(\"socsecno\")\n userdata = Word(alphas)(\"name\") + Word(nums + \"-\")(\"socsecno\")\n ", "n_words": 48, "vocab_size": 38, "n_whitespaces": 124, "language": "en" } }, { "id": 276975, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/losses_utils.py", "file_name": "losses_utils.py", "fun_name": "_num_elements", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _num_elements(losses):\n \n with backend.name_scope(\"num_elements\") as scope:\n return tf.cast(tf.size(losses, name=scope), dtype=losses.dtype)\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 23, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 38, "n_ast_nodes": 66, "n_identifiers": 10, "d_id": 81809, "documentation": { "docstring": "Computes the number of elements in `losses` tensor.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 270387, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/distribute/keras_correctness_test_base.py", "file_name": "keras_correctness_test_base.py", "fun_name": "get_input_for_correctness_test", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def get_input_for_correctness_test(self, **kwargs):\n \n\n return get_correctness_test_inputs(**kwargs)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 27, "n_identifiers": 4, "d_id": 80460, "documentation": { "docstring": "Generates inputs that are dictionaries.\n\n We only provide a default implementation of this method here. 
If you need\n more customized way of providing input to your model, overwrite this method.\n\n Args:\n **kwargs: key word arguments about how to create the input dictionaries\n\n Returns:\n Three dictionaries representing the input for fit(), evaluate() and\n predict()\n ", "n_words": 53, "vocab_size": 46, "n_whitespaces": 115, "language": "en" } }, { "id": 171304, "commit_id": "c7010a7adec1c47a4642fa068544699fc8e1ea6a", "repo": "pandas", "path": "pandas/core/dtypes/dtypes.py", "file_name": "dtypes.py", "fun_name": "__new__", "commit_message": "STYLE enable pylint's redefined-outer-name (#49671)\n\n* fix warning for pandas/core/dtypes/cast.py, pandas/core/dtypes/dtypes.py, pandas/core/indexes/base.py\r\n\r\n* fix warning for pandas/core/dtypes/cast.py, pandas/core/dtypes/dtypes.py, pandas/core/indexes/base.py\r\n\r\n* fix warning for pandas/core/dtypes/cast.py, pandas/core/dtypes/dtypes.py, pandas/core/indexes/base.py\r\n\r\n* fix warning for pandas/core/dtypes/cast.py, pandas/core/dtypes/dtypes.py, pandas/core/indexes/base.py\r\n\r\nCo-authored-by: bishwas jha ", "code": "def __new__(cls, freq=None):\n \n if isinstance(freq, PeriodDtype):\n return freq\n\n elif freq is None:\n # empty constructor for pickle compat\n # -10_000 corresponds to PeriodDtypeCode.UNDEFINED\n u = PeriodDtypeBase.__new__(cls, -10_000)\n u._freq = None\n return u\n\n if not isinstance(freq, BaseOffset):\n freq = cls._parse_dtype_strict(freq)\n\n try:\n return cls._cache_dtypes[freq.freqstr]\n except KeyError:\n dtype_code = freq._period_dtype_code\n u = PeriodDtypeBase.__new__(cls, dtype_code)\n u._freq = freq\n cls._cache_dtypes[freq.freqstr] = u\n return u\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 244, "n_words": 59, "vocab_size": 37, "complexity": 5, "nloc": 17, "token_counts": 106, "n_ast_nodes": 169, "n_identifiers": 15, "d_id": 40660, "documentation": { "docstring": "\n Parameters\n ----------\n freq : frequency\n ", "n_words": 5, "vocab_size": 5, "n_whitespaces": 34, "language": "en" } }, { "id": 131062, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/state.py", "file_name": "state.py", "fun_name": "actor_table", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def actor_table(self, actor_id):\n \n self._check_connected()\n\n if actor_id is not None:\n actor_id = ray.ActorID(hex_to_binary(actor_id))\n actor_info = self.global_state_accessor.get_actor_info(actor_id)\n if actor_info is None:\n return {}\n else:\n actor_table_data = gcs_utils.ActorTableData.FromString(actor_info)\n return self._gen_actor_info(actor_table_data)\n else:\n actor_table = self.global_state_accessor.get_actor_table()\n results = {}\n for i in range(len(actor_table)):\n actor_table_data = gcs_utils.ActorTableData.FromString(actor_table[i])\n results[\n binary_to_hex(actor_table_data.actor_id)\n ] = self._gen_actor_info(actor_table_data)\n\n return results\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 273, "n_words": 48, "vocab_size": 30, "complexity": 4, "nloc": 19, "token_counts": 124, "n_ast_nodes": 202, "n_identifiers": 21, "d_id": 29463, "documentation": { "docstring": "Fetch and parse the actor table information for a single actor ID.\n\n Args:\n actor_id: A hex string of the actor ID to fetch 
information about.\n If this is None, then the actor table is fetched.\n\n Returns:\n Information from the actor table.\n ", "n_words": 41, "vocab_size": 31, "n_whitespaces": 99, "language": "en" } }, { "id": 271076, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/compile_utils_test.py", "file_name": "compile_utils_test.py", "fun_name": "test_ragged_tensor_output", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_ragged_tensor_output(self):\n \n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 9, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 15, "token_counts": 192, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 80686, "documentation": { "docstring": "Ensure that ragged tensors can be passed as targets and predictions.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 187159, "commit_id": "3d44da082b3ba202b9d0557bfd8ce747a1d7960c", "repo": "streamlink", "path": "tests/test_api_validate.py", "file_name": "test_api_validate.py", "fun_name": "test_xml_element", "commit_message": "plugin.api.validate: implement ValidationError\n\n- Implement `ValidationError`\n - Inherit from `ValueError` to preserve backwards compatiblity\n - Allow collecting multiple errors (AnySchema)\n - Keep an error stack of parent `ValidationError`s or other exceptions\n - Format error stack when converting error to string\n- Raise `ValidationError` instead of `ValueError`\n - Add error contexts where it makes sense\n - Add schema names to error instances\n- Add and update tests", "code": "def test_xml_element(self):\n el = Element(\"tag\")\n el.set(\"key\", \"value\")\n el.text = \"text\"\n childA = Element(\"childA\")\n childB = Element(\"childB\")\n el.append(childA)\n el.append(childB)\n\n upper = transform(str.upper)\n newelem: Element = validate(xml_element(tag=upper, text=upper, attrib={upper: upper}), el)\n\n assert newelem is not el\n assert newelem.tag == \"TAG\"\n assert newelem.text == \"TEXT\"\n assert newelem.attrib == {\"KEY\": \"VALUE\"}\n assert newelem[0].tag == \"childA\"\n assert newelem[1].tag == \"childB\"\n assert newelem[0] is not childA\n assert newelem[1] is not childB\n\n with self.assertRaises(ValueError) as cm:\n validate(xml_element(tag=\"invalid\"), el)\n assert_validationerror(cm.exception, )\n\n with self.assertRaises(ValueError) as cm:\n validate(xml_element(text=\"invalid\"), el)\n assert_validationerror(cm.exception, )\n\n with self.assertRaises(ValueError) as cm:\n validate(xml_element(attrib={\"key\": \"invalid\"}), el)\n assert_validationerror(cm.exception, )\n", "url": "https://github.com/streamlink/streamlink.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 283, "n_words": 90, "vocab_size": 52, "complexity": 1, "nloc": 44, "token_counts": 235, "n_ast_nodes": 407, "n_identifiers": 22, "d_id": 45718, "documentation": { "docstring": "\n ValidationError(XmlElementSchema):\n Unable to validate XML tag\n Context(equality):\n 'tag' does not equal 'invalid'\n \n ValidationError(XmlElementSchema):\n Unable to validate XML text\n Context(equality):\n 'text' does not equal 'invalid'\n \n ValidationError(XmlElementSchema):\n Unable to validate XML attributes\n Context(dict):\n Unable to validate value of key 'key'\n Context(equality):\n 'value' does not equal 'invalid'\n ", "n_words": 44, 
"vocab_size": 21, "n_whitespaces": 256, "language": "en" } }, { "id": 337934, "commit_id": "0c6bdc2c237ac071be99ac6f93ddfbc8bbcb8441", "repo": "accelerate", "path": "src/accelerate/test_utils/testing.py", "file_name": "testing.py", "fun_name": "require_fsdp", "commit_message": "enhancements and fixes for FSDP and DeepSpeed (#532)\n\n* checkpointing enhancements and fixes for FSDP and DeepSpeed\r\n\r\n* resolving comments\r\n\r\n1. Adding deprecation args and warnings in launcher for FSDP\r\n2. Handling old configs to work with new launcher args wrt FSDP.\r\n3. Reverting changes to public methods in `checkpointing.py` and handling it in `Accelerator`\r\n4. Explicitly writing the defaults of various FSDP options in `dataclasses` for readability.\r\n\r\n* fixes\r\n\r\n1. FSDP wrapped model being added to the `_models`.\r\n2. Not passing the env variables when args are None.\r\n\r\n* resolving comments\r\n\r\n* adding FSDP for all the collective operations\r\n\r\n* adding deepspeed and fsdp tests\r\n\r\n1. Removes mrpc datafiles and directly relies on HF datasets as it was throwing `file not found` error when running from within `tests` folder. Updating `moke_dataloaders` as a result.\r\n2. adding `test_performance.py`, `test_memory.py` and `test_checkpointing.py` for multi-gpu FSDP and DeepSpeed tests\r\n\r\n* reverting `mocked_dataloader` changes\r\n\r\n* adding FSDP tests\r\n\r\n* data files revert\r\n\r\n* excluding fsdp tests from `tests_core`\r\n\r\n* try 2\r\n\r\n* adding time delay to avoid `torchrun` from crashing at times leading which causing flaky behaviour\r\n\r\n* reducing the time of tests\r\n\r\n* fixes\r\n\r\n* fix\r\n\r\n* fixes and reduce time further\r\n\r\n* reduce time further and minor fixes\r\n\r\n* adding a deepspeed basic e2e test for single gpu setup", "code": "def require_fsdp(test_case):\n \n return unittest.skipUnless(is_torch_version(\">=\", \"1.12.0\"), \"test requires torch version >= 1.12.0\")(test_case)\n\n", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 17, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 2, "token_counts": 23, "n_ast_nodes": 44, "n_identifiers": 5, "d_id": 121141, "documentation": { "docstring": "\n Decorator marking a test that requires FSDP installed. 
These tests are skipped when FSDP isn't installed\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 23, "language": "en" } }, { "id": 136556, "commit_id": "c51b0c9a5664e5c6df3d92f9093b56e61b48f514", "repo": "ray", "path": "python/ray/autoscaler/batching_node_provider.py", "file_name": "batching_node_provider.py", "fun_name": "cur_num_workers", "commit_message": "[autoscaler][kuberay] Batching node provider (#29933)\n\nImplements the abstract subclass of NodeProvider proposed in\r\nhttps://docs.google.com/document/d/1JyQINBFirZw7YenA_14zize0R3hIII1_fnfQytIXTPo/\r\n\r\nThe goal is to simplify the autoscaler's interactions with external cluster managers like the KubeRay operator.\r\n\r\nA follow-up PR will implement KuberayNodeProvider as a subclass of the BatchingNodeProvider added here.\r\n\r\nSigned-off-by: Dmitri Gekhtman ", "code": "def cur_num_workers(self):\n \n # Factor like this for convenient re-use.\n return self._cur_num_workers(self.node_data_dict)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 32, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 28, "n_identifiers": 4, "d_id": 30939, "documentation": { "docstring": "Returns dict mapping node type to the number of nodes of that type.", "n_words": 13, "vocab_size": 12, "n_whitespaces": 12, "language": "en" } }, { "id": 102926, "commit_id": "4a3ed628092fac5b2552c8554c0482c569d14323", "repo": "kitty", "path": "kittens/tui/dircolors.py", "file_name": "dircolors.py", "fun_name": "generate_lscolors", "commit_message": "Refactor: More f-string for kittens", "code": "def generate_lscolors(self) -> str:\n \n", "url": "https://github.com/kovidgoyal/kitty.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 11, "n_words": 4, "vocab_size": 4, "complexity": 2, "nloc": 4, "token_counts": 29, "n_ast_nodes": 16, "n_identifiers": 3, "d_id": 21582, "documentation": { "docstring": " Output the database in the format used by the LS_COLORS environment variable. ", "n_words": 12, "vocab_size": 10, "n_whitespaces": 13, "language": "en" } }, { "id": 100363, "commit_id": "c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "lib/utils.py", "file_name": "utils.py", "fun_name": "get_keras_custom_objects", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. 
Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def get_keras_custom_objects():\n \n # pylint:disable=no-name-in-module,import-outside-toplevel\n if get_backend() == \"amd\" or get_tf_version() < 2.8:\n from keras.utils import get_custom_objects\n else:\n from keras.utils.generic_utils import get_custom_objects\n return get_custom_objects()\n\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 52, "n_words": 23, "vocab_size": 20, "complexity": 3, "nloc": 6, "token_counts": 40, "n_ast_nodes": 68, "n_identifiers": 7, "d_id": 19852, "documentation": { "docstring": " Wrapper to obtain keras.utils.get_custom_objects from correct location depending on\n backend used and tensorflow version. ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 18, "language": "en" } }, { "id": 133770, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/agents/mbmpo/mbmpo.py", "file_name": "mbmpo.py", "fun_name": "__call__", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def __call__(self, data_tuple):\n \n # Metaupdate Step.\n print(\"Meta-Update Step\")\n samples = data_tuple[0]\n adapt_metrics_dict = data_tuple[1]\n self.postprocess_metrics(\n adapt_metrics_dict, prefix=\"MAMLIter{}\".format(self.step_counter)\n )\n\n # MAML Meta-update.\n fetches = None\n for i in range(self.maml_optimizer_steps):\n fetches = self.workers.local_worker().learn_on_batch(samples)\n learner_stats = get_learner_stats(fetches)\n\n # Update KLs.", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 143, "n_words": 37, "vocab_size": 30, "complexity": 3, "nloc": 34, "token_counts": 242, "n_ast_nodes": 126, "n_identifiers": 19, "d_id": 30102, "documentation": { "docstring": "Args:\n data_tuple (tuple): 1st element is samples collected from MAML\n Inner adaptation steps and 2nd element is accumulated metrics\n ", "n_words": 19, "vocab_size": 17, "n_whitespaces": 40, "language": "en" } }, { "id": 241810, "commit_id": "a1546047bc146bf3189fa905c3415475b0e47931", "repo": "scipy", "path": "scipy/stats/_stats_py.py", "file_name": "_stats_py.py", "fun_name": "hmean", "commit_message": "ENH: stats: add weights in harmonic mean (#15347)\n\nCo-authored-by: Pamphile Roy ", "code": "def hmean(a, axis=0, dtype=None, *, weights=None):\n \n if not isinstance(a, np.ndarray):\n a = np.array(a, dtype=dtype)\n elif dtype:\n # Must change the default dtype allowing array type\n if isinstance(a, np.ma.MaskedArray):\n a = np.ma.asarray(a, dtype=dtype)\n else:\n a = np.asarray(a, dtype=dtype)\n\n if np.all(a >= 0):\n # Harmonic mean only defined if greater than or equal to zero.\n if weights is not None:\n weights = np.asanyarray(weights, dtype=dtype)\n\n with np.errstate(divide='ignore'):\n return 1.0 / np.average(1.0 / a, axis=axis, weights=weights)\n else:\n raise ValueError(\"Harmonic mean only defined if all elements greater \"\n \"than or equal to zero\")\n\n\nModeResult = namedtuple('ModeResult', ('mode', 'count'))\n\n", "url": "https://github.com/scipy/scipy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 231, "n_words": 93, "vocab_size": 66, 
"complexity": 6, "nloc": 16, "token_counts": 155, "n_ast_nodes": 265, "n_identifiers": 20, "d_id": 69705, "documentation": { "docstring": "Calculate the harmonic mean along the specified axis.\n\n That is: n / (1/x1 + 1/x2 + ... + 1/xn)\n\n Parameters\n ----------\n a : array_like\n Input array, masked array or object that can be converted to an array.\n axis : int or None, optional\n Axis along which the harmonic mean is computed. Default is 0.\n If None, compute over the whole array `a`.\n dtype : dtype, optional\n Type of the returned array and of the accumulator in which the\n elements are summed. If `dtype` is not specified, it defaults to the\n dtype of `a`, unless `a` has an integer `dtype` with a precision less\n than that of the default platform integer. In that case, the default\n platform integer is used.\n weights : array_like, optional\n The weights array can either be 1-D (in which case its length must be\n the size of `a` along the given `axis`) or of the same shape as `a`.\n Default is None, which gives each value a weight of 1.0.\n\n .. versionadded:: 1.9\n\n Returns\n -------\n hmean : ndarray\n See `dtype` parameter above.\n\n See Also\n --------\n numpy.mean : Arithmetic average\n numpy.average : Weighted average\n gmean : Geometric mean\n\n Notes\n -----\n The harmonic mean is computed over a single dimension of the input\n array, axis=0 by default, or all values in the array if axis=None.\n float64 intermediate and return values are used for integer inputs.\n\n Use masked arrays to ignore any non-finite values in the input or that\n arise in the calculations such as Not a Number and infinity.\n\n References\n ----------\n .. [1] \"Weighted Harmonic Mean\", *Wikipedia*,\n https://en.wikipedia.org/wiki/Harmonic_mean#Weighted_harmonic_mean\n .. [2] Ferger, F., \"The nature and use of the harmonic mean\", Journal of\n the American Statistical Association, vol. 26, pp. 36-40, 1931\n\n Examples\n --------\n >>> from scipy.stats import hmean\n >>> hmean([1, 4])\n 1.6000000000000001\n >>> hmean([1, 2, 3, 4, 5, 6, 7])\n 2.6997245179063363\n\n ", "n_words": 302, "vocab_size": 188, "n_whitespaces": 516, "language": "en" } }, { "id": 204149, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/humanize/templatetags/humanize.py", "file_name": "humanize.py", "fun_name": "ordinal", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def ordinal(value):\n \n try:\n value = int(value)\n except (TypeError, ValueError):\n return value\n if value % 100 in (11, 12, 13):\n # Translators: Ordinal format for 11 (11th), 12 (12th), and 13 (13th).\n value = pgettext(\"ordinal 11, 12, 13\", \"{}th\").format(value)\n else:\n templates = (\n # Translators: Ordinal format when value ends with 0, e.g. 80th.\n pgettext(\"ordinal 0\", \"{}th\"),\n # Translators: Ordinal format when value ends with 1, e.g. 81st, except 11.\n pgettext(\"ordinal 1\", \"{}st\"),\n # Translators: Ordinal format when value ends with 2, e.g. 82nd, except 12.\n pgettext(\"ordinal 2\", \"{}nd\"),\n # Translators: Ordinal format when value ends with 3, e.g. 83th, except 13.\n pgettext(\"ordinal 3\", \"{}rd\"),\n # Translators: Ordinal format when value ends with 4, e.g. 84th.\n pgettext(\"ordinal 4\", \"{}th\"),\n # Translators: Ordinal format when value ends with 5, e.g. 85th.\n pgettext(\"ordinal 5\", \"{}th\"),\n # Translators: Ordinal format when value ends with 6, e.g. 
86th.\n pgettext(\"ordinal 6\", \"{}th\"),\n # Translators: Ordinal format when value ends with 7, e.g. 87th.\n pgettext(\"ordinal 7\", \"{}th\"),\n # Translators: Ordinal format when value ends with 8, e.g. 88th.\n pgettext(\"ordinal 8\", \"{}th\"),\n # Translators: Ordinal format when value ends with 9, e.g. 89th.\n pgettext(\"ordinal 9\", \"{}th\"),\n )\n value = templates[value % 10].format(value)\n # Mark value safe so i18n does not break with or see #19988\n return mark_safe(value)\n\n\n@register.filter(is_safe=True)", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "@register.filter(is_safe=True)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 501, "n_words": 212, "vocab_size": 94, "complexity": 3, "nloc": 22, "token_counts": 143, "n_ast_nodes": 278, "n_identifiers": 12, "d_id": 50649, "documentation": { "docstring": "\n Convert an integer to its ordinal as a string. 1 is '1st', 2 is '2nd',\n 3 is '3rd', etc. Works for any integer.\n ", "n_words": 23, "vocab_size": 21, "n_whitespaces": 33, "language": "en" } }, { "id": 285200, "commit_id": "9e1a58e2dbedec4e4a9f9c2e32ddf091776c606b", "repo": "OpenBBTerminal", "path": "openbb_terminal/econometrics/econometrics_model.py", "file_name": "econometrics_model.py", "fun_name": "get_granger_causality", "commit_message": "Here we merge all API Refactor related branches (#2236)\n\n* Update api.py\r\n\r\n* Updated forex menu\r\n\r\n* refactor ycrv command\r\n\r\n* refactor ycrv command black\r\n\r\n* refactor ecocal command\r\n\r\n* Minh changes\r\n\r\n* Adding space to test pushing\r\n\r\n* title fix ecocal df\r\n\r\n* get economic calendar annotation\r\n\r\n* fix investingcom tests\r\n\r\n* refactor index command\r\n\r\n* refactor overview command\r\n\r\n* give defaults to wsj view function args\r\n\r\n* rename date args investincom\r\n\r\n* refacto bigmac command\r\n\r\n* fix ecocal typo\r\n\r\n* refactor rtps command\r\n\r\n* alphavantage gdp\r\n\r\n* alphavantage gdp per capita\r\n\r\n* alphavantage cpi\r\n\r\n* alphavantage tyld\r\n\r\n* alphavantage inf\r\n\r\n* refactor macro command\r\n\r\n* refactor macro command w helpers\r\n\r\n* refactor treasury command\r\n\r\n* fix macro on terminal\r\n\r\n* treasury labels\r\n\r\n* refactor maturities\r\n\r\n* update treasury maturities doc strings\r\n\r\n* refactor get economic calendar finhub\r\n\r\n* refactor map command api\r\n\r\n* display map filter choices\r\n\r\n* route economy api to performance map\r\n\r\n* route economy api to performance map\r\n\r\n* display group choices on valuation command\r\n\r\n* refactor performance and valuation commands\r\n\r\n* refactor spectrum model and view\r\n\r\n* add choices to spectrum controller\r\n\r\n* delete image after view\r\n\r\n* fix model tests finviz\r\n\r\n* fix finciz view tests\r\n\r\n* refactor futures\r\n\r\n* fix some tests\r\n\r\n* fix more tests\r\n\r\n* fix controller test\r\n\r\n* refactor fred series notes\r\n\r\n* update fred notes docstring\r\n\r\n* refacto fred series ids\r\n\r\n* fix pred and qa when empty datasets\r\n\r\n* refactor fred\r\n\r\n* uncomment stuff\r\n\r\n* refacto get series data\r\n\r\n* fix some tests\r\n\r\n* set defaults on args\r\n\r\n* refactor fred yield curve\r\n\r\n* black\r\n\r\n* fix spell and remove ecocal names\r\n\r\n* fix linting\r\n\r\n* linting\r\n\r\n* pylint fix\r\n\r\n* change dangerous defaults\r\n\r\n* Working through crypto fixes (#2256)\r\n\r\n* Working through crypto fixes\r\n\r\n* Continued adding crypto stuff\r\n\r\n* Added crypto 
overview\r\n\r\n* Added test fixes\r\n\r\n* Added fixtures\r\n\r\n* Fixed tests\r\n\r\n* Fixed charting issue\r\n\r\n* Removed broken APIs\r\n\r\n* Final adjustments\r\n\r\n* Added test fixes\r\n\r\n* map get groups and get ycrv countries into old api\r\n\r\n* exposed econdb helper funcs\r\n\r\n* remove helpers\r\n\r\n* refactor search indices\r\n\r\n* linting\r\n\r\n* refactor arg currency\r\n\r\n* pylint from currency\r\n\r\n* Started switching crpyto ascending to ascend\r\n\r\n* Merging\r\n\r\n* Portfolio model arguements, params, and docstring\r\n\r\n* Refactored for etf commands (#2292)\r\n\r\n* Refactored for etf commands\r\n\r\n* Fixed tests\r\n\r\n* Added load command\r\n\r\n* Fixed menu\r\n\r\n* Portfolio logic fixes\r\n\r\n* Added econometrics (#2260)\r\n\r\n* Added econometrics\r\n\r\n* Fixed tests\r\n\r\n* Simplified API\r\n\r\n* Added test fixes\r\n\r\n* Added test csv\r\n\r\n* Allowed examples to be loaded\r\n\r\n* Fund refactor (#2291)\r\n\r\n* Fund refactor\r\n\r\n* Changed fund_name and fund to name\r\n\r\n* Changed ascending to ascend\r\n\r\n* Stock menu refactoring for easier API usage (#2194)\r\n\r\n* Stocks refactoring for easier API usage\r\n\r\n* Linting\r\n\r\n* Refactor newly added features\r\n\r\n* Linting\r\n\r\n* Fixing tests\r\n\r\n* Refactor common files used by stocks menu\r\n\r\n* Fixing flake8\r\n\r\n* Fix linting and tests\r\n\r\n* Linting\r\n\r\n* Fix flake8\r\n\r\n* refactor insider_data\r\n\r\n* refactor mentions\r\n\r\n* refactor watchlist\r\n\r\n* refactor sentiment\r\n\r\n* refactor sentiment\r\n\r\n* fix yahoofinance tests\r\n\r\n* refactor load and candle\r\n\r\n* refactor get_news and display_news\r\n\r\n* refactor stocks.ins.act\r\n\r\n* candle default matplotlib\r\n\r\n* fix yahoofinance_view tests\r\n\r\n* fix ark model tests\r\n\r\n* fix ark view tests\r\n\r\n* fix business insider model\r\n\r\n* fix business insider view\r\n\r\n* refactor csimarket model\r\n\r\n* fix tests csi market model\r\n\r\n* update dd controller\r\n\r\n* fix get suppliers tests\r\n\r\n* fix dd controller tests\r\n\r\n* fix finhub tests\r\n\r\n* fix finviz tests\r\n\r\n* fix fmp tests\r\n\r\n* fix marketwatch tests\r\n\r\n* corrected argument keywords in test_bt_model\r\n\r\n* corrected argument keywords in test_bt_view\r\n\r\n* refactor fa controller\r\n\r\n* refactor marketwatch view\r\n\r\n* refactor gov controller\r\n\r\n* fix tests fa av\r\n\r\n* fix tests elect\r\n\r\n* fix dcf tests\r\n\r\n* fix polygon tests\r\n\r\n* fix fmp tests\r\n\r\n* fix quiverquant tests\r\n\r\n* fix yahoofinance fa tests\r\n\r\n* fix more fa tests\r\n\r\n* fix insider tests\r\n\r\n* fix more tests\r\n\r\n* fix more tests\r\n\r\n* fix options tests\r\n\r\n* fix stock gov tests\r\n\r\n* fix tests test_ba_controller\r\n\r\n* fix tests for test_finviz_compare_model.py\r\n\r\n* fixed 2 tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fix final tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* Fix tests\r\n\r\n* black\r\n\r\n* forgot to black tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* flakefix\r\n\r\n* Tests + code : Stocks / Discovery\r\n\r\n* fix tests\r\n\r\n* added recorder\r\n\r\n* fixed tests\r\n\r\n* fixed tests\r\n\r\n* black\r\n\r\n* black\r\n\r\n* remove unused imports\r\n\r\n* refactor display raw\r\n\r\n* sia dicts fix\r\n\r\n* pylint\r\n\r\n* 
linting\r\n\r\n* remove dangerous default\r\n\r\n* fix tests\r\n\r\n* fix beta model test\r\n\r\n* black\r\n\r\n* skip screener qa test\r\n\r\n* change sector path to sectors\r\n\r\n* update tests readme\r\n\r\n* fix metric defaults\r\n\r\n* black\r\n\r\n* substitute lost ticker\r\n\r\n* defaults cpic\r\n\r\n* another round on sia\r\n\r\n* refactor cramer\r\n\r\n* reduce default tweets on sentiment\r\n\r\n* refactor yf hist, corr, volume\r\n\r\n* arkorders default\r\n\r\n* refactor income, balance, cashflow\r\n\r\n* refacto scorr, screener, getfinnhub\r\n\r\n* refactor stockgrid\r\n\r\n* ibkr refactor\r\n\r\n* another round on stockgrid\r\n\r\n* add dividens end point\r\n\r\n* refactor discovery endpoints\r\n\r\n* update docstrings with similar input\r\n\r\n* refactor messages\r\n\r\n* refactor ba\r\n\r\n* refactor regioons\r\n\r\n* refactor twitter sentiment\r\n\r\n* refactor hist\r\n\r\n* refactor regions\r\n\r\n* give default to timeframe\r\n\r\n* refactor bunch of defaults and arg names\r\n\r\n* remove leftover imports\r\n\r\n* refactor vwap\r\n\r\n* let tests run\r\n\r\n* fix tests\r\n\r\n* fix stock tests\r\n\r\n* fix stockanalysis tests\r\n\r\n* flake\r\n\r\n* MYPY\r\n\r\n* Made important changes\r\n\r\n* added fixes\r\n\r\n* Fixed big issue\r\n\r\n* Added fixes to tests\r\n\r\n* fix qa tests\r\n\r\n* fix tests\r\n\r\n* fix 1 more test\r\n\r\n* last stocks failing\r\n\r\n* fix crypto test\r\n\r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: montezdesousa \r\nCo-authored-by: hjoaquim \r\nCo-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com>\r\nCo-authored-by: colin99d \r\n\r\n* fix portfolio tests\r\n\r\n* change period to window\r\n\r\n* update ca docstrings\r\n\r\n* refactor get_similar_companies func\r\n\r\n* Fixed\r\n\r\n* Update CI\r\n\r\n* Update CI 2\r\n\r\n* Update CI 3\r\n\r\n* Update dependencies\r\n\r\nCo-authored-by: colin99d \r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: montezdesousa \r\nCo-authored-by: James Simmons \r\nCo-authored-by: Theodore Aptekarev \r\nCo-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com>\r\nCo-authored-by: jose-donato <43375532+jose-donato@users.noreply.github.com>\r\nCo-authored-by: montezdesousa <79287829+montezdesousa@users.noreply.github.com>\r\nCo-authored-by: northern-64bit <75195383+northern-64bit@users.noreply.github.com>\r\nCo-authored-by: hjoaquim ", "code": "def get_granger_causality(dependent_series, independent_series, lags):\n \n granger_set = pd.concat([dependent_series, independent_series], axis=1)\n\n granger = grangercausalitytests(granger_set, [lags], verbose=False)\n\n return granger\n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 28, "n_words": 16, "vocab_size": 14, "complexity": 1, "nloc": 4, "token_counts": 42, "n_ast_nodes": 63, "n_identifiers": 11, "d_id": 85240, "documentation": { "docstring": "Calculate granger tests\n\n Parameters\n ----------\n dependent_series: Series\n The series you want to test Granger Causality for.\n independent_series: Series\n The series that you want to test whether it Granger-causes time_series_y\n lags : int\n The amount of lags for the Granger test. 
By default, this is set to 3.\n ", "n_words": 47, "vocab_size": 36, "n_whitespaces": 86, "language": "en" } }, { "id": 96154, "commit_id": "09726d7fc95e53bb516e328fc1811fc9a0704cac", "repo": "sentry", "path": "src/sentry/tasks/post_process.py", "file_name": "post_process.py", "fun_name": "fetch_buffered_group_stats", "commit_message": "fix(post_process): Fetch buffered `times_seen` values and add them to `Group.times_seen` (#31624)\n\nIn `post_process_group` we process issue alert rules and also ignored groups. Both of these can have\r\nconditions that read from the `times_seen` value on the `Group`.\r\n\r\nThe problem here is that updates to `times_seen` are buffered and only written every 45s or so. This\r\nmeans that most of the time when a `Group` goes through `post_process_group` it has an out of date\r\n`times_seen` value. For infrequently updated groups, this can just mean that the count is -1. But\r\nfor high volume groups this could mean that we're considerably below the count.\r\n\r\nTo improve this, we read the current value from buffers and store it as pending updates on the group.\r\nWe then use this pending value when checking rules and snoozes in post process. There's a potential \r\nrace condition here where we fetch the `Group`, and before we fetch the value from buffers it is \r\ncleared, and so we miss out on the update. This should be infrequent enough that it's not a problem, \r\nand either way we will be considerably more accurate most of the time.", "code": "def fetch_buffered_group_stats(group):\n \n from sentry import buffer\n from sentry.models import Group\n\n result = buffer.get(Group, [\"times_seen\"], {\"pk\": group.id})\n group.times_seen_pending = result[\"times_seen\"]\n\n\n@instrumented_task(\n name=\"sentry.tasks.post_process.post_process_group\",\n time_limit=120,\n soft_time_limit=110,\n)", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@instrumented_task(\n name=\"sentry.tasks.post_process.post_process_group\",\n time_limit=120,\n soft_time_limit=110,\n)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 46, "n_words": 24, "vocab_size": 21, "complexity": 1, "nloc": 5, "token_counts": 44, "n_ast_nodes": 101, "n_identifiers": 14, "d_id": 19285, "documentation": { "docstring": "\n Fetches buffered increments to `times_seen` for this group and adds them to the current\n `times_seen`.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 25, "language": "en" } }, { "id": 12207, "commit_id": "a3b71c7208b3cd48aa7bc978c3343a074947e3d9", "repo": "jina", "path": "jina/parsers/orchestrate/base.py", "file_name": "base.py", "fun_name": "mixin_base_ppr_parser", "commit_message": "fix(parsers): clearify flow args (#4701)", "code": "def mixin_base_ppr_parser(parser):\n \n\n mixin_essential_parser(parser)\n\n gp = add_arg_group(parser, title='Base Deployment')\n\n gp.add_argument(\n '--extra-search-paths',\n type=str,\n default=[],\n nargs='*',\n help='Extra search paths to be used when loading modules and finding YAML config files.'\n if _SHOW_ALL_ARGS\n else argparse.SUPPRESS,\n )\n\n gp.add_argument(\n '--timeout-ctrl',\n type=int,\n default=int(os.getenv('JINA_DEFAULT_TIMEOUT_CTRL', '60')),\n help='The timeout in milliseconds of the control request, -1 for waiting forever',\n )\n\n parser.add_argument(\n '--k8s-namespace',\n type=str,\n help='Name of the namespace where Kubernetes deployment should be deployed, to be filled by flow name'\n if _SHOW_ALL_ARGS\n else argparse.SUPPRESS,\n )\n\n gp.add_argument(\n 
'--polling',\n type=str,\n default=PollingType.ANY.name,\n help=,\n )\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 253, "n_words": 80, "vocab_size": 64, "complexity": 3, "nloc": 41, "token_counts": 123, "n_ast_nodes": 202, "n_identifiers": 21, "d_id": 2215, "documentation": { "docstring": "Mixing in arguments required by pod/deployment/runtime module into the given parser.\n :param parser: the parser instance to which we add arguments\n \n The polling strategy of the Deployment and its endpoints (when `shards>1`).\n Can be defined for all endpoints of a Deployment or by endpoint.\n Define per Deployment:\n - ANY: only one (whoever is idle) Pod polls the message\n - ALL: all Pods poll the message (like a broadcast)\n Define per Endpoint:\n JSON dict, {endpoint: PollingType}\n {'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}\n \n ", "n_words": 81, "vocab_size": 66, "n_whitespaces": 119, "language": "en" } }, { "id": 179244, "commit_id": "cc0cff893f9d7d472788adc2510c123967b384fe", "repo": "gradio", "path": "gradio/inputs.py", "file_name": "inputs.py", "fun_name": "preprocess", "commit_message": "Format The Codebase\n- black formatting\n- isort formatting", "code": "def preprocess(self, x):\n \n if self.type == \"value\":\n return x\n elif self.type == \"index\":\n return [self.choices.index(choice) for choice in x]\n else:\n raise ValueError(\n \"Unknown type: \"\n + str(self.type)\n + \". Please choose from: 'value', 'index'.\"\n )\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 152, "n_words": 35, "vocab_size": 31, "complexity": 4, "nloc": 11, "token_counts": 55, "n_ast_nodes": 96, "n_identifiers": 9, "d_id": 42924, "documentation": { "docstring": "\n Parameters:\n x (List[str]): list of selected choices\n Returns:\n (Union[List[str], List[int]]): list of selected choices as strings or indices within choice list\n ", "n_words": 21, "vocab_size": 16, "n_whitespaces": 57, "language": "en" } }, { "id": 87295, "commit_id": "c8bfd65f261769da2565ca4240f11da6e820a7e4", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_relay_projectconfigs.py", "file_name": "test_relay_projectconfigs.py", "fun_name": "test_relays_dyamic_sampling", "commit_message": "feat(dyn-sampling): Switch to new feature flag multiplexer in projectconfig (#40498)\n\nThis PR switch to new feature flag multiplexer\r\nin projectconfig.", "code": "def test_relays_dyamic_sampling(client, call_endpoint, default_project, dyn_sampling_data):\n \n default_project.update_option(\"sentry:dynamic_sampling\", dyn_sampling_data())\n\n with Feature(\n {\n \"organizations:server-side-sampling\": True,\n \"organizations:dynamic-sampling-deprecated\": True,\n }\n ):\n result, status_code = call_endpoint(full_config=False)\n assert status_code < 400\n dynamic_sampling = safe.get_path(\n result, \"configs\", str(default_project.id), \"config\", \"dynamicSampling\"\n )\n assert dynamic_sampling == dyn_sampling_data()\n\n\n@pytest.mark.django_db", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@pytest.mark.django_db", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 131, "n_words": 38, "vocab_size": 32, "complexity": 1, "nloc": 14, "token_counts": 78, "n_ast_nodes": 142, "n_identifiers": 18, "d_id": 18274, "documentation": { "docstring": "\n Tests that dynamic sampling configuration set in project 
details are retrieved in relay configs\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 21, "language": "en" } }, { "id": 226503, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_choroplethmapbox.py", "file_name": "_choroplethmapbox.py", "fun_name": "below", "commit_message": "switch to black .22", "code": "def below(self):\n \n return self[\"below\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 58176, "documentation": { "docstring": "\n Determines if the choropleth polygons will be inserted before\n the layer with the specified ID. By default, choroplethmapbox\n traces are placed above the water layers. If set to '', the\n layer will be inserted above every existing layer.\n\n The 'below' property is a string and must be specified as:\n - A string\n - A number that will be converted to a string\n\n Returns\n -------\n str\n ", "n_words": 65, "vocab_size": 46, "n_whitespaces": 147, "language": "en" } }, { "id": 66773, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/patches/v13_0/patch_to_fix_reverse_linking_in_additional_salary_encashment_and_incentive.py", "file_name": "patch_to_fix_reverse_linking_in_additional_salary_encashment_and_incentive.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute():\n\tif not frappe.db.table_exists(\"Additional Salary\"):\n\t\treturn\n\n\tfor doctype in (\"Additional Salary\", \"Employee Incentive\", \"Salary Detail\"):\n\t\tfrappe.reload_doc(\"Payroll\", \"doctype\", doctype)\n\n\tfrappe.reload_doc(\"hr\", \"doctype\", \"Leave Encashment\")\n\n\tadditional_salaries = frappe.get_all(\n\t\t\"Additional Salary\",\n\t\tfields=[\"name\", \"salary_slip\", \"type\", \"salary_component\"],\n\t\tfilters={\"salary_slip\": [\"!=\", \"\"]},\n\t\tgroup_by=\"salary_slip\",\n\t)\n\tleave_encashments = frappe.get_all(\n\t\t\"Leave Encashment\",\n\t\tfields=[\"name\", \"additional_salary\"],\n\t\tfilters={\"additional_salary\": [\"!=\", \"\"]},\n\t)\n\temployee_incentives = frappe.get_all(\n\t\t\"Employee Incentive\",\n\t\tfields=[\"name\", \"additional_salary\"],\n\t\tfilters={\"additional_salary\": [\"!=\", \"\"]},\n\t)\n\n\tfor incentive in employee_incentives:\n\t\tfrappe.db.sql(\n\t\t\t,\n\t\t\t(incentive[\"name\"], incentive[\"additional_salary\"]),\n\t\t)\n\n\tfor leave_encashment in leave_encashments:\n\t\tfrappe.db.sql(\n\t\t\t,\n\t\t\t(leave_encashment[\"name\"], leave_encashment[\"additional_salary\"]),\n\t\t)\n\n\tsalary_slips = [sal[\"salary_slip\"] for sal in additional_salaries]\n\n\tfor salary in additional_salaries:\n\t\tcomp_type = \"earnings\" if salary[\"type\"] == \"Earning\" else \"deductions\"\n\t\tif salary[\"salary_slip\"] and salary_slips.count(salary[\"salary_slip\"]) == 1:\n\t\t\tfrappe.db.sql(\n\t\t\t\t,\n\t\t\t\t(salary[\"name\"], comp_type, salary[\"salary_slip\"], salary[\"salary_component\"]),\n\t\t\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 70, "n_words": 110, "vocab_size": 71, "complexity": 10, "nloc": 53, "token_counts": 269, "n_ast_nodes": 470, "n_identifiers": 21, "d_id": 14329, "documentation": { "docstring": " UPDATE 
`tabAdditional Salary`\n\t\t\tSET ref_doctype = 'Employee Incentive', ref_docname = %s\n\t\t\tWHERE name = %s\n\t\t UPDATE `tabAdditional Salary`\n\t\t\tSET ref_doctype = 'Leave Encashment', ref_docname = %s\n\t\t\tWHERE name = %s\n\t\t\n\t\t\t\tUPDATE `tabSalary Detail`\n\t\t\t\tSET additional_salary = %s\n\t\t\t\tWHERE parenttype = 'Salary Slip'\n\t\t\t\t\tand parentfield = %s\n\t\t\t\t\tand parent = %s\n\t\t\t\t\tand salary_component = %s\n\t\t\t", "n_words": 54, "vocab_size": 24, "n_whitespaces": 44, "language": "en" } }, { "id": 216350, "commit_id": "55a7519dd5dab2bdfcac2e7e6e77a3d1358538f9", "repo": "salt", "path": "tests/unit/utils/test_win_dacl.py", "file_name": "test_win_dacl.py", "fun_name": "test_get_permissions", "commit_message": "fix tests", "code": "def test_get_permissions(self):\n \n self.assertTrue(\n win_dacl.set_permissions(\n obj_name=self.obj_name,\n principal=\"Backup Operators\",\n permissions=\"full_control\",\n access_mode=\"grant\",\n obj_type=self.obj_type,\n reset_perms=False,\n protected=None,\n )\n )\n\n expected = {'Not Inherited': {'Backup Operators': {'grant': {'applies to': 'This key and subkeys', 'permissions': 'Full Control'}}}}\n self.assertEqual(\n win_dacl.get_permissions(\n obj_name=self.obj_name,\n principal=\"Backup Operators\",\n obj_type=self.obj_type,\n ),\n expected,\n )\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 286, "n_words": 39, "vocab_size": 33, "complexity": 1, "nloc": 21, "token_counts": 100, "n_ast_nodes": 168, "n_identifiers": 15, "d_id": 54555, "documentation": { "docstring": "\n Test the get_permissions function\n ", "n_words": 4, "vocab_size": 4, "n_whitespaces": 19, "language": "en" } }, { "id": 5907, "commit_id": "69604268c2ddc06a4ee0b3dce0e05a8fb73b5d16", "repo": "ludwig", "path": "tests/integration_tests/test_api.py", "file_name": "test_api.py", "fun_name": "run_api_experiment_separated_datasets", "commit_message": "Rename fc_size to output_size (#1641)\n\n* Rename fc_size to output_size\r\n\r\n* Responding to comments", "code": "def run_api_experiment_separated_datasets(input_features, output_features, data_csv):\n \n config = {\n \"input_features\": input_features,\n \"output_features\": output_features,\n \"combiner\": {\"type\": \"concat\", \"output_size\": 14},\n \"training\": {\"epochs\": 2},\n }\n\n model = LudwigModel(config)\n\n # Training with dataframe\n data_df = read_csv(data_csv)\n train_df = data_df.sample(frac=0.8)\n test_df = data_df.drop(train_df.index).sample(frac=0.5)\n validation_df = data_df.drop(train_df.index).drop(test_df.index)\n\n basename, ext = os.path.splitext(data_csv)\n train_fname = basename + \".train\" + ext\n val_fname = basename + \".validation\" + ext\n test_fname = basename + \".test\" + ext\n output_dirs = []\n\n try:\n train_df.to_csv(train_fname)\n validation_df.to_csv(val_fname)\n test_df.to_csv(test_fname)\n\n # Training with csv\n _, _, output_dir = model.train(\n training_set=train_fname,\n skip_save_processed_input=True,\n skip_save_progress=True,\n skip_save_unprocessed_output=True,\n )\n output_dirs.append(output_dir)\n\n _, _, output_dir = model.train(\n training_set=train_fname,\n validation_set=val_fname,\n skip_save_processed_input=True,\n skip_save_progress=True,\n skip_save_unprocessed_output=True,\n )\n output_dirs.append(output_dir)\n\n _, _, output_dir = model.train(\n training_set=train_fname,\n validation_set=val_fname,\n test_set=test_fname,\n 
skip_save_processed_input=True,\n skip_save_progress=True,\n skip_save_unprocessed_output=True,\n )\n output_dirs.append(output_dir)\n\n _, output_dir = model.predict(dataset=test_fname)\n output_dirs.append(output_dir)\n\n finally:\n # Remove results/intermediate data saved to disk\n os.remove(train_fname)\n os.remove(val_fname)\n os.remove(test_fname)\n for output_dir in output_dirs:\n shutil.rmtree(output_dir, ignore_errors=True)\n\n output_dirs = []\n try:\n _, _, output_dir = model.train(\n training_set=train_df,\n skip_save_processed_input=True,\n skip_save_progress=True,\n skip_save_unprocessed_output=True,\n )\n output_dirs.append(output_dir)\n\n _, _, output_dir = model.train(\n training_set=train_df,\n validation_set=validation_df,\n skip_save_processed_input=True,\n skip_save_progress=True,\n skip_save_unprocessed_output=True,\n )\n output_dirs.append(output_dir)\n\n _, _, output_dir = model.train(\n training_set=train_df,\n validation_set=validation_df,\n test_set=test_df,\n skip_save_processed_input=True,\n skip_save_progress=True,\n skip_save_unprocessed_output=True,\n )\n output_dirs.append(output_dir)\n\n _, output_dir = model.predict(dataset=data_df)\n output_dirs.append(output_dir)\n\n finally:\n for output_dir in output_dirs:\n shutil.rmtree(output_dir, ignore_errors=True)\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 846, "n_words": 185, "vocab_size": 82, "complexity": 5, "nloc": 84, "token_counts": 475, "n_ast_nodes": 732, "n_identifiers": 42, "d_id": 890, "documentation": { "docstring": "Helper method to avoid code repetition in running an experiment.\n\n :param input_features: input schema\n :param output_features: output schema\n :param data_csv: path to data\n :return: None\n ", "n_words": 25, "vocab_size": 21, "n_whitespaces": 40, "language": "en" } }, { "id": 266093, "commit_id": "4a95cfd1c4435e6eda01745fe06d902c25d2493e", "repo": "netbox", "path": "netbox/extras/signals.py", "file_name": "signals.py", "fun_name": "is_same_object", "commit_message": "Permanently connect change logging & webhook receivers", "code": "def is_same_object(instance, webhook_data, request_id):\n \n return (\n ContentType.objects.get_for_model(instance) == webhook_data['content_type'] and\n instance.pk == webhook_data['object_id'] and\n request_id == webhook_data['request_id']\n )\n\n\n@receiver((post_save, m2m_changed))", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "@receiver((post_save, m2m_changed))", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 49, "n_words": 20, "vocab_size": 17, "complexity": 3, "nloc": 6, "token_counts": 42, "n_ast_nodes": 84, "n_identifiers": 11, "d_id": 78288, "documentation": { "docstring": "\n Compare the given instance to the most recent queued webhook object, returning True\n if they match. 
This check is used to avoid creating duplicate webhook entries.\n ", "n_words": 26, "vocab_size": 23, "n_whitespaces": 36, "language": "en" } }, { "id": 244992, "commit_id": "c08b81510fbfc1199eab6ccc7af07fc3d3f89d12", "repo": "mmdetection", "path": "mmdet/models/seg_heads/panoptic_fusion_heads/base_panoptic_fusion_head.py", "file_name": "base_panoptic_fusion_head.py", "fun_name": "with_loss", "commit_message": "Two stage segmentor + Panpotic FPN", "code": "def with_loss(self) -> bool:\n \n return self.loss_panoptic is not None\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 15, "n_ast_nodes": 26, "n_identifiers": 4, "d_id": 70624, "documentation": { "docstring": "bool: whether the panoptic head contains loss function.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 260816, "commit_id": "6d16698dd8ba4407e5c3c588d7b5e6a5257eddc9", "repo": "scikit-learn", "path": "sklearn/svm/_bounds.py", "file_name": "_bounds.py", "fun_name": "l1_min_c", "commit_message": "DOC Ensures that l1_min_c passes numpydoc validation (#24134)", "code": "def l1_min_c(X, y, *, loss=\"squared_hinge\", fit_intercept=True, intercept_scaling=1.0):\n \n if loss not in (\"squared_hinge\", \"log\"):\n raise ValueError('loss type not in (\"squared_hinge\", \"log\")')\n\n X = check_array(X, accept_sparse=\"csc\")\n check_consistent_length(X, y)\n\n Y = LabelBinarizer(neg_label=-1).fit_transform(y).T\n # maximum absolute value over classes and features\n den = np.max(np.abs(safe_sparse_dot(Y, X)))\n if fit_intercept:\n bias = np.full(\n (np.size(y), 1), intercept_scaling, dtype=np.array(intercept_scaling).dtype\n )\n den = max(den, abs(np.dot(Y, bias)).max())\n\n if den == 0.0:\n raise ValueError(\n \"Ill-posed l1_min_c calculation: l1 will always \"\n \"select zero coefficients for this data\"\n )\n if loss == \"squared_hinge\":\n return 0.5 / den\n else: # loss == 'log':\n return 2.0 / den\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 216, "n_words": 93, "vocab_size": 70, "complexity": 5, "nloc": 21, "token_counts": 176, "n_ast_nodes": 276, "n_identifiers": 26, "d_id": 76515, "documentation": { "docstring": "Return the lowest bound for C.\n\n The lower bound for C is computed such that for C in (l1_min_C, infinity)\n the model is guaranteed not to be empty. This applies to l1 penalized\n classifiers, such as LinearSVC with penalty='l1' and\n linear_model.LogisticRegression with penalty='l1'.\n\n This value is valid if class_weight parameter in fit() is not set.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Training vector, where `n_samples` is the number of samples and\n `n_features` is the number of features.\n\n y : array-like of shape (n_samples,)\n Target vector relative to X.\n\n loss : {'squared_hinge', 'log'}, default='squared_hinge'\n Specifies the loss function.\n With 'squared_hinge' it is the squared hinge loss (a.k.a. 
L2 loss).\n With 'log' it is the loss of logistic regression models.\n\n fit_intercept : bool, default=True\n Specifies if the intercept should be fitted by the model.\n It must match the fit() method parameter.\n\n intercept_scaling : float, default=1.0\n When fit_intercept is True, instance vector x becomes\n [x, intercept_scaling],\n i.e. a \"synthetic\" feature with constant value equals to\n intercept_scaling is appended to the instance vector.\n It must match the fit() method parameter.\n\n Returns\n -------\n l1_min_c : float\n Minimum value for C.\n ", "n_words": 190, "vocab_size": 121, "n_whitespaces": 336, "language": "en" } }, { "id": 184614, "commit_id": "b22436933acc0d7440ec300f971a249bd6105a5b", "repo": "textual", "path": "src/textual/app.py", "file_name": "app.py", "fun_name": "screen", "commit_message": "lots of docstrings", "code": "def screen(self) -> Screen:\n \n try:\n return self._screen_stack[-1]\n except IndexError:\n raise ScreenStackError(\"No screens on stack\") from None\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 59, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 13, "token_counts": 28, "n_ast_nodes": 49, "n_identifiers": 6, "d_id": 44714, "documentation": { "docstring": "Get the current screen.\n\n Raises:\n ScreenStackError: If there are no screens on the stack.\n\n Returns:\n Screen: The currently active screen.\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 63, "language": "en" } }, { "id": 56927, "commit_id": "336eca7839fccbcbdb77179f352f926da8b1fa15", "repo": "prefect", "path": "tests/test_flows.py", "file_name": "test_flows.py", "fun_name": "test_timeout_stops_execution_in_sync_subflows", "commit_message": "Ensure flows are called in an interruptible thread (PrefectHQ/orion#2174)\n\n* Ensure flows are called in an interruptible thread\r\n\r\n* Set higher runtime limit in `test_timeout_stops_execution_in_sync_subflows`", "code": "async def test_timeout_stops_execution_in_sync_subflows(self, tmp_path):\n \n canary_file = tmp_path / \"canary\"\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 14, "token_counts": 72, "n_ast_nodes": 26, "n_identifiers": 4, "d_id": 11587, "documentation": { "docstring": "\n Sync flow runs can be cancelled after a timeout once a task is called\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 29, "language": "en" } }, { "id": 47753, "commit_id": "c3d883a971a8e4e65ccc774891928daaaa0f4442", "repo": "airflow", "path": "tests/providers/cncf/kubernetes/operators/test_kubernetes_pod.py", "file_name": "test_kubernetes_pod.py", "fun_name": "test_mark_checked_if_not_deleted", "commit_message": "KubernetesPodOperator should patch \"already checked\" always (#22734)\n\nWhen not configured to delete pods, at end of task execution the current behavior is to patch the pod as \"already checked\", but only if pod not successful. 
We should also patch when successful so it isn't \"reattached\" to after a task clear.", "code": "def test_mark_checked_if_not_deleted(self, mock_patch_already_checked, mock_delete_pod, should_fail):\n \n dag = DAG('hello2', start_date=pendulum.now())\n k = KubernetesPodOperator(\n namespace=\"default\",\n image=\"ubuntu:16.04\",\n name=\"test\",\n task_id=\"task\",\n is_delete_operator_pod=False,\n dag=dag,\n )\n remote_pod_mock = MagicMock()\n remote_pod_mock.status.phase = 'Failed' if should_fail else 'Succeeded'\n self.await_pod_mock.return_value = remote_pod_mock\n context = create_context(k, persist_to_db=True)\n if should_fail:\n with pytest.raises(AirflowException):\n k.execute(context=context)\n else:\n k.execute(context=context)\n mock_patch_already_checked.assert_called_once()\n mock_delete_pod.assert_not_called()\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 232, "n_words": 45, "vocab_size": 37, "complexity": 3, "nloc": 21, "token_counts": 127, "n_ast_nodes": 213, "n_identifiers": 32, "d_id": 9244, "documentation": { "docstring": "If we aren't deleting pods mark \"checked\" if the task completes (successful or otherwise)", "n_words": 14, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 205277, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/migrations/autodetector.py", "file_name": "autodetector.py", "fun_name": "check_dependency", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def check_dependency(self, operation, dependency):\n \n # Created model\n if dependency[2] is None and dependency[3] is True:\n return (\n isinstance(operation, operations.CreateModel)\n and operation.name_lower == dependency[1].lower()\n )\n # Created field\n elif dependency[2] is not None and dependency[3] is True:\n return (\n isinstance(operation, operations.CreateModel)\n and operation.name_lower == dependency[1].lower()\n and any(dependency[2] == x for x, y in operation.fields)\n ) or (\n isinstance(operation, operations.AddField)\n and operation.model_name_lower == dependency[1].lower()\n and operation.name_lower == dependency[2].lower()\n )\n # Removed field\n elif dependency[2] is not None and dependency[3] is False:\n return (\n isinstance(operation, operations.RemoveField)\n and operation.model_name_lower == dependency[1].lower()\n and operation.name_lower == dependency[2].lower()\n )\n # Removed model\n elif dependency[2] is None and dependency[3] is False:\n return (\n isinstance(operation, operations.DeleteModel)\n and operation.name_lower == dependency[1].lower()\n )\n # Field being altered\n elif dependency[2] is not None and dependency[3] == \"alter\":\n return (\n isinstance(operation, operations.AlterField)\n and operation.model_name_lower == dependency[1].lower()\n and operation.name_lower == dependency[2].lower()\n )\n # order_with_respect_to being unset for a field\n elif dependency[2] is not None and dependency[3] == \"order_wrt_unset\":\n return (\n isinstance(operation, operations.AlterOrderWithRespectTo)\n and operation.name_lower == dependency[1].lower()\n and (operation.order_with_respect_to or \"\").lower()\n != dependency[2].lower()\n )\n # Field is removed and part of an index/unique_together\n elif dependency[2] is not None and dependency[3] == \"foo_together_change\":\n return (\n isinstance(\n operation,\n (operations.AlterUniqueTogether, operations.AlterIndexTogether),\n )\n and 
operation.name_lower == dependency[1].lower()\n )\n # Unknown dependency. Raise an error.\n else:\n raise ValueError(\"Can't handle dependency %r\" % (dependency,))\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 891, "n_words": 213, "vocab_size": 74, "complexity": 31, "nloc": 50, "token_counts": 409, "n_ast_nodes": 629, "n_identifiers": 23, "d_id": 51060, "documentation": { "docstring": "\n Return True if the given operation depends on the given dependency,\n False otherwise.\n ", "n_words": 13, "vocab_size": 11, "n_whitespaces": 35, "language": "en" } }, { "id": 264108, "commit_id": "f57e15ae14d2370cba7a14cfae97d2c29b5c8154", "repo": "pyinstaller", "path": "PyInstaller/utils/win32/versioninfo.py", "file_name": "versioninfo.py", "fun_name": "load_version_info_from_text_file", "commit_message": "building: EXE: load version info structure before comparing guts\n\nLoad the version information structure in `EXE` constructor, so that\nthe comparison against cached state is done with the structure instead\nof the filen name. This way, changing the contents of the version\ninformation file triggers rebuild of the EXE.\n\nSplit and clean-up related functions in the `utils.win32.versioninfo`\nmodule as well as in `pyi-grab_version` and `pyi-set_version`\nutility scripts.", "code": "def load_version_info_from_text_file(filename):\n \n\n # Read and parse the version file. It may have a byte order marker or encoding cookie - respect it if it does.\n import PyInstaller.utils.misc as miscutils\n with open(filename, 'rb') as fp:\n text = miscutils.decode(fp.read())\n\n # Deserialize via eval()\n try:\n info = eval(text)\n except Exception as e:\n raise ValueError(\"Failed to deserialize VSVersionInfo from text-based representation!\") from e\n\n # Sanity check\n assert isinstance(info, VSVersionInfo), \\\n f\"Loaded incompatible structure type! 
Expected VSVersionInfo, got: {type(info)!r}\"\n\n return info\n\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 134, "n_words": 76, "vocab_size": 68, "complexity": 2, "nloc": 11, "token_counts": 69, "n_ast_nodes": 129, "n_identifiers": 19, "d_id": 77609, "documentation": { "docstring": "\n Load the `VSVersionInfo` structure from its string-based (`VSVersionInfo.__str__`) serialization by reading the\n text from the file and running it through `eval()`.\n ", "n_words": 21, "vocab_size": 18, "n_whitespaces": 31, "language": "en" } }, { "id": 203873, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/gis/db/backends/spatialite/operations.py", "file_name": "operations.py", "fun_name": "rttopo_version", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def rttopo_version(self):\n \n return self._get_spatialite_func(\"rttopo_version()\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 26, "n_identifiers": 3, "d_id": 50568, "documentation": { "docstring": "Return the version of RTTOPO library used by SpatiaLite.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 38436, "commit_id": "e730e1256732b5dfeae2bdd427beacc3fbc20e2a", "repo": "transformers", "path": "examples/research_projects/codeparrot/scripts/preprocessing.py", "file_name": "preprocessing.py", "fun_name": "filter", "commit_message": "Update codeparrot data preprocessing (#16944)\n\n* add new preprocessing arguments\r\n\r\n* add new filters\r\n\r\n* add new filters to readme\r\n\r\n* fix config and test count, update function names and docstrings\r\n\r\n* reformat code\r\n\r\n* update readme\r\n\r\n* Update readme\r\n\r\n* rename config_test filter\r\n\r\nCo-authored-by: Leandro von Werra \r\n\r\n* rename few_assignments filter\r\n\r\nCo-authored-by: Leandro von Werra \r\n\r\n* rename tokenizer in arguments\r\n\r\nCo-authored-by: Leandro von Werra \r\n\r\n* rename functions and add limit_line argument for config_test filter\r\n\r\n* update threshold for config_test filter\r\n\r\nCo-authored-by: Leandro von Werra \r\nCo-authored-by: Loubna ben allal ", "code": "def filter(example, uniques, args):\n \n if not check_uniques(example, uniques):\n return False\n elif example[\"autogenerated\"]:\n return False\n elif example[\"line_max\"] > args.line_max:\n return False\n elif example[\"line_mean\"] > args.line_mean:\n return False\n elif example[\"alpha_frac\"] < args.alpha_frac:\n return False\n elif example[\"ratio\"] < args.min_token_ratio:\n return False\n elif example[\"config_or_test\"] and np.random.rand() <= args.filter_proba:\n return False\n elif example[\"has_no_keywords\"] and np.random.rand() <= args.filter_proba:\n return False\n elif example[\"has_few_assignments\"]:\n return False\n else:\n return True\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 164, "n_words": 61, "vocab_size": 31, "complexity": 12, "nloc": 21, "token_counts": 129, "n_ast_nodes": 215, "n_identifiers": 13, "d_id": 6974, "documentation": { "docstring": "Filter dataset with heuristics. 
Config, test and has_no_keywords files are removed with a given probability.", "n_words": 15, "vocab_size": 14, "n_whitespaces": 14, "language": "en" } }, { "id": 251881, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "test/mitmproxy/proxy/layers/http/test_http2.py", "file_name": "test_http2.py", "fun_name": "test_stream_concurrency", "commit_message": "make it black!", "code": "def test_stream_concurrency(tctx):\n \n playbook, cff = start_h2_client(tctx)\n flow1 = Placeholder(HTTPFlow)\n flow2 = Placeholder(HTTPFlow)\n\n reqheadershook1 = http.HttpRequestHeadersHook(flow1)\n reqheadershook2 = http.HttpRequestHeadersHook(flow2)\n reqhook1 = http.HttpRequestHook(flow1)\n reqhook2 = http.HttpRequestHook(flow2)\n\n server = Placeholder(Server)\n data_req1 = Placeholder(bytes)\n data_req2 = Placeholder(bytes)\n\n assert (\n playbook\n >> DataReceived(\n tctx.client,\n cff.build_headers_frame(\n example_request_headers, flags=[\"END_STREAM\"], stream_id=1\n ).serialize()\n + cff.build_headers_frame(\n example_request_headers, flags=[\"END_STREAM\"], stream_id=3\n ).serialize(),\n )\n << reqheadershook1\n << reqheadershook2\n >> reply(to=reqheadershook1)\n << reqhook1\n >> reply(to=reqheadershook2)\n << reqhook2\n # req 2 overtakes 1 and we already have a reply:\n >> reply(to=reqhook2)\n << OpenConnection(server)\n >> reply(None, side_effect=make_h2)\n << SendData(server, data_req2)\n >> reply(to=reqhook1)\n << SendData(server, data_req1)\n )\n frames = decode_frames(data_req2())\n assert [type(x) for x in frames] == [\n hyperframe.frame.SettingsFrame,\n hyperframe.frame.HeadersFrame,\n ]\n frames = decode_frames(data_req1())\n assert [type(x) for x in frames] == [\n hyperframe.frame.HeadersFrame,\n ]\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 29, "n_whitespaces": 392, "n_words": 117, "vocab_size": 72, "complexity": 3, "nloc": 44, "token_counts": 269, "n_ast_nodes": 403, "n_identifiers": 42, "d_id": 73875, "documentation": { "docstring": "Test that we can send an intercepted request with a lower stream id than one that has already been sent.", "n_words": 20, "vocab_size": 19, "n_whitespaces": 19, "language": "en" } }, { "id": 204714, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/management/templates.py", "file_name": "templates.py", "fun_name": "handle_template", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def handle_template(self, template, subdir):\n \n if template is None:\n return os.path.join(django.__path__[0], \"conf\", subdir)\n else:\n if template.startswith(\"file://\"):\n template = template[7:]\n expanded_template = os.path.expanduser(template)\n expanded_template = os.path.normpath(expanded_template)\n if os.path.isdir(expanded_template):\n return expanded_template\n if self.is_url(template):\n # downloads the file and returns the path\n absolute_path = self.download(template)\n else:\n absolute_path = os.path.abspath(expanded_template)\n if os.path.exists(absolute_path):\n return self.extract(absolute_path)\n\n raise CommandError(\n \"couldn't handle %s template %s.\" % (self.app_or_project, template)\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 284, "n_words": 60, "vocab_size": 43, "complexity": 6, "nloc": 19, "token_counts": 140, "n_ast_nodes": 228, "n_identifiers": 22, 
"d_id": 50848, "documentation": { "docstring": "\n Determine where the app or project templates are.\n Use django.__path__[0] as the default because the Django install\n directory isn't known.\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 49, "language": "en" } }, { "id": 159466, "commit_id": "a2cb6b72bb72fb9e5598808d564749503ee08784", "repo": "rasa", "path": "rasa/nlu/featurizers/dense_featurizer/lm_featurizer.py", "file_name": "lm_featurizer.py", "fun_name": "_load_model_instance", "commit_message": "fix transformers typing issues", "code": "def _load_model_instance(self) -> None:\n \n from rasa.nlu.utils.hugging_face.registry import (\n model_class_dict,\n model_tokenizer_dict,\n )\n\n logger.debug(f\"Loading Tokenizer and Model for {self.model_name}\")\n\n self.tokenizer = model_tokenizer_dict[self.model_name].from_pretrained(\n self.model_weights, cache_dir=self.cache_dir\n )\n self.model = model_class_dict[self.model_name].from_pretrained( # type: ignore[no-untyped-call] # noqa: E501\n self.model_weights, cache_dir=self.cache_dir\n )\n\n # Use a universal pad token since all transformer architectures do not have a\n # consistent token. Instead of pad_token_id we use unk_token_id because\n # pad_token_id is not set for all architectures. We can't add a new token as\n # well since vocabulary resizing is not yet supported for TF classes.\n # Also, this does not hurt the model predictions since we use an attention mask\n # while feeding input.\n self.pad_token_id = self.tokenizer.unk_token_id\n", "url": "https://github.com/RasaHQ/rasa.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 258, "n_words": 108, "vocab_size": 80, "complexity": 1, "nloc": 18, "token_counts": 87, "n_ast_nodes": 145, "n_identifiers": 19, "d_id": 38282, "documentation": { "docstring": "Tries to load the model instance.\n\n Model loading should be skipped in unit tests.\n See unit tests for examples.\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 40, "language": "en" } }, { "id": 177500, "commit_id": "d82815dba6c8ddce19cd49f700298dc82a58f066", "repo": "networkx", "path": "networkx/algorithms/shortest_paths/weighted.py", "file_name": "weighted.py", "fun_name": "all_pairs_dijkstra_path", "commit_message": "Hide edges with a weight of None in A*. 
(#5945)\n\n* Hide edges with a weight of None in A*.\r\n\r\nThis matches the Dijkstra's weight interface.\r\n\r\n* Update Dijkstra's and A* docs for weights of None.\r\n\r\n* Add tests for A* with weight of None.\r\n\r\n* Add another test for A* with a weight function.\r\n\r\n* Document that None indicates a hidden edge.", "code": "def all_pairs_dijkstra_path(G, cutoff=None, weight=\"weight\"):\n \n path = single_source_dijkstra_path\n # TODO This can be trivially parallelized.\n for n in G:\n yield (n, path(G, n, cutoff=cutoff, weight=weight))\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 43, "n_words": 24, "vocab_size": 24, "complexity": 2, "nloc": 4, "token_counts": 41, "n_ast_nodes": 64, "n_identifiers": 7, "d_id": 42404, "documentation": { "docstring": "Compute shortest paths between all nodes in a weighted graph.\n\n Parameters\n ----------\n G : NetworkX graph\n\n cutoff : integer or float, optional\n Length (sum of edge weights) at which the search is stopped.\n If cutoff is provided, only return paths with summed weight <= cutoff.\n\n weight : string or function\n If this is a string, then edge weights will be accessed via the\n edge attribute with this key (that is, the weight of the edge\n joining `u` to `v` will be ``G.edges[u, v][weight]``). If no\n such edge attribute exists, the weight of the edge is assumed to\n be one.\n\n If this is a function, the weight of an edge is the value\n returned by the function. The function must accept exactly three\n positional arguments: the two endpoints of an edge and the\n dictionary of edge attributes for that edge. The function must\n return a number or None to indicate a hidden edge.\n\n Returns\n -------\n distance : dictionary\n Dictionary, keyed by source and target, of shortest paths.\n\n Examples\n --------\n >>> G = nx.path_graph(5)\n >>> path = dict(nx.all_pairs_dijkstra_path(G))\n >>> path[0][4]\n [0, 1, 2, 3, 4]\n\n Notes\n -----\n Edge weight attributes must be numerical.\n Distances are calculated as sums of weighted edges traversed.\n\n See Also\n --------\n floyd_warshall, all_pairs_bellman_ford_path\n\n ", "n_words": 205, "vocab_size": 127, "n_whitespaces": 362, "language": "en" } }, { "id": 295294, "commit_id": "8fc55b71c5153580508446d478adfb450c76ea41", "repo": "core", "path": "homeassistant/components/climate/__init__.py", "file_name": "__init__.py", "fun_name": "preset_modes", "commit_message": "Add EntityFeature enum to Climate (#69077)", "code": "def preset_modes(self) -> list[str] | None:\n \n return self._attr_preset_modes\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 6, "token_counts": 17, "n_ast_nodes": 29, "n_identifiers": 5, "d_id": 94318, "documentation": { "docstring": "Return a list of available preset modes.\n\n Requires ClimateEntityFeature.PRESET_MODE.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 23, "language": "en" } }, { "id": 154285, "commit_id": "bd326f1c4175102489f08d271a53cf374bd9125e", "repo": "modin", "path": "modin/core/dataframe/algebra/binary.py", "file_name": "binary.py", "fun_name": "call", "commit_message": "PERF-#4268: Implement partition-parallel __getitem__ for bool Series masks (#4753)\n\nSigned-off-by: Vasily Litvinov ", "code": "def call(cls, func, join_type=\"outer\", labels=\"replace\"):\n \n", "url": 
"https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 12, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 29, "n_identifiers": 5, "d_id": 35897, "documentation": { "docstring": "\n Build template binary operator.\n\n Parameters\n ----------\n func : callable(pandas.DataFrame, [pandas.DataFrame, list-like, scalar]) -> pandas.DataFrame\n Binary function to execute. Have to be able to accept at least two arguments.\n join_type : {'left', 'right', 'outer', 'inner', None}, default: 'outer'\n Type of join that will be used if indices of operands are not aligned.\n labels : {\"keep\", \"replace\", \"drop\"}, default: \"replace\"\n Whether keep labels from left Modin DataFrame, replace them with labels\n from joined DataFrame or drop altogether to make them be computed lazily later.\n\n Returns\n -------\n callable\n Function that takes query compiler and executes binary operation.\n ", "n_words": 94, "vocab_size": 79, "n_whitespaces": 220, "language": "en" } }, { "id": 149271, "commit_id": "1c0946833da746b480f6ef88d4866d6a87824e17", "repo": "freqtrade", "path": "freqtrade/persistence/models.py", "file_name": "models.py", "fun_name": "recalc_open_trade_value", "commit_message": "Fix bug in exit-count detection", "code": "def recalc_open_trade_value(self) -> None:\n \n self.open_trade_value = self._calc_open_trade_value()\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 7, "token_counts": 17, "n_ast_nodes": 31, "n_identifiers": 4, "d_id": 34386, "documentation": { "docstring": "\n Recalculate open_trade_value.\n Must be called whenever open_rate, fee_open or is_short is changed.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 34, "language": "en" } }, { "id": 61230, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/utils/misc.py", "file_name": "misc.py", "fun_name": "dist_in_site_packages", "commit_message": "upd; format", "code": "def dist_in_site_packages(dist):\n # type: (Distribution) -> bool\n \n return dist_location(dist).startswith(normalize_path(site_packages))\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 18, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 19, "n_ast_nodes": 35, "n_identifiers": 6, "d_id": 12452, "documentation": { "docstring": "\n Return True if given Distribution is installed in\n sysconfig.get_python_lib().\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 19, "language": "en" } }, { "id": 65186, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/cash_flow/cash_flow.py", "file_name": "cash_flow.py", "fun_name": "get_account_type_based_gl_data", "commit_message": "style: format code with black", "code": "def get_account_type_based_gl_data(company, start_date, end_date, account_type, filters=None):\n\tcond = \"\"\n\tfilters = frappe._dict(filters or {})\n\n\tif filters.include_default_book_entries:\n\t\tcompany_fb = frappe.db.get_value(\"Company\", company, \"default_finance_book\")\n\t\tcond = % 
(\n\t\t\tfrappe.db.escape(filters.finance_book),\n\t\t\tfrappe.db.escape(company_fb),\n\t\t)\n\telse:\n\t\tcond = \" AND (finance_book in (%s, '') OR finance_book IS NULL)\" % (\n\t\t\tfrappe.db.escape(cstr(filters.finance_book))\n\t\t)\n\n\tgl_sum = frappe.db.sql_list(\n\t\t.format(\n\t\t\tcond=cond\n\t\t),\n\t\t(company, start_date, end_date, account_type),\n\t)\n\n\treturn gl_sum[0] if gl_sum and gl_sum[0] else 0\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 45, "n_words": 64, "vocab_size": 48, "complexity": 5, "nloc": 27, "token_counts": 137, "n_ast_nodes": 214, "n_identifiers": 19, "d_id": 13820, "documentation": { "docstring": " AND (finance_book in (%s, %s, '') OR finance_book IS NULL)\n\t\t\t\n\t\tselect sum(credit) - sum(debit)\n\t\tfrom `tabGL Entry`\n\t\twhere company=%s and posting_date >= %s and posting_date <= %s\n\t\t\tand voucher_type != 'Period Closing Voucher'\n\t\t\tand account in ( SELECT name FROM tabAccount WHERE account_type = %s) {cond}\n\t", "n_words": 46, "vocab_size": 40, "n_whitespaces": 41, "language": "en" } }, { "id": 220220, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ast.py", "file_name": "ast.py", "fun_name": "get_docstring", "commit_message": "add python 3.10.4 for windows", "code": "def get_docstring(node, clean=True):\n \n if not isinstance(node, (AsyncFunctionDef, FunctionDef, ClassDef, Module)):\n raise TypeError(\"%r can't have docstrings\" % node.__class__.__name__)\n if not(node.body and isinstance(node.body[0], Expr)):\n return None\n node = node.body[0].value\n if isinstance(node, Str):\n text = node.s\n elif isinstance(node, Constant) and isinstance(node.value, str):\n text = node.value\n else:\n return None\n if clean:\n import inspect\n text = inspect.cleandoc(text)\n return text\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 130, "n_words": 54, "vocab_size": 39, "complexity": 8, "nloc": 16, "token_counts": 124, "n_ast_nodes": 193, "n_identifiers": 21, "d_id": 55940, "documentation": { "docstring": "\n Return the docstring for the given node or None if no docstring can\n be found. 
If the node provided does not have docstrings a TypeError\n will be raised.\n\n If *clean* is `True`, all tabs are expanded to spaces and any whitespace\n that can be uniformly removed from the second line onwards is removed.\n ", "n_words": 53, "vocab_size": 43, "n_whitespaces": 73, "language": "en" } }, { "id": 73081, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/forms/views.py", "file_name": "views.py", "fun_name": "get_success_url", "commit_message": "Reformat with black", "code": "def get_success_url(self):\n \n return self.success_url\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 19, "n_identifiers": 3, "d_id": 15949, "documentation": { "docstring": "Returns the success URL to redirect to after a successful deletion", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 222753, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/command/install.py", "file_name": "install.py", "fun_name": "expand_basedirs", "commit_message": "add python 3.10.4 for windows", "code": "def expand_basedirs(self):\n \n self._expand_attrs(['install_base', 'install_platbase', 'root'])\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 18, "n_ast_nodes": 36, "n_identifiers": 3, "d_id": 56733, "documentation": { "docstring": "Calls `os.path.expanduser` on install_base, install_platbase and\n root.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 13, "language": "en" } }, { "id": 58678, "commit_id": "74b49c72657da5e18fc00c4b1da3012b575210cd", "repo": "prefect", "path": "tests/orion/api/test_block_documents.py", "file_name": "test_block_documents.py", "fun_name": "test_delete_nonsense_block_document", "commit_message": "Prevent non-UUID slugs from raising errors on the BlockDocuments APIs. (#6541)\n\nIn Prefect Cloud, we observed some errors when clients would send requests for\r\n`.../block_documents/null`, which should really be handled at the routing layer\r\nwith 404s when the path UUIDs can't be parsed.\r\n\r\nNote that this is just correcting the server-side issue, but does not attempt\r\nto diagnose the client-side issue at this time. 
Also, this does not attempt\r\nto go through every route in Orion that includes UUIDs in its path.", "code": "async def test_delete_nonsense_block_document(self, client, block_schemas):\n \n response = await client.get(\"/block_documents/not-even\")\n assert response.status_code == status.HTTP_404_NOT_FOUND\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 34, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 3, "token_counts": 27, "n_ast_nodes": 47, "n_identifiers": 9, "d_id": 11797, "documentation": { "docstring": "Regression test for an issue we observed in Cloud where a client made\n requests for /block_documents/null", "n_words": 16, "vocab_size": 15, "n_whitespaces": 22, "language": "en" } }, { "id": 46734, "commit_id": "3849b4e709acfc9e85496aa2dededb2dae117fc7", "repo": "airflow", "path": "tests/cli/commands/test_dag_command.py", "file_name": "test_dag_command.py", "fun_name": "test_cli_backfill_depends_on_past_backwards", "commit_message": "support for continue backfill on failures (#22697)", "code": "def test_cli_backfill_depends_on_past_backwards(self, mock_run):\n \n dag_id = 'test_depends_on_past'\n start_date = DEFAULT_DATE + timedelta(days=1)\n end_date = start_date + timedelta(days=1)\n args = [\n 'dags',\n 'backfill',\n dag_id,\n '--local',\n '--start-date',\n start_date.isoformat(),\n '--end-date',\n end_date.isoformat(),\n '--ignore-first-depends-on-past',\n '--run-backwards',\n ]\n dag = self.dagbag.get_dag(dag_id)\n\n dag_command.dag_backfill(self.parser.parse_args(args), dag=dag)\n mock_run.assert_called_once_with(\n start_date=start_date,\n end_date=end_date,\n conf=None,\n delay_on_limit_secs=1.0,\n donot_pickle=False,\n ignore_first_depends_on_past=True,\n ignore_task_deps=False,\n local=True,\n mark_success=False,\n pool=None,\n rerun_failed_tasks=False,\n run_backwards=True,\n verbose=False,\n continue_on_failures=False,\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 385, "n_words": 51, "vocab_size": 44, "complexity": 1, "nloc": 34, "token_counts": 153, "n_ast_nodes": 230, "n_identifiers": 31, "d_id": 8976, "documentation": { "docstring": "\n Test that CLI respects -B argument and raises on interaction with depends_on_past\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 27, "language": "en" } }, { "id": 69841, "commit_id": "8d4a20a6a843e1e35b5324bc83be422fbed04b87", "repo": "glances", "path": "glances/outputs/glances_bottle.py", "file_name": "glances_bottle.py", "fun_name": "_api_status", "commit_message": "Web server status check endpoint #1988", "code": "def _api_status(self):\n \n response.status = 200\n\n return None\n", "url": "https://github.com/nicolargo/glances.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 28, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 13, "n_ast_nodes": 24, "n_identifiers": 4, "d_id": 15108, "documentation": { "docstring": "Glances API RESTful implementation.\n\n Return a 200 status code.\n This entry point should be used to check the API health.\n\n See related issue: Web server health check endpoint #1988\n ", "n_words": 29, "vocab_size": 27, "n_whitespaces": 58, "language": "en" } }, { "id": 269439, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": 
"spatial_2d_padding", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):\n \n assert len(padding) == 2\n assert len(padding[0]) == 2\n assert len(padding[1]) == 2\n if data_format is None:\n data_format = image_data_format()\n if data_format not in {\"channels_first\", \"channels_last\"}:\n raise ValueError(\"Unknown data_format: \" + str(data_format))\n\n if data_format == \"channels_first\":\n pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])]\n else:\n pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]\n return tf.compat.v1.pad(x, pattern)\n\n\n@keras_export(\"keras.backend.spatial_3d_padding\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.backend.spatial_3d_padding\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 117, "n_words": 65, "vocab_size": 45, "complexity": 4, "nloc": 27, "token_counts": 165, "n_ast_nodes": 278, "n_identifiers": 20, "d_id": 80078, "documentation": { "docstring": "Pads the 2nd and 3rd dimensions of a 4D tensor.\n\n Args:\n x: Tensor or variable.\n padding: Tuple of 2 tuples, padding pattern.\n data_format: One of `channels_last` or `channels_first`.\n\n Returns:\n A padded 4D tensor.\n\n Raises:\n ValueError: if `data_format` is neither\n `channels_last` or `channels_first`.\n ", "n_words": 42, "vocab_size": 34, "n_whitespaces": 100, "language": "en" } }, { "id": 182594, "commit_id": "025a0e348d3d3c360498f4f2035451d50f79b40e", "repo": "textual", "path": "src/textual/widget.py", "file_name": "widget.py", "fun_name": "is_container", "commit_message": "Scrolling working", "code": "def is_container(self) -> bool:\n \n return self.styles.layout is not None\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 7, "token_counts": 17, "n_ast_nodes": 29, "n_identifiers": 5, "d_id": 43875, "documentation": { "docstring": "Check if this widget is a container (contains other widgets)\n\n Returns:\n bool: True if this widget is a container.\n ", "n_words": 19, "vocab_size": 14, "n_whitespaces": 44, "language": "en" } }, { "id": 292448, "commit_id": "b19bf9b147f4321e89d1f7f01e68337f2102f460", "repo": "core", "path": "homeassistant/components/dlna_dms/dms.py", "file_name": "dms.py", "fun_name": "available", "commit_message": "Add dlna_dms integration to support DLNA Digital Media Servers (#66437)", "code": "def available(self) -> bool:\n \n return self._device is not None and self._device.profile_device.available\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 25, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 3, "token_counts": 23, "n_ast_nodes": 38, "n_identifiers": 5, "d_id": 91534, "documentation": { "docstring": "Device is available when we have a connection to it.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 175178, "commit_id": "a94461d7189d7f1147ab304a332c8684263dc17e", "repo": "cpython", "path": "Lib/test/test_code.py", "file_name": "test_code.py", "fun_name": 
"test_co_positions_artificial_instructions", "commit_message": "bpo-46202: Remove opcode POP_EXCEPT_AND_RERAISE (GH-30302)\n\n* bpo-46202: remove opcode POP_EXCEPT_AND_RERAISE\r\n\r\n* do not assume that an exception group is truthy", "code": "def test_co_positions_artificial_instructions(self):\n import dis\n\n namespace = {}\n exec(textwrap.dedent(), namespace)\n\n exc = namespace['exc']\n traceback = exc.__traceback__\n code = traceback.tb_frame.f_code\n\n artificial_instructions = []\n for instr, positions in zip(\n dis.get_instructions(code),\n code.co_positions(),\n strict=True\n ):\n # If any of the positions is None, then all have to\n # be None as well for the case above. There are still\n # some places in the compiler, where the artificial instructions\n # get assigned the first_lineno but they don't have other positions.\n # There is no easy way of inferring them at that stage, so for now\n # we don't support it.\n self.assertTrue(positions.count(None) in [0, 4])\n\n if not any(positions):\n artificial_instructions.append(instr)\n\n self.assertEqual(\n [\n (instruction.opname, instruction.argval)\n for instruction in artificial_instructions\n ],\n [\n (\"PUSH_EXC_INFO\", None),\n (\"LOAD_CONST\", None), # artificial 'None'\n (\"STORE_NAME\", \"e\"), # XX: we know the location for this\n (\"DELETE_NAME\", \"e\"),\n (\"RERAISE\", 1),\n (\"COPY\", 3),\n (\"POP_EXCEPT\", None),\n (\"RERAISE\", 1)\n ]\n )\n", "url": "https://github.com/python/cpython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 549, "n_words": 142, "vocab_size": 105, "complexity": 4, "nloc": 37, "token_counts": 169, "n_ast_nodes": 278, "n_identifiers": 28, "d_id": 41563, "documentation": { "docstring": "\\\n try:\n 1/0\n except Exception as e:\n exc = e\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 53, "language": "en" } }, { "id": 65846, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/education/api.py", "file_name": "api.py", "fun_name": "get_assessment_criteria", "commit_message": "style: format code with black", "code": "def get_assessment_criteria(course):\n\t\n\treturn frappe.get_all(\n\t\t\"Course Assessment Criteria\",\n\t\tfields=[\"assessment_criteria\", \"weightage\"],\n\t\tfilters={\"parent\": course},\n\t\torder_by=\"idx\",\n\t)\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 6, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 7, "token_counts": 34, "n_ast_nodes": 72, "n_identifiers": 8, "d_id": 14035, "documentation": { "docstring": "Returns Assessmemt Criteria and their Weightage from Course Master.\n\n\t:param Course: Course\n\t", "n_words": 12, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 132338, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tune/syncer.py", "file_name": "syncer.py", "fun_name": "get_cloud_syncer", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def get_cloud_syncer(local_dir, remote_dir=None, sync_function=None) -> CloudSyncer:\n \n key = (local_dir, remote_dir)\n\n if key in _syncers:\n return _syncers[key]\n\n if not remote_dir:\n _syncers[key] = CloudSyncer(local_dir, remote_dir, NOOP)\n return _syncers[key]\n\n if sync_function == \"auto\":\n sync_function = None # 
Auto-detect\n\n # Maybe get user-provided sync client here\n client = get_sync_client(sync_function)\n\n if client:\n # If the user provided a sync template or function\n _syncers[key] = CloudSyncer(local_dir, remote_dir, client)\n else:\n # Else, get default cloud sync client (e.g. S3 syncer)\n sync_client = get_cloud_sync_client(remote_dir)\n _syncers[key] = CloudSyncer(local_dir, remote_dir, sync_client)\n\n return _syncers[key]\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 177, "n_words": 83, "vocab_size": 53, "complexity": 5, "nloc": 38, "token_counts": 111, "n_ast_nodes": 174, "n_identifiers": 12, "d_id": 29744, "documentation": { "docstring": "Returns a Syncer.\n\n This syncer is in charge of syncing the local_dir with upload_dir.\n\n If no ``remote_dir`` is provided, it will return a no-op syncer.\n\n If a ``sync_function`` is provided, it will return a CloudSyncer using\n a custom SyncClient initialized by the sync function. Otherwise it will\n return a CloudSyncer with default templates for s3/gs/hdfs.\n\n Args:\n local_dir (str): Source directory for syncing.\n remote_dir (str): Target directory for syncing. If not provided, a\n no-op Syncer is returned.\n sync_function (func | str): Function for syncing the local_dir to\n remote_dir. If string, then it must be a string template for\n syncer to run. If not provided, it defaults\n to standard S3, gsutil or HDFS sync commands.\n\n Raises:\n ValueError if malformed remote_dir.\n ", "n_words": 118, "vocab_size": 72, "n_whitespaces": 214, "language": "en" } }, { "id": 266979, "commit_id": "353511a900f6216a25a25d8a36528f636428b57b", "repo": "ansible", "path": "test/sanity/code-smell/package-data.py", "file_name": "package-data.py", "fun_name": "assemble_files_to_ship", "commit_message": "Add script to handle more deprecations. 
(#77400)\n\n* Add script to handle more deprecations.\r\n\r\nThis script currently supports deprecations from the following sanity tests:\r\n\r\n* deprecated-config\r\n* update-bundled\r\n\r\n* Ignore script in package-data test.", "code": "def assemble_files_to_ship(complete_file_list):\n \n # All files which are in the repository except these:\n ignore_patterns = (\n # Developer-only tools\n '.azure-pipelines/*',\n '.github/*',\n '.github/*/*',\n 'changelogs/fragments/*',\n 'hacking/backport/*',\n 'hacking/azp/*',\n 'hacking/tests/*',\n 'hacking/ticket_stubs/*',\n 'test/sanity/code-smell/botmeta.*',\n 'test/sanity/code-smell/release-names.*',\n 'test/utils/*',\n 'test/utils/*/*',\n 'test/utils/*/*/*',\n 'test/results/.tmp/*',\n 'test/results/.tmp/*/*',\n 'test/results/.tmp/*/*/*',\n 'test/results/.tmp/*/*/*/*',\n 'test/results/.tmp/*/*/*/*/*',\n '.git*',\n )\n ignore_files = frozenset((\n # Developer-only tools\n 'changelogs/config.yaml',\n 'hacking/README.md',\n 'hacking/ansible-profile',\n 'hacking/cgroup_perf_recap_graph.py',\n 'hacking/create_deprecated_issues.py',\n 'hacking/deprecated_issue_template.md',\n 'hacking/create_deprecation_bug_reports.py',\n 'hacking/fix_test_syntax.py',\n 'hacking/get_library.py',\n 'hacking/metadata-tool.py',\n 'hacking/report.py',\n 'hacking/return_skeleton_generator.py',\n 'hacking/test-module',\n 'test/support/README.md',\n 'test/lib/ansible_test/_internal/commands/sanity/bin_symlinks.py',\n 'test/lib/ansible_test/_internal/commands/sanity/integration_aliases.py',\n '.cherry_picker.toml',\n '.mailmap',\n # Generated as part of a build step\n 'docs/docsite/rst/conf.py',\n 'docs/docsite/rst/index.rst',\n # Possibly should be included\n 'examples/scripts/uptime.py',\n 'examples/scripts/my_test.py',\n 'examples/scripts/my_test_info.py',\n 'examples/scripts/my_test_facts.py',\n 'examples/DOCUMENTATION.yml',\n 'examples/play.yml',\n 'examples/hosts.yaml',\n 'examples/hosts.yml',\n 'examples/inventory_script_schema.json',\n 'examples/plugin_filters.yml',\n 'hacking/env-setup',\n 'hacking/env-setup.fish',\n 'MANIFEST',\n 'setup.cfg',\n # docs for test files not included in sdist\n 'docs/docsite/rst/dev_guide/testing/sanity/bin-symlinks.rst',\n 'docs/docsite/rst/dev_guide/testing/sanity/botmeta.rst',\n 'docs/docsite/rst/dev_guide/testing/sanity/integration-aliases.rst',\n 'docs/docsite/rst/dev_guide/testing/sanity/release-names.rst',\n ))\n\n # These files are generated and then intentionally added to the sdist\n\n # Manpages\n ignore_script = ('ansible-connection', 'ansible-test')\n manpages = ['docs/man/man1/ansible.1']\n for dirname, dummy, files in os.walk('bin'):\n for filename in files:\n if filename in ignore_script:\n continue\n manpages.append('docs/man/man1/%s.1' % filename)\n\n # Misc\n misc_generated_files = [\n 'PKG-INFO',\n ]\n\n shipped_files = manpages + misc_generated_files\n\n for path in complete_file_list:\n if path not in ignore_files:\n for ignore in ignore_patterns:\n if fnmatch.fnmatch(path, ignore):\n break\n else:\n shipped_files.append(path)\n\n return shipped_files\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 791, "n_words": 177, "vocab_size": 136, "complexity": 8, "nloc": 81, "token_counts": 224, "n_ast_nodes": 423, "n_identifiers": 19, "d_id": 78675, "documentation": { "docstring": "\n This looks for all files which should be shipped in the sdist\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 19, "language": "en" } }, 
{ "id": 258898, "commit_id": "1fc86b6aacd89da44a3b4e8abf7c3e2ba4336ffe", "repo": "scikit-learn", "path": "sklearn/linear_model/_ridge.py", "file_name": "_ridge.py", "fun_name": "_solve_svd_design_matrix", "commit_message": "MNT Update black to stable version (#22474)", "code": "def _solve_svd_design_matrix(self, alpha, y, sqrt_sw, X_mean, singvals_sq, U, UT_y):\n \n w = ((singvals_sq + alpha) ** -1) - (alpha**-1)\n if self.fit_intercept:\n # detect intercept column\n normalized_sw = sqrt_sw / np.linalg.norm(sqrt_sw)\n intercept_dim = _find_smallest_angle(normalized_sw, U)\n # cancel the regularization for the intercept\n w[intercept_dim] = -(alpha**-1)\n c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha**-1) * y\n G_inverse_diag = self._decomp_diag(w, U) + (alpha**-1)\n if len(y.shape) != 1:\n # handle case where y is 2-d\n G_inverse_diag = G_inverse_diag[:, np.newaxis]\n return G_inverse_diag, c\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 202, "n_words": 76, "vocab_size": 57, "complexity": 3, "nloc": 11, "token_counts": 143, "n_ast_nodes": 220, "n_identifiers": 25, "d_id": 75474, "documentation": { "docstring": "Compute dual coefficients and diagonal of G^-1.\n\n Used when we have an SVD decomposition of X\n (n_samples > n_features and X is dense).\n ", "n_words": 23, "vocab_size": 20, "n_whitespaces": 44, "language": "en" } }, { "id": 182902, "commit_id": "a72e347ed99333a090377ee438eaf63477cbf98b", "repo": "textual", "path": "src/textual/devtools/service.py", "file_name": "service.py", "fun_name": "_send_server_info_to_all", "commit_message": "Seperate server and client handling logic into classes for devtools", "code": "async def _send_server_info_to_all(self) -> None:\n \n for client_handler in self.clients:\n await self.send_server_info(client_handler)\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 36, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 4, "token_counts": 22, "n_ast_nodes": 39, "n_identifiers": 5, "d_id": 44001, "documentation": { "docstring": "Add `server_info` message to the queues of every client", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 223221, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/tests/test_filelist.py", "file_name": "test_filelist.py", "fun_name": "test_basic_discovery", "commit_message": "add python 3.10.4 for windows", "code": "def test_basic_discovery(self):\n \n with os_helper.temp_cwd():\n os.mkdir('foo')\n file1 = os.path.join('foo', 'file1.txt')\n os_helper.create_empty_file(file1)\n os.mkdir('bar')\n file2 = os.path.join('bar', 'file2.txt')\n os_helper.create_empty_file(file2)\n expected = [file2, file1]\n self.assertEqual(sorted(filelist.findall()), expected)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 124, "n_words": 22, "vocab_size": 20, "complexity": 1, "nloc": 10, "token_counts": 83, "n_ast_nodes": 149, "n_identifiers": 16, "d_id": 56865, "documentation": { "docstring": "\n When findall is called with no parameters or with\n '.' 
as the parameter, the dot should be omitted from\n the results.\n ", "n_words": 21, "vocab_size": 18, "n_whitespaces": 50, "language": "en" } }, { "id": 101819, "commit_id": "47867a0dd424b3e31d7beead0ffdb8b37c970a9e", "repo": "faceswap", "path": "lib/gui/analysis/stats.py", "file_name": "stats.py", "fun_name": "_total_stats", "commit_message": "typing: lib.gui.analysis.stats", "code": "def _total_stats(self) -> Dict[str, Union[str, int, float]]:\n \n logger.debug(\"Compiling Totals\")\n elapsed = 0\n examples = 0\n iterations = 0\n batchset = set()\n total_summaries = len(self._per_session_stats)\n for idx, summary in enumerate(self._per_session_stats):\n if idx == 0:\n starttime = summary[\"start\"]\n if idx == total_summaries - 1:\n endtime = summary[\"end\"]\n elapsed += summary[\"elapsed\"]\n examples += ((summary[\"batch\"] * 2) * summary[\"iterations\"])\n batchset.add(summary[\"batch\"])\n iterations += summary[\"iterations\"]\n batch = \",\".join(str(bs) for bs in batchset)\n totals = {\"session\": \"Total\",\n \"start\": starttime,\n \"end\": endtime,\n \"elapsed\": elapsed,\n \"rate\": examples / elapsed if elapsed != 0 else 0,\n \"batch\": batch,\n \"iterations\": iterations}\n logger.debug(totals)\n return totals\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 375, "n_words": 93, "vocab_size": 65, "complexity": 6, "nloc": 36, "token_counts": 184, "n_ast_nodes": 311, "n_identifiers": 27, "d_id": 21206, "documentation": { "docstring": " Compile the Totals stats.\n Totals are fully calculated each time as they will change on the basis of the training\n session.\n\n Returns\n -------\n dict\n The Session name, start time, end time, elapsed time, rate, batch size and number of\n iterations for all session ids within the loaded data.\n ", "n_words": 48, "vocab_size": 41, "n_whitespaces": 113, "language": "en" } }, { "id": 151437, "commit_id": "3e08c6e5409d3e1b9c6f787415869e3e49289a00", "repo": "freqtrade", "path": "scripts/ws_client.py", "file_name": "ws_client.py", "fun_name": "json_deserialize", "commit_message": "testing/debugging ws client script", "code": "def json_deserialize(message):\n ", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 4, "token_counts": 21, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 35014, "documentation": { "docstring": "\n Deserialize JSON to a dict\n :param message: The message to deserialize\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 21, "language": "en" } }, { "id": 156832, "commit_id": "e61405cb5d345e73f1952ee3d50708566b5263d1", "repo": "dask", "path": "dask/utils.py", "file_name": "utils.py", "fun_name": "apply", "commit_message": "Docs: how to use kwargs with custom task graphs (#9322)", "code": "def apply(func, args, kwargs=None):\n \n if kwargs:\n return func(*args, **kwargs)\n else:\n return func(*args)\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 35, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 32, "n_ast_nodes": 53, "n_identifiers": 4, "d_id": 36780, "documentation": { "docstring": "Apply a function given its positional and keyword arguments.\n\n Equivalent to ``func(*args, **kwargs)``\n Most Dask users will never need to use the ``apply`` 
function.\n It is typically only used by people who need to inject\n keyword argument values into a low level Dask task graph.\n\n Parameters\n ----------\n func : callable\n The function you want to apply.\n args : tuple\n A tuple containing all the positional arguments needed for ``func``\n (eg: ``(arg_1, arg_2, arg_3)``)\n kwargs : dict, optional\n A dictionary mapping the keyword arguments\n (eg: ``{\"kwarg_1\": value, \"kwarg_2\": value}``\n\n Examples\n --------\n >>> from dask.utils import apply\n >>> def add(number, second_number=5):\n ... return number + second_number\n ...\n >>> apply(add, (10,), {\"second_number\": 2}) # equivalent to add(*args, **kwargs)\n 12\n\n >>> task = apply(add, (10,), {\"second_number\": 2})\n >>> dsk = {'task-name': task} # adds the task to a low level Dask task graph\n ", "n_words": 139, "vocab_size": 100, "n_whitespaces": 240, "language": "en" } }, { "id": 196258, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/functions/special/tensor_functions.py", "file_name": "tensor_functions.py", "fun_name": "preferred_index", "commit_message": "Updated import locations", "code": "def preferred_index(self):\n \n if self._get_preferred_index():\n return self.args[1]\n else:\n return self.args[0]\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 52, "n_words": 9, "vocab_size": 8, "complexity": 2, "nloc": 5, "token_counts": 29, "n_ast_nodes": 49, "n_identifiers": 4, "d_id": 47758, "documentation": { "docstring": "\n Returns the index which is preferred to keep in the final expression.\n\n Explanation\n ===========\n\n The preferred index is the index with more information regarding fermi\n level. 
If indices contain the same information, 'a' is preferred before\n 'b'.\n\n Examples\n ========\n\n >>> from sympy import KroneckerDelta, Symbol\n >>> a = Symbol('a', above_fermi=True)\n >>> i = Symbol('i', below_fermi=True)\n >>> j = Symbol('j', below_fermi=True)\n >>> p = Symbol('p')\n >>> KroneckerDelta(p, i).preferred_index\n i\n >>> KroneckerDelta(p, a).preferred_index\n a\n >>> KroneckerDelta(i, j).preferred_index\n i\n\n See Also\n ========\n\n killable_index\n\n ", "n_words": 80, "vocab_size": 55, "n_whitespaces": 242, "language": "en" } }, { "id": 222344, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/datetime.py", "file_name": "datetime.py", "fun_name": "isoformat", "commit_message": "add python 3.10.4 for windows", "code": "def isoformat(self, sep='T', timespec='auto'):\n \n s = (\"%04d-%02d-%02d%c\" % (self._year, self._month, self._day, sep) +\n _format_time(self._hour, self._minute, self._second,\n self._microsecond, timespec))\n\n off = self.utcoffset()\n tz = _format_offset(off)\n if tz:\n s += tz\n\n return s\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 121, "n_words": 31, "vocab_size": 26, "complexity": 2, "nloc": 9, "token_counts": 77, "n_ast_nodes": 121, "n_identifiers": 17, "d_id": 56543, "documentation": { "docstring": "Return the time formatted according to ISO.\n\n The full format looks like 'YYYY-MM-DD HH:MM:SS.mmmmmm'.\n By default, the fractional part is omitted if self.microsecond == 0.\n\n If self.tzinfo is not None, the UTC offset is also attached, giving\n giving a full format of 'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM'.\n\n Optional argument sep specifies the separator between date and\n time, default 'T'.\n\n The optional argument timespec specifies the number of additional\n terms of the time to include. Valid options are 'auto', 'hours',\n 'minutes', 'seconds', 'milliseconds' and 'microseconds'.\n ", "n_words": 81, "vocab_size": 62, "n_whitespaces": 151, "language": "en" } }, { "id": 139339, "commit_id": "4c1f27118a3af246006ab63325cdff53321bf68b", "repo": "ray", "path": "dashboard/modules/job/tests/test_job_manager.py", "file_name": "test_job_manager.py", "fun_name": "test_cuda_visible_devices", "commit_message": "[job submission] Don't set CUDA_VISIBLE_DEVICES in job driver (#24546)\n\nCurrently job drivers cannot use GPUs due to `CUDA_VISIBLE_DEVICES` being set (no resource request for job driver's supervisor actor). 
This is a regression from `ray submit`.\r\n\r\nThis is a temporary workaround -- in the future we should support a resource request for the job supervisor actor.", "code": "async def test_cuda_visible_devices(self, job_manager):\n \n run_cmd = f\"python {_driver_script_path('check_cuda_devices.py')}\"\n job_id = job_manager.submit_job(entrypoint=run_cmd)\n\n await async_wait_for_condition(\n check_job_succeeded, job_manager=job_manager, job_id=job_id\n )\n\n\n@pytest.mark.asyncio", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.asyncio", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 63, "n_words": 18, "vocab_size": 17, "complexity": 1, "nloc": 6, "token_counts": 35, "n_ast_nodes": 79, "n_identifiers": 13, "d_id": 31668, "documentation": { "docstring": "Check CUDA_VISIBLE_DEVICES behavior.\n\n Should not be set in the driver, but should be set in tasks.\n ", "n_words": 16, "vocab_size": 13, "n_whitespaces": 30, "language": "en" } }, { "id": 155212, "commit_id": "193505fdf0c984743397ba3df56262f30aee13a8", "repo": "modin", "path": "modin/experimental/core/execution/unidist/implementations/pandas_on_unidist/io/sql.py", "file_name": "sql.py", "fun_name": "query_put_bounders", "commit_message": "FEAT-#5053: Add pandas on unidist execution with MPI backend (#5059)\n\nSigned-off-by: Igoshev, Iaroslav ", "code": "def query_put_bounders(query, partition_column, start, end):\n \n where = \" WHERE TMP_TABLE.{0} >= {1} AND TMP_TABLE.{0} <= {2}\".format(\n partition_column, start, end\n )\n query_with_bounders = \"SELECT * FROM ({0}) AS TMP_TABLE {1}\".format(query, where)\n return query_with_bounders\n\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 54, "n_words": 32, "vocab_size": 27, "complexity": 1, "nloc": 6, "token_counts": 36, "n_ast_nodes": 58, "n_identifiers": 8, "d_id": 36303, "documentation": { "docstring": "\n Put partition boundaries into the query.\n\n Parameters\n ----------\n query : str\n SQL query string.\n partition_column : str\n Column name used for data partitioning between the workers.\n start : int\n Lowest value to request from the `partition_column`.\n end : int\n Highest value to request from the `partition_column`.\n\n Returns\n -------\n str\n Query string with boundaries.\n ", "n_words": 53, "vocab_size": 38, "n_whitespaces": 122, "language": "en" } }, { "id": 244278, "commit_id": "d18cdb140ef3cb9ed5fdef6f1a815f5836f1b1ab", "repo": "mmdetection", "path": "mmdet/models/dense_heads/solo_head.py", "file_name": "solo_head.py", "fun_name": "resize_feats", "commit_message": "[Feature] Support SOLOv2 (#7441)\n\n* solov2 init\r\n\r\n* solov2 r18 lightweight\r\n\r\n* add model docstrings and reformat the code\r\n\r\n* add docstrings to model method\r\n\r\n* add solov2 big model config and correct some errors in the docstring\r\n\r\n* fix linting issues\r\n\r\n* refactor code and configs\r\n\r\n* rename variables according to the convention\r\n\r\n* add and enhance solov2 logic\r\n\r\n* add doc strings\r\n\r\n* update solov2 config files\r\n\r\n* fix norm_cfg in mask head\r\n\r\n* minor fix\r\n\r\n* update configs\r\n\r\nCo-authored-by: BIGWangYuDong ", "code": "def resize_feats(self, feats):\n \n out = []\n for i in range(len(feats)):\n if i == 0:\n out.append(\n F.interpolate(\n feats[0],\n size=feats[i + 1].shape[-2:],\n mode='bilinear',\n align_corners=False))\n elif i == len(feats) - 1:\n out.append(\n F.interpolate(\n 
feats[i],\n size=feats[i - 1].shape[-2:],\n mode='bilinear',\n align_corners=False))\n else:\n out.append(feats[i])\n return out\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 368, "n_words": 40, "vocab_size": 29, "complexity": 4, "nloc": 20, "token_counts": 127, "n_ast_nodes": 198, "n_identifiers": 14, "d_id": 70305, "documentation": { "docstring": "Downsample the first feat and upsample last feat in feats.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 251580, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "mitmproxy/tools/console/window.py", "file_name": "window.py", "fun_name": "top_widget", "commit_message": "make it black!", "code": "def top_widget(self):\n \n if self.overlay:\n return self.overlay\n return self.top_window()\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 40, "n_words": 8, "vocab_size": 7, "complexity": 2, "nloc": 4, "token_counts": 21, "n_ast_nodes": 37, "n_identifiers": 4, "d_id": 73795, "documentation": { "docstring": "\n The current top widget - either a window or the active overlay.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 27, "language": "en" } }, { "id": 275783, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/preprocessing/text.py", "file_name": "text.py", "fun_name": "get_config", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def get_config(self):\n \n json_word_counts = json.dumps(self.word_counts)\n json_word_docs = json.dumps(self.word_docs)\n json_index_docs = json.dumps(self.index_docs)\n json_word_index = json.dumps(self.word_index)\n json_index_word = json.dumps(self.index_word)\n\n return {\n \"num_words\": self.num_words,\n \"filters\": self.filters,\n \"lower\": self.lower,\n \"split\": self.split,\n \"char_level\": self.char_level,\n \"oov_token\": self.oov_token,\n \"document_count\": self.document_count,\n \"word_counts\": json_word_counts,\n \"word_docs\": json_word_docs,\n \"index_docs\": json_index_docs,\n \"index_word\": json_index_word,\n \"word_index\": json_word_index,\n }\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 232, "n_words": 44, "vocab_size": 40, "complexity": 1, "nloc": 20, "token_counts": 121, "n_ast_nodes": 203, "n_identifiers": 21, "d_id": 81462, "documentation": { "docstring": "Returns the tokenizer configuration as Python dictionary.\n\n The word count dictionaries used by the tokenizer get serialized\n into plain JSON, so that the configuration can be read by other\n projects.\n\n Returns:\n A Python dictionary with the tokenizer configuration.\n ", "n_words": 38, "vocab_size": 30, "n_whitespaces": 84, "language": "en" } }, { "id": 191744, "commit_id": "a4c1ff1391bfa45b78db5473d1df4a1ace6651f5", "repo": "owasp-mastg", "path": "tools/scripts/parse_html.py", "file_name": "parse_html.py", "fun_name": "write_file", "commit_message": "Generate MSTG Checklists automatically and machine-readable YAML (#2010)\n\n* port masvs checklist generation to the mstg\r\n* add recursive ls\r\n* fix Tools -> tools\r\n* generate MSTG html\r\n* checkout latest masvs tag\r\n* use GITHUB_ENV instead of steps.output\r\n* add MASVS and MSTG link including versions 
and commit IDs\r\n* add new logo\r\n* set avenir as main font\r\n* add column status with validation\r\n* add conditional formatting for pass, fail and n/a\r\n* add step Show openpyxl Version\r\n* try format only relevant status cells\r\n* create new About sheet with the same header\r\n* add intro to project\r\n* black and flake8 fixes", "code": "def write_file(masvs_file, input_file, output_file):\n \n\n # enhanced_masvs_dict = {}\n # for file in Path('masvs_yaml').glob('*.yaml'):\n # masvs_dict = yaml.load(open(file))\n # enhanced_masvs_dict[MASVS_TITLES[file.stem]] = masvs_dict\n\n masvs = yaml.safe_load(open(masvs_file))\n\n testcases_info = []\n\n for file in Path(input_file).glob(\"*.html\"):\n\n contents = file.read_text()\n\n chapter = BeautifulSoup(contents, \"lxml\")\n\n # print(get_links_to_other_chapters(chapter))\n\n # print(get_all_links_to_tools(chapter))\n # print(get_links_to_tools_per_section(chapter))\n\n testcases_info += get_testcases_info(f\"{file.stem}.md\", chapter)\n # print_yaml(testcases_info)\n\n # print(get_sections_plain_text(chapter, \"overview\"))\n # print(get_sections_innerHtml(chapter, \"overview\"))\n\n for tc in testcases_info:\n for id in tc[\"mstg_ids\"]:\n if masvs.get(id):\n # masvs[id].update(tc)\n masvs_req = masvs[id]\n if not masvs_req.get(\"links\"):\n masvs_req[\"links\"] = []\n masvs_req[\"links\"].append(tc[\"link\"])\n # masvs_dict[id]['solution'].append(tc['overview']) # todo\n\n # print_yaml(masvs)\n write_yaml_file(output_file, masvs)\n\n", "url": "https://github.com/OWASP/owasp-mastg.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 298, "n_words": 86, "vocab_size": 53, "complexity": 6, "nloc": 15, "token_counts": 123, "n_ast_nodes": 227, "n_identifiers": 24, "d_id": 46842, "documentation": { "docstring": "\n Parses the MSTG and completes the MASVS file with information from the MSTG.\n ", "n_words": 13, "vocab_size": 11, "n_whitespaces": 20, "language": "en" } }, { "id": 218383, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/inspect.py", "file_name": "inspect.py", "fun_name": "_signature_get_bound_param", "commit_message": "add python 3.10.4 for windows", "code": "def _signature_get_bound_param(spec):\n \n\n assert spec.startswith('($')\n\n pos = spec.find(',')\n if pos == -1:\n pos = spec.find(')')\n\n cpos = spec.find(':')\n assert cpos == -1 or cpos > pos\n\n cpos = spec.find('=')\n assert cpos == -1 or cpos > pos\n\n return spec[2:pos]\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 72, "n_words": 38, "vocab_size": 19, "complexity": 4, "nloc": 10, "token_counts": 76, "n_ast_nodes": 134, "n_identifiers": 6, "d_id": 55271, "documentation": { "docstring": " Private helper to get first parameter name from a\n __text_signature__ of a builtin method, which should\n be in the following format: '($param1, ...)'.\n Assumptions are that the first argument won't have\n a default value or an annotation.\n ", "n_words": 37, "vocab_size": 33, "n_whitespaces": 53, "language": "en" } }, { "id": 56199, "commit_id": "b5b3d808bf059294a7adf17156e4ccdb5a3799da", "repo": "prefect", "path": "src/prefect/orion/database/migrations/versions/postgresql/2022_06_04_133535_d60c18774a5d_add_indexes_for_partial_name_matches.py", "file_name": "2022_06_04_133535_d60c18774a5d_add_indexes_for_partial_name_matches.py", "fun_name": "upgrade", "commit_message": "Add 
index migrations", "code": "def upgrade():\n with op.get_context().autocommit_block():\n op.execute(\n \n )\n op.execute(\n \n )\n op.execute(\n \n )\n op.execute(\n \n )\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 118, "n_words": 12, "vocab_size": 6, "complexity": 1, "nloc": 30, "token_counts": 39, "n_ast_nodes": 77, "n_identifiers": 5, "d_id": 11462, "documentation": { "docstring": "\n CREATE INDEX CONCURRENTLY \n trgm_ix_flow_name \n ON flow USING gin (name gin_trgm_ops);\n \n CREATE INDEX CONCURRENTLY \n trgm_ix_flow_run_name \n ON flow_run USING gin (name gin_trgm_ops);\n \n CREATE INDEX CONCURRENTLY \n trgm_ix_task_run_name \n ON task_run USING gin (name gin_trgm_ops);\n \n CREATE INDEX CONCURRENTLY \n trgm_ix_deployment_name \n ON deployment USING gin (name gin_trgm_ops);\n ", "n_words": 40, "vocab_size": 16, "n_whitespaces": 228, "language": "en" } }, { "id": 61961, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py", "file_name": "database.py", "fun_name": "write_exports", "commit_message": "upd; format", "code": "def write_exports(self, exports):\n \n rf = self.get_distinfo_file(EXPORTS_FILENAME)\n with open(rf, 'w') as f:\n write_exports(exports, f)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 45, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 4, "token_counts": 32, "n_ast_nodes": 57, "n_identifiers": 8, "d_id": 12781, "documentation": { "docstring": "\n Write a dictionary of exports to a file in .ini format.\n :param exports: A dictionary of exports, mapping an export category to\n a list of :class:`ExportEntry` instances describing the\n individual export entries.\n ", "n_words": 32, "vocab_size": 25, "n_whitespaces": 100, "language": "en" } }, { "id": 220708, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/sslproto.py", "file_name": "sslproto.py", "fun_name": "pause_writing", "commit_message": "add python 3.10.4 for windows", "code": "def pause_writing(self):\n \n self._app_protocol.pause_writing()\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 17, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 25, "n_identifiers": 3, "d_id": 56095, "documentation": { "docstring": "Called when the low-level transport's buffer goes over\n the high-water mark.\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 36752, "commit_id": "81ac45f85c35244831f11f73c09ea10eee4f953a", "repo": "transformers", "path": "src/transformers/training_args.py", "file_name": "training_args.py", "fun_name": "local_process_index", "commit_message": "update smddp api to v1.4.0 (#16371)\n\n* update smddp api to v1.4.0\r\n\r\n* Update src/transformers/trainer.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Update src/transformers/trainer.py\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* address comments\r\n\r\n* fix style\r\n\r\n* remove unused import\r\n\r\n* fix indent\r\n\r\n* disable style check for import\r\n\r\n* fix space\r\n\r\nCo-authored-by: Sylvain 
Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def local_process_index(self):\n \n if is_torch_tpu_available():\n return xm.get_local_ordinal()\n elif is_sagemaker_mp_enabled():\n return smp.local_rank()\n elif is_sagemaker_dp_enabled():\n return dist.get_rank()\n elif self.local_rank != -1:\n return self.local_rank\n return 0\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 108, "n_words": 22, "vocab_size": 15, "complexity": 5, "nloc": 10, "token_counts": 53, "n_ast_nodes": 92, "n_identifiers": 11, "d_id": 6671, "documentation": { "docstring": "\n The index of the local process used.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 294590, "commit_id": "9d14201b13be4f5a5cc5e5f52bba56bfd8fa9694", "repo": "core", "path": "tests/components/zwave_js/test_lock.py", "file_name": "test_lock.py", "fun_name": "test_only_one_lock", "commit_message": "Don't create two zwave_js.lock entities for a single device (#68651)", "code": "async def test_only_one_lock(hass, client, lock_home_connect_620, integration):\n \n assert len(hass.states.async_entity_ids(\"lock\")) == 1\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 16, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 2, "token_counts": 26, "n_ast_nodes": 44, "n_identifiers": 8, "d_id": 93624, "documentation": { "docstring": "Test node with both Door Lock and Lock CC values only gets one lock entity.", "n_words": 15, "vocab_size": 14, "n_whitespaces": 14, "language": "en" } }, { "id": 164912, "commit_id": "15a06d3d9e7656afff239da7a295a7b684456680", "repo": "pandas", "path": "pandas/conftest.py", "file_name": "conftest.py", "fun_name": "axis_1", "commit_message": "BUG: groupby.size and groupby.transform('size') incorrect for axis=1 (#45987)", "code": "def axis_1(request):\n \n return request.param\n\n\n@pytest.fixture(params=[True, False, None])", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "@pytest.fixture(params=[True, False, None])", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 12, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 41, "n_identifiers": 6, "d_id": 39619, "documentation": { "docstring": "\n Fixture for returning aliases of axis 1 of a DataFrame.\n ", "n_words": 10, "vocab_size": 9, "n_whitespaces": 17, "language": "en" } }, { "id": 269998, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/callbacks_test.py", "file_name": "callbacks_test.py", "fun_name": "list_summaries", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def list_summaries(logdir):\n \n result = _SummaryFile()\n for (dirpath, _, filenames) in os.walk(logdir):\n for filename in filenames:\n if not filename.startswith(\"events.out.\"):\n continue\n path = os.path.join(dirpath, filename)\n for event in tf.compat.v1.train.summary_iterator(path):\n if event.graph_def:\n result.graph_defs.append(event.graph_def)\n if not event.summary: # (e.g., it's a `graph_def` event)\n continue\n for value in event.summary.value:\n tag = value.tag\n # Case on the `value` rather than the summary metadata because\n # the Keras callback uses `summary_ops_v2` to emit old-style\n # summaries. 
See b/124535134.\n kind = value.WhichOneof(\"value\")\n container = {\n \"simple_value\": result.scalars,\n \"image\": result.images,\n \"histo\": result.histograms,\n \"tensor\": result.tensors,\n }.get(kind)\n if container is None:\n raise ValueError(\n \"Unexpected summary kind %r in event file %s:\\n%r\"\n % (kind, path, event)\n )\n elif kind == \"tensor\" and tag != \"keras\":\n # Convert the tf2 summary proto to old style for type checking.\n plugin_name = value.metadata.plugin_data.plugin_name\n container = {\n \"images\": result.images,\n \"histograms\": result.histograms,\n \"scalars\": result.scalars,\n }.get(plugin_name)\n if container is not None:\n result.convert_from_v2_summary_proto = True\n else:\n container = result.tensors\n container.add(_ObservedSummary(logdir=dirpath, tag=tag))\n return result\n\n\n@test_combinations.run_with_all_model_types\n@test_combinations.run_all_keras_modes(always_skip_v1=True)", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@test_combinations.run_with_all_model_types\n@test_combinations.run_all_keras_modes(always_skip_v1=True)", "n_ast_errors": 1, "ast_levels": 22, "n_whitespaces": 960, "n_words": 156, "vocab_size": 107, "complexity": 12, "nloc": 39, "token_counts": 245, "n_ast_nodes": 426, "n_identifiers": 44, "d_id": 80375, "documentation": { "docstring": "Read all summaries under the logdir into a `_SummaryFile`.\n\n Args:\n logdir: A path to a directory that contains zero or more event\n files, either as direct children or in transitive subdirectories.\n Summaries in these events must only contain old-style scalars,\n images, and histograms. Non-summary events, like `graph_def`s, are\n ignored.\n\n Returns:\n A `_SummaryFile` object reflecting all summaries written to any\n event files in the logdir or any of its descendant directories.\n\n Raises:\n ValueError: If an event file contains an summary of unexpected kind.\n ", "n_words": 82, "vocab_size": 65, "n_whitespaces": 142, "language": "en" } }, { "id": 229539, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/contour/_line.py", "file_name": "_line.py", "fun_name": "dash", "commit_message": "switch to black .22", "code": "def dash(self):\n \n return self[\"dash\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 61212, "documentation": { "docstring": "\n Sets the dash style of lines. Set to a dash type string\n (\"solid\", \"dot\", \"dash\", \"longdash\", \"dashdot\", or\n \"longdashdot\") or a dash length list in px (eg\n \"5px,10px,2px,2px\").\n\n The 'dash' property is an enumeration that may be specified as:\n - One of the following dash styles:\n ['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']\n - A string containing a dash length list in pixels or percentages\n (e.g. 
'5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)\n\n Returns\n -------\n str\n ", "n_words": 80, "vocab_size": 65, "n_whitespaces": 192, "language": "en" } }, { "id": 279672, "commit_id": "ba5086fa31d24a9f61b46d4a844311b58dea7ff1", "repo": "keras", "path": "keras/engine/base_layer.py", "file_name": "base_layer.py", "fun_name": "_get_state", "commit_message": "Keras saving: A prototype of config-based (idempotent) saving and loading, with simple model state restoration added. It's done via the archive provided by `zipfile` package.\n\nPreliminary for review and the APIs and implementation are subject to changes.\n\nPiperOrigin-RevId: 470784761", "code": "def _get_state(self):\n \n result = {}\n for child_attr, child_obj in self.__dict__.items():\n # TODO(rchao): Store non-variable states in the dict as well.\n if isinstance(child_obj, tf.Variable):\n result[child_attr] = child_obj.numpy()\n elif saving_lib.is_container(child_obj):\n for k, contained_obj in enumerate(child_obj):\n if isinstance(contained_obj, tf.Variable):\n # Handling the case where `child_obj` is a list/tuple.\n result[f\"{child_attr}-{k}\"] = contained_obj.numpy()\n elif isinstance(child_obj, dict) and isinstance(\n child_obj[contained_obj], tf.Variable\n ):\n # Handling the case where `child_obj` is a dict.\n result[f\"{child_attr}-{contained_obj}\"] = child_obj[\n contained_obj\n ].numpy()\n return result\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 385, "n_words": 72, "vocab_size": 50, "complexity": 8, "nloc": 16, "token_counts": 118, "n_ast_nodes": 205, "n_identifiers": 17, "d_id": 83087, "documentation": { "docstring": "Experimental method for getting the state of this layer object.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 196251, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/functions/special/hyper.py", "file_name": "hyper.py", "fun_name": "radius_of_convergence", "commit_message": "Updated import locations", "code": "def radius_of_convergence(self):\n \n if any(a.is_integer and (a <= 0) == True for a in self.ap + self.bq):\n aints = [a for a in self.ap if a.is_Integer and (a <= 0) == True]\n bints = [a for a in self.bq if a.is_Integer and (a <= 0) == True]\n if len(aints) < len(bints):\n return S.Zero\n popped = False\n for b in bints:\n cancelled = False\n while aints:\n a = aints.pop()\n if a >= b:\n cancelled = True\n break\n popped = True\n if not cancelled:\n return S.Zero\n if aints or popped:\n # There are still non-positive numerator parameters.\n # This is a polynomial.\n return oo\n if len(self.ap) == len(self.bq) + 1:\n return S.One\n elif len(self.ap) <= len(self.bq):\n return oo\n else:\n return S.Zero\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 479, "n_words": 118, "vocab_size": 61, "complexity": 19, "nloc": 25, "token_counts": 185, "n_ast_nodes": 294, "n_identifiers": 19, "d_id": 47751, "documentation": { "docstring": "\n Compute the radius of convergence of the defining series.\n\n Explanation\n ===========\n\n Note that even if this is not ``oo``, the function may still be\n evaluated outside of the radius of convergence by analytic\n continuation. 
But if this is zero, then the function is not actually\n defined anywhere else.\n\n Examples\n ========\n\n >>> from sympy import hyper\n >>> from sympy.abc import z\n >>> hyper((1, 2), [3], z).radius_of_convergence\n 1\n >>> hyper((1, 2, 3), [4], z).radius_of_convergence\n 0\n >>> hyper((1, 2), (3, 4), z).radius_of_convergence\n oo\n\n ", "n_words": 80, "vocab_size": 54, "n_whitespaces": 207, "language": "en" } }, { "id": 64812, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/doctype/budget/budget.py", "file_name": "budget.py", "fun_name": "get_ordered_amount", "commit_message": "style: format code with black", "code": "def get_ordered_amount(args, budget):\n\titem_code = args.get(\"item_code\")\n\tcondition = get_other_condition(args, budget, \"Purchase Order\")\n\n\tdata = frappe.db.sql(\n\t\t.format(\n\t\t\tcondition\n\t\t),\n\t\titem_code,\n\t\tas_list=1,\n\t)\n\n\treturn data[0][0] if data else 0\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 16, "n_words": 27, "vocab_size": 23, "complexity": 2, "nloc": 14, "token_counts": 59, "n_ast_nodes": 92, "n_identifiers": 13, "d_id": 13728, "documentation": { "docstring": " select ifnull(sum(child.amount - child.billed_amt), 0) as amount\n\t\tfrom `tabPurchase Order Item` child, `tabPurchase Order` parent where\n\t\tparent.name = child.parent and child.item_code = %s and parent.docstatus = 1 and child.amount > child.billed_amt\n\t\tand parent.status != 'Closed' and {0}", "n_words": 37, "vocab_size": 30, "n_whitespaces": 34, "language": "en" } }, { "id": 189392, "commit_id": "540dc70d2fd7a2f759a6da158303ef81a1ae53f8", "repo": "manim", "path": "manim/mobject/svg/text_mobject.py", "file_name": "text_mobject.py", "fun_name": "set_color_by_t2c", "commit_message": "Update `Text` to use new ManimPango color setting (#2341)\n\n* Find indexes in stripped text, not original text\r\n\r\n* Add regression test\r\n\r\n* Only run the test in linux environement\r\n\r\n* Rewrite text2settings in Text to set text color via pango\r\n\r\n* Make gradient in Text use pango coloring\r\n\r\n* Bump manimpango to newest version\r\n\r\n* Update test to use new frames_comparison\r\n\r\n* Don't remove svg file on exception\r\n\r\n* Bump manimpango\r\n\r\n* Fix pre-commit errors\r\n\r\n* Fix index bug\r\n\r\n* Deprecate no longer used functions set_color_by_t2x\r\n\r\n* Remove old commented out code\r\n\r\n* Update poetry.lock", "code": "def set_color_by_t2c(self, t2c=None):\n \n t2c = t2c if t2c else self.t2c\n for word, color in list(t2c.items()):\n for start, end in self.find_indexes(word, self.text):\n self.chars[start:end].set_color(color)\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 69, "n_words": 22, "vocab_size": 18, "complexity": 4, "nloc": 5, "token_counts": 62, "n_ast_nodes": 96, "n_identifiers": 13, "d_id": 46033, "documentation": { "docstring": "Internally used function. 
Sets color for specified strings.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 187407, "commit_id": "d1a8d1597d4fe9f129a72fe94c1508304b7eae0f", "repo": "streamlink", "path": "src/streamlink/stream/dash.py", "file_name": "dash.py", "fun_name": "sleeper", "commit_message": "stream.dash: update DASHStreamWorker.iter_segments\n\n- Refactor DASHStreamWorker.iter_segments()\n- Replace dash_manifest.sleeper() with SegmentedStreamWorker.wait(),\n and make the worker thread shut down immediately on close().\n- Prevent unnecessary wait times for static manifest types by calling\n close() after all segments were put into the writer's queue.", "code": "def sleeper(self, duration):\n \n s = time()\n yield\n time_to_sleep = duration - (time() - s)\n if time_to_sleep > 0:\n self.wait(time_to_sleep)\n", "url": "https://github.com/streamlink/streamlink.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 65, "n_words": 19, "vocab_size": 16, "complexity": 2, "nloc": 6, "token_counts": 36, "n_ast_nodes": 63, "n_identifiers": 7, "d_id": 45770, "documentation": { "docstring": "\n Do something and then wait for a given duration minus the time it took doing something\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 31, "language": "en" } }, { "id": 219711, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pydecimal.py", "file_name": "_pydecimal.py", "fun_name": "_group_lengths", "commit_message": "add python 3.10.4 for windows", "code": "def _group_lengths(grouping):\n \n # The result from localeconv()['grouping'], and the input to this\n # function, should be a list of integers in one of the\n # following three forms:\n #\n # (1) an empty list, or\n # (2) nonempty list of positive integers + [0]\n # (3) list of positive integers + [locale.CHAR_MAX], or\n\n from itertools import chain, repeat\n if not grouping:\n return []\n elif grouping[-1] == 0 and len(grouping) >= 2:\n return chain(grouping[:-1], repeat(grouping[-2]))\n elif grouping[-1] == _locale.CHAR_MAX:\n return grouping[:-1]\n else:\n raise ValueError('unrecognised format for grouping')\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 159, "n_words": 86, "vocab_size": 62, "complexity": 5, "nloc": 10, "token_counts": 79, "n_ast_nodes": 138, "n_identifiers": 9, "d_id": 55734, "documentation": { "docstring": "Convert a localeconv-style grouping into a (possibly infinite)\n iterable of integers representing group lengths.\n\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 20, "language": "en" } }, { "id": 243427, "commit_id": "279ddf4ce6c76498ac29df2552a3023b9aaa76c1", "repo": "Pillow", "path": "src/PIL/ImageOps.py", "file_name": "ImageOps.py", "fun_name": "expand", "commit_message": "Use getpalette() in ImageOps", "code": "def expand(image, border=0, fill=0):\n \n left, top, right, bottom = _border(border)\n width = left + image.size[0] + right\n height = top + image.size[1] + bottom\n color = _color(fill, image.mode)\n if image.mode == \"P\" and image.palette:\n palette = ImagePalette.ImagePalette(palette=image.getpalette())\n if isinstance(color, tuple):\n color = palette.getcolor(color)\n else:\n palette = None\n out = Image.new(image.mode, (width, height), color)\n if palette:\n out.putpalette(palette.palette)\n out.paste(image, (left, top))\n return out\n\n", "url": 
"https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 133, "n_words": 61, "vocab_size": 45, "complexity": 5, "nloc": 16, "token_counts": 149, "n_ast_nodes": 230, "n_identifiers": 26, "d_id": 70030, "documentation": { "docstring": "\n Add border to the image\n\n :param image: The image to expand.\n :param border: Border width, in pixels.\n :param fill: Pixel fill value (a color value). Default is 0 (black).\n :return: An image.\n ", "n_words": 32, "vocab_size": 28, "n_whitespaces": 52, "language": "en" } }, { "id": 64741, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/doctype/account/account.py", "file_name": "account.py", "fun_name": "merge_account", "commit_message": "style: format code with black", "code": "def merge_account(old, new, is_group, root_type, company):\n\t# Validate properties before merging\n\tif not frappe.db.exists(\"Account\", new):\n\t\tthrow(_(\"Account {0} does not exist\").format(new))\n\n\tval = list(frappe.db.get_value(\"Account\", new, [\"is_group\", \"root_type\", \"company\"]))\n\n\tif val != [cint(is_group), root_type, company]:\n\t\tthrow(\n\t\t\t_(\n\t\t\t\t\n\t\t\t)\n\t\t)\n\n\tif is_group and frappe.db.get_value(\"Account\", new, \"parent_account\") == old:\n\t\tfrappe.db.set_value(\n\t\t\t\"Account\", new, \"parent_account\", frappe.db.get_value(\"Account\", old, \"parent_account\")\n\t\t)\n\n\tfrappe.rename_doc(\"Account\", old, new, merge=1, force=1)\n\n\treturn new\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 44, "n_words": 61, "vocab_size": 47, "complexity": 5, "nloc": 16, "token_counts": 145, "n_ast_nodes": 248, "n_identifiers": 21, "d_id": 13713, "documentation": { "docstring": "Merging is only possible if following properties are same in both records. 
Is Group, Root Type, Company", "n_words": 17, "vocab_size": 17, "n_whitespaces": 16, "language": "en" } }, { "id": 282826, "commit_id": "493617752699ff4ab63a1ed9df478ac030e68492", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/mutual_funds/mutual_fund_controller.py", "file_name": "mutual_fund_controller.py", "fun_name": "print_help", "commit_message": "Add avanza mutual fund data and commands (#1452)\n\n* Adding info_se and al_swe commands\r\n\r\n* Linting\r\n\r\n* Linting\r\n\r\n* linting\r\n\r\n* Fixes\r\n\r\n* Fixes to formatting\r\n\r\n* Linting\r\n\r\n* Linting\r\n\r\n* Linting\r\n\r\nCo-authored-by: jmaslek \r\nCo-authored-by: Colin Delahunty <72827203+colin99d@users.noreply.github.com>\r\nCo-authored-by: didierlopes.eth ", "code": "def print_help(self):\n \n has_fund_start = \"\" if self.fund_symbol else \"[unvl]\"\n has_fund_end = \"\" if self.fund_symbol else \"[/unvl]\"\n has_fund_usa_start = (\n \"\" if self.fund_symbol and self.country == \"united states\" else \"[unvl]\"\n )\n has_fund_usa_end = (\n \"\" if self.fund_symbol and self.country == \"united states\" else \"[/unvl]\"\n )\n if self.fund_name:\n if self.fund_symbol:\n fund_string = f\"{self.fund_name} ({self.fund_symbol})\"\n else:\n fund_string = f\"{self.fund_name}\"\n else:\n fund_string = \"\"\n help_text = f\n if self.fund_symbol != \"\" and self.country == \"sweden\":\n help_text += \n console.print(text=help_text, menu=\"Mutual Funds\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 257, "n_words": 76, "vocab_size": 35, "complexity": 11, "nloc": 43, "token_counts": 115, "n_ast_nodes": 267, "n_identifiers": 16, "d_id": 84315, "documentation": { "docstring": "Print help\n[src][Investing.com][/src][cmds]\n country set a country for filtering[/cmds]\n\n[param]Current Country: [/param]{self.country.title()}\n\n[src][Investing.com][/src][cmds]\n overview overview of top funds by country\n search search for Mutual Funds\n load load historical fund data[/cmds]\n\n[param]Current Fund: [/param]{fund_string}\n{has_fund_start}\n[src][Investing.com][/src][cmds]\n info get fund information\n plot plot loaded historical fund data{has_fund_end}{has_fund_usa_start}\n[src][YFinance][/src]\n sector sector weightings\n equity equity holdings[/cmds]{has_fund_usa_end}\n \n[src][Avanza][/src]\n al_swe display fund allocation (sector, country, holdings)\n info_swe get fund information\n ", "n_words": 64, "vocab_size": 45, "n_whitespaces": 164, "language": "en" } }, { "id": 244047, "commit_id": "cac356380d505bf15587f07c0529218cc36b9652", "repo": "mmdetection", "path": "mmdet/models/detectors/maskformer.py", "file_name": "maskformer.py", "fun_name": "forward_dummy", "commit_message": "[Feature] Add Maskformer to mmdet (#7212)\n\n* first commit\r\n\r\n* add README\r\n\r\n* move model description from config to readme\r\n\r\nadd description for binary_input\r\n\r\nadd description for dice loss\r\n\r\nadd a independent panoptic gt processing function\r\n\r\nadd a independent panoptic gt processing function\r\n\r\nremove compatibility of pretrain in maskformer\r\n\r\n* update comments in maskformer_head\r\n\r\n* update docs format", "code": "def forward_dummy(self, img, img_metas):\n \n super(SingleStageDetector, self).forward_train(img, img_metas)\n x = self.extract_feat(img)\n outs = self.panoptic_head(x, img_metas)\n return outs\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", 
"ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 51, "n_words": 16, "vocab_size": 13, "complexity": 1, "nloc": 5, "token_counts": 43, "n_ast_nodes": 67, "n_identifiers": 11, "d_id": 70214, "documentation": { "docstring": "Used for computing network flops. See\n `mmdetection/tools/analysis_tools/get_flops.py`\n\n Args:\n img (Tensor): of shape (N, C, H, W) encoding input images.\n Typically these should be mean centered and std scaled.\n img_metas (list[Dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n ", "n_words": 61, "vocab_size": 55, "n_whitespaces": 179, "language": "en" } }, { "id": 63356, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", "file_name": "pyparsing.py", "fun_name": "setDefaultWhitespaceChars", "commit_message": "upd; format", "code": "def setDefaultWhitespaceChars(chars):\n r\n ParserElement.DEFAULT_WHITE_CHARS = chars\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 19, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 14, "token_counts": 12, "n_ast_nodes": 21, "n_identifiers": 4, "d_id": 13265, "documentation": { "docstring": "\n Overrides the default whitespace chars\n\n Example::\n\n # default whitespace chars are space, and newline\n OneOrMore(Word(alphas)).parseString(\"abc def\\nghi jkl\") # -> ['abc', 'def', 'ghi', 'jkl']\n\n # change to just treat newline as significant\n ParserElement.setDefaultWhitespaceChars(\" \\t\")\n OneOrMore(Word(alphas)).parseString(\"abc def\\nghi jkl\") # -> ['abc', 'def']\n ", "n_words": 41, "vocab_size": 29, "n_whitespaces": 120, "language": "en" } }, { "id": 117084, "commit_id": "ae4fa77a2c0a9fa57cc9c8bc7e8961dd01e4067e", "repo": "mindsdb", "path": "tests/integration_tests/flows/conftest.py", "file_name": "conftest.py", "fun_name": "override_recursive", "commit_message": "It mysql api test pytest (#3694)\n\n* migration to pytest\r\n\r\n* Tests start passing\r\n\r\n* Fully working tests\r\n\r\n* Increase timeout for mindsdb start\r\n\r\n* reduce amount of logs\r\n\r\n* show logs only for failed tests", "code": "def override_recursive(a, b):\n \n for key in b:\n if isinstance(b[key], dict) is False:\n a[key] = b[key]\n elif key not in a or isinstance(a[key], dict) is False:\n a[key] = b[key]\n # make config section empty by demand\n elif isinstance(b[key], dict) is True and b[key] == {}:\n a[key] = b[key]\n else:\n override_recursive(a[key], b[key])\n\n@pytest.fixture(scope=\"module\")", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "@pytest.fixture(scope=\"module\")", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 135, "n_words": 51, "vocab_size": 35, "complexity": 7, "nloc": 10, "token_counts": 106, "n_ast_nodes": 176, "n_identifiers": 9, "d_id": 25896, "documentation": { "docstring": "Overrides some elements in json 'a' by elements in json 'b'", "n_words": 11, "vocab_size": 8, "n_whitespaces": 10, "language": "en" } }, { "id": 22595, "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", "repo": "Python", "path": "calculator.py", "file_name": "calculator.py", "fun_name": "result", "commit_message": "refactor: clean 
code\n\nSigned-off-by: slowy07 ", "code": "def result(term):\n \n print(\"\\n\" + str(calc(term)))\n\n", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 11, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 18, "n_ast_nodes": 36, "n_identifiers": 5, "d_id": 4374, "documentation": { "docstring": "\n input: term of type str\n output: none\n purpose: passes the argument to the function calc(...) and\n prints the result onto console.\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 46, "language": "en" } }, { "id": 19913, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_internal/metadata/base.py", "file_name": "base.py", "fun_name": "installed_location", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def installed_location(self) -> Optional[str]:\n \n egg_link = egg_link_path_from_location(self.raw_name)\n if egg_link:\n location = egg_link\n elif self.location:\n location = self.location\n else:\n return None\n return normalize_path(location)\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 97, "n_words": 22, "vocab_size": 17, "complexity": 3, "nloc": 18, "token_counts": 44, "n_ast_nodes": 74, "n_identifiers": 9, "d_id": 3146, "documentation": { "docstring": "The distribution's \"installed\" location.\n\n This should generally be a ``site-packages`` directory. This is\n usually ``dist.location``, except for legacy develop-installed packages,\n where ``dist.location`` is the source code location, and this is where\n the ``.egg-link`` file is.\n\n The returned location is normalized (in particular, with symlinks removed).\n ", "n_words": 45, "vocab_size": 38, "n_whitespaces": 87, "language": "en" } }, { "id": 121503, "commit_id": "2bc3e39cd9104071ee39dacac22abd51b94eb27e", "repo": "jax", "path": "jax/experimental/sparse/linalg.py", "file_name": "linalg.py", "fun_name": "spsolve", "commit_message": "Sparse direct solver using QR factorization from cuSOLVER. This is the jaxlib implementation. 
We will want to combine this with the sparse libraries already existing in JAX.\n\nPiperOrigin-RevId: 468303019", "code": "def spsolve(data, indices, indptr, b, tol=1e-6, reorder=1):\n \n if jax._src.lib.xla_extension_version < 86:\n raise ValueError('spsolve requires jaxlib version 86 or above.')\n return spsolve_p.bind(data, indices, indptr, b, tol=tol, reorder=reorder)\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 32, "n_words": 26, "vocab_size": 23, "complexity": 2, "nloc": 4, "token_counts": 59, "n_ast_nodes": 83, "n_identifiers": 14, "d_id": 27067, "documentation": { "docstring": "A sparse direct solver using QR factorization.\n\n Accepts a sparse matrix in CSR format `data, indices, indptr` arrays.\n Currently only the CUDA GPU backend is implemented.\n\n Args:\n data : An array containing the non-zero entries of the CSR matrix.\n indices : The column indices of the CSR matrix.\n indptr : The row pointer array of the CSR matrix.\n b : The right hand side of the linear system.\n tol : Tolerance to decide if singular or not. Defaults to 1e-6.\n reorder : The reordering scheme to use to reduce fill-in. No reordering if\n `reorder=0'. Otherwise, symrcm, symamd, or csrmetisnd (`reorder=1,2,3'),\n respectively. Defaults to symrcm.\n\n Returns:\n An array with the same dtype and size as b representing the solution to\n the sparse linear system.\n ", "n_words": 123, "vocab_size": 81, "n_whitespaces": 166, "language": "en" } }, { "id": 3690, "commit_id": "2573fa145a1fbf4e849d26c54de105bcacde114f", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams.py", "file_name": "streams.py", "fun_name": "request_params", "commit_message": "🎉 Source Facebook Marketing: Add AdAccount and Images stream implementation (#10180)\n\n* Add AdAccount and Images stream implementation\r\n\r\n* Update PR number\r\n\r\n* Updated docker version\r\n\r\n* Updated to linter\r\n\r\n* Update to review\r\n\r\n* Add comment to AdAccount read_records method\r\n\r\n* Bumped version in seed, definitions and specs files", "code": "def request_params(self, **kwargs) -> MutableMapping[str, Any]:\n \n params = {\"limit\": self.page_size}\n\n if self._include_deleted:\n params.update(self._filter_all_statuses())\n\n if self.send_fields:\n params.update({\"fields\": \",\".join(self.fields)})\n\n return params\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 76, "n_words": 19, "vocab_size": 17, "complexity": 3, "nloc": 8, "token_counts": 64, "n_ast_nodes": 109, "n_identifiers": 14, "d_id": 516, "documentation": { "docstring": "Parameters that should be passed to query_records method", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 196510, "commit_id": "e8c5f4fe692e863bf0a48573a1d0c7b92487c5c1", "repo": "sympy", "path": "sympy/algebras/quaternion.py", "file_name": "quaternion.py", "fun_name": "axis", "commit_message": "hamilton", "code": "def axis(self):\n \n\n q = self\n AX = Quaternion(0, q.b, q.c, q.d).normalize()\n return AX\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 41, "n_words": 13, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 33, "n_ast_nodes": 53, "n_identifiers": 9, "d_id": 47950, "documentation": { "docstring": "\n 
Returns the axis part of the quaternion.\n\n Returns\n =======\n Ax : The axis of the quaternion.\n\n Examples\n ========\n\n >>> from sympy.algebras.quaternion import Quaternion\n >>> q = Quaternion(1, 1, 1, 1)\n >>> q.axis()\n 0 + sqrt(3)/3*i + sqrt(3)/3*j + sqrt(3)/3*k\n\n >>> q = Quaternion(4, 8, 13, 12)\n >>> q.axis()\n 0 + 8*sqrt(377)/377*i + sqrt(377)/29*j + 12*sqrt(377)/377*k\n\n ", "n_words": 55, "vocab_size": 35, "n_whitespaces": 154, "language": "en" } }, { "id": 46033, "commit_id": "afd3c135c7d1815c56578d020625a33dc27fe640", "repo": "airflow", "path": "airflow/www/views.py", "file_name": "views.py", "fun_name": "dagrun_queued", "commit_message": "Add queue button to click-on-DagRun interface. (#21555)\n\n* Initial implementation of adding Queue button to DagRun interface\r\n\r\n* Implement the test cases\r\n\r\n* FIX Add all required MyPy ignores\r\n\r\n* FIX import\r\n\r\n* Update airflow/www/views.py\r\n\r\nFIX Documentation\r\n\r\nCo-authored-by: Brent Bovenzi \r\n\r\n* update modal UI\r\n\r\nCo-authored-by: Brent Bovenzi ", "code": "def dagrun_queued(self):\n \n dag_id = request.form.get('dag_id')\n dag_run_id = request.form.get('dag_run_id')\n confirmed = request.form.get('confirmed') == 'true'\n origin = get_safe_url(request.form.get('origin'))\n return self._mark_dagrun_state_as_queued(dag_id, dag_run_id, confirmed, origin)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 63, "n_words": 21, "vocab_size": 18, "complexity": 1, "nloc": 6, "token_counts": 64, "n_ast_nodes": 112, "n_identifiers": 11, "d_id": 8767, "documentation": { "docstring": "Queue DagRun so tasks that haven't run yet can be started.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 168252, "commit_id": "2f8d0a36703e81e4dca52ca9fe4f58c910c1b304", "repo": "pandas", "path": "pandas/core/indexes/range.py", "file_name": "range.py", "fun_name": "_start", "commit_message": "PERF cache find_stack_level (#48023)\n\ncache stacklevel", "code": "def _start(self) -> int:\n \n warnings.warn(\n self._deprecation_message.format(\"_start\", \"start\"),\n FutureWarning,\n stacklevel=find_stack_level(inspect.currentframe()),\n )\n return self.start\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 73, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 13, "token_counts": 41, "n_ast_nodes": 69, "n_identifiers": 13, "d_id": 40259, "documentation": { "docstring": "\n The value of the `start` parameter (``0`` if this was not supplied).\n\n .. 
deprecated:: 0.25.0\n Use ``start`` instead.\n ", "n_words": 18, "vocab_size": 18, "n_whitespaces": 52, "language": "en" } }, { "id": 322894, "commit_id": "93cae49c0c572b5c1ac972759140fbe924b0374d", "repo": "PaddleNLP", "path": "examples/model_interpretation/task/senti/LIME/lime_text.py", "file_name": "lime_text.py", "fun_name": "string_position", "commit_message": "Add NLP model interpretation (#1752)\n\n* upload NLP interpretation\r\n\r\n* fix problems and relocate project\r\n\r\n* remove abandoned picture\r\n\r\n* remove abandoned picture\r\n\r\n* fix dead link in README\r\n\r\n* fix dead link in README\r\n\r\n* fix code style problems\r\n\r\n* fix CR round 1\r\n\r\n* remove .gitkeep files\r\n\r\n* fix code style\r\n\r\n* fix file encoding problem\r\n\r\n* fix code style\r\n\r\n* delete duplicated files due to directory rebuild\r\n\r\n* fix CR round 2\r\n\r\n* fix code style\r\n\r\n* fix ernie tokenizer\r\n\r\n* fix code style\r\n\r\n* fix problem from CR round 1\r\n\r\n* fix bugs\r\n\r\n* fix README\r\n\r\n* remove duplicated files\r\n\r\n* deal with diff of old and new tokenizer results\r\n\r\n* fix CR round 4\r\n\r\n* fix code style\r\n\r\n* add missing dependence\r\n\r\n* fix broken import path\r\n\r\n* move some data file to cloud\r\n\r\n* MRC upper case to lower case\r\n\r\nCo-authored-by: Zeyu Chen \r\nCo-authored-by: binlinquge \r\nCo-authored-by: Guo Sheng ", "code": "def string_position(self, id_):\n \n if self.bow:\n return self.string_start[self.positions[id_]]\n else:\n return self.string_start[[self.positions[id_]]]\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 53, "n_words": 10, "vocab_size": 9, "complexity": 2, "nloc": 5, "token_counts": 41, "n_ast_nodes": 64, "n_identifiers": 6, "d_id": 118273, "documentation": { "docstring": "Returns a np array with indices to id_ (int) occurrences", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 101068, "commit_id": "049314429f71a21e6595e9d27e9e36f6a3479c42", "repo": "faceswap", "path": "plugins/convert/writer/opencv.py", "file_name": "opencv.py", "fun_name": "_get_save_args", "commit_message": "Convert: Add option to output mask separately for draw-transparent", "code": "def _get_save_args(self) -> Tuple[int, ...]:\n \n filetype = self.config[\"format\"]\n args: Tuple[int, ...] 
= tuple()\n if filetype == \"jpg\" and self.config[\"jpg_quality\"] > 0:\n args = (cv2.IMWRITE_JPEG_QUALITY, # pylint: disable=no-member\n self.config[\"jpg_quality\"])\n if filetype == \"png\" and self.config[\"png_compress_level\"] > -1:\n args = (cv2.IMWRITE_PNG_COMPRESSION, # pylint: disable=no-member\n self.config[\"png_compress_level\"])\n logger.debug(args)\n return args\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 157, "n_words": 46, "vocab_size": 31, "complexity": 5, "nloc": 18, "token_counts": 98, "n_ast_nodes": 165, "n_identifiers": 13, "d_id": 20505, "documentation": { "docstring": " Obtain the save parameters for the file format.\n\n Returns\n -------\n tuple\n The OpenCV specific arguments for the selected file format\n ", "n_words": 20, "vocab_size": 16, "n_whitespaces": 61, "language": "en" } }, { "id": 109337, "commit_id": "1e40f41713fab2d4a86aa26766b3cf6cccd9203d", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_colors.py", "file_name": "test_colors.py", "fun_name": "test_resample", "commit_message": "ENH: Make the ability to resample interpolated colormaps public", "code": "def test_resample():\n \n n = 101\n colorlist = np.empty((n, 4), float)\n colorlist[:, 0] = np.linspace(0, 1, n)\n colorlist[:, 1] = 0.2\n colorlist[:, 2] = np.linspace(1, 0, n)\n colorlist[:, 3] = 0.7\n lsc = mcolors.LinearSegmentedColormap.from_list('lsc', colorlist)\n lc = mcolors.ListedColormap(colorlist)\n # Set some bad values for testing too\n for cmap in [lsc, lc]:\n cmap.set_under('r')\n cmap.set_over('g')\n cmap.set_bad('b')\n lsc3 = lsc.resample(3)\n lc3 = lc.resample(3)\n expected = np.array([[0.0, 0.2, 1.0, 0.7],\n [0.5, 0.2, 0.5, 0.7],\n [1.0, 0.2, 0.0, 0.7]], float)\n assert_array_almost_equal(lsc3([0, 0.5, 1]), expected)\n assert_array_almost_equal(lc3([0, 0.5, 1]), expected)\n # Test over/under was copied properly\n assert_array_almost_equal(lsc(np.inf), lsc3(np.inf))\n assert_array_almost_equal(lsc(-np.inf), lsc3(-np.inf))\n assert_array_almost_equal(lsc(np.nan), lsc3(np.nan))\n assert_array_almost_equal(lc(np.inf), lc3(np.inf))\n assert_array_almost_equal(lc(-np.inf), lc3(-np.inf))\n assert_array_almost_equal(lc(np.nan), lc3(np.nan))\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 238, "n_words": 100, "vocab_size": 76, "complexity": 2, "nloc": 26, "token_counts": 337, "n_ast_nodes": 467, "n_identifiers": 25, "d_id": 23532, "documentation": { "docstring": "\n GitHub issue #6025 pointed to incorrect ListedColormap.resample;\n here we test the method for LinearSegmentedColormap as well.\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 26, "language": "en" } }, { "id": 124155, "commit_id": "7d3ceb222c8af98a5c101b1c28ab37ffcb0a3793", "repo": "ray", "path": "python/ray/tests/kuberay/test_autoscaling_config.py", "file_name": "test_autoscaling_config.py", "fun_name": "test_cr_image_consistency", "commit_message": "[kuberay][autoscaler] Improve CPU, GPU, and memory detection. 
(#26219)\n\nThis PR improves the autoscaler's resource detection logic", "code": "def test_cr_image_consistency():\n \n cr = _get_basic_ray_cr()\n\n group_specs = [cr[\"spec\"][\"headGroupSpec\"]] + cr[\"spec\"][\"workerGroupSpecs\"]\n # Head, CPU group, GPU group.\n assert len(group_specs) == 3\n\n ray_containers = [\n group_spec[\"template\"][\"spec\"][\"containers\"][0] for group_spec in group_specs\n ]\n\n # All Ray containers in the example config have \"ray-\" in their name.\n assert all(\"ray-\" in ray_container[\"name\"] for ray_container in ray_containers)\n\n # All Ray images are from the Ray repo.\n assert all(\n \"rayproject/ray\" in ray_container[\"image\"] for ray_container in ray_containers\n )\n\n # All Ray images are the same.\n assert len({ray_container[\"image\"] for ray_container in ray_containers}) == 1\n\n\n@pytest.mark.parametrize(\"exception\", [Exception, requests.HTTPError])\n@pytest.mark.parametrize(\"num_exceptions\", range(6))", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"exception\", [Exception, requests.HTTPError])\n@pytest.mark.parametrize(\"num_exceptions\", range(6))", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 143, "n_words": 89, "vocab_size": 57, "complexity": 5, "nloc": 12, "token_counts": 101, "n_ast_nodes": 229, "n_identifiers": 16, "d_id": 27530, "documentation": { "docstring": "Verify that the example config uses the same Ray image for all Ray pods.", "n_words": 14, "vocab_size": 12, "n_whitespaces": 13, "language": "en" } }, { "id": 192417, "commit_id": "289fce29b3e2392114aadbe7a419df0f2e3ac1be", "repo": "vision", "path": "torchvision/transforms/_functional_video.py", "file_name": "_functional_video.py", "fun_name": "resized_crop", "commit_message": "Replace asserts with exceptions (#5587)\n\n* replace most asserts with exceptions\r\n\r\n* fix formating issues\r\n\r\n* fix linting and remove more asserts\r\n\r\n* fix regresion\r\n\r\n* fix regresion\r\n\r\n* fix bug\r\n\r\n* apply ufmt\r\n\r\n* apply ufmt\r\n\r\n* fix tests\r\n\r\n* fix format\r\n\r\n* fix None check\r\n\r\n* fix detection models tests\r\n\r\n* non scriptable any\r\n\r\n* add more checks for None values\r\n\r\n* fix retinanet test\r\n\r\n* fix retinanet test\r\n\r\n* Update references/classification/transforms.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Update references/classification/transforms.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Update references/optical_flow/transforms.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Update references/optical_flow/transforms.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Update references/optical_flow/transforms.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* make value checks more pythonic:\r\n\r\n* Update references/optical_flow/transforms.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* make value checks more pythonic\r\n\r\n* make more checks pythonic\r\n\r\n* fix bug\r\n\r\n* appy ufmt\r\n\r\n* fix tracing issues\r\n\r\n* fib typos\r\n\r\n* fix lint\r\n\r\n* remove unecessary f-strings\r\n\r\n* fix bug\r\n\r\n* Update torchvision/datasets/mnist.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Update torchvision/datasets/mnist.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Update torchvision/ops/boxes.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Update torchvision/ops/poolers.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Update torchvision/utils.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* address PR comments\r\n\r\n* Update 
torchvision/io/_video_opt.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Update torchvision/models/detection/generalized_rcnn.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Update torchvision/models/feature_extraction.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* Update torchvision/models/optical_flow/raft.py\r\n\r\nCo-authored-by: Nicolas Hug \r\n\r\n* address PR comments\r\n\r\n* addressing further pr comments\r\n\r\n* fix bug\r\n\r\n* remove unecessary else\r\n\r\n* apply ufmt\r\n\r\n* last pr comment\r\n\r\n* replace RuntimeErrors\r\n\r\nCo-authored-by: Nicolas Hug ", "code": "def resized_crop(clip, i, j, h, w, size, interpolation_mode=\"bilinear\"):\n \n if not _is_tensor_video_clip(clip):\n raise ValueError(\"clip should be a 4D torch.tensor\")\n clip = crop(clip, i, j, h, w)\n clip = resize(clip, size, interpolation_mode)\n return clip\n\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 54, "n_words": 32, "vocab_size": 25, "complexity": 2, "nloc": 6, "token_counts": 58, "n_ast_nodes": 87, "n_identifiers": 12, "d_id": 46892, "documentation": { "docstring": "\n Do spatial cropping and resizing to the video clip\n Args:\n clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)\n i (int): i in (i,j) i.e coordinates of the upper left corner.\n j (int): j in (i,j) i.e coordinates of the upper left corner.\n h (int): Height of the cropped region.\n w (int): Width of the cropped region.\n size (tuple(int, int)): height and width of resized clip\n Returns:\n clip (torch.tensor): Resized and cropped clip. Size is (C, T, H, W)\n ", "n_words": 83, "vocab_size": 46, "n_whitespaces": 145, "language": "en" } }, { "id": 258494, "commit_id": "26e2c38a961a27a9e53ce7814bf27f840510b237", "repo": "scikit-learn", "path": "sklearn/cluster/tests/test_spectral.py", "file_name": "test_spectral.py", "fun_name": "test_spectral_params_validation", "commit_message": "[MRG] MNT use check_scalar to validate scalar in SpectralClustering (#21881)\n\n* use check_scalar in SpectralClustering\n\n* Add check_scalar parameters validation for cluster.SpectralClustering\n\n* fix missing comma\n\n* tiny changelog update to relauch CI\n\n* errors are raised at fit time solely\n\nCo-authored-by: Julien Jerphanion \n\n* fix typos\n\nCo-authored-by: Julien Jerphanion \n\n* merge ..utils imports\n\nCo-authored-by: Julien Jerphanion \n\nCo-authored-by: hvassard \nCo-authored-by: Julien Jerphanion ", "code": "def test_spectral_params_validation(input, params, err_type, err_msg):\n \n est = SpectralClustering(**params)\n with pytest.raises(err_type, match=err_msg):\n est.fit(input)\n\n\n@pytest.mark.parametrize(\"assign_labels\", (\"kmeans\", \"discretize\", \"cluster_qr\"))", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"assign_labels\", (\"kmeans\", \"discretize\", \"cluster_qr\"))", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 31, "n_words": 16, "vocab_size": 16, "complexity": 1, "nloc": 4, "token_counts": 37, "n_ast_nodes": 93, "n_identifiers": 13, "d_id": 75252, "documentation": { "docstring": "Check the parameters validation in `SpectralClustering`.", "n_words": 6, "vocab_size": 6, "n_whitespaces": 5, "language": "en" } }, { "id": 270862, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/base_layer_utils.py", "file_name": "base_layer_utils.py", "fun_name": "check_graph_consistency", 
"commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def check_graph_consistency(tensor=None, method=\"add_loss\", force_raise=False):\n \n if force_raise or (\n tf.compat.v1.executing_eagerly_outside_functions()\n and hasattr(tensor, \"graph\")\n and tensor.graph.is_control_flow_graph\n ):\n if method == \"activity_regularizer\":\n bad_example = ", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "bad_example = \"\"\"", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 70, "n_words": 21, "vocab_size": 19, "complexity": 8, "nloc": 112, "token_counts": 142, "n_ast_nodes": 102, "n_identifiers": 15, "d_id": 80577, "documentation": { "docstring": "Checks that tensors passed to `add_*` method match the Keras graph.\n\n When one of the `add_*` method is called inside a V2 conditional branch,\n the underlying tensor gets created in a FuncGraph managed by control_flow_v2.\n We need to raise clear error messages in such cases.\n\n Args:\n tensor: Tensor to check, or `False` if it is known that an error\n should be raised.\n method: Caller method, one of {'add_metric', 'add_loss', 'add_update'}.\n force_raise: If an error should be raised regardless of `tensor`.\n\n Raises:\n RuntimeError: In case of an out-of-graph tensor.\n \n class TestModel(tf.keras.Model):\n", "n_words": 90, "vocab_size": 70, "n_whitespaces": 140, "language": "en" } }, { "id": 216181, "commit_id": "2bd6323ef5f87d871891a59917ee96f44ef55e75", "repo": "salt", "path": "salt/modules/cp.py", "file_name": "cp.py", "fun_name": "get_url", "commit_message": "fixes saltstack/salt#61562 cp functions derive saltenv from config", "code": "def get_url(path, dest=\"\", saltenv=None, makedirs=False, source_hash=None):\n \n if not saltenv:\n saltenv = __opts__[\"saltenv\"] or \"base\"\n\n if isinstance(dest, str):\n result = _client().get_url(\n path, dest, makedirs, saltenv, source_hash=source_hash\n )\n else:\n result = _client().get_url(\n path, None, makedirs, saltenv, no_cache=True, source_hash=source_hash\n )\n if not result:\n log.error(\n \"Unable to fetch file %s from saltenv %s.\",\n salt.utils.url.redact_http_basic_auth(path),\n saltenv,\n )\n if result:\n return salt.utils.stringutils.to_unicode(result)\n return result\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 190, "n_words": 58, "vocab_size": 39, "complexity": 6, "nloc": 20, "token_counts": 128, "n_ast_nodes": 198, "n_identifiers": 20, "d_id": 54458, "documentation": { "docstring": "\n .. versionchanged:: 3005\n ``saltenv`` will use value from config if not explicitly set\n\n .. versionchanged:: 2018.3.0\n ``dest`` can now be a directory\n\n Used to get a single file from a URL.\n\n path\n A URL to download a file from. Supported URL schemes are: ``salt://``,\n ``http://``, ``https://``, ``ftp://``, ``s3://``, ``swift://`` and\n ``file://`` (local filesystem). If no scheme was specified, this is\n equivalent of using ``file://``.\n If a ``file://`` URL is given, the function just returns absolute path\n to that file on a local filesystem.\n The function returns ``False`` if Salt was unable to fetch a file from\n a ``salt://`` URL.\n\n dest\n The default behaviour is to write the fetched file to the given\n destination path. 
If this parameter is omitted or set as empty string\n (``''``), the function places the remote file on the local filesystem\n inside the Minion cache directory and returns the path to that file.\n\n .. note::\n\n To simply return the file contents instead, set destination to\n ``None``. This works with ``salt://``, ``http://``, ``https://``\n and ``file://`` URLs. The files fetched by ``http://`` and\n ``https://`` will not be cached.\n\n saltenv\n Salt fileserver environment from which to retrieve the file. Ignored if\n ``path`` is not a ``salt://`` URL.\n\n source_hash\n If ``path`` is an http(s) or ftp URL and the file exists in the\n minion's file cache, this option can be passed to keep the minion from\n re-downloading the file if the cached copy matches the specified hash.\n\n .. versionadded:: 2018.3.0\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' cp.get_url salt://my/file /tmp/this_file_is_mine\n salt '*' cp.get_url http://www.slashdot.org /tmp/index.html\n ", "n_words": 255, "vocab_size": 146, "n_whitespaces": 491, "language": "en" } }, { "id": 46886, "commit_id": "91832a42d8124b040073481fd93c54e9e64c2609", "repo": "airflow", "path": "tests/models/test_dagrun.py", "file_name": "test_dagrun.py", "fun_name": "test_mapped_literal_verify_integrity", "commit_message": "Expand mapped tasks at DagRun.Veriy_integrity (#22679)\n\nCreate the necessary task instances for a mapped task at dagrun.verify_integrity\r\n\r\nCo-authored-by: Ash Berlin-Taylor ", "code": "def test_mapped_literal_verify_integrity(dag_maker, session):\n \n\n with dag_maker(session=session) as dag:\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 19, "token_counts": 173, "n_ast_nodes": 34, "n_identifiers": 4, "d_id": 9033, "documentation": { "docstring": "Test that when the length of a mapped literal changes we remove extra TIs", "n_words": 14, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 201727, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/backends/oracle/tests.py", "file_name": "tests.py", "fun_name": "test_cursor_var", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_cursor_var(self):\n \n with connection.cursor() as cursor:\n var = cursor.var(str)\n cursor.execute(\"BEGIN %s := 'X'; END; \", [var])\n self.assertEqual(var.getvalue(), \"X\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 65, "n_words": 18, "vocab_size": 18, "complexity": 1, "nloc": 5, "token_counts": 45, "n_ast_nodes": 82, "n_identifiers": 9, "d_id": 49983, "documentation": { "docstring": "Cursor variables can be passed as query parameters.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 195855, "commit_id": "cda8dfe6f45dc5ed394c2f5cda706cd6c729f713", "repo": "sympy", "path": "sympy/core/power.py", "file_name": "power.py", "fun_name": "_eval_Mod", "commit_message": "Improved documentation formatting", "code": "def _eval_Mod(self, q):\n r\n\n base, exp = self.base, self.exp\n\n if exp.is_integer and exp.is_positive:\n if q.is_integer and base % q == 0:\n return S.Zero\n\n from sympy.ntheory.factor_ import totient\n\n if base.is_Integer and exp.is_Integer and q.is_Integer:\n b, e, m = int(base), int(exp), int(q)\n mb = m.bit_length()\n if mb <= 80 and e >= mb 
and e.bit_length()**4 >= m:\n phi = totient(m)\n return Integer(pow(b, phi + e%phi, m))\n return Integer(pow(b, e, m))\n\n from .mod import Mod\n\n if isinstance(base, Pow) and base.is_integer and base.is_number:\n base = Mod(base, q)\n return Mod(Pow(base, exp, evaluate=False), q)\n\n if isinstance(exp, Pow) and exp.is_integer and exp.is_number:\n bit_length = int(q).bit_length()\n # XXX Mod-Pow actually attempts to do a hanging evaluation\n # if this dispatched function returns None.\n # May need some fixes in the dispatcher itself.\n if bit_length <= 80:\n phi = totient(q)\n exp = phi + Mod(exp, phi)\n return Mod(Pow(base, exp, evaluate=False), q)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 503, "n_words": 142, "vocab_size": 90, "complexity": 18, "nloc": 45, "token_counts": 253, "n_ast_nodes": 389, "n_identifiers": 29, "d_id": 47442, "documentation": { "docstring": "A dispatched function to compute `b^e \\bmod q`, dispatched\n by ``Mod``.\n\n Notes\n =====\n\n Algorithms:\n\n 1. For unevaluated integer power, use built-in ``pow`` function\n with 3 arguments, if powers are not too large wrt base.\n\n 2. For very large powers, use totient reduction if $e \\ge \\log(m)$.\n Bound on m, is for safe factorization memory wise i.e. $m^{1/4}$.\n For pollard-rho to be faster than built-in pow $\\log(e) > m^{1/4}$\n check is added.\n\n 3. For any unevaluated power found in `b` or `e`, the step 2\n will be recursed down to the base and the exponent\n such that the $b \\bmod q$ becomes the new base and\n $\\phi(q) + e \\bmod \\phi(q)$ becomes the new exponent, and then\n the computation for the reduced expression can be done.\n ", "n_words": 125, "vocab_size": 95, "n_whitespaces": 237, "language": "en" } }, { "id": 252191, "commit_id": "8e71b0331b8de95c4204d5cc26fb07e967883972", "repo": "mitmproxy", "path": "mitmproxy/http.py", "file_name": "http.py", "fun_name": "host_header", "commit_message": "[quic] add is_http3 where necessary", "code": "def host_header(self) -> Optional[str]:\n \n if self.is_http2 or self.is_http3:\n return self.authority or self.data.headers.get(\"Host\", None)\n else:\n return self.data.headers.get(\"Host\", None)\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 60, "n_words": 17, "vocab_size": 13, "complexity": 4, "nloc": 13, "token_counts": 52, "n_ast_nodes": 86, "n_identifiers": 10, "d_id": 73921, "documentation": { "docstring": "\n The request's host/authority header.\n\n This property maps to either ``request.headers[\"Host\"]`` or\n ``request.authority``, depending on whether it's HTTP/1.x or HTTP/2.0.\n\n *See also:* `Request.authority`,`Request.host`, `Request.pretty_host`\n ", "n_words": 23, "vocab_size": 22, "n_whitespaces": 59, "language": "en" } }, { "id": 131422, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tests/test_client_proxy.py", "file_name": "test_client_proxy.py", "fun_name": "test_proxy_manager_lifecycle", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_proxy_manager_lifecycle(shutdown_only):\n \n proxier.CHECK_PROCESS_INTERVAL_S = 1\n os.environ[\"TIMEOUT_FOR_SPECIFIC_SERVER_S\"] = \"5\"\n pm, free_ports = start_ray_and_proxy_manager(n_ports=2)\n client = \"client1\"\n\n pm.create_specific_server(client)\n 
assert pm.start_specific_server(client, JobConfig())\n # Channel should be ready and corresponding to an existing server\n grpc.channel_ready_future(pm.get_channel(client)).result(timeout=5)\n\n proc = pm._get_server_for_client(client)\n assert proc.port == free_ports[0], f\"Free Ports are: {free_ports}\"\n\n log_files_path = os.path.join(\n pm.node.get_session_dir_path(), \"logs\", \"ray_client_server*\"\n )\n files = glob(log_files_path)\n assert any(str(free_ports[0]) in f for f in files)\n\n proc.process_handle_future.result().process.wait(10)\n # Wait for reconcile loop\n time.sleep(2)\n\n assert len(pm._free_ports) == 2\n assert pm._get_unused_port() == free_ports[1]\n\n\n@pytest.mark.skipif(\n sys.platform == \"win32\", reason=\"PSUtil does not work the same on windows.\"\n)", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.skipif(\n sys.platform == \"win32\", reason=\"PSUtil does not work the same on windows.\"\n)", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 156, "n_words": 88, "vocab_size": 70, "complexity": 2, "nloc": 19, "token_counts": 170, "n_ast_nodes": 315, "n_identifiers": 46, "d_id": 29519, "documentation": { "docstring": "\n Creates a ProxyManager and tests basic handling of the lifetime of a\n specific RayClient Server. It checks the following properties:\n 1. The SpecificServer is created using the first port.\n 2. The SpecificServer comes alive and has a log associated with it.\n 3. The SpecificServer destructs itself when no client connects.\n 4. The ProxyManager returns the port of the destructed SpecificServer.\n ", "n_words": 60, "vocab_size": 45, "n_whitespaces": 82, "language": "en" } }, { "id": 34828, "commit_id": "c74f3d4c480a6971e302de7cef226e9a157ef0d0", "repo": "transformers", "path": "src/transformers/testing_utils.py", "file_name": "testing_utils.py", "fun_name": "require_wandb", "commit_message": "Add W&B backend for hyperparameter sweep (#14582)\n\n# Add support for W&B hyperparameter sweep\r\nThis PR:\r\n* allows using wandb for running hyperparameter search.\r\n* The runs are visualized on W&B sweeps dashboard\r\n* This supports runnning sweeps on parallel devices, all reporting to the same central dashboard.\r\n\r\n### Usage\r\n**To run new a hyperparameter search:**\r\n```\r\ntrainer.hyperparameter_search(\r\n backend=\"wandb\", \r\n project=\"transformers_sweep\", # name of the project\r\n n_trials=5,\r\n metric=\"eval/loss\", # metric to be optimized, default 'eval/loss'. A warning is raised if the passed metric is not found\r\n)\r\n```\r\nThis outputs a sweep id. Eg. 
`my_project/sweep_id`\r\n\r\n**To run sweeps on parallel devices:**\r\nJust pass sweep id which you want to run parallel\r\n```\r\ntrainer.hyperparameter_search(\r\n backend=\"wandb\", \r\n sweep_id = \"my_project/sweep_id\"\r\n)\r\n```", "code": "def require_wandb(test_case):\n \n if not is_wandb_available():\n return unittest.skip(\"test requires wandb\")(test_case)\n else:\n return test_case\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 35, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 26, "n_ast_nodes": 49, "n_identifiers": 5, "d_id": 6347, "documentation": { "docstring": "\n Decorator marking a test that requires wandb.\n\n These tests are skipped when wandb isn't installed.\n\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 25, "language": "en" } }, { "id": 2736, "commit_id": "7f44809cca9457058171cfd65994fb4aee8031ac", "repo": "PySyft", "path": "packages/syft/src/syft/core/node/common/action/get_object_action.py", "file_name": "get_object_action.py", "fun_name": "_object2proto", "commit_message": "Replace absolute syft imports", "code": "def _object2proto(self) -> GetObjectResponseMessage_PB:\n \n ser = serialize(self.obj)\n\n return GetObjectResponseMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n obj=ser,\n )\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 74, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 21, "token_counts": 42, "n_ast_nodes": 67, "n_identifiers": 9, "d_id": 355, "documentation": { "docstring": "Returns a protobuf serialization of self.\n\n As a requirement of all objects which inherit from Serializable,\n this method transforms the current object into the corresponding\n Protobuf object so that it can be further serialized.\n\n :return: returns a protobuf object\n :rtype: GetObjectResponseMessage_PB\n\n .. note::\n This method is purely an internal method. Please use serialize(object) or one of\n the other public serialization methods if you wish to serialize an\n object.\n ", "n_words": 68, "vocab_size": 56, "n_whitespaces": 150, "language": "en" } }, { "id": 208726, "commit_id": "a72418e2dcdfc3c91f70d724d16d2691a41c9c24", "repo": "ipython", "path": "IPython/core/tests/test_iplib.py", "file_name": "test_iplib.py", "fun_name": "doctest_tb_context", "commit_message": "Restore lineno's for Input mapped files (#13560)\n\n* Implement lineno's for Input mapped files\r\n* Adopt In [123], line 123 format\r\n* Revert \"Set co_name for cells run line by line. Fixes https://github.com/ipython/ipykernel/issues/841\"\r\n (This reverts commit d11e987f174a15f1640f8006c86f58d884c3faa4.)\r\n* Omit mention of \", in \" for input tracebacks\r\n* Input cell -> Cell\r\n* Remove from traceback doctests\r\n* Use f-string for `in ...' 
format\r\n* Simplify _format_list logic, converting to f-strings", "code": "def doctest_tb_context():\n \n\n", "url": "https://github.com/ipython/ipython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 1, "token_counts": 5, "n_ast_nodes": 12, "n_identifiers": 1, "d_id": 52485, "documentation": { "docstring": "\n In [3]: xmode context\n Exception reporting mode: Context\n\n In [4]: run simpleerr.py\n ---------------------------------------------------------------------------\n ZeroDivisionError Traceback (most recent call last)\n \n ...\n 30 except IndexError:\n 31 mode = 'div'\n ---> 33 bar(mode)\n \n ... in bar(mode)\n 15 \"bar\"\n 16 if mode=='div':\n ---> 17 div0()\n 18 elif mode=='exit':\n 19 try:\n \n ... in div0()\n 6 x = 1\n 7 y = 0\n ----> 8 x/y\n \n ZeroDivisionError: ...", "n_words": 66, "vocab_size": 53, "n_whitespaces": 260, "language": "en" } }, { "id": 11754, "commit_id": "beb0d8f569530755f7797781a8cb49e1b8a2faaf", "repo": "jina", "path": "jina/hubble/hubio.py", "file_name": "hubio.py", "fun_name": "new", "commit_message": "feat(hubble): fetch image only when required (#4445)", "code": "def new(self) -> None:\n \n\n from rich import box, print\n from rich.console import Console\n from rich.panel import Panel\n from rich.progress import track\n from rich.prompt import Confirm, Prompt\n from rich.syntax import Syntax\n from rich.table import Table\n\n console = Console()\n\n print(\n Panel.fit(\n ,\n title='Create New Executor',\n )\n )\n\n exec_name = (\n self.args.name\n if self.args.name\n else Prompt.ask(\n ':grey_question: What is the [bold]name[/bold] of your executor?\\n'\n '[dim]CamelCase is required[/dim]',\n default=f'MyExecutor{random.randint(0, 100)}',\n )\n )\n\n exec_path = (\n self.args.path\n if self.args.path\n else Prompt.ask(\n ':grey_question: [bold]Which folder[/bold] to store your executor?',\n default=os.path.join(os.getcwd(), exec_name),\n )\n )\n exec_description = '{{}}'\n exec_keywords = '{{}}'\n exec_url = '{{}}'\n\n is_dockerfile = False\n\n if self.args.advance_configuration or Confirm.ask(\n '[green]That\\'s all we need to create an Executor![/green]\\n'\n ':grey_question: Or do you want to proceed to advanced configuration',\n default=False,\n ):\n exec_description = (\n self.args.description\n if self.args.description\n else (\n Prompt.ask(\n ':grey_question: Please give a [bold]short description[/bold] of your executor?\\n'\n f'[dim]Example: {exec_name} embeds images into 128-dim vectors using ResNet.[/dim]'\n )\n )\n )\n\n exec_keywords = (\n self.args.keywords\n if self.args.keywords\n else (\n Prompt.ask(\n ':grey_question: Please give some [bold]keywords[/bold] to help people search your executor [dim](separated by comma)[/dim]\\n'\n f'[dim]Example: image cv embedding encoding resnet[/dim]'\n )\n )\n )\n\n exec_url = (\n self.args.url\n if self.args.url\n else (\n Prompt.ask(\n ':grey_question: What is the [bold]URL[/bold] for GitHub repo?\\n'\n f'[dim]Example: https://github.com/yourname/my-executor[/dim]'\n )\n )\n )\n\n print(\n Panel.fit(\n ,\n title='[Optional] [bold]Dockerfile[/bold]',\n width=80,\n )\n )\n\n is_dockerfile = self.args.add_dockerfile or Confirm.ask(\n ':grey_question: Do you need to write your own [bold]Dockerfile[/bold] instead of the auto-generated one?',\n default=False,\n )\n print('[green]That\\'s all we need to create an Executor![/green]')\n", "url": "https://github.com/jina-ai/jina.git", "language": 
"Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 1309, "n_words": 244, "vocab_size": 133, "complexity": 11, "nloc": 205, "token_counts": 694, "n_ast_nodes": 505, "n_identifiers": 42, "d_id": 2111, "documentation": { "docstring": "Create a new executor folder interactively.\n[bold green]Executor[/bold green] is how Jina processes [bold]Document[/bold].\n\nThis guide helps you to create your own Executor in 30 seconds.\n[bold]Dockerfile[/bold] describes how this executor will be built. It is useful when\nyour executor has non-trivial dependencies or must be run under certain environment.\n\n- If the [bold]Dockerfile[/bold] is missing, Jina automatically generates one for you.\n- If you provide one, then Jina will respect the given [bold]Dockerfile[/bold].", "n_words": 74, "vocab_size": 59, "n_whitespaces": 67, "language": "en" } }, { "id": 205899, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/sql/where.py", "file_name": "where.py", "fun_name": "split_having", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def split_having(self, negated=False):\n \n if not self.contains_aggregate:\n return self, None\n in_negated = negated ^ self.negated\n # If the effective connector is OR and this node contains an aggregate,\n # then we need to push the whole branch to HAVING clause.\n may_need_split = (in_negated and self.connector == AND) or (\n not in_negated and self.connector == OR\n )\n if may_need_split and self.contains_aggregate:\n return None, self\n where_parts = []\n having_parts = []\n for c in self.children:\n if hasattr(c, \"split_having\"):\n where_part, having_part = c.split_having(in_negated)\n if where_part is not None:\n where_parts.append(where_part)\n if having_part is not None:\n having_parts.append(having_part)\n elif c.contains_aggregate:\n having_parts.append(c)\n else:\n where_parts.append(c)\n having_node = (\n self.__class__(having_parts, self.connector, self.negated)\n if having_parts\n else None\n )\n where_node = (\n self.__class__(where_parts, self.connector, self.negated)\n if where_parts\n else None\n )\n return where_node, having_node\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 478, "n_words": 121, "vocab_size": 75, "complexity": 14, "nloc": 33, "token_counts": 184, "n_ast_nodes": 290, "n_identifiers": 20, "d_id": 51273, "documentation": { "docstring": "\n Return two possibly None nodes: one for those parts of self that\n should be included in the WHERE clause and one for those parts of\n self that must be included in the HAVING clause.\n ", "n_words": 34, "vocab_size": 23, "n_whitespaces": 63, "language": "en" } }, { "id": 155868, "commit_id": "8971c37f810aa242295dd6a7d9a7cbdf9621d92e", "repo": "dask", "path": "dask/base.py", "file_name": "base.py", "fun_name": "unpack_collections", "commit_message": "Tokenize dataclasses (#8557)", "code": "def unpack_collections(*args, traverse=True):\n \n\n collections = []\n repack_dsk = {}\n\n collections_token = uuid.uuid4().hex\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 24, "n_words": 12, "vocab_size": 10, "complexity": 2, "nloc": 9, "token_counts": 64, "n_ast_nodes": 49, "n_identifiers": 9, "d_id": 36480, "documentation": { "docstring": "Extract collections in preparation for compute/persist/etc...\n\n Intended use is to 
find all collections in a set of (possibly nested)\n python objects, do something to them (compute, etc...), then repackage them\n in equivalent python objects.\n\n Parameters\n ----------\n *args\n Any number of objects. If it is a dask collection, it's extracted and\n added to the list of collections returned. By default, python builtin\n collections are also traversed to look for dask collections (for more\n information see the ``traverse`` keyword).\n traverse : bool, optional\n If True (default), builtin python collections are traversed looking for\n any dask collections they might contain.\n\n Returns\n -------\n collections : list\n A list of all dask collections contained in ``args``\n repack : callable\n A function to call on the transformed collections to repackage them as\n they were in the original ``args``.\n ", "n_words": 132, "vocab_size": 83, "n_whitespaces": 231, "language": "en" } }, { "id": 296995, "commit_id": "652fedf4d1de2645ba08e6ace5376c7126839154", "repo": "core", "path": "tests/components/html5/test_notify.py", "file_name": "test_notify.py", "fun_name": "test_callback_view_with_jwt", "commit_message": "Fix html5 Firefox Notifications (#82556)\n\nCo-authored-by: Paulus Schoutsen \r\nfixes undefined", "code": "async def test_callback_view_with_jwt(hass, hass_client):\n \n registrations = {\"device\": SUBSCRIPTION_1}\n client = await mock_client(hass, hass_client, registrations)\n\n with patch(\"homeassistant.components.html5.notify.WebPusher\") as mock_wp:\n mock_wp().send().status_code = 201\n await hass.services.async_call(\n \"notify\",\n \"notify\",\n {\"message\": \"Hello\", \"target\": [\"device\"], \"data\": {\"icon\": \"beer.png\"}},\n blocking=True,\n )\n\n assert len(mock_wp.mock_calls) == 4\n\n # WebPusher constructor\n assert mock_wp.mock_calls[2][1][0] == SUBSCRIPTION_1[\"subscription\"]\n\n # Call to send\n push_payload = json.loads(mock_wp.mock_calls[3][1][0])\n\n assert push_payload[\"body\"] == \"Hello\"\n assert push_payload[\"icon\"] == \"beer.png\"\n\n bearer_token = \"Bearer {}\".format(push_payload[\"data\"][\"jwt\"])\n\n resp = await client.post(\n PUBLISH_URL, json={\"type\": \"push\"}, headers={AUTHORIZATION: bearer_token}\n )\n\n assert resp.status == HTTPStatus.OK\n body = await resp.json()\n assert body == {\"event\": \"push\", \"status\": \"ok\"}\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 212, "n_words": 89, "vocab_size": 66, "complexity": 1, "nloc": 23, "token_counts": 208, "n_ast_nodes": 368, "n_identifiers": 30, "d_id": 95966, "documentation": { "docstring": "Test that the notification callback view works with JWT.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 165791, "commit_id": "f99ec8bf80ba64b2f852cfab7b27ec9e05055589", "repo": "pandas", "path": "pandas/io/formats/style_render.py", "file_name": "style_render.py", "fun_name": "_render_href", "commit_message": "BUG: url regex in `style_render` does not pass colon and other valid (#46457)\n\n* BUG: url regex in `style_render` does not pass colon and other valid\r\n\r\nURLs containing some valid characters such as colon in port numbers get\r\ncut off when html-formatting. 
As a workaround, expanded the regex to\r\nmatch a wider variety of URLs.\r\n\r\n* Add whatsnew entry for #46389 fix\r\n\r\n* Update whatsnew entry for fix #46389\r\n\r\nCo-authored-by: Simon Hawkins \r\n\r\nCo-authored-by: Simon Hawkins ", "code": "def _render_href(x, format):\n \n if isinstance(x, str):\n if format == \"html\":\n href = '{0}'\n elif format == \"latex\":\n href = r\"\\href{{{0}}}{{{0}}}\"\n else:\n raise ValueError(\"``hyperlinks`` format can only be 'html' or 'latex'\")\n pat = r\"((http|ftp)s?:\\/\\/|www.)[\\w/\\-?=%.:@]+\\.[\\w/\\-&?=%.,':;~!@#$*()\\[\\]]+\"\n return re.sub(pat, lambda m: href.format(m.group(0)), x)\n return x\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 120, "n_words": 43, "vocab_size": 35, "complexity": 4, "nloc": 11, "token_counts": 70, "n_ast_nodes": 121, "n_identifiers": 12, "d_id": 39716, "documentation": { "docstring": "uses regex to detect a common URL pattern and converts to href tag in format.", "n_words": 15, "vocab_size": 14, "n_whitespaces": 14, "language": "en" } }, { "id": 204349, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/staticfiles/management/commands/collectstatic.py", "file_name": "collectstatic.py", "fun_name": "collect", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def collect(self):\n \n if self.symlink and not self.local:\n raise CommandError(\"Can't symlink to a remote destination.\")\n\n if self.clear:\n self.clear_dir(\"\")\n\n if self.symlink:\n handler = self.link_file\n else:\n handler = self.copy_file\n\n found_files = {}\n for finder in get_finders():\n for path, storage in finder.list(self.ignore_patterns):\n # Prefix the relative path if the source storage contains it\n if getattr(storage, \"prefix\", None):\n prefixed_path = os.path.join(storage.prefix, path)\n else:\n prefixed_path = path\n\n if prefixed_path not in found_files:\n found_files[prefixed_path] = (storage, path)\n handler(path, prefixed_path, storage)\n else:\n self.log(\n \"Found another file with the destination path '%s'. It \"\n \"will be ignored since only the first encountered file \"\n \"is collected. 
If this is not what you want, make sure \"\n \"every static file has a unique path.\" % prefixed_path,\n level=1,\n )\n\n # Storage backends may define a post_process() method.\n if self.post_process and hasattr(self.storage, \"post_process\"):\n processor = self.storage.post_process(found_files, dry_run=self.dry_run)\n for original_path, processed_path, processed in processor:\n if isinstance(processed, Exception):\n self.stderr.write(\"Post-processing '%s' failed!\" % original_path)\n # Add a blank line before the traceback, otherwise it's\n # too easy to miss the relevant part of the error message.\n self.stderr.write()\n raise processed\n if processed:\n self.log(\n \"Post-processed '%s' as '%s'\" % (original_path, processed_path),\n level=2,\n )\n self.post_processed_files.append(original_path)\n else:\n self.log(\"Skipped post-processing '%s'\" % original_path)\n\n return {\n \"modified\": self.copied_files + self.symlinked_files,\n \"unmodified\": self.unmodified_files,\n \"post_processed\": self.post_processed_files,\n }\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 957, "n_words": 204, "vocab_size": 144, "complexity": 14, "nloc": 47, "token_counts": 274, "n_ast_nodes": 453, "n_identifiers": 40, "d_id": 50706, "documentation": { "docstring": "\n Perform the bulk of the work of collectstatic.\n\n Split off from handle() to facilitate testing.\n ", "n_words": 15, "vocab_size": 13, "n_whitespaces": 37, "language": "en" } }, { "id": 128859, "commit_id": "d99eff919bf785f911e4eebc87ddc4960344a139", "repo": "ray", "path": "python/ray/train/tests/test_gpu.py", "file_name": "test_gpu.py", "fun_name": "test_torch_auto_gpu_to_cpu", "commit_message": "[AIR] Hard deprecate old Trainer, old callbacks (#29015)\n\nHard deprecations for ray.train.Trainer, ray.train.callbacks and ray.train.checkpoint.CheckpointStrategy. Restart-on-failure logic from BackendExecutor has also been removed as it is superseded by Tune.\r\n\r\nSome tests have been refactored to use the new API. 
Tests that are no longer applicable have been removed.\r\n\r\nSigned-off-by: Antoni Baum \r\nSigned-off-by: Amog Kamsetty \r\nCo-authored-by: Amog Kamsetty ", "code": "def test_torch_auto_gpu_to_cpu(ray_start_4_cpus_2_gpus):\n \n num_workers = 2\n assert os.environ[\"CUDA_VISIBLE_DEVICES\"] == \"\"\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 18, "n_words": 9, "vocab_size": 9, "complexity": 3, "nloc": 23, "token_counts": 163, "n_ast_nodes": 35, "n_identifiers": 5, "d_id": 28826, "documentation": { "docstring": "Tests if GPU tensors are auto converted to CPU on driver.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 293617, "commit_id": "7876ffe9e392b20da16f0d0c44c723f526f807e6", "repo": "core", "path": "homeassistant/components/google/api.py", "file_name": "api.py", "fun_name": "verification_url", "commit_message": "Update google calendar integration with a config flow (#68010)\n\n* Convert google calendar to config flow and async\r\n\r\n* Call correct exchange method\r\n\r\n* Fix async method and reduce unnecessary diffs\r\n\r\n* Wording improvements\r\n\r\n* Reduce unnecessary diffs\r\n\r\n* Run load/update config from executor\r\n\r\n* Update homeassistant/components/google/calendar.py\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Remove unnecessary updating of unexpected multiple config entries.\r\n\r\n* Remove unnecessary unique_id checks\r\n\r\n* Improve readability with comments about device code expiration\r\n\r\n* Update homeassistant/components/google/calendar.py\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Update homeassistant/components/google/calendar.py\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Update homeassistant/components/google/api.py\r\n\r\nCo-authored-by: Martin Hjelmare \r\n\r\n* Add comment for when code is none on timeout\r\n\r\nCo-authored-by: Martin Hjelmare ", "code": "def verification_url(self) -> str:\n \n return self._device_flow_info.verification_url\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 14, "n_ast_nodes": 25, "n_identifiers": 4, "d_id": 92674, "documentation": { "docstring": "Return the verification url that the user should visit to enter the code.", "n_words": 13, "vocab_size": 11, "n_whitespaces": 12, "language": "en" } }, { "id": 133869, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/agents/trainer.py", "file_name": "trainer.py", "fun_name": "step_attempt", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def step_attempt(self) -> ResultDict:\n \n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 11, "n_words": 4, "vocab_size": 4, "complexity": 10, "nloc": 60, "token_counts": 278, "n_ast_nodes": 16, "n_identifiers": 3, "d_id": 30129, "documentation": { "docstring": "Attempts a single training step, including evaluation, if required.\n\n Override this method in your Trainer sub-classes if you would like to\n keep the n step-attempts logic (catch worker failures) in place or\n override `step()` directly if you would like to handle worker\n failures yourself.\n\n Returns:\n The results dict with 
stats/infos on sampling, training,\n and - if required - evaluation.\n ", "n_words": 59, "vocab_size": 49, "n_whitespaces": 123, "language": "en" } }, { "id": 226431, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_choropleth.py", "file_name": "_choropleth.py", "fun_name": "zauto", "commit_message": "switch to black .22", "code": "def zauto(self):\n \n return self[\"zauto\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 58104, "documentation": { "docstring": "\n Determines whether or not the color domain is computed with\n respect to the input data (here in `z`) or the bounds set in\n `zmin` and `zmax` Defaults to `false` when `zmin` and `zmax`\n are set by the user.\n\n The 'zauto' property must be specified as a bool\n (either True, or False)\n\n Returns\n -------\n bool\n ", "n_words": 54, "vocab_size": 42, "n_whitespaces": 126, "language": "en" } }, { "id": 120278, "commit_id": "667d63aa2d4fbf7c9da73aab0e24c5c4c33cb5ba", "repo": "jax", "path": "jax/_src/numpy/lax_numpy.py", "file_name": "lax_numpy.py", "fun_name": "ravel_multi_index", "commit_message": "replace int with operator.index part2\n\nThis change align the behavior of `ravel_multi_index`, `split` and `indices` to their `numpy` counterparts.\nAlso ensure size argument of `nonzero` should be integer.\nThe changes with `*space` are only simplification", "code": "def ravel_multi_index(multi_index, dims, mode='raise', order='C'):\n assert len(multi_index) == len(dims), f\"len(multi_index)={len(multi_index)} != len(dims)={len(dims)}\"\n dims = tuple(core.concrete_or_error(operator.index, d, \"in `dims` argument of ravel_multi_index().\") for d in dims)\n _check_arraylike(\"ravel_multi_index\", *multi_index)\n for index in multi_index:\n if mode == 'raise':\n core.concrete_or_error(array, index,\n \"The error occurred because ravel_multi_index was jit-compiled\"\n \" with mode='raise'. Use mode='wrap' or mode='clip' instead.\")\n if not issubdtype(_dtype(index), integer):\n raise TypeError(\"only int indices permitted\")\n if mode == \"raise\":\n if _any(any((i < 0) | (i >= d)) for i, d in zip(multi_index, dims)):\n raise ValueError(\"invalid entry in coordinates array\")\n elif mode == \"clip\":\n multi_index = [clip(i, 0, d - 1) for i, d in zip(multi_index, dims)]\n elif mode == \"wrap\":\n multi_index = [i % d for i, d in zip(multi_index, dims)]\n else:\n raise ValueError(f\"invalid mode={mode!r}. Expected 'raise', 'wrap', or 'clip'\")\n\n if order == \"F\":\n strides = np.cumprod((1,) + dims[:-1])\n elif order == \"C\":\n strides = np.cumprod((1,) + dims[1:][::-1])[::-1]\n else:\n raise ValueError(f\"invalid order={order!r}. 
Expected 'C' or 'F'\")\n\n result = array(0, dtype=dtypes.canonicalize_dtype(int_))\n for i, s in zip(multi_index, strides):\n result = result + i * s\n return result\n\n\n_UNRAVEL_INDEX_DOC = \n\n@_wraps(np.unravel_index, lax_description=_UNRAVEL_INDEX_DOC)", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "@_wraps(np.unravel_index, lax_description=_UNRAVEL_INDEX_DOC)", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 246, "n_words": 175, "vocab_size": 112, "complexity": 15, "nloc": 30, "token_counts": 294, "n_ast_nodes": 529, "n_identifiers": 37, "d_id": 26808, "documentation": { "docstring": "\\\nUnlike numpy's implementation of unravel_index, negative indices are accepted\nand out-of-bounds indices are clipped.\n", "n_words": 15, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 218370, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/inspect.py", "file_name": "inspect.py", "fun_name": "ismethod", "commit_message": "add python 3.10.4 for windows", "code": "def ismethod(object):\n \n return isinstance(object, types.MethodType)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 11, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 26, "n_identifiers": 5, "d_id": 55264, "documentation": { "docstring": "Return true if the object is an instance method.\n\n Instance method objects provide these attributes:\n __doc__ documentation string\n __name__ name with which this method was defined\n __func__ function object containing implementation of method\n __self__ instance to which this method is bound", "n_words": 41, "vocab_size": 33, "n_whitespaces": 100, "language": "en" } }, { "id": 289406, "commit_id": "31a787558fd312331b55e5c2c4b33341fc3601fc", "repo": "core", "path": "tests/components/logbook/test_init.py", "file_name": "test_init.py", "fun_name": "test_logbook_invalid_entity", "commit_message": "Ensure recorder test fixture is setup before hass fixture (#80528)\n\n* Ensure recorder test fixture is setup before hass fixture\r\n\r\n* Adjust more tests", "code": "async def test_logbook_invalid_entity(recorder_mock, hass, hass_client):\n \n await async_setup_component(hass, \"logbook\", {})\n await hass.async_block_till_done()\n client = await hass_client()\n\n # Today time 00:00:00\n start = dt_util.utcnow().date()\n start_date = datetime(start.year, start.month, start.day)\n\n # Test today entries with filter by end_time\n end_time = start + timedelta(hours=24)\n response = await client.get(\n f\"/api/logbook/{start_date.isoformat()}?end_time={end_time}&entity=invalid\"\n )\n assert response.status == HTTPStatus.INTERNAL_SERVER_ERROR\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 93, "n_words": 50, "vocab_size": 40, "complexity": 1, "nloc": 11, "token_counts": 87, "n_ast_nodes": 161, "n_identifiers": 25, "d_id": 88548, "documentation": { "docstring": "Test the logbook view with requesting an invalid entity.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 293721, "commit_id": "bc862e97ed68cce8c437327651f85892787e755e", "repo": "core", "path": "homeassistant/components/recorder/pool.py", "file_name": "pool.py", "fun_name": "recorder_or_dbworker", "commit_message": "Use a dedicated executor pool for database 
operations (#68105)\n\nCo-authored-by: Erik Montnemery \r\nCo-authored-by: Franck Nijhof ", "code": "def recorder_or_dbworker(self) -> bool:\n \n thread_name = threading.current_thread().name\n return bool(\n thread_name == \"Recorder\" or thread_name.startswith(DB_WORKER_PREFIX)\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 54, "n_words": 15, "vocab_size": 14, "complexity": 2, "nloc": 6, "token_counts": 31, "n_ast_nodes": 55, "n_identifiers": 9, "d_id": 92777, "documentation": { "docstring": "Check if the thread is a recorder or dbworker thread.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 267980, "commit_id": "3eb0485dd92c88cc92152d3656d94492db44b183", "repo": "ansible", "path": "test/lib/ansible_test/_internal/docker_util.py", "file_name": "docker_util.py", "fun_name": "get_network_names", "commit_message": "ansible-test - Use more native type hints. (#78435)\n\n* ansible-test - Use more native type hints.\r\n\r\nSimple search and replace to switch from comments to native type hints for return types of functions with no arguments.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of simple single-line function annotation type comments to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of single-line function annotation type comments with default values to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nManual conversion of type annotation comments for functions which have pylint directives.", "code": "def get_network_names(self) -> t.Optional[t.List[str]]:\n \n if self.networks is None:\n return None\n\n return sorted(self.networks)\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 44, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 34, "n_ast_nodes": 55, "n_identifiers": 8, "d_id": 79255, "documentation": { "docstring": "Return a list of the network names the container is attached to.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 257999, "commit_id": "5fedfb03b03496d7ca25f55788e1fa576ff1b2a4", "repo": "haystack", "path": "test/nodes/test_preprocessor.py", "file_name": "test_preprocessor.py", "fun_name": "test_page_number_extraction_on_empty_pages", "commit_message": "fix: Fix the error of wrong page numbers when documents contain empty pages. 
(#3330)\n\n* Fix the error of wrong page numbers when documents contain empty pages.\r\n\r\n* Reformat using git hooks.\r\n\r\n* Use a more descriptive placeholder", "code": "def test_page_number_extraction_on_empty_pages():\n \n preprocessor = PreProcessor(add_page_number=True, split_by=\"word\", split_length=7, split_overlap=0)\n text_page_one = \"This is a text on page one.\"\n text_page_three = \"This is a text on page three.\"\n # this is what we get from PDFToTextConverter in case of an \"empty\" page\n document_with_empty_pages = f\"{text_page_one}\\f\\f{text_page_three}\"\n document = Document(content=document_with_empty_pages)\n\n documents = preprocessor.process(document)\n\n assert documents[0].meta[\"page\"] == 1\n assert documents[1].meta[\"page\"] == 3\n\n # verify the placeholder for the empty page has been removed\n assert documents[0].content.strip() == text_page_one\n assert documents[1].content.strip() == text_page_three\n\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 115, "n_words": 76, "vocab_size": 52, "complexity": 1, "nloc": 11, "token_counts": 101, "n_ast_nodes": 179, "n_identifiers": 17, "d_id": 75178, "documentation": { "docstring": "\n Often \"marketing\" documents contain pages without text (visuals only). When extracting page numbers, these pages should be counted as well to avoid\n issues when mapping results back to the original document.\n ", "n_words": 31, "vocab_size": 29, "n_whitespaces": 41, "language": "en" } }, { "id": 33933, "commit_id": "ac224bb0797c1ee6522d814139f3eb0a8947267b", "repo": "transformers", "path": "src/transformers/models/segformer/modeling_segformer.py", "file_name": "modeling_segformer.py", "fun_name": "forward", "commit_message": "[Fix doc examples] Add missing from_pretrained (#15044)\n\n* fix doc example - ValueError: Parameter config should be an instance of class `PretrainedConfig`\r\n\r\n* Update src/transformers/models/segformer/modeling_segformer.py\r\n\r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>\r\n\r\n* update\r\n\r\nCo-authored-by: ydshieh \r\nCo-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>", "code": "def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None):\n r\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n encoder_outputs = self.encoder(\n pixel_values,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n\n if not return_dict:\n return (sequence_output,) + encoder_outputs[1:]\n\n return BaseModelOutput(\n last_hidden_state=sequence_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n\n@add_start_docstrings(\n ,\n SEGFORMER_START_DOCSTRING,\n)", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@add_start_docstrings(\n \"\"\"\n SegFormer Model transformer with an image classification head on top (a linear layer on top of the final hidden\n states) e.g. 
for ImageNet.\n \"\"\",\n SEGFORMER_START_DOCSTRING,\n)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 246, "n_words": 67, "vocab_size": 40, "complexity": 5, "nloc": 40, "token_counts": 127, "n_ast_nodes": 196, "n_identifiers": 17, "d_id": 6173, "documentation": { "docstring": "\n Returns:\n\n Examples:\n\n ```python\n >>> from transformers import SegformerFeatureExtractor, SegformerModel\n >>> from PIL import Image\n >>> import requests\n\n >>> feature_extractor = SegformerFeatureExtractor.from_pretrained(\"nvidia/mit-b0\")\n >>> model = SegformerModel.from_pretrained(\"nvidia/mit-b0\")\n\n >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> inputs = feature_extractor(images=image, return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n >>> sequence_output = outputs.last_hidden_state\n ```\n SegFormer Model transformer with an image classification head on top (a linear layer on top of the final hidden\n states) e.g. for ImageNet.\n ", "n_words": 71, "vocab_size": 50, "n_whitespaces": 179, "language": "en" } }, { "id": 206351, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/test/client.py", "file_name": "client.py", "fun_name": "logout", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def logout(self):\n \n from django.contrib.auth import get_user, logout\n\n request = HttpRequest()\n if self.session:\n request.session = self.session\n request.user = get_user(request)\n else:\n engine = import_module(settings.SESSION_ENGINE)\n request.session = engine.SessionStore()\n logout(request)\n self.cookies = SimpleCookie()\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 122, "n_words": 29, "vocab_size": 23, "complexity": 2, "nloc": 11, "token_counts": 71, "n_ast_nodes": 119, "n_identifiers": 17, "d_id": 51502, "documentation": { "docstring": "Log out the user by removing the cookies and session object.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 255403, "commit_id": "83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd", "repo": "onnx", "path": "onnx/test/compose_test.py", "file_name": "compose_test.py", "fun_name": "test_add_prefix_outputs", "commit_message": "Use Python type annotations rather than comments (#3962)\n\n* These have been supported since Python 3.5.\r\n\r\nONNX doesn't support Python < 3.6, so we can use the annotations.\r\n\r\nDiffs generated by https://pypi.org/project/com2ann/.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Remove MYPY conditional logic in gen_proto.py\r\n\r\nIt breaks the type annotations and shouldn't be needed.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Get rid of MYPY bool from more scripts\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* move Descriptors class above where its referenced in type annotation\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fixes\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* remove extra blank line\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotations\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotation in gen_docs\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix Operators.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix TestCoverage.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix protoc-gen-mypy.py\r\n\r\nSigned-off-by: Gary Miguel ", "code": "def test_add_prefix_outputs(self) -> None:\n \n 
self._test_add_prefix(rename_outputs=True)\n", "url": "https://github.com/onnx/onnx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 5, "token_counts": 16, "n_ast_nodes": 29, "n_identifiers": 4, "d_id": 74743, "documentation": { "docstring": "\n Tests prefixing graph outputs only. Relevant node edges should be renamed as well\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 28, "language": "en" } }, { "id": 220420, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/coroutines.py", "file_name": "coroutines.py", "fun_name": "coroutine", "commit_message": "add python 3.10.4 for windows", "code": "def coroutine(func):\n \n warnings.warn('\"@coroutine\" decorator is deprecated since Python 3.8, use \"async def\" instead',\n DeprecationWarning,\n stacklevel=2)\n if inspect.iscoroutinefunction(func):\n # In Python 3.5 that's all we need to do for coroutines\n # defined with \"async def\".\n return func\n\n if inspect.isgeneratorfunction(func):\n coro = func\n else:", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 119, "n_words": 42, "vocab_size": 37, "complexity": 4, "nloc": 19, "token_counts": 83, "n_ast_nodes": 72, "n_identifiers": 10, "d_id": 55987, "documentation": { "docstring": "Decorator to mark coroutines.\n\n If the coroutine is not yielded from before it is destroyed,\n an error message is logged.\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 29, "language": "en" } }, { "id": 181962, "commit_id": "c611fd84ff3a4f67a1f2a5a38d42fad37215cb9a", "repo": "textual", "path": "src/textual/app.py", "file_name": "app.py", "fun_name": "update_styles", "commit_message": "docstrings", "code": "def update_styles(self) -> None:\n \n self.post_message_no_wait(messages.RefreshStyles(self))\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 7, "token_counts": 19, "n_ast_nodes": 34, "n_identifiers": 5, "d_id": 43693, "documentation": { "docstring": "Request update of styles.\n\n Should be called whenever CSS classes / pseudo classes change.\n\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 28, "language": "en" } }, { "id": 160361, "commit_id": "3ba8be6f748edc3afedc49a320423093acfb43d4", "repo": "numpy", "path": "numpy/linalg/linalg.py", "file_name": "linalg.py", "fun_name": "pinv", "commit_message": "DOC:linalg: Remove ref to scipy.linalg.pinv2", "code": "def pinv(a, rcond=1e-15, hermitian=False):\n \n a, wrap = _makearray(a)\n rcond = asarray(rcond)\n if _is_empty_2d(a):\n m, n = a.shape[-2:]\n res = empty(a.shape[:-2] + (n, m), dtype=a.dtype)\n return wrap(res)\n a = a.conjugate()\n u, s, vt = svd(a, full_matrices=False, hermitian=hermitian)\n\n # discard small singular values\n cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)\n large = s > cutoff\n s = divide(1, s, where=large, out=s)\n s[~large] = 0\n\n res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))\n return wrap(res)\n\n\n# Determinant\n\n\n@array_function_dispatch(_unary_dispatcher)", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "@array_function_dispatch(_unary_dispatcher)", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 132, "n_words": 74, 
"vocab_size": 57, "complexity": 2, "nloc": 15, "token_counts": 182, "n_ast_nodes": 289, "n_identifiers": 34, "d_id": 38611, "documentation": { "docstring": "\n Compute the (Moore-Penrose) pseudo-inverse of a matrix.\n\n Calculate the generalized inverse of a matrix using its\n singular-value decomposition (SVD) and including all\n *large* singular values.\n\n .. versionchanged:: 1.14\n Can now operate on stacks of matrices\n\n Parameters\n ----------\n a : (..., M, N) array_like\n Matrix or stack of matrices to be pseudo-inverted.\n rcond : (...) array_like of float\n Cutoff for small singular values.\n Singular values less than or equal to\n ``rcond * largest_singular_value`` are set to zero.\n Broadcasts against the stack of matrices.\n hermitian : bool, optional\n If True, `a` is assumed to be Hermitian (symmetric if real-valued),\n enabling a more efficient method for finding singular values.\n Defaults to False.\n\n .. versionadded:: 1.17.0\n\n Returns\n -------\n B : (..., N, M) ndarray\n The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so\n is `B`.\n\n Raises\n ------\n LinAlgError\n If the SVD computation does not converge.\n\n See Also\n --------\n scipy.linalg.pinv : Similar function in SciPy.\n scipy.linalg.pinvh : Compute the (Moore-Penrose) pseudo-inverse of a\n Hermitian matrix.\n\n Notes\n -----\n The pseudo-inverse of a matrix A, denoted :math:`A^+`, is\n defined as: \"the matrix that 'solves' [the least-squares problem]\n :math:`Ax = b`,\" i.e., if :math:`\\\\bar{x}` is said solution, then\n :math:`A^+` is that matrix such that :math:`\\\\bar{x} = A^+b`.\n\n It can be shown that if :math:`Q_1 \\\\Sigma Q_2^T = A` is the singular\n value decomposition of A, then\n :math:`A^+ = Q_2 \\\\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are\n orthogonal matrices, :math:`\\\\Sigma` is a diagonal matrix consisting\n of A's so-called singular values, (followed, typically, by\n zeros), and then :math:`\\\\Sigma^+` is simply the diagonal matrix\n consisting of the reciprocals of A's singular values\n (again, followed by zeros). [1]_\n\n References\n ----------\n .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,\n FL, Academic Press, Inc., 1980, pp. 
139-142.\n\n Examples\n --------\n The following example checks that ``a * a+ * a == a`` and\n ``a+ * a * a+ == a+``:\n\n >>> a = np.random.randn(9, 6)\n >>> B = np.linalg.pinv(a)\n >>> np.allclose(a, np.dot(a, np.dot(B, a)))\n True\n >>> np.allclose(B, np.dot(B, np.dot(a, B)))\n True\n\n ", "n_words": 331, "vocab_size": 211, "n_whitespaces": 600, "language": "en" } }, { "id": 207747, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_save_as_continue_false", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_save_as_continue_false(self):\n \n post_data = {\"_saveasnew\": \"\", \"name\": \"John M\", \"gender\": 1, \"age\": 42}\n url = reverse(\n \"admin:admin_views_person_change\",\n args=(self.per1.pk,),\n current_app=site2.name,\n )\n response = self.client.post(url, post_data)\n self.assertEqual(len(Person.objects.filter(name=\"John M\")), 1)\n self.assertEqual(len(Person.objects.filter(id=self.per1.pk)), 1)\n self.assertRedirects(\n response,\n reverse(\"admin:admin_views_person_changelist\", current_app=site2.name),\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 152, "n_words": 34, "vocab_size": 30, "complexity": 1, "nloc": 14, "token_counts": 123, "n_ast_nodes": 203, "n_identifiers": 21, "d_id": 52083, "documentation": { "docstring": "\n Saving a new object using \"Save as new\" redirects to the changelist\n instead of the change view when ModelAdmin.save_as_continue=False.\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 41, "language": "en" } }, { "id": 205602, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/fields/related.py", "file_name": "related.py", "fun_name": "get_forward_related_filter", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_forward_related_filter(self, obj):\n \n return {\n \"%s__%s\" % (self.name, rh_field.name): getattr(obj, rh_field.attname)\n for _, rh_field in self.related_fields\n }\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 60, "n_words": 17, "vocab_size": 17, "complexity": 2, "nloc": 5, "token_counts": 39, "n_ast_nodes": 61, "n_identifiers": 9, "d_id": 51153, "documentation": { "docstring": "\n Return the keyword arguments that when supplied to\n self.model.object.filter(), would select all instances related through\n this field to the remote obj. This is used to build the querysets\n returned by related descriptors. 
obj is an instance of\n self.related_field.model.\n ", "n_words": 38, "vocab_size": 32, "n_whitespaces": 81, "language": "en" } }, { "id": 45548, "commit_id": "08575ddd8a72f96a3439f73e973ee9958188eb83", "repo": "airflow", "path": "tests/www/views/test_views_extra_links.py", "file_name": "test_views_extra_links.py", "fun_name": "test_operator_extra_link_override_plugin", "commit_message": "Change BaseOperatorLink interface to take a ti_key, not a datetime (#21798)", "code": "def test_operator_extra_link_override_plugin(dag_run, task_2, viewer_client):\n \n response = viewer_client.get(\n f\"{ENDPOINT}?dag_id={task_2.dag_id}&task_id={task_2.task_id}\"\n f\"&execution_date={DEFAULT_DATE}&link_name=airflow\",\n follow_redirects=True,\n )\n\n assert response.status_code == 200\n response_str = response.data\n if isinstance(response.data, bytes):\n response_str = response_str.decode()\n assert json.loads(response_str) == {'url': 'https://airflow.apache.org/1.10.5/', 'error': None}\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 80, "n_words": 31, "vocab_size": 26, "complexity": 2, "nloc": 11, "token_counts": 71, "n_ast_nodes": 139, "n_identifiers": 19, "d_id": 8640, "documentation": { "docstring": "\n This tests checks if Operator Link (AirflowLink) defined in the Dummy2TestOperator\n is overridden by Airflow Plugin (AirflowLink2).\n\n AirflowLink returns 'https://airflow.apache.org/' link\n AirflowLink2 returns 'https://airflow.apache.org/1.10.5/' link\n ", "n_words": 25, "vocab_size": 23, "n_whitespaces": 41, "language": "en" } }, { "id": 242493, "commit_id": "de968dd920eaa3d1a27877059c6bbb9043a9d26b", "repo": "Pillow", "path": "src/PIL/Image.py", "file_name": "Image.py", "fun_name": "histogram", "commit_message": "Document that histogram() uses 256 bins per channel", "code": "def histogram(self, mask=None, extrema=None):\n \n self.load()\n if mask:\n mask.load()\n return self.im.histogram((0, 0), mask.im)\n if self.mode in (\"I\", \"F\"):\n if extrema is None:\n extrema = self.getextrema()\n return self.im.histogram(extrema)\n return self.im.histogram()\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 122, "n_words": 28, "vocab_size": 23, "complexity": 4, "nloc": 10, "token_counts": 84, "n_ast_nodes": 137, "n_identifiers": 8, "d_id": 69871, "documentation": { "docstring": "\n Returns a histogram for the image. The histogram is returned as a\n list of pixel counts, one for each pixel value in the source\n image. Counts are grouped into 256 bins for each band, even if\n the image has more than 8 bits per band. If the image has more\n than one band, the histograms for all bands are concatenated (for\n example, the histogram for an \"RGB\" image contains 768 values).\n\n A bilevel image (mode \"1\") is treated as a greyscale (\"L\") image\n by this method.\n\n If a mask is provided, the method returns a histogram for those\n parts of the image where the mask image is non-zero. 
The mask\n image must have the same size as the image, and be either a\n bi-level image (mode \"1\") or a greyscale image (\"L\").\n\n :param mask: An optional mask.\n :param extrema: An optional tuple of manually-specified extrema.\n :returns: A list containing pixel counts.\n ", "n_words": 151, "vocab_size": 89, "n_whitespaces": 264, "language": "en" } }, { "id": 320618, "commit_id": "86b5bed388544d2d445a3dba151e3c3a4c8814b7", "repo": "qutebrowser", "path": "qutebrowser/browser/webengine/webenginetab.py", "file_name": "webenginetab.py", "fun_name": "_error_page_workaround", "commit_message": "Partially re-revert _error_page_workaround changes\n\nThe logging part removed in 438b8b46094890a28db6bac07ff1ae67bbc5ee78 is still needed for some tests, and debugging too.", "code": "def _error_page_workaround(self, js_enabled, html):\n \n match = re.search(r'\"errorCode\":\"([^\"]*)\"', html)\n if match is None:\n return\n\n error = match.group(1)\n log.webview.error(\"Load error: {}\".format(error))\n\n if js_enabled:\n return\n\n self._show_error_page(self.url(), error=error)\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 95, "n_words": 24, "vocab_size": 20, "complexity": 3, "nloc": 9, "token_counts": 66, "n_ast_nodes": 109, "n_identifiers": 14, "d_id": 117229, "documentation": { "docstring": "Check if we're displaying a Chromium error page.\n\n This gets called if we got a loadFinished(False), so we can display at\n least some error page in situations where Chromium's can't be\n displayed.\n\n WORKAROUND for https://bugreports.qt.io/browse/QTBUG-66643\n ", "n_words": 35, "vocab_size": 31, "n_whitespaces": 70, "language": "en" } }, { "id": 166209, "commit_id": "90140f055892a46f473bd26affab88a7f171e394", "repo": "pandas", "path": "pandas/core/exchange/column.py", "file_name": "column.py", "fun_name": "_get_validity_buffer", "commit_message": "ENH: Implement DataFrame interchange protocol (#46141)", "code": "def _get_validity_buffer(self) -> Tuple[PandasBuffer, Any]:\n \n null, invalid = self.describe_null\n\n if self.dtype[0] == DtypeKind.STRING:\n # For now, use byte array as the mask.\n # TODO: maybe store as bit array to save space?..\n buf = self._col.to_numpy()\n\n # Determine the encoding for valid values\n valid = invalid == 0\n invalid = not valid\n\n mask = np.zeros(shape=(len(buf),), dtype=np.bool8)\n for i, obj in enumerate(buf):\n mask[i] = valid if isinstance(obj, str) else invalid\n\n # Convert the mask array to a Pandas \"buffer\" using\n # a NumPy array as the backing store\n buffer = PandasBuffer(mask)\n\n # Define the dtype of the returned buffer\n dtype = (DtypeKind.BOOL, 8, ArrowCTypes.BOOL, Endianness.NATIVE)\n\n return buffer, dtype\n\n try:\n msg = _NO_VALIDITY_BUFFER[null] + \" so does not have a separate mask\"\n except KeyError:\n # TODO: implement for other bit/byte masks?\n raise NotImplementedError(\"See self.describe_null\")\n\n raise NoBufferPresent(msg)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 377, "n_words": 133, "vocab_size": 88, "complexity": 5, "nloc": 22, "token_counts": 147, "n_ast_nodes": 237, "n_identifiers": 36, "d_id": 39774, "documentation": { "docstring": "\n Return the buffer containing the mask values indicating missing data and\n the buffer's associated dtype.\n Raises NoBufferPresent if null representation is not a bit or byte mask.\n 
", "n_words": 27, "vocab_size": 25, "n_whitespaces": 56, "language": "en" } }, { "id": 61273, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/utils/misc.py", "file_name": "misc.py", "fun_name": "is_installable_dir", "commit_message": "upd; format", "code": "def is_installable_dir(path):\n # type: (str) -> bool\n \n if not os.path.isdir(path):\n return False\n setup_py = os.path.join(path, \"setup.py\")\n if os.path.isfile(setup_py):\n return True\n pyproject_toml = os.path.join(path, \"pyproject.toml\")\n if os.path.isfile(pyproject_toml):\n return True\n return False\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 75, "n_words": 30, "vocab_size": 21, "complexity": 4, "nloc": 10, "token_counts": 69, "n_ast_nodes": 116, "n_identifiers": 8, "d_id": 12484, "documentation": { "docstring": "Is path is a directory containing setup.py or pyproject.toml?", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 144436, "commit_id": "20ab9188c61d91ef0c79cc5d527f17db7c43b604", "repo": "ray", "path": "python/ray/_private/usage/usage_lib.py", "file_name": "usage_lib.py", "fun_name": "put_cluster_metadata", "commit_message": "[Ray Usage Stats] Record cluster metadata + Refactoring. (#22170)\n\nThis is the first PR to implement usage stats on Ray. Please refer to the file `usage_lib.py` for more details.\r\n\r\nThe full specification is here https://docs.google.com/document/d/1ZT-l9YbGHh-iWRUC91jS-ssQ5Qe2UQ43Lsoc1edCalc/edit#heading=h.17dss3b9evbj.\r\n\r\nYou can see the full PR for phase 1 from here; https://github.com/rkooo567/ray/pull/108/files.\r\n\r\nThe PR is doing some basic refactoring + adding cluster metadata to GCS instead of the version numbers. \r\n\r\nAfter this PR, we will add code to enable usage report \"off by default\".", "code": "def put_cluster_metadata(gcs_client, num_retries) -> None:\n \n metadata = _generate_cluster_metadata()\n ray._private.utils.internal_kv_put_with_retry(\n gcs_client,\n usage_constant.CLUSTER_METADATA_KEY,\n json.dumps(metadata).encode(),\n namespace=ray_constants.KV_NAMESPACE_CLUSTER,\n num_retries=num_retries,\n )\n return metadata\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 67, "n_words": 17, "vocab_size": 16, "complexity": 1, "nloc": 21, "token_counts": 53, "n_ast_nodes": 82, "n_identifiers": 17, "d_id": 33203, "documentation": { "docstring": "Generate the cluster metadata and store it to GCS.\n\n It is a blocking API.\n\n Params:\n gcs_client (GCSClient): The GCS client to perform KV operation PUT.\n num_retries (int): Max number of times to retry if PUT fails.\n\n Raises:\n gRPC exceptions if PUT fails.\n ", "n_words": 42, "vocab_size": 37, "n_whitespaces": 75, "language": "en" } }, { "id": 139719, "commit_id": "dea134a4726c46d57567e724bcc7a2de43f5200e", "repo": "ray", "path": "rllib/policy/torch_mixins.py", "file_name": "torch_mixins.py", "fun_name": "extra_action_out", "commit_message": "[RLlib] Clean up Policy mixins. (#24746)", "code": "def extra_action_out(self, input_dict, state_batches, model, action_dist):\n \n # Return value function outputs. 
VF estimates will hence be added to\n # the SampleBatches produced by the sampler(s) to generate the train\n # batches going into the loss function.\n return {\n SampleBatch.VF_PREDS: model.value_function(),\n }\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 94, "n_words": 41, "vocab_size": 35, "complexity": 1, "nloc": 4, "token_counts": 27, "n_ast_nodes": 44, "n_identifiers": 9, "d_id": 31763, "documentation": { "docstring": "Defines extra fetches per action computation.\n\n Args:\n input_dict (Dict[str, TensorType]): The input dict used for the action\n computing forward pass.\n state_batches (List[TensorType]): List of state tensors (empty for\n non-RNNs).\n model (ModelV2): The Model object of the Policy.\n action_dist (TorchDistributionWrapper): The instantiated distribution\n object, resulting from the model's outputs and the given\n distribution class.\n\n Returns:\n Dict[str, TensorType]: Dict with extra tf fetches to perform per\n action computation.\n ", "n_words": 66, "vocab_size": 52, "n_whitespaces": 217, "language": "en" } }, { "id": 126672, "commit_id": "326b5bd1acc6d3d00ab0546e4ae45da6bed501f7", "repo": "ray", "path": "dashboard/modules/job/tests/test_job_manager.py", "file_name": "test_job_manager.py", "fun_name": "test_kill_job_actor_in_before_driver_finish", "commit_message": "Convert job_manager to be async (#27123)\n\nUpdates jobs api\r\nUpdates snapshot api\r\nUpdates state api\r\n\r\nIncreases jobs api version to 2\r\n\r\nSigned-off-by: Alan Guo aguo@anyscale.com\r\n\r\nWhy are these changes needed?\r\nfollow-up for #25902 (comment)", "code": "async def test_kill_job_actor_in_before_driver_finish(self, job_manager):\n \n\n with tempfile.TemporaryDirectory() as tmp_dir:\n pid_file, _, job_id = await _run_hanging_command(job_manager, tmp_dir)\n with open(pid_file, \"r\") as file:\n pid = int(file.read())\n assert psutil.pid_exists(pid), \"driver subprocess should be running\"\n\n actor = job_manager._get_actor_for_job(job_id)\n ray.kill(actor, no_restart=True)\n await async_wait_for_condition_async_predicate(\n check_job_failed, job_manager=job_manager, job_id=job_id\n )\n\n # Ensure driver subprocess gets cleaned up after job reached\n # termination state\n await async_wait_for_condition(check_subprocess_cleaned, pid=pid)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 215, "n_words": 57, "vocab_size": 49, "complexity": 1, "nloc": 12, "token_counts": 99, "n_ast_nodes": 168, "n_identifiers": 26, "d_id": 28224, "documentation": { "docstring": "\n Test submitting a long running / blocker driver script, and kill\n the job supervisor actor before script returns and ensure\n\n 1) Job status is correctly marked as failed\n 2) No hanging subprocess from failed job\n ", "n_words": 35, "vocab_size": 32, "n_whitespaces": 71, "language": "en" } }, { "id": 303766, "commit_id": "ebbff7b60e43f17d65ead811d314602b9daddfc4", "repo": "core", "path": "tests/components/awair/conftest.py", "file_name": "conftest.py", "fun_name": "mint_data_fixture", "commit_message": "Add Awair Local API support (#75535)", "code": "def mint_data_fixture():\n \n return json.loads(load_fixture(\"awair/mint.json\"))\n\n\n@pytest.fixture(name=\"no_devices\", scope=\"session\")", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture(name=\"no_devices\", 
scope=\"session\")", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 11, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 54, "n_identifiers": 8, "d_id": 102575, "documentation": { "docstring": "Fixture representing data returned from Awair mint device.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 288775, "commit_id": "7d097d18b0c6041475080b3c400e37b25185faba", "repo": "core", "path": "homeassistant/components/snooz/config_flow.py", "file_name": "config_flow.py", "fun_name": "_async_wait_for_pairing_mode", "commit_message": "Add support for Snooz BLE devices (#78790)\n\nCo-authored-by: J. Nick Koston ", "code": "async def _async_wait_for_pairing_mode(self) -> None:\n \n assert self._discovery\n device = self._discovery.device\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 2, "nloc": 17, "token_counts": 76, "n_ast_nodes": 34, "n_identifiers": 4, "d_id": 87927, "documentation": { "docstring": "Process advertisements until pairing mode is detected.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 208063, "commit_id": "1c4ff33bd22cf94e297bd6449a06b5a30c2c1fbc", "repo": "celery", "path": "celery/canvas.py", "file_name": "canvas.py", "fun_name": "on_chord_header_end", "commit_message": "Canvas Header Stamping (#7384)\n\n* Strip down the header-stamping PR to the basics.\r\n\r\n* Serialize groups.\r\n\r\n* Add groups to result backend meta data.\r\n\r\n* Fix spelling mistake.\r\n\r\n* Revert changes to canvas.py\r\n\r\n* Revert changes to app/base.py\r\n\r\n* Add stamping implementation to canvas.py\r\n\r\n* Send task to AMQP with groups.\r\n\r\n* Successfully pass single group to result.\r\n\r\n* _freeze_gid dict merge fixed\r\n\r\n* First draft of the visitor API.\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* OptionsVisitor created\r\n\r\n* Fixed canvas.py\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test for simple test for chord and fixed chord implementation\r\n\r\n* Changed _IMMUTABLE_OPTIONS\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed list order\r\n\r\n* Fixed tests (stamp test and chord test), fixed order in groups\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed lint and elements\r\n\r\n* Changed implementation of stamp API and fix lint\r\n\r\n* Added documentation to Stamping API. 
Added chord with groups test\r\n\r\n* Implemented stamping inside replace and added test for an implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Splitted into subtests\r\n\r\n* Group stamping rollback\r\n\r\n* group.id is None fixed\r\n\r\n* Added integration test\r\n\r\n* Added integration test\r\n\r\n* apply_async fixed\r\n\r\n* Integration test and test_chord fixed\r\n\r\n* Lint fixed\r\n\r\n* chord freeze fixed\r\n\r\n* Minor fixes.\r\n\r\n* Chain apply_async fixed and tests fixed\r\n\r\n* lint fixed\r\n\r\n* Added integration test for chord\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* type -> isinstance\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Redo header stamping (#7341)\r\n\r\n* _freeze_gid dict merge fixed\r\n\r\n* OptionsVisitor created\r\n\r\n* Fixed canvas.py\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test for simple test for chord and fixed chord implementation\r\n\r\n* Changed _IMMUTABLE_OPTIONS\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed list order\r\n\r\n* Fixed tests (stamp test and chord test), fixed order in groups\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed lint and elements\r\n\r\n* Changed implementation of stamp API and fix lint\r\n\r\n* Added documentation to Stamping API. 
Added chord with groups test\r\n\r\n* Implemented stamping inside replace and added test for an implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Splitted into subtests\r\n\r\n* Group stamping rollback\r\n\r\n* group.id is None fixed\r\n\r\n* Added integration test\r\n\r\n* Added integration test\r\n\r\n* apply_async fixed\r\n\r\n* Integration test and test_chord fixed\r\n\r\n* Lint fixed\r\n\r\n* chord freeze fixed\r\n\r\n* Minor fixes.\r\n\r\n* Chain apply_async fixed and tests fixed\r\n\r\n* lint fixed\r\n\r\n* Added integration test for chord\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* type -> isinstance\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Omer Katz \r\n\r\n* Added stamping mechanism\r\n\r\n* Manual stamping improved\r\n\r\n* flake8 fixed\r\n\r\n* Added subtests\r\n\r\n* Add comma.\r\n\r\n* Moved groups to stamps\r\n\r\n* Fixed chord and added test for that\r\n\r\n* Strip down the header-stamping PR to the basics.\r\n\r\n* Serialize groups.\r\n\r\n* Add groups to result backend meta data.\r\n\r\n* Fix spelling mistake.\r\n\r\n* Revert changes to canvas.py\r\n\r\n* Revert changes to app/base.py\r\n\r\n* Add stamping implementation to canvas.py\r\n\r\n* Send task to AMQP with groups.\r\n\r\n* Successfully pass single group to result.\r\n\r\n* _freeze_gid dict merge fixed\r\n\r\n* First draft of the visitor API.\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* OptionsVisitor created\r\n\r\n* Fixed canvas.py\r\n\r\n* Added test for simple test for chord and fixed chord implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Changed _IMMUTABLE_OPTIONS\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed list order\r\n\r\n* Fixed tests (stamp test and chord test), fixed order in groups\r\n\r\n* Fixed lint and elements\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Changed implementation of stamp API and fix lint\r\n\r\n* Added documentation to Stamping API. 
Added chord with groups test\r\n\r\n* Implemented stamping inside replace and added test for an implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Splitted into subtests\r\n\r\n* Group stamping rollback\r\n\r\n* group.id is None fixed\r\n\r\n* Added integration test\r\n\r\n* Added integration test\r\n\r\n* apply_async fixed\r\n\r\n* Integration test and test_chord fixed\r\n\r\n* Lint fixed\r\n\r\n* chord freeze fixed\r\n\r\n* Minor fixes.\r\n\r\n* Chain apply_async fixed and tests fixed\r\n\r\n* lint fixed\r\n\r\n* Added integration test for chord\r\n\r\n* type -> isinstance\r\n\r\n* Added stamping mechanism\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Manual stamping improved\r\n\r\n* fail_ci_if_error uncommented\r\n\r\n* flake8 fixed\r\n\r\n* Added subtests\r\n\r\n* Changes\r\n\r\n* Add comma.\r\n\r\n* Fixed chord and added test for that\r\n\r\n* canvas.py fixed\r\n\r\n* Test chord.py fixed\r\n\r\n* Fixed stamped_headers\r\n\r\n* collections import fixed\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* collections import fixed\r\n\r\n* Update celery/backends/base.py\r\n\r\nCo-authored-by: Omer Katz \r\n\r\n* ampq.py fixed\r\n\r\n* Refrain from using deprecated import path.\r\n\r\n* Fix test_complex_chain regression.\r\n\r\nWhenever we stamp a group we need to freeze it first if it wasn't already frozen.\r\nSomewhere along the line, the group id changed because we were freezing twice.\r\nThis commit places the stamping operation after preparing the chain's steps which fixes the problem somehow.\r\n\r\nWe don't know why yet.\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed issues with maybe_list. 
Add documentation\r\n\r\n* Fixed potential issue with integration tests\r\n\r\n* Fixed issues with _regen\r\n\r\n* Fixed issues with _regen\r\n\r\n* Fixed test_generator issues\r\n\r\n* Fixed _regen stamping\r\n\r\n* Fixed _regen stamping\r\n\r\n* Fixed TimeOut issue\r\n\r\n* Fixed TimeOut issue\r\n\r\n* Fixed TimeOut issue\r\n\r\n* Update docs/userguide/canvas.rst\r\n\r\nCo-authored-by: Omer Katz \r\n\r\n* Fixed Couchbase\r\n\r\n* Better stamping intro\r\n\r\n* New GroupVisitor example\r\n\r\n* Adjust documentation.\r\n\r\nCo-authored-by: Naomi Elstein \r\nCo-authored-by: Omer Katz \r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Asif Saif Uddin \r\nCo-authored-by: Omer Katz ", "code": "def on_chord_header_end(self, chord, **header) -> None:\n \n self.on_group_end(chord.tasks, **header)\n", "url": "https://github.com/celery/celery.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 22, "n_words": 8, "vocab_size": 7, "complexity": 1, "nloc": 8, "token_counts": 24, "n_ast_nodes": 39, "n_identifiers": 6, "d_id": 52189, "documentation": { "docstring": "Method that is called on сhord header stamping end.\n\n Arguments:\n chord (chord): chord that is stamped.\n headers (Dict): Partial headers that could be merged with existing headers.\n ", "n_words": 27, "vocab_size": 22, "n_whitespaces": 72, "language": "en" } }, { "id": 3353, "commit_id": "f83eca58eaf2129d21b5796a301732ab22675130", "repo": "airbyte", "path": "airbyte-cdk/python/unit_tests/sources/test_abstract_source.py", "file_name": "test_abstract_source.py", "fun_name": "test_valid_incremental_read_with_slices", "commit_message": "CDK: Fix typing errors (#9037)\n\n* fix typing, drop AirbyteLogger\r\n\r\n* format\r\n\r\n* bump the version\r\n\r\n* use logger instead of fixture logger\r\n\r\nCo-authored-by: Eugene Kulak \r\nCo-authored-by: auganbay ", "code": "def test_valid_incremental_read_with_slices(mocker):\n \n slices = [{\"1\": \"1\"}, {\"2\": \"2\"}]\n stream_output = [{\"k1\": \"v1\"}, {\"k2\": \"v2\"}, {\"k3\": \"v3\"}]\n s1 = MockStream(\n [({\"sync_mode\": SyncMode.incremental, \"stream_slice\": s, \"stream_state\": mocker.ANY}, stream_output) for s in slices], name=\"s1\"\n )\n s2 = MockStream(\n [({\"sync_mode\": SyncMode.incremental, \"stream_slice\": s, \"stream_state\": mocker.ANY}, stream_output) for s in slices], name=\"s2\"\n )\n state = {\"cursor\": \"value\"}\n mocker.patch.object(MockStream, \"get_updated_state\", return_value=state)\n mocker.patch.object(MockStream, \"supports_incremental\", return_value=True)\n mocker.patch.object(MockStream, \"get_json_schema\", return_value={})\n mocker.patch.object(MockStream, \"stream_slices\", return_value=slices)\n\n src = MockSource(streams=[s1, s2])\n catalog = ConfiguredAirbyteCatalog(streams=[_configured_stream(s1, SyncMode.incremental), _configured_stream(s2, SyncMode.incremental)])\n\n expected = [\n # stream 1 slice 1\n *_as_records(\"s1\", stream_output),\n _state({\"s1\": state}),\n # stream 1 slice 2\n *_as_records(\"s1\", stream_output),\n _state({\"s1\": state}),\n # stream 2 slice 1\n *_as_records(\"s2\", stream_output),\n _state({\"s1\": state, \"s2\": state}),\n # stream 2 slice 2\n *_as_records(\"s2\", stream_output),\n _state({\"s1\": state, \"s2\": state}),\n ]\n\n messages = _fix_emitted_at(list(src.read(logger, {}, catalog, state=defaultdict(dict))))\n\n assert expected == messages\n\n", "url": "https://github.com/airbytehq/airbyte.git", 
"language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 280, "n_words": 128, "vocab_size": 74, "complexity": 3, "nloc": 28, "token_counts": 326, "n_ast_nodes": 544, "n_identifiers": 32, "d_id": 455, "documentation": { "docstring": "Tests that an incremental read which uses slices outputs each record in the slice followed by a STATE message, for each slice", "n_words": 22, "vocab_size": 20, "n_whitespaces": 21, "language": "en" } }, { "id": 267978, "commit_id": "3eb0485dd92c88cc92152d3656d94492db44b183", "repo": "ansible", "path": "test/lib/ansible_test/_internal/docker_util.py", "file_name": "docker_util.py", "fun_name": "require_docker", "commit_message": "ansible-test - Use more native type hints. (#78435)\n\n* ansible-test - Use more native type hints.\r\n\r\nSimple search and replace to switch from comments to native type hints for return types of functions with no arguments.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of simple single-line function annotation type comments to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of single-line function annotation type comments with default values to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nManual conversion of type annotation comments for functions which have pylint directives.", "code": "def require_docker() -> DockerCommand:\n \n if command := get_docker_command():\n return command\n\n raise ApplicationError(f'No container runtime detected. Supported commands: {\", \".join(DOCKER_COMMANDS)}')\n\n\n@cache", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "@cache", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 35, "n_words": 20, "vocab_size": 19, "complexity": 2, "nloc": 5, "token_counts": 22, "n_ast_nodes": 58, "n_identifiers": 8, "d_id": 79253, "documentation": { "docstring": "Return the docker command to invoke. Raises an exception if docker is not available.", "n_words": 14, "vocab_size": 13, "n_whitespaces": 13, "language": "en" } }, { "id": 196099, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/graycode.py", "file_name": "graycode.py", "fun_name": "skip", "commit_message": "Updated import locations", "code": "def skip(self):\n \n self._skip = True\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 21, "n_identifiers": 3, "d_id": 47599, "documentation": { "docstring": "\n Skips the bit generation.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import GrayCode\n >>> a = GrayCode(3)\n >>> for i in a.generate_gray():\n ... if i == '010':\n ... a.skip()\n ... 
print(i)\n ...\n 000\n 001\n 011\n 010\n 111\n 101\n 100\n\n See Also\n ========\n\n generate_gray\n ", "n_words": 41, "vocab_size": 34, "n_whitespaces": 205, "language": "en" } }, { "id": 87161, "commit_id": "5462ee11ad11ebb9a50323befcd286816d7898c8", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_project_details.py", "file_name": "test_project_details.py", "fun_name": "test_dynamic_sampling_rules_should_contain_single_uniform_rule", "commit_message": "feat(ds): Support new DS behaviour in project_details endpoint (#40387)\n\nSupports new adaptive dynamic sampling behaviour alongside\r\nthe deprecated dynamic sampling behaviour and achieves that\r\nthrough feature flag differentiation\r\n\r\nThis PR achieve that through the following:\r\n- Introducing a new `DynamicSamplingBiasSerializer` which is composed of\r\nid representing the bias name and a boolean flag indicating whether that\r\nparticular flag is active or not\r\n- Modifies current existing behavior for both old sampling flag and new\r\nsampling flag. Essentially the new setup entails that to be on the old\r\ndynamic sampling, the following flags need to be enabled\r\n\"organizations:server-side-sampling\" and\r\n\"organizations:server-side-sampling-ui\", and to be on the new dynamic\r\nsampling configurations, you need the following flags to be enabled\r\n\"organizations:dynamic-sampling-basic\" and\r\n\"organizations:server-side-sampling\"\r\nP.S. 1: These flags will be replaced \r\n\"organizations:server-side-sampling-ui\" ->\r\n\"organizations:dynamic-sampling-deprecated\"\r\n\"organizations:server-side-sampling-basic\" ->\r\n\"organizations:dynamic-sampling\"\r\nHence, these feature flags need to be updated once this PR lands\r\nhttps://github.com/getsentry/sentry/pull/40388\r\nP.S. 2: If a project is on the new plan and the old plan, the new plan\r\ntakes precedence\r\n- Introduces default biases that are enabled by default and can be\r\noverwritten. The motivation to do this is to be able to add new biases\r\nthat are enabled by default, and both the GET and PUT request honor this\r\nlist\r\n- `GET` and `POST` endpoint does a dictionary update of user's stored\r\nbiases on the default biases that are hardcoded, and returns them to the\r\nUI/ relay. 
This means that the introduced project option\r\n\"sentry:dynamic_sampling_biases\" might not have all the toggles\r\nenabled/disabled through the UI but only the ones that a customer chose\r\nto modify\r\n\r\n\r\nFollowup:\r\n- This new feature flag behaviour needs to be reflected in ProjectConfig\r\ncomputations", "code": "def test_dynamic_sampling_rules_should_contain_single_uniform_rule(self):\n \n with Feature({self.universal_ds_flag: True, self.old_ds_flag: True}):\n response = self.get_response(\n self.org_slug,\n self.proj_slug,\n dynamicSampling=_dyn_sampling_data(multiple_uniform_rules=True),\n )\n assert response.status_code == 400\n assert (\n response.json()[\"dynamicSampling\"][\"non_field_errors\"][0] == \"Uniform rule \"\n \"must be in the last position only\"\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 177, "n_words": 33, "vocab_size": 30, "complexity": 1, "nloc": 12, "token_counts": 74, "n_ast_nodes": 124, "n_identifiers": 14, "d_id": 18235, "documentation": { "docstring": "\n Tests that ensures you can only have one uniform rule\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 83601, "commit_id": "3a135b04d9e9f84aa2a31d6fc0b1b08e9cf9aeac", "repo": "zulip", "path": "zerver/tests/test_submessage.py", "file_name": "test_submessage.py", "fun_name": "test_submessage_event_sent_after_transaction_commits", "commit_message": "actions: Split out zerver.actions.submessage.\n\nSigned-off-by: Anders Kaseorg ", "code": "def test_submessage_event_sent_after_transaction_commits(self) -> None:\n \n hamlet = self.example_user(\"hamlet\")\n message_id = self.send_stream_message(hamlet, \"Denmark\")\n\n with self.tornado_redirected_to_list([], expected_num_events=1):\n with mock.patch(\"zerver.actions.submessage.send_event\") as m:\n m.side_effect = AssertionError(\n \"Events should be sent only after the transaction commits.\"\n )\n do_add_submessage(hamlet.realm, hamlet.id, message_id, \"whatever\", \"whatever\")\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 139, "n_words": 36, "vocab_size": 33, "complexity": 1, "nloc": 14, "token_counts": 73, "n_ast_nodes": 131, "n_identifiers": 16, "d_id": 17688, "documentation": { "docstring": "\n Tests that `send_event` is hooked to `transaction.on_commit`. 
This is important, because\n we don't want to end up holding locks on message rows for too long if the event queue runs\n into a problem.\n ", "n_words": 33, "vocab_size": 31, "n_whitespaces": 62, "language": "en" } }, { "id": 218483, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ipaddress.py", "file_name": "ipaddress.py", "fun_name": "_reverse_pointer", "commit_message": "add python 3.10.4 for windows", "code": "def _reverse_pointer(self):\n \n reverse_octets = str(self).split('.')[::-1]\n return '.'.join(reverse_octets) + '.in-addr.arpa'\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 31, "n_ast_nodes": 60, "n_identifiers": 6, "d_id": 55338, "documentation": { "docstring": "Return the reverse DNS pointer name for the IPv4 address.\n\n This implements the method described in RFC1035 3.5.\n\n ", "n_words": 18, "vocab_size": 16, "n_whitespaces": 32, "language": "en" } }, { "id": 111759, "commit_id": "8b2eb425274cdb4537fbce4a315aec12a378d6db", "repo": "nni", "path": "nni/retiarii/oneshot/pytorch/base_lightning.py", "file_name": "base_lightning.py", "fun_name": "configure_architecture_optimizers", "commit_message": "Lightning implementation for retiarii oneshot nas (#4479)", "code": "def configure_architecture_optimizers(self):\n \n return None\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 8, "n_ast_nodes": 16, "n_identifiers": 2, "d_id": 24482, "documentation": { "docstring": "\n Hook kept for subclasses. A specific NAS method inheriting this base class should return its architecture optimizers here\n if architecture parameters are needed. Note that lr schedulers are not supported now for architecture_optimizers.\n\n Returns\n ----------\n arc_optimizers : List[Optimizer], Optimizer\n Optimizers used by a specific NAS algorithm. Return None if no architecture optimizers are needed.\n ", "n_words": 54, "vocab_size": 44, "n_whitespaces": 108, "language": "en" } }, { "id": 124016, "commit_id": "52bb8e47d483082e528fc8595005e0813a46efb8", "repo": "ray", "path": "rllib/evaluation/env_runner_v2.py", "file_name": "env_runner_v2.py", "fun_name": "_get_simple_image_viewer", "commit_message": "[RLlib] EnvRunnerV2 and EpisodeV2 that support Connectors. (#25922)", "code": "def _get_simple_image_viewer(self):\n \n # Try to render the env, if required.\n if not self._render:\n return None\n\n try:\n from gym.envs.classic_control.rendering import SimpleImageViewer\n\n return SimpleImageViewer()\n except (ImportError, ModuleNotFoundError):\n self._render = False # disable rendering\n logger.warning(\n \"Could not import gym.envs.classic_control.\"\n \"rendering! 
Try `pip install gym[all]`.\"\n )\n\n return None\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 183, "n_words": 44, "vocab_size": 36, "complexity": 3, "nloc": 13, "token_counts": 51, "n_ast_nodes": 89, "n_identifiers": 12, "d_id": 27497, "documentation": { "docstring": "Maybe construct a SimpleImageViewer instance for episode rendering.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 3757, "commit_id": "a3aae8017a0a40ff2006e2567f71dccb04c997a5", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/async_job_manager.py", "file_name": "async_job_manager.py", "fun_name": "_update_api_throttle_limit", "commit_message": "🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805)\n\n* Facebook Marketing performance improvement\r\n\r\n* add comments and little refactoring\r\n\r\n* fix integration tests with the new config\r\n\r\n* improve job status handling, limit concurrency to 10\r\n\r\n* fix campaign jobs, refactor manager\r\n\r\n* big refactoring of async jobs, support random order of slices\r\n\r\n* update source _read_incremental to hook new state logic\r\n\r\n* fix issues with timeout\r\n\r\n* remove debugging and clean up, improve retry logic\r\n\r\n* merge changes from #8234\r\n\r\n* fix call super _read_increment\r\n\r\n* generalize batch execution, add use_batch flag\r\n\r\n* improve coverage, do some refactoring of spec\r\n\r\n* update test, remove overrides of source\r\n\r\n* add split by AdSet\r\n\r\n* add smaller insights\r\n\r\n* fix end_date < start_date case\r\n\r\n* add account_id to PK\r\n\r\n* add notes\r\n\r\n* fix new streams\r\n\r\n* fix reversed incremental stream\r\n\r\n* update spec.json for SAT\r\n\r\n* upgrade CDK and bump version\r\n\r\nCo-authored-by: Dmytro Rezchykov \r\nCo-authored-by: Eugene Kulak ", "code": "def _update_api_throttle_limit(self):\n \n self._api.account.get_insights()\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 17, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 28, "n_identifiers": 5, "d_id": 545, "documentation": { "docstring": "\n Sends /insights GET request with no parameters so it would\n respond with empty list of data so api use \"x-fb-ads-insights-throttle\"\n header to update current insights throttle limit.\n ", "n_words": 27, "vocab_size": 25, "n_whitespaces": 56, "language": "en" } }, { "id": 201113, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/apps/tests.py", "file_name": "tests.py", "fun_name": "test_two_configs_one_default_app", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_two_configs_one_default_app(self):\n \n with self.settings(INSTALLED_APPS=[\"apps.two_configs_one_default_app\"]):\n config = apps.get_app_config(\"two_configs_one_default_app\")\n self.assertIsInstance(config, TwoConfig)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 41, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 4, "token_counts": 34, "n_ast_nodes": 63, "n_identifiers": 9, "d_id": 49879, "documentation": { "docstring": "\n Load an app that provides two AppConfig classes, one being the default.\n ", "n_words": 12, 
"vocab_size": 12, "n_whitespaces": 27, "language": "en" } }, { "id": 139488, "commit_id": "bc3a1d35cf6e9a5fd7eef908a8e76aefb80ce6a9", "repo": "ray", "path": "rllib/policy/eager_tf_policy_v2.py", "file_name": "eager_tf_policy_v2.py", "fun_name": "variables", "commit_message": "[RLlib] Introduce new policy base classes. (#24742)", "code": "def variables(self):\n \n if isinstance(self.model, tf.keras.Model):\n return self.model.variables\n else:\n return self.model.variables()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 53, "n_words": 10, "vocab_size": 9, "complexity": 2, "nloc": 5, "token_counts": 36, "n_ast_nodes": 60, "n_identifiers": 7, "d_id": 31714, "documentation": { "docstring": "Return the list of all savable variables for this policy.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 248011, "commit_id": "1783156dbcf4164692e66275d1c29857c434995b", "repo": "synapse", "path": "synapse/handlers/account_validity.py", "file_name": "account_validity.py", "fun_name": "_send_renewal_emails", "commit_message": "Add some type hints to datastore (#12423)\n\n* Add some type hints to datastore\r\n\r\n* newsfile\r\n\r\n* change `Collection` to `List`\r\n\r\n* refactor return type of `select_users_txn`\r\n\r\n* correct type hint in `stream.py`\r\n\r\n* Remove `Optional` in `select_users_txn`\r\n\r\n* remove not needed return type in `__init__`\r\n\r\n* Revert change in `get_stream_id_for_event_txn`\r\n\r\n* Remove import from `Literal`", "code": "async def _send_renewal_emails(self) -> None:\n \n expiring_users = await self.store.get_users_expiring_soon()\n\n if expiring_users:\n for user_id, expiration_ts_ms in expiring_users:\n await self._send_renewal_email(\n user_id=user_id, expiration_ts=expiration_ts_ms\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 102, "n_words": 21, "vocab_size": 19, "complexity": 3, "nloc": 12, "token_counts": 41, "n_ast_nodes": 69, "n_identifiers": 9, "d_id": 72045, "documentation": { "docstring": "Gets the list of users whose account is expiring in the amount of time\n configured in the ``renew_at`` parameter from the ``account_validity``\n configuration, and sends renewal emails to all of these users as long as they\n have an email 3PID attached to their account.\n ", "n_words": 44, "vocab_size": 35, "n_whitespaces": 72, "language": "en" } }, { "id": 157100, "commit_id": "c4d35f5515191409913827fd4faa3b69a3d7399a", "repo": "dask", "path": "dask/array/backends.py", "file_name": "backends.py", "fun_name": "ones", "commit_message": "Backend library dispatching for IO in Dask-Array and Dask-DataFrame (#9475)", "code": "def ones(shape, *, dtype=None, meta=None, **kwargs):\n \n raise NotImplementedError\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 21, "n_ast_nodes": 33, "n_identifiers": 6, "d_id": 36851, "documentation": { "docstring": "Create an array of ones\n\n Returns a new array having a specified shape and filled\n with ones.\n ", "n_words": 17, "vocab_size": 15, "n_whitespaces": 38, "language": "en" } }, { "id": 217168, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/encodings/punycode.py", "file_name": 
"punycode.py", "fun_name": "selective_find", "commit_message": "add python 3.10.4 for windows", "code": "def selective_find(str, char, index, pos):\n \n\n l = len(str)\n while 1:\n pos += 1\n if pos == l:\n return (-1, -1)\n c = str[pos]\n if c == char:\n return index+1, pos\n elif c < char:\n index += 1\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 114, "n_words": 37, "vocab_size": 26, "complexity": 5, "nloc": 11, "token_counts": 62, "n_ast_nodes": 98, "n_identifiers": 8, "d_id": 54692, "documentation": { "docstring": "Return a pair (index, pos), indicating the next occurrence of\n char in str. index is the position of the character considering\n only ordinals up to and including char, and pos is the position in\n the full string. index/pos is the starting position in the full\n string.", "n_words": 46, "vocab_size": 30, "n_whitespaces": 57, "language": "en" } }, { "id": 31401, "commit_id": "d3cb28886ac68beba9a6646b422a4d727b056c0c", "repo": "transformers", "path": "src/transformers/models/xlm/modeling_xlm.py", "file_name": "modeling_xlm.py", "fun_name": "forward", "commit_message": "Not use -1e4 as attn mask (#17306)\n\n* Use torch.finfo(self.dtype).min\r\n\r\n* for GPTNeoX\r\n\r\n* for Albert\r\n\r\n* For Splinter\r\n\r\n* Update src/transformers/models/data2vec/modeling_data2vec_audio.py\r\n\r\nCo-authored-by: Patrick von Platen \r\n\r\n* fix -inf used in Bart-like models\r\n\r\n* Fix a few remaining -inf\r\n\r\n* more fix\r\n\r\n* clean up\r\n\r\n* For CLIP\r\n\r\n* For FSMT\r\n\r\n* clean up\r\n\r\n* fix test\r\n\r\n* Add dtype argument and use it for LayoutLMv3\r\n\r\n* update FlaxLongT5Attention\r\n\r\nCo-authored-by: ydshieh \r\nCo-authored-by: Patrick von Platen ", "code": "def forward(self, input, mask, kv=None, cache=None, head_mask=None, output_attentions=False):\n \n # Input is (bs, qlen, dim)\n # Mask is (bs, klen) (non-causal) or (bs, klen, klen)\n bs, qlen, dim = input.size()\n if kv is None:\n klen = qlen if cache is None else cache[\"slen\"] + qlen\n else:\n klen = kv.size(1)\n # assert dim == self.dim, f'Dimensions do not match: {dim} input vs {self.dim} configured'\n n_heads = self.n_heads\n dim_per_head = self.dim // n_heads\n mask_reshape = (bs, 1, qlen, klen) if mask.dim() == 3 else (bs, 1, 1, klen)\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 177, "n_words": 85, "vocab_size": 56, "complexity": 12, "nloc": 42, "token_counts": 429, "n_ast_nodes": 161, "n_identifiers": 16, "d_id": 5731, "documentation": { "docstring": "\n Self-attention (if kv is None) or attention over source sentence (provided by kv).\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 28, "language": "en" } }, { "id": 86390, "commit_id": "8384b745769e08ffa2b10e9a546fce5d9d435da9", "repo": "sentry", "path": "tests/sentry/ingest/ingest_consumer/test_ingest_consumer_processing.py", "file_name": "test_ingest_consumer_processing.py", "fun_name": "test_userreport", "commit_message": "fix(tests): More django cache clears in ingest consumer tests (#39481)", "code": "def test_userreport(django_cache, default_project, monkeypatch):\n \n event_id = uuid.uuid4().hex\n start_time = time.time() - 3600\n\n mgr = EventManager(data={\"event_id\": event_id, \"user\": {\"email\": \"markus+dontatme@sentry.io\"}})\n\n mgr.normalize()\n mgr.save(default_project.id)\n\n (evtuser,) = EventUser.objects.all()\n 
assert not evtuser.name\n\n assert not UserReport.objects.all()\n\n assert process_userreport(\n {\n \"type\": \"user_report\",\n \"start_time\": start_time,\n \"payload\": json.dumps(\n {\n \"name\": \"Hans Gans\",\n \"event_id\": event_id,\n \"comments\": \"hello world\",\n \"email\": \"markus+dontatme@sentry.io\",\n }\n ),\n \"project_id\": default_project.id,\n },\n projects={default_project.id: default_project},\n )\n\n (report,) = UserReport.objects.all()\n assert report.comments == \"hello world\"\n\n (evtuser,) = EventUser.objects.all()\n assert evtuser.name == \"Hans Gans\"\n\n\n@pytest.mark.django_db", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "@pytest.mark.django_db", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 301, "n_words": 75, "vocab_size": 56, "complexity": 1, "nloc": 29, "token_counts": 180, "n_ast_nodes": 321, "n_identifiers": 31, "d_id": 18110, "documentation": { "docstring": "\n Test that user_report-type kafka messages end up in a user report being\n persisted. We additionally test some logic around upserting data in\n eventuser which is also present in the legacy endpoint.\n ", "n_words": 31, "vocab_size": 29, "n_whitespaces": 44, "language": "en" } }, { "id": 219903, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pyio.py", "file_name": "_pyio.py", "fun_name": "readinto", "commit_message": "add python 3.10.4 for windows", "code": "def readinto(self, b):\n \n self._unsupported(\"readinto\")\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 14, "n_ast_nodes": 27, "n_identifiers": 4, "d_id": 55891, "documentation": { "docstring": "Read bytes into a pre-allocated bytes-like object b.\n\n Returns an int representing the number of bytes read (0 for EOF), or\n None if the object is set not to block and has no data to read.\n ", "n_words": 36, "vocab_size": 32, "n_whitespaces": 57, "language": "en" } }, { "id": 123977, "commit_id": "ddd63aba77b0e4da699e358beba37cd907f7cb37", "repo": "ray", "path": "python/ray/workflow/tests/test_recovery.py", "file_name": "test_recovery.py", "fun_name": "test_recovery_cluster_failure_resume_all", "commit_message": "[workflow] Major refactoring - new async workflow executor (#25618)\n\n* major workflow refactoring", "code": "def test_recovery_cluster_failure_resume_all(tmp_path, shutdown_only):\n ray.shutdown()\n\n tmp_path = tmp_path\n subprocess.check_call([\"ray\", \"start\", \"--head\"])\n time.sleep(1)\n workflow_dir = tmp_path / \"workflow\"\n lock_file = tmp_path / \"lock_file\"\n lock = FileLock(lock_file)\n lock.acquire()\n\n proc = run_string_as_driver_nonblocking(\n f", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "proc = run_string_as_driver_nonblocking(\n f\"\"\"@ray.remote", "n_ast_errors": 2, "ast_levels": 9, "n_whitespaces": 62, "n_words": 29, "vocab_size": 21, "complexity": 1, "nloc": 39, "token_counts": 146, "n_ast_nodes": 132, "n_identifiers": 19, "d_id": 27486, "documentation": { "docstring": "\nimport time\nimport ray\nfrom ray import workflow\nfrom filelock import FileLock\n\n@ray.remote", "n_words": 13, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 22594, "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", "repo": "Python", "path": "calculator.py", "file_name": "calculator.py", 
"fun_name": "calc", "commit_message": "refactor: clean code\n\nSigned-off-by: slowy07 ", "code": "def calc(term):\n \n\n # This part is for reading and converting arithmetic terms.\n term = term.replace(\" \", \"\")\n term = term.replace(\"^\", \"**\")\n term = term.replace(\"=\", \"\")\n term = term.replace(\"?\", \"\")\n term = term.replace(\"%\", \"/100.00\")\n term = term.replace(\"rad\", \"radians\")\n term = term.replace(\"mod\", \"%\")\n term = term.replace(\"aval\", \"abs\")\n\n functions = [\n \"sin\",\n \"cos\",\n \"tan\",\n \"pow\",\n \"cosh\",\n \"sinh\",\n \"tanh\",\n \"sqrt\",\n \"pi\",\n \"radians\",\n \"e\",\n ]\n\n # This part is for reading and converting function expressions.\n term = term.lower()\n\n for func in functions:\n if func in term:\n withmath = \"math.\" + func\n term = term.replace(func, withmath)\n\n try:\n\n # here goes the actual evaluating.\n term = eval(term)\n\n # here goes to the error cases.\n except ZeroDivisionError:\n\n print(\"Can't divide by 0. Please try again.\")\n\n except NameError:\n\n print(\"Invalid input. Please try again\")\n\n except AttributeError:\n\n print(\"Please check usage method and try again.\")\n except TypeError:\n print(\"please enter inputs of correct datatype \")\n\n return term\n\n", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 359, "n_words": 143, "vocab_size": 93, "complexity": 7, "nloc": 38, "token_counts": 182, "n_ast_nodes": 345, "n_identifiers": 13, "d_id": 4373, "documentation": { "docstring": "\n input: term of type str\n output: returns the result of the computed term.\n purpose: This function is the actual calculator and the heart of the application\n ", "n_words": 26, "vocab_size": 20, "n_whitespaces": 39, "language": "en" } }, { "id": 260971, "commit_id": "15599753b63f10748ffb374aacd37dbb37806a37", "repo": "scikit-learn", "path": "sklearn/utils/sparsefuncs.py", "file_name": "sparsefuncs.py", "fun_name": "csc_median_axis_0", "commit_message": "DOC ensures sklearn.utils.sparsefuncs.csc_median_axis_0 passes numpydoc validation (#24461)", "code": "def csc_median_axis_0(X):\n \n if not isinstance(X, sp.csc_matrix):\n raise TypeError(\"Expected matrix of CSC format, got %s\" % X.format)\n\n indptr = X.indptr\n n_samples, n_features = X.shape\n median = np.zeros(n_features)\n\n for f_ind, (start, end) in enumerate(zip(indptr[:-1], indptr[1:])):\n\n # Prevent modifying X in place\n data = np.copy(X.data[start:end])\n nz = n_samples - data.size\n median[f_ind] = _get_median(data, nz)\n\n return median\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 109, "n_words": 53, "vocab_size": 46, "complexity": 3, "nloc": 11, "token_counts": 109, "n_ast_nodes": 173, "n_identifiers": 24, "d_id": 76593, "documentation": { "docstring": "Find the median across axis 0 of a CSC matrix.\n\n It is equivalent to doing np.median(X, axis=0).\n\n Parameters\n ----------\n X : sparse matrix of shape (n_samples, n_features)\n Input data. 
It should be of CSC format.\n\n Returns\n -------\n median : ndarray of shape (n_features,)\n Median.\n ", "n_words": 44, "vocab_size": 36, "n_whitespaces": 82, "language": "en" } }, { "id": 79167, "commit_id": "b4bc6818659ae785af39965569ed4bca51f0bf0d", "repo": "wagtail", "path": "wagtail/admin/viewsets/chooser.py", "file_name": "chooser.py", "fun_name": "get_block_class", "commit_message": "Fix DocumentChooserBlock deconstruction for custom document models\n\nFixes #8989. The previous fix #9004 failed for custom document models because ChooserViewset assigns an internal name for the ChooserBlock class based on the model name, and if this is anything other than Document it won't match the name DocumentChooserBlock that it's exposed under in wagtail.documents.blocks. Fix this by replacing the `block_class` property with a `get_block_class` method that lets us specify the class name. As a bonus, user code that defines chooser blocks no longer has to directly hack the `__module__` attribute.", "code": "def get_block_class(self, name=None, module_path=None):\n \n meta = type(\n \"Meta\",\n (self.base_block_class._meta_class,),\n {\n \"icon\": self.icon,\n },\n )\n cls = type(\n name or \"%sChooserBlock\" % self.model_name,\n (self.base_block_class,),\n {\n \"target_model\": self.model,\n \"widget\": self.widget_class(),\n \"Meta\": meta,\n },\n )\n if module_path:\n cls.__module__ = module_path\n return cls\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 247, "n_words": 39, "vocab_size": 32, "complexity": 3, "nloc": 20, "token_counts": 90, "n_ast_nodes": 144, "n_identifiers": 14, "d_id": 16885, "documentation": { "docstring": "\n Returns a StreamField ChooserBlock class using this chooser.\n\n :param name: Name to give to the class; defaults to the model name with \"ChooserBlock\" appended\n :param module_path: The dotted path of the module where the class can be imported from; used when\n deconstructing the block definition for migration files.\n ", "n_words": 48, "vocab_size": 40, "n_whitespaces": 88, "language": "en" } }, { "id": 205312, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/migrations/migration.py", "file_name": "migration.py", "fun_name": "unapply", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def unapply(self, project_state, schema_editor, collect_sql=False):\n \n # Construct all the intermediate states we need for a reverse migration\n to_run = []\n new_state = project_state\n # Phase 1\n for operation in self.operations:\n # If it's irreversible, error out\n if not operation.reversible:\n raise IrreversibleError(\n \"Operation %s in %s is not reversible\" % (operation, self)\n )\n # Preserve new state from previous run to not tamper the same state\n # over all operations\n new_state = new_state.clone()\n old_state = new_state.clone()\n operation.state_forwards(self.app_label, new_state)\n to_run.insert(0, (operation, old_state, new_state))\n\n # Phase 2\n for operation, to_state, from_state in to_run:\n if collect_sql:\n schema_editor.collected_sql.append(\"--\")\n if not operation.reduces_to_sql:\n schema_editor.collected_sql.append(\n \"-- MIGRATION NOW PERFORMS OPERATION THAT CANNOT BE WRITTEN AS SQL:\"\n )\n schema_editor.collected_sql.append(\"-- %s\" % operation.describe())\n schema_editor.collected_sql.append(\"--\")\n if not operation.reduces_to_sql:\n continue\n atomic_operation = 
operation.atomic or (\n self.atomic and operation.atomic is not False\n )\n if not schema_editor.atomic_migration and atomic_operation:\n # Force a transaction on a non-transactional-DDL backend or an\n # atomic operation inside a non-atomic migration.\n with atomic(schema_editor.connection.alias):\n operation.database_backwards(\n self.app_label, schema_editor, from_state, to_state\n )\n else:\n # Normal behaviour\n operation.database_backwards(\n self.app_label, schema_editor, from_state, to_state\n )\n return project_state\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 767, "n_words": 172, "vocab_size": 116, "complexity": 11, "nloc": 36, "token_counts": 214, "n_ast_nodes": 351, "n_identifiers": 28, "d_id": 51088, "documentation": { "docstring": "\n Take a project_state representing all migrations prior to this one\n and a schema_editor for a live database and apply the migration\n in a reverse order.\n\n The backwards migration process consists of two phases:\n\n 1. The intermediate states from right before the first until right\n after the last operation inside this migration are preserved.\n 2. The operations are applied in reverse order using the states\n recorded in step 1.\n ", "n_words": 68, "vocab_size": 49, "n_whitespaces": 138, "language": "en" } }, { "id": 102046, "commit_id": "9e2026f6feba4fc1d60e0d985cbc1ba9c44a4848", "repo": "faceswap", "path": "plugins/extract/align/_base/processing.py", "file_name": "processing.py", "fun_name": "do_filter", "commit_message": "Extract: Implement re-align/2nd pass\n - implement configurable re-align function in extract\n - update locales + documentation\n - re-factor align._base and split to separate modules\n - move normalization method to plugin parent\n - bugfix: FAN use zeros for pre-processing crop\n - lint AlignedFilter", "code": "def do_filter(self) -> bool:\n \n return self._active and self._do_filter\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 2, "nloc": 4, "token_counts": 16, "n_ast_nodes": 28, "n_identifiers": 5, "d_id": 21413, "documentation": { "docstring": "bool: ``True`` if re-aligning is active and faces which failed the aligner filter test\n should not be re-aligned otherwise ``False``", "n_words": 20, "vocab_size": 20, "n_whitespaces": 26, "language": "en" } }, { "id": 196561, "commit_id": "72b9b01d0ede4543c1d3f10e08a79345c550254a", "repo": "sympy", "path": "sympy/algebras/quaternion.py", "file_name": "quaternion.py", "fun_name": "vector_part", "commit_message": "minor changes", "code": "def vector_part(self):\n \n\n return Quaternion(0, self.b, self.c, self.d)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 23, "n_ast_nodes": 36, "n_identifiers": 6, "d_id": 47998, "documentation": { "docstring": "\n Returns the vector part of the quaternion.\n\n Explanation\n ===========\n\n If q is a quaternion given by q = a + b*i + c*j + d*k where a, b, c and d\n are real numbers then the vector part of q is b*i + c*j + d*k.\n\n Returns\n =======\n\n Quaternion: representing vector part of the quaternion.\n\n Examples\n ========\n\n >>> from sympy.algebras.quaternion import Quaternion\n >>> q = Quaternion(1, 1, 1, 1)\n 
>>> q.vector_part()\n 0 + 1*i + 1*j + 1*k\n\n >>> q = Quaternion(4, 8, 13, 12)\n >>> q.vector_part()\n 0 + 8*i + 13*j + 12*k\n\n See Also\n ========\n https://en.wikipedia.org/wiki/Versor\n\n ", "n_words": 98, "vocab_size": 59, "n_whitespaces": 246, "language": "en" } }, { "id": 265579, "commit_id": "de17a651e6f976e8b7c16b49d4e78f6a6988b870", "repo": "netbox", "path": "netbox/ipam/api/views.py", "file_name": "views.py", "fun_name": "get_results_limit", "commit_message": "Closes #10043: Add support for 'limit' query parameter to available VLANs API endpoint", "code": "def get_results_limit(request):\n \n config = get_config()\n try:\n limit = int(request.query_params.get('limit', config.PAGINATE_COUNT)) or config.MAX_PAGE_SIZE\n except ValueError:\n limit = config.PAGINATE_COUNT\n if config.MAX_PAGE_SIZE:\n limit = min(limit, config.MAX_PAGE_SIZE)\n\n return limit\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 64, "n_words": 25, "vocab_size": 19, "complexity": 4, "nloc": 9, "token_counts": 59, "n_ast_nodes": 99, "n_identifiers": 12, "d_id": 78147, "documentation": { "docstring": "\n Return the lesser of the specified limit (if any) and the configured MAX_PAGE_SIZE.\n ", "n_words": 13, "vocab_size": 11, "n_whitespaces": 20, "language": "en" } }, { "id": 20549, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pyparsing/core.py", "file_name": "core.py", "fun_name": "parse_with_tabs", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def parse_with_tabs(self) -> \"ParserElement\":\n \n self.keepTabs = True\n return self\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 8, "token_counts": 15, "n_ast_nodes": 29, "n_identifiers": 3, "d_id": 3421, "documentation": { "docstring": "\n Overrides default behavior to expand ```` s to spaces before parsing the input string.\n Must be called before ``parse_string`` when the input grammar contains elements that\n match ```` characters.\n ", "n_words": 29, "vocab_size": 24, "n_whitespaces": 58, "language": "en" } }, { "id": 204710, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/management/sql.py", "file_name": "sql.py", "fun_name": "sql_flush", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def sql_flush(style, connection, reset_sequences=True, allow_cascade=False):\n \n tables = connection.introspection.django_table_names(\n only_existing=True, include_views=False\n )\n return connection.ops.sql_flush(\n style,\n tables,\n reset_sequences=reset_sequences,\n allow_cascade=allow_cascade,\n )\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 
0, "ast_levels": 9, "n_whitespaces": 68, "n_words": 18, "vocab_size": 17, "complexity": 1, "nloc": 10, "token_counts": 52, "n_ast_nodes": 76, "n_identifiers": 11, "d_id": 50845, "documentation": { "docstring": "\n Return a list of the SQL statements used to flush the database.\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 19, "language": "en" } }, { "id": 6987, "commit_id": "7a2bfd65e97e81f02e4c8821e2a82a78d5f6ab00", "repo": "ludwig", "path": "ludwig/utils/misc_utils.py", "file_name": "misc_utils.py", "fun_name": "set_saved_weights_in_checkpoint_flag", "commit_message": "Set saved_weights_in_checkpoint immediately after creating model. Also adds test. (#2131)\n\n* Set saved_weights_in_checkpoint immediately after creating model. Also adds integration test.\r\n\r\n* Set saved_weights_in_checkpoint config key on load.\r\n\r\nCo-authored-by: Daniel Treiman ", "code": "def set_saved_weights_in_checkpoint_flag(config):\n \n for input_feature in config.get(\"input_features\", []):\n input_feature[\"saved_weights_in_checkpoint\"] = True\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 23, "n_words": 10, "vocab_size": 10, "complexity": 2, "nloc": 3, "token_counts": 25, "n_ast_nodes": 45, "n_identifiers": 4, "d_id": 1097, "documentation": { "docstring": "Adds a flag to all input features indicating that the weights are saved in the checkpoint.\n\n Next time the model is loaded we will restore pre-trained encoder weights from ludwig model (and not load from cache\n or model hub).\n ", "n_words": 39, "vocab_size": 33, "n_whitespaces": 48, "language": "en" } }, { "id": 245868, "commit_id": "79c8295801acedee0cbdbf128a01b9fe162646b0", "repo": "mmdetection", "path": "tests/test_models/test_dense_heads/test_condinst_head.py", "file_name": "test_condinst_head.py", "fun_name": "test_condinst_maskhead_loss", "commit_message": "[Feature]: Support Condinst (#9223)\n\n* [Feature]: support condinst for instance segmentation\r\n\r\n* update\r\n\r\n* update\r\n\r\n* update\r\n\r\n* fix config name and add test unit\r\n\r\n* fix squeeze error\r\n\r\n* add README and chang mask to poly", "code": "def test_condinst_maskhead_loss(self):\n \n s = 256\n img_metas = [{\n 'img_shape': (s, s, 3),\n 'pad_shape': (s, s, 3),\n 'scale_factor': 1,\n }]\n condinst_bboxhead = CondInstBboxHead(\n num_classes=4,\n in_channels=1,\n feat_channels=1,\n stacked_convs=1,\n norm_cfg=None)\n\n mask_feature_head = _fake_mask_feature_head()\n condinst_maskhead = CondInstMaskHead(\n mask_feature_head=mask_feature_head,\n loss_mask=dict(\n type='DiceLoss',\n use_sigmoid=True,\n activate=True,\n eps=5e-6,\n loss_weight=1.0))\n\n # Fcos head expects a multiple levels of features per image\n feats = []\n for i in range(len(condinst_bboxhead.strides)):\n feats.append(\n torch.rand(1, 1, s // (2**(i + 3)), s // (2**(i + 3))))\n feats = tuple(feats)\n cls_scores, bbox_preds, centernesses, param_preds =\\\n condinst_bboxhead.forward(feats)\n\n # Test that empty ground truth encourages the network to\n # predict background\n gt_instances = InstanceData()\n gt_instances.bboxes = torch.empty((0, 4))\n gt_instances.labels = torch.LongTensor([])\n gt_instances.masks = _rand_masks(0, gt_instances.bboxes.numpy(), s, s)\n\n _ = condinst_bboxhead.loss_by_feat(cls_scores, bbox_preds,\n centernesses, param_preds,\n [gt_instances], img_metas)\n # When truth is empty then all mask loss\n # should be zero for random inputs\n positive_infos = 
condinst_bboxhead.get_positive_infos()\n mask_outs = condinst_maskhead.forward(feats, positive_infos)\n empty_gt_mask_losses = condinst_maskhead.loss_by_feat(\n *mask_outs, [gt_instances], img_metas, positive_infos)\n loss_mask = empty_gt_mask_losses['loss_mask']\n self.assertEqual(loss_mask, 0, 'mask loss should be zero')\n\n # When truth is non-empty then all cls, box loss and centerness loss\n # should be nonzero for random inputs\n gt_instances = InstanceData()\n gt_instances.bboxes = torch.Tensor(\n [[23.6667, 23.8757, 238.6326, 151.8874]])\n gt_instances.labels = torch.LongTensor([2])\n gt_instances.masks = _rand_masks(1, gt_instances.bboxes.numpy(), s, s)\n\n _ = condinst_bboxhead.loss_by_feat(cls_scores, bbox_preds,\n centernesses, param_preds,\n [gt_instances], img_metas)\n positive_infos = condinst_bboxhead.get_positive_infos()\n mask_outs = condinst_maskhead.forward(feats, positive_infos)\n one_gt_mask_losses = condinst_maskhead.loss_by_feat(\n *mask_outs, [gt_instances], img_metas, positive_infos)\n loss_mask = one_gt_mask_losses['loss_mask']\n self.assertGreater(loss_mask, 0, 'mask loss should be nonzero')\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 917, "n_words": 228, "vocab_size": 134, "complexity": 2, "nloc": 56, "token_counts": 412, "n_ast_nodes": 641, "n_identifiers": 55, "d_id": 70917, "documentation": { "docstring": "Tests condinst maskhead loss when truth is empty and non-empty.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 270918, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/base_layer_v1.py", "file_name": "base_layer_v1.py", "fun_name": "get_updates_for", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def get_updates_for(self, inputs):\n \n if inputs is None:\n # Requesting unconditional updates.\n return [u for u in self.updates if u._unconditional_update]\n\n # Requesting input-conditional updates.\n updates = [u for u in self.updates if not u._unconditional_update]\n inputs = tf.nest.flatten(inputs)\n reachable = tf_utils.get_reachable_from_inputs(inputs, updates)\n return [u for u in updates if u in reachable]\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 121, "n_words": 50, "vocab_size": 27, "complexity": 8, "nloc": 7, "token_counts": 75, "n_ast_nodes": 117, "n_identifiers": 12, "d_id": 80599, "documentation": { "docstring": "Retrieves updates relevant to a specific set of inputs.\n\n Args:\n inputs: Input tensor or list/tuple of input tensors.\n\n Returns:\n List of update ops of the layer that depend on `inputs`.\n ", "n_words": 30, "vocab_size": 27, "n_whitespaces": 69, "language": "en" } }, { "id": 100568, "commit_id": "bdbbad4d310fb606b6f412aa81e9f57ccd994e97", "repo": "faceswap", "path": "lib/gpu_stats/cpu.py", "file_name": "cpu.py", "fun_name": "_get_free_vram", "commit_message": "Refactor lib.gpu_stats (#1218)\n\n* inital gpu_stats refactor\r\n\r\n* Add dummy CPU Backend\r\n\r\n* Update Sphinx documentation", "code": "def _get_free_vram(self) -> List[float]:\n \n vram = []\n self._log(\"debug\", f\"GPU VRAM free: {vram}\")\n return vram\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 42, "n_words": 14, 
"vocab_size": 13, "complexity": 1, "nloc": 11, "token_counts": 26, "n_ast_nodes": 50, "n_identifiers": 6, "d_id": 20032, "documentation": { "docstring": " Obtain the amount of RAM that is available, in Megabytes, for the running system.\n\n Returns\n -------\n list\n An empty list for CPU backends\n ", "n_words": 23, "vocab_size": 20, "n_whitespaces": 64, "language": "en" } }, { "id": 60716, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/index/collector.py", "file_name": "collector.py", "fun_name": "_ensure_html_response", "commit_message": "upd; format", "code": "def _ensure_html_response(url, session):\n # type: (str, PipSession) -> None\n \n scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)\n if scheme not in {'http', 'https'}:\n raise _NotHTTP()\n\n resp = session.head(url, allow_redirects=True)\n raise_for_status(resp)\n\n _ensure_html_header(resp)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 58, "n_words": 30, "vocab_size": 29, "complexity": 2, "nloc": 7, "token_counts": 60, "n_ast_nodes": 98, "n_identifiers": 17, "d_id": 12258, "documentation": { "docstring": "Send a HEAD request to the URL, and ensure the response contains HTML.\n\n Raises `_NotHTTP` if the URL is not available for a HEAD request, or\n `_NotHTML` if the content type is not text/html.\n ", "n_words": 34, "vocab_size": 26, "n_whitespaces": 43, "language": "en" } }, { "id": 197094, "commit_id": "f8674bfe4988332e7ce60ceb36b365ce9aff662a", "repo": "sympy", "path": "sympy/diffgeom/diffgeom.py", "file_name": "diffgeom.py", "fun_name": "__new__", "commit_message": "Update the sympy.diffgeom mutability deprecations", "code": "def __new__(cls, name, patch, symbols=None, relations={}, **kwargs):\n if not isinstance(name, Str):\n name = Str(name)\n\n # canonicallize the symbols\n if symbols is None:\n names = kwargs.get('names', None)\n if names is None:\n symbols = Tuple(\n *[Symbol('%s_%s' % (name.name, i), real=True)\n for i in range(patch.dim)]\n )\n else:\n sympy_deprecation_warning(\n f,\n deprecated_since_version=\"1.7\",\n active_deprecations_target=\"deprecated-diffgeom-mutable\",\n )\n symbols = Tuple(\n *[Symbol(n, real=True) for n in names]\n )\n else:\n syms = []\n for s in symbols:\n if isinstance(s, Symbol):\n syms.append(Symbol(s.name, **s._assumptions.generator))\n elif isinstance(s, str):\n sympy_deprecation_warning(\n f,\n\n deprecated_since_version=\"1.7\",\n active_deprecations_target=\"deprecated-diffgeom-mutable\",\n )\n syms.append(Symbol(s, real=True))\n symbols = Tuple(*syms)\n\n # canonicallize the relations\n rel_temp = {}\n for k,v in relations.items():\n s1, s2 = k\n if not isinstance(s1, Str):\n s1 = Str(s1)\n if not isinstance(s2, Str):\n s2 = Str(s2)\n key = Tuple(s1, s2)\n\n # Old version used Lambda as a value.\n if isinstance(v, Lambda):\n v = (tuple(v.signature), tuple(v.expr))\n else:\n v = (tuple(v[0]), tuple(v[1]))\n rel_temp[key] = v\n relations = Dict(rel_temp)\n\n # construct the object\n obj = super().__new__(cls, name, patch, symbols, relations)\n\n # Add deprecated attributes\n obj.transforms = _deprecated_dict(\n , {})\n obj._names = [str(n) for n in symbols]\n obj.patch.coord_systems.append(obj) # deprecated\n obj._dummies = [Dummy(str(n)) for n in symbols] # deprecated\n obj._dummy = Dummy()\n\n return obj\n", "url": "https://github.com/sympy/sympy.git", 
"language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 923, "n_words": 188, "vocab_size": 109, "complexity": 15, "nloc": 73, "token_counts": 399, "n_ast_nodes": 681, "n_identifiers": 50, "d_id": 48334, "documentation": { "docstring": "\nThe 'names' argument to CoordSystem is deprecated. Use 'symbols' instead. That\nis, replace\n\n CoordSystem(..., names={names})\n\nwith\n\n CoordSystem(..., symbols=[{', '.join([\"Symbol(\" + repr(n) + \", real=True)\" for n in names])}])\n \n\nPassing a string as the coordinate symbol name to CoordSystem is deprecated.\nPass a Symbol with the appropriate name and assumptions instead.\n\nThat is, replace {s} with Symbol({s!r}, real=True).\n \n CoordSystem.transforms is deprecated. The CoordSystem class is now\n immutable. Use the 'relations' keyword argument to the\n CoordSystems() constructor to specify relations.\n ", "n_words": 78, "vocab_size": 52, "n_whitespaces": 167, "language": "en" } }, { "id": 60402, "commit_id": "cc4d0564756ca067516f71718a3d135996525909", "repo": "transferlearning", "path": "code/deep/BJMMD/caffe/scripts/cpp_lint.py", "file_name": "cpp_lint.py", "fun_name": "GetHeaderGuardCPPVariable", "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", "code": "def GetHeaderGuardCPPVariable(filename):\n \n\n # Restores original filename in case that cpplint is invoked from Emacs's\n # flymake.\n filename = re.sub(r'_flymake\\.h$', '.h', filename)\n filename = re.sub(r'/\\.flymake/([^/]*)$', r'/\\1', filename)\n\n fileinfo = FileInfo(filename)\n file_path_from_root = fileinfo.RepositoryName()\n if _root:\n file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)\n return re.sub(r'[-./\\s]', '_', file_path_from_root).upper() + '_'\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 61, "n_words": 49, "vocab_size": 38, "complexity": 2, "nloc": 8, "token_counts": 85, "n_ast_nodes": 145, "n_identifiers": 12, "d_id": 12130, "documentation": { "docstring": "Returns the CPP variable that should be used as a header guard.\n\n Args:\n filename: The name of a C++ header file.\n\n Returns:\n The CPP variable that should be used as a header guard in the\n named file.\n\n ", "n_words": 37, "vocab_size": 23, "n_whitespaces": 49, "language": "en" } }, { "id": 42252, "commit_id": "e644793f0ac2b1be178425f20f529121f37f29de", "repo": "seaborn", "path": "seaborn/palettes.py", "file_name": "palettes.py", "fun_name": "husl_palette", "commit_message": "Convert color palette docstrings to notebooks (#3034)\n\n* Convert color palette docstrings to notebooks and rerun all with py310 kernel\r\n\r\n* Add v0.12.1 release notes to index\r\n\r\n* Improve failure mode when ipywidgets is not involved\r\n\r\n* Update palettes docstrings\r\n\r\n* Remove all other doctest-style examples\r\n\r\n* Remove doctest-oriented testing infrastructure\r\n\r\n* Mention in release notes\r\n\r\n* Skip colormap patch test on matplotlib's where it's not relevant\r\n\r\n* Use more robust approach to mpl backcompat", "code": "def husl_palette(n_colors=6, h=.01, s=.9, l=.65, as_cmap=False): # noqa\n \n if as_cmap:\n n_colors = 256\n hues = np.linspace(0, 1, int(n_colors) + 1)[:-1]\n hues += h\n hues %= 1\n hues *= 359\n s *= 99\n l *= 99 # noqa\n palette = [_color_to_rgb((h_i, s, l), input=\"husl\") for h_i in hues]\n if as_cmap:\n return mpl.colors.ListedColormap(palette, \"hsl\")\n else:\n return 
_ColorPalette(palette)\n\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 111, "n_words": 55, "vocab_size": 42, "complexity": 4, "nloc": 14, "token_counts": 117, "n_ast_nodes": 173, "n_identifiers": 18, "d_id": 7512, "documentation": { "docstring": "\n Return hues with constant lightness and saturation in the HUSL system.\n\n The hues are evenly sampled along a circular path. The resulting palette will be\n appropriate for categorical or cyclical data.\n\n The `h`, `l`, and `s` values should be between 0 and 1.\n\n This function is similar to :func:`hls_palette`, but it uses a nonlinear color\n space that is more perceptually uniform.\n\n Parameters\n ----------\n n_colors : int\n Number of colors in the palette.\n h : float\n The value of the first hue.\n l : float\n The lightness value.\n s : float\n The saturation intensity.\n as_cmap : bool\n If True, return a matplotlib colormap object.\n\n Returns\n -------\n palette\n list of RGB tuples or :class:`matplotlib.colors.ListedColormap`\n\n See Also\n --------\n hls_palette : Make a palette using evenly spaced hues in the HSL system.\n\n Examples\n --------\n .. include:: ../docstrings/husl_palette.rst\n\n ", "n_words": 133, "vocab_size": 97, "n_whitespaces": 245, "language": "en" } }, { "id": 219476, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_aix_support.py", "file_name": "_aix_support.py", "fun_name": "aix_platform", "commit_message": "add python 3.10.4 for windows", "code": "def aix_platform():\n # type: () -> str\n \n vrmf, bd = _aix_bosmp64()\n return _aix_tag(_aix_vrtl(vrmf), bd)\n\n\n# extract vrtl from the BUILD_GNU_TYPE as an int", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 34, "n_words": 23, "vocab_size": 22, "complexity": 1, "nloc": 3, "token_counts": 22, "n_ast_nodes": 42, "n_identifiers": 6, "d_id": 55599, "documentation": { "docstring": "\n AIX filesets are identified by four decimal values: V.R.M.F.\n V (version) and R (release) can be retreived using ``uname``\n Since 2007, starting with AIX 5.3 TL7, the M value has been\n included with the fileset bos.mp64 and represents the Technology\n Level (TL) of AIX. 
The F (Fix) value also increases, but is not\n relevant for comparing releases and binary compatibility.\n For binary compatibility the so-called builddate is needed.\n Again, the builddate of an AIX release is associated with bos.mp64.\n AIX ABI compatibility is described as guaranteed at: https://www.ibm.com/\\\n support/knowledgecenter/en/ssw_aix_72/install/binary_compatability.html\n\n For pep425 purposes the AIX platform tag becomes:\n \"aix-{:1x}{:1d}{:02d}-{:04d}-{}\".format(v, r, tl, builddate, bitsize)\n e.g., \"aix-6107-1415-32\" for AIX 6.1 TL7 bd 1415, 32-bit\n and, \"aix-6107-1415-64\" for AIX 6.1 TL7 bd 1415, 64-bit\n ", "n_words": 120, "vocab_size": 90, "n_whitespaces": 167, "language": "en" } }, { "id": 200252, "commit_id": "8fc835bcd86ea080644783a363e47adca6dff3a7", "repo": "sympy", "path": "sympy/diffgeom/diffgeom.py", "file_name": "diffgeom.py", "fun_name": "__call__", "commit_message": "Remove redundant list calls", "code": "def __call__(self, *fields):\n \n orders = (covariant_order(e) + contravariant_order(e) for e in self.args)\n mul = 1/Mul(*(factorial(o) for o in orders))\n perms = permutations(fields)\n perms_par = (Permutation(\n p).signature() for p in permutations(range(len(fields))))\n tensor_prod = TensorProduct(*self.args)\n return mul*Add(*[tensor_prod(*p[0])*p[1] for p in zip(perms, perms_par)])\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 100, "n_words": 40, "vocab_size": 29, "complexity": 5, "nloc": 8, "token_counts": 117, "n_ast_nodes": 185, "n_identifiers": 24, "d_id": 49566, "documentation": { "docstring": "Apply on a list of vector_fields.\n The expression is rewritten internally in terms of tensor products and evaluated.", "n_words": 18, "vocab_size": 17, "n_whitespaces": 24, "language": "en" } }, { "id": 205420, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/base.py", "file_name": "base.py", "fun_name": "_check_ordering", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _check_ordering(cls):\n \n if cls._meta._ordering_clash:\n return [\n checks.Error(\n \"'ordering' and 'order_with_respect_to' cannot be used together.\",\n obj=cls,\n id=\"models.E021\",\n ),\n ]\n\n if cls._meta.order_with_respect_to or not cls._meta.ordering:\n return []\n\n if not isinstance(cls._meta.ordering, (list, tuple)):\n return [\n checks.Error(\n \"'ordering' must be a tuple or list (even if you want to order by only one field).\",\n obj=cls,\n id=\"models.E014\",\n )\n ]\n\n errors = []\n fields = cls._meta.ordering\n\n # Skip expressions and '?' 
fields.\n fields = (f for f in fields if isinstance(f, str) and f != \"?\")\n\n # Convert \"-field\" to \"field\".\n fields = ((f[1:] if f.startswith(\"-\") else f) for f in fields)\n\n # Separate related fields and non-related fields.\n _fields = []\n related_fields = []\n for f in fields:\n if LOOKUP_SEP in f:\n related_fields.append(f)\n else:\n _fields.append(f)\n fields = _fields\n\n # Check related fields.\n for field in related_fields:\n _cls = cls\n fld = None\n for part in field.split(LOOKUP_SEP):\n try:\n # pk is an alias that won't be found by opts.get_field.\n if part == \"pk\":\n fld = _cls._meta.pk\n else:\n fld = _cls._meta.get_field(part)\n if fld.is_relation:\n _cls = fld.path_infos[-1].to_opts.model\n else:\n _cls = None\n except (FieldDoesNotExist, AttributeError):\n if fld is None or (\n fld.get_transform(part) is None and fld.get_lookup(part) is None\n ):\n errors.append(\n checks.Error(\n \"'ordering' refers to the nonexistent field, \"\n \"related field, or lookup '%s'.\" % field,\n obj=cls,\n id=\"models.E015\",\n )\n )\n\n # Skip ordering on pk. This is always a valid order_by field\n # but is an alias and therefore won't be found by opts.get_field.\n fields = {f for f in fields if f != \"pk\"}\n\n # Check for invalid or nonexistent fields in ordering.\n invalid_fields = []\n\n # Any field name that is not present in field_names does not exist.\n # Also, ordering by m2m fields is not allowed.\n opts = cls._meta\n valid_fields = set(\n chain.from_iterable(\n (f.name, f.attname)\n if not (f.auto_created and not f.concrete)\n else (f.field.related_query_name(),)\n for f in chain(opts.fields, opts.related_objects)\n )\n )\n\n invalid_fields.extend(fields - valid_fields)\n\n for invalid_field in invalid_fields:\n errors.append(\n checks.Error(\n \"'ordering' refers to the nonexistent field, related \"\n \"field, or lookup '%s'.\" % invalid_field,\n obj=cls,\n id=\"models.E015\",\n )\n )\n return errors\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 1568, "n_words": 332, "vocab_size": 161, "complexity": 26, "nloc": 78, "token_counts": 414, "n_ast_nodes": 673, "n_identifiers": 51, "d_id": 51123, "documentation": { "docstring": "\n Check \"ordering\" option -- is it a list of strings and do all fields\n exist?\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 37, "language": "en" } }, { "id": 196069, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/free_groups.py", "file_name": "free_groups.py", "fun_name": "cyclic_reduction", "commit_message": "Updated import locations", "code": "def cyclic_reduction(self, removed=False):\n \n word = self.copy()\n g = self.group.identity\n while not word.is_cyclically_reduced():\n exp1 = abs(word.exponent_syllable(0))\n exp2 = abs(word.exponent_syllable(-1))\n exp = min(exp1, exp2)\n start = word[0]**abs(exp)\n end = word[-1]**abs(exp)\n word = start**-1*word*end**-1\n g = g*start\n if removed:\n return word, g\n return word\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 171, "n_words": 41, "vocab_size": 28, "complexity": 3, "nloc": 14, "token_counts": 113, "n_ast_nodes": 184, "n_identifiers": 17, "d_id": 47569, "documentation": { "docstring": "Return a cyclically reduced version of the word. 
Unlike\n `identity_cyclic_reduction`, this will not cyclically permute\n the reduced word - just remove the \"unreduced\" bits on either\n side of it. Compare the examples with those of\n `identity_cyclic_reduction`.\n\n When `removed` is `True`, return a tuple `(word, r)` where\n self `r` is such that before the reduction the word was either\n `r*word*r**-1`.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import free_group\n >>> F, x, y = free_group(\"x, y\")\n >>> (x**2*y**2*x**-1).cyclic_reduction()\n x*y**2\n >>> (x**-3*y**-1*x**5).cyclic_reduction()\n y**-1*x**2\n >>> (x**-3*y**-1*x**5).cyclic_reduction(removed=True)\n (y**-1*x**2, x**-3)\n\n ", "n_words": 83, "vocab_size": 66, "n_whitespaces": 209, "language": "en" } }, { "id": 66794, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/patches/v13_0/set_operation_time_based_on_operating_cost.py", "file_name": "set_operation_time_based_on_operating_cost.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute():\n\tfrappe.reload_doc(\"manufacturing\", \"doctype\", \"bom\")\n\tfrappe.reload_doc(\"manufacturing\", \"doctype\", \"bom_operation\")\n\n\tfrappe.db.sql(\n\t\t\n\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 5, "n_words": 10, "vocab_size": 8, "complexity": 1, "nloc": 14, "token_counts": 32, "n_ast_nodes": 63, "n_identifiers": 5, "d_id": 14337, "documentation": { "docstring": "\n\t\tUPDATE\n\t\t\t`tabBOM Operation`\n\t\tSET\n\t\t\ttime_in_mins = (operating_cost * 60) / hour_rate\n\t\tWHERE\n\t\t\ttime_in_mins = 0 AND operating_cost > 0\n\t\t\tAND hour_rate > 0 AND docstatus = 1 AND parenttype = \"BOM\"\n\t", "n_words": 31, "vocab_size": 20, "n_whitespaces": 24, "language": "en" } }, { "id": 261698, "commit_id": "2b34dfde2453743fa046312a49cc312a5586ea04", "repo": "scikit-learn", "path": "sklearn/semi_supervised/_label_propagation.py", "file_name": "_label_propagation.py", "fun_name": "predict", "commit_message": "DOC Improve docs of BaseLabelPropagation.transduction_ (#24985)", "code": "def predict(self, X):\n \n # Note: since `predict` does not accept semi-supervised labels as input,\n # `fit(X, y).predict(X) != fit(X, y).transduction_`.\n # Hence, `fit_predict` is not implemented.\n # See https://github.com/scikit-learn/scikit-learn/pull/24898\n probas = self.predict_proba(X)\n return self.classes_[np.argmax(probas, axis=1)].ravel()\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 84, "n_words": 35, "vocab_size": 31, "complexity": 1, "nloc": 3, "token_counts": 36, "n_ast_nodes": 62, "n_identifiers": 10, "d_id": 76941, "documentation": { "docstring": "Perform inductive inference across the model.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n The data matrix.\n\n Returns\n -------\n y : ndarray of shape (n_samples,)\n Predictions for input data.\n ", "n_words": 30, "vocab_size": 27, "n_whitespaces": 101, "language": "en" } }, { "id": 110055, "commit_id": "13438f842729df1b04445d44ea83f616d1b85567", "repo": "matplotlib", "path": "lib/matplotlib/cbook/__init__.py", "file_name": "__init__.py", "fun_name": "_array_perimeter", "commit_message": "Fix some minor docstring typos", "code": "def _array_perimeter(arr):\n \n # note we use Python's half-open ranges to avoid repeating\n # the corners\n forward = 
np.s_[0:-1] # [0 ... -1)\n backward = np.s_[-1:0:-1] # [-1 ... 0)\n return np.concatenate((\n arr[0, forward],\n arr[forward, -1],\n arr[-1, backward],\n arr[backward, 0],\n ))\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 95, "n_words": 40, "vocab_size": 35, "complexity": 1, "nloc": 9, "token_counts": 69, "n_ast_nodes": 104, "n_identifiers": 7, "d_id": 23900, "documentation": { "docstring": "\n Get the elements on the perimeter of *arr*.\n\n Parameters\n ----------\n arr : ndarray, shape (M, N)\n The input array.\n\n Returns\n -------\n ndarray, shape (2*(M - 1) + 2*(N - 1),)\n The elements on the perimeter of the array::\n\n [arr[0, 0], ..., arr[0, -1], ..., arr[-1, -1], ..., arr[-1, 0], ...]\n\n Examples\n --------\n >>> i, j = np.ogrid[:3, :4]\n >>> a = i*10 + j\n >>> a\n array([[ 0, 1, 2, 3],\n [10, 11, 12, 13],\n [20, 21, 22, 23]])\n >>> _array_perimeter(a)\n array([ 0, 1, 2, 3, 13, 23, 22, 21, 20, 10])\n ", "n_words": 92, "vocab_size": 64, "n_whitespaces": 191, "language": "en" } }, { "id": 176920, "commit_id": "e5f1edb82a379ceb6afcf421fa5f6b4cb43cfbaf", "repo": "networkx", "path": "networkx/algorithms/link_analysis/hits_alg.py", "file_name": "hits_alg.py", "fun_name": "_hits_numpy", "commit_message": "Make HITS numpy and scipy private functions (#5771)\n\n* Make HITS numpy and scipy private functions\r\n\r\n* fix examples with correct imports\r\n\r\n* remove functions from TOC", "code": "def _hits_numpy(G, normalized=True):\n \n import numpy as np\n\n if len(G) == 0:\n return {}, {}\n adj_ary = nx.to_numpy_array(G)\n # Hub matrix\n H = adj_ary @ adj_ary.T\n e, ev = np.linalg.eig(H)\n h = ev[:, np.argmax(e)] # eigenvector corresponding to the maximum eigenvalue\n # Authority matrix\n A = adj_ary.T @ adj_ary\n e, ev = np.linalg.eig(A)\n a = ev[:, np.argmax(e)] # eigenvector corresponding to the maximum eigenvalue\n if normalized:\n h /= h.sum()\n a /= a.sum()\n else:\n h /= h.max()\n a /= a.max()\n hubs = dict(zip(G, map(float, h)))\n authorities = dict(zip(G, map(float, a)))\n return hubs, authorities\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 179, "n_words": 91, "vocab_size": 53, "complexity": 3, "nloc": 20, "token_counts": 173, "n_ast_nodes": 276, "n_identifiers": 27, "d_id": 42172, "documentation": { "docstring": "Returns HITS hubs and authorities values for nodes.\n\n The HITS algorithm computes two numbers for a node.\n Authorities estimates the node value based on the incoming links.\n Hubs estimates the node value based on outgoing links.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n normalized : bool (default=True)\n Normalize results by the sum of all of the values.\n\n Returns\n -------\n (hubs,authorities) : two-tuple of dictionaries\n Two dictionaries keyed by node containing the hub and authority\n values.\n\n Examples\n --------\n >>> G = nx.path_graph(4)\n\n The `hubs` and `authorities` are given by the eigenvectors corresponding to the\n maximum eigenvalues of the hubs_matrix and the authority_matrix, respectively.\n\n The ``hubs`` and ``authority`` matrices are computed from the adjancency\n matrix:\n\n >>> adj_ary = nx.to_numpy_array(G)\n >>> hubs_matrix = adj_ary @ adj_ary.T\n >>> authority_matrix = adj_ary.T @ adj_ary\n\n `_hits_numpy` maps the eigenvector corresponding to the maximum eigenvalue\n of the respective matrices to the 
nodes in `G`:\n\n >>> from networkx.algorithms.link_analysis.hits_alg import _hits_numpy\n >>> hubs, authority = _hits_numpy(G)\n\n Notes\n -----\n The eigenvector calculation uses NumPy's interface to LAPACK.\n\n The HITS algorithm was designed for directed graphs but this\n algorithm does not check if the input graph is directed and will\n execute on undirected graphs.\n\n References\n ----------\n .. [1] A. Langville and C. Meyer,\n \"A survey of eigenvector methods of web information retrieval.\"\n http://citeseer.ist.psu.edu/713792.html\n .. [2] Jon Kleinberg,\n Authoritative sources in a hyperlinked environment\n Journal of the ACM 46 (5): 604-32, 1999.\n doi:10.1145/324133.324140.\n http://www.cs.cornell.edu/home/kleinber/auth.pdf.\n ", "n_words": 233, "vocab_size": 146, "n_whitespaces": 397, "language": "en" } }, { "id": 166568, "commit_id": "44b660dc4a07f4fb507c31795ae63dca2e6e9440", "repo": "pandas", "path": "pandas/util/_print_versions.py", "file_name": "_print_versions.py", "fun_name": "_get_dependency_info", "commit_message": "fix pandas.show_versions() and remove pin for setuptools (#47096)", "code": "def _get_dependency_info() -> dict[str, JSONSerializable]:\n \n deps = [\n \"pandas\",\n # required\n \"numpy\",\n \"pytz\",\n \"dateutil\",\n # install / build,\n \"setuptools\",\n \"pip\",\n \"Cython\",\n # test\n \"pytest\",\n \"hypothesis\",\n # docs\n \"sphinx\",\n # Other, need a min version\n \"blosc\",\n \"feather\",\n \"xlsxwriter\",\n \"lxml.etree\",\n \"html5lib\",\n \"pymysql\",\n \"psycopg2\",\n \"jinja2\",\n # Other, not imported.\n \"IPython\",\n \"pandas_datareader\",\n ]\n deps.extend(list(VERSIONS))\n\n result: dict[str, JSONSerializable] = {}\n for modname in deps:\n mod = import_optional_dependency(modname, errors=\"ignore\")\n result[modname] = get_version(mod) if mod else None\n return result\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 289, "n_words": 72, "vocab_size": 61, "complexity": 3, "nloc": 32, "token_counts": 106, "n_ast_nodes": 191, "n_identifiers": 14, "d_id": 39828, "documentation": { "docstring": "\n Returns dependency information as a JSON serializable dictionary.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 15, "language": "en" } }, { "id": 140502, "commit_id": "21f1e8a5c65241ef618df88885c38222550209dd", "repo": "ray", "path": "python/ray/util/actor_pool.py", "file_name": "actor_pool.py", "fun_name": "push", "commit_message": "[Core] Use newly pushed actor for existing pending tasks (#24980)\n\nNewly pushed actors will never be used with existing pending submits, so the worker will not be used to speed up existing tasks. 
If _return_actor is called at the end of push instead, the actor is pushed to _idle_actors and immediately used if there are pending submits.", "code": "def push(self, actor):\n \n busy_actors = []\n if self._future_to_actor.values():\n _, busy_actors = zip(*self._future_to_actor.values())\n if actor in self._idle_actors or actor in busy_actors:\n raise ValueError(\"Actor already belongs to current ActorPool\")\n else:\n self._return_actor(actor)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 97, "n_words": 29, "vocab_size": 24, "complexity": 4, "nloc": 8, "token_counts": 60, "n_ast_nodes": 103, "n_identifiers": 11, "d_id": 31978, "documentation": { "docstring": "Pushes a new actor into the current list of idle actors.\n\n Examples:\n >>> @ray.remote # doctest: +SKIP\n >>> class Actor: # doctest: +SKIP\n ... ... # doctest: +SKIP\n >>> a1, b1 = Actor.remote(), Actor.remote() # doctest: +SKIP\n >>> pool = ActorPool([a1]) # doctest: +SKIP\n >>> pool.submit(lambda a, v: a.double.remote(v), 1) # doctest: +SKIP\n >>> print(pool.get_next()) # doctest: +SKIP\n 2\n >>> pool2 = ActorPool([b1]) # doctest: +SKIP\n >>> pool2.push(pool.pop_idle()) # doctest: +SKIP\n ", "n_words": 71, "vocab_size": 37, "n_whitespaces": 199, "language": "en" } }, { "id": 154354, "commit_id": "eee5f435f68786778184c4886ff25d386fce0c4f", "repo": "modin", "path": "modin/core/dataframe/pandas/partitioning/partition_manager.py", "file_name": "partition_manager.py", "fun_name": "concat", "commit_message": "PERF-#4892: Compute `lengths` in `rebalance_partitions` when possible (#4893)\n\nSigned-off-by: Myachev ", "code": "def concat(cls, axis, left_parts, right_parts):\n \n # TODO: Possible change is `isinstance(right_parts, list)`\n if type(right_parts) is list:\n # `np.array` with partitions of empty ModinFrame has a shape (0,)\n # but `np.concatenate` can concatenate arrays only if its shapes at\n # specified axis are equals, so filtering empty frames to avoid concat error\n right_parts = [o for o in right_parts if o.size != 0]\n to_concat = (\n [left_parts] + right_parts if left_parts.size != 0 else right_parts\n )\n result = (\n np.concatenate(to_concat, axis=axis) if len(to_concat) else left_parts\n )\n else:\n result = np.append(left_parts, right_parts, axis=axis)\n if axis == 0:\n return cls.rebalance_partitions(result)\n else:\n return result, None\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 294, "n_words": 101, "vocab_size": 76, "complexity": 7, "nloc": 15, "token_counts": 107, "n_ast_nodes": 167, "n_identifiers": 16, "d_id": 35945, "documentation": { "docstring": "\n Concatenate the blocks of partitions with another set of blocks.\n\n Parameters\n ----------\n axis : int\n The axis to concatenate to.\n left_parts : np.ndarray\n NumPy array of partitions to concatenate with.\n right_parts : np.ndarray or list\n NumPy array of partitions to be concatenated.\n\n Returns\n -------\n np.ndarray\n A new NumPy array with concatenated partitions.\n list[int] or None\n Row lengths if possible to compute it.\n\n Notes\n -----\n Assumes that the blocks are already the same shape on the\n dimension being concatenated. 
A ValueError will be thrown if this\n condition is not met.\n ", "n_words": 89, "vocab_size": 61, "n_whitespaces": 257, "language": "en" } }, { "id": 323178, "commit_id": "44a290e94d1becd1f09fddc3d873f9e19c9d6919", "repo": "PaddleNLP", "path": "paddlenlp/trainer/trainer_utils.py", "file_name": "trainer_utils.py", "fun_name": "_secs2timedelta", "commit_message": "[Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. (#1761)\n\n* add some datasets for finetune.\r\n\r\n* support fine tune for all tastks.\r\n\r\n* add trainer prototype.\r\n\r\n* init verison for paddlenlp trainer.\r\n\r\n* refine trainer.\r\n\r\n* update for some details.\r\n\r\n* support multi-cards training evaluation.\r\n\r\n* support load from ckpt.\r\n\r\n* support for export inference model.\r\n\r\n* first version of trainer.\r\n\r\n* seq cls support clue.\r\n\r\n* trainer support for token classification and question answersing tasks.\r\n\r\n* fix as reviews.\r\n\r\nCo-authored-by: Zeyu Chen ", "code": "def _secs2timedelta(secs):\n \n\n msec = int(abs(secs - int(secs)) * 100)\n return f\"{datetime.timedelta(seconds=int(secs))}.{msec:02d}\"\n\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 20, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 3, "token_counts": 25, "n_ast_nodes": 70, "n_identifiers": 8, "d_id": 118399, "documentation": { "docstring": "\n convert seconds to hh:mm:ss.msec, msecs rounded to 2 decimals\n ", "n_words": 9, "vocab_size": 8, "n_whitespaces": 16, "language": "en" } }, { "id": 263853, "commit_id": "6e1bfa2de254d8ae302f54dcea0cfefae4dd3585", "repo": "pyinstaller", "path": "PyInstaller/building/build_main.py", "file_name": "build_main.py", "fun_name": "_get_module_collection_mode", "commit_message": "building: more module collection modes, consolidate noarchive codepath\n\nMap the module collection mode strings into (combinations of)\ninteger flags that control try basic collection modes:\n- collect a pyc into PYZ archive\n- collect a pyc as a data file\n- collect a py as a data file\n\nConsolidate the `noarchive=True` codepath into module collection\nmode, where \"collect a pyc into PYZ archive\" flag is swapped\nfor a \"collect a pyc as a data file\".\n\nThe new collection mode also implicitly fixes couple of minor\nannoyances of the `noarchive=True` mode:\n- the user-writable paths containing python source code are not\n littered with pyc/pyo files anymore; all pycs are now gathered\n in build directory\n- the name of pycs in local build directory are not mangled anymore\n (was previously the case for pycs that could not be written to\n their original locations due to lack of permissions)\n- the pycs have code paths stripped from them, same as in\n noarchive=False mode", "code": "def _get_module_collection_mode(mode_dict, name, noarchive=False):\n \n # Default mode: collect into PYZ, unless noarchive is enabled. In that case, collect as pyc.\n mode_flags = _ModuleCollectionMode.PYC if noarchive else _ModuleCollectionMode.PYZ\n\n # If we have no collection mode settings, end here and now.\n if not mode_dict:\n return mode_flags\n\n # Search the parent modules/packages in top-down fashion, and take the last given setting. 
This ensures that\n # a setting given for the top-level package is recursively propagated to all its subpackages and submodules,\n # but also allows individual sub-modules to override the setting again.\n mode = 'pyz'\n\n name_parts = name.split('.')\n for i in range(len(name_parts)):\n modlevel = \".\".join(name_parts[:i + 1])\n modlevel_mode = mode_dict.get(modlevel, None)\n if modlevel_mode is not None:\n mode = modlevel_mode\n\n # Convert mode string to _ModuleCollectionMode flags\n try:\n mode_flags = _MODULE_COLLECTION_MODES[mode]\n except KeyError:\n raise ValueError(f\"Unknown module collection mode for {name!r}: {mode!r}!\")\n\n # noarchive flag being set means that we need to change _ModuleCollectionMode.PYZ into _ModuleCollectionMode.PYC\n if noarchive and _ModuleCollectionMode.PYZ in mode_flags:\n mode_flags ^= _ModuleCollectionMode.PYZ\n mode_flags |= _ModuleCollectionMode.PYC\n\n return mode_flags\n\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 283, "n_words": 165, "vocab_size": 106, "complexity": 8, "nloc": 19, "token_counts": 122, "n_ast_nodes": 216, "n_identifiers": 21, "d_id": 77469, "documentation": { "docstring": "\n Determine the module/package collection mode for the given module name, based on the provided collection\n mode settings dictionary.\n ", "n_words": 18, "vocab_size": 14, "n_whitespaces": 28, "language": "en" } }, { "id": 272918, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/layers/preprocessing/discretization.py", "file_name": "discretization.py", "fun_name": "adapt", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def adapt(self, data, batch_size=None, steps=None):\n \n super().adapt(data, batch_size=batch_size, steps=steps)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 32, "n_ast_nodes": 49, "n_identifiers": 6, "d_id": 81075, "documentation": { "docstring": "Computes bin boundaries from quantiles in a input dataset.\n\n Calling `adapt()` on a `Discretization` layer is an alternative to passing\n in a `bin_boundaries` argument during construction. A `Discretization` layer\n should always be either adapted over a dataset or passed `bin_boundaries`.\n\n During `adapt()`, the layer will estimate the quantile boundaries of the\n input dataset. The number of quantiles can be controlled via the `num_bins`\n argument, and the error tolerance for quantile boundaries can be controlled\n via the `epsilon` argument.\n\n In order to make `Discretization` efficient in any distribution context, the\n computed boundaries are kept static with respect to any compiled `tf.Graph`s\n that call the layer. As a consequence, if the layer is adapted a second\n time, any models using the layer should be re-compiled. For more information\n see `tf.keras.layers.experimental.preprocessing.PreprocessingLayer.adapt`.\n\n `adapt()` is meant only as a single machine utility to compute layer state.\n To analyze a dataset that cannot fit on a single machine, see\n [Tensorflow Transform](https://www.tensorflow.org/tfx/transform/get_started)\n for a multi-machine, map-reduce solution.\n\n Arguments:\n data: The data to train on. 
It can be passed either as a\n `tf.data.Dataset`, or as a numpy array.\n batch_size: Integer or `None`.\n Number of samples per state update.\n If unspecified, `batch_size` will default to 32.\n Do not specify the `batch_size` if your data is in the\n form of datasets, generators, or `keras.utils.Sequence` instances\n (since they generate batches).\n steps: Integer or `None`.\n Total number of steps (batches of samples)\n When training with input tensors such as\n TensorFlow data tensors, the default `None` is equal to\n the number of samples in your dataset divided by\n the batch size, or 1 if that cannot be determined. If x is a\n `tf.data` dataset, and 'steps' is None, the epoch will run until\n the input dataset is exhausted. When passing an infinitely\n repeating dataset, you must specify the `steps` argument. This\n argument is not supported with array inputs.\n ", "n_words": 305, "vocab_size": 175, "n_whitespaces": 653, "language": "en" } }, { "id": 819, "commit_id": "56137bacda6fea5a0053c65eb6fd88688f5298cc", "repo": "PySyft", "path": "packages/syft/src/syft/core/adp/vectorized_publish.py", "file_name": "vectorized_publish.py", "fun_name": "calculate_bounds_for_mechanism", "commit_message": "Implemented working vectorized_publish method into codebase\n\nTook 26 minutes", "code": "def calculate_bounds_for_mechanism(value_array, min_val_array, max_val_array):\n \n \n\n # TODO: Double check whether the iDPGaussianMechanism class squares its squared_l2_norm values!!\n worst_case_l2_norm = np.sqrt(np.sum(np.square(max_val_array - min_val_array))) * np.ones_like(value_array)\n l2_norm = np.sqrt(np.sum(np.square(value_array))) * np.ones_like(value_array)\n # print(l2_norm.shape, worst_case_l2_norm.shape)\n # print(l2_norm.shape)\n return l2_norm, worst_case_l2_norm\n\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 61, "n_words": 36, "vocab_size": 30, "complexity": 1, "nloc": 8, "token_counts": 67, "n_ast_nodes": 113, "n_identifiers": 11, "d_id": 126, "documentation": { "docstring": "Calculates the squared L2 norm values needed to create a Mechanism, and calculate privacy budget + spend If you calculate the privacy budget spend with the worst case bound, you can show this number to the D.S.\n If you calculate it with the regular value (the value computed below when public_only = False, you cannot show the \n privacy budget to the DS because this violates privacy.\n ", "n_words": 66, "vocab_size": 43, "n_whitespaces": 76, "language": "en" } }, { "id": 13277, "commit_id": "71e422211fe10930d384f2bf679785d3c415f514", "repo": "jina", "path": "jina/serve/instrumentation/__init__.py", "file_name": "__init__.py", "fun_name": "__call__", "commit_message": "feat: record existing Prometheus metrics into OpenTelemetry Histograms (#5275)", "code": "def __call__(self, f):\n \n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 10, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 4, "token_counts": 19, "n_ast_nodes": 15, "n_identifiers": 3, "d_id": 2593, "documentation": { "docstring": "function that gets called when this class is used as a decortor\n :param f: function that is decorated\n :return: wrapped function\n ", "n_words": 21, "vocab_size": 17, "n_whitespaces": 42, "language": "en" } }, { "id": 177486, "commit_id": "1f033118f2e0cca12c6e2375708dc92197b62da6", "repo": "networkx", "path": 
"networkx/algorithms/bipartite/redundancy.py", "file_name": "redundancy.py", "fun_name": "_node_redundancy", "commit_message": "Minor Python 2 cleanup (#6219)\n\nPython3 cleanup\r\n\r\nUse dict.keys() for set operations rather than explicitly\r\ncreating sets.", "code": "def _node_redundancy(G, v):\n \n n = len(G[v])\n overlap = sum(\n 1 for (u, w) in combinations(G[v], 2) if (G[u].keys() & G[w].keys()) - {v}\n )\n return (2 * overlap) / (n * (n - 1))\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 55, "n_words": 33, "vocab_size": 29, "complexity": 3, "nloc": 6, "token_counts": 79, "n_ast_nodes": 121, "n_identifiers": 11, "d_id": 42392, "documentation": { "docstring": "Returns the redundancy of the node `v` in the bipartite graph `G`.\n\n If `G` is a graph with `n` nodes, the redundancy of a node is the ratio\n of the \"overlap\" of `v` to the maximum possible overlap of `v`\n according to its degree. The overlap of `v` is the number of pairs of\n neighbors that have mutual neighbors themselves, other than `v`.\n\n `v` must have at least two neighbors in `G`.\n\n ", "n_words": 72, "vocab_size": 41, "n_whitespaces": 90, "language": "en" } }, { "id": 177528, "commit_id": "979d54acba7c3d372c93d44c6c149700608ce8b0", "repo": "networkx", "path": "networkx/classes/digraph.py", "file_name": "digraph.py", "fun_name": "add_edges_from", "commit_message": "doc: update documentation when providing an iterator over current graph to add/remove_edges_from. (#6268)\n\n* doc for add_edges_from\r\n\r\n* doc for digraph\r\n\r\n* doc for multigraph\r\n\r\n* multigraph.add_nodes_from returns keylist\r\n\r\n* update docs for graph - edges\r\n\r\n* doc update: graph.add_nodes_from\r\n\r\n* doc update: graph.remove_nodes_from\r\n\r\n* doc update: graph.add_edges_from\r\n\r\n* doc update: rewording for graph.add_edges_from\r\n\r\n* doc update: graph.add_weighted_edges_from rewording\r\n\r\n* doc update: digraph updated as graph\r\n\r\n* doc update: digraph minor sync\r\n\r\n* doc update: multigraph same as graph\r\n\r\n* Update graph.py\r\n\r\n* Update digraph.py\r\n\r\n* Update multigraph.py", "code": "def add_edges_from(self, ebunch_to_add, **attr):\n \n for e in ebunch_to_add:\n ne = len(e)\n if ne == 3:\n u, v, dd = e\n elif ne == 2:\n u, v = e\n dd = {}\n else:\n raise NetworkXError(f\"Edge tuple {e} must be a 2-tuple or 3-tuple.\")\n if u not in self._succ:\n if u is None:\n raise ValueError(\"None cannot be a node\")\n self._succ[u] = self.adjlist_inner_dict_factory()\n self._pred[u] = self.adjlist_inner_dict_factory()\n self._node[u] = self.node_attr_dict_factory()\n if v not in self._succ:\n if v is None:\n raise ValueError(\"None cannot be a node\")\n self._succ[v] = self.adjlist_inner_dict_factory()\n self._pred[v] = self.adjlist_inner_dict_factory()\n self._node[v] = self.node_attr_dict_factory()\n datadict = self._adj[u].get(v, self.edge_attr_dict_factory())\n datadict.update(attr)\n datadict.update(dd)\n self._succ[u][v] = datadict\n self._pred[v][u] = datadict\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 455, "n_words": 102, "vocab_size": 55, "complexity": 8, "nloc": 27, "token_counts": 217, "n_ast_nodes": 350, "n_identifiers": 22, "d_id": 42422, "documentation": { "docstring": "Add all the edges in ebunch_to_add.\n\n Parameters\n ----------\n ebunch_to_add : container of edges\n Each edge given in 
the container will be added to the\n graph. The edges must be given as 2-tuples (u, v) or\n 3-tuples (u, v, d) where d is a dictionary containing edge data.\n attr : keyword arguments, optional\n Edge data (or labels or objects) can be assigned using\n keyword arguments.\n\n See Also\n --------\n add_edge : add a single edge\n add_weighted_edges_from : convenient way to add weighted edges\n\n Notes\n -----\n Adding the same edge twice has no effect but any edge data\n will be updated when each duplicate edge is added.\n\n Edge attributes specified in an ebunch take precedence over\n attributes specified via keyword arguments.\n\n When adding edges from an iterator over the graph you are changing,\n a `RuntimeError` can be raised with message:\n `RuntimeError: dictionary changed size during iteration`. This\n happens when the graph's underlying dictionary is modified during\n iteration. To avoid this error, evaluate the iterator into a separate\n object, e.g. by using `list(iterator_of_edges)`, and pass this\n object to `G.add_edges_from`.\n\n Examples\n --------\n >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc\n >>> G.add_edges_from([(0, 1), (1, 2)]) # using a list of edge tuples\n >>> e = zip(range(0, 3), range(1, 4))\n >>> G.add_edges_from(e) # Add the path graph 0-1-2-3\n\n Associate data to edges\n\n >>> G.add_edges_from([(1, 2), (2, 3)], weight=3)\n >>> G.add_edges_from([(3, 4), (1, 4)], label=\"WN2898\")\n\n Evaluate an iterator over a graph if using it to modify the same graph\n\n >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 4)])\n >>> # Grow graph by one new node, adding edges to all existing nodes.\n >>> # wrong way - will raise RuntimeError\n >>> # G.add_edges_from(((5, n) for n in G.nodes))\n >>> # right way - note that there will be no self-edge for node 5\n >>> G.add_edges_from(list((5, n) for n in G.nodes))\n ", "n_words": 305, "vocab_size": 185, "n_whitespaces": 629, "language": "en" } }, { "id": 68939, "commit_id": "12b7e14fded587abc0f7821e3c3dfbea64498a7d", "repo": "erpnext", "path": "erpnext/regional/india/utils.py", "file_name": "utils.py", "fun_name": "validate_house_rent_dates", "commit_message": "chore: keep back code to be a part of other apps / to be ported later", "code": "def validate_house_rent_dates(doc):\n\tif not doc.rented_to_date or not doc.rented_from_date:\n\t\tfrappe.throw(_(\"House rented dates required for exemption calculation\"))\n\n\tif date_diff(doc.rented_to_date, doc.rented_from_date) < 14:\n\t\tfrappe.throw(_(\"House rented dates should be atleast 15 days apart\"))\n\n\tproofs = frappe.db.sql(\n\t\t,\n\t\t{\n\t\t\t\"employee\": doc.employee,\n\t\t\t\"payroll_period\": doc.payroll_period,\n\t\t\t\"from_date\": doc.rented_from_date,\n\t\t\t\"to_date\": doc.rented_to_date,\n\t\t},\n\t)\n\n\tif proofs:\n\t\tfrappe.throw(_(\"House rent paid days overlapping with {0}\").format(proofs[0][0]))\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 37, "n_words": 53, "vocab_size": 45, "complexity": 5, "nloc": 22, "token_counts": 109, "n_ast_nodes": 183, "n_identifiers": 14, "d_id": 14948, "documentation": { "docstring": "\n select name\n from `tabEmployee Tax Exemption Proof Submission`\n where\n docstatus=1 and employee=%(employee)s and payroll_period=%(payroll_period)s\n and (rented_from_date between %(from_date)s and %(to_date)s or rented_to_date between %(from_date)s and %(to_date)s)\n ", "n_words": 26, "vocab_size": 20, "n_whitespaces": 73, 
"language": "en" } }, { "id": 65661, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/controllers/queries.py", "file_name": "queries.py", "fun_name": "customer_query", "commit_message": "style: format code with black", "code": "def customer_query(doctype, txt, searchfield, start, page_len, filters):\n\tconditions = []\n\tcust_master_name = frappe.defaults.get_user_default(\"cust_master_name\")\n\n\tif cust_master_name == \"Customer Name\":\n\t\tfields = [\"name\", \"customer_group\", \"territory\"]\n\telse:\n\t\tfields = [\"name\", \"customer_name\", \"customer_group\", \"territory\"]\n\n\tfields = get_fields(\"Customer\", fields)\n\n\tsearchfields = frappe.get_meta(\"Customer\").get_search_fields()\n\tsearchfields = \" or \".join(field + \" like %(txt)s\" for field in searchfields)\n\n\treturn frappe.db.sql(\n\t\t.format(\n\t\t\t**{\n\t\t\t\t\"fields\": \", \".join(fields),\n\t\t\t\t\"scond\": searchfields,\n\t\t\t\t\"mcond\": get_match_cond(doctype),\n\t\t\t\t\"fcond\": get_filters_cond(doctype, filters, conditions).replace(\"%\", \"%%\"),\n\t\t\t}\n\t\t),\n\t\t{\"txt\": \"%%%s%%\" % txt, \"_txt\": txt.replace(\"%\", \"\"), \"start\": start, \"page_len\": page_len},\n\t)\n\n\n# searches for supplier\n@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 62, "n_words": 86, "vocab_size": 69, "complexity": 3, "nloc": 30, "token_counts": 172, "n_ast_nodes": 322, "n_identifiers": 27, "d_id": 13980, "documentation": { "docstring": "select {fields} from `tabCustomer`\n\t\twhere docstatus < 2\n\t\t\tand ({scond}) and disabled=0\n\t\t\t{fcond} {mcond}\n\t\torder by\n\t\t\tif(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),\n\t\t\tif(locate(%(_txt)s, customer_name), locate(%(_txt)s, customer_name), 99999),\n\t\t\tidx desc,\n\t\t\tname, customer_name\n\t\tlimit %(start)s, %(page_len)s", "n_words": 33, "vocab_size": 27, "n_whitespaces": 23, "language": "en" } }, { "id": 67609, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/doctype/delivery_trip/delivery_trip.py", "file_name": "delivery_trip.py", "fun_name": "get_default_contact", "commit_message": "style: format code with black", "code": "def get_default_contact(out, name):\n\tcontact_persons = frappe.db.sql(\n\t\t,\n\t\t(name),\n\t\tas_dict=1,\n\t)\n\n\tif contact_persons:\n\t\tfor out.contact_person in contact_persons:\n\t\t\tif out.contact_person.is_primary_contact:\n\t\t\t\treturn out.contact_person\n\n\t\tout.contact_person = contact_persons[0]\n\n\t\treturn out.contact_person\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 13, "n_words": 25, "vocab_size": 18, "complexity": 4, "nloc": 21, "token_counts": 59, "n_ast_nodes": 90, "n_identifiers": 10, "d_id": 14575, "documentation": { "docstring": "\n\t\t\tSELECT parent,\n\t\t\t\t(SELECT is_primary_contact FROM tabContact c WHERE c.name = dl.parent) AS is_primary_contact\n\t\t\tFROM\n\t\t\t\t`tabDynamic Link` dl\n\t\t\tWHERE\n\t\t\t\tdl.link_doctype=\"Customer\"\n\t\t\t\tAND dl.link_name=%s\n\t\t\t\tAND dl.parenttype = \"Contact\"\n\t\t", "n_words": 25, "vocab_size": 20, "n_whitespaces": 17, "language": "en" } }, { "id": 297243, "commit_id": "9f7fd8956f22bd873d14ae89460cdffe6ef6f85d", 
"repo": "core", "path": "homeassistant/helpers/template.py", "file_name": "template.py", "fun_name": "distance", "commit_message": "Use new unit enums in helpers (#83387)", "code": "def distance(hass, *args):\n \n locations = []\n\n to_process = list(args)\n\n while to_process:\n value = to_process.pop(0)\n if isinstance(value, str) and not valid_entity_id(value):\n point_state = None\n else:\n point_state = _resolve_state(hass, value)\n\n if point_state is None:\n # We expect this and next value to be lat&lng\n if not to_process:\n _LOGGER.warning(\n \"Distance:Expected latitude and longitude, got %s\", value\n )\n return None\n\n value_2 = to_process.pop(0)\n latitude = convert(value, float)\n longitude = convert(value_2, float)\n\n if latitude is None or longitude is None:\n _LOGGER.warning(\n \"Distance:Unable to process latitude and longitude: %s, %s\",\n value,\n value_2,\n )\n return None\n\n else:\n if not loc_helper.has_location(point_state):\n _LOGGER.warning(\n \"Distance:State does not contain valid location: %s\", point_state\n )\n return None\n\n latitude = point_state.attributes.get(ATTR_LATITUDE)\n longitude = point_state.attributes.get(ATTR_LONGITUDE)\n\n locations.append((latitude, longitude))\n\n if len(locations) == 1:\n return hass.config.distance(*locations[0])\n\n return hass.config.units.length(\n loc_util.distance(*locations[0] + locations[1]), UnitOfLength.METERS\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 554, "n_words": 126, "vocab_size": 72, "complexity": 10, "nloc": 39, "token_counts": 223, "n_ast_nodes": 358, "n_identifiers": 34, "d_id": 96212, "documentation": { "docstring": "Calculate distance.\n\n Will calculate distance from home to a point or between points.\n Points can be passed in using state objects or lat/lng coordinates.\n ", "n_words": 24, "vocab_size": 23, "n_whitespaces": 33, "language": "en" } }, { "id": 207127, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_filters/tests.py", "file_name": "tests.py", "fun_name": "test_filter_with_failing_queryset", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_filter_with_failing_queryset(self):\n \n modeladmin = DecadeFilterBookAdminWithFailingQueryset(Book, site)\n request = self.request_factory.get(\"/\", {})\n request.user = self.alfred\n with self.assertRaises(ZeroDivisionError):\n modeladmin.get_changelist_instance(request)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 62, "n_words": 16, "vocab_size": 14, "complexity": 1, "nloc": 6, "token_counts": 48, "n_ast_nodes": 83, "n_identifiers": 14, "d_id": 51872, "documentation": { "docstring": "\n When a filter's queryset method fails, it fails loudly and\n the corresponding exception doesn't get swallowed (#17828).\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 39, "language": "en" } }, { "id": 250085, "commit_id": "3ac412b4e2f8c5ba11dc962b8a9d871c1efdce9b", "repo": "synapse", "path": "tests/storage/databases/main/test_room.py", "file_name": "test_room.py", "fun_name": "test_background_populate_rooms_creator_column", "commit_message": "Require types in tests.storage. 
(#14646)\n\nAdds missing type hints to `tests.storage` package\r\nand does not allow untyped definitions.", "code": "def test_background_populate_rooms_creator_column(self) -> None:\n \n\n # Insert a room without the creator\n room_id = self._generate_room()\n self.get_success(\n self.store.db_pool.simple_update(\n table=\"rooms\",\n keyvalues={\"room_id\": room_id},\n updatevalues={\"creator\": None},\n desc=\"test\",\n )\n )\n\n # Make sure the test is starting out with a room without a creator\n room_creator_before = self.get_success(\n self.store.db_pool.simple_select_one_onecol(\n table=\"rooms\",\n keyvalues={\"room_id\": room_id},\n retcol=\"creator\",\n allow_none=True,\n )\n )\n self.assertEqual(room_creator_before, None)\n\n # Insert and run the background update.\n self.get_success(\n self.store.db_pool.simple_insert(\n \"background_updates\",\n {\n \"update_name\": _BackgroundUpdates.POPULATE_ROOMS_CREATOR_COLUMN,\n \"progress_json\": \"{}\",\n },\n )\n )\n\n # ... and tell the DataStore that it hasn't finished all updates yet\n self.store.db_pool.updates._all_done = False\n\n # Now let's actually drive the updates to completion\n self.wait_for_background_updates()\n\n # Make sure the background update filled in the room creator\n room_creator_after = self.get_success(\n self.store.db_pool.simple_select_one_onecol(\n table=\"rooms\",\n keyvalues={\"room_id\": room_id},\n retcol=\"creator\",\n allow_none=True,\n )\n )\n self.assertEqual(room_creator_after, self.user_id)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 610, "n_words": 119, "vocab_size": 73, "complexity": 1, "nloc": 42, "token_counts": 188, "n_ast_nodes": 316, "n_identifiers": 25, "d_id": 73263, "documentation": { "docstring": "Test that the background update to populate the rooms creator column\n works properly.\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 27, "language": "en" } }, { "id": 79041, "commit_id": "1cec004d97f0cfa3d4a8a1e99eea42ae7f651993", "repo": "wagtail", "path": "wagtail/models/__init__.py", "file_name": "__init__.py", "fun_name": "with_content_json", "commit_message": "Extract page locking fields into a mixin", "code": "def with_content_json(self, content):\n \n obj = super().with_content_json(content)\n\n # Ensure other values that are meaningful for the object as a whole (rather than\n # to a specific revision) are preserved\n obj.locked = self.locked\n obj.locked_at = self.locked_at\n obj.locked_by = self.locked_by\n\n return obj\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 95, "n_words": 39, "vocab_size": 32, "complexity": 1, "nloc": 6, "token_counts": 41, "n_ast_nodes": 70, "n_identifiers": 8, "d_id": 16859, "documentation": { "docstring": "\n Returns a new version of the object with field values updated to reflect changes\n in the provided ``content`` (which usually comes from a previously-saved revision).\n\n Certain field values are preserved in order to prevent errors if the returned\n object is saved, such as ``id``. 
The following field values are also preserved,\n as they are considered to be meaningful to the object as a whole, rather than\n to a specific revision:\n\n * ``locked``\n * ``locked_at``\n * ``locked_by``\n ", "n_words": 76, "vocab_size": 53, "n_whitespaces": 147, "language": "en" } }, { "id": 228377, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_surface.py", "file_name": "_surface.py", "fun_name": "hidesurface", "commit_message": "switch to black .22", "code": "def hidesurface(self):\n \n return self[\"hidesurface\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 60050, "documentation": { "docstring": "\n Determines whether or not a surface is drawn. For example, set\n `hidesurface` to False `contours.x.show` to True and\n `contours.y.show` to True to draw a wire frame plot.\n\n The 'hidesurface' property must be specified as a bool\n (either True, or False)\n\n Returns\n -------\n bool\n ", "n_words": 43, "vocab_size": 35, "n_whitespaces": 107, "language": "en" } }, { "id": 101466, "commit_id": "13cfb3f39e72e9ca181f173b7b3db2a048db0d08", "repo": "faceswap", "path": "plugins/extract/pipeline.py", "file_name": "pipeline.py", "fun_name": "_total_vram_required", "commit_message": "extract: Add batch processing mode", "code": "def _total_vram_required(self) -> float:\n \n vrams = self._vram_per_phase\n vram_required_count = sum(1 for p in vrams.values() if p > 0)\n logger.debug(\"VRAM requirements: %s. Plugins requiring VRAM: %s\",\n vrams, vram_required_count)\n retval = (sum(vrams.values()) *\n self._parallel_scaling.get(vram_required_count, self._scaling_fallback))\n logger.debug(\"Total VRAM required: %s\", retval)\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 126, "n_words": 40, "vocab_size": 35, "complexity": 3, "nloc": 10, "token_counts": 76, "n_ast_nodes": 124, "n_identifiers": 15, "d_id": 20879, "documentation": { "docstring": " Return vram required for all phases plus the buffer ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 10, "language": "en" } }, { "id": 216417, "commit_id": "5550d1823e9cb571740ae9e57b25424cfe6a919e", "repo": "salt", "path": "tests/pytests/functional/modules/win_file/test_check_perms.py", "file_name": "test_check_perms.py", "fun_name": "test_check_perms_grant_test_true", "commit_message": "Add changelong", "code": "def test_check_perms_grant_test_true(test_file):\n \n expected = {\n \"comment\": \"\",\n \"changes\": {\"grant_perms\": {\"Users\": {\"permissions\": \"read_execute\"}}},\n \"name\": str(test_file),\n \"result\": None,\n }\n with patch.dict(win_dacl.__opts__, {\"test\": True}):\n result = win_file.check_perms(\n path=str(test_file),\n grant_perms={\"Users\": {\"perms\": \"read_execute\"}},\n inheritance=None,\n )\n assert result == expected\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 128, "n_words": 34, "vocab_size": 31, "complexity": 1, "nloc": 14, "token_counts": 91, "n_ast_nodes": 165, "n_identifiers": 14, "d_id": 54581, "documentation": { "docstring": "\n Test setting grant perms on a file with test=True\n ", 
"n_words": 9, "vocab_size": 9, "n_whitespaces": 16, "language": "en" } }, { "id": 230876, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/_annotation.py", "file_name": "_annotation.py", "fun_name": "ayref", "commit_message": "switch to black .22", "code": "def ayref(self):\n \n return self[\"ayref\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 62549, "documentation": { "docstring": "\n Indicates in what coordinates the tail of the annotation\n (ax,ay) is specified. If set to a ay axis id (e.g. \"ay\" or\n \"ay2\"), the `ay` position refers to a ay coordinate. If set to\n \"paper\", the `ay` position refers to the distance from the\n bottom of the plotting area in normalized coordinates where 0\n (1) corresponds to the bottom (top). If set to a ay axis ID\n followed by \"domain\" (separated by a space), the position\n behaves like for \"paper\", but refers to the distance in\n fractions of the domain length from the bottom of the domain of\n that axis: e.g., *ay2 domain* refers to the domain of the\n second ay axis and a ay position of 0.5 refers to the point\n between the bottom and the top of the domain of the second ay\n axis. In order for absolute positioning of the arrow to work,\n \"ayref\" must be exactly the same as \"yref\", otherwise \"ayref\"\n will revert to \"pixel\" (explained next). For relative\n positioning, \"ayref\" can be set to \"pixel\", in which case the\n \"ay\" value is specified in pixels relative to \"y\". Absolute\n positioning is useful for trendline annotations which should\n continue to indicate the correct trend when zoomed. Relative\n positioning is useful for specifying the text offset for an\n annotated point.\n\n The 'ayref' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['pixel']\n - A string that matches one of the following regular expressions:\n ['^y([2-9]|[1-9][0-9]+)?( domain)?$']\n\n Returns\n -------\n Any\n ", "n_words": 249, "vocab_size": 133, "n_whitespaces": 481, "language": "en" } }, { "id": 275644, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/schedules/learning_rate_schedule.py", "file_name": "learning_rate_schedule.py", "fun_name": "serialize", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def serialize(learning_rate_schedule):\n \n return generic_utils.serialize_keras_object(learning_rate_schedule)\n\n\n@keras_export(\"keras.optimizers.schedules.deserialize\")", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.optimizers.schedules.deserialize\")", "n_ast_errors": 1, "ast_levels": 7, "n_whitespaces": 10, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 35, "n_identifiers": 5, "d_id": 81437, "documentation": { "docstring": "Serializes a `LearningRateSchedule` into a JSON-compatible representation.\n\n Args:\n learning_rate_schedule: The `LearningRateSchedule` object to serialize.\n\n Returns:\n A JSON-serializable dict representing the object's config.\n\n Example:\n\n >>> lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n ... 
0.1, decay_steps=100000, decay_rate=0.96, staircase=True)\n >>> tf.keras.optimizers.schedules.serialize(lr_schedule)\n {'class_name': 'ExponentialDecay', 'config': {...}}\n ", "n_words": 38, "vocab_size": 35, "n_whitespaces": 74, "language": "en" } }, { "id": 9173, "commit_id": "af2d71fab71bfd819daef263f4988f36499c0af2", "repo": "insightface", "path": "parsing/dml_csr/test.py", "file_name": "test.py", "fun_name": "get_arguments", "commit_message": "Create test.py", "code": "def get_arguments():\n \n parser = argparse.ArgumentParser(description=\"DML_CSR Network\")\n parser.add_argument(\"--batch-size\", type=int, default=1,\n help=\"Number of images sent to the network in one step.\")\n parser.add_argument(\"--data-dir\", type=str, default=DATA_DIRECTORY,\n help=\"Path to the directory containing the PASCAL VOC dataset.\")\n parser.add_argument(\"--out-dir\", type=str, default=DATA_DIRECTORY,\n help=\"Path to the directory containing the PASCAL VOC dataset.\")\n parser.add_argument(\"--dataset\", type=str, default='val',\n help=\"Path to the file listing the images in the dataset.\")\n parser.add_argument(\"--ignore-label\", type=int, default=IGNORE_LABEL,\n help=\"The index of the label to ignore during the training.\")\n parser.add_argument(\"--num-classes\", type=int, default=NUM_CLASSES,\n help=\"Number of classes to predict (including background).\")\n parser.add_argument(\"--restore-from\", type=str,\n help=\"Where restore model parameters from.\")\n parser.add_argument(\"--gpu\", type=str, default='7',\n help=\"choose gpu device.\")\n parser.add_argument(\"--input-size\", type=str, default=INPUT_SIZE,\n help=\"Comma-separated string with height and width of images.\")\n parser.add_argument(\"--local_rank\", type=int, default=0,\n help=\"choose gpu numbers\") \n parser.add_argument('--dist-backend', default='nccl', type=str,\n help='distributed backend')\n parser.add_argument(\"--model_type\", type=int, default=0,\n help=\"choose model type\") \n return parser.parse_args()\n\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 445, "n_words": 122, "vocab_size": 78, "complexity": 1, "nloc": 27, "token_counts": 233, "n_ast_nodes": 389, "n_identifiers": 16, "d_id": 1570, "documentation": { "docstring": "Parse all the arguments provided from the CLI.\n \n Returns:\n A list of parsed arguments.\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 29, "language": "en" } }, { "id": 218534, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ipaddress.py", "file_name": "ipaddress.py", "fun_name": "is_link_local", "commit_message": "add python 3.10.4 for windows", "code": "def is_link_local(self):\n \n return (self.network_address.is_link_local and\n self.broadcast_address.is_link_local)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 35, "n_words": 6, "vocab_size": 6, "complexity": 2, "nloc": 3, "token_counts": 20, "n_ast_nodes": 34, "n_identifiers": 4, "d_id": 55371, "documentation": { "docstring": "Test if the address is reserved for link-local.\n\n Returns:\n A boolean, True if the address is reserved per RFC 4291.\n\n ", "n_words": 20, "vocab_size": 15, "n_whitespaces": 45, "language": "en" } }, { "id": 204611, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/management/color.py", "file_name": 
"color.py", "fun_name": "no_style", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def no_style():\n \n return make_style(\"nocolor\")\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 10, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 50814, "documentation": { "docstring": "\n Return a Style object with no color scheme.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 15, "language": "en" } }, { "id": 161314, "commit_id": "f17e3b04e1049528e13ae340db3ac8212c56a35d", "repo": "MockingBird", "path": "synthesizer/models/tacotron.py", "file_name": "tacotron.py", "fun_name": "_add_speaker_embedding", "commit_message": "Refactor (#650)\n\n* Refactor model\r\n\r\n* Add description for\r\n\r\n* update launch json", "code": "def _add_speaker_embedding(x, speaker_embedding):\n \n # Save the dimensions as human-readable names\n batch_size = x.size()[0]\n text_num_chars = x.size()[1]\n\n # Start by making a copy of each speaker embedding to match the input text length\n # The output of this has size (batch_size, text_num_chars * speaker_embedding_size)\n speaker_embedding_size = speaker_embedding.size()[1]\n e = speaker_embedding.repeat_interleave(text_num_chars, dim=1)\n\n # Reshape it and transpose\n e = e.reshape(batch_size, speaker_embedding_size, text_num_chars)\n e = e.transpose(1, 2)\n\n # Concatenate the tiled speaker embedding with the encoder output\n x = torch.cat((x, e), 2)\n return x\n", "url": "https://github.com/babysor/MockingBird.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 187, "n_words": 81, "vocab_size": 59, "complexity": 1, "nloc": 9, "token_counts": 88, "n_ast_nodes": 141, "n_identifiers": 14, "d_id": 38961, "documentation": { "docstring": "Add speaker embedding\n This concats the speaker embedding for each char in the encoder output\n Args:\n x (3D tensor with size `[batch_size, text_num_chars, encoder_dims]`): the encoder output\n speaker_embedding (2D tensor `[batch_size, speaker_embedding_size]`): the speaker embedding\n\n Returns:\n 3D tensor with size `[batch_size, text_num_chars, encoder_dims+speaker_embedding_size]`\n ", "n_words": 43, "vocab_size": 27, "n_whitespaces": 108, "language": "en" } }, { "id": 226442, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_choropleth.py", "file_name": "_choropleth.py", "fun_name": "locationssrc", "commit_message": "switch to black .22", "code": "def locationssrc(self):\n \n return self[\"locationssrc\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 58115, "documentation": { "docstring": "\n Sets the source reference on Chart Studio Cloud for\n `locations`.\n\n The 'locationssrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n ", "n_words": 27, "vocab_size": 25, "n_whitespaces": 84, "language": "en" } }, { "id": 269909, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/callbacks.py", "file_name": "callbacks.py", "fun_name": "on_predict_end", "commit_message": "Reformatting the 
codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def on_predict_end(self, logs=None):\n \n logs = self._process_logs(logs)\n for callback in self.callbacks:\n callback.on_predict_end(logs)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 43, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 4, "token_counts": 31, "n_ast_nodes": 51, "n_identifiers": 6, "d_id": 80323, "documentation": { "docstring": "Calls the `on_predict_end` methods of its callbacks.\n\n Args:\n logs: Dict. Currently, no data is passed via this argument\n for this method, but that may change in the future.\n ", "n_words": 28, "vocab_size": 26, "n_whitespaces": 66, "language": "en" } }, { "id": 153097, "commit_id": "1e65a4afd191cf61ba05b80545d23f9b88962f41", "repo": "modin", "path": "modin/core/dataframe/algebra/default2pandas/groupby.py", "file_name": "groupby.py", "fun_name": "get_func", "commit_message": "FIX-#3197: do not pass lambdas to the backend in GroupBy (#3373)\n\nSigned-off-by: Dmitry Chigarev ", "code": "def get_func(cls, key, **kwargs):\n \n if \"agg_func\" in kwargs:\n return cls.inplace_applyier_builder(key, kwargs[\"agg_func\"])\n elif \"func_dict\" in kwargs:\n return cls.inplace_applyier_builder(key, kwargs[\"func_dict\"])\n else:\n return cls.inplace_applyier_builder(key)\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 82, "n_words": 21, "vocab_size": 16, "complexity": 3, "nloc": 7, "token_counts": 54, "n_ast_nodes": 92, "n_identifiers": 5, "d_id": 35257, "documentation": { "docstring": "\n Extract aggregation function from groupby arguments.\n\n Parameters\n ----------\n key : callable or str\n Default aggregation function. If aggregation function is not specified\n via groupby arguments, then `key` function is used.\n **kwargs : dict\n GroupBy arguments that may contain aggregation function.\n\n Returns\n -------\n callable\n Aggregation function.\n\n Notes\n -----\n There are two ways of how groupby aggregation can be invoked:\n 1. Explicitly with query compiler method: `qc.groupby_sum()`.\n 2. 
By passing aggregation function as an argument: `qc.groupby_agg(\"sum\")`.\n Both are going to produce the same result, however in the first case actual aggregation\n function can be extracted from the method name, while for the second only from the method arguments.\n ", "n_words": 106, "vocab_size": 78, "n_whitespaces": 271, "language": "en" } }, { "id": 256915, "commit_id": "11cf94a9652a577732941f27ad59eb7c8bc5063e", "repo": "haystack", "path": "test/test_pipeline_yaml.py", "file_name": "test_pipeline_yaml.py", "fun_name": "mock_json_schema", "commit_message": "Pipeline's YAML: syntax validation (#2226)\n\n* Add BasePipeline.validate_config, BasePipeline.validate_yaml, and some new custom exception classes\r\n\r\n* Make error composition work properly\r\n\r\n* Clarify typing\r\n\r\n* Help mypy a bit more\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Enable autogenerated docs for Milvus1 and 2 separately\r\n\r\n* Revert \"Enable autogenerated docs for Milvus1 and 2 separately\"\r\n\r\nThis reverts commit 282be4a78a6e95862a9b4c924fc3dea5ca71e28d.\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Re-enable 'additionalProperties: False'\r\n\r\n* Add pipeline.type to JSON Schema, was somehow forgotten\r\n\r\n* Disable additionalProperties on the pipeline properties too\r\n\r\n* Fix json-schemas for 1.1.0 and 1.2.0 (should not do it again in the future)\r\n\r\n* Cal super in PipelineValidationError\r\n\r\n* Improve _read_pipeline_config_from_yaml's error handling\r\n\r\n* Fix generate_json_schema.py to include document stores\r\n\r\n* Fix json schemas (retro-fix 1.1.0 again)\r\n\r\n* Improve custom errors printing, add link to docs\r\n\r\n* Add function in BaseComponent to list its subclasses in a module\r\n\r\n* Make some document stores base classes abstract\r\n\r\n* Add marker 'integration' in pytest flags\r\n\r\n* Slighly improve validation of pipelines at load\r\n\r\n* Adding tests for YAML loading and validation\r\n\r\n* Make custom_query Optional for validation issues\r\n\r\n* Fix bug in _read_pipeline_config_from_yaml\r\n\r\n* Improve error handling in BasePipeline and Pipeline and add DAG check\r\n\r\n* Move json schema generation into haystack/nodes/_json_schema.py (useful for tests)\r\n\r\n* Simplify errors slightly\r\n\r\n* Add some YAML validation tests\r\n\r\n* Remove load_from_config from BasePipeline, it was never used anyway\r\n\r\n* Improve tests\r\n\r\n* Include json-schemas in package\r\n\r\n* Fix conftest imports\r\n\r\n* Make BasePipeline abstract\r\n\r\n* Improve mocking by making the test independent from the YAML version\r\n\r\n* Add exportable_to_yaml decorator to forget about set_config on mock nodes\r\n\r\n* Fix mypy errors\r\n\r\n* Comment out one monkeypatch\r\n\r\n* Fix typing again\r\n\r\n* Improve error message for validation\r\n\r\n* Add required properties to pipelines\r\n\r\n* Fix YAML version for REST API YAMLs to 1.2.0\r\n\r\n* Fix load_from_yaml call in load_from_deepset_cloud\r\n\r\n* fix HaystackError.__getattr__\r\n\r\n* Add super().__init__()in most nodes and docstore, comment set_config\r\n\r\n* Remove type from REST API pipelines\r\n\r\n* Remove useless init from doc2answers\r\n\r\n* Call super in Seq3SeqGenerator\r\n\r\n* Typo in deepsetcloud.py\r\n\r\n* Fix rest api indexing error mismatch and mock version of JSON schema in all tests\r\n\r\n* Working on pipeline tests\r\n\r\n* Improve errors printing slightly\r\n\r\n* Add back test_pipeline.yaml\r\n\r\n* _json_schema.py supports different versions with identical schemas\r\n\r\n* Add type 
to 0.7 schema for backwards compatibility\r\n\r\n* Fix small bug in _json_schema.py\r\n\r\n* Try alternative to generate json schemas on the CI\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Make linux CI match autoformat CI\r\n\r\n* Fix super-init-not-called\r\n\r\n* Accidentally committed file\r\n\r\n* Update Documentation & Code Style\r\n\r\n* fix test_summarizer_translation.py's import\r\n\r\n* Mock YAML in a few suites, split and simplify test_pipeline_debug_and_validation.py::test_invalid_run_args\r\n\r\n* Fix json schema for ray tests too\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Reintroduce validation\r\n\r\n* Usa unstable version in tests and rest api\r\n\r\n* Make unstable support the latest versions\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Remove needless fixture\r\n\r\n* Make type in pipeline optional in the strings validation\r\n\r\n* Fix schemas\r\n\r\n* Fix string validation for pipeline type\r\n\r\n* Improve validate_config_strings\r\n\r\n* Remove type from test p[ipelines\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix test_pipeline\r\n\r\n* Removing more type from pipelines\r\n\r\n* Temporary CI patc\r\n\r\n* Fix issue with exportable_to_yaml never invoking the wrapped init\r\n\r\n* rm stray file\r\n\r\n* pipeline tests are green again\r\n\r\n* Linux CI now needs .[all] to generate the schema\r\n\r\n* Bugfixes, pipeline tests seems to be green\r\n\r\n* Typo in version after merge\r\n\r\n* Implement missing methods in Weaviate\r\n\r\n* Trying to avoid FAISS tests from running in the Milvus1 test suite\r\n\r\n* Fix some stray test paths and faiss index dumping\r\n\r\n* Fix pytest markers list\r\n\r\n* Temporarily disable cache to be able to see tests failures\r\n\r\n* Fix pyproject.toml syntax\r\n\r\n* Use only tmp_path\r\n\r\n* Fix preprocessor signature after merge\r\n\r\n* Fix faiss bug\r\n\r\n* Fix Ray test\r\n\r\n* Fix documentation issue by removing quotes from faiss type\r\n\r\n* Update Documentation & Code Style\r\n\r\n* use document properly in preprocessor tests\r\n\r\n* Update Documentation & Code Style\r\n\r\n* make preprocessor capable of handling documents\r\n\r\n* import document\r\n\r\n* Revert support for documents in preprocessor, do later\r\n\r\n* Fix bug in _json_schema.py that was breaking validation\r\n\r\n* re-enable cache\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Simplify calling _json_schema.py from the CI\r\n\r\n* Remove redundant ABC inheritance\r\n\r\n* Ensure exportable_to_yaml works only on implementations\r\n\r\n* Rename subclass to class_ in Meta\r\n\r\n* Make run() and get_config() abstract in BasePipeline\r\n\r\n* Revert unintended change in preprocessor\r\n\r\n* Move outgoing_edges_input_node check inside try block\r\n\r\n* Rename VALID_CODE_GEN_INPUT_REGEX into VALID_INPUT_REGEX\r\n\r\n* Add check for a RecursionError on validate_config_strings\r\n\r\n* Address usages of _pipeline_config in data silo and elasticsearch\r\n\r\n* Rename _pipeline_config into _init_parameters\r\n\r\n* Fix pytest marker and remove unused imports\r\n\r\n* Remove most redundant ABCs\r\n\r\n* Rename _init_parameters into _component_configuration\r\n\r\n* Remove set_config and type from _component_configuration's dict\r\n\r\n* Remove last instances of set_config and replace with super().__init__()\r\n\r\n* Implement __init_subclass__ approach\r\n\r\n* Simplify checks on the existence of _component_configuration\r\n\r\n* Fix faiss issue\r\n\r\n* Dynamic generation of node schemas & weed out old schemas\r\n\r\n* Add 
debatable test\r\n\r\n* Add docstring to debatable test\r\n\r\n* Positive diff between schemas implemented\r\n\r\n* Improve diff printing\r\n\r\n* Rename REST API YAML files to trigger IDE validation\r\n\r\n* Fix typing issues\r\n\r\n* Fix more typing\r\n\r\n* Typo in YAML filename\r\n\r\n* Remove needless type:ignore\r\n\r\n* Add tests\r\n\r\n* Fix tests & validation feedback for accessory classes in custom nodes\r\n\r\n* Refactor RAGeneratorType out\r\n\r\n* Fix broken import in conftest\r\n\r\n* Improve source error handling\r\n\r\n* Remove unused import in test_eval.py breaking tests\r\n\r\n* Fix changed error message in tests matches too\r\n\r\n* Normalize generate_openapi_specs.py and generate_json_schema.py in the actions\r\n\r\n* Fix path to generate_openapi_specs.py in autoformat.yml\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Add test for FAISSDocumentStore-like situations (superclass with init params)\r\n\r\n* Update Documentation & Code Style\r\n\r\n* Fix indentation\r\n\r\n* Remove commented set_config\r\n\r\n* Store model_name_or_path in FARMReader to use in DistillationDataSilo\r\n\r\n* Rename _component_configuration into _component_config\r\n\r\n* Update Documentation & Code Style\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def mock_json_schema(request, monkeypatch, tmp_path):\n \n # Do not patch integration tests\n if \"integration\" in request.keywords:\n return\n\n # Mock the subclasses list to make it very small, containing only mock nodes\n monkeypatch.setattr(\n haystack.nodes._json_schema,\n \"find_subclasses_in_modules\",\n lambda *a, **k: [(conftest, MockDocumentStore), (conftest, MockReader), (conftest, MockRetriever)],\n )\n # Point the JSON schema path to tmp_path\n monkeypatch.setattr(haystack.pipelines.config, \"JSON_SCHEMAS_PATH\", tmp_path)\n\n # Generate mock schema in tmp_path\n filename = f\"haystack-pipeline-unstable.schema.json\"\n test_schema = _json_schema.get_json_schema(filename=filename, compatible_versions=[\"unstable\"])\n\n with open(tmp_path / filename, \"w\") as schema_file:\n json.dump(test_schema, schema_file, indent=4)\n\n\n#\n# Integration\n#\n\n\n@pytest.mark.integration\n@pytest.mark.elasticsearch", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "@pytest.mark.integration\n@pytest.mark.elasticsearch", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 148, "n_words": 82, "vocab_size": 68, "complexity": 2, "nloc": 13, "token_counts": 116, "n_ast_nodes": 209, "n_identifiers": 30, "d_id": 74961, "documentation": { "docstring": "\n JSON schema with the unstable version and only mocked nodes.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 17, "language": "en" } }, { "id": 222538, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/dis.py", "file_name": "dis.py", "fun_name": "_get_name_info", "commit_message": "add python 3.10.4 for windows", "code": "def _get_name_info(name_index, name_list):\n \n argval = name_index\n if name_list is not None:\n argval = name_list[name_index]\n argrepr = argval\n else:\n argrepr = repr(argval)\n return argval, argrepr\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 60, "n_words": 24, "vocab_size": 17, "complexity": 2, "nloc": 8, "token_counts": 38, "n_ast_nodes": 63, "n_identifiers": 6, "d_id": 56627, "documentation": { "docstring": 
"Helper to get optional details about named references\n\n Returns the dereferenced name as both value and repr if the name\n list is defined.\n Otherwise returns the name index and its repr().\n ", "n_words": 31, "vocab_size": 26, "n_whitespaces": 52, "language": "en" } }, { "id": 276973, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/utils/losses_utils.py", "file_name": "losses_utils.py", "fun_name": "cast_losses_to_common_dtype", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def cast_losses_to_common_dtype(losses):\n \n highest_float = None\n for loss in losses:\n if loss.dtype.is_floating:\n if highest_float is None or loss.dtype.size > highest_float.size:\n highest_float = loss.dtype\n elif {loss.dtype, highest_float} == {\"bfloat16\", \"float16\"}:\n highest_float = \"float32\"\n if loss.dtype.is_complex:\n return (\n losses # If we find any complex losses, do not cast any losses\n )\n if highest_float:\n losses = [tf.cast(loss, highest_float) for loss in losses]\n return losses\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 187, "n_words": 61, "vocab_size": 43, "complexity": 9, "nloc": 15, "token_counts": 91, "n_ast_nodes": 148, "n_identifiers": 10, "d_id": 81807, "documentation": { "docstring": "Cast a list of losses to a common dtype.\n\n If any loss is floating-point, they will all be casted to the most-precise\n floating-point loss. Otherwise the losses are not casted. We also skip casting\n losses if there are any complex losses.\n\n Args:\n losses: A list of losses.\n\n Returns:\n `losses`, but they have been casted to a common dtype.\n ", "n_words": 58, "vocab_size": 42, "n_whitespaces": 86, "language": "en" } }, { "id": 101476, "commit_id": "13cfb3f39e72e9ca181f173b7b3db2a048db0d08", "repo": "faceswap", "path": "plugins/plugin_loader.py", "file_name": "plugin_loader.py", "fun_name": "get_available_models", "commit_message": "extract: Add batch processing mode", "code": "def get_available_models() -> List[str]:\n \n modelpath = os.path.join(os.path.dirname(__file__), \"train\", \"model\")\n models = sorted(item.name.replace(\".py\", \"\").replace(\"_\", \"-\")\n for item in os.scandir(modelpath)\n if not item.name.startswith(\"_\")\n and not item.name.endswith(\"defaults.py\")\n and item.name.endswith(\".py\"))\n return models\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 148, "n_words": 28, "vocab_size": 24, "complexity": 5, "nloc": 15, "token_counts": 93, "n_ast_nodes": 163, "n_identifiers": 17, "d_id": 20889, "documentation": { "docstring": " Return a list of available training models\n\n Returns\n -------\n list:\n A list of the available training model plugin names\n ", "n_words": 19, "vocab_size": 15, "n_whitespaces": 59, "language": "en" } }, { "id": 39442, "commit_id": "1d7341e93d1f03387699fb3c6ae0b6c0e464296f", "repo": "recommenders", "path": "recommenders/utils/python_utils.py", "file_name": "python_utils.py", "fun_name": "cosine_similarity", "commit_message": "Add new item similarity metrics for SAR (#1754)\n\n* Add mutual information similarity in SAR\r\n\r\n* Add lexicographers mutual information similarity for SAR\r\n\r\n* Add cosine similarity for SAR\r\n\r\n* Add inclusion index for SAR\r\n\r\n* Typos\r\n\r\n* Change SARSingleNode to SAR\r\n\r\n* Convert item 
similarity matrix to np.array\r\n\r\n* Update\r\n\r\n* Update SAR tests\r\n\r\n* Remove unused imports\r\n\r\n* Add explanations for new similarity metrics", "code": "def cosine_similarity(cooccurrence):\n \n\n diag_rows, diag_cols = _get_row_and_column_matrix(cooccurrence.diagonal())\n\n with np.errstate(invalid=\"ignore\", divide=\"ignore\"):\n result = cooccurrence / np.sqrt(diag_rows * diag_cols)\n\n return np.array(result)\n\n", "url": "https://github.com/microsoft/recommenders.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 37, "n_words": 18, "vocab_size": 17, "complexity": 1, "nloc": 5, "token_counts": 51, "n_ast_nodes": 90, "n_identifiers": 13, "d_id": 7234, "documentation": { "docstring": "Helper method to calculate the Cosine similarity of a matrix of\n co-occurrences.\n\n Cosine similarity can be interpreted as the angle between the i-th\n and j-th item.\n\n Args:\n cooccurrence (numpy.ndarray): The symmetric matrix of co-occurrences of items.\n\n Returns:\n numpy.ndarray: The matrix of cosine similarity between any two items.\n\n ", "n_words": 47, "vocab_size": 33, "n_whitespaces": 79, "language": "en" } }, { "id": 133959, "commit_id": "297341e107daee1ea3aff991ae8ea8c90993c683", "repo": "ray", "path": "python/ray/util/client/ray_client_helpers.py", "file_name": "ray_client_helpers.py", "fun_name": "ray_start_client_server_for_address", "commit_message": "[Test][Client] Only start ray once in client tests (#28835)\n\nIt looks like we're frequently starting and shutting down Ray in this test because `ray_start_client_server` isn't connecting to the Ray created by `ray_start_regular_shared`, and is instead starting a new Ray head process every time it launches.\r\n\r\nRay client tests are failing frequently with:\r\n\r\n```\r\n[2022-10-06 07:31:46,253 E 13235 13751] core_worker_process.cc:277: The core worker has already been shutdown. This happens when the language frontend accesses the Ray's worker after it is shutdown. The process will exit\r\n```\r\n\r\nWhich is probably caused by having multiple ray clusters running simultaneous, with some shutting down asynchronously. 
This refactor forces all of the tests in the module to use the same Ray cluster.\r\n\r\nAlso fixes two other sources of potential flakiness:\r\n* Joins the thread in test_client_thread_safe (seems like this has a bad interaction when the client server is cleaned up)\r\n* Calls ray.get in `test_stdout_log_stream`, to make sure that the remote function is done running before we try searching for its output\r\n\r\nShould also have the happy side effect of speeding up test_client.\r\n\r\nRan the `Small & Client` tests (regular and external redis) twice each, no flakes, and windows version of test_client.", "code": "def ray_start_client_server_for_address(address):\n \n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 4, "token_counts": 20, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 30162, "documentation": { "docstring": "\n Starts a Ray client server that initializes drivers at the specified address.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 19, "language": "en" } }, { "id": 217436, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ftplib.py", "file_name": "ftplib.py", "fun_name": "voidcmd", "commit_message": "add python 3.10.4 for windows", "code": "def voidcmd(self, cmd):\n \n self.putcmd(cmd)\n return self.voidresp()\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 27, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 35, "n_identifiers": 5, "d_id": 54784, "documentation": { "docstring": "Send a command and expect a response beginning with '2'.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 176292, "commit_id": "b5d41847b8db0c82372faf69cd3a339d11da7ef0", "repo": "networkx", "path": "networkx/algorithms/shortest_paths/generic.py", "file_name": "generic.py", "fun_name": "all_shortest_paths", "commit_message": "DOC: Update documentation to include callables for weight argument (#5307)\n\nUpdate docs to include functions as valid input for weight argument.", "code": "def all_shortest_paths(G, source, target, weight=None, method=\"dijkstra\"):\n \n method = \"unweighted\" if weight is None else method\n if method == \"unweighted\":\n pred = nx.predecessor(G, source)\n elif method == \"dijkstra\":\n pred, dist = nx.dijkstra_predecessor_and_distance(G, source, weight=weight)\n elif method == \"bellman-ford\":\n pred, dist = nx.bellman_ford_predecessor_and_distance(G, source, weight=weight)\n else:\n raise ValueError(f\"method not supported: {method}\")\n\n return _build_paths_from_predecessors({source}, target, pred)\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 102, "n_words": 53, "vocab_size": 36, "complexity": 5, "nloc": 11, "token_counts": 103, "n_ast_nodes": 168, "n_identifiers": 14, "d_id": 41809, "documentation": { "docstring": "Compute all shortest simple paths in the graph.\n\n Parameters\n ----------\n G : NetworkX graph\n\n source : node\n Starting node for path.\n\n target : node\n Ending node for path.\n\n weight : None, string or function, optional (default = None)\n If None, every edge has weight/distance/cost 1.\n If a string, use this edge attribute as the edge weight.\n Any edge 
attribute not present defaults to 1.\n If this is a function, the weight of an edge is the value\n returned by the function. The function must accept exactly\n three positional arguments: the two endpoints of an edge and\n the dictionary of edge attributes for that edge.\n The function must return a number.\n\n method : string, optional (default = 'dijkstra')\n The algorithm to use to compute the path lengths.\n Supported options: 'dijkstra', 'bellman-ford'.\n Other inputs produce a ValueError.\n If `weight` is None, unweighted graph methods are used, and this\n suggestion is ignored.\n\n Returns\n -------\n paths : generator of lists\n A generator of all paths between source and target.\n\n Raises\n ------\n ValueError\n If `method` is not among the supported options.\n\n NetworkXNoPath\n If `target` cannot be reached from `source`.\n\n Examples\n --------\n >>> G = nx.Graph()\n >>> nx.add_path(G, [0, 1, 2])\n >>> nx.add_path(G, [0, 10, 2])\n >>> print([p for p in nx.all_shortest_paths(G, source=0, target=2)])\n [[0, 1, 2], [0, 10, 2]]\n\n Notes\n -----\n There may be many shortest paths between the source and target. If G\n contains zero-weight cycles, this function will not produce all shortest\n paths because doing so would produce infinitely many paths of unbounded\n length -- instead, we only produce the shortest simple paths.\n\n See Also\n --------\n shortest_path\n single_source_shortest_path\n all_pairs_shortest_path\n ", "n_words": 266, "vocab_size": 157, "n_whitespaces": 485, "language": "en" } }, { "id": 56187, "commit_id": "ab322ef9b1bb65887984854dc39b316f98da3b97", "repo": "prefect", "path": "tests/flow_runners/test_kubernetes.py", "file_name": "test_kubernetes.py", "fun_name": "test_assumptions_about_jsonpatch", "commit_message": "Allow Kubernetes users to customize or replace the Job manifest for flow runs\n\nAdding support for either replacing the base `job=` for a KubernetesFlowRunner,\napplying a list of RFC 6902 JSON patches provided by `customizations=`, or both.\nThis implements the core changes, while preserving backwards compatiblity with\nthe current API. Users can still provide `image=`, `namepace=` and other\ntop-level parameters, which are now considered \"shortcuts\" for generating JSON\npatches.\n\nThis is most of the work for PrefectHQ/orion#1900, but does not include the planned CLI updates\nto allow users to preview their jobs. 
Those will come in a separate change.\n\nAlso updating the Kubernetes integration tests to be more reliable, and adding\ndocs about how to get set up for running them.", "code": "def test_assumptions_about_jsonpatch(self):\n \n patch_1 = JsonPatch([{\"op\": \"add\", \"path\": \"/hi\", \"value\": \"there\"}])\n patch_2 = JsonPatch([{\"op\": \"add\", \"path\": \"/hi\", \"value\": \"there\"}])\n patch_3 = JsonPatch([{\"op\": \"add\", \"path\": \"/different\", \"value\": \"there\"}])\n assert patch_1 is not patch_2\n assert patch_1 == patch_2\n assert patch_1 != patch_3\n\n assert list(patch_1) == list(patch_2)\n assert list(patch_1) != list(patch_3)\n\n assert patch_1.apply({}) == patch_2.apply({})\n assert patch_1.apply({}) != patch_3.apply({})\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 132, "n_words": 55, "vocab_size": 24, "complexity": 1, "nloc": 11, "token_counts": 131, "n_ast_nodes": 239, "n_identifiers": 8, "d_id": 11459, "documentation": { "docstring": "Assert our assumptions about the behavior of the jsonpatch library, so we\n can be alert to any upstream changes", "n_words": 19, "vocab_size": 18, "n_whitespaces": 25, "language": "en" } }, { "id": 205460, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/deletion.py", "file_name": "deletion.py", "fun_name": "can_fast_delete", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def can_fast_delete(self, objs, from_field=None):\n \n if from_field and from_field.remote_field.on_delete is not CASCADE:\n return False\n if hasattr(objs, \"_meta\"):\n model = objs._meta.model\n elif hasattr(objs, \"model\") and hasattr(objs, \"_raw_delete\"):\n model = objs.model\n else:\n return False\n if self._has_signal_listeners(model):\n return False\n # The use of from_field comes from the need to avoid cascade back to\n # parent when parent delete is cascading to child.\n opts = model._meta\n return (\n all(\n link == from_field\n for link in opts.concrete_model._meta.parents.values()\n )\n and\n # Foreign keys pointing to this model.\n all(\n related.field.remote_field.on_delete is DO_NOTHING\n for related in get_candidate_relations_to_delete(opts)\n )\n and (\n # Something like generic foreign key.\n not any(\n hasattr(field, \"bulk_related_objects\")\n for field in opts.private_fields\n )\n )\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 471, "n_words": 108, "vocab_size": 70, "complexity": 12, "nloc": 29, "token_counts": 142, "n_ast_nodes": 230, "n_identifiers": 23, "d_id": 51132, "documentation": { "docstring": "\n Determine if the objects in the given queryset-like or single object\n can be fast-deleted. This can be done if there are no cascades, no\n parents and no signal listeners for the object class.\n\n The 'from_field' tells where we are coming from - we need this to\n determine if the objects are in fact to be deleted. 
Allow also\n skipping parent -> child -> parent chain preventing fast delete of\n the child.\n ", "n_words": 71, "vocab_size": 51, "n_whitespaces": 128, "language": "en" } }, { "id": 258953, "commit_id": "1fc86b6aacd89da44a3b4e8abf7c3e2ba4336ffe", "repo": "scikit-learn", "path": "sklearn/utils/__init__.py", "file_name": "__init__.py", "fun_name": "get_chunk_n_rows", "commit_message": "MNT Update black to stable version (#22474)", "code": "def get_chunk_n_rows(row_bytes, *, max_n_rows=None, working_memory=None):\n \n\n if working_memory is None:\n working_memory = get_config()[\"working_memory\"]\n\n chunk_n_rows = int(working_memory * (2**20) // row_bytes)\n if max_n_rows is not None:\n chunk_n_rows = min(chunk_n_rows, max_n_rows)\n if chunk_n_rows < 1:\n warnings.warn(\n \"Could not adhere to working_memory config. \"\n \"Currently %.0fMiB, %.0fMiB required.\"\n % (working_memory, np.ceil(row_bytes * 2**-20))\n )\n chunk_n_rows = 1\n return chunk_n_rows\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 141, "n_words": 55, "vocab_size": 40, "complexity": 4, "nloc": 14, "token_counts": 88, "n_ast_nodes": 148, "n_identifiers": 12, "d_id": 75492, "documentation": { "docstring": "Calculates how many rows can be processed within working_memory.\n\n Parameters\n ----------\n row_bytes : int\n The expected number of bytes of memory that will be consumed\n during the processing of each row.\n max_n_rows : int, default=None\n The maximum return value.\n working_memory : int or float, default=None\n The number of rows to fit inside this number of MiB will be returned.\n When None (default), the value of\n ``sklearn.get_config()['working_memory']`` is used.\n\n Returns\n -------\n int or the value of n_samples\n\n Warns\n -----\n Issues a UserWarning if ``row_bytes`` exceeds ``working_memory`` MiB.\n ", "n_words": 86, "vocab_size": 63, "n_whitespaces": 164, "language": "en" } }, { "id": 130803, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/node.py", "file_name": "node.py", "fun_name": "_prepare_socket_file", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _prepare_socket_file(self, socket_path, default_prefix):\n \n result = socket_path\n is_mac = sys.platform.startswith(\"darwin\")\n if sys.platform == \"win32\":\n if socket_path is None:\n result = f\"tcp://{self._localhost}\" f\":{self._get_unused_port()}\"\n else:\n if socket_path is None:\n result = self._make_inc_temp(\n prefix=default_prefix, directory_name=self._sockets_dir\n )\n else:\n try_to_create_directory(os.path.dirname(socket_path))\n\n # Check socket path length to make sure it's short enough\n maxlen = (104 if is_mac else 108) - 1 # sockaddr_un->sun_path\n if len(result.split(\"://\", 1)[-1].encode(\"utf-8\")) > maxlen:\n raise OSError(\n \"AF_UNIX path length cannot exceed \"\n \"{} bytes: {!r}\".format(maxlen, result)\n )\n return result\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 333, "n_words": 77, "vocab_size": 56, "complexity": 6, "nloc": 20, "token_counts": 127, "n_ast_nodes": 234, "n_identifiers": 25, "d_id": 29375, "documentation": { "docstring": "Prepare the socket file for raylet and plasma.\n\n This method helps to prepare a socket file.\n 1. 
Make the directory if the directory does not exist.\n 2. If the socket file exists, do nothing (this just means we aren't the\n first worker on the node).\n\n Args:\n socket_path (string): the socket file to prepare.\n ", "n_words": 53, "vocab_size": 40, "n_whitespaces": 109, "language": "en" } }, { "id": 260484, "commit_id": "ecef8cb7f44ab6a8438b43eb33f519269511cbbf", "repo": "scikit-learn", "path": "sklearn/pipeline.py", "file_name": "pipeline.py", "fun_name": "make_union", "commit_message": "DOC numpydoc validation for `make_union` (#23909)\n\nCo-authored-by: Adrin Jalali \r\nCo-authored-by: Guillaume Lemaitre ", "code": "def make_union(*transformers, n_jobs=None, verbose=False):\n \n return FeatureUnion(_name_estimators(transformers), n_jobs=n_jobs, verbose=verbose)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 14, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 31, "n_ast_nodes": 48, "n_identifiers": 6, "d_id": 76280, "documentation": { "docstring": "Construct a FeatureUnion from the given transformers.\n\n This is a shorthand for the FeatureUnion constructor; it does not require,\n and does not permit, naming the transformers. Instead, they will be given\n names automatically based on their types. It also does not allow weighting.\n\n Parameters\n ----------\n *transformers : list of estimators\n One or more estimators.\n\n n_jobs : int, default=None\n Number of jobs to run in parallel.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary `\n for more details.\n\n .. versionchanged:: v0.20\n `n_jobs` default changed from 1 to None.\n\n verbose : bool, default=False\n If True, the time elapsed while fitting each transformer will be\n printed as it is completed.\n\n Returns\n -------\n f : FeatureUnion\n A :class:`FeatureUnion` object for concatenating the results of multiple\n transformer objects.\n\n See Also\n --------\n FeatureUnion : Class for concatenating the results of multiple transformer\n objects.\n\n Examples\n --------\n >>> from sklearn.decomposition import PCA, TruncatedSVD\n >>> from sklearn.pipeline import make_union\n >>> make_union(PCA(), TruncatedSVD())\n FeatureUnion(transformer_list=[('pca', PCA()),\n ('truncatedsvd', TruncatedSVD())])\n ", "n_words": 164, "vocab_size": 115, "n_whitespaces": 349, "language": "en" } }, { "id": 101443, "commit_id": "1022651eb8a7741014f5d2ec7cbfe882120dfa5f", "repo": "faceswap", "path": "tools/preview/preview.py", "file_name": "preview.py", "fun_name": "sections", "commit_message": "Bugfix: convert - Gif Writer\n - Fix non-launch error on Gif Writer\n - convert plugins - linting\n - convert/fs_media/preview/queue_manager - typing\n - Change convert items from dict to Dataclass", "code": "def sections(self) -> List[str]:\n \n return sorted(set(plugin.split(\".\")[0] for plugin in self._config.config.sections()\n if plugin.split(\".\")[0] != \"writer\"))\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 53, "n_words": 14, "vocab_size": 14, "complexity": 3, "nloc": 4, "token_counts": 51, "n_ast_nodes": 87, "n_identifiers": 10, "d_id": 20856, "documentation": { "docstring": " list: The sorted section names that exist within the convert Configuration options. 
", "n_words": 12, "vocab_size": 12, "n_whitespaces": 13, "language": "en" } }, { "id": 43253, "commit_id": "f3aacebe502c4ea5dc2b7d29373539296fa037eb", "repo": "airflow", "path": "airflow/providers/sftp/hooks/sftp.py", "file_name": "sftp.py", "fun_name": "get_conn", "commit_message": "Convert sftp hook to use paramiko instead of pysftp (#24512)", "code": "def get_conn(self) -> paramiko.SFTPClient: # type: ignore[override]\n \n if self.conn is None:\n # TODO: remove support for ssh_hook when it is removed from SFTPOperator\n if self.ssh_hook is not None:\n self.conn = self.ssh_hook.get_conn().open_sftp()\n else:\n self.conn = super().get_conn().open_sftp()\n return self.conn\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 122, "n_words": 37, "vocab_size": 28, "complexity": 3, "nloc": 12, "token_counts": 61, "n_ast_nodes": 105, "n_identifiers": 8, "d_id": 7891, "documentation": { "docstring": "\n Opens an SFTP connection to the remote host\n\n :rtype: paramiko.SFTPClient\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 32, "language": "en" } }, { "id": 266768, "commit_id": "a06fa496d3f837cca3c437ab6e9858525633d147", "repo": "ansible", "path": "test/lib/ansible_test/_internal/config.py", "file_name": "config.py", "fun_name": "only_targets", "commit_message": "ansible-test - Code cleanup and refactoring. (#77169)\n\n* Remove unnecessary PyCharm ignores.\r\n* Ignore intentional undefined attribute usage.\r\n* Add missing type hints. Fix existing type hints.\r\n* Fix docstrings and comments.\r\n* Use function to register completion handler.\r\n* Pass strings to display functions.\r\n* Fix CompositeAction handling of dest argument.\r\n* Use consistent types in expressions/assignments.\r\n* Use custom function to keep linters happy.\r\n* Add missing raise for custom exception.\r\n* Clean up key/value type handling in cloud plugins.\r\n* Use dataclass instead of dict for results.\r\n* Add custom type_guard function to check lists.\r\n* Ignore return type that can't be checked (yet).\r\n* Avoid changing types on local variables.", "code": "def only_targets(self, target_type): # type: (t.Type[THostConfig]) -> t.List[THostConfig]\n \n if not self.targets:\n raise Exception('There must be one or more targets.')\n\n assert type_guard(self.targets, target_type)\n\n return t.cast(t.List[THostConfig], self.targets)\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 65, "n_words": 25, "vocab_size": 25, "complexity": 2, "nloc": 5, "token_counts": 44, "n_ast_nodes": 72, "n_identifiers": 10, "d_id": 78571, "documentation": { "docstring": "\n Return a list of target host configurations.\n Requires that there are one or more targets, all the specified type.\n ", "n_words": 19, "vocab_size": 19, "n_whitespaces": 41, "language": "en" } }, { "id": 139297, "commit_id": "b76273357bd1b74757b0aa1d64cee551369d7fa6", "repo": "ray", "path": "rllib/agents/dqn/apex.py", "file_name": "apex.py", "fun_name": "update_replay_sample_priority", "commit_message": "[RLlib] APEX-DQN replay buffer config validation fix. 
(#24588)", "code": "def update_replay_sample_priority(self) -> int:\n \n num_samples_trained_this_itr = 0\n for _ in range(self.learner_thread.outqueue.qsize()):\n if self.learner_thread.is_alive():\n (\n replay_actor,\n priority_dict,\n env_steps,\n agent_steps,\n ) = self.learner_thread.outqueue.get(timeout=0.001)\n if (\n self.config[\"replay_buffer_config\"].get(\"prioritized_replay_alpha\")\n > 0\n ):\n replay_actor.update_priorities.remote(priority_dict)\n num_samples_trained_this_itr += env_steps\n self.update_target_networks(env_steps)\n self._counters[NUM_ENV_STEPS_TRAINED] += env_steps\n self._counters[NUM_AGENT_STEPS_TRAINED] += agent_steps\n self.workers.local_worker().set_global_vars(\n {\"timestep\": self._counters[NUM_ENV_STEPS_TRAINED]}\n )\n else:\n raise RuntimeError(\"The learner thread died in while training\")\n\n self._counters[STEPS_TRAINED_THIS_ITER_COUNTER] = num_samples_trained_this_itr\n self._timers[\"learner_dequeue\"] = self.learner_thread.queue_timer\n self._timers[\"learner_grad\"] = self.learner_thread.grad_timer\n self._timers[\"learner_overall\"] = self.learner_thread.overall_timer\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 451, "n_words": 63, "vocab_size": 48, "complexity": 4, "nloc": 35, "token_counts": 183, "n_ast_nodes": 296, "n_identifiers": 32, "d_id": 31651, "documentation": { "docstring": "Update the priorities of the sample batches with new priorities that are\n computed by the learner thread.\n\n Returns:\n The number of samples trained by the learner thread since the last\n training iteration.\n ", "n_words": 32, "vocab_size": 24, "n_whitespaces": 75, "language": "en" } }, { "id": 138869, "commit_id": "ff575eeafc610b5a71fac37682e388476b2fb8ea", "repo": "ray", "path": "rllib/utils/numpy.py", "file_name": "numpy.py", "fun_name": "make_action_immutable", "commit_message": "[RLlib] Make actions sent by RLlib to the env immutable. 
(#24262)", "code": "def make_action_immutable(obj):\n \n if isinstance(obj, np.ndarray):\n obj.setflags(write=False)\n return obj\n elif isinstance(obj, OrderedDict):\n return MappingProxyType(dict(obj))\n elif isinstance(obj, dict): \n return MappingProxyType(obj)\n else:\n return obj\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 87, "n_words": 21, "vocab_size": 14, "complexity": 4, "nloc": 10, "token_counts": 59, "n_ast_nodes": 96, "n_identifiers": 10, "d_id": 31539, "documentation": { "docstring": "Flags actions immutable to notify users when trying to change\n them.\n\n Can also be used with any tree-like structure containing either\n dictionaries, numpy arrays or already immutable objects per se.\n Note, however that `tree.map_structure()` will in general not \n include the shallow object containing all others and therefore\n immutability will hold only for all objects contained in it.\n Use `tree.traverse(fun, action, top_down=False)` to include\n also the containing object.\n\n Args:\n obj: The object to be made immutable.\n\n Returns:\n The immutable object.\n\n Examples:\n >>> import tree\n >>> import numpy as np\n >>> arr = np.arange(1,10)\n >>> d = dict(a = 1, b = (arr, arr))\n >>> tree.traverse(make_action_immutable, d, top_down=False)\n ", "n_words": 106, "vocab_size": 79, "n_whitespaces": 192, "language": "en" } }, { "id": 135576, "commit_id": "fdc7077dbcd8f54991cd36f6890d219519260dc4", "repo": "ray", "path": "python/ray/tests/test_multi_node_2.py", "file_name": "test_multi_node_2.py", "fun_name": "test_system_config", "commit_message": "[core] Introduce pull based health check to GCS. (#29442)\n\nThis PR introduced the pull-based health check to GCS. This is to fix the false positive issues when GCS is overloaded and incorrectly marks the healthy node as dead.\r\n\r\nThe health check service in each ray component is implemented using gRPC built-in services. This PR focus on the client-side health check.\r\n\r\nThe following features are supported:\r\n\r\n- Initial delay when a new node is added. This is for the new node to be able to ramp up.\r\n- Timeout for an RPC: in case of network issues, we introduce timeout, and the request fails to return within timeout is considered a failure.\r\n- If the health check failed X times consecutively, the node will be considered as dead.\r\n- We also introduce the interval that can be configured between two health checks sent.\r\n\r\nThis client doesn't send two health checks in parallel, so the next one always waits until the first one is finished.\r\n\r\nThis work has reference to k8s's healthiness probe features.\r\n\r\nA feature flag is introduced to turn it on or off and it's turned on in https://github.com/ray-project/ray/pull/29536", "code": "def test_system_config(ray_start_cluster_head):\n \n cluster = ray_start_cluster_head\n worker = cluster.add_node()\n cluster.wait_for_nodes()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 9, "vocab_size": 8, "complexity": 2, "nloc": 12, "token_counts": 83, "n_ast_nodes": 39, "n_identifiers": 6, "d_id": 30660, "documentation": { "docstring": "Checks that the internal configuration setting works.\n\n We set the cluster to timeout nodes after 2 seconds of no timeouts. 
We\n then remove a node, wait for 1 second to check that the cluster is out\n of sync, then wait another 2 seconds (giving 1 second of leeway) to check\n that the client has timed out. We also check to see if the config is set.\n ", "n_words": 66, "vocab_size": 43, "n_whitespaces": 81, "language": "en" } }, { "id": 100998, "commit_id": "91fecc47b2157d684ab9c219a860df51543222a3", "repo": "faceswap", "path": "lib/utils.py", "file_name": "utils.py", "fun_name": "get_backend", "commit_message": "lib.Utils - add DPI detector", "code": "def get_backend() -> ValidBackends:\n \n return _FS_BACKEND\n\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 9, "token_counts": 9, "n_ast_nodes": 18, "n_identifiers": 3, "d_id": 20441, "documentation": { "docstring": " Get the backend that Faceswap is currently configured to use.\n\n Returns\n -------\n str\n The backend configuration in use by Faceswap\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 40, "language": "en" } }, { "id": 204863, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/base/operations.py", "file_name": "operations.py", "fun_name": "quote_name", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def quote_name(self, name):\n \n raise NotImplementedError(\n \"subclasses of BaseDatabaseOperations may require a quote_name() method\"\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 46, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 4, "token_counts": 13, "n_ast_nodes": 25, "n_identifiers": 4, "d_id": 50940, "documentation": { "docstring": "\n Return a quoted version of the given table, index, or column name. Do\n not quote the given name if it's already been quoted.\n ", "n_words": 23, "vocab_size": 21, "n_whitespaces": 45, "language": "en" } }, { "id": 267949, "commit_id": "3eb0485dd92c88cc92152d3656d94492db44b183", "repo": "ansible", "path": "test/lib/ansible_test/_internal/connections.py", "file_name": "connections.py", "fun_name": "inspect", "commit_message": "ansible-test - Use more native type hints. 
(#78435)\n\n* ansible-test - Use more native type hints.\r\n\r\nSimple search and replace to switch from comments to native type hints for return types of functions with no arguments.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of simple single-line function annotation type comments to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of single-line function annotation type comments with default values to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nManual conversion of type annotation comments for functions which have pylint directives.", "code": "def inspect(self) -> DockerInspect:\n \n return docker_inspect(self.args, self.container_id)\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 19, "n_ast_nodes": 32, "n_identifiers": 6, "d_id": 79224, "documentation": { "docstring": "Inspect the container and return a DockerInspect instance with the results.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 59285, "commit_id": "2f2faf370f602cfd9df307ff71e785c1c9d6a538", "repo": "prefect", "path": "src/prefect/client/schemas.py", "file_name": "schemas.py", "fun_name": "to_state_create", "commit_message": "Update engine to use new results (#7094)\n\n# Conflicts:\n#\t.github/workflows/integration-tests.yaml\n#\tsrc/prefect/deployments.py\n#\tsrc/prefect/engine.py", "code": "def to_state_create(self) -> schemas.actions.StateCreate:\n \n from prefect.results import BaseResult\n\n return schemas.actions.StateCreate(\n type=self.type,\n name=self.name,\n message=self.message,\n data=self.data if isinstance(self.data, BaseResult) else None,\n state_details=self.state_details,\n )\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 104, "n_words": 21, "vocab_size": 21, "complexity": 2, "nloc": 16, "token_counts": 67, "n_ast_nodes": 99, "n_identifiers": 14, "d_id": 11889, "documentation": { "docstring": "\n Convert this state to a `StateCreate` type which can be used to set the state of\n a run in the API.\n\n This method will drop this state's `data` if it is not a result type. Only\n results should be sent to the API. 
Other data is only available locally.\n ", "n_words": 49, "vocab_size": 38, "n_whitespaces": 85, "language": "en" } }, { "id": 272179, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/integration_test/forwardprop_test.py", "file_name": "forwardprop_test.py", "fun_name": "_jvp", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _jvp(f, primals, tangents):\n \n with tf.autodiff.ForwardAccumulator(primals, tangents) as acc:\n primals_out = f(*primals)\n return primals_out, acc.jvp(\n primals_out, unconnected_gradients=tf.UnconnectedGradients.ZERO\n )\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 44, "n_words": 18, "vocab_size": 17, "complexity": 1, "nloc": 6, "token_counts": 48, "n_ast_nodes": 78, "n_identifiers": 13, "d_id": 80971, "documentation": { "docstring": "Compute the jacobian of `f` at `primals` multiplied by `tangents`.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 218395, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/inspect.py", "file_name": "inspect.py", "fun_name": "_signature_bound_method", "commit_message": "add python 3.10.4 for windows", "code": "def _signature_bound_method(sig):\n \n\n params = tuple(sig.parameters.values())\n\n if not params or params[0].kind in (_VAR_KEYWORD, _KEYWORD_ONLY):\n raise ValueError('invalid method signature')\n\n kind = params[0].kind\n if kind in (_POSITIONAL_OR_KEYWORD, _POSITIONAL_ONLY):\n # Drop first parameter:\n # '(p1, p2[, ...])' -> '(p2[, ...])'\n params = params[1:]\n else:\n if kind is not _VAR_POSITIONAL:\n # Unless we add a new parameter type we never\n # get here\n raise ValueError('invalid argument type')\n # It's a var-positional parameter.\n # Do nothing. 
'(*args[, ...])' -> '(*args[, ...])'\n\n return sig.replace(parameters=params)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 180, "n_words": 77, "vocab_size": 52, "complexity": 5, "nloc": 11, "token_counts": 86, "n_ast_nodes": 147, "n_identifiers": 14, "d_id": 55281, "documentation": { "docstring": "Private helper to transform signatures for unbound\n functions to bound methods.\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 17, "language": "en" } }, { "id": 196366, "commit_id": "59d22b6bb7287613d598611027f640d068ca5748", "repo": "sympy", "path": "sympy/matrices/common.py", "file_name": "common.py", "fun_name": "subs", "commit_message": "Moved imports to higher level", "code": "def subs(self, *args, **kwargs): # should mirror core.basic.subs\n \n\n if len(args) == 1 and not isinstance(args[0], (dict, set)) and iter(args[0]) and not is_sequence(args[0]):\n args = (list(args[0]),)\n\n return self.applyfunc(lambda x: x.subs(*args, **kwargs))\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 64, "n_words": 30, "vocab_size": 27, "complexity": 5, "nloc": 4, "token_counts": 83, "n_ast_nodes": 130, "n_identifiers": 13, "d_id": 47866, "documentation": { "docstring": "Return a new matrix with subs applied to each entry.\n\n Examples\n ========\n\n >>> from sympy.abc import x, y\n >>> from sympy import SparseMatrix, Matrix\n >>> SparseMatrix(1, 1, [x])\n Matrix([[x]])\n >>> _.subs(x, y)\n Matrix([[y]])\n >>> Matrix(_).subs(y, x)\n Matrix([[x]])\n ", "n_words": 37, "vocab_size": 30, "n_whitespaces": 114, "language": "en" } }, { "id": 307722, "commit_id": "c0b04e9f91a34dfee0ceb12770148317fe3e2cbf", "repo": "core", "path": "homeassistant/components/search/__init__.py", "file_name": "__init__.py", "fun_name": "_resolve_script", "commit_message": "Sort some code in the search integration (#78519)", "code": "def _resolve_script(self, script_entity_id) -> None:\n \n for entity in script.entities_in_script(self.hass, script_entity_id):\n self._add_or_resolve(\"entity\", entity)\n\n for device in script.devices_in_script(self.hass, script_entity_id):\n self._add_or_resolve(\"device\", device)\n\n for area in script.areas_in_script(self.hass, script_entity_id):\n self._add_or_resolve(\"area\", area)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 87, "n_words": 26, "vocab_size": 20, "complexity": 4, "nloc": 11, "token_counts": 76, "n_ast_nodes": 120, "n_identifiers": 12, "d_id": 106489, "documentation": { "docstring": "Resolve a script.\n\n Will only be called if script is an entry point.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 27, "language": "en" } }, { "id": 183781, "commit_id": "bfb962bacf274373e5706090cd854b6aa0857270", "repo": "textual", "path": "tests/test_xterm_parser.py", "file_name": "test_xterm_parser.py", "fun_name": "test_escape_sequence_resulting_in_multiple_keypresses", "commit_message": "Backtracking unknown escape sequences, various tests for XTermParser", "code": "def test_escape_sequence_resulting_in_multiple_keypresses(parser):\n \n events = list(parser.feed(\"\\x1b[2;4~\"))\n assert len(events) == 2\n assert events[0].key == \"escape\"\n assert events[1].key == \"shift+insert\"\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 32, "n_words": 17, "vocab_size": 13, "complexity": 1, "nloc": 5, "token_counts": 42, "n_ast_nodes": 75, "n_identifiers": 7, "d_id": 44334, "documentation": { "docstring": "Some sequences are interpreted as more than 1 keypress", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 151672, "commit_id": "442467e8aed2ff639bfba04e7a2f6e175f774af1", "repo": "freqtrade", "path": "freqtrade/rpc/api_server/webserver.py", "file_name": "webserver.py", "fun_name": "_api_startup_event", "commit_message": "remove old comments and code", "code": "async def _api_startup_event(self):\n \n if not ApiServer._message_stream:\n ApiServer._message_stream = MessageStream()\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 34, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 3, "token_counts": 19, "n_ast_nodes": 36, "n_identifiers": 5, "d_id": 35096, "documentation": { "docstring": "\n Creates the MessageStream class on startup\n so it has access to the same event loop\n as uvicorn\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 46, "language": "en" } }, { "id": 259235, "commit_id": "7f0006c8aad1a09621ad19c3db19c3ff0555a183", "repo": "scikit-learn", "path": "sklearn/preprocessing/tests/test_encoders.py", "file_name": "test_encoders.py", "fun_name": "test_ohe_infrequent_three_levels_drop_frequent", "commit_message": "ENH Adds infrequent categories to OneHotEncoder (#16018)\n\n* ENH Completely adds infrequent categories\r\n\r\n* STY Linting\r\n\r\n* STY Linting\r\n\r\n* DOC Improves wording\r\n\r\n* DOC Lint\r\n\r\n* BUG Fixes\r\n\r\n* CLN Address comments\r\n\r\n* CLN Address comments\r\n\r\n* DOC Uses math to description float min_frequency\r\n\r\n* DOC Adds comment regarding drop\r\n\r\n* BUG Fixes method name\r\n\r\n* DOC Clearer docstring\r\n\r\n* TST Adds more tests\r\n\r\n* FIX Fixes mege\r\n\r\n* CLN More pythonic\r\n\r\n* CLN Address comments\r\n\r\n* STY Flake8\r\n\r\n* CLN Address comments\r\n\r\n* DOC Fix\r\n\r\n* MRG\r\n\r\n* WIP\r\n\r\n* ENH Address comments\r\n\r\n* STY Fix\r\n\r\n* ENH Use functiion call instead of property\r\n\r\n* ENH Adds counts feature\r\n\r\n* CLN Rename variables\r\n\r\n* DOC More details\r\n\r\n* CLN Remove unneeded line\r\n\r\n* CLN Less lines is less complicated\r\n\r\n* CLN Less diffs\r\n\r\n* CLN Improves readiabilty\r\n\r\n* BUG Fix\r\n\r\n* CLN Address comments\r\n\r\n* TST Fix\r\n\r\n* CLN Address comments\r\n\r\n* CLN Address comments\r\n\r\n* CLN Move docstring to userguide\r\n\r\n* DOC Better wrapping\r\n\r\n* TST Adds test to handle_unknown='error'\r\n\r\n* ENH Spelling error in docstring\r\n\r\n* BUG Fixes counter with nan values\r\n\r\n* BUG Removes unneeded test\r\n\r\n* BUG Fixes issue\r\n\r\n* ENH Sync with main\r\n\r\n* DOC Correct settings\r\n\r\n* DOC Adds docstring\r\n\r\n* DOC Immprove user guide\r\n\r\n* DOC Move to 1.0\r\n\r\n* DOC Update docs\r\n\r\n* TST Remove test\r\n\r\n* DOC Update docstring\r\n\r\n* STY Linting\r\n\r\n* DOC Address comments\r\n\r\n* ENH Neater code\r\n\r\n* DOC Update explaination for auto\r\n\r\n* Update sklearn/preprocessing/_encoders.py\r\n\r\nCo-authored-by: Roman Yurchak \r\n\r\n* TST Uses docstring instead of comments\r\n\r\n* TST Remove call to fit\r\n\r\n* TST Spelling error\r\n\r\n* ENH Adds support for drop + infrequent categories\r\n\r\n* ENH Adds infrequent_if_exist option\r\n\r\n* DOC Address comments for user 
guide\r\n\r\n* DOC Address comments for whats_new\r\n\r\n* DOC Update docstring based on comments\r\n\r\n* CLN Update test with suggestions\r\n\r\n* ENH Adds computed property infrequent_categories_\r\n\r\n* DOC Adds where the infrequent column is located\r\n\r\n* TST Adds more test for infrequent_categories_\r\n\r\n* DOC Adds docstring for _compute_drop_idx\r\n\r\n* CLN Moves _convert_to_infrequent_idx into its own method\r\n\r\n* TST Increases test coverage\r\n\r\n* TST Adds failing test\r\n\r\n* CLN Careful consideration of dropped and inverse_transform\r\n\r\n* STY Linting\r\n\r\n* DOC Adds docstrinb about dropping infrequent\r\n\r\n* DOC Uses only\r\n\r\n* DOC Numpydoc\r\n\r\n* TST Includes test for get_feature_names_out\r\n\r\n* DOC Move whats new\r\n\r\n* DOC Address docstring comments\r\n\r\n* DOC Docstring changes\r\n\r\n* TST Better comments\r\n\r\n* TST Adds check for handle_unknown='ignore' for infrequent\r\n\r\n* CLN Make _infrequent_indices private\r\n\r\n* CLN Change min_frequency default to None\r\n\r\n* DOC Adds comments\r\n\r\n* ENH adds support for max_categories=1\r\n\r\n* ENH Describe lexicon ordering for ties\r\n\r\n* DOC Better docstring\r\n\r\n* STY Fix\r\n\r\n* CLN Error when explicity dropping an infrequent category\r\n\r\n* STY Grammar\r\n\r\nCo-authored-by: Joel Nothman \r\nCo-authored-by: Roman Yurchak \r\nCo-authored-by: Guillaume Lemaitre ", "code": "def test_ohe_infrequent_three_levels_drop_frequent(drop):\n \n\n X_train = np.array([[\"a\"] * 5 + [\"b\"] * 20 + [\"c\"] * 10 + [\"d\"] * 3]).T\n ohe = OneHotEncoder(\n handle_unknown=\"infrequent_if_exist\", sparse=False, max_categories=3, drop=drop\n ).fit(X_train)\n\n X_test = np.array([[\"b\"], [\"c\"], [\"d\"]])\n assert_allclose([[0, 0], [1, 0], [0, 1]], ohe.transform(X_test))\n\n # Check handle_unknown=\"ignore\"\n ohe.set_params(handle_unknown=\"ignore\").fit(X_train)\n msg = \"Found unknown categories\"\n with pytest.warns(UserWarning, match=msg):\n X_trans = ohe.transform([[\"b\"], [\"e\"]])\n\n assert_allclose([[0, 0], [0, 0]], X_trans)\n\n\n@pytest.mark.parametrize(\"drop\", [[\"a\"], [\"d\"]])", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"drop\", [[\"a\"], [\"d\"]])", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 109, "n_words": 63, "vocab_size": 49, "complexity": 1, "nloc": 12, "token_counts": 176, "n_ast_nodes": 322, "n_identifiers": 24, "d_id": 75667, "documentation": { "docstring": "Test three levels and dropping the frequent category.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 130421, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/autoscaler/_private/cli_logger.py", "file_name": "cli_logger.py", "fun_name": "indented", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def indented(self):\n \n cli_logger = self\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 6, "token_counts": 20, "n_ast_nodes": 18, "n_identifiers": 3, "d_id": 29267, "documentation": { "docstring": "Context manager that starts an indented block of output.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 20074, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", 
"path": "pipenv/patched/notpip/_vendor/distro.py", "file_name": "distro.py", "fun_name": "lsb_release_info", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def lsb_release_info(self):\n # type: () -> Dict[str, str]\n \n return self._lsb_release_info\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 20, "n_identifiers": 3, "d_id": 3219, "documentation": { "docstring": "\n Return a dictionary containing key-value pairs for the information\n items from the lsb_release command data source of the OS\n distribution.\n\n For details, see :func:`distro.lsb_release_info`.\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 60, "language": "en" } }, { "id": 128154, "commit_id": "9c39a28ba2f6221ffd8327fa21cb8294f0390fee", "repo": "ray", "path": "python/ray/data/tests/test_batch_mapper.py", "file_name": "test_batch_mapper.py", "fun_name": "test_batch_mapper_numpy_data_format", "commit_message": "[AIR][Numpy] Add numpy narrow waist to `Preprocessor` and `BatchMapper` (#28418)\n\nCo-authored-by: Eric Liang \r\nCo-authored-by: Clark Zinzow \r\nCo-authored-by: Amog Kamsetty ", "code": "def test_batch_mapper_numpy_data_format(ds_with_expected_pandas_numpy_df):\n \n ds, expected_df, expected_numpy_df = ds_with_expected_pandas_numpy_df\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 20, "token_counts": 145, "n_ast_nodes": 23, "n_identifiers": 5, "d_id": 28614, "documentation": { "docstring": "Tests batch mapper functionality for numpy data format.\n\n Note:\n For single column pandas dataframes, we automatically convert it to\n single column tensor with column name as `__value__`.\n ", "n_words": 27, "vocab_size": 24, "n_whitespaces": 47, "language": "en" } }, { "id": 83836, "commit_id": "803982e87254e3b1ebcb16ed795e224afceea3a3", "repo": "zulip", "path": "zerver/tests/test_subs.py", "file_name": "test_subs.py", "fun_name": "test_realm_admin_remove_others_from_public_stream", "commit_message": "message_flags: Short-circuit if no messages changed.\n\nOmit sending an event, and updating the database, if there are no\nmatching messages.", "code": "def test_realm_admin_remove_others_from_public_stream(self) -> None:\n \n result = self.attempt_unsubscribe_of_principal(\n query_count=15,\n target_users=[self.example_user(\"cordelia\")],\n is_realm_admin=True,\n is_subbed=True,\n invite_only=False,\n target_users_subbed=True,\n )\n json = self.assert_json_success(result)\n self.assert_length(json[\"removed\"], 1)\n self.assert_length(json[\"not_removed\"], 0)\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 
129, "n_words": 21, "vocab_size": 20, "complexity": 1, "nloc": 16, "token_counts": 76, "n_ast_nodes": 120, "n_identifiers": 14, "d_id": 17731, "documentation": { "docstring": "\n If you're a realm admin, you can remove people from public streams, even\n those you aren't on.\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 39, "language": "en" } }, { "id": 217520, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/genericpath.py", "file_name": "genericpath.py", "fun_name": "_splitext", "commit_message": "add python 3.10.4 for windows", "code": "def _splitext(p, sep, altsep, extsep):\n \n # NOTE: This code must work for text and bytes strings.\n\n sepIndex = p.rfind(sep)\n if altsep:\n altsepIndex = p.rfind(altsep)\n sepIndex = max(sepIndex, altsepIndex)\n\n dotIndex = p.rfind(extsep)\n if dotIndex > sepIndex:\n # skip all leading dots\n filenameIndex = sepIndex + 1\n while filenameIndex < dotIndex:\n if p[filenameIndex:filenameIndex+1] != extsep:\n return p[:dotIndex], p[dotIndex:]\n filenameIndex += 1\n\n return p, p[:0]\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 155, "n_words": 62, "vocab_size": 48, "complexity": 5, "nloc": 13, "token_counts": 97, "n_ast_nodes": 154, "n_identifiers": 11, "d_id": 54832, "documentation": { "docstring": "Split the extension from a pathname.\n\n Extension is everything from the last dot to the end, ignoring\n leading dots. Returns \"(root, ext)\"; ext may be empty.", "n_words": 26, "vocab_size": 23, "n_whitespaces": 32, "language": "en" } }, { "id": 169100, "commit_id": "fe9e5d023e20304ad1bdfa1da53f3af452c72a00", "repo": "pandas", "path": "pandas/conftest.py", "file_name": "conftest.py", "fun_name": "any_numeric_dtype", "commit_message": "REGR: .describe on unsigned dtypes results in object (#48473)", "code": "def any_numeric_dtype(request):\n \n return request.param\n\n\n# categoricals are handled separately\n_any_skipna_inferred_dtype = [\n (\"string\", [\"a\", np.nan, \"c\"]),\n (\"string\", [\"a\", pd.NA, \"c\"]),\n (\"mixed\", [\"a\", pd.NaT, \"c\"]), # pd.NaT not considered valid by is_string_array\n (\"bytes\", [b\"a\", np.nan, b\"c\"]),\n (\"empty\", [np.nan, np.nan, np.nan]),\n (\"empty\", []),\n (\"mixed-integer\", [\"a\", np.nan, 2]),\n (\"mixed\", [\"a\", np.nan, 2.0]),\n (\"floating\", [1.0, np.nan, 2.0]),\n (\"integer\", [1, np.nan, 2]),\n (\"mixed-integer-float\", [1, np.nan, 2.0]),\n (\"decimal\", [Decimal(1), np.nan, Decimal(2)]),\n (\"boolean\", [True, np.nan, False]),\n (\"boolean\", [True, pd.NA, False]),\n (\"datetime64\", [np.datetime64(\"2013-01-01\"), np.nan, np.datetime64(\"2018-01-01\")]),\n (\"datetime\", [Timestamp(\"20130101\"), np.nan, Timestamp(\"20180101\")]),\n (\"date\", [date(2013, 1, 1), np.nan, date(2018, 1, 1)]),\n # The following two dtypes are commented out due to GH 23554\n # ('complex', [1 + 1j, np.nan, 2 + 2j]),\n # ('timedelta64', [np.timedelta64(1, 'D'),\n # np.nan, np.timedelta64(2, 'D')]),\n (\"timedelta\", [timedelta(1), np.nan, timedelta(2)]),\n (\"time\", [time(1), np.nan, time(2)]),\n (\"period\", [Period(2013), pd.NaT, Period(2018)]),\n (\"interval\", [Interval(0, 1), np.nan, Interval(0, 2)]),\n]\nids, _ = zip(*_any_skipna_inferred_dtype) # use inferred type as fixture-id\n\n\n@pytest.fixture(params=_any_skipna_inferred_dtype, ids=ids)", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": 
"@pytest.fixture(params=_any_skipna_inferred_dtype, ids=ids)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 244, "n_words": 149, "vocab_size": 103, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 591, "n_identifiers": 23, "d_id": 40391, "documentation": { "docstring": "\n Parameterized fixture for all numeric dtypes.\n\n * int\n * 'int8'\n * 'uint8'\n * 'int16'\n * 'uint16'\n * 'int32'\n * 'uint32'\n * 'int64'\n * 'uint64'\n * float\n * 'float32'\n * 'float64'\n * complex\n * 'complex64'\n * 'complex128'\n * 'UInt8'\n * 'Int8'\n * 'UInt16'\n * 'Int16'\n * 'UInt32'\n * 'Int32'\n * 'UInt64'\n * 'Int64'\n * 'Float32'\n * 'Float64'\n ", "n_words": 56, "vocab_size": 32, "n_whitespaces": 138, "language": "en" } }, { "id": 243751, "commit_id": "2ae55ccbdad9c842929fb238ea1eb81d1f999024", "repo": "Pillow", "path": "src/PIL/ImageFont.py", "file_name": "ImageFont.py", "fun_name": "get_variation_axes", "commit_message": "Improve exception traceback readability", "code": "def get_variation_axes(self):\n \n try:\n axes = self.font.getvaraxes()\n except AttributeError as e:\n msg = \"FreeType 2.9.1 or greater is required\"\n raise NotImplementedError(msg) from e\n for axis in axes:\n axis[\"name\"] = axis[\"name\"].replace(b\"\\x00\", b\"\")\n return axes\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 111, "n_words": 32, "vocab_size": 29, "complexity": 3, "nloc": 9, "token_counts": 57, "n_ast_nodes": 100, "n_identifiers": 11, "d_id": 70111, "documentation": { "docstring": "\n :returns: A list of the axes in a variation font.\n :exception OSError: If the font is not a variation font.\n ", "n_words": 20, "vocab_size": 16, "n_whitespaces": 42, "language": "en" } }, { "id": 38044, "commit_id": "f04257fdbcb6ecb5a9bef75f4c2a8d2e8b5a6209", "repo": "transformers", "path": "src/transformers/models/tapas/modeling_tf_tapas.py", "file_name": "modeling_tf_tapas.py", "fun_name": "flatten", "commit_message": "Add test to ensure models can take int64 inputs (#17210)\n\n* Add test to ensure models can take int64 inputs\r\n\r\n* is_integer is an attribute, not a method\r\n\r\n* Fix test when some inputs aren't tensors\r\n\r\n* Add casts to blenderbot and blenderbot-small\r\n\r\n* Add casts to the other failing models", "code": "def flatten(index, name=\"segmented_flatten\"):\n \n batch_size = tf.reduce_prod(index.batch_shape())\n offset = tf.range(batch_size) * index.num_segments\n offset = tf.reshape(offset, index.batch_shape())\n for _ in range(index.batch_dims, index.indices.shape.rank):\n offset = tf.expand_dims(offset, -1)\n\n indices = tf.cast(offset, index.indices.dtype) + index.indices\n return IndexMap(indices=tf.reshape(indices, [-1]), num_segments=index.num_segments * batch_size, batch_dims=0)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 65, "n_words": 37, "vocab_size": 30, "complexity": 2, "nloc": 8, "token_counts": 124, "n_ast_nodes": 193, "n_identifiers": 20, "d_id": 6902, "documentation": { "docstring": "\n Flattens a batched index map to a 1d index map. This operation relabels the segments to keep batch elements\n distinct. The k-th batch element will have indices shifted by `num_segments` * (k - 1). 
The result is a tensor with\n `num_segments` multiplied by the number of elements in the batch.\n\n Args:\n index: IndexMap to flatten.\n name: Name for the TensorFlow operation.\n\n Returns:\n The flattened IndexMap.\n ", "n_words": 65, "vocab_size": 51, "n_whitespaces": 99, "language": "en" } }, { "id": 81243, "commit_id": "550d9d5e42a605a23cb540584bf439c07c4185d4", "repo": "awx", "path": "awx/main/tests/functional/api/test_events.py", "file_name": "test_events.py", "fun_name": "test_job_job_events_children_summary_is_tree", "commit_message": "detect if job events are tree-like and collapsable in the UI", "code": "def test_job_job_events_children_summary_is_tree(get, organization_factory, job_template_factory):\n \n objs = organization_factory(\"org\", superusers=['admin'])\n jt = job_template_factory(\"jt\", organization=objs.organization, inventory='test_inv', project='test_proj').job_template\n job = jt.create_unified_job()\n url = reverse('api:job_job_events_children_summary', kwargs={'pk': job.pk})\n response = get(url, user=objs.superusers.admin, expect=200)\n assert response.data[\"event_processing_finished\"] == False\n \n JobEvent.create_from_data(\n job_id=job.pk, uuid='uuid1', parent_uuid='', event=\"playbook_on_start\", counter=1, stdout='a' * 1024, job_created=job.created\n ).save()\n JobEvent.create_from_data(\n job_id=job.pk, uuid='uuid2', parent_uuid='uuid1', event=\"playbook_on_play_start\", counter=2, stdout='a' * 1024, job_created=job.created\n ).save()\n JobEvent.create_from_data(\n job_id=job.pk, uuid='uuid3', parent_uuid='uuid2', event=\"playbook_on_task_start\", counter=3, stdout='a' * 1024, job_created=job.created\n ).save()\n JobEvent.create_from_data(job_id=job.pk, uuid='uuid4', parent_uuid='', event='verbose', counter=4, stdout='a' * 1024, job_created=job.created).save()\n JobEvent.create_from_data(\n job_id=job.pk, uuid='uuid5', parent_uuid='uuid1', event=\"playbook_on_play_start\", counter=5, stdout='a' * 1024, job_created=job.created\n ).save()\n JobEvent.create_from_data(\n job_id=job.pk, uuid='uuid6', parent_uuid='uuid2', event=\"playbook_on_task_start\", counter=6, stdout='a' * 1024, job_created=job.created\n ).save()\n job.emitted_events = job.get_event_queryset().count()\n job.status = \"successful\"\n job.save()\n url = reverse('api:job_job_events_children_summary', kwargs={'pk': job.pk})\n response = get(url, user=objs.superusers.admin, expect=200)\n assert response.data[\"children_summary\"] == {}\n assert response.data[\"meta_event_nested_uuid\"] == {}\n assert response.data[\"event_processing_finished\"] == True\n assert response.data[\"is_tree\"] == False\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 248, "n_words": 128, "vocab_size": 65, "complexity": 1, "nloc": 40, "token_counts": 442, "n_ast_nodes": 722, "n_identifiers": 37, "d_id": 17173, "documentation": { "docstring": "\n children_summary should return {is_tree: False} if the event structure is not tree-like\n \n E1\n E2\n E3\n E4 (verbose)\n E5\n E6 <-- parent is E2, but comes after another \"branch\" E5\n ", "n_words": 29, "vocab_size": 27, "n_whitespaces": 74, "language": "en" } }, { "id": 93873, "commit_id": "12bb908ad28a4c1b6564253053a6f65ba4cdded9", "repo": "sentry", "path": "src/sentry/snuba/discover.py", "file_name": "discover.py", "fun_name": "normalize_span_op_histogram_results", "commit_message": "feat(spans): Add a span count distribution endpoint (#36957)\n\n* hack histogram endpoint 
to serve span counts\r\n\r\n* wip\r\n\r\n* clean up\r\n\r\n* more clean up\r\n\r\n* clean up\r\n\r\n* fixes and test\r\n\r\n* address comments", "code": "def normalize_span_op_histogram_results(span_op, histogram_params, results):\n \n\n histogram_column = get_span_count_histogram_column(span_op, histogram_params)\n bin_name = get_function_alias(histogram_column)\n\n # zerofill and rename the columns while making sure to adjust for precision\n bucket_map = {}\n for row in results[\"data\"]:\n # we expect the bin the be an integer, this is because all floating\n # point values are rounded during the calculation\n bucket = int(row[bin_name])\n bucket_map[bucket] = row[\"count\"]\n\n new_data = []\n for i in range(histogram_params.num_buckets):\n bucket = histogram_params.start_offset + histogram_params.bucket_size * i\n row = {\"bin\": bucket, \"count\": bucket_map.get(bucket, 0)}\n if histogram_params.multiplier > 1:\n row[\"bin\"] /= float(histogram_params.multiplier)\n new_data.append(row)\n\n return new_data\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 184, "n_words": 90, "vocab_size": 71, "complexity": 4, "nloc": 15, "token_counts": 124, "n_ast_nodes": 203, "n_identifiers": 22, "d_id": 19022, "documentation": { "docstring": "\n Normalizes the span histogram results by renaming the columns to key and bin\n and make sure to zerofill any missing values.\n\n :param str span_op: The span op for which you want to generate the\n histograms for.\n :param HistogramParams histogram_params: The histogram parameters used.\n :param any results: The results from the histogram query that may be missing\n bins and needs to be normalized.\n ", "n_words": 62, "vocab_size": 43, "n_whitespaces": 95, "language": "en" } }, { "id": 257972, "commit_id": "15a59fd04071dc1e13c256680407ba1b63e7b1f2", "repo": "haystack", "path": "haystack/nodes/extractor/entity.py", "file_name": "entity.py", "fun_name": "simplify_ner_for_qa", "commit_message": "feat: Updated EntityExtractor to handle long texts and added better postprocessing (#3154)\n\n\r\n\r\n* Remove dependence on HuggingFace TokenClassificationPipeline and group all postprocessing functions under one class\r\n\r\n* Added copyright notice for HF and deepset to entity file to acknowledge that a lot of the postprocessing parts came from the transformers library.\r\n\r\n* Fixed text squishing problem. Added additional unit test for it.\r\n\r\nCo-authored-by: ju-gu ", "code": "def simplify_ner_for_qa(output):\n \n compact_output = []\n for answer in output[\"answers\"]:\n\n entities = []\n for entity in answer.meta[\"entities\"]:\n if (\n entity[\"start\"] >= answer.offsets_in_document[0].start\n and entity[\"end\"] <= answer.offsets_in_document[0].end\n ):\n entities.append(entity[\"word\"])\n\n compact_output.append({\"answer\": answer.answer, \"entities\": entities})\n return compact_output\n\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 133, "n_words": 33, "vocab_size": 28, "complexity": 5, "nloc": 12, "token_counts": 90, "n_ast_nodes": 152, "n_identifiers": 11, "d_id": 75176, "documentation": { "docstring": "\n Returns a simplified version of the output dictionary\n with the following structure:\n [\n {\n answer: { ... }\n entities: [ { ... 
}, {} ]\n }\n ]\n The entities included are only the ones that overlap with\n the answer itself.\n\n :param output: Output from a query pipeline\n ", "n_words": 47, "vocab_size": 36, "n_whitespaces": 108, "language": "en" } }, { "id": 181816, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tpot/base.py", "file_name": "base.py", "fun_name": "_combine_individual_stats", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def _combine_individual_stats(self, operator_count, cv_score, individual_stats):\n \n stats = deepcopy(\n individual_stats\n ) # Deepcopy, since the string reference to predecessor should be cloned\n stats[\"operator_count\"] = operator_count\n stats[\"internal_cv_score\"] = cv_score\n return stats\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 83, "n_words": 29, "vocab_size": 26, "complexity": 1, "nloc": 7, "token_counts": 32, "n_ast_nodes": 55, "n_identifiers": 7, "d_id": 43600, "documentation": { "docstring": "Combine the stats with operator count and cv score and preprare to be written to _evaluated_individuals\n\n Parameters\n ----------\n operator_count: int\n number of components in the pipeline\n cv_score: float\n internal cross validation score\n individual_stats: dictionary\n dict containing statistics about the individual. currently:\n 'generation': generation in which the individual was evaluated\n 'mutation_count': number of mutation operations applied to the individual and its predecessor cumulatively\n 'crossover_count': number of crossover operations applied to the individual and its predecessor cumulatively\n 'predecessor': string representation of the individual\n\n Returns\n -------\n stats: dictionary\n dict containing the combined statistics:\n 'operator_count': number of operators in the pipeline\n 'internal_cv_score': internal cross validation score\n and all the statistics contained in the 'individual_stats' parameter\n ", "n_words": 111, "vocab_size": 66, "n_whitespaces": 295, "language": "en" } }, { "id": 259066, "commit_id": "f14af688b7e77ecb6df9dfee93ec39b6c0334b86", "repo": "scikit-learn", "path": "sklearn/linear_model/tests/test_ridge.py", "file_name": "test_ridge.py", "fun_name": "test_ridgecv_normalize_deprecated", "commit_message": "FIX Make Ridge*CV warn about rescaling alphas with scaling (#22585)", "code": "def test_ridgecv_normalize_deprecated(Estimator):\n \n X = np.array([[1, -1], [1, 1]])\n y = np.array([0, 1])\n\n estimator = Estimator(normalize=True)\n\n with pytest.warns(\n FutureWarning, match=r\"Set parameter alphas to: original_alphas \\* n_samples\"\n ):\n estimator.fit(X, y)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 60, "n_words": 28, "vocab_size": 26, "complexity": 1, "nloc": 8, "token_counts": 68, "n_ast_nodes": 108, "n_identifiers": 13, "d_id": 75551, "documentation": { "docstring": "Check that the normalize deprecation warning mentions the rescaling of alphas\n\n Non-regression test for issue #22540\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 22, "language": "en" } }, { "id": 78250, "commit_id": "d967eccef28ce47f60d26be1c28f2d83a25f40b0", "repo": "wagtail", "path": "wagtail/contrib/settings/models.py", "file_name": "models.py", "fun_name": "load", "commit_message": "Add 
generic settings to compliment site-specific settings (#8327)", "code": "def load(cls, request_or_site=None):\n \n\n # We can only cache on the request, so if there is no request then\n # we know there's nothing in the cache.\n if request_or_site is None or isinstance(request_or_site, Site):\n return cls._get_or_create()\n\n # Check if we already have this in the cache and return it if so.\n attr_name = cls.get_cache_attr_name()\n if hasattr(request_or_site, attr_name):\n return getattr(request_or_site, attr_name)\n\n obj = cls._get_or_create()\n\n # Cache for next time.\n setattr(request_or_site, attr_name, obj)\n\n return obj\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 171, "n_words": 72, "vocab_size": 53, "complexity": 4, "nloc": 9, "token_counts": 67, "n_ast_nodes": 110, "n_identifiers": 12, "d_id": 16749, "documentation": { "docstring": "\n Get or create an instance of this model. There is only ever one\n instance of models inheriting from `AbstractSetting` so we can\n use `pk=1`.\n\n If `request_or_site` is present and is a request object, then we cache\n the result on the request for faster repeat access.\n ", "n_words": 45, "vocab_size": 38, "n_whitespaces": 88, "language": "en" } }, { "id": 223760, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/headerregistry.py", "file_name": "headerregistry.py", "fun_name": "map_to_type", "commit_message": "add python 3.10.4 for windows", "code": "def map_to_type(self, name, cls):\n \n self.registry[name.lower()] = cls\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 22, "n_ast_nodes": 36, "n_identifiers": 6, "d_id": 57051, "documentation": { "docstring": "Register cls as the specialized class for handling \"name\" headers.\n\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 17, "language": "en" } }, { "id": 95879, "commit_id": "73959a1d9b946cd0b7054bebcbc9f50929bc9dc3", "repo": "sentry", "path": "src/sentry/integrations/gitlab/client.py", "file_name": "client.py", "fun_name": "search_projects", "commit_message": "I have rebased 15188 (round #2) (#31375)\n\n* Make GitLab Group Path optional\r\n\r\nCo-authored-by: King Chung Huang \r\nCo-authored-by: Colleen O'Rourke ", "code": "def search_projects(self, group=None, query=None, simple=True):\n \n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 12, "n_words": 5, "vocab_size": 5, "complexity": 2, "nloc": 8, "token_counts": 55, "n_ast_nodes": 28, "n_identifiers": 5, "d_id": 19254, "documentation": { "docstring": "Get projects\n\n See https://docs.gitlab.com/ee/api/groups.html#list-a-group-s-projects\n and https://docs.gitlab.com/ee/api/projects.html#list-all-projects\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 27, "language": "en" } }, { "id": 103729, "commit_id": "e2a1f8dde783c55dbca449691986923cb4025721", "repo": "kitty", "path": "kitty_tests/check_build.py", "file_name": "check_build.py", "fun_name": "test_launcher_ensures_stdio", "commit_message": "...", "code": "def test_launcher_ensures_stdio(self):\n from kitty.constants import kitty_exe\n import subprocess\n exe = kitty_exe()\n cp = subprocess.run([exe, '+runpy', f])\n self.assertEqual(cp.returncode, 
0)\n\n", "url": "https://github.com/kovidgoyal/kitty.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 52, "n_words": 18, "vocab_size": 16, "complexity": 1, "nloc": 15, "token_counts": 43, "n_ast_nodes": 76, "n_identifiers": 11, "d_id": 21712, "documentation": { "docstring": "\\\nimport os, sys\nif sys.stdin:\n os.close(sys.stdin.fileno())\nif sys.stdout:\n os.close(sys.stdout.fileno())\nif sys.stderr:\n os.close(sys.stderr.fileno())\nos.execlp({exe!r}, 'kitty', '+runpy', 'import sys; raise SystemExit(1 if sys.stdout is None or sys.stdin is None or sys.stderr is None else 0)')\n", "n_words": 34, "vocab_size": 26, "n_whitespaces": 37, "language": "en" } }, { "id": 73498, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/settings/tests/test_admin.py", "file_name": "test_admin.py", "fun_name": "test_redirect_to_default", "commit_message": "Reformat with black", "code": "def test_redirect_to_default(self):\n \n start_url = reverse(\"wagtailsettings:edit\", args=[\"tests\", \"testsetting\"])\n dest_url = reverse(\n \"wagtailsettings:edit\", args=[\"tests\", \"testsetting\", self.default_site.pk]\n )\n response = self.client.get(start_url, follow=True)\n self.assertRedirects(\n response, dest_url, status_code=302, fetch_redirect_response=False\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 96, "n_words": 25, "vocab_size": 21, "complexity": 1, "nloc": 9, "token_counts": 70, "n_ast_nodes": 116, "n_identifiers": 15, "d_id": 16029, "documentation": { "docstring": "\n Should redirect to the setting for the default site.\n ", "n_words": 9, "vocab_size": 8, "n_whitespaces": 24, "language": "en" } }, { "id": 110054, "commit_id": "13438f842729df1b04445d44ea83f616d1b85567", "repo": "matplotlib", "path": "lib/matplotlib/cbook/__init__.py", "file_name": "__init__.py", "fun_name": "_unfold", "commit_message": "Fix some minor docstring typos", "code": "def _unfold(arr, axis, size, step):\n \n new_shape = [*arr.shape, size]\n new_strides = [*arr.strides, arr.strides[axis]]\n new_shape[axis] = (new_shape[axis] - size) // step + 1\n new_strides[axis] = new_strides[axis] * step\n return np.lib.stride_tricks.as_strided(arr,\n shape=new_shape,\n strides=new_strides,\n writeable=False)\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 176, "n_words": 32, "vocab_size": 27, "complexity": 1, "nloc": 9, "token_counts": 85, "n_ast_nodes": 129, "n_identifiers": 14, "d_id": 23899, "documentation": { "docstring": "\n Append an extra dimension containing sliding windows along *axis*.\n\n All windows are of size *size* and begin with every *step* elements.\n\n Parameters\n ----------\n arr : ndarray, shape (N_1, ..., N_k)\n The input array\n axis : int\n Axis along which the windows are extracted\n size : int\n Size of the windows\n step : int\n Stride between first elements of subsequent windows.\n\n Returns\n -------\n ndarray, shape (N_1, ..., 1 + (N_axis-size)/step, ..., N_k, size)\n\n Examples\n --------\n >>> i, j = np.ogrid[:3, :7]\n >>> a = i*10 + j\n >>> a\n array([[ 0, 1, 2, 3, 4, 5, 6],\n [10, 11, 12, 13, 14, 15, 16],\n [20, 21, 22, 23, 24, 25, 26]])\n >>> _unfold(a, axis=1, size=3, step=2)\n array([[[ 0, 1, 2],\n [ 2, 3, 4],\n [ 4, 5, 6]],\n [[10, 11, 12],\n [12, 13, 14],\n [14, 15, 16]],\n [[20, 21, 22],\n 
[22, 23, 24],\n [24, 25, 26]]])\n ", "n_words": 145, "vocab_size": 106, "n_whitespaces": 352, "language": "en" } }, { "id": 117189, "commit_id": "7c02e15aa403a4ca1fa34489dd2df9136d6c961c", "repo": "mindsdb", "path": "tests/integration_tests/flows/test_company_independent.py", "file_name": "test_company_independent.py", "fun_name": "test_5_model", "commit_message": "Projects structure (#3532)\n\nProjects structure", "code": "def test_5_model(self):\n query = \n\n predict_query = \n\n for cid, char in [(CID_A, 'a'), (CID_B, 'b')]:\n self.sql_via_http(\n query.format(char, char),\n company_id=cid,\n expected_resp_type=RESPONSE_TYPE.OK\n )\n\n response = self.sql_via_http(\n predict_query.format(char),\n company_id=cid,\n expected_resp_type=RESPONSE_TYPE.TABLE\n )\n self.assertTrue(len(response['data']), 1)\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 196, "n_words": 29, "vocab_size": 24, "complexity": 2, "nloc": 23, "token_counts": 90, "n_ast_nodes": 142, "n_identifiers": 18, "d_id": 25918, "documentation": { "docstring": "\n CREATE MODEL mindsdb.model_{}\n FROM test_integration_{} (\n select * from test_data.home_rentals limit 50\n ) PREDICT rental_price\n USING join_learn_process=true, time_aim=5\n \n select * from mindsdb.model_{} where sqft = 100\n ", "n_words": 26, "vocab_size": 22, "n_whitespaces": 112, "language": "en" } }, { "id": 116154, "commit_id": "02a831997cdffafca7cb160eb1938e72020ee049", "repo": "mindsdb", "path": "tests/unit/test_executor.py", "file_name": "test_executor.py", "fun_name": "test_predictor_tableau_header", "commit_message": "executor tests", "code": "def test_predictor_tableau_header(self, mock_handler):\n df = pd.DataFrame([\n {'a': 1, 'b': 'one'},\n {'a': 2, 'b': 'two'},\n {'a': 1, 'b': 'three'},\n ])\n self.set_handler(mock_handler, name='pg', tables={'tasks': df})\n\n # --- use predictor ---\n predicted_value = 5\n predictor = {\n 'name': 'task_model',\n 'predict': 'p',\n 'dtypes': {\n 'p': dtype.float,\n 'a': dtype.integer,\n 'b': dtype.categorical\n },\n 'predicted_value': predicted_value\n }\n self.set_predictor(predictor)\n ret = self.command_executor.execute_command(parse_sql(f, dialect='mindsdb'))\n\n # second column is having last value of 'b'\n # 3: count rows, 4: sum of 'a', 5 max of prediction\n assert ret.data[0] == [3, 4, 5]\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 298, "n_words": 82, "vocab_size": 64, "complexity": 1, "nloc": 32, "token_counts": 143, "n_ast_nodes": 250, "n_identifiers": 22, "d_id": 25675, "documentation": { "docstring": "\n SELECT \n SUM(1) AS `cnt__0B4A4E8BD11C48FFB4730D4D2C32191A_ok`,\n sum(`Custom SQL Query`.`a`) AS `sum_height_ok`,\n max(`Custom SQL Query`.`p`) AS `sum_length1_ok`\n FROM (\n SELECT res.a, res.p \n FROM pg.tasks as source\n JOIN mindsdb.task_model as res\n ) `Custom SQL Query`\n HAVING (COUNT(1) > 0)\n ", "n_words": 35, "vocab_size": 28, "n_whitespaces": 176, "language": "en" } }, { "id": 262791, "commit_id": "460a53842a220faa70f892ab0127b6d4dd21c4eb", "repo": "pyinstaller", "path": "tests/functional/test_misc.py", "file_name": "test_misc.py", "fun_name": "test_single_file_metadata", "commit_message": "tests: add a test for single-file metadata collection", "code": "def test_single_file_metadata(pyi_builder):\n # Add directory containing the my-test-package metadata to search path\n extra_path = 
os.path.join(_MODULES_DIR, \"pyi_single_file_metadata\")\n\n pyi_builder.test_source(\n ,\n pyi_args=['--paths', extra_path]\n )\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 46, "n_words": 21, "vocab_size": 21, "complexity": 1, "nloc": 17, "token_counts": 31, "n_ast_nodes": 54, "n_identifiers": 9, "d_id": 77372, "documentation": { "docstring": "\n import pkg_resources\n\n # The pkg_resources.get_distribution() call automatically triggers collection of the metadata. While it does not\n # raise an error if metadata is not found while freezing, the calls below will fall at run-time in that case.\n dist = pkg_resources.get_distribution('my-test-package')\n\n # Sanity check\n assert dist.project_name == 'my-test-package'\n assert dist.version == '1.0'\n assert dist.egg_name() == f'my_test_package-{dist.version}-py{sys.version_info[0]}.{sys.version_info[1]}'\n ", "n_words": 55, "vocab_size": 47, "n_whitespaces": 119, "language": "en" } }, { "id": 101049, "commit_id": "3c73ae4ec9f0f30649a5e20465a268bbcfd690eb", "repo": "faceswap", "path": "scripts/train.py", "file_name": "train.py", "fun_name": "should_toggle_mask", "commit_message": "bugfix: Update preview screen in GUI", "code": "def should_toggle_mask(self) -> bool:\n \n with self._lock:\n retval = self._triggers[\"toggle_mask\"]\n if retval:\n logger.debug(\"Sending toggle mask\")\n self._triggers[\"toggle_mask\"] = False\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 92, "n_words": 19, "vocab_size": 16, "complexity": 2, "nloc": 14, "token_counts": 40, "n_ast_nodes": 74, "n_identifiers": 8, "d_id": 20487, "documentation": { "docstring": " Check whether the mask should be toggled and return the value. If ``True`` is returned\n then resets mask toggle back to ``False``\n\n Returns\n -------\n bool\n ``True`` if the mask should be toggled otherwise ``False``. 
", "n_words": 34, "vocab_size": 26, "n_whitespaces": 74, "language": "en" } }, { "id": 104426, "commit_id": "e35be138148333078284b942ccc9ed7b1d826f97", "repo": "datasets", "path": "src/datasets/table.py", "file_name": "table.py", "fun_name": "shape", "commit_message": "Update docs to new frontend/UI (#3690)\n\n* WIP: update docs to new UI\r\n\r\n* make style\r\n\r\n* Rm unused\r\n\r\n* inject_arrow_table_documentation __annotations__\r\n\r\n* hasattr(arrow_table_method, \"__annotations__\")\r\n\r\n* Update task_template.rst\r\n\r\n* Codeblock PT-TF-SPLIT\r\n\r\n* Convert loading scripts\r\n\r\n* Convert docs to mdx\r\n\r\n* Fix mdx\r\n\r\n* Add \r\n\r\n* Convert mdx tables\r\n\r\n* Fix codeblock\r\n\r\n* Rm unneded hashlinks\r\n\r\n* Update index.mdx\r\n\r\n* Redo dev change\r\n\r\n* Rm circle ci `build_doc` & `deploy_doc`\r\n\r\n* Rm unneeded files\r\n\r\n* Update docs reamde\r\n\r\n* Standardize to `Example::`\r\n\r\n* mdx logging levels doc\r\n\r\n* Table properties inject_arrow_table_documentation\r\n\r\n* ``` to ```py mdx\r\n\r\n* Add Tips mdx\r\n\r\n* important,None -> \r\n\r\n* More misc\r\n\r\n* Center imgs\r\n\r\n* Update instllation page\r\n\r\n* `setup.py` docs section\r\n\r\n* Rm imgs since they are in hf.co\r\n\r\n* Update docs/source/access.mdx\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\n\r\n* Update index mdx\r\n\r\n* Update docs/source/access.mdx\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\n\r\n* just `Dataset` obj\r\n\r\n* Addedversion just italics\r\n\r\n* Update ReadInstruction doc example syntax\r\n\r\n* Change docstring for `prepare_for_task`\r\n\r\n* Chore\r\n\r\n* Remove `code` syntax from headings\r\n\r\n* Rm `code` syntax from headings\r\n\r\n* Hashlink backward compatability\r\n\r\n* S3FileSystem doc\r\n\r\n* S3FileSystem doc updates\r\n\r\n* index.mdx updates\r\n\r\n* Add darkmode gifs\r\n\r\n* Index logo img css classes\r\n\r\n* Index mdx dataset logo img size\r\n\r\n* Docs for DownloadMode class\r\n\r\n* Doc DownloadMode table\r\n\r\n* format docstrings\r\n\r\n* style\r\n\r\n* Add doc builder scripts (#3790)\r\n\r\n* add doc builder scripts\r\n\r\n* fix docker image\r\n\r\n* Docs new UI actions no self hosted (#3793)\r\n\r\n* No self hosted\r\n\r\n* replace doc injection by actual docstrings\r\n\r\n* Docstring formatted\r\n\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Mishig Davaadorj \r\n\r\nCo-authored-by: Lysandre Debut \r\nCo-authored-by: Mishig Davaadorj \r\n\r\n* Rm notebooks from docs actions since they dont exi\r\n\r\n* Update tsting branch\r\n\r\n* More docstring\r\n\r\n* Chore\r\n\r\n* bump up node version\r\n\r\n* bump up node\r\n\r\n* ``` -> ```py for audio_process.mdx\r\n\r\n* Update .github/workflows/build_documentation.yml\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* Uodate dev doc build\r\n\r\n* remove run on PR\r\n\r\n* fix action\r\n\r\n* Fix gh doc workflow\r\n\r\n* forgot this change when merging master\r\n\r\n* Update build doc\r\n\r\nCo-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com>\r\nCo-authored-by: Quentin Lhoest \r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\nCo-authored-by: Lysandre Debut ", "code": "def shape(self):\n \n return self.table.shape\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 
2, "token_counts": 12, "n_ast_nodes": 22, "n_identifiers": 3, "d_id": 21862, "documentation": { "docstring": "\n Dimensions of the table: (#rows, #columns).\n\n Returns:\n :obj:`(int, int)`: Number of rows and number of columns.\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 49, "language": "en" } }, { "id": 118567, "commit_id": "704eab3478cf69847825b23dabf15813a8ac9fa2", "repo": "streamlit", "path": "lib/streamlit/server/server.py", "file_name": "server.py", "fun_name": "add_preheated_app_session", "commit_message": "Rename and refactor `Report` machinery (#4141)\n\nThis refactor renames (almost) everything related to the outdated \"report\" concept with more precise concepts that we use throughout our code, primarily \"script run\", \"session\", and \"app\".", "code": "def add_preheated_app_session(self) -> None:\n \n session = self._create_or_reuse_app_session(ws=None)\n session.handle_rerun_script_request(is_preheat=True)\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 8, "token_counts": 26, "n_ast_nodes": 45, "n_identifiers": 7, "d_id": 26297, "documentation": { "docstring": "Register a fake browser with the server and run the script.\n\n This is used to start running the user's script even before the first\n browser connects.\n ", "n_words": 26, "vocab_size": 22, "n_whitespaces": 47, "language": "en" } }, { "id": 258794, "commit_id": "3786daf7dc5c301478d489b0756f90d0ac5d010f", "repo": "scikit-learn", "path": "sklearn/gaussian_process/_gpr.py", "file_name": "_gpr.py", "fun_name": "sample_y", "commit_message": "BUG Fix covariance and stdev shape in GPR with normalize_y (#22199)\n\nCo-authored-by: Guillaume Lemaitre \r\nCo-authored-by: Nakamura-Zimmerer, Tenavi (ARC-AF) ", "code": "def sample_y(self, X, n_samples=1, random_state=0):\n \n rng = check_random_state(random_state)\n\n y_mean, y_cov = self.predict(X, return_cov=True)\n if y_mean.ndim == 1:\n y_samples = rng.multivariate_normal(y_mean, y_cov, n_samples).T\n else:\n y_samples = [\n rng.multivariate_normal(\n y_mean[:, target], y_cov[..., target], n_samples\n ).T[:, np.newaxis]\n for target in range(y_mean.shape[1])\n ]\n y_samples = np.hstack(y_samples)\n return y_samples\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 194, "n_words": 44, "vocab_size": 36, "complexity": 3, "nloc": 14, "token_counts": 114, "n_ast_nodes": 171, "n_identifiers": 21, "d_id": 75430, "documentation": { "docstring": "Draw samples from Gaussian process and evaluate at X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples_X, n_features) or list of object\n Query points where the GP is evaluated.\n\n n_samples : int, default=1\n Number of samples drawn from the Gaussian process per query point.\n\n random_state : int, RandomState instance or None, default=0\n Determines random number generation to randomly draw samples.\n Pass an int for reproducible results across multiple function\n calls.\n See :term:`Glossary `.\n\n Returns\n -------\n y_samples : ndarray of shape (n_samples_X, n_samples), or \\\n (n_samples_X, n_targets, n_samples)\n Values of n_samples samples drawn from Gaussian process and\n evaluated at query points.\n ", "n_words": 100, "vocab_size": 73, "n_whitespaces": 262, "language": "en" } }, { "id": 320963, "commit_id": 
"21419c9ef5a90ea36a27afaf2503a57f8f9f8536", "repo": "qutebrowser", "path": "tests/unit/javascript/test_greasemonkey.py", "file_name": "test_greasemonkey.py", "fun_name": "test_regex_includes_scripts_for", "commit_message": "greasemonkey: Don't implicitly load scripts\n\nNeeded for #7245 and also seems like cleaner code.", "code": "def test_regex_includes_scripts_for(gm_manager, url, expected_matches):\n \n gh_dark_example = textwrap.dedent(r)\n _save_script(gh_dark_example, 'test.user.js')\n gm_manager.load_scripts()\n\n scripts = gm_manager.scripts_for(QUrl(url))\n assert len(scripts.start + scripts.end + scripts.idle) == expected_matches\n\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 39, "n_words": 21, "vocab_size": 19, "complexity": 1, "nloc": 12, "token_counts": 58, "n_ast_nodes": 96, "n_identifiers": 16, "d_id": 117467, "documentation": { "docstring": "Ensure our GM @*clude support supports regular expressions.\n // ==UserScript==\n // @include /^https?://((gist|guides|help|raw|status|developer)\\.)?github\\.com/((?!generated_pages\\/preview).)*$/\n // @exclude /https?://github\\.com/foo/\n // @run-at document-start\n // ==/UserScript==\n ", "n_words": 21, "vocab_size": 17, "n_whitespaces": 67, "language": "en" } }, { "id": 49450, "commit_id": "9b3119dfb63c4cbb7acfb9f1f1c09ac24e6d68d2", "repo": "PaddleHub", "path": "modules/image/text_recognition/ppocrv3_rec_ch/character.py", "file_name": "character.py", "fun_name": "cal_predicts_accuracy", "commit_message": "add module", "code": "def cal_predicts_accuracy(char_ops, preds, preds_lod, labels, labels_lod, is_remove_duplicate=False):\n \n acc_num = 0\n img_num = 0\n for ino in range(len(labels_lod) - 1):\n beg_no = preds_lod[ino]\n end_no = preds_lod[ino + 1]\n preds_text = preds[beg_no:end_no].reshape(-1)\n preds_text = char_ops.decode(preds_text, is_remove_duplicate)\n\n beg_no = labels_lod[ino]\n end_no = labels_lod[ino + 1]\n labels_text = labels[beg_no:end_no].reshape(-1)\n labels_text = char_ops.decode(labels_text, is_remove_duplicate)\n img_num += 1\n\n if preds_text == labels_text:\n acc_num += 1\n acc = acc_num * 1.0 / img_num\n return acc, acc_num, img_num\n\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 169, "n_words": 70, "vocab_size": 44, "complexity": 3, "nloc": 17, "token_counts": 139, "n_ast_nodes": 209, "n_identifiers": 19, "d_id": 9747, "documentation": { "docstring": "\n Calculate prediction accuracy\n Args:\n char_ops: CharacterOps\n preds: preds result,text index\n preds_lod: lod tensor of preds\n labels: label of input image, text index\n labels_lod: lod tensor of label\n is_remove_duplicate: Whether to remove duplicate characters,\n The default is False\n Return:\n acc: The accuracy of test set\n acc_num: The correct number of samples predicted\n img_num: The total sample number of the test set\n ", "n_words": 60, "vocab_size": 43, "n_whitespaces": 169, "language": "en" } }, { "id": 260993, "commit_id": "60cc5b596f38d0d236dab34e02c05d98b5a72bad", "repo": "scikit-learn", "path": "sklearn/neighbors/_lof.py", "file_name": "_lof.py", "fun_name": "fit_predict", "commit_message": "FEA Fused sparse-dense support for `PairwiseDistancesReduction` (#23585)\n\n\r\n\r\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Christian Lorentzen \r\nCo-authored-by: Jérémie du Boisberranger \r\nCo-authored-by: Thomas J. 
Fan \r\nCo-authored-by: Meekail Zain ", "code": "def fit_predict(self, X, y=None):\n \n\n # As fit_predict would be different from fit.predict, fit_predict is\n # only available for outlier detection (novelty=False)\n\n return self.fit(X)._predict()\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 51, "n_words": 23, "vocab_size": 21, "complexity": 1, "nloc": 2, "token_counts": 23, "n_ast_nodes": 40, "n_identifiers": 6, "d_id": 76611, "documentation": { "docstring": "Fit the model to the training set X and return the labels.\n\n **Not available for novelty detection (when novelty is set to True).**\n Label is 1 for an inlier and -1 for an outlier according to the LOF\n score and the contamination parameter.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features), default=None\n The query sample or samples to compute the Local Outlier Factor\n w.r.t. to the training samples.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n Returns\n -------\n is_inlier : ndarray of shape (n_samples,)\n Returns -1 for anomalies/outliers and 1 for inliers.\n ", "n_words": 98, "vocab_size": 67, "n_whitespaces": 219, "language": "en" } }, { "id": 68213, "commit_id": "e79d292233000985a04c5d46859513c1e0d7c88c", "repo": "erpnext", "path": "erpnext/hr/report/monthly_attendance_sheet/monthly_attendance_sheet.py", "file_name": "monthly_attendance_sheet.py", "fun_name": "get_attendance_years", "commit_message": "refactor: Monthly Attendance Sheet\n\n- split into smaller functions\n\n- add type hints\n\n- get rid of unnecessary db calls and loops\n\n- add docstrings for functions", "code": "def get_attendance_years() -> str:\n\t\n\tAttendance = frappe.qb.DocType('Attendance')\n\tyear_list = (\n\t\tfrappe.qb.from_(Attendance)\n\t\t.select(Extract('year', Attendance.attendance_date).as_('year'))\n\t\t.distinct()\n\t).run(as_dict=True)\n\n\tif year_list:\n\t\tyear_list.sort(key=lambda d: d.year, reverse=True)\n\telse:\n\t\tyear_list = [getdate().year]\n\n\treturn \"\\n\".join(cstr(entry.year) for entry in year_list)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 19, "n_words": 31, "vocab_size": 28, "complexity": 3, "nloc": 13, "token_counts": 104, "n_ast_nodes": 177, "n_identifiers": 24, "d_id": 14743, "documentation": { "docstring": "Returns all the years for which attendance records exist", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 34876, "commit_id": "84eec9e6ba55c5aceee2a92fd820fcca4b67c510", "repo": "transformers", "path": "src/transformers/models/convnext/modeling_convnext.py", "file_name": "modeling_convnext.py", "fun_name": "_set_gradient_checkpointing", "commit_message": "Add ConvNeXT (#15277)\n\n* First draft\r\n\r\n* Add conversion script\r\n\r\n* Improve conversion script\r\n\r\n* Improve docs and implement tests\r\n\r\n* Define model output class\r\n\r\n* Fix tests\r\n\r\n* Fix more tests\r\n\r\n* Add model to README\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Apply more suggestions from code review\r\n\r\n* Apply suggestions from code review\r\n\r\n* Rename dims to hidden_sizes\r\n\r\n* Fix equivalence test\r\n\r\n* Rename gamma to gamma_parameter\r\n\r\n* Clean up conversion script\r\n\r\n* Add 
ConvNextFeatureExtractor\r\n\r\n* Add corresponding tests\r\n\r\n* Implement feature extractor correctly\r\n\r\n* Make implementation cleaner\r\n\r\n* Add ConvNextStem class\r\n\r\n* Improve design\r\n\r\n* Update design to also include encoder\r\n\r\n* Fix gamma parameter\r\n\r\n* Use sample docstrings\r\n\r\n* Finish conversion, add center cropping\r\n\r\n* Replace nielsr by facebook, make feature extractor tests smaller\r\n\r\n* Fix integration test\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, ConvNextModel):\n module.gradient_checkpointing = value\n\n\nCONVNEXT_START_DOCSTRING = r\n\nCONVNEXT_INPUTS_DOCSTRING = r\n\n\n@add_start_docstrings(\n \"The bare ConvNext model outputting raw features without any specific head on top.\",\n CONVNEXT_START_DOCSTRING,\n)", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@add_start_docstrings(\n \"The bare ConvNext model outputting raw features without any specific head on top.\",\n CONVNEXT_START_DOCSTRING,\n)", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 51, "n_words": 32, "vocab_size": 29, "complexity": 2, "nloc": 3, "token_counts": 24, "n_ast_nodes": 64, "n_identifiers": 10, "d_id": 6354, "documentation": { "docstring": "\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ConvNextConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoFeatureExtractor`]. See\n [`AutoFeatureExtractor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n", "n_words": 128, "vocab_size": 88, "n_whitespaces": 248, "language": "en" } }, { "id": 261468, "commit_id": "61ae92a7786baa132970cdc69da786f9952d8bda", "repo": "scikit-learn", "path": "sklearn/tests/test_kernel_approximation.py", "file_name": "test_kernel_approximation.py", "fun_name": "test_rbf_sampler_gamma_scale", "commit_message": "ENH Add gamma='scale' option to RBFSampler (#24755)\n\nCo-authored-by: Guillaume Lemaitre ", "code": "def test_rbf_sampler_gamma_scale():\n \n X, y = [[0.0], [1.0]], [0, 1]\n rbf = RBFSampler(gamma=\"scale\")\n rbf.fit(X, y)\n assert rbf._gamma == pytest.approx(4)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 33, "n_words": 18, "vocab_size": 17, "complexity": 1, "nloc": 5, "token_counts": 55, "n_ast_nodes": 83, "n_identifiers": 10, "d_id": 76824, "documentation": { "docstring": "Check the inner value computed when `gamma='scale'`.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 8246, "commit_id": "1caede3a2da4ec71cb8650c7e45120c26948a5b9", "repo": "ludwig", "path": "ludwig/explain/explainer.py", "file_name": "explainer.py", "fun_name": "vocab_size", "commit_message": "Explanation API and feature importance for GBM (#2564)\n\n* add docstring for explain_ig\r\n\r\n* solidify Explainer API\r\n\r\n* add gbm explainer\r\n\r\n* add dataclasses for typed explanations\r\n\r\n* add GBM feature importance\r\n\r\n* remove unused imports\r\n\r\n* add tests\r\n\r\n* fix test\r\n\r\n* extract explanation into file\r\n\r\n* rename base to explainer\r\n\r\n* remove unused kwargs\r\n\r\n* remove device placement from base explainer\r\n\r\n* use proper field from gbm", "code": "def vocab_size(self) -> int:\n \n if self.is_category_target:\n return self.model.training_set_metadata[self.target_feature_name][\"vocab_size\"]\n elif self.is_binary_target:\n return 2\n return 1\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 64, "n_words": 14, "vocab_size": 12, "complexity": 3, "nloc": 10, "token_counts": 36, "n_ast_nodes": 60, "n_identifiers": 8, "d_id": 1380, "documentation": { "docstring": "The vocab size of the target feature.\n\n For regression (number) this is 1, for binary it is 2, and for category it is the vocab size.\n ", "n_words": 26, "vocab_size": 20, "n_whitespaces": 40, "language": "en" } }, { "id": 231647, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/_scene.py", "file_name": "_scene.py", "fun_name": "zaxis", "commit_message": "switch to black .22", "code": "def zaxis(self):\n \n return self[\"zaxis\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 63091, "documentation": { "docstring": "\n The 'zaxis' property is an instance of ZAxis\n that may be specified as:\n - An instance of :class:`plotly.graph_objs.layout.scene.ZAxis`\n - A dict of string/value properties that will be passed\n to the ZAxis constructor\n\n Supported dict 
properties:\n\n autorange\n Determines whether or not the range of this\n axis is computed in relation to the input data.\n See `rangemode` for more info. If `range` is\n provided, then `autorange` is set to False.\n autotypenumbers\n Using \"strict\" a numeric string in trace data\n is not converted to a number. Using *convert\n types* a numeric string in trace data may be\n treated as a number during automatic axis\n `type` detection. Defaults to\n layout.autotypenumbers.\n backgroundcolor\n Sets the background color of this axis' wall.\n calendar\n Sets the calendar system to use for `range` and\n `tick0` if this is a date axis. This does not\n set the calendar for interpreting data on this\n axis, that's specified in the trace or via the\n global `layout.calendar`\n categoryarray\n Sets the order in which categories on this axis\n appear. Only has an effect if `categoryorder`\n is set to \"array\". Used with `categoryorder`.\n categoryarraysrc\n Sets the source reference on Chart Studio Cloud\n for `categoryarray`.\n categoryorder\n Specifies the ordering logic for the case of\n categorical variables. By default, plotly uses\n \"trace\", which specifies the order that is\n present in the data supplied. Set\n `categoryorder` to *category ascending* or\n *category descending* if order should be\n determined by the alphanumerical order of the\n category names. Set `categoryorder` to \"array\"\n to derive the ordering from the attribute\n `categoryarray`. If a category is not found in\n the `categoryarray` array, the sorting behavior\n for that attribute will be identical to the\n \"trace\" mode. The unspecified categories will\n follow the categories in `categoryarray`. Set\n `categoryorder` to *total ascending* or *total\n descending* if order should be determined by\n the numerical order of the values. Similarly,\n the order can be determined by the min, max,\n sum, mean or median of all the values.\n color\n Sets default for all colors associated with\n this axis all at once: line, font, tick, and\n grid colors. Grid color is lightened by\n blending this with the plot background\n Individual pieces can override this.\n dtick\n Sets the step in-between ticks on this axis.\n Use with `tick0`. Must be a positive number, or\n special strings available to \"log\" and \"date\"\n axes. If the axis `type` is \"log\", then ticks\n are set every 10^(n*dtick) where n is the tick\n number. For example, to set a tick mark at 1,\n 10, 100, 1000, ... set dtick to 1. To set tick\n marks at 1, 100, 10000, ... set dtick to 2. To\n set tick marks at 1, 5, 25, 125, 625, 3125, ...\n set dtick to log_10(5), or 0.69897000433. \"log\"\n has several special values; \"L\", where `f`\n is a positive number, gives ticks linearly\n spaced in value (but not position). For example\n `tick0` = 0.1, `dtick` = \"L0.5\" will put ticks\n at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10\n plus small digits between, use \"D1\" (all\n digits) or \"D2\" (only 2 and 5). `tick0` is\n ignored for \"D1\" and \"D2\". If the axis `type`\n is \"date\", then you must convert the time to\n milliseconds. For example, to set the interval\n between ticks to one day, set `dtick` to\n 86400000.0. \"date\" also has special values\n \"M\" gives ticks spaced by a number of\n months. `n` must be a positive integer. To set\n ticks on the 15th of every third month, set\n `tick0` to \"2000-01-15\" and `dtick` to \"M3\". To\n set ticks every 4 years, set `dtick` to \"M48\"\n exponentformat\n Determines a formatting rule for the tick\n exponents. 
For example, consider the number\n 1,000,000,000. If \"none\", it appears as\n 1,000,000,000. If \"e\", 1e+9. If \"E\", 1E+9. If\n \"power\", 1x10^9 (with 9 in a super script). If\n \"SI\", 1G. If \"B\", 1B.\n gridcolor\n Sets the color of the grid lines.\n gridwidth\n Sets the width (in px) of the grid lines.\n hoverformat\n Sets the hover text formatting rule using d3\n formatting mini-languages which are very\n similar to those in Python. For numbers, see: h\n ttps://github.com/d3/d3-format/tree/v1.4.5#d3-f\n ormat. And for dates see:\n https://github.com/d3/d3-time-\n format/tree/v2.2.3#locale_format. We add two\n items to d3's date formatter: \"%h\" for half of\n the year as a decimal number as well as \"%{n}f\"\n for fractional seconds with n digits. For\n example, *2016-10-13 09:15:23.456* with\n tickformat \"%H~%M~%S.%2f\" would display\n \"09~15~23.46\"\n linecolor\n Sets the axis line color.\n linewidth\n Sets the width (in px) of the axis line.\n minexponent\n Hide SI prefix for 10^n if |n| is below this\n number. This only has an effect when\n `tickformat` is \"SI\" or \"B\".\n mirror\n Determines if the axis lines or/and ticks are\n mirrored to the opposite side of the plotting\n area. If True, the axis lines are mirrored. If\n \"ticks\", the axis lines and ticks are mirrored.\n If False, mirroring is disable. If \"all\", axis\n lines are mirrored on all shared-axes subplots.\n If \"allticks\", axis lines and ticks are\n mirrored on all shared-axes subplots.\n nticks\n Specifies the maximum number of ticks for the\n particular axis. The actual number of ticks\n will be chosen automatically to be less than or\n equal to `nticks`. Has an effect only if\n `tickmode` is set to \"auto\".\n range\n Sets the range of this axis. If the axis `type`\n is \"log\", then you must take the log of your\n desired range (e.g. to set the range from 1 to\n 100, set the range from 0 to 2). If the axis\n `type` is \"date\", it should be date strings,\n like date data, though Date objects and unix\n milliseconds will be accepted and converted to\n strings. If the axis `type` is \"category\", it\n should be numbers, using the scale where each\n category is assigned a serial number from zero\n in the order it appears.\n rangemode\n If \"normal\", the range is computed in relation\n to the extrema of the input data. If *tozero*`,\n the range extends to 0, regardless of the input\n data If \"nonnegative\", the range is non-\n negative, regardless of the input data. Applies\n only to linear axes.\n separatethousands\n If \"true\", even 4-digit integers are separated\n showaxeslabels\n Sets whether or not this axis is labeled\n showbackground\n Sets whether or not this axis' wall has a\n background color.\n showexponent\n If \"all\", all exponents are shown besides their\n significands. If \"first\", only the exponent of\n the first tick is shown. If \"last\", only the\n exponent of the last tick is shown. If \"none\",\n no exponents appear.\n showgrid\n Determines whether or not grid lines are drawn.\n If True, the grid lines are drawn at every tick\n mark.\n showline\n Determines whether or not a line bounding this\n axis is drawn.\n showspikes\n Sets whether or not spikes starting from data\n points to this axis' wall are shown on hover.\n showticklabels\n Determines whether or not the tick labels are\n drawn.\n showtickprefix\n If \"all\", all tick labels are displayed with a\n prefix. If \"first\", only the first tick is\n displayed with a prefix. 
If \"last\", only the\n last tick is displayed with a suffix. If\n \"none\", tick prefixes are hidden.\n showticksuffix\n Same as `showtickprefix` but for tick suffixes.\n spikecolor\n Sets the color of the spikes.\n spikesides\n Sets whether or not spikes extending from the\n projection data points to this axis' wall\n boundaries are shown on hover.\n spikethickness\n Sets the thickness (in px) of the spikes.\n tick0\n Sets the placement of the first tick on this\n axis. Use with `dtick`. If the axis `type` is\n \"log\", then you must take the log of your\n starting tick (e.g. to set the starting tick to\n 100, set the `tick0` to 2) except when\n `dtick`=*L* (see `dtick` for more info). If\n the axis `type` is \"date\", it should be a date\n string, like date data. If the axis `type` is\n \"category\", it should be a number, using the\n scale where each category is assigned a serial\n number from zero in the order it appears.\n tickangle\n Sets the angle of the tick labels with respect\n to the horizontal. For example, a `tickangle`\n of -90 draws the tick labels vertically.\n tickcolor\n Sets the tick color.\n tickfont\n Sets the tick font.\n tickformat\n Sets the tick label formatting rule using d3\n formatting mini-languages which are very\n similar to those in Python. For numbers, see: h\n ttps://github.com/d3/d3-format/tree/v1.4.5#d3-f\n ormat. And for dates see:\n https://github.com/d3/d3-time-\n format/tree/v2.2.3#locale_format. We add two\n items to d3's date formatter: \"%h\" for half of\n the year as a decimal number as well as \"%{n}f\"\n for fractional seconds with n digits. For\n example, *2016-10-13 09:15:23.456* with\n tickformat \"%H~%M~%S.%2f\" would display\n \"09~15~23.46\"\n tickformatstops\n A tuple of :class:`plotly.graph_objects.layout.\n scene.zaxis.Tickformatstop` instances or dicts\n with compatible properties\n tickformatstopdefaults\n When used in a template (as layout.template.lay\n out.scene.zaxis.tickformatstopdefaults), sets\n the default property values to use for elements\n of layout.scene.zaxis.tickformatstops\n ticklen\n Sets the tick length (in px).\n tickmode\n Sets the tick mode for this axis. If \"auto\",\n the number of ticks is set via `nticks`. If\n \"linear\", the placement of the ticks is\n determined by a starting position `tick0` and a\n tick step `dtick` (\"linear\" is the default\n value if `tick0` and `dtick` are provided). If\n \"array\", the placement of the ticks is set via\n `tickvals` and the tick text is `ticktext`.\n (\"array\" is the default value if `tickvals` is\n provided).\n tickprefix\n Sets a tick label prefix.\n ticks\n Determines whether ticks are drawn or not. If\n \"\", this axis' ticks are not drawn. If\n \"outside\" (\"inside\"), this axis' are drawn\n outside (inside) the axis lines.\n ticksuffix\n Sets a tick label suffix.\n ticktext\n Sets the text displayed at the ticks position\n via `tickvals`. Only has an effect if\n `tickmode` is set to \"array\". Used with\n `tickvals`.\n ticktextsrc\n Sets the source reference on Chart Studio Cloud\n for `ticktext`.\n tickvals\n Sets the values at which ticks on this axis\n appear. Only has an effect if `tickmode` is set\n to \"array\". Used with `ticktext`.\n tickvalssrc\n Sets the source reference on Chart Studio Cloud\n for `tickvals`.\n tickwidth\n Sets the tick width (in px).\n title\n :class:`plotly.graph_objects.layout.scene.zaxis\n .Title` instance or dict with compatible\n properties\n titlefont\n Deprecated: Please use\n layout.scene.zaxis.title.font instead. 
Sets\n this axis' title font. Note that the title's\n font used to be customized by the now\n deprecated `titlefont` attribute.\n type\n Sets the axis type. By default, plotly attempts\n to determined the axis type by looking into the\n data of the traces that referenced the axis in\n question.\n visible\n A single toggle to hide the axis while\n preserving interaction like dragging. Default\n is true when a cheater plot is present on the\n axis, otherwise false\n zeroline\n Determines whether or not a line is drawn at\n along the 0 value of this axis. If True, the\n zero line is drawn on top of the grid lines.\n zerolinecolor\n Sets the line color of the zero line.\n zerolinewidth\n Sets the width (in px) of the zero line.\n\n Returns\n -------\n plotly.graph_objs.layout.scene.ZAxis\n ", "n_words": 1773, "vocab_size": 608, "n_whitespaces": 7328, "language": "en" } }, { "id": 147381, "commit_id": "60054995e65304fb14e6d0ab69bdec07aa9389fe", "repo": "ray", "path": "python/ray/serve/api.py", "file_name": "api.py", "fun_name": "get_deployment_statuses", "commit_message": "[docs] fix doctests and activate CI (#23418)", "code": "def get_deployment_statuses() -> Dict[str, DeploymentStatusInfo]:\n \n\n return internal_get_global_client().get_deployment_statuses()\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 18, "token_counts": 20, "n_ast_nodes": 35, "n_identifiers": 5, "d_id": 33930, "documentation": { "docstring": "Returns a dictionary of deployment statuses.\n\n A deployment's status is one of {UPDATING, UNHEALTHY, and HEALTHY}.\n\n Example:\n >>> from ray.serve.api import get_deployment_statuses\n >>> statuses = get_deployment_statuses() # doctest: +SKIP\n >>> status_info = statuses[\"deployment_name\"] # doctest: +SKIP\n >>> status = status_info.status # doctest: +SKIP\n >>> message = status_info.message # doctest: +SKIP\n\n Returns:\n Dict[str, DeploymentStatus]: This dictionary maps the running\n deployment's name to a DeploymentStatus object containing its\n status and a message explaining the status.\n ", "n_words": 73, "vocab_size": 47, "n_whitespaces": 141, "language": "en" } }, { "id": 177149, "commit_id": "99a925f695080787d077f620972c6552c4b0b4ba", "repo": "networkx", "path": "networkx/algorithms/dag.py", "file_name": "dag.py", "fun_name": "lexicographical_topological_sort", "commit_message": "docstring update to lexicographical_topological_sort issue 5681 (#5930)\n\n* docstring update to lex-topo-sort\r\n\r\n- explain effect and purpose for lexi sort\r\n- add hints for fixing non-sortable nodes\r\n- add hint to exception msg\r\n- Add examples\r\n\r\n* Shorten the first line of the doc_string\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Generalize the description of sort failures\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* more succinct description of key function\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* improve description of key function\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Black'd it.\r\n\r\nCo-authored-by: Dan Schult ", "code": "def lexicographical_topological_sort(G, key=None):\n \n if not G.is_directed():\n msg = \"Topological sort not defined on undirected graphs.\"\n raise nx.NetworkXError(msg)\n\n if key is None:\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 44, "n_words": 21, "vocab_size": 19, "complexity": 
15, "nloc": 32, "token_counts": 212, "n_ast_nodes": 58, "n_identifiers": 7, "d_id": 42289, "documentation": { "docstring": "Generate the nodes in the unique lexicographical topological sort order.\n\n Generates a unique ordering of nodes by first sorting topologically (for which there are often\n multiple valid orderings) and then additionally by sorting lexicographically.\n\n A topological sort arranges the nodes of a directed graph so that the\n upstream node of each directed edge precedes the downstream node.\n It is always possible to find a solution for directed graphs that have no cycles.\n There may be more than one valid solution.\n\n Lexicographical sorting is just sorting alphabetically. It is used here to break ties in the\n topological sort and to determine a single, unique ordering. This can be useful in comparing\n sort results.\n\n The lexicographical order can be customized by providing a function to the `key=` parameter.\n The definition of the key function is the same as used in python's built-in `sort()`.\n The function takes a single argument and returns a key to use for sorting purposes.\n\n Lexicographical sorting can fail if the node names are un-sortable. See the example below.\n The solution is to provide a function to the `key=` argument that returns sortable keys.\n\n\n Parameters\n ----------\n G : NetworkX digraph\n A directed acyclic graph (DAG)\n\n key : function, optional\n A function of one argument that converts a node name to a comparison key.\n It defines and resolves ambiguities in the sort order. Defaults to the identity function.\n\n Yields\n ------\n nodes\n Yields the nodes of G in lexicographical topological sort order.\n\n Raises\n ------\n NetworkXError\n Topological sort is defined for directed graphs only. If the graph `G`\n is undirected, a :exc:`NetworkXError` is raised.\n\n NetworkXUnfeasible\n If `G` is not a directed acyclic graph (DAG) no topological sort exists\n and a :exc:`NetworkXUnfeasible` exception is raised. This can also be\n raised if `G` is changed while the returned iterator is being processed\n\n RuntimeError\n If `G` is changed while the returned iterator is being processed.\n\n TypeError\n Results from un-sortable node names.\n Consider using `key=` parameter to resolve ambiguities in the sort order.\n\n Examples\n --------\n >>> DG = nx.DiGraph([(2, 1), (2, 5), (1, 3), (1, 4), (5, 4)])\n >>> list(nx.lexicographical_topological_sort(DG))\n [2, 1, 3, 5, 4]\n >>> list(nx.lexicographical_topological_sort(DG, key=lambda x: -x))\n [2, 5, 1, 4, 3]\n\n The sort will fail for any graph with integer and string nodes. Comparison of integer to strings\n is not defined in python. Is 3 greater or less than 'red'?\n\n >>> DG = nx.DiGraph([(1, 'red'), (3, 'red'), (1, 'green'), (2, 'blue')])\n >>> list(nx.lexicographical_topological_sort(DG))\n Traceback (most recent call last):\n ...\n TypeError: '<' not supported between instances of 'str' and 'int'\n ...\n\n Incomparable nodes can be resolved using a `key` function. This example function\n allows comparison of integers and strings by returning a tuple where the first\n element is True for `str`, False otherwise. 
The second element is the node name.\n This groups the strings and integers separately so they can be compared only among themselves.\n\n >>> key = lambda node: (isinstance(node, str), node)\n >>> list(nx.lexicographical_topological_sort(DG, key=key))\n [1, 2, 3, 'blue', 'green', 'red']\n\n Notes\n -----\n This algorithm is based on a description and proof in\n \"Introduction to Algorithms: A Creative Approach\" [1]_ .\n\n See also\n --------\n topological_sort\n\n References\n ----------\n .. [1] Manber, U. (1989).\n *Introduction to Algorithms - A Creative Approach.* Addison-Wesley.\n ", "n_words": 528, "vocab_size": 279, "n_whitespaces": 802, "language": "en" } }, { "id": 32333, "commit_id": "99eb9b523f9b9ea6096323ce5610ce6633acc88a", "repo": "transformers", "path": "examples/pytorch/test_accelerate_examples.py", "file_name": "test_accelerate_examples.py", "fun_name": "test_run_summarization_no_trainer", "commit_message": "Fix `no_trainer` CI (#18242)\n\n* Fix all tests", "code": "def test_run_summarization_no_trainer(self):\n tmp_dir = self.get_auto_remove_tmp_dir()\n testargs = f.split()\n\n run_command(self._launch_args + testargs)\n result = get_results(tmp_dir)\n self.assertGreaterEqual(result[\"eval_rouge1\"], 10)\n self.assertGreaterEqual(result[\"eval_rouge2\"], 2)\n self.assertGreaterEqual(result[\"eval_rougeL\"], 7)\n self.assertGreaterEqual(result[\"eval_rougeLsum\"], 7)\n self.assertTrue(os.path.exists(os.path.join(tmp_dir, \"epoch_0\")))\n self.assertTrue(os.path.exists(os.path.join(tmp_dir, \"summarization_no_trainer\")))\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 95, "n_words": 26, "vocab_size": 22, "complexity": 1, "nloc": 24, "token_counts": 122, "n_ast_nodes": 213, "n_identifiers": 17, "d_id": 5907, "documentation": { "docstring": "\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 157, "language": "en" } }, { "id": 118187, "commit_id": "b96825c643cb2ce062d80868a5b7824d99bca07f", "repo": "mindsdb", "path": "tests/integration_tests/flows/test_company_independent.py", "file_name": "test_company_independent.py", "fun_name": "test_views", "commit_message": "fix tests", "code": "def test_views(self, postgres_db):\n\n query = \n\n for cid, char in [(CID_A, 'a'), (CID_B, 'b')]:\n self.sql_via_http(\n query.format(f'test_view_{char}', char),\n company_id=cid,\n expected_resp_type=RESPONSE_TYPE.OK\n )\n\n tables = self.get_tables_in('mindsdb', cid)\n self.assert_list(\n tables, {\n 'models',\n 'models_versions',\n f'test_view_{char}'\n }\n )\n\n for cid, char in [(CID_A, 'a'), (CID_B, 'b')]:\n response = self.sql_via_http(\n f\"select * from mindsdb.test_view_{char}\",\n company_id=cid,\n expected_resp_type=RESPONSE_TYPE.TABLE\n )\n assert len(response['data']) == 50\n\n response = self.sql_via_http(\n f\"DROP VIEW mindsdb.test_view_{char}\",\n company_id=cid,\n expected_resp_type=RESPONSE_TYPE.OK\n )\n\n tables = self.get_tables_in('mindsdb', cid)\n self.assert_list(\n tables, {\n 'models',\n 'models_versions'\n }\n )\n\n 
self.sql_via_http(\n f\"select * from mindsdb.test_view_{char}\",\n company_id=cid,\n expected_resp_type=RESPONSE_TYPE.ERROR\n )\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 602, "n_words": 81, "vocab_size": 43, "complexity": 3, "nloc": 54, "token_counts": 200, "n_ast_nodes": 309, "n_identifiers": 21, "d_id": 26187, "documentation": { "docstring": "\n CREATE VIEW mindsdb.{}\n FROM test_integration_{} (\n select * from rentals limit 50\n )\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 69, "language": "en" } }, { "id": 197253, "commit_id": "1473b1782d0e440c17ee0ce6283bff0aa7f515af", "repo": "sympy", "path": "sympy/plotting/plot.py", "file_name": "plot.py", "fun_name": "plot", "commit_message": "Use LaTeX for labels in matplotlib backend", "code": "def plot(*args, show=True, **kwargs):\n \n args = list(map(sympify, args))\n free = set()\n for a in args:\n if isinstance(a, Expr):\n free |= a.free_symbols\n if len(free) > 1:\n raise ValueError(\n 'The same variable should be used in all '\n 'univariate expressions being plotted.')\n x = free.pop() if free else Symbol('x')\n kwargs.setdefault('xlabel', x)\n kwargs.setdefault('ylabel', Function('f')(x))\n series = []\n plot_expr = check_arguments(args, 1, 1)\n series = [LineOver1DRangeSeries(*arg, **kwargs) for arg in plot_expr]\n\n plots = Plot(*series, **kwargs)\n if show:\n plots.show()\n return plots\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 204, "n_words": 76, "vocab_size": 59, "complexity": 7, "nloc": 20, "token_counts": 148, "n_ast_nodes": 246, "n_identifiers": 27, "d_id": 48412, "documentation": { "docstring": "Plots a function of a single variable as a curve.\n\n Parameters\n ==========\n\n args :\n The first argument is the expression representing the function\n of single variable to be plotted.\n\n The last argument is a 3-tuple denoting the range of the free\n variable. e.g. ``(x, 0, 5)``\n\n Typical usage examples are in the followings:\n\n - Plotting a single expression with a single range.\n ``plot(expr, range, **kwargs)``\n - Plotting a single expression with the default range (-10, 10).\n ``plot(expr, **kwargs)``\n - Plotting multiple expressions with a single range.\n ``plot(expr1, expr2, ..., range, **kwargs)``\n - Plotting multiple expressions with multiple ranges.\n ``plot((expr1, range1), (expr2, range2), ..., **kwargs)``\n\n It is best practice to specify range explicitly because default\n range may change in the future if a more advanced default range\n detection algorithm is implemented.\n\n show : bool, optional\n The default value is set to ``True``. Set show to ``False`` and\n the function will not display the plot. The returned instance of\n the ``Plot`` class can then be used to save or display the plot\n by calling the ``save()`` and ``show()`` methods respectively.\n\n line_color : string, or float, or function, optional\n Specifies the color for the plot.\n See ``Plot`` to see how to set color for the plots.\n Note that by setting ``line_color``, it would be applied simultaneously\n to all the series.\n\n title : str, optional\n Title of the plot. It is set to the latex representation of\n the expression, if the plot has only one expression.\n\n label : str, optional\n The label of the expression in the plot. It will be used when\n called with ``legend``. Default is the name of the expression.\n e.g. 
``sin(x)``\n\n xlabel : str or expression, optional\n Label for the x-axis.\n\n ylabel : str or expression, optional\n Label for the y-axis.\n\n xscale : 'linear' or 'log', optional\n Sets the scaling of the x-axis.\n\n yscale : 'linear' or 'log', optional\n Sets the scaling of the y-axis.\n\n axis_center : (float, float), optional\n Tuple of two floats denoting the coordinates of the center or\n {'center', 'auto'}\n\n xlim : (float, float), optional\n Denotes the x-axis limits, ``(min, max)```.\n\n ylim : (float, float), optional\n Denotes the y-axis limits, ``(min, max)```.\n\n annotations : list, optional\n A list of dictionaries specifying the type of annotation\n required. The keys in the dictionary should be equivalent\n to the arguments of the matplotlib's annotate() function.\n\n markers : list, optional\n A list of dictionaries specifying the type the markers required.\n The keys in the dictionary should be equivalent to the arguments\n of the matplotlib's plot() function along with the marker\n related keyworded arguments.\n\n rectangles : list, optional\n A list of dictionaries specifying the dimensions of the\n rectangles to be plotted. The keys in the dictionary should be\n equivalent to the arguments of the matplotlib's\n patches.Rectangle class.\n\n fill : dict, optional\n A dictionary specifying the type of color filling required in\n the plot. The keys in the dictionary should be equivalent to the\n arguments of the matplotlib's fill_between() function.\n\n adaptive : bool, optional\n The default value is set to ``True``. Set adaptive to ``False``\n and specify ``nb_of_points`` if uniform sampling is required.\n\n The plotting uses an adaptive algorithm which samples\n recursively to accurately plot. The adaptive algorithm uses a\n random point near the midpoint of two points that has to be\n further sampled. Hence the same plots can appear slightly\n different.\n\n depth : int, optional\n Recursion depth of the adaptive algorithm. A depth of value\n ``n`` samples a maximum of `2^{n}` points.\n\n If the ``adaptive`` flag is set to ``False``, this will be\n ignored.\n\n nb_of_points : int, optional\n Used when the ``adaptive`` is set to ``False``. The function\n is uniformly sampled at ``nb_of_points`` number of points.\n\n If the ``adaptive`` flag is set to ``True``, this will be\n ignored.\n\n size : (float, float), optional\n A tuple in the form (width, height) in inches to specify the size of\n the overall figure. The default value is set to ``None``, meaning\n the size will be set by the default backend.\n\n Examples\n ========\n\n .. plot::\n :context: close-figs\n :format: doctest\n :include-source: True\n\n >>> from sympy import symbols\n >>> from sympy.plotting import plot\n >>> x = symbols('x')\n\n Single Plot\n\n .. plot::\n :context: close-figs\n :format: doctest\n :include-source: True\n\n >>> plot(x**2, (x, -5, 5))\n Plot object containing:\n [0]: cartesian line: x**2 for x over (-5.0, 5.0)\n\n Multiple plots with single range.\n\n .. plot::\n :context: close-figs\n :format: doctest\n :include-source: True\n\n >>> plot(x, x**2, x**3, (x, -5, 5))\n Plot object containing:\n [0]: cartesian line: x for x over (-5.0, 5.0)\n [1]: cartesian line: x**2 for x over (-5.0, 5.0)\n [2]: cartesian line: x**3 for x over (-5.0, 5.0)\n\n Multiple plots with different ranges.\n\n .. 
plot::\n :context: close-figs\n :format: doctest\n :include-source: True\n\n >>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))\n Plot object containing:\n [0]: cartesian line: x**2 for x over (-6.0, 6.0)\n [1]: cartesian line: x for x over (-5.0, 5.0)\n\n No adaptive sampling.\n\n .. plot::\n :context: close-figs\n :format: doctest\n :include-source: True\n\n >>> plot(x**2, adaptive=False, nb_of_points=400)\n Plot object containing:\n [0]: cartesian line: x**2 for x over (-10.0, 10.0)\n\n See Also\n ========\n\n Plot, LineOver1DRangeSeries\n\n ", "n_words": 831, "vocab_size": 317, "n_whitespaces": 1639, "language": "en" } }, { "id": 182009, "commit_id": "54e63428644710112215c4f2d27cd64daeeda6fa", "repo": "textual", "path": "src/textual/drivers/win32.py", "file_name": "win32.py", "fun_name": "setcbreak", "commit_message": "windows driver", "code": "def setcbreak(filehandle):\n \n\n set_console_mode(filehandle, CBREAK_MODE)\n\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 10, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 22, "n_identifiers": 4, "d_id": 43728, "documentation": { "docstring": "\n Args:\n filehandle(int): Windows filehandle object as returned by :py:func:`msvcrt.get_osfhandle`\n\n Raises:\n OSError: Error calling Windows API\n\n Convenience function which mimics :py:func:`tty.setcbreak` behavior\n\n All console input options are disabled except ``ENABLE_PROCESSED_INPUT``\n and, if supported, ``ENABLE_VIRTUAL_TERMINAL_INPUT``\n ", "n_words": 33, "vocab_size": 32, "n_whitespaces": 66, "language": "en" } }, { "id": 260158, "commit_id": "8515b486810e844bc7f5f1a4fb2227405d46871e", "repo": "scikit-learn", "path": "sklearn/datasets/tests/test_arff_parser.py", "file_name": "test_arff_parser.py", "fun_name": "test_pandas_arff_parser_strip_double_quotes", "commit_message": "FIX make pandas and liac arff parser quoting behaviour closer (#23497)\n\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Thomas J. 
Fan \r\nCo-authored-by: Loïc Estève ", "code": "def test_pandas_arff_parser_strip_double_quotes(parser_func):\n \n pd = pytest.importorskip(\"pandas\")\n\n arff_file = BytesIO(\n textwrap.dedent(\n ", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "arff_file = BytesIO(\n textwrap.dedent(\n \"\"\"", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 37, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 54, "token_counts": 186, "n_ast_nodes": 39, "n_identifiers": 9, "d_id": 76098, "documentation": { "docstring": "Check that we properly strip double quotes from the data.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 162332, "commit_id": "311b6615d85d3530f2709c50e4223ff3b6b14361", "repo": "yt-dlp", "path": "yt_dlp/extractor/common.py", "file_name": "common.py", "fun_name": "url_result", "commit_message": "[extractor] Improve `url_result` and related", "code": "def url_result(url, ie=None, video_id=None, video_title=None, *, url_transparent=False, **kwargs):\n \n if ie is not None:\n kwargs['ie_key'] = ie if isinstance(ie, str) else ie.ie_key()\n if video_id is not None:\n kwargs['id'] = video_id\n if video_title is not None:\n kwargs['title'] = video_title\n return {\n **kwargs,\n '_type': 'url_transparent' if url_transparent else 'url',\n 'url': url,\n }\n", "url": "https://github.com/yt-dlp/yt-dlp.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 157, "n_words": 49, "vocab_size": 33, "complexity": 6, "nloc": 12, "token_counts": 94, "n_ast_nodes": 151, "n_identifiers": 10, "d_id": 39190, "documentation": { "docstring": "Returns a URL that points to a page that should be processed", "n_words": 12, "vocab_size": 10, "n_whitespaces": 11, "language": "en" } }, { "id": 208054, "commit_id": "1c4ff33bd22cf94e297bd6449a06b5a30c2c1fbc", "repo": "celery", "path": "celery/canvas.py", "file_name": "canvas.py", "fun_name": "stamp", "commit_message": "Canvas Header Stamping (#7384)\n\n* Strip down the header-stamping PR to the basics.\r\n\r\n* Serialize groups.\r\n\r\n* Add groups to result backend meta data.\r\n\r\n* Fix spelling mistake.\r\n\r\n* Revert changes to canvas.py\r\n\r\n* Revert changes to app/base.py\r\n\r\n* Add stamping implementation to canvas.py\r\n\r\n* Send task to AMQP with groups.\r\n\r\n* Successfully pass single group to result.\r\n\r\n* _freeze_gid dict merge fixed\r\n\r\n* First draft of the visitor API.\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* OptionsVisitor created\r\n\r\n* Fixed canvas.py\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test for simple test for chord and fixed chord implementation\r\n\r\n* Changed _IMMUTABLE_OPTIONS\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed list order\r\n\r\n* Fixed tests (stamp test and chord test), fixed order in groups\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed lint and elements\r\n\r\n* Changed implementation of stamp API and fix lint\r\n\r\n* Added documentation to Stamping API. 
Added chord with groups test\r\n\r\n* Implemented stamping inside replace and added test for an implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Splitted into subtests\r\n\r\n* Group stamping rollback\r\n\r\n* group.id is None fixed\r\n\r\n* Added integration test\r\n\r\n* Added integration test\r\n\r\n* apply_async fixed\r\n\r\n* Integration test and test_chord fixed\r\n\r\n* Lint fixed\r\n\r\n* chord freeze fixed\r\n\r\n* Minor fixes.\r\n\r\n* Chain apply_async fixed and tests fixed\r\n\r\n* lint fixed\r\n\r\n* Added integration test for chord\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* type -> isinstance\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Redo header stamping (#7341)\r\n\r\n* _freeze_gid dict merge fixed\r\n\r\n* OptionsVisitor created\r\n\r\n* Fixed canvas.py\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test for simple test for chord and fixed chord implementation\r\n\r\n* Changed _IMMUTABLE_OPTIONS\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed list order\r\n\r\n* Fixed tests (stamp test and chord test), fixed order in groups\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Fixed lint and elements\r\n\r\n* Changed implementation of stamp API and fix lint\r\n\r\n* Added documentation to Stamping API. 
Added chord with groups test\r\n\r\n* Implemented stamping inside replace and added test for an implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Splitted into subtests\r\n\r\n* Group stamping rollback\r\n\r\n* group.id is None fixed\r\n\r\n* Added integration test\r\n\r\n* Added integration test\r\n\r\n* apply_async fixed\r\n\r\n* Integration test and test_chord fixed\r\n\r\n* Lint fixed\r\n\r\n* chord freeze fixed\r\n\r\n* Minor fixes.\r\n\r\n* Chain apply_async fixed and tests fixed\r\n\r\n* lint fixed\r\n\r\n* Added integration test for chord\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* type -> isinstance\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Omer Katz \r\n\r\n* Added stamping mechanism\r\n\r\n* Manual stamping improved\r\n\r\n* flake8 fixed\r\n\r\n* Added subtests\r\n\r\n* Add comma.\r\n\r\n* Moved groups to stamps\r\n\r\n* Fixed chord and added test for that\r\n\r\n* Strip down the header-stamping PR to the basics.\r\n\r\n* Serialize groups.\r\n\r\n* Add groups to result backend meta data.\r\n\r\n* Fix spelling mistake.\r\n\r\n* Revert changes to canvas.py\r\n\r\n* Revert changes to app/base.py\r\n\r\n* Add stamping implementation to canvas.py\r\n\r\n* Send task to AMQP with groups.\r\n\r\n* Successfully pass single group to result.\r\n\r\n* _freeze_gid dict merge fixed\r\n\r\n* First draft of the visitor API.\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* OptionsVisitor created\r\n\r\n* Fixed canvas.py\r\n\r\n* Added test for simple test for chord and fixed chord implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Changed _IMMUTABLE_OPTIONS\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed chord interface\r\n\r\n* Fixed list order\r\n\r\n* Fixed tests (stamp test and chord test), fixed order in groups\r\n\r\n* Fixed lint and elements\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Changed implementation of stamp API and fix lint\r\n\r\n* Added documentation to Stamping API. 
Added chord with groups test\r\n\r\n* Implemented stamping inside replace and added test for an implementation\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Added test additonal tests for chord, improved coverage\r\n\r\n* Splitted into subtests\r\n\r\n* Group stamping rollback\r\n\r\n* group.id is None fixed\r\n\r\n* Added integration test\r\n\r\n* Added integration test\r\n\r\n* apply_async fixed\r\n\r\n* Integration test and test_chord fixed\r\n\r\n* Lint fixed\r\n\r\n* chord freeze fixed\r\n\r\n* Minor fixes.\r\n\r\n* Chain apply_async fixed and tests fixed\r\n\r\n* lint fixed\r\n\r\n* Added integration test for chord\r\n\r\n* type -> isinstance\r\n\r\n* Added stamping mechanism\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Manual stamping improved\r\n\r\n* fail_ci_if_error uncommented\r\n\r\n* flake8 fixed\r\n\r\n* Added subtests\r\n\r\n* Changes\r\n\r\n* Add comma.\r\n\r\n* Fixed chord and added test for that\r\n\r\n* canvas.py fixed\r\n\r\n* Test chord.py fixed\r\n\r\n* Fixed stamped_headers\r\n\r\n* collections import fixed\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* collections import fixed\r\n\r\n* Update celery/backends/base.py\r\n\r\nCo-authored-by: Omer Katz \r\n\r\n* ampq.py fixed\r\n\r\n* Refrain from using deprecated import path.\r\n\r\n* Fix test_complex_chain regression.\r\n\r\nWhenever we stamp a group we need to freeze it first if it wasn't already frozen.\r\nSomewhere along the line, the group id changed because we were freezing twice.\r\nThis commit places the stamping operation after preparing the chain's steps which fixes the problem somehow.\r\n\r\nWe don't know why yet.\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed integration tests\r\n\r\n* Fixed issues with maybe_list. 
Add documentation\r\n\r\n* Fixed potential issue with integration tests\r\n\r\n* Fixed issues with _regen\r\n\r\n* Fixed issues with _regen\r\n\r\n* Fixed test_generator issues\r\n\r\n* Fixed _regen stamping\r\n\r\n* Fixed _regen stamping\r\n\r\n* Fixed TimeOut issue\r\n\r\n* Fixed TimeOut issue\r\n\r\n* Fixed TimeOut issue\r\n\r\n* Update docs/userguide/canvas.rst\r\n\r\nCo-authored-by: Omer Katz \r\n\r\n* Fixed Couchbase\r\n\r\n* Better stamping intro\r\n\r\n* New GroupVisitor example\r\n\r\n* Adjust documentation.\r\n\r\nCo-authored-by: Naomi Elstein \r\nCo-authored-by: Omer Katz \r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Asif Saif Uddin \r\nCo-authored-by: Omer Katz ", "code": "def stamp(self, visitor=None, **headers):\n \n headers = headers.copy()\n if visitor is not None:\n headers.update(visitor.on_signature(self, **headers))\n else:\n headers[\"stamped_headers\"] = [header for header in headers.keys() if header not in self.options]\n _merge_dictionaries(headers, self.options)\n return self.set(**headers)\n", "url": "https://github.com/celery/celery.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 99, "n_words": 31, "vocab_size": 26, "complexity": 4, "nloc": 8, "token_counts": 81, "n_ast_nodes": 130, "n_identifiers": 12, "d_id": 52183, "documentation": { "docstring": "Apply this task asynchronously.\n\n Arguments:\n visitor (StampingVisitor): Visitor API object.\n headers (Dict): Stamps that should be added to headers.\n ", "n_words": 19, "vocab_size": 19, "n_whitespaces": 55, "language": "en" } }, { "id": 134355, "commit_id": "37de814a8598e0ea3dea23d5ae0caf9df54fa0e6", "repo": "ray", "path": "rllib/core/rl_module/rl_module.py", "file_name": "rl_module.py", "fun_name": "input_specs_inference", "commit_message": "[RLlib] RLModule base class, RLModule PR 3/N (#29642)\n\nSigned-off-by: Kourosh Hakhamaneshi ", "code": "def input_specs_inference(self) -> ModelSpec:\n \n return ModelSpec()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 12, "n_ast_nodes": 23, "n_identifiers": 3, "d_id": 30267, "documentation": { "docstring": "Returns the input specs of the forward_inference method.", "n_words": 8, "vocab_size": 7, "n_whitespaces": 7, "language": "en" } }, { "id": 26688, "commit_id": "0881beec1ac02dfa97525c5173687defb356d85c", "repo": "saleor", "path": "saleor/payment/tests/test_gateway.py", "file_name": "test_gateway.py", "fun_name": "test_payment_refund_or_void_void_called", "commit_message": "Fix payment flow (#9504)\n\n* Do not capture payment again when it should be refunded or voided\r\n\r\n* Do not create order when then is ongoing refund", "code": "def test_payment_refund_or_void_void_called(void_mock, payment):\n \n # given\n payment.can_void = Mock(return_value=True)\n assert payment.can_void() is True\n payment.transactions.count() == 0\n\n # when\n gateway.payment_refund_or_void(payment, get_plugins_manager(), None)\n\n # then\n assert void_mock.called_once()\n\n\n@patch(\"saleor.payment.gateway.void\")", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "@patch(\"saleor.payment.gateway.void\")", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 51, "n_words": 25, "vocab_size": 22, "complexity": 1, "nloc": 6, "token_counts": 53, "n_ast_nodes": 101, 
"n_identifiers": 13, "d_id": 5047, "documentation": { "docstring": "Ensure that the refund method is called when payment can be voided\n and there is no void transaction for given payment.", "n_words": 21, "vocab_size": 20, "n_whitespaces": 23, "language": "en" } }, { "id": 166614, "commit_id": "244f747bb63f45c1c439193f0672c6162853b168", "repo": "pandas", "path": "pandas/core/series.py", "file_name": "series.py", "fun_name": "idxmin", "commit_message": "make series axis parameter docs consistent (#47109)\n\n* make series docs consistent\r\n\r\nadd series unused param info to DF docs\r\n\r\n* fix trailing whitespace\r\n\r\n* fix docs build\r\n\r\n* add unused\r\n\r\n* add or update docs for all series methods\r\n\r\n* small fix\r\n\r\n* fix line length\r\n\r\n* fix param order\r\n\r\n* fix param order\r\n\r\n* add\r\n\r\n* add backticks to None and fix space\r\n\r\nCo-authored-by: uncjackg ", "code": "def idxmin(self, axis=0, skipna=True, *args, **kwargs):\n \n i = self.argmin(axis, skipna, *args, **kwargs)\n if i == -1:\n return np.nan\n return self.index[i]\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 59, "n_words": 20, "vocab_size": 17, "complexity": 2, "nloc": 5, "token_counts": 53, "n_ast_nodes": 80, "n_identifiers": 11, "d_id": 39843, "documentation": { "docstring": "\n Return the row label of the minimum value.\n\n If multiple values equal the minimum, the first row label with that\n value is returned.\n\n Parameters\n ----------\n axis : {0 or 'index'}\n Unused. Parameter needed for compatibility with DataFrame.\n skipna : bool, default True\n Exclude NA/null values. If the entire Series is NA, the result\n will be NA.\n *args, **kwargs\n Additional arguments and keywords have no effect but might be\n accepted for compatibility with NumPy.\n\n Returns\n -------\n Index\n Label of the minimum value.\n\n Raises\n ------\n ValueError\n If the Series is empty.\n\n See Also\n --------\n numpy.argmin : Return indices of the minimum values\n along the given axis.\n DataFrame.idxmin : Return index of first occurrence of minimum\n over requested axis.\n Series.idxmax : Return index *label* of the first occurrence\n of maximum of values.\n\n Notes\n -----\n This method is the Series version of ``ndarray.argmin``. This method\n returns the label of the minimum, while ``ndarray.argmin`` returns\n the position. To get the position, use ``series.values.argmin()``.\n\n Examples\n --------\n >>> s = pd.Series(data=[1, None, 4, 1],\n ... 
index=['A', 'B', 'C', 'D'])\n >>> s\n A 1.0\n B NaN\n C 4.0\n D 1.0\n dtype: float64\n\n >>> s.idxmin()\n 'A'\n\n If `skipna` is False and there is an NA value in the data,\n the function returns ``nan``.\n\n >>> s.idxmin(skipna=False)\n nan\n ", "n_words": 207, "vocab_size": 132, "n_whitespaces": 631, "language": "en" } }, { "id": 116582, "commit_id": "b999051fd8153a1d3624471cac5483867116f985", "repo": "mindsdb", "path": "mindsdb/integrations/handlers/lightwood_handler/tests/test_lightwood_handler.py", "file_name": "test_lightwood_handler.py", "fun_name": "test_04_query_predictor_single_where_condition", "commit_message": "test fix", "code": "def test_04_query_predictor_single_where_condition(self):\n time.sleep(120) # TODO \n query = f\n response = self.handler.native_query(query)\n self.assertTrue(response.type == RESPONSE_TYPE.TABLE)\n self.assertTrue(len(response.data_frame) == 1)\n self.assertTrue(response.data_frame['sqft'][0] == 100)\n self.assertTrue(response.data_frame['rental_price'][0] is not None)\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 73, "n_words": 24, "vocab_size": 21, "complexity": 1, "nloc": 12, "token_counts": 83, "n_ast_nodes": 143, "n_identifiers": 15, "d_id": 25781, "documentation": { "docstring": "\n SELECT target\n from {self.test_model_1}\n WHERE sqft=100\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 47, "language": "en" } }, { "id": 90777, "commit_id": "8cdaa4e86e8296cdbc145f2a53d3eb38cb7a1c2b", "repo": "sentry", "path": "tests/sentry/lang/javascript/test_processor.py", "file_name": "test_processor.py", "fun_name": "test_archive_too_large_for_disk_cache", "commit_message": "ref: close files explicitly in tests.sentry.lang.javascript.test_processor (#35262)", "code": "def test_archive_too_large_for_disk_cache(self, cache_getfile):\n \n\n release = Release.objects.create(version=\"1\", organization_id=self.project.organization_id)\n self._create_archive(release, \"foo\")\n\n # cache.getfile is only called for index, not for the archive\n with override_options({\"releasefile.cache-max-archive-size\": 9}):\n result = fetch_release_archive_for_url(release, dist=None, url=\"foo\")\n assert result is not None\n result.close()\n assert len(cache_getfile.mock_calls) == 1\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 105, "n_words": 38, "vocab_size": 32, "complexity": 1, "nloc": 8, "token_counts": 79, "n_ast_nodes": 134, "n_identifiers": 19, "d_id": 18688, "documentation": { "docstring": "ReleaseFile.cache is not used if the archive is too large", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 269492, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "_is_current_explicit_device", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _is_current_explicit_device(device_type):\n \n device_type = device_type.upper()\n if device_type not in [\"CPU\", \"GPU\"]:\n raise ValueError('`device_type` should be either \"CPU\" or \"GPU\".')\n device = _get_current_tf_device()\n return device is not None and device.device_type == device_type.upper()\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 53, 
"n_words": 31, "vocab_size": 26, "complexity": 3, "nloc": 6, "token_counts": 48, "n_ast_nodes": 85, "n_identifiers": 6, "d_id": 80124, "documentation": { "docstring": "Check if the current device is explicitly set on the device type specified.\n\n Args:\n device_type: A string containing `GPU` or `CPU` (case-insensitive).\n\n Returns:\n A boolean indicating if the current device scope is explicitly set on the\n device type.\n\n Raises:\n ValueError: If the `device_type` string indicates an unsupported device.\n ", "n_words": 48, "vocab_size": 33, "n_whitespaces": 88, "language": "en" } }, { "id": 160283, "commit_id": "a0c2e826738daa0cbd83aba85852405b73878f5b", "repo": "numpy", "path": "numpy/core/_internal.py", "file_name": "_internal.py", "fun_name": "_promote_fields", "commit_message": "API: Fix structured dtype cast-safety, promotion, and comparison\n\nThis PR replaces the old gh-15509 implementing proper type promotion\nfor structured voids. It further fixes the casting safety to consider\ncasts with equivalent field number and matching order as \"safe\"\nand if the names, titles, and offsets match as \"equiv\".\n\nThe change perculates into the void comparison, and since it fixes\nthe order, it removes the current FutureWarning there as well.\n\nThis addresses https://github.com/liberfa/pyerfa/issues/77\nand replaces gh-15509 (the implementation has changed too much).\n\nFixes gh-15494 (and probably a few more)\n\nCo-authored-by: Allan Haldane ", "code": "def _promote_fields(dt1, dt2):\n \n # Both must be structured and have the same names in the same order\n if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names:\n raise TypeError(\"invalid type promotion\")\n\n new_fields = []\n for name in dt1.names:\n field1 = dt1.fields[name]\n field2 = dt2.fields[name]\n new_descr = promote_types(field1[0], field2[0])\n # Check that the titles match (if given):\n if field1[2:] != field2[2:]:\n raise TypeError(\"invalid type promotion\")\n if len(field1) == 2:\n new_fields.append((name, new_descr))\n else:\n new_fields.append(((field1[2], name), new_descr))\n\n return dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct)\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 188, "n_words": 81, "vocab_size": 62, "complexity": 8, "nloc": 15, "token_counts": 147, "n_ast_nodes": 231, "n_identifiers": 17, "d_id": 38591, "documentation": { "docstring": " Perform type promotion for two structured dtypes.\n\n Parameters\n ----------\n dt1 : structured dtype\n First dtype.\n dt2 : structured dtype\n Second dtype.\n\n Returns\n -------\n out : dtype\n The promoted dtype\n\n Notes\n -----\n If one of the inputs is aligned, the result will be. 
The titles of\n both descriptors must match (point to the same field).\n ", "n_words": 54, "vocab_size": 42, "n_whitespaces": 113, "language": "en" } }, { "id": 192311, "commit_id": "c50d48845f7b1ca86d6a3b7f37a59be0ae11e36b", "repo": "vision", "path": "test/test_video_reader.py", "file_name": "test_video_reader.py", "fun_name": "test_read_video_from_file_rescale_width_and_height", "commit_message": "Improve test_video_reader (#5498)\n\n* Improve test_video_reader\r\n\r\n* Fix linter error", "code": "def test_read_video_from_file_rescale_width_and_height(self, test_video):\n \n # video related\n width, height, min_dimension, max_dimension = 320, 240, 0, 0\n video_start_pts, video_end_pts = 0, -1\n video_timebase_num, video_timebase_den = 0, 1\n # audio related\n samples, channels = 0, 0\n audio_start_pts, audio_end_pts = 0, -1\n audio_timebase_num, audio_timebase_den = 0, 1\n\n full_path = os.path.join(VIDEO_DIR, test_video)\n\n tv_result = torch.ops.video_reader.read_video_from_file(\n full_path,\n SEEK_FRAME_MARGIN,\n 0, # getPtsOnly\n 1, # readVideoStream\n width,\n height,\n min_dimension,\n max_dimension,\n video_start_pts,\n video_end_pts,\n video_timebase_num,\n video_timebase_den,\n 1, # readAudioStream\n samples,\n channels,\n audio_start_pts,\n audio_end_pts,\n audio_timebase_num,\n audio_timebase_den,\n )\n assert tv_result[0].size(1) == height\n assert tv_result[0].size(2) == width\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 394, "n_words": 84, "vocab_size": 52, "complexity": 1, "nloc": 31, "token_counts": 145, "n_ast_nodes": 208, "n_identifiers": 29, "d_id": 46880, "documentation": { "docstring": "\n Test the case when decoder starts with a video file to decode frames, and\n both video height and width are set.\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 43, "language": "en" } }, { "id": 201818, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/backends/tests.py", "file_name": "tests.py", "fun_name": "test_queries", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_queries(self):\n \n sql = \"SELECT 1\" + connection.features.bare_select_suffix\n with connection.cursor() as cursor:\n reset_queries()\n cursor.execute(sql)\n self.assertEqual(1, len(connection.queries))\n self.assertIsInstance(connection.queries, list)\n self.assertIsInstance(connection.queries[0], dict)\n self.assertEqual(list(connection.queries[0]), [\"sql\", \"time\"])\n self.assertEqual(connection.queries[0][\"sql\"], sql)\n\n reset_queries()\n self.assertEqual(0, len(connection.queries))\n\n sql = \"INSERT INTO %s (%s, %s) VALUES (%%s, %%s)\" % (\n connection.introspection.identifier_converter(\"backends_square\"),\n connection.ops.quote_name(\"root\"),\n connection.ops.quote_name(\"square\"),\n )\n with connection.cursor() as cursor:\n cursor.executemany(sql, [(1, 1), (2, 4)])\n self.assertEqual(1, len(connection.queries))\n self.assertIsInstance(connection.queries, list)\n self.assertIsInstance(connection.queries[0], dict)\n self.assertEqual(list(connection.queries[0]), [\"sql\", \"time\"])\n self.assertEqual(connection.queries[0][\"sql\"], \"2 times: %s\" % sql)\n\n # Unfortunately with sqlite3 the in-memory test database cannot be closed.", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 274, "n_words": 79, "vocab_size": 58, 
"complexity": 1, "nloc": 24, "token_counts": 257, "n_ast_nodes": 421, "n_identifiers": 20, "d_id": 50001, "documentation": { "docstring": "\n Test the documented API of connection.queries.\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 21, "language": "en" } }, { "id": 221084, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/base64.py", "file_name": "base64.py", "fun_name": "b16decode", "commit_message": "add python 3.10.4 for windows", "code": "def b16decode(s, casefold=False):\n \n s = _bytes_from_decode_data(s)\n if casefold:\n s = s.upper()\n if re.search(b'[^0-9A-F]', s):\n raise binascii.Error('Non-base16 digit found')\n return binascii.unhexlify(s)\n\n#\n# Ascii85 encoding/decoding\n#\n\n_a85chars = None\n_a85chars2 = None\n_A85START = b\"<~\"\n_A85END = b\"~>\"\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 59, "n_words": 37, "vocab_size": 27, "complexity": 3, "nloc": 7, "token_counts": 51, "n_ast_nodes": 113, "n_identifiers": 14, "d_id": 56192, "documentation": { "docstring": "Decode the Base16 encoded bytes-like object or ASCII string s.\n\n Optional casefold is a flag specifying whether a lowercase alphabet is\n acceptable as input. For security purposes, the default is False.\n\n The result is returned as a bytes object. A binascii.Error is raised if\n s is incorrectly padded or if there are non-alphabet characters present\n in the input.\n ", "n_words": 58, "vocab_size": 45, "n_whitespaces": 78, "language": "en" } }, { "id": 206336, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/templatetags/tz.py", "file_name": "tz.py", "fun_name": "timezone_tag", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def timezone_tag(parser, token):\n \n bits = token.split_contents()\n if len(bits) != 2:\n raise TemplateSyntaxError(\"'%s' takes one argument (timezone)\" % bits[0])\n tz = parser.compile_filter(bits[1])\n nodelist = parser.parse((\"endtimezone\",))\n parser.delete_first_token()\n return TimezoneNode(nodelist, tz)\n\n\n@register.tag(\"get_current_timezone\")", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "@register.tag(\"get_current_timezone\")", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 56, "n_words": 29, "vocab_size": 27, "complexity": 2, "nloc": 8, "token_counts": 67, "n_ast_nodes": 126, "n_identifiers": 15, "d_id": 51491, "documentation": { "docstring": "\n Enable a given time zone just for this block.\n\n The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a\n time zone name, or ``None``. 
If it is ``None``, the default time zone is\n used within the block.\n\n Sample usage::\n\n {% timezone \"Europe/Paris\" %}\n It is {{ now }} in Paris.\n {% endtimezone %}\n ", "n_words": 55, "vocab_size": 43, "n_whitespaces": 99, "language": "en" } }, { "id": 183097, "commit_id": "4090d351684342b8e28ef9d5451c7c821e18d1ae", "repo": "textual", "path": "src/textual/dom.py", "file_name": "dom.py", "fun_name": "rich_text_style", "commit_message": "new layout", "code": "def rich_text_style(self) -> Style:\n \n\n # TODO: Feels like there may be opportunity for caching here.\n\n background = Color(0, 0, 0, 0)\n color = Color(255, 255, 255, 0)\n style = Style()\n for node in reversed(self.ancestors):\n styles = node.styles\n if styles.has_rule(\"background\"):\n background += styles.background\n if styles.has_rule(\"color\"):\n color = styles.color\n style += styles.text_style\n\n style = Style(bgcolor=background.rich_color, color=color.rich_color) + style\n return style\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 188, "n_words": 58, "vocab_size": 41, "complexity": 4, "nloc": 22, "token_counts": 103, "n_ast_nodes": 165, "n_identifiers": 15, "d_id": 44049, "documentation": { "docstring": "Get the text style object.\n\n A widget's style is influenced by its parent. For instance if a widgets background has an alpha,\n then its parent's background color will show throw. Additionally, widgets will inherit their\n parent's text style (i.e. bold, italic etc).\n\n Returns:\n Style: Rich Style object.\n ", "n_words": 47, "vocab_size": 38, "n_whitespaces": 93, "language": "en" } }, { "id": 176299, "commit_id": "78cd999e9b60d1b403cb4b736311cb0e00335eea", "repo": "networkx", "path": "networkx/convert_matrix.py", "file_name": "convert_matrix.py", "fun_name": "to_numpy_recarray", "commit_message": "Document default dtype in to_numpy_recarray docstring. 
(#5315)", "code": "def to_numpy_recarray(G, nodelist=None, dtype=None, order=None):\n \n import numpy as np\n\n if dtype is None:\n dtype = [(\"weight\", float)]\n\n if nodelist is None:\n nodelist = list(G)\n nodeset = G\n nlen = len(G)\n else:\n nlen = len(nodelist)\n nodeset = set(G.nbunch_iter(nodelist))\n if nlen != len(nodeset):\n for n in nodelist:\n if n not in G:\n raise nx.NetworkXError(f\"Node {n} in nodelist is not in G\")\n raise nx.NetworkXError(\"nodelist contains duplicates.\")\n\n undirected = not G.is_directed()\n index = dict(zip(nodelist, range(nlen)))\n M = np.zeros((nlen, nlen), dtype=dtype, order=order)\n\n names = M.dtype.names\n for u, v, attrs in G.edges(data=True):\n if (u in nodeset) and (v in nodeset):\n i, j = index[u], index[v]\n values = tuple(attrs[n] for n in names)\n M[i, j] = values\n if undirected:\n M[j, i] = M[i, j]\n\n return M.view(np.recarray)\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 323, "n_words": 119, "vocab_size": 75, "complexity": 11, "nloc": 28, "token_counts": 246, "n_ast_nodes": 385, "n_identifiers": 37, "d_id": 41816, "documentation": { "docstring": "Returns the graph adjacency matrix as a NumPy recarray.\n\n Parameters\n ----------\n G : graph\n The NetworkX graph used to construct the NumPy recarray.\n\n nodelist : list, optional\n The rows and columns are ordered according to the nodes in `nodelist`.\n If `nodelist` is None, then the ordering is produced by G.nodes().\n\n dtype : NumPy data-type, optional\n A valid NumPy named dtype used to initialize the NumPy recarray.\n The data type names are assumed to be keys in the graph edge attribute\n dictionary. The default is ``dtype([(\"weight\", float)])``.\n\n order : {'C', 'F'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory. If None, then the NumPy default\n is used.\n\n Returns\n -------\n M : NumPy recarray\n The graph with specified edge data as a Numpy recarray\n\n Notes\n -----\n When `nodelist` does not contain every node in `G`, the adjacency\n matrix is built from the subgraph of `G` that is induced by the nodes in\n `nodelist`.\n\n Examples\n --------\n >>> G = nx.Graph()\n >>> G.add_edge(1, 2, weight=7.0, cost=5)\n >>> A = nx.to_numpy_recarray(G, dtype=[(\"weight\", float), (\"cost\", int)])\n >>> print(A.weight)\n [[0. 7.]\n [7. 0.]]\n >>> print(A.cost)\n [[0 5]\n [5 0]]\n\n ", "n_words": 190, "vocab_size": 116, "n_whitespaces": 337, "language": "en" } }, { "id": 197095, "commit_id": "f8674bfe4988332e7ce60ceb36b365ce9aff662a", "repo": "sympy", "path": "sympy/diffgeom/diffgeom.py", "file_name": "diffgeom.py", "fun_name": "__new__", "commit_message": "Update the sympy.diffgeom mutability deprecations", "code": "def __new__(cls, name, manifold, **kwargs):\n if not isinstance(name, Str):\n name = Str(name)\n obj = super().__new__(cls, name, manifold)\n\n obj.manifold.patches.append(obj) # deprecated\n obj.coord_systems = _deprecated_list(\n , [])\n return obj\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 83, "n_words": 27, "vocab_size": 23, "complexity": 2, "nloc": 12, "token_counts": 64, "n_ast_nodes": 101, "n_identifiers": 13, "d_id": 48335, "documentation": { "docstring": "\n Patch.coord_systms is deprecated. The Patch class is now\n immutable. 
Instead use a separate list to keep track of coordinate\n systems.\n ", "n_words": 20, "vocab_size": 19, "n_whitespaces": 65, "language": "en" } }, { "id": 222485, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/difflib.py", "file_name": "difflib.py", "fun_name": "real_quick_ratio", "commit_message": "add python 3.10.4 for windows", "code": "def real_quick_ratio(self):\n \n\n la, lb = len(self.a), len(self.b)\n # can't have more matches than the number of elements in the\n # shorter sequence\n return _calculate_ratio(min(la, lb), la + lb)\n\n __class_getitem__ = classmethod(GenericAlias)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 69, "n_words": 31, "vocab_size": 28, "complexity": 1, "nloc": 3, "token_counts": 37, "n_ast_nodes": 72, "n_identifiers": 12, "d_id": 56586, "documentation": { "docstring": "Return an upper bound on ratio() very quickly.\n\n This isn't defined beyond that it is an upper bound on .ratio(), and\n is faster to compute than either .ratio() or .quick_ratio().\n ", "n_words": 30, "vocab_size": 25, "n_whitespaces": 51, "language": "en" } }, { "id": 14093, "commit_id": "5490ad5173743ef2bf85216d11b9ff0822b3d25b", "repo": "pydantic", "path": "tests/test_main.py", "file_name": "test_main.py", "fun_name": "test_model_exclude_copy_on_model_validation", "commit_message": "fix: `Config.copy_on_model_validation` does a deep copy and not a shallow one (#3642)\n\n* fix: `Config.copy_on_model_validation` does a deep copy and not a shallow one\r\n\r\ncloses #3641\r\n\r\n* fix: typo\r\n\r\n* use python 3.10 to run fastapi tests\r\n\r\n* fix fastapi test call\r\n\r\nCo-authored-by: Samuel Colvin ", "code": "def test_model_exclude_copy_on_model_validation():\n \n", "url": "https://github.com/pydantic/pydantic.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 29, "token_counts": 236, "n_ast_nodes": 12, "n_identifiers": 1, "d_id": 2817, "documentation": { "docstring": "When `Config.copy_on_model_validation` is set, it should keep private attributes and excluded fields", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 47323, "commit_id": "d8889da29ccfcbecd2c89b9e8e278c480767d678", "repo": "airflow", "path": "airflow/cli/commands/standalone_command.py", "file_name": "standalone_command.py", "fun_name": "calculate_env", "commit_message": "Move the database configuration to a new section (#22284)\n\nCo-authored-by: gitstart-airflow \r\nCo-authored-by: GitStart <1501599+gitstart@users.noreply.github.com>\r\nCo-authored-by: Egbosi Kelechi ", "code": "def calculate_env(self):\n \n env = dict(os.environ)\n # Make sure we're using a local executor flavour\n if conf.get(\"core\", \"executor\") not in [\n executor_constants.LOCAL_EXECUTOR,\n executor_constants.SEQUENTIAL_EXECUTOR,\n ]:\n if \"sqlite\" in conf.get(\"database\", \"sql_alchemy_conn\"):\n self.print_output(\"standalone\", \"Forcing executor to SequentialExecutor\")\n env[\"AIRFLOW__CORE__EXECUTOR\"] = executor_constants.SEQUENTIAL_EXECUTOR\n else:\n self.print_output(\"standalone\", \"Forcing executor to LocalExecutor\")\n env[\"AIRFLOW__CORE__EXECUTOR\"] = executor_constants.LOCAL_EXECUTOR\n return env\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, 
"n_whitespaces": 193, "n_words": 47, "vocab_size": 36, "complexity": 3, "nloc": 13, "token_counts": 84, "n_ast_nodes": 153, "n_identifiers": 12, "d_id": 9065, "documentation": { "docstring": "\n Works out the environment variables needed to run subprocesses.\n We override some settings as part of being standalone.\n ", "n_words": 18, "vocab_size": 18, "n_whitespaces": 40, "language": "en" } }, { "id": 180915, "commit_id": "597337dcb8762cca6e718b59a4ab6f5e333645fd", "repo": "gradio", "path": "demo/xgboost-income-prediction-with-explainability/run.py", "file_name": "run.py", "fun_name": "interpret", "commit_message": "Adding a Playground Tab to the Website (#1860)\n\n* added playground with 12 demos\r\n\r\n* change name to recipes, restyle navbar\r\n\r\n* add explanatory text to page\r\n\r\n* fix demo mapping\r\n\r\n* categorize demos, clean up design\r\n\r\n* styling\r\n\r\n* cateogry naming and emojis\r\n\r\n* refactor and add text demos\r\n\r\n* add view code button\r\n\r\n* remove opening slash in embed\r\n\r\n* styling\r\n\r\n* add image demos\r\n\r\n* adding plot demos\r\n\r\n* remove see code button\r\n\r\n* removed submodules\r\n\r\n* changes\r\n\r\n* add audio models\r\n\r\n* remove fun section\r\n\r\n* remove tests in image semgentation demo repo\r\n\r\n* requested changes\r\n\r\n* add outbreak_forecast\r\n\r\n* fix broken demos\r\n\r\n* remove images and models, add new demos\r\n\r\n* remove readmes, change to run.py, add description as comment\r\n\r\n* move to /demos folder, clean up dict\r\n\r\n* add upload_to_spaces script\r\n\r\n* fix script, clean repos, and add to docker file\r\n\r\n* fix python versioning issue\r\n\r\n* env variable\r\n\r\n* fix\r\n\r\n* env fixes\r\n\r\n* spaces instead of tabs\r\n\r\n* revert to original networking.py\r\n\r\n* fix rate limiting in asr and autocomplete\r\n\r\n* change name to demos\r\n\r\n* clean up navbar\r\n\r\n* move url and description, remove code comments\r\n\r\n* add tabs to demos\r\n\r\n* remove margins and footer from embedded demo\r\n\r\n* font consistency\r\n\r\nCo-authored-by: Abubakar Abid ", "code": "def interpret(*args):\n df = pd.DataFrame([args], columns=X_train.columns)\n df = df.astype({col: \"category\" for col in categorical_columns})\n shap_values = explainer.shap_values(xgb.DMatrix(df, enable_categorical=True))\n scores_desc = list(zip(shap_values[0], X_train.columns))\n scores_desc = sorted(scores_desc)\n fig_m = plt.figure(tight_layout=True)\n plt.barh([s[1] for s in scores_desc], [s[0] for s in scores_desc])\n plt.title(\"Feature Shap Values\")\n plt.ylabel(\"Shap Value\")\n plt.xlabel(\"Feature\")\n plt.tight_layout()\n return fig_m\n\n\nunique_class = sorted(X_train[\"workclass\"].unique())\nunique_education = sorted(X_train[\"education\"].unique())\nunique_marital_status = sorted(X_train[\"marital.status\"].unique())\nunique_relationship = sorted(X_train[\"relationship\"].unique())\nunique_occupation = sorted(X_train[\"occupation\"].unique())\nunique_sex = sorted(X_train[\"sex\"].unique())\nunique_country = sorted(X_train[\"native.country\"].unique())\n\nwith gr.Blocks() as demo:\n gr.Markdown()\n with gr.Row():\n with gr.Column():\n age = gr.Slider(label=\"Age\", minimum=17, maximum=90, step=1, randomize=True)\n work_class = gr.Dropdown(\n label=\"Workclass\",\n choices=unique_class,\n value=lambda: random.choice(unique_class),\n )\n education = gr.Dropdown(\n label=\"Education Level\",\n choices=unique_education,\n value=lambda: random.choice(unique_education),\n )\n years = gr.Slider(\n label=\"Years of 
schooling\",\n minimum=1,\n maximum=16,\n step=1,\n randomize=True,\n )\n marital_status = gr.Dropdown(\n label=\"Marital Status\",\n choices=unique_marital_status,\n value=lambda: random.choice(unique_marital_status),\n )\n occupation = gr.Dropdown(\n label=\"Occupation\",\n choices=unique_occupation,\n value=lambda: random.choice(unique_occupation),\n )\n relationship = gr.Dropdown(\n label=\"Relationship Status\",\n choices=unique_relationship,\n value=lambda: random.choice(unique_relationship),\n )\n sex = gr.Dropdown(\n label=\"Sex\", choices=unique_sex, value=lambda: random.choice(unique_sex)\n )\n capital_gain = gr.Slider(\n label=\"Capital Gain\",\n minimum=0,\n maximum=100000,\n step=500,\n randomize=True,\n )\n capital_loss = gr.Slider(\n label=\"Capital Loss\", minimum=0, maximum=10000, step=500, randomize=True\n )\n hours_per_week = gr.Slider(\n label=\"Hours Per Week Worked\", minimum=1, maximum=99, step=1\n )\n country = gr.Dropdown(\n label=\"Native Country\",\n choices=unique_country,\n value=lambda: random.choice(unique_country),\n )\n with gr.Column():\n label = gr.Label()\n plot = gr.Plot()\n with gr.Row():\n predict_btn = gr.Button(value=\"Predict\")\n interpret_btn = gr.Button(value=\"Explain\")\n predict_btn.click(\n predict,\n inputs=[\n age,\n work_class,\n education,\n years,\n marital_status,\n occupation,\n relationship,\n sex,\n capital_gain,\n capital_loss,\n hours_per_week,\n country,\n ],\n outputs=[label],\n )\n interpret_btn.click(\n interpret,\n inputs=[\n age,\n work_class,\n education,\n years,\n marital_status,\n occupation,\n relationship,\n sex,\n capital_gain,\n capital_loss,\n hours_per_week,\n country,\n ],\n outputs=[plot],\n )\n\ndemo.launch()\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 1686, "n_words": 239, "vocab_size": 149, "complexity": 4, "nloc": 13, "token_counts": 138, "n_ast_nodes": 1103, "n_identifiers": 76, "d_id": 43252, "documentation": { "docstring": "\n **Income Classification with XGBoost 💰**: This demo uses an XGBoost classifier predicts income based on demographic factors, along with Shapley value-based *explanations*. The [source code for this Gradio demo is here](https://huggingface.co/spaces/gradio/xgboost-income-prediction-with-explainability/blob/main/app.py).\n ", "n_words": 31, "vocab_size": 28, "n_whitespaces": 39, "language": "en" } }, { "id": 204878, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/base/operations.py", "file_name": "operations.py", "fun_name": "regex_lookup", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def regex_lookup(self, lookup_type):\n \n raise NotImplementedError(\n \"subclasses of BaseDatabaseOperations may require a regex_lookup() method\"\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 46, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 4, "token_counts": 13, "n_ast_nodes": 25, "n_identifiers": 4, "d_id": 50951, "documentation": { "docstring": "\n Return the string to use in a query when performing regular expression\n lookups (using \"regex\" or \"iregex\"). 
It should contain a '%s'\n placeholder for the column being searched against.\n\n If the feature is not supported (or part of it is not supported), raise\n NotImplementedError.\n ", "n_words": 44, "vocab_size": 39, "n_whitespaces": 87, "language": "en" } }, { "id": 66693, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/patches/v12_0/update_due_date_in_gle.py", "file_name": "update_due_date_in_gle.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute():\n\tfrappe.reload_doc(\"accounts\", \"doctype\", \"gl_entry\")\n\n\tfor doctype in [\"Sales Invoice\", \"Purchase Invoice\", \"Journal Entry\"]:\n\t\tfrappe.reload_doc(\"accounts\", \"doctype\", frappe.scrub(doctype))\n\n\t\tfrappe.db.sql(\n\t\t\t.format( # nosec\n\t\t\t\tdoctype=doctype\n\t\t\t)\n\t\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 16, "n_words": 24, "vocab_size": 20, "complexity": 2, "nloc": 15, "token_counts": 55, "n_ast_nodes": 101, "n_identifiers": 8, "d_id": 14296, "documentation": { "docstring": " UPDATE `tabGL Entry`, `tab{doctype}`\n SET\n `tabGL Entry`.due_date = `tab{doctype}`.due_date\n WHERE\n `tabGL Entry`.voucher_no = `tab{doctype}`.name and `tabGL Entry`.party is not null\n and `tabGL Entry`.voucher_type in ('Sales Invoice', 'Purchase Invoice', 'Journal Entry')\n and `tabGL Entry`.account in (select name from `tabAccount` where account_type in ('Receivable', 'Payable'))", "n_words": 43, "vocab_size": 32, "n_whitespaces": 125, "language": "en" } }, { "id": 296364, "commit_id": "3b2aae5045f9f08dc8f174c5d975852588e1a132", "repo": "core", "path": "homeassistant/components/mqtt/mixins.py", "file_name": "mixins.py", "fun_name": "async_tear_down", "commit_message": "Refactor MQTT discovery (#67966)\n\n* Proof of concept\r\n\r\n* remove notify platform\r\n\r\n* remove loose test\r\n\r\n* Add rework from #67912 (#1)\r\n\r\n* Move notify serviceupdater to Mixins\r\n\r\n* Move tag discovery handler to Mixins\r\n\r\n* fix tests\r\n\r\n* Add typing for async_load_platform_helper\r\n\r\n* Add add entry unload support for notify platform\r\n\r\n* Simplify discovery updates\r\n\r\n* Remove not needed extra logic\r\n\r\n* Cleanup inrelevant or duplicate code\r\n\r\n* reuse update_device and move to mixins\r\n\r\n* Remove notify platform\r\n\r\n* revert changes to notify platform\r\n\r\n* Rename update class\r\n\r\n* unify tag entry setup\r\n\r\n* Use shared code for device_trigger `update_device`\r\n\r\n* PoC shared dispatcher for device_trigger\r\n\r\n* Fix bugs\r\n\r\n* Improve typing - remove async_update\r\n\r\n* Unload config_entry and tests\r\n\r\n* Release dispatcher after setup and deduplicate\r\n\r\n* closures to methods, revert `in` to `=`, updates\r\n\r\n* Re-add update support for tag platform\r\n\r\n* Re-add update support for device-trigger platform\r\n\r\n* Cleanup rediscovery code revert related changes\r\n\r\n* Undo discovery code shift\r\n\r\n* Update homeassistant/components/mqtt/mixins.py\r\n\r\nCo-authored-by: Erik Montnemery \r\n\r\n* Update homeassistant/components/mqtt/device_trigger.py\r\n\r\nCo-authored-by: Erik Montnemery \r\n\r\n* Update homeassistant/components/mqtt/mixins.py\r\n\r\nCo-authored-by: Erik Montnemery \r\n\r\n* revert doc string changes\r\n\r\n* move conditions\r\n\r\n* typing and check config_entry_id\r\n\r\n* Update homeassistant/components/mqtt/mixins.py\r\n\r\nCo-authored-by: Erik Montnemery 
\r\n\r\n* cleanup not used attribute\r\n\r\n* Remove entry_unload code and tests\r\n\r\n* update comment\r\n\r\n* add second comment\r\n\r\nCo-authored-by: Erik Montnemery ", "code": "async def async_tear_down(self) -> None:\n \n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 12, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 8, "n_ast_nodes": 17, "n_identifiers": 2, "d_id": 95348, "documentation": { "docstring": "Handle the cleanup of platform specific parts, extend to the platform.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 195857, "commit_id": "cda8dfe6f45dc5ed394c2f5cda706cd6c729f713", "repo": "sympy", "path": "sympy/functions/elementary/complexes.py", "file_name": "complexes.py", "fun_name": "unpolarify", "commit_message": "Improved documentation formatting", "code": "def unpolarify(eq, subs=None, exponents_only=False):\n \n if isinstance(eq, bool):\n return eq\n\n eq = sympify(eq)\n if subs is not None:\n return unpolarify(eq.subs(subs))\n changed = True\n pause = False\n if exponents_only:\n pause = True\n while changed:\n changed = False\n res = _unpolarify(eq, exponents_only, pause)\n if res != eq:\n changed = True\n eq = res\n if isinstance(res, bool):\n return res\n # Finally, replacing Exp(0) by 1 is always correct.\n # So is polar_lift(0) -> 0.\n return res.subs({exp_polar(0): 1, polar_lift(0): 0})\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 190, "n_words": 75, "vocab_size": 46, "complexity": 7, "nloc": 19, "token_counts": 116, "n_ast_nodes": 184, "n_identifiers": 13, "d_id": 47444, "documentation": { "docstring": "\n If `p` denotes the projection from the Riemann surface of the logarithm to\n the complex line, return a simplified version `eq'` of `eq` such that\n `p(eq') = p(eq)`.\n Also apply the substitution subs in the end. 
(This is a convenience, since\n ``unpolarify``, in a certain sense, undoes :func:`polarify`.)\n\n Examples\n ========\n\n >>> from sympy import unpolarify, polar_lift, sin, I\n >>> unpolarify(polar_lift(I + 2))\n 2 + I\n >>> unpolarify(sin(polar_lift(I + 7)))\n sin(7 + I)\n ", "n_words": 72, "vocab_size": 56, "n_whitespaces": 112, "language": "en" } }, { "id": 77721, "commit_id": "b4d3cf1c30b5fbe7eed09fab90c845f0cd0f678c", "repo": "wagtail", "path": "wagtail/admin/viewsets/base.py", "file_name": "base.py", "fun_name": "get_url_name", "commit_message": "Docs for base ViewSet class", "code": "def get_url_name(self, view_name):\n \n return self.name + \":\" + view_name\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 23, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 29, "n_identifiers": 4, "d_id": 16698, "documentation": { "docstring": "\n Returns the namespaced URL name for the given view.\n ", "n_words": 9, "vocab_size": 8, "n_whitespaces": 24, "language": "en" } }, { "id": 62629, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/html5lib/treewalkers/base.py", "file_name": "base.py", "fun_name": "text", "commit_message": "upd; format", "code": "def text(self, data):\n \n data = data\n middle = data.lstrip(spaceCharacters)\n left = data[:len(data) - len(middle)]\n if left:\n yield {\"type\": \"SpaceCharacters\", \"data\": left}\n data = middle\n middle = data.rstrip(spaceCharacters)\n right = data[len(middle):]\n if middle:\n yield {\"type\": \"Characters\", \"data\": middle}\n if right:\n yield {\"type\": \"SpaceCharacters\", \"data\": right}\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 147, "n_words": 44, "vocab_size": 26, "complexity": 4, "nloc": 13, "token_counts": 94, "n_ast_nodes": 169, "n_identifiers": 10, "d_id": 13021, "documentation": { "docstring": "Generates SpaceCharacters and Characters tokens\n\n Depending on what's in the data, this generates one or more\n ``SpaceCharacters`` and ``Characters`` tokens.\n\n For example:\n\n >>> from html5lib.treewalkers.base import TreeWalker\n >>> # Give it an empty tree just so it instantiates\n >>> walker = TreeWalker([])\n >>> list(walker.text(''))\n []\n >>> list(walker.text(' '))\n [{u'data': ' ', u'type': u'SpaceCharacters'}]\n >>> list(walker.text(' abc ')) # doctest: +NORMALIZE_WHITESPACE\n [{u'data': ' ', u'type': u'SpaceCharacters'},\n {u'data': u'abc', u'type': u'Characters'},\n {u'data': u' ', u'type': u'SpaceCharacters'}]\n\n :arg data: the text data\n\n :returns: one or more ``SpaceCharacters`` and ``Characters`` tokens\n\n ", "n_words": 87, "vocab_size": 60, "n_whitespaces": 253, "language": "en" } }, { "id": 205092, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/oracle/operations.py", "file_name": "operations.py", "fun_name": "adapt_datetimefield_value", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def adapt_datetimefield_value(self, value):\n \n\n if value is None:\n return None\n\n # Expression values are adapted by the database.\n if hasattr(value, \"resolve_expression\"):\n return value\n\n # cx_Oracle doesn't support tz-aware datetimes\n if timezone.is_aware(value):\n if settings.USE_TZ:\n value 
= timezone.make_naive(value, self.connection.timezone)\n else:\n raise ValueError(\n \"Oracle backend does not support timezone-aware datetimes when USE_TZ is False.\"\n )\n\n return Oracle_datetime.from_datetime(value)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 210, "n_words": 53, "vocab_size": 42, "complexity": 5, "nloc": 13, "token_counts": 66, "n_ast_nodes": 112, "n_identifiers": 13, "d_id": 51013, "documentation": { "docstring": "\n Transform a datetime value to an object compatible with what is expected\n by the backend driver for datetime columns.\n\n If naive datetime is passed assumes that is in UTC. Normally Django\n models.DateTimeField makes sure that if USE_TZ is True passed datetime\n is timezone aware.\n ", "n_words": 44, "vocab_size": 35, "n_whitespaces": 87, "language": "en" } }, { "id": 45428, "commit_id": "5d89dea56843d7b76d5e308e373ba16ecbcffa77", "repo": "airflow", "path": "airflow/providers/docker/example_dags/tutorial_taskflow_api_etl_docker_virtualenv.py", "file_name": "tutorial_taskflow_api_etl_docker_virtualenv.py", "fun_name": "tutorial_taskflow_api_etl_docker_virtualenv", "commit_message": "Switch to Debian 11 (bullseye) as base for our dockerfiles (#21378)\n\nDebian 11 Bullseye have been released some time ago as the new\r\nLTS Debian release and already all our dependencies (including\r\nMySQL and MSSQL ODBC drivers) caught up with it so we can finally\r\nmigrate to it.\r\n\r\nThis change switches base images to bullsey for our Dockerfiles\r\nas well as for Redis image we are using in CI.\r\n\r\nThe relevant packages have been updated to include that\r\nand documentation have been updated.\r\n\r\nExamples of ours also are updated to use \"bullseye\" rather than\r\nbuster.\r\n\r\nCloses: #18190\r\nCloses: #18279", "code": "def tutorial_taskflow_api_etl_docker_virtualenv():\n \n # [END instantiate_dag]\n\n # [START extract_virtualenv]", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 17, "n_words": 8, "vocab_size": 7, "complexity": 1, "nloc": 14, "token_counts": 66, "n_ast_nodes": 14, "n_identifiers": 1, "d_id": 8558, "documentation": { "docstring": "\n ### TaskFlow API Tutorial Documentation\n This is a simple ETL data pipeline example which demonstrates the use of\n the TaskFlow API using three simple tasks for Extract, Transform, and Load.\n Documentation that goes along with the Airflow TaskFlow API tutorial is\n located\n [here](https://airflow.apache.org/docs/apache-airflow/stable/tutorial_taskflow_api.html)\n ", "n_words": 43, "vocab_size": 34, "n_whitespaces": 65, "language": "en" } }, { "id": 281134, "commit_id": "ea964109d654394cc0a5237e6ec5510ba6404097", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/cryptocurrency/due_diligence/dd_controller.py", "file_name": "dd_controller.py", "fun_name": "call_score", "commit_message": "Crypto menu refactor (#1119)\n\n* enabled some crypto commands in dd to be called independent of source loaded\r\n\r\n* support for coin_map_df in all dd functions + load ta and plot chart refactor\r\n\r\n* updated tests and removed coingecko scrapping where possible\r\n\r\n* removed ref of command from hugo\r\n\r\n* updated pycoingecko version\r\n\r\n* refactoring load\r\n\r\n* refactored load to fetch prices; pred can run independent of source now\r\n\r\n* load by default usd on cp/cg and usdt on cb/bin\r\n\r\n* updated to rich for 
formatting and updated dependencies\r\n\r\n* fixed changes requested\r\n\r\n* update docs\r\n\r\n* revert discord requirements\r\n\r\n* removed absolute from calculate change for price\r\n\r\n* fixing pr issues\r\n\r\n* fix loading issue when similar coins exist, move coins to home, fill n/a\r\n\r\n* update docs for coins\r\n\r\n* adds load to ta and pred menu", "code": "def call_score(self, other_args):\n \n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"score\",\n description=,\n )\n\n ns_parser = parse_known_args_and_warn(\n parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED\n )\n\n if ns_parser:\n pycoingecko_view.display_score(\n self.coin_map_df[\"CoinGecko\"], ns_parser.export\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 158, "n_words": 24, "vocab_size": 21, "complexity": 2, "nloc": 18, "token_counts": 61, "n_ast_nodes": 97, "n_identifiers": 18, "d_id": 83546, "documentation": { "docstring": "Process score command\n In this view you can find different kind of scores for loaded coin.\n Those scores represents different rankings, sentiment metrics, some user stats and others.\n You will see CoinGecko scores, Developer Scores, Community Scores, Sentiment, Reddit scores\n and many others.", "n_words": 43, "vocab_size": 37, "n_whitespaces": 86, "language": "en" } }, { "id": 209547, "commit_id": "08b1f9d67c8e716fd44036a027bdc90dcb9fcfdf", "repo": "scapy", "path": "scapy/layers/inet6.py", "file_name": "inet6.py", "fun_name": "defragment6", "commit_message": "E275 - Missing whitespace after keyword (#3711)\n\nCo-authored-by: Alexander Aring \r\nCo-authored-by: Anmol Sarma \r\nCo-authored-by: antoine.torre \r\nCo-authored-by: Antoine Vacher \r\nCo-authored-by: Arnaud Ebalard \r\nCo-authored-by: atlowl <86038305+atlowl@users.noreply.github.com>\r\nCo-authored-by: Brian Bienvenu \r\nCo-authored-by: Chris Packham \r\nCo-authored-by: CQ \r\nCo-authored-by: Daniel Collins \r\nCo-authored-by: Federico Maggi \r\nCo-authored-by: Florian Maury \r\nCo-authored-by: _Frky <3105926+Frky@users.noreply.github.com>\r\nCo-authored-by: g-mahieux <37588339+g-mahieux@users.noreply.github.com>\r\nCo-authored-by: gpotter2 \r\nCo-authored-by: Guillaume Valadon \r\nCo-authored-by: Hao Zheng \r\nCo-authored-by: Haresh Khandelwal \r\nCo-authored-by: Harri Hämäläinen \r\nCo-authored-by: hecke \r\nCo-authored-by: Jan Romann \r\nCo-authored-by: Jan Sebechlebsky \r\nCo-authored-by: jdiog0 <43411724+jdiog0@users.noreply.github.com>\r\nCo-authored-by: jockque <38525640+jockque@users.noreply.github.com>\r\nCo-authored-by: Julien Bedel <30991560+JulienBedel@users.noreply.github.com>\r\nCo-authored-by: Keith Scott \r\nCo-authored-by: Kfir Gollan \r\nCo-authored-by: Lars Munch \r\nCo-authored-by: ldp77 <52221370+ldp77@users.noreply.github.com>\r\nCo-authored-by: Leonard Crestez \r\nCo-authored-by: Marcel Patzlaff \r\nCo-authored-by: Martijn Thé \r\nCo-authored-by: Martine Lenders \r\nCo-authored-by: Michael Farrell \r\nCo-authored-by: Michał Mirosław \r\nCo-authored-by: mkaliszan \r\nCo-authored-by: mtury \r\nCo-authored-by: Neale Ranns \r\nCo-authored-by: Octavian Toader \r\nCo-authored-by: Peter Eisenlohr \r\nCo-authored-by: Phil \r\nCo-authored-by: Pierre Lalet \r\nCo-authored-by: Pierre Lorinquer \r\nCo-authored-by: piersoh <42040737+piersoh@users.noreply.github.com>\r\nCo-authored-by: plorinquer \r\nCo-authored-by: pvinci 
\r\nCo-authored-by: Rahul Jadhav \r\nCo-authored-by: Robin Jarry \r\nCo-authored-by: romain-perez <51962832+romain-perez@users.noreply.github.com>\r\nCo-authored-by: rperez \r\nCo-authored-by: Sabrina Dubroca \r\nCo-authored-by: Sebastian Baar \r\nCo-authored-by: sebastien mainand \r\nCo-authored-by: smehner1 \r\nCo-authored-by: speakinghedge \r\nCo-authored-by: Steven Van Acker \r\nCo-authored-by: Thomas Faivre \r\nCo-authored-by: Tran Tien Dat \r\nCo-authored-by: Wael Mahlous \r\nCo-authored-by: waeva <74464394+waeva@users.noreply.github.com>\r\n\r\nCo-authored-by: Alexander Aring \r\nCo-authored-by: Anmol Sarma \r\nCo-authored-by: antoine.torre \r\nCo-authored-by: Antoine Vacher \r\nCo-authored-by: Arnaud Ebalard \r\nCo-authored-by: atlowl <86038305+atlowl@users.noreply.github.com>\r\nCo-authored-by: Brian Bienvenu \r\nCo-authored-by: Chris Packham \r\nCo-authored-by: CQ \r\nCo-authored-by: Daniel Collins \r\nCo-authored-by: Federico Maggi \r\nCo-authored-by: Florian Maury \r\nCo-authored-by: _Frky <3105926+Frky@users.noreply.github.com>\r\nCo-authored-by: g-mahieux <37588339+g-mahieux@users.noreply.github.com>\r\nCo-authored-by: gpotter2 \r\nCo-authored-by: Guillaume Valadon \r\nCo-authored-by: Hao Zheng \r\nCo-authored-by: Haresh Khandelwal \r\nCo-authored-by: Harri Hämäläinen \r\nCo-authored-by: hecke \r\nCo-authored-by: Jan Romann \r\nCo-authored-by: Jan Sebechlebsky \r\nCo-authored-by: jdiog0 <43411724+jdiog0@users.noreply.github.com>\r\nCo-authored-by: jockque <38525640+jockque@users.noreply.github.com>\r\nCo-authored-by: Julien Bedel <30991560+JulienBedel@users.noreply.github.com>\r\nCo-authored-by: Keith Scott \r\nCo-authored-by: Kfir Gollan \r\nCo-authored-by: Lars Munch \r\nCo-authored-by: ldp77 <52221370+ldp77@users.noreply.github.com>\r\nCo-authored-by: Leonard Crestez \r\nCo-authored-by: Marcel Patzlaff \r\nCo-authored-by: Martijn Thé \r\nCo-authored-by: Martine Lenders \r\nCo-authored-by: Michael Farrell \r\nCo-authored-by: Michał Mirosław \r\nCo-authored-by: mkaliszan \r\nCo-authored-by: mtury \r\nCo-authored-by: Neale Ranns \r\nCo-authored-by: Octavian Toader \r\nCo-authored-by: Peter Eisenlohr \r\nCo-authored-by: Phil \r\nCo-authored-by: Pierre Lalet \r\nCo-authored-by: Pierre Lorinquer \r\nCo-authored-by: piersoh <42040737+piersoh@users.noreply.github.com>\r\nCo-authored-by: pvinci \r\nCo-authored-by: Rahul Jadhav \r\nCo-authored-by: Robin Jarry \r\nCo-authored-by: romain-perez <51962832+romain-perez@users.noreply.github.com>\r\nCo-authored-by: rperez \r\nCo-authored-by: Sabrina Dubroca \r\nCo-authored-by: Sebastian Baar \r\nCo-authored-by: sebastien mainand \r\nCo-authored-by: smehner1 \r\nCo-authored-by: Steven Van Acker \r\nCo-authored-by: Thomas Faivre \r\nCo-authored-by: Tran Tien Dat \r\nCo-authored-by: Wael Mahlous \r\nCo-authored-by: waeva <74464394+waeva@users.noreply.github.com>", "code": "def defragment6(packets):\n \n\n # Remove non fragments\n lst = [x for x in packets if IPv6ExtHdrFragment in x]\n if not lst:\n return []\n\n id = lst[0][IPv6ExtHdrFragment].id\n\n llen = len(lst)\n lst = [x for x in lst if x[IPv6ExtHdrFragment].id == id]\n if len(lst) != llen:\n warning(\"defragment6: some fragmented packets have been removed from list\") # noqa: E501\n\n # reorder fragments\n res = []\n while lst:\n min_pos = 0\n min_offset = lst[0][IPv6ExtHdrFragment].offset\n for p in lst:\n cur_offset = p[IPv6ExtHdrFragment].offset\n if cur_offset < min_offset:\n min_pos = 0\n min_offset = cur_offset\n res.append(lst[min_pos])\n del lst[min_pos]\n\n # regenerate the 
fragmentable part\n fragmentable = b\"\"\n for p in res:\n q = p[IPv6ExtHdrFragment]\n offset = 8 * q.offset\n if offset != len(fragmentable):\n warning(\"Expected an offset of %d. Found %d. Padding with XXXX\" % (len(fragmentable), offset)) # noqa: E501\n fragmentable += b\"X\" * (offset - len(fragmentable))\n fragmentable += raw(q.payload)\n\n # Regenerate the unfragmentable part.\n q = res[0].copy()\n nh = q[IPv6ExtHdrFragment].nh\n q[IPv6ExtHdrFragment].underlayer.nh = nh\n q[IPv6ExtHdrFragment].underlayer.plen = len(fragmentable)\n del q[IPv6ExtHdrFragment].underlayer.payload\n q /= conf.raw_layer(load=fragmentable)\n del q.plen\n\n if q[IPv6].underlayer:\n q[IPv6] = IPv6(raw(q[IPv6]))\n else:\n q = IPv6(raw(q))\n return q\n\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 412, "n_words": 174, "vocab_size": 102, "complexity": 13, "nloc": 40, "token_counts": 297, "n_ast_nodes": 473, "n_identifiers": 28, "d_id": 52730, "documentation": { "docstring": "\n Performs defragmentation of a list of IPv6 packets. Packets are reordered.\n Crap is dropped. What lacks is completed by 'X' characters.\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 31, "language": "en" } }, { "id": 95431, "commit_id": "2a4da479b2d4a2faa901701f4c73ff823236e9e8", "repo": "sentry", "path": "src/sentry/search/events/builder.py", "file_name": "builder.py", "fun_name": "flattened_having", "commit_message": "fix(snql): Add aggregations to select in auto_aggregation (#31061)\n\n- This is to fix an issue for queries that have the uniq aggregation in\r\n the HAVING clause, and is not selected.\r\n - Previously we would not add the aggregation to the select clause in\r\n these cases\r\n - Now anything in the having clause will get added to the select\r\n clause as well if auto_aggregation is enabled\r\n - if its disabled we raise an invalid search query error\r\n- This also fixes a bug where this having validation wasn't working\r\n correctly for boolean conditions", "code": "def flattened_having(self) -> List[Condition]:\n \n flattened: List[Condition] = []\n boolean_conditions: List[BooleanCondition] = []\n\n for condition in self.having:\n if isinstance(condition, Condition):\n flattened.append(condition)\n elif isinstance(condition, BooleanCondition):\n boolean_conditions.append(condition)\n\n while len(boolean_conditions) > 0:\n boolean_condition = boolean_conditions.pop()\n for condition in boolean_condition.conditions:\n if isinstance(condition, Condition):\n flattened.append(condition)\n elif isinstance(condition, BooleanCondition):\n boolean_conditions.append(condition)\n\n return flattened\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 229, "n_words": 45, "vocab_size": 30, "complexity": 8, "nloc": 20, "token_counts": 116, "n_ast_nodes": 184, "n_identifiers": 15, "d_id": 19211, "documentation": { "docstring": "Return self.having as a flattened list ignoring boolean operators\n This is because self.having can have a mix of BooleanConditions and Conditions. 
And each BooleanCondition can in\n turn be a mix of either type.\n ", "n_words": 33, "vocab_size": 27, "n_whitespaces": 54, "language": "en" } }, { "id": 101568, "commit_id": "7da2cc3dd266aabebf41a31384cc2e0e7e5af6e5", "repo": "faceswap", "path": "lib/training/preview_tk.py", "file_name": "preview_tk.py", "fun_name": "scale_var", "commit_message": "Training - Use custom preview pop-out", "code": "def scale_var(self) -> tk.StringVar:\n \n retval = self._vars[\"scale\"]\n assert isinstance(retval, tk.StringVar)\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 40, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 6, "token_counts": 29, "n_ast_nodes": 49, "n_identifiers": 7, "d_id": 20978, "documentation": { "docstring": ":class:`tkinter.StringVar`: The variable holding the currently selected \"##%\" formatted\n percentage scaling amount displayed in the Combobox. ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 23, "language": "en" } }, { "id": 248623, "commit_id": "99d3931974e65865d1102ee79d7b7e2b017a3180", "repo": "synapse", "path": "tests/rest/client/test_upgrade_room.py", "file_name": "test_upgrade_room.py", "fun_name": "test_second_upgrade_from_different_user", "commit_message": "Add more tests for room upgrades (#13074)\n\nSigned-off-by: Sean Quah ", "code": "def test_second_upgrade_from_different_user(self) -> None:\n \n channel = self._upgrade_room()\n self.assertEqual(200, channel.code, channel.result)\n\n channel = self._upgrade_room(self.other_token, expire_cache=False)\n self.assertEqual(400, channel.code, channel.result)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 52, "n_words": 17, "vocab_size": 13, "complexity": 1, "nloc": 6, "token_counts": 57, "n_ast_nodes": 89, "n_identifiers": 9, "d_id": 72381, "documentation": { "docstring": "A second room upgrade from a different user is blocked.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 190078, "commit_id": "9d1f066d637cb15baea10e6907ab85efff8fb36f", "repo": "manim", "path": "manim/utils/tex_file_writing.py", "file_name": "tex_file_writing.py", "fun_name": "generate_tex_file", "commit_message": "Migrate more `os.path` to `pathlib` (#2980)\n\n* Migrate more `os.path` to `pathlib`\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fix type errors with recent pathlib code\r\n\r\n* pathlib fixes\r\n\r\n* more pathlib fixes\r\n\r\n* remove unused imports introduced by pathlib migration\r\n\r\n* convert `open()` calls to pathlib\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Migrate tex_file_writing to pathlib\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* converted more old code to pathlib, and fixed a bug in module_ops\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fix test failures\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fix test failures\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Benjamin Hackl \r\n\r\nCo-authored-by: pre-commit-ci[bot] 
<66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Benjamin Hackl ", "code": "def generate_tex_file(expression, environment=None, tex_template=None):\n \n if tex_template is None:\n tex_template = config[\"tex_template\"]\n if environment is not None:\n output = tex_template.get_texcode_for_expression_in_env(expression, environment)\n else:\n output = tex_template.get_texcode_for_expression(expression)\n\n tex_dir = config.get_dir(\"tex_dir\")\n if not tex_dir.exists():\n tex_dir.mkdir()\n\n result = tex_dir / (tex_hash(output) + \".tex\")\n if not result.exists():\n logger.info(\n \"Writing %(expression)s to %(path)s\",\n {\"expression\": expression, \"path\": f\"{result}\"},\n )\n result.write_text(output, encoding=\"utf-8\")\n return result\n\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 153, "n_words": 55, "vocab_size": 40, "complexity": 5, "nloc": 18, "token_counts": 122, "n_ast_nodes": 213, "n_identifiers": 18, "d_id": 46314, "documentation": { "docstring": "Takes a tex expression (and an optional tex environment),\n and returns a fully formed tex file ready for compilation.\n\n Parameters\n ----------\n expression : :class:`str`\n String containing the TeX expression to be rendered, e.g. ``\\\\sqrt{2}`` or ``foo``\n environment : Optional[:class:`str`], optional\n The string containing the environment in which the expression should be typeset, e.g. ``align*``\n tex_template : Optional[:class:`~.TexTemplate`], optional\n Template class used to typesetting. If not set, use default template set via `config[\"tex_template\"]`\n\n Returns\n -------\n :class:`Path`\n Path to generated TeX file\n ", "n_words": 80, "vocab_size": 60, "n_whitespaces": 138, "language": "en" } }, { "id": 300743, "commit_id": "a70e2a33dcd85608f1145d8fc2e89a87620f4ef3", "repo": "core", "path": "homeassistant/components/recorder/queries.py", "file_name": "queries.py", "fun_name": "find_legacy_row", "commit_message": "Fixing purging legacy rows and improve performance (#71916)", "code": "def find_legacy_row() -> StatementLambdaElement:\n \n return lambda_stmt(lambda: select(func.max(States.event_id)))\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 42, "n_identifiers": 8, "d_id": 99602, "documentation": { "docstring": "Check if there are still states in the table with an event_id.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 208244, "commit_id": "720d1928c4b583f36ca0cce7607b616466f2ffbb", "repo": "celery", "path": "celery/result.py", "file_name": "result.py", "fun_name": "completed_count", "commit_message": "Add clarifying information to completed_count documentation (#7873)\n\n* Add clarifying information to completed_count docstring\r\n\r\n* Update canvas documentation", "code": "def completed_count(self):\n \n return sum(int(result.successful()) for result in self.results)\n", "url": "https://github.com/celery/celery.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 2, "nloc": 2, "token_counts": 24, "n_ast_nodes": 41, "n_identifiers": 7, "d_id": 52260, "documentation": { "docstring": "Task completion count.\n\n Note that `complete` means `successful` 
in this context. In other words, the\n return value of this method is the number of ``successful`` tasks.\n\n Returns:\n int: the number of complete (i.e. successful) tasks.\n ", "n_words": 35, "vocab_size": 28, "n_whitespaces": 74, "language": "en" } }, { "id": 118583, "commit_id": "704eab3478cf69847825b23dabf15813a8ac9fa2", "repo": "streamlit", "path": "lib/tests/server_test_case.py", "file_name": "server_test_case.py", "fun_name": "_create_mock_app_session", "commit_message": "Rename and refactor `Report` machinery (#4141)\n\nThis refactor renames (almost) everything related to the outdated \"report\" concept with more precise concepts that we use throughout our code, primarily \"script run\", \"session\", and \"app\".", "code": "def _create_mock_app_session(*args, **kwargs):\n \n mock_id = mock.PropertyMock(\n return_value=\"mock_id:%s\" % ServerTestCase._next_session_id\n )\n ServerTestCase._next_session_id += 1\n\n mock_session = mock.MagicMock(AppSession, autospec=True, *args, **kwargs)\n type(mock_session).id = mock_id\n return mock_session\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 84, "n_words": 24, "vocab_size": 19, "complexity": 1, "nloc": 8, "token_counts": 57, "n_ast_nodes": 93, "n_identifiers": 15, "d_id": 26306, "documentation": { "docstring": "Create a mock AppSession. Each mocked instance will have\n its own unique ID.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 19, "language": "en" } }, { "id": 216507, "commit_id": "9e1ca8b5b9e7006fea28f473711917755cf5a262", "repo": "salt", "path": "salt/modules/aptpkg.py", "file_name": "aptpkg.py", "fun_name": "upgrade", "commit_message": "Add --allow-downgrades capability for apt upgrade", "code": "def upgrade(refresh=True, dist_upgrade=False, **kwargs):\n \n cache_valid_time = kwargs.pop(\"cache_valid_time\", 0)\n if salt.utils.data.is_true(refresh):\n refresh_db(cache_valid_time)\n\n old = list_pkgs()\n if \"force_conf_new\" in kwargs and kwargs[\"force_conf_new\"]:\n dpkg_options = [\"--force-confnew\"]\n else:\n dpkg_options = [\"--force-confold\", \"--force-confdef\"]\n cmd = [\n \"apt-get\",\n \"-q\",\n \"-y\",\n ]\n for option in dpkg_options:\n cmd.append(\"-o\")\n cmd.append(\"DPkg::Options::={}\".format(option))\n\n if kwargs.get(\"force_yes\", False):\n cmd.append(\"--force-yes\")\n if kwargs.get(\"skip_verify\", False):\n cmd.append(\"--allow-unauthenticated\")\n if kwargs.get(\"download_only\", False) or kwargs.get(\"downloadonly\", False):\n cmd.append(\"--download-only\")\n if kwargs.get(\"allow_downgrades\", False):\n cmd.append(\"--allow-downgrades\")\n\n cmd.append(\"dist-upgrade\" if dist_upgrade else \"upgrade\")\n result = _call_apt(cmd, env=DPKG_ENV_VARS.copy())\n __context__.pop(\"pkg.list_pkgs\", None)\n new = list_pkgs()\n ret = salt.utils.data.compare_dicts(old, new)\n\n if result[\"retcode\"] != 0:\n raise CommandExecutionError(\n \"Problem encountered upgrading packages\",\n info={\"changes\": ret, \"result\": result},\n )\n\n return ret\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 275, "n_words": 95, "vocab_size": 74, "complexity": 12, "nloc": 36, "token_counts": 254, "n_ast_nodes": 446, "n_identifiers": 30, "d_id": 54618, "documentation": { "docstring": "\n .. 
versionchanged:: 2015.8.12,2016.3.3,2016.11.0\n On minions running systemd>=205, `systemd-run(1)`_ is now used to\n isolate commands which modify installed packages from the\n ``salt-minion`` daemon's control group. This is done to keep systemd\n from killing any apt-get/dpkg commands spawned by Salt when the\n ``salt-minion`` service is restarted. (see ``KillMode`` in the\n `systemd.kill(5)`_ manpage for more information). If desired, usage of\n `systemd-run(1)`_ can be suppressed by setting a :mod:`config option\n ` called ``systemd.scope``, with a value of\n ``False`` (no quotes).\n\n .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html\n .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html\n\n Upgrades all packages via ``apt-get upgrade`` or ``apt-get dist-upgrade``\n if ``dist_upgrade`` is ``True``.\n\n Returns a dictionary containing the changes:\n\n .. code-block:: python\n\n {'': {'old': '',\n 'new': ''}}\n\n dist_upgrade\n Whether to perform the upgrade using dist-upgrade vs upgrade. Default\n is to use upgrade.\n\n .. versionadded:: 2014.7.0\n\n refresh : True\n If ``True``, the apt cache will be refreshed first. By default,\n this is ``True`` and a refresh is performed.\n\n cache_valid_time\n\n .. versionadded:: 2016.11.0\n\n Skip refreshing the package database if refresh has already occurred within\n seconds\n\n download_only (or downloadonly)\n Only download the packages, don't unpack or install them. Use\n downloadonly to be in line with yum and zypper module.\n\n .. versionadded:: 2018.3.0\n\n force_conf_new\n Always install the new version of any configuration files.\n\n .. versionadded:: 2015.8.0\n\n allow_downgrades\n Allow apt to downgrade packages without a prompt.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' pkg.upgrade\n ", "n_words": 222, "vocab_size": 163, "n_whitespaces": 472, "language": "en" } }, { "id": 269313, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/activations.py", "file_name": "activations.py", "fun_name": "elu", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def elu(x, alpha=1.0):\n \n return backend.elu(x, alpha)\n\n\n@keras_export(\"keras.activations.selu\")\n@tf.__internal__.dispatch.add_dispatch_support", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.activations.selu\")\n@tf.__internal__.dispatch.add_dispatch_support", "n_ast_errors": 1, "ast_levels": 7, "n_whitespaces": 12, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 21, "n_ast_nodes": 54, "n_identifiers": 9, "d_id": 80027, "documentation": { "docstring": "Exponential Linear Unit.\n\n The exponential linear unit (ELU) with `alpha > 0` is:\n `x` if `x > 0` and\n `alpha * (exp(x) - 1)` if `x < 0`\n The ELU hyperparameter `alpha` controls the value to which an\n ELU saturates for negative net inputs. 
ELUs diminish the\n vanishing gradient effect.\n\n ELUs have negative values which pushes the mean of the activations\n closer to zero.\n Mean activations that are closer to zero enable faster learning as they\n bring the gradient closer to the natural gradient.\n ELUs saturate to a negative value when the argument gets smaller.\n Saturation means a small derivative which decreases the variation\n and the information that is propagated to the next layer.\n\n Example Usage:\n\n >>> import tensorflow as tf\n >>> model = tf.keras.Sequential()\n >>> model.add(tf.keras.layers.Conv2D(32, (3, 3), activation='elu',\n ... input_shape=(28, 28, 1)))\n >>> model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n >>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu'))\n >>> model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n >>> model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='elu'))\n\n \n\n Args:\n x: Input tensor.\n alpha: A scalar, slope of negative section. `alpha` controls the value to\n which an ELU saturates for negative net inputs.\n\n Returns:\n The exponential linear unit (ELU) activation function: `x` if `x > 0` and\n `alpha * (exp(x) - 1)` if `x < 0`.\n\n\n Reference:\n [Fast and Accurate Deep Network Learning by Exponential Linear Units\n (ELUs) (Clevert et al, 2016)](https://arxiv.org/abs/1511.07289)\n ", "n_words": 216, "vocab_size": 128, "n_whitespaces": 357, "language": "en" } }, { "id": 308863, "commit_id": "395093351428d349246e4c1de526b159a167f382", "repo": "core", "path": "homeassistant/components/trafikverket_weatherstation/sensor.py", "file_name": "sensor.py", "fun_name": "async_update", "commit_message": "Code improvements to trafikverket_weatherstation (#62854)\n\n* Code cleanup\r\n\r\n* Fix extra state attributes\r\n\r\n* Fix review comments\r\n\r\n* Fix precipitation_amount if None\r\n\r\n* Fix sensors returning None\r\n\r\n* Use const for sensors reporting None", "code": "async def async_update(self) -> None:\n \n try:\n self._weather = await self._weather_api.async_get_weather(self._station)\n except (asyncio.TimeoutError, aiohttp.ClientError, ValueError) as error:\n _LOGGER.error(\"Could not fetch weather data: %s\", error)\n return\n self._attr_native_value = getattr(\n self._weather, self.entity_description.api_key\n )\n if (\n self._attr_native_value is None\n and self.entity_description.key in NONE_IS_ZERO_SENSORS\n ):\n self._attr_native_value = 0\n\n self._attr_extra_state_attributes = {\n ATTR_ACTIVE: self._weather.active,\n ATTR_MEASURE_TIME: self._weather.measure_time,\n }\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 213, "n_words": 51, "vocab_size": 46, "complexity": 4, "nloc": 19, "token_counts": 109, "n_ast_nodes": 173, "n_identifiers": 24, "d_id": 107592, "documentation": { "docstring": "Get the latest data from Trafikverket and updates the states.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 127516, "commit_id": "96cceb08e8bf73df990437002e25883c5a72d30c", "repo": "ray", "path": "python/ray/tune/execution/placement_groups.py", "file_name": "placement_groups.py", "fun_name": "required_resources", "commit_message": "[tune] Raise error in PGF if head and worker bundles are empty (#28445)\n\nScheduling empty placement groups is not supported by Ray core (see e.g. 
#28443), so we shouldn't allow them to be created in the first place.\r\n\r\nIf we need fully empty resource requests, we can include this in the upcoming execution/resource refactor.\r\n\r\nSigned-off-by: Kai Fricke ", "code": "def required_resources(self) -> Dict[str, float]:\n \n return _sum_bundles(self._bundles)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 33, "n_identifiers": 7, "d_id": 28458, "documentation": { "docstring": "Returns a dict containing the sums of all resources", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 176152, "commit_id": "dec723f072eb997a497a159dbe8674cd39999ee9", "repo": "networkx", "path": "networkx/generators/small.py", "file_name": "small.py", "fun_name": "chvatal_graph", "commit_message": "Docstrings for the small.py module (#5240)\n\n* added description for the first 5 small graphs\r\n\r\n* modified descriptions based on comment and added description for two more functions\r\n\r\n* added doctrings to all the functions\r\n\r\n* Minor touchups.\r\n\r\nCo-authored-by: Ross Barnowski ", "code": "def chvatal_graph(create_using=None):\n \n description = [\n \"adjacencylist\",\n \"Chvatal Graph\",\n 12,\n [\n [2, 5, 7, 10],\n [3, 6, 8],\n [4, 7, 9],\n [5, 8, 10],\n [6, 9],\n [11, 12],\n [11, 12],\n [9, 12],\n [11],\n [11, 12],\n [],\n [],\n ],\n ]\n G = make_small_undirected_graph(description, create_using)\n return G\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 226, "n_words": 44, "vocab_size": 32, "complexity": 1, "nloc": 22, "token_counts": 105, "n_ast_nodes": 138, "n_identifiers": 5, "d_id": 41722, "documentation": { "docstring": "\n Returns the Chvátal Graph\n\n The Chvátal Graph is an undirected graph with 12 nodes and 24 edges [1]_.\n It has 370 distinct (directed) Hamiltonian cycles, giving a unique generalized\n LCF notation of order 4, two of order 6 , and 43 of order 1 [2]_.\n\n Parameters\n ----------\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n Returns\n -------\n G : networkx Graph\n The Chvátal graph with 12 nodes and 24 edges\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Chv%C3%A1tal_graph\n .. 
[2] https://mathworld.wolfram.com/ChvatalGraph.html\n\n ", "n_words": 88, "vocab_size": 64, "n_whitespaces": 147, "language": "en" } }, { "id": 289512, "commit_id": "67d1dde69fbacf33f2c39ea14d89f2afa425ed18", "repo": "core", "path": "tests/test_config.py", "file_name": "test_config.py", "fun_name": "test_igration_and_updating_configuration", "commit_message": "Rename IMPERIAL_SYSTEM to US_CUSTOMARY_SYSTEM (#80253)\n\n* Rename IMPERIAL_SYSTEM\r\n\r\n* Deprecate is_metric property and adjust tests\r\n\r\n* Adjust unit_system config validation\r\n\r\n* Add yaml tests\r\n\r\n* Add tests for private name\r\n\r\n* Fix incorrect rebase\r\n\r\n* Adjust docstring\r\n\r\n* Add store migration\r\n\r\n* Update unit_system.py\r\n\r\n* Minimise test tweaks\r\n\r\n* Fix tests\r\n\r\n* Add conversion to migration\r\n\r\n* Rename new key and adjust tests\r\n\r\n* Adjust websocket_detect_config\r\n\r\n* Move original_unit_system tracking to subclass", "code": "async def test_igration_and_updating_configuration(hass, hass_storage):\n \n core_data = {\n \"data\": {\n \"elevation\": 10,\n \"latitude\": 55,\n \"location_name\": \"Home\",\n \"longitude\": 13,\n \"time_zone\": \"Europe/Copenhagen\",\n \"unit_system\": \"imperial\",\n \"external_url\": \"https://www.example.com\",\n \"internal_url\": \"http://example.local\",\n \"currency\": \"BTC\",\n },\n \"key\": \"core.config\",\n \"version\": 1,\n \"minor_version\": 1,\n }\n hass_storage[\"core.config\"] = dict(core_data)\n await config_util.async_process_ha_core_config(\n hass, {\"allowlist_external_dirs\": \"/etc\"}\n )\n await hass.config.async_update(latitude=50, currency=\"USD\")\n\n expected_new_core_data = copy.deepcopy(core_data)\n # From async_update above\n expected_new_core_data[\"data\"][\"latitude\"] = 50\n expected_new_core_data[\"data\"][\"currency\"] = \"USD\"\n # 1.1 -> 1.2 store migration with migrated unit system\n expected_new_core_data[\"data\"][\"unit_system_v2\"] = \"us_customary\"\n expected_new_core_data[\"minor_version\"] = 2\n assert hass_storage[\"core.config\"] == expected_new_core_data\n assert hass.config.latitude == 50\n assert hass.config.currency == \"USD\"\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 280, "n_words": 88, "vocab_size": 70, "complexity": 1, "nloc": 30, "token_counts": 166, "n_ast_nodes": 314, "n_identifiers": 14, "d_id": 88654, "documentation": { "docstring": "Test updating configuration stores the new configuration.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 291535, "commit_id": "63d519c1a896c6eb20f7ffb032cb7712bbac6b5c", "repo": "core", "path": "tests/components/cpuspeed/conftest.py", "file_name": "conftest.py", "fun_name": "mock_cpuinfo_config_flow", "commit_message": "Spelling updates (#82867)", "code": "def mock_cpuinfo_config_flow() -> Generator[MagicMock, None, None]:\n \n with patch(\n \"homeassistant.components.cpuspeed.config_flow.cpuinfo.get_cpu_info\",\n return_value=True,\n ) as cpuinfo_mock:\n yield cpuinfo_mock\n\n\n@pytest.fixture", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 45, "n_words": 16, "vocab_size": 16, "complexity": 1, "nloc": 11, "token_counts": 29, "n_ast_nodes": 58, "n_identifiers": 8, "d_id": 90642, "documentation": { "docstring": "Return a mocked get_cpu_info.\n\n It is only used to check truthy or falsy values, so it 
is mocked\n to return True.\n ", "n_words": 21, "vocab_size": 18, "n_whitespaces": 30, "language": "en" } }, { "id": 152956, "commit_id": "3c740dbfcdd69ddc3ab45a42be996e5c61104342", "repo": "modin", "path": "modin/core/dataframe/pandas/dataframe/dataframe.py", "file_name": "dataframe.py", "fun_name": "_propagate_index_objs", "commit_message": "FEAT-#3111: Ensure relabeling Modin Frame does not lose partition shape (#3662)\n\nCo-authored-by: Devin Petersohn \r\nSigned-off-by: Naren Krishna ", "code": "def _propagate_index_objs(self, axis=None):\n \n self._filter_empties()\n if axis is None or axis == 0:\n cum_row_lengths = np.cumsum([0] + self._row_lengths)\n if axis is None or axis == 1:\n cum_col_widths = np.cumsum([0] + self._column_widths)\n\n if axis is None:\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 91, "n_words": 34, "vocab_size": 20, "complexity": 15, "nloc": 64, "token_counts": 373, "n_ast_nodes": 107, "n_identifiers": 10, "d_id": 35202, "documentation": { "docstring": "\n Synchronize labels by applying the index object for specific `axis` to the `self._partitions` lazily.\n\n Adds `set_axis` function to call-queue of each partition from `self._partitions`\n to apply new axis.\n\n Parameters\n ----------\n axis : int, default: None\n The axis to apply to. If it's None applies to both axes.\n ", "n_words": 47, "vocab_size": 38, "n_whitespaces": 108, "language": "en" } }, { "id": 259242, "commit_id": "7f0006c8aad1a09621ad19c3db19c3ff0555a183", "repo": "scikit-learn", "path": "sklearn/utils/_encode.py", "file_name": "_encode.py", "fun_name": "_get_counts", "commit_message": "ENH Adds infrequent categories to OneHotEncoder (#16018)\n\n* ENH Completely adds infrequent categories\r\n\r\n* STY Linting\r\n\r\n* STY Linting\r\n\r\n* DOC Improves wording\r\n\r\n* DOC Lint\r\n\r\n* BUG Fixes\r\n\r\n* CLN Address comments\r\n\r\n* CLN Address comments\r\n\r\n* DOC Uses math to description float min_frequency\r\n\r\n* DOC Adds comment regarding drop\r\n\r\n* BUG Fixes method name\r\n\r\n* DOC Clearer docstring\r\n\r\n* TST Adds more tests\r\n\r\n* FIX Fixes mege\r\n\r\n* CLN More pythonic\r\n\r\n* CLN Address comments\r\n\r\n* STY Flake8\r\n\r\n* CLN Address comments\r\n\r\n* DOC Fix\r\n\r\n* MRG\r\n\r\n* WIP\r\n\r\n* ENH Address comments\r\n\r\n* STY Fix\r\n\r\n* ENH Use functiion call instead of property\r\n\r\n* ENH Adds counts feature\r\n\r\n* CLN Rename variables\r\n\r\n* DOC More details\r\n\r\n* CLN Remove unneeded line\r\n\r\n* CLN Less lines is less complicated\r\n\r\n* CLN Less diffs\r\n\r\n* CLN Improves readiabilty\r\n\r\n* BUG Fix\r\n\r\n* CLN Address comments\r\n\r\n* TST Fix\r\n\r\n* CLN Address comments\r\n\r\n* CLN Address comments\r\n\r\n* CLN Move docstring to userguide\r\n\r\n* DOC Better wrapping\r\n\r\n* TST Adds test to handle_unknown='error'\r\n\r\n* ENH Spelling error in docstring\r\n\r\n* BUG Fixes counter with nan values\r\n\r\n* BUG Removes unneeded test\r\n\r\n* BUG Fixes issue\r\n\r\n* ENH Sync with main\r\n\r\n* DOC Correct settings\r\n\r\n* DOC Adds docstring\r\n\r\n* DOC Immprove user guide\r\n\r\n* DOC Move to 1.0\r\n\r\n* DOC Update docs\r\n\r\n* TST Remove test\r\n\r\n* DOC Update docstring\r\n\r\n* STY Linting\r\n\r\n* DOC Address comments\r\n\r\n* ENH Neater code\r\n\r\n* DOC Update explaination for auto\r\n\r\n* Update sklearn/preprocessing/_encoders.py\r\n\r\nCo-authored-by: Roman Yurchak \r\n\r\n* TST Uses docstring instead of comments\r\n\r\n* 
TST Remove call to fit\r\n\r\n* TST Spelling error\r\n\r\n* ENH Adds support for drop + infrequent categories\r\n\r\n* ENH Adds infrequent_if_exist option\r\n\r\n* DOC Address comments for user guide\r\n\r\n* DOC Address comments for whats_new\r\n\r\n* DOC Update docstring based on comments\r\n\r\n* CLN Update test with suggestions\r\n\r\n* ENH Adds computed property infrequent_categories_\r\n\r\n* DOC Adds where the infrequent column is located\r\n\r\n* TST Adds more test for infrequent_categories_\r\n\r\n* DOC Adds docstring for _compute_drop_idx\r\n\r\n* CLN Moves _convert_to_infrequent_idx into its own method\r\n\r\n* TST Increases test coverage\r\n\r\n* TST Adds failing test\r\n\r\n* CLN Careful consideration of dropped and inverse_transform\r\n\r\n* STY Linting\r\n\r\n* DOC Adds docstrinb about dropping infrequent\r\n\r\n* DOC Uses only\r\n\r\n* DOC Numpydoc\r\n\r\n* TST Includes test for get_feature_names_out\r\n\r\n* DOC Move whats new\r\n\r\n* DOC Address docstring comments\r\n\r\n* DOC Docstring changes\r\n\r\n* TST Better comments\r\n\r\n* TST Adds check for handle_unknown='ignore' for infrequent\r\n\r\n* CLN Make _infrequent_indices private\r\n\r\n* CLN Change min_frequency default to None\r\n\r\n* DOC Adds comments\r\n\r\n* ENH adds support for max_categories=1\r\n\r\n* ENH Describe lexicon ordering for ties\r\n\r\n* DOC Better docstring\r\n\r\n* STY Fix\r\n\r\n* CLN Error when explicity dropping an infrequent category\r\n\r\n* STY Grammar\r\n\r\nCo-authored-by: Joel Nothman \r\nCo-authored-by: Roman Yurchak \r\nCo-authored-by: Guillaume Lemaitre ", "code": "def _get_counts(values, uniques):\n \n if values.dtype.kind in \"OU\":\n counter = _NaNCounter(values)\n output = np.zeros(len(uniques), dtype=np.int64)\n for i, item in enumerate(uniques):\n with suppress(KeyError):\n output[i] = counter[item]\n return output\n\n unique_values, counts = _unique_np(values, return_counts=True)\n\n # Recorder unique_values based on input: `uniques`\n uniques_in_values = np.isin(uniques, unique_values, assume_unique=True)\n if np.isnan(unique_values[-1]) and np.isnan(uniques[-1]):\n uniques_in_values[-1] = True\n\n unique_valid_indices = np.searchsorted(unique_values, uniques[uniques_in_values])\n output = np.zeros_like(uniques, dtype=np.int64)\n output[uniques_in_values] = counts[unique_valid_indices]\n return output\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 154, "n_words": 63, "vocab_size": 47, "complexity": 5, "nloc": 16, "token_counts": 161, "n_ast_nodes": 252, "n_identifiers": 28, "d_id": 75673, "documentation": { "docstring": "Get the count of each of the `uniques` in `values`.\n\n The counts will use the order passed in by `uniques`. 
For non-object dtypes,\n `uniques` is assumed to be sorted and `np.nan` is at the end.\n ", "n_words": 35, "vocab_size": 28, "n_whitespaces": 44, "language": "en" } }, { "id": 217889, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/http/server.py", "file_name": "server.py", "fun_name": "log_error", "commit_message": "add python 3.10.4 for windows", "code": "def log_error(self, format, *args):\n \n\n self.log_message(format, *args)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 33, "n_identifiers": 5, "d_id": 54992, "documentation": { "docstring": "Log an error.\n\n This is called when a request cannot be fulfilled. By\n default it passes the message on to log_message().\n\n Arguments are the same as for log_message().\n\n XXX This should go to the separate error log.\n\n ", "n_words": 37, "vocab_size": 32, "n_whitespaces": 73, "language": "en" } }, { "id": 100621, "commit_id": "60291d49c4da1cd260fbc0b04aa6a312eedfefbb", "repo": "faceswap", "path": "plugins/convert/writer/ffmpeg.py", "file_name": "ffmpeg.py", "fun_name": "_video_tmp_file", "commit_message": "ffmpeg writer: Create new filename if output pre-exists", "code": "def _video_tmp_file(self) -> str:\n \n path, filename = os.path.split(self._output_filename)\n retval = os.path.join(path, f\"__tmp_{filename}\")\n logger.debug(retval)\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 50, "n_words": 15, "vocab_size": 13, "complexity": 1, "nloc": 7, "token_counts": 43, "n_ast_nodes": 75, "n_identifiers": 12, "d_id": 20083, "documentation": { "docstring": " str: Full path to the temporary video file that is generated prior to muxing final\n audio. ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 24, "language": "en" } }, { "id": 118728, "commit_id": "72703b38029f9358a0ec7ca5ed875a6b438ece19", "repo": "streamlit", "path": "lib/streamlit/elements/dataframe_selector.py", "file_name": "dataframe_selector.py", "fun_name": "bar_chart", "commit_message": "Replace static apps with live Cloud apps (#4317)\n\nCo-authored-by: kajarenc ", "code": "def bar_chart(self, data=None, width=0, height=0, use_container_width=True):\n \n\n if _use_arrow():\n return self.dg._arrow_bar_chart(data, width, height, use_container_width)\n else:\n return self.dg._legacy_bar_chart(data, width, height, use_container_width)\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 62, "n_words": 19, "vocab_size": 15, "complexity": 2, "nloc": 5, "token_counts": 59, "n_ast_nodes": 86, "n_identifiers": 10, "d_id": 26385, "documentation": { "docstring": "Display a bar chart.\n\n This is just syntax-sugar around st.altair_chart. The main difference\n is this command uses the data's own column and indices to figure out\n the chart's spec. 
As a result this is easier to use for many \"just plot\n this\" scenarios, while being less customizable.\n\n If st.bar_chart does not guess the data specification\n correctly, try specifying your desired chart using st.altair_chart.\n\n Parameters\n ----------\n data : pandas.DataFrame, pandas.Styler, pyarrow.Table, numpy.ndarray, Iterable, or dict\n Data to be plotted.\n Pyarrow tables are not supported by Streamlit's legacy DataFrame serialization\n (i.e. with `config.dataFrameSerialization = \"legacy\"`).\n To use pyarrow tables, please enable pyarrow by changing the config setting,\n `config.dataFrameSerialization = \"arrow\"`.\n\n width : int\n The chart width in pixels. If 0, selects the width automatically.\n\n height : int\n The chart height in pixels. If 0, selects the height automatically.\n\n use_container_width : bool\n If True, set the chart width to the column width. This takes\n precedence over the width argument.\n\n Example\n -------\n >>> chart_data = pd.DataFrame(\n ... np.random.randn(50, 3),\n ... columns=[\"a\", \"b\", \"c\"])\n ...\n >>> st.bar_chart(chart_data)\n\n .. output::\n https://share.streamlit.io/streamlit/docs/main/python/api-examples-source/charts.bar_chart.py\n height: 400px\n\n ", "n_words": 177, "vocab_size": 125, "n_whitespaces": 451, "language": "en" } }, { "id": 270311, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/distribute/distributed_file_utils.py", "file_name": "distributed_file_utils.py", "fun_name": "write_filepath", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def write_filepath(filepath, strategy):\n \n dirpath = os.path.dirname(filepath)\n base = os.path.basename(filepath)\n return os.path.join(write_dirpath(dirpath, strategy), base)\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 25, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 44, "n_ast_nodes": 70, "n_identifiers": 11, "d_id": 80425, "documentation": { "docstring": "Returns the writing file path to be used to save file distributedly.\n\n Directory to contain `filepath` would be created if it doesn't exist.\n\n Args:\n filepath: Original filepath that would be used without distribution.\n strategy: The tf.distribute strategy object currently used.\n\n Returns:\n The writing filepath that should be used to save file with distribution.\n ", "n_words": 53, "vocab_size": 36, "n_whitespaces": 80, "language": "en" } }, { "id": 109806, "commit_id": "3b52d2b64f58c1eb912bd343e7c197a1ed0b92b5", "repo": "matplotlib", "path": "lib/matplotlib/backend_bases.py", "file_name": "backend_bases.py", "fun_name": "draw_tex", "commit_message": "Remove redundant method, fix signature and add doc-string to draw_tex", "code": "def draw_tex(self, gc, x, y, s, prop, angle, *, mtext=None):\n \n self._draw_text_as_path(gc, x, y, s, prop, angle, ismath=\"TeX\")\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 31, "n_words": 17, "vocab_size": 12, "complexity": 1, "nloc": 2, "token_counts": 44, "n_ast_nodes": 60, "n_identifiers": 11, "d_id": 23757, "documentation": { "docstring": "\n Draw a TeX instance.\n\n Parameters\n ----------\n gc : `.GraphicsContextBase`\n The graphics context.\n x : float\n The x location of the text in display coords.\n y : float\n The y location of the text 
baseline in display coords.\n s : str\n The TeX text string.\n prop : `matplotlib.font_manager.FontProperties`\n The font properties.\n angle : float\n The rotation angle in degrees anti-clockwise.\n mtext : `matplotlib.text.Text`\n The original text object to be rendered.\n ", "n_words": 69, "vocab_size": 41, "n_whitespaces": 224, "language": "en" } }, { "id": 68540, "commit_id": "a1e3ae8869194a487acccc706a381db74c4aa1ff", "repo": "erpnext", "path": "erpnext/controllers/queries.py", "file_name": "queries.py", "fun_name": "tax_account_query", "commit_message": "fix: user can select disabled accounts in taxes table", "code": "def tax_account_query(doctype, txt, searchfield, start, page_len, filters):\n\tcompany_currency = erpnext.get_company_currency(filters.get(\"company\"))\n\n\tdef get_accounts(with_account_type_filter):\n\t\taccount_type_condition = \"\"\n\t\tif with_account_type_filter:\n\t\t\taccount_type_condition = \"AND account_type in %(account_types)s\"\n\n\t\taccounts = frappe.db.sql(\n\t\t\t.format(\n\t\t\t\taccount_type_condition=account_type_condition,\n\t\t\t\tsearchfield=searchfield,\n\t\t\t\tmcond=get_match_cond(doctype),\n\t\t\t),\n\t\t\tdict(\n\t\t\t\taccount_types=filters.get(\"account_type\"),\n\t\t\t\tcompany=filters.get(\"company\"),\n\t\t\t\tdisabled=filters.get(\"disabled\", 0),\n\t\t\t\tcurrency=company_currency,\n\t\t\t\ttxt=\"%{}%\".format(txt),\n\t\t\t\toffset=start,\n\t\t\t\tlimit=page_len,\n\t\t\t),\n\t\t)\n\n\t\treturn accounts\n\n\ttax_accounts = get_accounts(True)\n\n\tif not tax_accounts:\n\t\ttax_accounts = get_accounts(False)\n\n\treturn tax_accounts\n\n\n@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "n_ast_errors": 1, "ast_levels": 16, "n_whitespaces": 28, "n_words": 57, "vocab_size": 44, "complexity": 2, "nloc": 7, "token_counts": 48, "n_ast_nodes": 249, "n_identifiers": 31, "d_id": 14815, "documentation": { "docstring": "\n\t\t\tSELECT name, parent_account\n\t\t\tFROM `tabAccount`\n\t\t\tWHERE `tabAccount`.docstatus!=2\n\t\t\t\t{account_type_condition}\n\t\t\t\tAND is_group = 0\n\t\t\t\tAND company = %(company)s\n\t\t\t\tAND disabled = %(disabled)s\n\t\t\t\tAND (account_currency = %(currency)s or ifnull(account_currency, '') = '')\n\t\t\t\tAND `{searchfield}` LIKE %(txt)s\n\t\t\t\t{mcond}\n\t\t\tORDER BY idx DESC, name\n\t\t\tLIMIT %(offset)s, %(limit)s\n\t\t", "n_words": 42, "vocab_size": 33, "n_whitespaces": 30, "language": "en" } }, { "id": 319884, "commit_id": "0fdd3d56f43c8442a0c9ecd3cad07a88137ff7de", "repo": "paperless-ngx", "path": ".github/scripts/cleanup-tags.py", "file_name": "cleanup-tags.py", "fun_name": "filter_packages_untagged", "commit_message": "Changes the cleanup images workflow so it uses a OAuth token with the correct scope (GITHUB_TOKEN is not enough). 
Also prevents running if the token is not defined and generally does commenting/cleanups\"", "code": "def filter_packages_untagged(self, package_data):\n \n matches = {}\n\n for package in package_data:\n if \"metadata\" in package and \"container\" in package[\"metadata\"]:\n container_metadata = package[\"metadata\"][\"container\"]\n if \"tags\" in container_metadata:\n container_tags = container_metadata[\"tags\"]\n if not len(container_tags):\n matches[package[\"name\"]] = package\n\n return matches\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 166, "n_words": 36, "vocab_size": 25, "complexity": 6, "nloc": 10, "token_counts": 67, "n_ast_nodes": 121, "n_identifiers": 8, "d_id": 117011, "documentation": { "docstring": "\n Filters the given package data to those which have no tags at all\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 28, "language": "en" } }, { "id": 66903, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/payroll/doctype/payroll_entry/payroll_entry.py", "file_name": "payroll_entry.py", "fun_name": "employee_query", "commit_message": "style: format code with black", "code": "def employee_query(doctype, txt, searchfield, start, page_len, filters):\n\tfilters = frappe._dict(filters)\n\tconditions = []\n\tinclude_employees = []\n\temp_cond = \"\"\n\n\tif not filters.payroll_frequency:\n\t\tfrappe.throw(_(\"Select Payroll Frequency.\"))\n\n\tif filters.start_date and filters.end_date:\n\t\temployee_list = get_employee_list(filters)\n\t\temp = filters.get(\"employees\") or []\n\t\tinclude_employees = [\n\t\t\temployee.employee for employee in employee_list if employee.employee not in emp\n\t\t]\n\t\tfilters.pop(\"start_date\")\n\t\tfilters.pop(\"end_date\")\n\t\tfilters.pop(\"salary_slip_based_on_timesheet\")\n\t\tfilters.pop(\"payroll_frequency\")\n\t\tfilters.pop(\"payroll_payable_account\")\n\t\tfilters.pop(\"currency\")\n\t\tif filters.employees is not None:\n\t\t\tfilters.pop(\"employees\")\n\n\t\tif include_employees:\n\t\t\temp_cond += \"and employee in %(include_employees)s\"\n\n\treturn frappe.db.sql(\n\t\t.format(\n\t\t\t**{\n\t\t\t\t\"key\": searchfield,\n\t\t\t\t\"fcond\": get_filters_cond(doctype, filters, conditions),\n\t\t\t\t\"mcond\": get_match_cond(doctype),\n\t\t\t\t\"emp_cond\": emp_cond,\n\t\t\t}\n\t\t),\n\t\t{\n\t\t\t\"txt\": \"%%%s%%\" % txt,\n\t\t\t\"_txt\": txt.replace(\"%\", \"\"),\n\t\t\t\"start\": start,\n\t\t\t\"page_len\": page_len,\n\t\t\t\"include_employees\": include_employees,\n\t\t},\n\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 63, "n_words": 103, "vocab_size": 77, "complexity": 9, "nloc": 52, "token_counts": 224, "n_ast_nodes": 387, "n_identifiers": 30, "d_id": 14374, "documentation": { "docstring": "select name, employee_name from `tabEmployee`\n\t\twhere status = 'Active'\n\t\t\tand docstatus < 2\n\t\t\tand ({key} like %(txt)s\n\t\t\t\tor employee_name like %(txt)s)\n\t\t\t{emp_cond}\n\t\t\t{fcond} {mcond}\n\t\torder by\n\t\t\tif(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),\n\t\t\tif(locate(%(_txt)s, employee_name), locate(%(_txt)s, employee_name), 99999),\n\t\t\tidx desc,\n\t\t\tname, employee_name\n\t\tlimit %(start)s, %(page_len)s", "n_words": 43, "vocab_size": 33, "n_whitespaces": 30, "language": "en" } }, { "id": 110019, "commit_id": 
"1068a6faa19767724437461bcfb88c6852ec435c", "repo": "matplotlib", "path": "lib/matplotlib/contour.py", "file_name": "contour.py", "fun_name": "_process_contour_level_args", "commit_message": "Remove unnecessary np.{,as}array / astype calls.\n\nQuite often numpy will call asarray for us, saving us the need to call\nasarray explicitly.\n\nWhen we do call asarray (or array) ourselves, a dtype can directly be\npassed in, rather than immediately calling astype immediately after.\nPassing the dtype makes it unnecessary for asarray to infer the dtype\nof the passed-in container, and can also save an extra array allocation\nif asarray first has to allocate an array of a type and astype\nimmediately has to allocate an array of another type.", "code": "def _process_contour_level_args(self, args):\n \n if self.levels is None:\n if len(args) == 0:\n levels_arg = 7 # Default, hard-wired.\n else:\n levels_arg = args[0]\n else:\n levels_arg = self.levels\n if isinstance(levels_arg, Integral):\n self.levels = self._autolev(levels_arg)\n else:\n self.levels = np.asarray(levels_arg, np.float64)\n\n if not self.filled:\n inside = (self.levels > self.zmin) & (self.levels < self.zmax)\n levels_in = self.levels[inside]\n if len(levels_in) == 0:\n self.levels = [self.zmin]\n _api.warn_external(\n \"No contour levels were found within the data range.\")\n\n if self.filled and len(self.levels) < 2:\n raise ValueError(\"Filled contours require at least 2 levels.\")\n\n if len(self.levels) > 1 and np.min(np.diff(self.levels)) <= 0.0:\n raise ValueError(\"Contour levels must be increasing\")\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 342, "n_words": 96, "vocab_size": 66, "complexity": 10, "nloc": 23, "token_counts": 185, "n_ast_nodes": 299, "n_identifiers": 22, "d_id": 23889, "documentation": { "docstring": "\n Determine the contour levels and store in self.levels.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 19048, "commit_id": "964f5ab75098c55f028f8acfeeae05df35ea68d5", "repo": "mlflow", "path": "mlflow/models/evaluation/default_evaluator.py", "file_name": "default_evaluator.py", "fun_name": "_get_classifier_global_metrics", "commit_message": "Evaluation Default evaluator (#5092)\n\n* init\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* rename module\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* revert black change\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* change module path\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* 
update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* lazy load pyspark\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* revert export\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix curcit import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix conftest.py\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* Revert \"fix conftest.py\"\r\n\r\nThis reverts commit 2ea29c62bfffc5461bf77f3da15b5c00f51de19b.\r\n\r\n* fix tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* default evaluator\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update hash algo\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comment\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix lint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add more tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix lint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update shap explainer\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* remove scikitplot dep\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add pr curve\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add shap.summary_plot\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* log explainer\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* improve explainer code\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update shap init\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update explainer creating\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update predict_proba\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add multi-class metrics artifacts\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add log_loss metric\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* lazy load pyspark\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address ben comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* prevent show shap 
logo, add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* support spark model\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add shap version check\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update docs, loose classifier label limit\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* multiclass classifier merge metrics/plots\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* zfill feature name\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add config max_num_classes_threshold_logging_roc_pr_curve_for_multiclass_classifier\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* improve label handling\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* black\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* increase plot dpi\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix test fixture\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix pylint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* use matplot rc_context\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix shap import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor EvaluationDataset\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* limit user specify shap algos\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* clean\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update evaluation dataset\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* use svg fig\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* revert svg\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* curve dashline, legend display ap/roc, legend move out\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* linewidth 1\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* keyword arguments for evaluate, fix tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* mark abc.abstractmethod, kw args for ModelEvaluator methods\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix pylint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix pylint\r\n\r\nSigned-off-by: Weichen Xu ", "code": "def _get_classifier_global_metrics(is_binomial, y, y_pred, y_probs, labels):\n \n metrics = {}\n metrics[\"accuracy\"] = sk_metrics.accuracy_score(y, y_pred)\n metrics[\"example_count\"] = len(y)\n\n if not is_binomial:\n metrics[\"f1_score_micro\"] = sk_metrics.f1_score(y, y_pred, average=\"micro\", labels=labels)\n metrics[\"f1_score_macro\"] = sk_metrics.f1_score(y, y_pred, average=\"macro\", labels=labels)\n\n if y_probs is not None:\n metrics[\"log_loss\"] = sk_metrics.log_loss(y, y_probs, labels=labels)\n\n return metrics\n\n", "url": "https://github.com/mlflow/mlflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 85, "n_words": 43, "vocab_size": 29, "complexity": 3, "nloc": 10, "token_counts": 111, "n_ast_nodes": 176, "n_identifiers": 13, "d_id": 2882, "documentation": { "docstring": "\n get classifier metrics which computing over all classes examples.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 16, "language": "en" } }, { "id": 320942, "commit_id": "676e01677183825d19107d3b2fbf1bb2c0684ede", "repo": "qutebrowser", "path": "tests/unit/mainwindow/test_messageview.py", "file_name": "test_messageview.py", "fun_name": "test_show_message_twice", "commit_message": "Only replace the exact same message\n\nIf we 
have a error message followed by an info message with the same text, they\nshould both be shown, not replaced automatically.", "code": "def test_show_message_twice(view, info1, info2, count):\n \n view.show_message(info1)\n view.show_message(info2)\n assert len(view._messages) == count\n\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 23, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 33, "n_ast_nodes": 53, "n_identifiers": 8, "d_id": 117451, "documentation": { "docstring": "Show the exact same message twice -> only one should be shown.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 106773, "commit_id": "60c90e313e106c0af62339d29eeda0e62823c648", "repo": "visdom", "path": "py/visdom/utils/server_utils.py", "file_name": "server_utils.py", "fun_name": "check_auth", "commit_message": "Refactoring server.py into more intentional files", "code": "def check_auth(f):\n ", "url": "https://github.com/fossasia/visdom.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 3, "token_counts": 10, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 22434, "documentation": { "docstring": "\n Wrapper for server access methods to ensure that the access\n is authorized.\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 22, "language": "en" } }, { "id": 150410, "commit_id": "9f6bba40af1a407f190a89f5c0c8b4e3f528ba46", "repo": "freqtrade", "path": "freqtrade/rpc/replicate/__init__.py", "file_name": "__init__.py", "fun_name": "start_leader_mode", "commit_message": "initial concept for replicate, basic leader and follower logic", "code": "def start_leader_mode(self):\n \n\n logger.info(\"Running rpc.replicate in Leader mode\")\n logger.info(\"-\" * 15)\n logger.info(f\"API_KEY: {self.secret_api_key}\")\n logger.info(\"-\" * 15)\n\n self.register_leader_endpoint()\n self.submit_coroutine(self.leader_loop())\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 66, "n_words": 17, "vocab_size": 14, "complexity": 1, "nloc": 7, "token_counts": 50, "n_ast_nodes": 100, "n_identifiers": 8, "d_id": 34734, "documentation": { "docstring": "\n Register the endpoint and start the leader loop\n ", "n_words": 8, "vocab_size": 7, "n_whitespaces": 23, "language": "en" } }, { "id": 314752, "commit_id": "a8349a4866d22cddbca9ac9367d4affae39a8325", "repo": "core", "path": "tests/helpers/test_entityfilter.py", "file_name": "test_entityfilter.py", "fun_name": "test_exclude_glob_case5", "commit_message": "Adjust entity filters to make includes stronger than excludes (#74080)\n\n* Adjust entity filters to make includes stronger than excludes\r\n\r\nFixes #59080\r\n\r\n* adjust test for stronger entity glob includes\r\n\r\n* sync with docs", "code": "def test_exclude_glob_case5():\n \n incl_dom = {}\n incl_glob = {}\n incl_ent = {\"binary_sensor.working\"}\n excl_dom = {}\n excl_glob = {\"binary_sensor.*\"}\n excl_ent = {\"light.ignoreme\", \"sensor.notworking\"}\n testfilter = generate_filter(\n incl_dom, incl_ent, excl_dom, excl_ent, incl_glob, excl_glob\n )\n\n assert testfilter(\"sensor.test\")\n assert testfilter(\"sensor.notworking\") is False\n assert testfilter(\"light.test\")\n assert testfilter(\"light.ignoreme\") is False\n assert 
testfilter(\"binary_sensor.working\")\n assert testfilter(\"binary_sensor.another\") is False\n assert testfilter(\"sun.sun\") is True\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 108, "n_words": 53, "vocab_size": 33, "complexity": 1, "nloc": 17, "token_counts": 93, "n_ast_nodes": 169, "n_identifiers": 9, "d_id": 113356, "documentation": { "docstring": "Test case 5 - include and exclude specified, with excluded glob.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 288716, "commit_id": "5b0a37a44752edbbf785d6a200e3b7a3f5fa2047", "repo": "core", "path": "homeassistant/components/jellyfin/config_flow.py", "file_name": "config_flow.py", "fun_name": "_generate_client_device_id", "commit_message": "Use persistent device id for jellyfin requests (#79840)", "code": "def _generate_client_device_id() -> str:\n \n return random_uuid_hex()\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 3, "d_id": 87869, "documentation": { "docstring": "Generate a random UUID4 string to identify ourselves.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 158210, "commit_id": "b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2", "repo": "d2l-zh", "path": "d2l/mxnet.py", "file_name": "mxnet.py", "fun_name": "evaluate_loss", "commit_message": "[PaddlePaddle] Merge master into Paddle branch (#1186)\n\n* change 15.2 title in chinese version (#1109)\r\n\r\nchange title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 
情感分析:使用循环神经网络‘\r\n\r\n* 修改部分语义表述 (#1105)\r\n\r\n* Update r0.17.5 (#1120)\r\n\r\n* Bump versions in installation\r\n\r\n* 94行typo: (“bert.mall”)->(“bert.small”) (#1129)\r\n\r\n* line 313: \"bert.mall\" -> \"bert.small\" (#1130)\r\n\r\n* fix: update language as native reader (#1114)\r\n\r\n* Fix the translation of \"stride\" (#1115)\r\n\r\n* Update index.md (#1118)\r\n\r\n修改部分语义表述\r\n\r\n* Update self-attention-and-positional-encoding.md (#1133)\r\n\r\n依照本书的翻译习惯,将pooling翻译成汇聚\r\n\r\n* maybe a comment false (#1149)\r\n\r\n* maybe a little false\r\n\r\n* maybe a little false\r\n\r\n* A minor bug in the rcnn section (Chinese edition) (#1148)\r\n\r\n* Update bert.md (#1137)\r\n\r\n一个笔误\r\n# 假设batch_size=2,num_pred_positions=3\r\n# 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1]\r\n\r\n* Update calculus.md (#1135)\r\n\r\n* fix typo in git documentation (#1106)\r\n\r\n* fix: Update the Chinese translation in lr-scheduler.md (#1136)\r\n\r\n* Update lr-scheduler.md\r\n\r\n* Update chapter_optimization/lr-scheduler.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* fix translation for kaggle-house-price.md (#1107)\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\nSigned-off-by: sunhaizhou \r\n\r\n* Update weight-decay.md (#1150)\r\n\r\n* Update weight-decay.md\r\n\r\n关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解\r\n关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。\r\n并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释\r\n解释为何会增加复杂性以及为何需要细粒度工具。\r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Fix a spelling error (#1161)\r\n\r\n* Update gru.md (#1152)\r\n\r\nThe key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state.\r\n翻译错误\r\n\r\n* Unify the function naming (#1113)\r\n\r\nUnify naming of the function 'init_xavier()'.\r\n\r\n* Update mlp-concise.md (#1166)\r\n\r\n* Update mlp-concise.md\r\n\r\n语句不通顺\r\n\r\n* Update environment.md\r\n\r\n语序异常\r\n\r\n* Update config.ini\r\n\r\n* fix the imprecise description (#1168)\r\n\r\nCo-authored-by: yuande \r\n\r\n* fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175)\r\n\r\n* Fix some typos. (#1163)\r\n\r\n* Update batch-norm.md (#1170)\r\n\r\nfixing typos u->x in article\r\n\r\n* Update linear-regression.md (#1090)\r\n\r\nWe invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that\r\n\r\n原译文把who也直接翻译出来了。\r\n\r\n* Update mlp.md (#1117)\r\n\r\n* Update mlp.md\r\n\r\n修改部分语义表述\r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: goldmermaid \r\n\r\n* Correct a translation error. 
(#1091)\r\n\r\n* Correct a translation error.\r\n\r\n* Update chapter_computer-vision/image-augmentation.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update aws.md (#1121)\r\n\r\n* Update aws.md\r\n\r\n* Update chapter_appendix-tools-for-deep-learning/aws.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update image-augmentation.md (#1093)\r\n\r\n* Update anchor.md (#1088)\r\n\r\nfix a minor issue in code\r\n\r\n* Update anchor.md\r\n\r\n* Update image-augmentation.md\r\n\r\n* fix typo and improve translation in chapter_linear-networks\\softmax-regression.md (#1087)\r\n\r\n* Avoid `torch.meshgrid` user warning (#1174)\r\n\r\nAvoids the following user warning:\r\n```python\r\n~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.)\r\n return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\r\n```\r\n\r\n* bump to 2.0.0-beta1\r\n\r\n* Update sequence.md\r\n\r\n* bump beta1 on readme\r\n\r\n* Add latex code block background to config\r\n\r\n* BLD: Bump python support version 3.9 (#1183)\r\n\r\n* BLD: Bump python support version 3.9\r\n\r\n* Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4\r\n\r\n* BLD: Bump torch and tensorflow\r\n\r\n* Update Jenkinsfile\r\n\r\n* Update chapter_installation/index.md\r\n\r\n* Update chapter_installation/index.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update config.ini\r\n\r\n* Update INFO.md\r\n\r\n* Update INFO.md\r\n\r\n* Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187)\r\n\r\n* resolve the conflicts\r\n\r\n* revise from publisher (#1089)\r\n\r\n* revise from publisher\r\n\r\n* d2l api\r\n\r\n* post_latex\r\n\r\n* revise from publisher\r\n\r\n* revise ch11\r\n\r\n* Delete d2l-Copy1.bib\r\n\r\n* clear cache\r\n\r\n* rm d2lbook clear\r\n\r\n* debug anchor\r\n\r\n* keep original d2l doc\r\n\r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\n\r\n* 重复语句 (#1188)\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improve expression for chapter_preliminaries/pandas.md (#1184)\r\n\r\n* Update pandas.md\r\n\r\n* Improve expression\r\n\r\n* Improve expression\r\n\r\n* Update chapter_preliminaries/pandas.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improce expression for chapter_preliminaries/linear-algebra.md (#1185)\r\n\r\n* Improce expression\r\n\r\n* Improve code comments\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Fix multibox_detection bugs\r\n\r\n* Update d2l to 0.17.5 version\r\n\r\n* restore older version\r\n\r\n* Upgrade pandas\r\n\r\n* change to python3.8\r\n\r\n* Test warning log\r\n\r\n* relocate warning log\r\n\r\n* test logs filtering\r\n\r\n* Update gru.md\r\n\r\n* Add DeprecationWarning filter\r\n\r\n* Test warning log\r\n\r\n* Update attention mechanisms & computational performance\r\n\r\n* Update multilayer 
perceptron& linear & convolution networks & computer vision\r\n\r\n* Update recurrent&optimition&nlp pretraining & nlp applications\r\n\r\n* ignore warnings\r\n\r\n* Update index.md\r\n\r\n* Update linear networks\r\n\r\n* Update multilayer perceptrons&deep learning computation\r\n\r\n* Update preliminaries\r\n\r\n* Check and Add warning filter\r\n\r\n* Update kaggle-cifar10.md\r\n\r\n* Update object-detection-dataset.md\r\n\r\n* Update ssd.md fcn.md\r\n\r\n* Update hybridize.md\r\n\r\n* Update hybridize.md\r\n\r\nSigned-off-by: sunhaizhou \r\nCo-authored-by: zhou201505013 <39976863+zhou201505013@users.noreply.github.com>\r\nCo-authored-by: Xinwei Liu \r\nCo-authored-by: Anirudh Dagar \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: hugo_han <57249629+HugoHann@users.noreply.github.com>\r\nCo-authored-by: gyro永不抽风 <1247006353@qq.com>\r\nCo-authored-by: CanChengZheng \r\nCo-authored-by: linlin \r\nCo-authored-by: iuk \r\nCo-authored-by: yoos <49556860+liyunlongaaa@users.noreply.github.com>\r\nCo-authored-by: Mr. Justice Lawrence John Wargrave <65226618+RUCWargrave@users.noreply.github.com>\r\nCo-authored-by: Chiyuan Fu \r\nCo-authored-by: Sunhuashan <48636870+Sunhuashan@users.noreply.github.com>\r\nCo-authored-by: Haiker Sun \r\nCo-authored-by: Ming Liu \r\nCo-authored-by: goldmermaid \r\nCo-authored-by: silenceZheng66 <13754430639@163.com>\r\nCo-authored-by: Wenchao Yan <56541797+YWonchall@users.noreply.github.com>\r\nCo-authored-by: Kiki2049 <55939997+Kiki2049@users.noreply.github.com>\r\nCo-authored-by: Krahets \r\nCo-authored-by: friedmainfunction <73703265+friedmainfunction@users.noreply.github.com>\r\nCo-authored-by: Jameson \r\nCo-authored-by: P. Yao <12227516+YaoPengCN@users.noreply.github.com>\r\nCo-authored-by: Yulv-git <34329208+Yulv-git@users.noreply.github.com>\r\nCo-authored-by: Liu,Xiao <45966993+liuxiao916@users.noreply.github.com>\r\nCo-authored-by: YIN, Gang <1246410+yingang@users.noreply.github.com>\r\nCo-authored-by: Joe-HZ <58297431+Joe-HZ@users.noreply.github.com>\r\nCo-authored-by: lybloveyou <102609904+lybloveyou@users.noreply.github.com>\r\nCo-authored-by: VigourJiang \r\nCo-authored-by: zxhd863943427 <74853597+zxhd863943427@users.noreply.github.com>\r\nCo-authored-by: LYF <27893441+liyufan@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\nCo-authored-by: xiaotinghe \r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Holly-Max <60691735+Holly-Max@users.noreply.github.com>\r\nCo-authored-by: HinGwenWoong \r\nCo-authored-by: Shuai Zhang ", "code": "def evaluate_loss(net, data_iter, loss):\n \n metric = d2l.Accumulator(2) # Sum of losses, no. 
of examples\n for X, y in data_iter:\n l = loss(net(X), y)\n metric.add(d2l.reduce_sum(l), d2l.size(l))\n return metric[0] / metric[1]\n\nDATA_HUB = dict()\nDATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'\n", "url": "https://github.com/d2l-ai/d2l-zh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 60, "n_words": 35, "vocab_size": 31, "complexity": 2, "nloc": 6, "token_counts": 64, "n_ast_nodes": 116, "n_identifiers": 16, "d_id": 37379, "documentation": { "docstring": "Evaluate the loss of a model on the given dataset.\n\n Defined in :numref:`sec_model_selection`", "n_words": 13, "vocab_size": 12, "n_whitespaces": 15, "language": "en" } }, { "id": 60951, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/operations/prepare.py", "file_name": "prepare.py", "fun_name": "_copy2_ignoring_special_files", "commit_message": "upd; format", "code": "def _copy2_ignoring_special_files(src, dest):\n # type: (str, str) -> None\n \n try:\n copy2_fixed(src, dest)\n except shutil.SpecialFileError as e:\n # SpecialFileError may be raised due to either the source or\n # destination. If the destination was the cause then we would actually\n # care, but since the destination directory is deleted prior to\n # copy we ignore all of them assuming it is caused by the source.\n logger.warning(\n \"Ignoring special file error '%s' encountered copying %s to %s.\",\n str(e),\n src,\n dest,\n )\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 184, "n_words": 79, "vocab_size": 66, "complexity": 2, "nloc": 10, "token_counts": 39, "n_ast_nodes": 69, "n_identifiers": 10, "d_id": 12357, "documentation": { "docstring": "Copying special files is not supported, but as a convenience to users\n we skip errors copying them. 
This supports tools that may create e.g.\n socket files in the project source directory.\n ", "n_words": 31, "vocab_size": 30, "n_whitespaces": 40, "language": "en" } }, { "id": 44166, "commit_id": "ff3bbc3db24f9f3f4f88033d48859fb08fc3237b", "repo": "airflow", "path": "airflow/models/base.py", "file_name": "base.py", "fun_name": "prepare_template", "commit_message": "Implement enough interface for MappedOperator to be baggable (#20945)", "code": "def prepare_template(self) -> None:\n \n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 11, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 6, "token_counts": 8, "n_ast_nodes": 16, "n_identifiers": 2, "d_id": 8185, "documentation": { "docstring": "Hook triggered after the templated fields get replaced by their content.\n\n If you need your operator to alter the content of the file before the\n template is rendered, it should override this method to do so.\n ", "n_words": 36, "vocab_size": 32, "n_whitespaces": 57, "language": "en" } }, { "id": 227329, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_layout.py", "file_name": "_layout.py", "fun_name": "hiddenlabels", "commit_message": "switch to black .22", "code": "def hiddenlabels(self):\n \n return self[\"hiddenlabels\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 59002, "documentation": { "docstring": "\n hiddenlabels is the funnelarea & pie chart analog of\n visible:'legendonly' but it can contain many labels, and can\n simultaneously hide slices from several pies/funnelarea charts\n\n The 'hiddenlabels' property is an array that may be specified as a tuple,\n list, numpy array, or pandas Series\n\n Returns\n -------\n numpy.ndarray\n ", "n_words": 47, "vocab_size": 45, "n_whitespaces": 111, "language": "en" } }, { "id": 125006, "commit_id": "569fe0109629048d08e1d9e023f7769f10bd2244", "repo": "ray", "path": "rllib/offline/tests/test_dataset_reader.py", "file_name": "test_dataset_reader.py", "fun_name": "test_dataset_shard_error_with_both_format_and_loader_fn", "commit_message": "[RLlib] improved unittests for dataset_reader and fixed bugs (#26458)", "code": "def test_dataset_shard_error_with_both_format_and_loader_fn(self):\n \n dset = ray.data.range(100)\n config = {\n \"input\": \"dataset\",\n \"input_config\": {\n \"format\": \"json\",\n \"paths\": self.dset_path,\n \"loader_fn\": lambda: dset,\n },\n }\n\n with self.assertRaises(ValueError):\n get_dataset_and_shards(config)\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 148, "n_words": 24, "vocab_size": 22, "complexity": 1, "nloc": 12, "token_counts": 57, "n_ast_nodes": 105, "n_identifiers": 11, "d_id": 27746, "documentation": { "docstring": "Tests whether the dataset_shard function raises an error when both format\n and loader_fn are specified.", "n_words": 15, "vocab_size": 15, "n_whitespaces": 21, "language": "en" } }, { "id": 305878, "commit_id": "474844744bdd2b0dcba46b82d9d3fcd8e3dbad24", "repo": "core", "path": "homeassistant/components/plex/sensor.py", "file_name": "sensor.py", "fun_name": "async_refresh_sensor", 
"commit_message": "Improve entity type hints [p] (#77871)", "code": "async def async_refresh_sensor(self) -> None:\n \n _LOGGER.debug(\"Refreshing library sensor for '%s'\", self.name)\n try:\n await self.hass.async_add_executor_job(self._update_state_and_attrs)\n self._attr_available = True\n except NotFound:\n self._attr_available = False\n except requests.exceptions.RequestException as err:\n _LOGGER.error(\n \"Could not update library sensor for '%s': %s\",\n self.library_section.title,\n err,\n )\n self._attr_available = False\n self.async_write_ha_state()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 195, "n_words": 42, "vocab_size": 33, "complexity": 3, "nloc": 16, "token_counts": 78, "n_ast_nodes": 132, "n_identifiers": 18, "d_id": 104662, "documentation": { "docstring": "Update state and attributes for the library sensor.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 257752, "commit_id": "408d8e6ff559ed0a195b68284baf3ab23707ffd7", "repo": "haystack", "path": "test/nodes/test_other.py", "file_name": "test_other.py", "fun_name": "test_joindocuments_score_none", "commit_message": "Enable the `JoinDocuments` node to work with documents with `score=None` (#2984)\n\n* Enable the `JoinDocuments` node to work with documents with `score=None`\r\n\r\nThis fixes #2983\r\n\r\nAs of now, the `JoinDocuments` node will error out if any of the documents has `score=None` - which is possible, as some retriever are not able to provide a score, like the `TfidfRetriever` on Elasticsearch or the `BM25Retriever` on Weaviate.\r\nTHe reason for the error is that the `JoinDocuments` always sorts the documents by score and cannot sort when `score=None`.\r\n\r\nThere was a very similar issue for `JoinAnswers` too, which was addressed by this PR: https://github.com/deepset-ai/haystack/pull/2436\r\nThis solution applies the same solution to `JoinDocuments` - so both the `JoinAnswers` and `JoinDocuments` now will have the same additional argument to disable sorting when that is requried.\r\n\r\nThe solution is to add an argument to `JoinDocuments` called `sort_by_score: bool`, which allows the user to turn off the sorting of documents by score, but keeps the current functionality of sorting being performed as the default.\r\n\r\n* Fixing test bug\r\n\r\n* Addressing PR review comments\r\n\r\n- Extending unit tests\r\n- Simplifying logic\r\n\r\n* Making the sorting work even with no scores\r\n\r\nBy making the no score being sorted as -Inf\r\n\r\n* Forgot to commit the change in `join_docs.py`\r\n\r\n* [EMPTY] Re-trigger CI\r\n\r\n* Added am INFO log if the `JoinDocuments` is sorting while some of the docs have `score=None`\r\n\r\n* Adjusting the arguments of `any()`\r\n\r\n* [EMPTY] Re-trigger CI", "code": "def test_joindocuments_score_none(join_mode, sort_by_score):\n \n inputs = [\n {\"documents\": [Document(content=\"text document 1\", content_type=\"text\", score=0.2)]},\n {\"documents\": [Document(content=\"text document 2\", content_type=\"text\", score=None)]},\n ]\n\n join_docs = JoinDocuments(join_mode=join_mode, sort_by_score=sort_by_score)\n result, _ = join_docs.run(inputs)\n assert len(result[\"documents\"]) == 2\n\n result, _ = join_docs.run(inputs, top_k_join=1)\n assert len(result[\"documents\"]) == 1\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 78, 
"n_words": 40, "vocab_size": 28, "complexity": 1, "nloc": 10, "token_counts": 112, "n_ast_nodes": 181, "n_identifiers": 15, "d_id": 75129, "documentation": { "docstring": "Testing JoinDocuments() node when some of the documents have `score=None`", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 47672, "commit_id": "49e336ae0302b386a2f47269a6d13988382d975f", "repo": "airflow", "path": "tests/ti_deps/deps/test_not_previously_skipped_dep.py", "file_name": "test_not_previously_skipped_dep.py", "fun_name": "test_parent_skip_branch", "commit_message": "Replace usage of `DummyOperator` with `EmptyOperator` (#22974)\n\n* Replace usage of `DummyOperator` with `EmptyOperator`", "code": "def test_parent_skip_branch(session, dag_maker):\n \n start_date = pendulum.datetime(2020, 1, 1)\n with dag_maker(\n \"test_parent_skip_branch_dag\",\n schedule_interval=None,\n start_date=start_date,\n session=session,\n ):\n op1 = BranchPythonOperator(task_id=\"op1\", python_callable=lambda: \"op3\")\n op2 = EmptyOperator(task_id=\"op2\")\n op3 = EmptyOperator(task_id=\"op3\")\n op1 >> [op2, op3]\n\n tis = {\n ti.task_id: ti\n for ti in dag_maker.create_dagrun(run_type=DagRunType.MANUAL, state=State.RUNNING).task_instances\n }\n tis[\"op1\"].run()\n\n dep = NotPreviouslySkippedDep()\n assert len(list(dep.get_dep_statuses(tis[\"op2\"], session, DepContext()))) == 1\n assert not dep.is_met(tis[\"op2\"], session)\n assert tis[\"op2\"].state == State.SKIPPED\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 162, "n_words": 59, "vocab_size": 49, "complexity": 2, "nloc": 21, "token_counts": 168, "n_ast_nodes": 272, "n_identifiers": 33, "d_id": 9207, "documentation": { "docstring": "\n A simple DAG with a BranchPythonOperator that does not follow op2. NotPreviouslySkippedDep is not met.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 22, "language": "en" } }, { "id": 259864, "commit_id": "b94bc5ea6821607d1e9826ce2d084c76379820ba", "repo": "scikit-learn", "path": "sklearn/neighbors/_base.py", "file_name": "_base.py", "fun_name": "_check_precomputed", "commit_message": "ENH add new function sort_graph_by_row_values (#23139)\n\nCo-authored-by: Thomas J. Fan \r\nCo-authored-by: Guillaume Lemaitre ", "code": "def _check_precomputed(X):\n \n if not issparse(X):\n X = check_array(X)\n check_non_negative(X, whom=\"precomputed distance matrix.\")\n return X\n else:\n graph = X\n\n if graph.format not in (\"csr\", \"csc\", \"coo\", \"lil\"):\n raise TypeError(\n \"Sparse matrix in {!r} format is not supported due to \"\n \"its handling of explicit zeros\".format(graph.format)\n )\n copied = graph.format != \"csr\"\n graph = check_array(graph, accept_sparse=\"csr\")\n check_non_negative(graph, whom=\"precomputed distance matrix.\")\n graph = sort_graph_by_row_values(graph, copy=not copied, warn_when_not_sorted=True)\n\n return graph\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 157, "n_words": 66, "vocab_size": 48, "complexity": 3, "nloc": 17, "token_counts": 105, "n_ast_nodes": 184, "n_identifiers": 14, "d_id": 75958, "documentation": { "docstring": "Check precomputed distance matrix.\n\n If the precomputed distance matrix is sparse, it checks that the non-zero\n entries are sorted by distances. 
If not, the matrix is copied and sorted.\n\n Parameters\n ----------\n X : {sparse matrix, array-like}, (n_samples, n_samples)\n Distance matrix to other samples. X may be a sparse matrix, in which\n case only non-zero elements may be considered neighbors.\n\n Returns\n -------\n X : {sparse matrix, array-like}, (n_samples, n_samples)\n Distance matrix to other samples. X may be a sparse matrix, in which\n case only non-zero elements may be considered neighbors.\n ", "n_words": 89, "vocab_size": 48, "n_whitespaces": 144, "language": "en" } }, { "id": 272015, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training_v1.py", "file_name": "training_v1.py", "fun_name": "_set_per_output_metric_attributes", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _set_per_output_metric_attributes(self, metrics_dict, output_index):\n \n updated_metrics_dict = collections.OrderedDict()\n for metric_name, metric_fn in metrics_dict.items():\n metric_name = self._add_unique_metric_name(\n metric_name, metric_fn, output_index\n )\n\n # Update the name on the metric class to be the unique generated name.\n metric_fn._name = metric_name # pylint: disable=protected-access\n updated_metrics_dict[metric_name] = metric_fn\n # Keep track of metric name and function.\n self._compile_metric_functions.append(metric_fn)\n return updated_metrics_dict\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 174, "n_words": 53, "vocab_size": 40, "complexity": 2, "nloc": 10, "token_counts": 61, "n_ast_nodes": 99, "n_identifiers": 14, "d_id": 80937, "documentation": { "docstring": "Sets the metric attributes on the model for the given output.\n\n Args:\n metrics_dict: A dict with metric names as keys and metric fns as values.\n output_index: The index of the model output for which the metric\n attributes are added.\n\n Returns:\n Metrics dict updated with unique metric names as keys.\n ", "n_words": 49, "vocab_size": 33, "n_whitespaces": 108, "language": "en" } }, { "id": 292578, "commit_id": "938b64081b0cbc21d1a9c1141c1e575824ce31ae", "repo": "core", "path": "tests/components/http/test_init.py", "file_name": "test_init.py", "fun_name": "test_peer_cert_ignored_with_supervisor", "commit_message": "Block peer certs on supervisor (#66837)\n\nCo-authored-by: Pascal Vizeli \r\nCo-authored-by: Mike Degatano ", "code": "async def test_peer_cert_ignored_with_supervisor(hass, tmpdir):\n \n cert_path, key_path, peer_cert_path = await hass.async_add_executor_job(\n _setup_empty_ssl_pem_files, tmpdir\n )\n\n with patch(\"ssl.SSLContext.load_cert_chain\"), patch(\n \"homeassistant.components.http.supervisor.has_supervisor\", return_value=True\n ), patch(\n \"ssl.SSLContext.load_verify_locations\"\n ) as mock_load_verify_locations, patch(\n \"homeassistant.util.ssl.server_context_modern\",\n side_effect=server_context_modern,\n ) as mock_context:\n assert (\n await async_setup_component(\n hass,\n \"http\",\n {\n \"http\": {\n \"ssl_peer_certificate\": peer_cert_path,\n \"ssl_profile\": \"modern\",\n \"ssl_certificate\": cert_path,\n \"ssl_key\": key_path,\n }\n },\n )\n is True\n )\n await hass.async_start()\n await hass.async_block_till_done()\n\n assert len(mock_context.mock_calls) == 1\n mock_load_verify_locations.assert_not_called()\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 375, "n_words": 62, "vocab_size": 48, "complexity": 1, "nloc": 31, "token_counts": 119, "n_ast_nodes": 209, "n_identifiers": 20, "d_id": 91654, "documentation": { "docstring": "Test peer certiicate requirement ignored in supervised deployments.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 73907, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/models/i18n.py", "file_name": "i18n.py", "fun_name": "get_default_locale", "commit_message": "Reformat with black", "code": "def get_default_locale(self):\n \n # Check if the object has any parental keys to another translatable model\n # If so, take the locale from the object referenced in that parental key\n parental_keys = [\n field\n for field in self._meta.get_fields()\n if isinstance(field, ParentalKey)\n and issubclass(field.related_model, TranslatableMixin)\n ]\n\n if parental_keys:\n parent_id = parental_keys[0].value_from_object(self)\n return (\n parental_keys[0]\n .related_model.objects.defer()\n .select_related(\"locale\")\n .get(id=parent_id)\n .locale\n )\n\n return Locale.get_default()\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 260, "n_words": 59, "vocab_size": 48, "complexity": 5, "nloc": 17, "token_counts": 86, "n_ast_nodes": 140, "n_identifiers": 21, "d_id": 16170, "documentation": { "docstring": "\n Finds the default locale to use for this object.\n\n This will be called just before the initial save.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 40, "language": "en" } }, { "id": 45915, "commit_id": "401419432082d222b823e4f2a66f21e5cc3ab28d", "repo": "airflow", "path": "tests/providers/databricks/operators/test_databricks_sql.py", "file_name": "test_databricks_sql.py", "fun_name": "test_copy_with_encryption", "commit_message": "Add new options to DatabricksCopyIntoOperator (#22076)\n\nThis includes:\r\n* `encryption` - to specify encryption options for a given location\r\n* `credential` - to specify authentication options for a given location\r\n* `validate` - to control validation of schema & data", "code": "def test_copy_with_encryption(self):\n op = DatabricksCopyIntoOperator(\n file_location=COPY_FILE_LOCATION,\n file_format='CSV',\n table_name='test',\n task_id=TASK_ID,\n encryption={'TYPE': 'AWS_SSE_C', 'MASTER_KEY': 'abc'},\n )\n assert (\n op._create_sql_query()\n == f.strip()\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 124, "n_words": 20, "vocab_size": 19, "complexity": 1, "nloc": 15, "token_counts": 53, "n_ast_nodes": 95, "n_identifiers": 13, "d_id": 8742, "documentation": { "docstring": "COPY INTO test\nFROM '{COPY_FILE_LOCATION}' WITH ( ENCRYPTION (TYPE = 'AWS_SSE_C', MASTER_KEY = 'abc'))\nFILEFORMAT = CSV\n", "n_words": 17, "vocab_size": 15, "n_whitespaces": 14, "language": "en" } }, { "id": 158197, "commit_id": "b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2", "repo": "d2l-zh", "path": "d2l/mxnet.py", "file_name": "mxnet.py", "fun_name": "tokenize", "commit_message": "[PaddlePaddle] Merge master into Paddle branch (#1186)\n\n* change 15.2 title in chinese version (#1109)\r\n\r\nchange title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 
情感分析:使用循环神经网络‘\r\n\r\n* 修改部分语义表述 (#1105)\r\n\r\n* Update r0.17.5 (#1120)\r\n\r\n* Bump versions in installation\r\n\r\n* 94行typo: (“bert.mall”)->(“bert.small”) (#1129)\r\n\r\n* line 313: \"bert.mall\" -> \"bert.small\" (#1130)\r\n\r\n* fix: update language as native reader (#1114)\r\n\r\n* Fix the translation of \"stride\" (#1115)\r\n\r\n* Update index.md (#1118)\r\n\r\n修改部分语义表述\r\n\r\n* Update self-attention-and-positional-encoding.md (#1133)\r\n\r\n依照本书的翻译习惯,将pooling翻译成汇聚\r\n\r\n* maybe a comment false (#1149)\r\n\r\n* maybe a little false\r\n\r\n* maybe a little false\r\n\r\n* A minor bug in the rcnn section (Chinese edition) (#1148)\r\n\r\n* Update bert.md (#1137)\r\n\r\n一个笔误\r\n# 假设batch_size=2,num_pred_positions=3\r\n# 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1]\r\n\r\n* Update calculus.md (#1135)\r\n\r\n* fix typo in git documentation (#1106)\r\n\r\n* fix: Update the Chinese translation in lr-scheduler.md (#1136)\r\n\r\n* Update lr-scheduler.md\r\n\r\n* Update chapter_optimization/lr-scheduler.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* fix translation for kaggle-house-price.md (#1107)\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\nSigned-off-by: sunhaizhou \r\n\r\n* Update weight-decay.md (#1150)\r\n\r\n* Update weight-decay.md\r\n\r\n关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解\r\n关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。\r\n并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释\r\n解释为何会增加复杂性以及为何需要细粒度工具。\r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Fix a spelling error (#1161)\r\n\r\n* Update gru.md (#1152)\r\n\r\nThe key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state.\r\n翻译错误\r\n\r\n* Unify the function naming (#1113)\r\n\r\nUnify naming of the function 'init_xavier()'.\r\n\r\n* Update mlp-concise.md (#1166)\r\n\r\n* Update mlp-concise.md\r\n\r\n语句不通顺\r\n\r\n* Update environment.md\r\n\r\n语序异常\r\n\r\n* Update config.ini\r\n\r\n* fix the imprecise description (#1168)\r\n\r\nCo-authored-by: yuande \r\n\r\n* fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175)\r\n\r\n* Fix some typos. (#1163)\r\n\r\n* Update batch-norm.md (#1170)\r\n\r\nfixing typos u->x in article\r\n\r\n* Update linear-regression.md (#1090)\r\n\r\nWe invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that\r\n\r\n原译文把who也直接翻译出来了。\r\n\r\n* Update mlp.md (#1117)\r\n\r\n* Update mlp.md\r\n\r\n修改部分语义表述\r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: goldmermaid \r\n\r\n* Correct a translation error. 
(#1091)\r\n\r\n* Correct a translation error.\r\n\r\n* Update chapter_computer-vision/image-augmentation.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update aws.md (#1121)\r\n\r\n* Update aws.md\r\n\r\n* Update chapter_appendix-tools-for-deep-learning/aws.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update image-augmentation.md (#1093)\r\n\r\n* Update anchor.md (#1088)\r\n\r\nfix a minor issue in code\r\n\r\n* Update anchor.md\r\n\r\n* Update image-augmentation.md\r\n\r\n* fix typo and improve translation in chapter_linear-networks\\softmax-regression.md (#1087)\r\n\r\n* Avoid `torch.meshgrid` user warning (#1174)\r\n\r\nAvoids the following user warning:\r\n```python\r\n~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.)\r\n return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\r\n```\r\n\r\n* bump to 2.0.0-beta1\r\n\r\n* Update sequence.md\r\n\r\n* bump beta1 on readme\r\n\r\n* Add latex code block background to config\r\n\r\n* BLD: Bump python support version 3.9 (#1183)\r\n\r\n* BLD: Bump python support version 3.9\r\n\r\n* Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4\r\n\r\n* BLD: Bump torch and tensorflow\r\n\r\n* Update Jenkinsfile\r\n\r\n* Update chapter_installation/index.md\r\n\r\n* Update chapter_installation/index.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update config.ini\r\n\r\n* Update INFO.md\r\n\r\n* Update INFO.md\r\n\r\n* Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187)\r\n\r\n* resolve the conflicts\r\n\r\n* revise from publisher (#1089)\r\n\r\n* revise from publisher\r\n\r\n* d2l api\r\n\r\n* post_latex\r\n\r\n* revise from publisher\r\n\r\n* revise ch11\r\n\r\n* Delete d2l-Copy1.bib\r\n\r\n* clear cache\r\n\r\n* rm d2lbook clear\r\n\r\n* debug anchor\r\n\r\n* keep original d2l doc\r\n\r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\n\r\n* 重复语句 (#1188)\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improve expression for chapter_preliminaries/pandas.md (#1184)\r\n\r\n* Update pandas.md\r\n\r\n* Improve expression\r\n\r\n* Improve expression\r\n\r\n* Update chapter_preliminaries/pandas.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improce expression for chapter_preliminaries/linear-algebra.md (#1185)\r\n\r\n* Improce expression\r\n\r\n* Improve code comments\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Fix multibox_detection bugs\r\n\r\n* Update d2l to 0.17.5 version\r\n\r\n* restore older version\r\n\r\n* Upgrade pandas\r\n\r\n* change to python3.8\r\n\r\n* Test warning log\r\n\r\n* relocate warning log\r\n\r\n* test logs filtering\r\n\r\n* Update gru.md\r\n\r\n* Add DeprecationWarning filter\r\n\r\n* Test warning log\r\n\r\n* Update attention mechanisms & computational performance\r\n\r\n* Update multilayer 
perceptron& linear & convolution networks & computer vision\r\n\r\n* Update recurrent&optimition&nlp pretraining & nlp applications\r\n\r\n* ignore warnings\r\n\r\n* Update index.md\r\n\r\n* Update linear networks\r\n\r\n* Update multilayer perceptrons&deep learning computation\r\n\r\n* Update preliminaries\r\n\r\n* Check and Add warning filter\r\n\r\n* Update kaggle-cifar10.md\r\n\r\n* Update object-detection-dataset.md\r\n\r\n* Update ssd.md fcn.md\r\n\r\n* Update hybridize.md\r\n\r\n* Update hybridize.md\r\n\r\nSigned-off-by: sunhaizhou \r\nCo-authored-by: zhou201505013 <39976863+zhou201505013@users.noreply.github.com>\r\nCo-authored-by: Xinwei Liu \r\nCo-authored-by: Anirudh Dagar \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: hugo_han <57249629+HugoHann@users.noreply.github.com>\r\nCo-authored-by: gyro永不抽风 <1247006353@qq.com>\r\nCo-authored-by: CanChengZheng \r\nCo-authored-by: linlin \r\nCo-authored-by: iuk \r\nCo-authored-by: yoos <49556860+liyunlongaaa@users.noreply.github.com>\r\nCo-authored-by: Mr. Justice Lawrence John Wargrave <65226618+RUCWargrave@users.noreply.github.com>\r\nCo-authored-by: Chiyuan Fu \r\nCo-authored-by: Sunhuashan <48636870+Sunhuashan@users.noreply.github.com>\r\nCo-authored-by: Haiker Sun \r\nCo-authored-by: Ming Liu \r\nCo-authored-by: goldmermaid \r\nCo-authored-by: silenceZheng66 <13754430639@163.com>\r\nCo-authored-by: Wenchao Yan <56541797+YWonchall@users.noreply.github.com>\r\nCo-authored-by: Kiki2049 <55939997+Kiki2049@users.noreply.github.com>\r\nCo-authored-by: Krahets \r\nCo-authored-by: friedmainfunction <73703265+friedmainfunction@users.noreply.github.com>\r\nCo-authored-by: Jameson \r\nCo-authored-by: P. Yao <12227516+YaoPengCN@users.noreply.github.com>\r\nCo-authored-by: Yulv-git <34329208+Yulv-git@users.noreply.github.com>\r\nCo-authored-by: Liu,Xiao <45966993+liuxiao916@users.noreply.github.com>\r\nCo-authored-by: YIN, Gang <1246410+yingang@users.noreply.github.com>\r\nCo-authored-by: Joe-HZ <58297431+Joe-HZ@users.noreply.github.com>\r\nCo-authored-by: lybloveyou <102609904+lybloveyou@users.noreply.github.com>\r\nCo-authored-by: VigourJiang \r\nCo-authored-by: zxhd863943427 <74853597+zxhd863943427@users.noreply.github.com>\r\nCo-authored-by: LYF <27893441+liyufan@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\nCo-authored-by: xiaotinghe \r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Holly-Max <60691735+Holly-Max@users.noreply.github.com>\r\nCo-authored-by: HinGwenWoong \r\nCo-authored-by: Shuai Zhang ", "code": "def tokenize(lines, token='word'):\n \n if token == 'word':\n return [line.split() for line in lines]\n elif token == 'char':\n return [list(line) for line in lines]\n else:\n print('ERROR: unknown token type: ' + token)\n", "url": "https://github.com/d2l-ai/d2l-zh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 64, "n_words": 31, "vocab_size": 23, "complexity": 5, "nloc": 7, "token_counts": 51, "n_ast_nodes": 90, "n_identifiers": 7, "d_id": 37370, "documentation": { "docstring": "Split text lines into word or character tokens.\n\n Defined in :numref:`sec_text_preprocessing`", "n_words": 11, "vocab_size": 11, "n_whitespaces": 13, "language": "en" } }, { "id": 113612, "commit_id": "d68c786ff81bad19c04619d6a999ff34aaa724e7", "repo": "nni", "path": "nni/compression/pytorch/quantization/bnn_quantizer.py", "file_name": "bnn_quantizer.py", "fun_name": "export_model", "commit_message": "[Compression] remove pruning v1 & 
refactor directory (#5228)", "code": "def export_model(self, model_path, calibration_path=None, onnx_path=None, input_shape=None, device=None):\n \n assert model_path is not None, 'model_path must be specified'\n self._unwrap_model()\n calibration_config = {}\n\n for name, module in self.bound_model.named_modules():\n if hasattr(module, 'weight_bits'):\n calibration_config[name] = {}\n calibration_config[name]['weight_bits'] = int(module.weight_bits)\n self._del_simulated_attr(module)\n\n self.export_model_save(self.bound_model, model_path, calibration_config, calibration_path, onnx_path, input_shape, device)\n\n return calibration_config\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 145, "n_words": 44, "vocab_size": 39, "complexity": 3, "nloc": 11, "token_counts": 110, "n_ast_nodes": 168, "n_identifiers": 18, "d_id": 24975, "documentation": { "docstring": "\n Export quantized model weights and calibration parameters(optional)\n\n Parameters\n ----------\n model_path : str\n path to save quantized model weight\n calibration_path : str\n (optional) path to save quantize parameters after calibration\n onnx_path : str\n (optional) path to save onnx model\n input_shape : list or tuple\n input shape to onnx model\n device : torch.device\n device of the model, used to place the dummy input tensor for exporting onnx file.\n the tensor is placed on cpu if ```device``` is None\n\n Returns\n -------\n Dict\n ", "n_words": 79, "vocab_size": 51, "n_whitespaces": 230, "language": "en" } }, { "id": 249342, "commit_id": "2281427175e4c93a30c39607fb4ac23c2a1f399f", "repo": "synapse", "path": "tests/rest/admin/test_room.py", "file_name": "test_room.py", "fun_name": "test_new_room_user_is_not_local", "commit_message": "Use literals in place of `HTTPStatus` constants in tests (#13488)\n\n* Use literals in place of `HTTPStatus` constants in tests\r\n\r\n* newsfile\r\n\r\n* code style\r\n\r\n* code style", "code": "def test_new_room_user_is_not_local(self) -> None:\n \n\n channel = self.make_request(\n \"DELETE\",\n self.url,\n content={\"new_room_user_id\": \"@not:exist.bla\"},\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(400, channel.code, msg=channel.json_body)\n self.assertEqual(\n \"User must be our own: @not:exist.bla\",\n channel.json_body[\"error\"],\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 133, "n_words": 25, "vocab_size": 24, "complexity": 1, "nloc": 15, "token_counts": 65, "n_ast_nodes": 107, "n_identifiers": 12, "d_id": 72845, "documentation": { "docstring": "\n Check that only local users can create new room to move members.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 27, "language": "en" } }, { "id": 21352, "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/distlib/_backport/shutil.py", "file_name": "shutil.py", "fun_name": "_make_zipfile", "commit_message": "Vendor in pip 22.1.2", "code": "def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):\n \n zip_filename = base_name + \".zip\"\n archive_dir = os.path.dirname(base_name)\n\n if not os.path.exists(archive_dir):\n if logger is not None:\n logger.info(\"creating %s\", archive_dir)\n if not dry_run:\n os.makedirs(archive_dir)\n\n # If zipfile module is not available, try spawning an external 'zip'\n # command.\n try:\n import 
zipfile\n except ImportError:\n zipfile = None\n\n if zipfile is None:\n _call_external_zip(base_dir, zip_filename, verbose, dry_run)\n else:\n if logger is not None:\n logger.info(\"creating '%s' and adding '%s' to it\",\n zip_filename, base_dir)\n\n if not dry_run:\n zip = zipfile.ZipFile(zip_filename, \"w\",\n compression=zipfile.ZIP_DEFLATED)\n\n for dirpath, dirnames, filenames in os.walk(base_dir):\n for name in filenames:\n path = os.path.normpath(os.path.join(dirpath, name))\n if os.path.isfile(path):\n zip.write(path, path)\n if logger is not None:\n logger.info(\"adding '%s'\", path)\n zip.close()\n\n return zip_filename\n\n_ARCHIVE_FORMATS = {\n 'gztar': (_make_tarball, [('compress', 'gzip')], \"gzip'ed tar-file\"),\n 'bztar': (_make_tarball, [('compress', 'bzip2')], \"bzip2'ed tar-file\"),\n 'tar': (_make_tarball, [('compress', None)], \"uncompressed tar file\"),\n 'zip': (_make_zipfile, [], \"ZIP file\"),\n }\n\nif _BZ2_SUPPORTED:\n _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],\n \"bzip2'ed tar-file\")\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 529, "n_words": 148, "vocab_size": 97, "complexity": 12, "nloc": 30, "token_counts": 210, "n_ast_nodes": 479, "n_identifiers": 34, "d_id": 3780, "documentation": { "docstring": "Create a zip file from all the files under 'base_dir'.\n\n The output zip file will be named 'base_name' + \".zip\". Uses either the\n \"zipfile\" Python module (if available) or the InfoZIP \"zip\" utility\n (if installed and found on the default search path). If neither tool is\n available, raises ExecError. Returns the name of the output zip\n file.\n ", "n_words": 57, "vocab_size": 47, "n_whitespaces": 78, "language": "en" } }, { "id": 48681, "commit_id": "56946fac8f29aa44ce84391f138d63c4c8a2a285", "repo": "django-rest-framework", "path": "tests/test_generics.py", "file_name": "test_generics.py", "fun_name": "test_get_instance_view_filters_out_name_with_filter_backend", "commit_message": "Preserve exception messages for wrapped Django exceptions (#8051)\n\n* Preserve messages for wrapped Django exceptions\r\n\r\n* Fix the test\r\n\r\n* Update test_generics.py\r\n\r\n* Update test_generics.py\r\n\r\nCo-authored-by: Tom Christie ", "code": "def test_get_instance_view_filters_out_name_with_filter_backend(self):\n \n instance_view = InstanceView.as_view(filter_backends=(ExclusiveFilterBackend,))\n request = factory.get('/1')\n response = instance_view(request, pk=1).render()\n assert response.status_code == status.HTTP_404_NOT_FOUND\n assert response.data == {\n 'detail': ErrorDetail(\n string='No BasicModel matches the given query.',\n code='not_found'\n )\n }\n", "url": "https://github.com/encode/django-rest-framework.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 132, "n_words": 31, "vocab_size": 27, "complexity": 1, "nloc": 11, "token_counts": 68, "n_ast_nodes": 116, "n_identifiers": 20, "d_id": 9567, "documentation": { "docstring": "\n GET requests to RetrieveUpdateDestroyAPIView should raise 404 when model filtered out.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 26, "language": "en" } }, { "id": 178438, "commit_id": "87c7dd5551f276dc0b68168d952c55aa3e4d07f8", "repo": "Nuitka", "path": "nuitka/plugins/PluginBase.py", "file_name": "PluginBase.py", "fun_name": "onCopiedDLL", "commit_message": "Plugins: Add support for modifying DLLs after standalone copy", "code": "def 
onCopiedDLL(self, dll_filename):\n \n # Virtual method, pylint: disable=no-self-use,unused-argument\n return None\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 19, "n_identifiers": 3, "d_id": 42698, "documentation": { "docstring": "Chance for a plugin to modify DLLs after copy, e.g. to compress it, remove attributes, etc.\n\n Args:\n dll_filename: the filename of the DLL\n\n Notes:\n Do not remove or add any files in this method, this will not work well, there\n is e.g. getExtraDLLs API to add things. This is only for post processing as\n described above.\n\n ", "n_words": 56, "vocab_size": 46, "n_whitespaces": 121, "language": "en" } }, { "id": 194093, "commit_id": "2e833520618dc460cbeb693e29e40b65a02ccafb", "repo": "vision", "path": "torchvision/__init__.py", "file_name": "__init__.py", "fun_name": "set_video_backend", "commit_message": "Pyav backend for VideoReader API (#6598)\n\n* Test: add backend parameter\r\n\r\n* VideoReader object now works on backend\r\n\r\n* Frame reading now passes\r\n\r\n* Keyframe seek now passes\r\n\r\n* Pyav backend now supports metadata\r\n\r\n* changes in test to reflect GPU decoder change\r\n\r\n* Linter?\r\n\r\n* Test GPU output\r\n\r\n* Addressing Joao's comments\r\n\r\n* lint\r\n\r\n* lint\r\n\r\n* Revert \"Test GPU output\"\r\n\r\nThis reverts commit f62e955d7dc81bcb23b40d58ea75413b9b62e76d.\r\n\r\n* lint?\r\n\r\n* lint\r\n\r\n* lint\r\n\r\n* Address issues in build?\r\n\r\n* hopefully doc fix\r\n\r\n* Arrgh\r\n\r\n* arrgh\r\n\r\n* fix typos\r\n\r\n* fix input options\r\n\r\n* remove read from memory option in pyav\r\n\r\n* skip read from mem test for gpu and pyab be\r\n\r\n* fix test\r\n\r\n* remove unused import\r\n\r\n* Hack to get reading from memory work with pyav\r\n\r\n* patch audio test\r\n\r\nCo-authored-by: Bruno Korbar \r\nCo-authored-by: Joao Gomes ", "code": "def set_video_backend(backend):\n \n global _video_backend\n if backend not in [\"pyav\", \"video_reader\", \"cuda\"]:\n raise ValueError(\"Invalid video backend '%s'. Options are 'pyav', 'video_reader' and 'cuda'\" % backend)\n if backend == \"video_reader\" and not io._HAS_VIDEO_OPT:\n # TODO: better messages\n message = \"video_reader video backend is not available. Please compile torchvision from source and try again\"\n raise RuntimeError(message)\n elif backend == \"cuda\" and not _HAS_GPU_VIDEO_DECODER:\n # TODO: better messages\n message = \"cuda video backend is not available.\"\n raise RuntimeError(message)\n else:\n _video_backend = backend\n\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 152, "n_words": 78, "vocab_size": 49, "complexity": 6, "nloc": 12, "token_counts": 66, "n_ast_nodes": 125, "n_identifiers": 9, "d_id": 46938, "documentation": { "docstring": "\n Specifies the package used to decode videos.\n\n Args:\n backend (string): Name of the video backend. one of {'pyav', 'video_reader'}.\n The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic\n binding for the FFmpeg libraries.\n The :mod:`video_reader` package includes a native C++ implementation on\n top of FFMPEG libraries, and a python API of TorchScript custom operator.\n It generally decodes faster than :mod:`pyav`, but is perhaps less robust.\n\n .. 
note::\n Building with FFMPEG is disabled by default in the latest `main`. If you want to use the 'video_reader'\n backend, please compile torchvision from source.\n ", "n_words": 95, "vocab_size": 77, "n_whitespaces": 184, "language": "en" } }, { "id": 143721, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/examples/simulators/sumo/marlenvironment.py", "file_name": "marlenvironment.py", "fun_name": "step", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def step(self, action, sumo_handler):\n \n logger.debug(\"Agent %s: action %d\", self.agent_id, action)\n # Subscriptions EXAMPLE:\n # {\"agent_0\": {64: 14.603468282230542, 104: None},\n # \"agent_1\": {64: 12.922797055918513,\n # 104: (\"veh.19\", 27.239870121802596)}}\n logger.debug(\n \"Subscriptions: %s\", pformat(sumo_handler.veh_subscriptions[self.agent_id])\n )\n previous_speed = sumo_handler.veh_subscriptions[self.agent_id][tc.VAR_SPEED]\n new_speed = previous_speed + self.action_to_meaning[action]\n logger.debug(\"Before %.2f\", previous_speed)\n sumo_handler.traci_handler.vehicle.setSpeed(self.agent_id, new_speed)\n logger.debug(\"After %.2f\", new_speed)\n return\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 184, "n_words": 49, "vocab_size": 40, "complexity": 1, "nloc": 11, "token_counts": 96, "n_ast_nodes": 157, "n_identifiers": 17, "d_id": 33027, "documentation": { "docstring": "Implements the logic of each specific action passed as input.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 259155, "commit_id": "36c6c74e5fd9033e9b89a1348de2466e7ac48a8c", "repo": "scikit-learn", "path": "sklearn/preprocessing/tests/test_discretization.py", "file_name": "test_discretization.py", "fun_name": "test_kbinsdiscrtizer_get_feature_names_out", "commit_message": "FIX Fixes KBinsDiscretizer for encode=ordinal (#22735)\n\nCo-authored-by: Guillaume Lemaitre ", "code": "def test_kbinsdiscrtizer_get_feature_names_out(encode, expected_names):\n \n X = [[-2, 1, -4], [-1, 2, -3], [0, 3, -2], [1, 4, -1]]\n\n kbd = KBinsDiscretizer(n_bins=4, encode=encode).fit(X)\n Xt = kbd.transform(X)\n\n input_features = [f\"feat{i}\" for i in range(3)]\n output_names = kbd.get_feature_names_out(input_features)\n assert Xt.shape[1] == output_names.shape[0]\n\n assert_array_equal(output_names, expected_names)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 64, "n_words": 40, "vocab_size": 36, "complexity": 2, "nloc": 8, "token_counts": 115, "n_ast_nodes": 174, "n_identifiers": 17, "d_id": 75608, "documentation": { "docstring": "Check get_feature_names_out for different settings.\n Non-regression test for #22731\n ", "n_words": 9, "vocab_size": 8, "n_whitespaces": 15, "language": "en" } }, { "id": 154793, "commit_id": "027f92a7655ae5b473839b7956ff52bf7879f3cc", "repo": "modin", "path": "modin/experimental/core/execution/native/implementations/hdk_on_native/dataframe/dataframe.py", "file_name": "dataframe.py", "fun_name": "_has_arrow_table", "commit_message": "FIX-#4022: Fixed empty data frame with index (#4910)\n\nSigned-off-by: Andrey Pavlenko ", "code": "def _has_arrow_table(self):\n \n if not isinstance(self._op, FrameNode):\n return False\n return all(p.arrow_table is not None for p in 
self._partitions.flatten())\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 49, "n_words": 17, "vocab_size": 15, "complexity": 3, "nloc": 4, "token_counts": 39, "n_ast_nodes": 63, "n_identifiers": 10, "d_id": 36150, "documentation": { "docstring": "\n Return True for materialized frame with Arrow table.\n\n Returns\n -------\n bool\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 47, "language": "en" } }, { "id": 146782, "commit_id": "814b49356c1e773f2ec8d7c643acef48b6db08cf", "repo": "ray", "path": "python/ray/tune/experiment.py", "file_name": "experiment.py", "fun_name": "get_experiment_checkpoint_dir", "commit_message": "[tuner] Tuner impl. (#22848)", "code": "def get_experiment_checkpoint_dir(cls, run_obj, local_dir=None, name=None):\n \n assert run_obj\n local_dir = _get_local_dir_with_expand_user(local_dir)\n run_identifier = cls.get_trainable_name(run_obj)\n combined_name = name or run_identifier\n\n dir_name = _get_dir_name(run_obj, name, combined_name)\n\n return os.path.join(local_dir, dir_name)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 75, "n_words": 26, "vocab_size": 22, "complexity": 2, "nloc": 7, "token_counts": 58, "n_ast_nodes": 89, "n_identifiers": 14, "d_id": 33777, "documentation": { "docstring": "Get experiment checkpoint dir without setting up an experiment.\n\n This is only used internally for better support of Tuner API.\n\n Args:\n run_obj (str|function|class): Trainable to run.\n name (str): The name of the experiment specified by user.\n local_dir (str): The local_dir path.\n\n Returns:\n Checkpoint directory for experiment.\n ", "n_words": 46, "vocab_size": 38, "n_whitespaces": 118, "language": "en" } }, { "id": 46825, "commit_id": "34154803ac73d62d3e969e480405df3073032622", "repo": "airflow", "path": "airflow/models/taskmixin.py", "file_name": "taskmixin.py", "fun_name": "dag_id", "commit_message": "Show tasks in grid view based on topological sort. 
(#22741)\n\nThis takes the existing topological sort that existed on a DAG and moves\r\nit down to TaskGroup.\r\n\r\nIn order to do this (and not have duplicated sort) the existing sort on\r\nDAG is re-implemented on top of the new method.\r\n\r\nThis also surfaced a tiny bug in deserialize_task_group where the\r\nSerializedTaskGroup did not have `dag` set -- it didn't cause any\r\nproblems until now but was needed to call `upstream_list` on a\r\nSerializedTaskGroup object.", "code": "def dag_id(self) -> str:\n \n if self.dag:\n return self.dag.dag_id\n return \"_in_memory_dag_\"\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 42, "n_words": 10, "vocab_size": 9, "complexity": 2, "nloc": 5, "token_counts": 21, "n_ast_nodes": 38, "n_identifiers": 4, "d_id": 9007, "documentation": { "docstring": "Returns dag id if it has one or an adhoc/meaningless ID", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 215103, "commit_id": "b2f8271fed3f05160431c55ad7c4e8f3e3e95c3e", "repo": "salt", "path": "salt/modules/aixpkg.py", "file_name": "aixpkg.py", "fun_name": "_check_pkg", "commit_message": "Complete intial tests for AIX yum and dnf support", "code": "def _check_pkg(target):\n \n log.debug(f\"_check_pkg target '{target}'\")\n ret = {}\n cmd = [\"/usr/bin/lslpp\", \"-Lc\", target]\n lines = __salt__[\"cmd.run_all\"](cmd, python_shell=False).splitlines()\n\n name = \"\"\n version_num = \"\"\n rpmpkg = False\n for line in lines:\n if line.startswith(\"#\"):\n continue\n\n comps = line.split(\":\")\n if len(comps) < 7:\n raise CommandExecutionError(\n \"Error occurred finding fileset/package\",\n info={\"errors\": comps[1].strip()},\n )\n\n # handle first matching line\n if \"R\" in comps[6]:\n name = comps[0]\n rpmpkg = True\n else:\n name = comps[1] # use fileset rather than rpm package\n\n version_num = comps[2]\n break\n\n log.debug(\n f\"_check_pkg returning name '{name}', version number '{version_num}', rpmpkg '{rpmpkg}'\"\n )\n return name, version_num, rpmpkg\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 290, "n_words": 94, "vocab_size": 70, "complexity": 5, "nloc": 28, "token_counts": 148, "n_ast_nodes": 272, "n_identifiers": 21, "d_id": 53820, "documentation": { "docstring": "\n Return name, version and if rpm package for specified target\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 17, "language": "en" } }, { "id": 160848, "commit_id": "4f479744bb9f5150d1406fdb0203c5d8714e7283", "repo": "numpy", "path": "numpy/polynomial/polynomial.py", "file_name": "polynomial.py", "fun_name": "polyfit", "commit_message": "DOC: Replace the mathematical notation N(...) with text.\n\nThe meaning of the second argument in the mathematical notation\nN(mu, b) for the normal distribution is not consistent. In some\nreferences it is the variance while in others it is the standard\ndeviation. 
Let's avoid the ambiguity by not using the notation.\n\nFixes #21296", "code": "def polyfit(x, y, deg, rcond=None, full=False, w=None):\n \n return pu._fit(polyvander, x, y, deg, rcond, full, w)\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 21, "n_words": 15, "vocab_size": 13, "complexity": 1, "nloc": 2, "token_counts": 41, "n_ast_nodes": 56, "n_identifiers": 10, "d_id": 38758, "documentation": { "docstring": "\n Least-squares fit of a polynomial to data.\n\n Return the coefficients of a polynomial of degree `deg` that is the\n least squares fit to the data values `y` given at points `x`. If `y` is\n 1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple\n fits are done, one for each column of `y`, and the resulting\n coefficients are stored in the corresponding columns of a 2-D return.\n The fitted polynomial(s) are in the form\n\n .. math:: p(x) = c_0 + c_1 * x + ... + c_n * x^n,\n\n where `n` is `deg`.\n\n Parameters\n ----------\n x : array_like, shape (`M`,)\n x-coordinates of the `M` sample (data) points ``(x[i], y[i])``.\n y : array_like, shape (`M`,) or (`M`, `K`)\n y-coordinates of the sample points. Several sets of sample points\n sharing the same x-coordinates can be (independently) fit with one\n call to `polyfit` by passing in for `y` a 2-D array that contains\n one data set per column.\n deg : int or 1-D array_like\n Degree(s) of the fitting polynomials. If `deg` is a single integer\n all terms up to and including the `deg`'th term are included in the\n fit. For NumPy versions >= 1.11.0 a list of integers specifying the\n degrees of the terms to include may be used instead.\n rcond : float, optional\n Relative condition number of the fit. Singular values smaller\n than `rcond`, relative to the largest singular value, will be\n ignored. The default value is ``len(x)*eps``, where `eps` is the\n relative precision of the platform's float type, about 2e-16 in\n most cases.\n full : bool, optional\n Switch determining the nature of the return value. When ``False``\n (the default) just the coefficients are returned; when ``True``,\n diagnostic information from the singular value decomposition (used\n to solve the fit's matrix equation) is also returned.\n w : array_like, shape (`M`,), optional\n Weights. If not None, the weight ``w[i]`` applies to the unsquared\n residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are\n chosen so that the errors of the products ``w[i]*y[i]`` all have the\n same variance. When using inverse-variance weighting, use\n ``w[i] = 1/sigma(y[i])``. The default value is None.\n\n .. versionadded:: 1.5.0\n\n Returns\n -------\n coef : ndarray, shape (`deg` + 1,) or (`deg` + 1, `K`)\n Polynomial coefficients ordered from low to high. If `y` was 2-D,\n the coefficients in column `k` of `coef` represent the polynomial\n fit to the data in `y`'s `k`-th column.\n\n [residuals, rank, singular_values, rcond] : list\n These values are only returned if ``full == True``\n\n - residuals -- sum of squared residuals of the least squares fit\n - rank -- the numerical rank of the scaled Vandermonde matrix\n - singular_values -- singular values of the scaled Vandermonde matrix\n - rcond -- value of `rcond`.\n\n For more details, see `numpy.linalg.lstsq`.\n\n Raises\n ------\n RankWarning\n Raised if the matrix in the least-squares fit is rank deficient.\n The warning is only raised if ``full == False``. 
The warnings can\n be turned off by:\n\n >>> import warnings\n >>> warnings.simplefilter('ignore', np.RankWarning)\n\n See Also\n --------\n numpy.polynomial.chebyshev.chebfit\n numpy.polynomial.legendre.legfit\n numpy.polynomial.laguerre.lagfit\n numpy.polynomial.hermite.hermfit\n numpy.polynomial.hermite_e.hermefit\n polyval : Evaluates a polynomial.\n polyvander : Vandermonde matrix for powers.\n numpy.linalg.lstsq : Computes a least-squares fit from the matrix.\n scipy.interpolate.UnivariateSpline : Computes spline fits.\n\n Notes\n -----\n The solution is the coefficients of the polynomial `p` that minimizes\n the sum of the weighted squared errors\n\n .. math:: E = \\\\sum_j w_j^2 * |y_j - p(x_j)|^2,\n\n where the :math:`w_j` are the weights. This problem is solved by\n setting up the (typically) over-determined matrix equation:\n\n .. math:: V(x) * c = w * y,\n\n where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the\n coefficients to be solved for, `w` are the weights, and `y` are the\n observed values. This equation is then solved using the singular value\n decomposition of `V`.\n\n If some of the singular values of `V` are so small that they are\n neglected (and `full` == ``False``), a `RankWarning` will be raised.\n This means that the coefficient values may be poorly determined.\n Fitting to a lower order polynomial will usually get rid of the warning\n (but may not be what you want, of course; if you have independent\n reason(s) for choosing the degree which isn't working, you may have to:\n a) reconsider those reasons, and/or b) reconsider the quality of your\n data). The `rcond` parameter can also be set to a value smaller than\n its default, but the resulting fit may be spurious and have large\n contributions from roundoff error.\n\n Polynomial fits using double precision tend to \"fail\" at about\n (polynomial) degree 20. Fits using Chebyshev or Legendre series are\n generally better conditioned, but much can still depend on the\n distribution of the sample points and the smoothness of the data. If\n the quality of the fit is inadequate, splines may be a good\n alternative.\n\n Examples\n --------\n >>> np.random.seed(123)\n >>> from numpy.polynomial import polynomial as P\n >>> x = np.linspace(-1,1,51) # x \"data\": [-1, -0.96, ..., 0.96, 1]\n >>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + Gaussian noise\n >>> c, stats = P.polyfit(x,y,3,full=True)\n >>> np.random.seed(123)\n >>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 
1\n array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286]) # may vary\n >>> stats # note the large SSR, explaining the rather poor results\n [array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316, # may vary\n 0.28853036]), 1.1324274851176597e-014]\n\n Same thing without the added noise\n\n >>> y = x**3 - x\n >>> c, stats = P.polyfit(x,y,3,full=True)\n >>> c # c[0], c[2] should be \"very close to 0\", c[1] ~= -1, c[3] ~= 1\n array([-6.36925336e-18, -1.00000000e+00, -4.08053781e-16, 1.00000000e+00])\n >>> stats # note the minuscule SSR\n [array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158, # may vary\n 0.50443316, 0.28853036]), 1.1324274851176597e-014]\n\n ", "n_words": 936, "vocab_size": 463, "n_whitespaces": 1500, "language": "en" } }, { "id": 108619, "commit_id": "85f30cbd485eddc93e3c9ff115ac21c0886909d5", "repo": "matplotlib", "path": "lib/matplotlib/_mathtext.py", "file_name": "_mathtext.py", "fun_name": "get_metrics", "commit_message": "Remove *math* parameter of various mathtext internal APIs.\n\nThe *math* parameter is passed through many layers of the call stack\nbut is ultimately only used for a single purpose: deciding whether to\nreplace the ASCII hyphen by a (longer) unicode minus. Instead of doing\nthat, just do the substitution at the parsing stage. In particular,\nthis fixes problematic unicode minus support with the \"cm\" fontset.\n\nThis patch also reverts a significant part of 52003e4, as LogFormatters\nno longer need to pass unicode minuses in mathtext -- everything gets\nconverted by mathtext. Likewise, this change also invalidates the\ntest_log_scales baseline image (old, buggy wrt. unicode minus); replace\nit by a test that the drawn ticks are as expected (which was the intent\nin 90c1aa3).", "code": "def get_metrics(self, font, font_class, sym, fontsize, dpi):\n r\n info = self._get_info(font, font_class, sym, fontsize, dpi)\n return info.metrics\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 37, "n_words": 17, "vocab_size": 14, "complexity": 1, "nloc": 38, "token_counts": 37, "n_ast_nodes": 50, "n_identifiers": 10, "d_id": 23274, "documentation": { "docstring": "\n Parameters\n ----------\n font : str\n One of the TeX font names: \"tt\", \"it\", \"rm\", \"cal\", \"sf\", \"bf\",\n \"default\", \"regular\", \"bb\", \"frak\", \"scr\". \"default\" and \"regular\"\n are synonyms and use the non-math font.\n font_class : str\n One of the TeX font names (as for *font*), but **not** \"bb\",\n \"frak\", or \"scr\". This is used to combine two font classes. The\n only supported combination currently is ``get_metrics(\"frak\", \"bf\",\n ...)``.\n sym : str\n A symbol in raw TeX form, e.g., \"1\", \"x\", or \"\\sigma\".\n fontsize : float\n Font size in points.\n dpi : float\n Rendering dots-per-inch.\n\n Returns\n -------\n object\n\n The returned object has the following attributes (all floats,\n except *slanted*):\n\n - *advance*: The advance distance (in points) of the glyph.\n - *height*: The height of the glyph in points.\n - *width*: The width of the glyph in points.\n - *xmin*, *xmax*, *ymin*, *ymax*: The ink rectangle of the glyph\n - *iceberg*: The distance from the baseline to the top of the\n glyph. 
(This corresponds to TeX's definition of \"height\".)\n - *slanted*: Whether the glyph should be considered as \"slanted\"\n (currently used for kerning sub/superscripts).\n ", "n_words": 181, "vocab_size": 117, "n_whitespaces": 487, "language": "en" } }, { "id": 264331, "commit_id": "b67859832afa52742defa0a5bd60f9be1ddbe8e4", "repo": "netbox", "path": "netbox/netbox/models/features.py", "file_name": "features.py", "fun_name": "to_objectchange", "commit_message": "Refactor to_objectchange()", "code": "def to_objectchange(self, action):\n \n from extras.models import ObjectChange\n objectchange = ObjectChange(\n changed_object=self,\n object_repr=str(self)[:200],\n action=action\n )\n if hasattr(self, '_prechange_snapshot'):\n objectchange.prechange_data = self._prechange_snapshot\n if action in (ObjectChangeActionChoices.ACTION_CREATE, ObjectChangeActionChoices.ACTION_UPDATE):\n objectchange.postchange_data = serialize_object(self)\n\n return objectchange\n\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 134, "n_words": 30, "vocab_size": 26, "complexity": 3, "nloc": 12, "token_counts": 75, "n_ast_nodes": 118, "n_identifiers": 18, "d_id": 77693, "documentation": { "docstring": "\n Return a new ObjectChange representing a change made to this object. This will typically be called automatically\n by ChangeLoggingMiddleware.\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 41, "language": "en" } }, { "id": 42581, "commit_id": "3ca43e26efd7d5aa37b3cd79446258d8bfa79561", "repo": "nltk", "path": "nltk/corpus/reader/wordnet.py", "file_name": "wordnet.py", "fun_name": "custom_lemmas", "commit_message": "Fix wordnet's all_synsets() function (#3078)\n\n* Fix all_synsets() function\r\n\r\n* Add simple regression tests for #3077\r\n\r\n* Add suggestions by @tomaarsen\r\n\r\nCo-authored-by: Tom Aarsen ", "code": "def custom_lemmas(self, tab_file, lang):\n \n lg = lang.split(\"_\")[0]\n if len(lg) != 3:\n raise ValueError(\"lang should be a (3 character) ISO 639-3 code\")\n self._lang_data[lang] = [\n defaultdict(list),\n defaultdict(list),\n defaultdict(list),\n defaultdict(list),\n ]\n for line in tab_file.readlines():\n if isinstance(line, bytes):\n # Support byte-stream files (e.g. 
as returned by Python 2's\n # open() function) as well as text-stream ones\n line = line.decode(\"utf-8\")\n if not line.startswith(\"#\"):\n triple = line.strip().split(\"\\t\")\n if len(triple) < 3:\n continue\n offset_pos, label = triple[:2]\n val = triple[-1]\n if self.map30:\n if offset_pos in self.map30:\n # Map offset_pos to current Wordnet version:\n offset_pos = self.map30[offset_pos]\n else:\n # Some OMW offsets were never in Wordnet:\n if (\n offset_pos not in self.nomap\n and offset_pos.replace(\"a\", \"s\") not in self.nomap\n ):\n warnings.warn(\n f\"{lang}: invalid offset {offset_pos} in '{line}'\"\n )\n continue\n elif offset_pos[-1] == \"a\":\n wnss = self.of2ss(offset_pos)\n if wnss and wnss.pos() == \"s\": # Wordnet pos is \"s\"\n # Label OMW adjective satellites back to their Wordnet pos (\"s\")\n offset_pos = self.ss2of(wnss)\n pair = label.split(\":\")\n attr = pair[-1]\n if len(pair) == 1 or pair[0] == lg:\n if attr == \"lemma\":\n val = val.strip().replace(\" \", \"_\")\n self._lang_data[lang][1][val.lower()].append(offset_pos)\n if attr in self.lg_attrs:\n self._lang_data[lang][self.lg_attrs.index(attr)][\n offset_pos\n ].append(val)\n", "url": "https://github.com/nltk/nltk.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 1060, "n_words": 185, "vocab_size": 122, "complexity": 17, "nloc": 45, "token_counts": 325, "n_ast_nodes": 554, "n_identifiers": 37, "d_id": 7637, "documentation": { "docstring": "\n Reads a custom tab file containing mappings of lemmas in the given\n language to Princeton WordNet 3.0 synset offsets, allowing NLTK's\n WordNet functions to then be used with that language.\n\n See the \"Tab files\" section at http://compling.hss.ntu.edu.sg/omw/ for\n documentation on the Multilingual WordNet tab file format.\n\n :param tab_file: Tab file as a file or file-like object\n :type: lang str\n :param: lang ISO 639-3 code of the language of the tab file\n ", "n_words": 71, "vocab_size": 53, "n_whitespaces": 135, "language": "en" } }, { "id": 115077, "commit_id": "c40c732253043ea111fbf197248a1bff4b7a524e", "repo": "mindsdb", "path": "mindsdb/integrations/libs/base_handler.py", "file_name": "base_handler.py", "fun_name": "connect", "commit_message": "handlers", "code": "def connect(self, **kwargs) -> Dict[str, int]:\n \n raise NotImplementedError()\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 7, "token_counts": 20, "n_ast_nodes": 33, "n_identifiers": 7, "d_id": 25356, "documentation": { "docstring": "\n Set up any connections required by the handler here.\n\n Should return output of check_status() method after attempting connection.\n ", "n_words": 18, "vocab_size": 18, "n_whitespaces": 40, "language": "en" } }, { "id": 87555, "commit_id": "a00ada51c238564b48412cd59f261b84492b96a5", "repo": "sentry", "path": "src/sentry/lang/javascript/processor.py", "file_name": "processor.py", "fun_name": "get_function_for_token", "commit_message": "ref(processor): Try to fallback to previous frames token function name (#40602)\n\nThis change applies the same heuristic that we previously used in the\r\noriginal `JavaScriptStacktraceProcessor`.\r\nThe rest is described in `get_function_for_token` function comment.", "code": "def get_function_for_token(frame, token, previous_frame=None):\n \n\n frame_function_name = frame.get(\"function\")\n token_function_name = 
token.function_name\n\n # Try to use the function name we got from sourcemap-cache, filtering useless names.\n if token_function_name not in USELESS_FN_NAMES:\n return token_function_name\n\n # If not found, ask the callsite (previous token) for function name if possible.\n if previous_frame is not None:\n # `preprocess_frame` is supposed to make sure that `data` is present,\n # but better safe than sorry.\n last_token = (previous_frame.get(\"data\") or {}).get(\"token\")\n if last_token:\n return last_token.name\n\n # If there was no minified name at all, return even useless, filtered one from the original token.\n if not frame_function_name:\n return token_function_name\n\n # Otherwise fallback to the old, minified name.\n return frame_function_name\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 195, "n_words": 109, "vocab_size": 74, "complexity": 6, "nloc": 12, "token_counts": 72, "n_ast_nodes": 127, "n_identifiers": 11, "d_id": 18314, "documentation": { "docstring": "\n Get function name for a given frame based on the token resolved by symbolic.\n It tries following paths in order:\n - return token function name if we have a usable value (filtered through `USELESS_FN_NAMES` list),\n - return mapped name of the caller (previous frame) token if it had,\n - return token function name, including filtered values if it mapped to anything in the first place,\n - return current frames function name as a fallback\n ", "n_words": 74, "vocab_size": 50, "n_whitespaces": 96, "language": "en" } }, { "id": 50182, "commit_id": "ffcde21305c61d950a9f93e57e6180c9a9665b87", "repo": "PaddleHub", "path": "modules/image/text_to_image/disco_diffusion_ernievil_base/vit_b_16x/ernievil2/transformers/beam.py", "file_name": "beam.py", "fun_name": "_expand_to_beam_size", "commit_message": "add disco_diffusion_ernievil_base", "code": "def _expand_to_beam_size(self, x):\n r\n check_type(x, 'x', (Variable), 'BeamSearchDecoder._expand_to_beam_size')\n x = nn.unsqueeze(x, [1])\n expand_times = [1] * len(x.shape)\n expand_times[1] = self.beam_size\n x = paddle.tile(x, expand_times)\n return x\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 74, "n_words": 26, "vocab_size": 21, "complexity": 1, "nloc": 22, "token_counts": 65, "n_ast_nodes": 102, "n_identifiers": 13, "d_id": 10040, "documentation": { "docstring": "\n This function takes a tensor t shaped `[batch_size, s0, s1, ...]` composed\n of minibatch entries `t[0], ..., t[batch_size - 1]` and tiles it to have a\n shape `[batch_size, beam_size, s0, s1, ...]` composed of minibatch entries\n `t[0], t[0], ..., t[1], t[1], ...` where each minibatch entry is repeated\n `beam_size` times.\n\n Parameters:\n x(Variable): A tensor with shape `[batch_size, ...]`, The data type\n should be float32, float64, int32, int64 or bool.\n\n Returns:\n Variable: A tensor with shape `[batch_size, beam_size, ...]`, whose \\\n data type is same as `x`.\n ", "n_words": 86, "vocab_size": 60, "n_whitespaces": 195, "language": "en" } }, { "id": 29997, "commit_id": "db7e91a3e124b0df2c08d373a541d9a225ebcb05", "repo": "saleor", "path": "saleor/graphql/core/mutations.py", "file_name": "mutations.py", "fun_name": "get_object_id", "commit_message": "Allow external references to be used instead of Saleor-assigned IDs (#11410)\n\n* Add external_reference to Product model; handle product query, 
update and delete by external_reference; cover changes with tests\r\n\r\nHandle ProductVariant\r\n\r\nHandle Order\r\n\r\nHandle Attribute\r\n\r\nHandle Warehouse query only\r\n\r\nRefactor resolvers\r\n\r\nHandle Account\r\n\r\nCode rafctor, fix tests\r\n\r\nAllow updating external_reference field; rename ext_ref in resolvers;\r\n\r\n* update changelog\r\n\r\n* Add tests to check externalReference uniqueness", "code": "def get_object_id(cls, **data):\n \n object_id, ext_ref = data.get(\"id\"), data.get(\"external_reference\")\n validate_one_of_args_is_in_mutation(\n CoreErrorCode, \"id\", object_id, \"external_reference\", ext_ref\n )\n\n if ext_ref and not object_id:\n object_id = ext_ref_to_global_id_or_error(cls._meta.model, ext_ref)\n\n return object_id\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 90, "n_words": 26, "vocab_size": 21, "complexity": 3, "nloc": 8, "token_counts": 58, "n_ast_nodes": 98, "n_identifiers": 11, "d_id": 5272, "documentation": { "docstring": "Resolve object id by given id or external reference.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 160160, "commit_id": "729ad4f92420231e2a7009b3223c6c7620b8b808", "repo": "numpy", "path": "numpy/f2py/tests/test_f2py2e.py", "file_name": "test_f2py2e.py", "fun_name": "test_mod_gen_f77", "commit_message": "TST: Initialize f2py2e tests of the F2PY CLI (#20668)\n\nIncreases F2PY coverage by around 15 percent. For the CLI itself it covers the major features (around 70 percent), with the exception of mostly numpy.distutils stuff.\r\n\r\nMore importantly, sets the groundwork for #20056, in that passing the same testsuite should indicate feature parity.", "code": "def test_mod_gen_f77(capfd, hello_world_f90, monkeypatch):\n \n MNAME = \"hi\"\n foutl = get_io_paths(hello_world_f90, mname=MNAME)\n ipath = foutl.f90inp\n monkeypatch.setattr(sys, \"argv\", f'f2py {ipath} -m {MNAME}'.split())\n with util.switchdir(ipath.parent):\n f2pycli()\n\n # Always generate C module\n assert Path.exists(foutl.cmodf)\n # File contains a function, check for F77 wrappers\n assert Path.exists(foutl.wrap77)\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 78, "n_words": 41, "vocab_size": 37, "complexity": 1, "nloc": 9, "token_counts": 74, "n_ast_nodes": 134, "n_identifiers": 21, "d_id": 38532, "documentation": { "docstring": "Checks the generation of files based on a module name\n CLI :: -m\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 19, "language": "en" } }, { "id": 272643, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/layers/merging/average.py", "file_name": "average.py", "fun_name": "average", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def average(inputs, **kwargs):\n \n return Average(**kwargs)(inputs)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 11, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 18, "n_ast_nodes": 32, "n_identifiers": 4, "d_id": 81029, "documentation": { "docstring": "Functional interface to the `tf.keras.layers.Average` layer.\n\n Example:\n\n >>> x1 = np.ones((2, 2))\n >>> x2 = np.zeros((2, 2))\n >>> y = tf.keras.layers.Average()([x1, x2])\n >>> 
y.numpy().tolist()\n [[0.5, 0.5], [0.5, 0.5]]\n\n Usage in a functional model:\n\n >>> input1 = tf.keras.layers.Input(shape=(16,))\n >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1)\n >>> input2 = tf.keras.layers.Input(shape=(32,))\n >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2)\n >>> avg = tf.keras.layers.Average()([x1, x2])\n >>> out = tf.keras.layers.Dense(4)(avg)\n >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)\n\n Args:\n inputs: A list of input tensors.\n **kwargs: Standard layer keyword arguments.\n\n Returns:\n A tensor, the average of the inputs.\n\n Raises:\n ValueError: If there is a shape mismatch between the inputs and the shapes\n cannot be broadcasted to match.\n ", "n_words": 105, "vocab_size": 72, "n_whitespaces": 192, "language": "en" } }, { "id": 67202, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/regional/report/eway_bill/eway_bill.py", "file_name": "eway_bill.py", "fun_name": "get_data", "commit_message": "style: format code with black", "code": "def get_data(filters):\n\n\tconditions = get_conditions(filters)\n\n\tdata = frappe.db.sql(\n\t\t\n\t\t% conditions,\n\t\tas_dict=1,\n\t)\n\n\tunit = {\n\t\t\"Bag\": \"BAGS\",\n\t\t\"Bottle\": \"BOTTLES\",\n\t\t\"Kg\": \"KILOGRAMS\",\n\t\t\"Liter\": \"LITERS\",\n\t\t\"Meter\": \"METERS\",\n\t\t\"Nos\": \"NUMBERS\",\n\t\t\"PKT\": \"PACKS\",\n\t\t\"Roll\": \"ROLLS\",\n\t\t\"Set\": \"SETS\",\n\t}\n\n\t# Regular expression set to remove all the special characters\n\tspecial_characters = r\"[$%^*()+\\\\[\\]{};':\\\"\\\\|<>.?]\"\n\n\tfor row in data:\n\t\tset_defaults(row)\n\t\tset_taxes(row, filters)\n\t\tset_address_details(row, special_characters)\n\n\t\t# Eway Bill accepts date as dd/mm/yyyy and not dd-mm-yyyy\n\t\trow.posting_date = \"/\".join(str(row.posting_date).replace(\"-\", \"/\").split(\"/\")[::-1])\n\t\trow.lr_date = \"/\".join(str(row.lr_date).replace(\"-\", \"/\").split(\"/\")[::-1])\n\n\t\tif row.gst_vehicle_type == \"Over Dimensional Cargo (ODC)\":\n\t\t\trow.gst_vehicle_type = \"ODC\"\n\n\t\trow.item_name = re.sub(special_characters, \" \", row.item_name)\n\t\trow.description = row.item_name\n\n\t\trow.uom = unit.get(row.uom, row.uom)\n\n\t\t# For removing special charactes and numbers from customer.\n\t\trow.customer = re.sub(special_characters[:-1] + \"&0-9\" + \"]\", \"\", row.customer)\n\n\treturn data\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 83, "n_words": 117, "vocab_size": 98, "complexity": 3, "nloc": 39, "token_counts": 235, "n_ast_nodes": 423, "n_identifiers": 29, "d_id": 14440, "documentation": { "docstring": "\n\t\tSELECT\n\t\t\tdn.name as dn_id, dn.posting_date, dn.company, dn.company_gstin, dn.customer, dn.customer_gstin, dni.item_code, dni.item_name, dni.description, dni.gst_hsn_code, dni.uom, dni.qty, dni.amount, dn.mode_of_transport, dn.distance, dn.transporter_name, dn.gst_transporter_id, dn.lr_no, dn.lr_date, dn.vehicle_no, dn.gst_vehicle_type, dn.company_address, dn.shipping_address_name\n\t\tFROM\n\t\t\t`tabDelivery Note` AS dn join `tabDelivery Note Item` AS dni on (dni.parent = dn.name)\n\t\tWHERE\n\t\t\tdn.docstatus < 2\n\t\t\t%s ", "n_words": 46, "vocab_size": 44, "n_whitespaces": 40, "language": "en" } }, { "id": 42898, "commit_id": "55fd02a38919526776cfe69d715873da75d6f26f", "repo": "airflow", "path": "airflow/providers/google/common/hooks/base_google.py", "file_name": "base_google.py", 
"fun_name": "_get_credentials_and_project_id", "commit_message": "Add key_secret_project_id parameter which specifies a project with KeyFile (#23930)", "code": "def _get_credentials_and_project_id(self) -> Tuple[google.auth.credentials.Credentials, Optional[str]]:\n \n if self._cached_credentials is not None:\n return self._cached_credentials, self._cached_project_id\n\n key_path: Optional[str] = self._get_field('key_path', None)\n try:\n keyfile_dict: Optional[str] = self._get_field('keyfile_dict', None)\n keyfile_dict_json: Optional[Dict[str, str]] = None\n if keyfile_dict:\n keyfile_dict_json = json.loads(keyfile_dict)\n except json.decoder.JSONDecodeError:\n raise AirflowException('Invalid key JSON.')\n key_secret_name: Optional[str] = self._get_field('key_secret_name', None)\n key_secret_project_id: Optional[str] = self._get_field('key_secret_project_id', None)\n\n target_principal, delegates = _get_target_principal_and_delegates(self.impersonation_chain)\n\n credentials, project_id = get_credentials_and_project_id(\n key_path=key_path,\n keyfile_dict=keyfile_dict_json,\n key_secret_name=key_secret_name,\n key_secret_project_id=key_secret_project_id,\n scopes=self.scopes,\n delegate_to=self.delegate_to,\n target_principal=target_principal,\n delegates=delegates,\n )\n\n overridden_project_id = self._get_field('project')\n if overridden_project_id:\n project_id = overridden_project_id\n\n self._cached_credentials = credentials\n self._cached_project_id = project_id\n\n return credentials, project_id\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 358, "n_words": 84, "vocab_size": 56, "complexity": 5, "nloc": 31, "token_counts": 217, "n_ast_nodes": 337, "n_identifiers": 32, "d_id": 7759, "documentation": { "docstring": "Returns the Credentials object for Google API and the associated project_id", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 215590, "commit_id": "43277294a3454e5dcd9079e005f747bf880801f6", "repo": "salt", "path": "salt/transport/tcp.py", "file_name": "tcp.py", "fun_name": "connect", "commit_message": "Test fix", "code": "def connect(self):\n \n if hasattr(self, \"_connecting_future\") and not self._connecting_future.done():\n future = self._connecting_future\n else:\n future = salt.ext.tornado.concurrent.Future()\n self._connecting_future = future\n self.io_loop.add_callback(self._connect)\n\n # Add the callback only when a new future is created\n if self.connect_callback is not None:\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 122, "n_words": 35, "vocab_size": 26, "complexity": 4, "nloc": 11, "token_counts": 76, "n_ast_nodes": 112, "n_identifiers": 15, "d_id": 54043, "documentation": { "docstring": "\n Ask for this client to reconnect to the origin\n ", "n_words": 9, "vocab_size": 8, "n_whitespaces": 24, "language": "en" } }, { "id": 321887, "commit_id": "79bb6670d8969b850965cd5d895bcd8f09d59311", "repo": "qutebrowser", "path": "tests/unit/mainwindow/statusbar/test_backforward.py", "file_name": "test_backforward.py", "fun_name": "test_state_changes_on_tab_change", "commit_message": "bar: Test enabled attribute on progress and backforward\n\nThere is now some code in statusbar relying on the enabled attribute\nstopping events from being processed (or at least stopping them from\nshowing the widget again). 
So add tests to make sure that behaviour\nkeeps working.\n\nAlso split the big test in test_backforward into a couple of smaller\nones and pull some common lines out to a (still clunky) fixture.", "code": "def test_state_changes_on_tab_change(backforward_widget, tabs, fake_web_tab):\n \n tab_with_history = fake_web_tab(can_go_back=True, can_go_forward=True)\n tab_without_history = fake_web_tab(can_go_back=False, can_go_forward=False)\n tabs.widget.tabs = [tab_with_history]\n backforward_widget.enabled = True\n\n backforward_widget.on_tab_cur_url_changed(tabs)\n assert backforward_widget.isVisible()\n\n tabs.widget.tabs = [tab_without_history]\n backforward_widget.on_tab_cur_url_changed(tabs)\n assert backforward_widget.text() == ''\n assert not backforward_widget.isVisible()\n\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 65, "n_words": 32, "vocab_size": 23, "complexity": 1, "nloc": 11, "token_counts": 90, "n_ast_nodes": 146, "n_identifiers": 13, "d_id": 117965, "documentation": { "docstring": "Test we go invisible when switching to a tab without history.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 66651, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/patches/v12_0/rename_lost_reason_detail.py", "file_name": "rename_lost_reason_detail.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute():\n\tif frappe.db.exists(\"DocType\", \"Lost Reason Detail\"):\n\t\tfrappe.reload_doc(\"crm\", \"doctype\", \"opportunity_lost_reason\")\n\t\tfrappe.reload_doc(\"crm\", \"doctype\", \"opportunity_lost_reason_detail\")\n\t\tfrappe.reload_doc(\"setup\", \"doctype\", \"quotation_lost_reason_detail\")\n\n\t\tfrappe.db.sql(\n\t\t\t\n\t\t)\n\n\t\tfrappe.db.sql(\n\t\t\t\n\t\t)\n\n\t\tfrappe.db.sql(\n\t\t\t\n\t\t)\n\n\t\tfrappe.delete_doc(\"DocType\", \"Lost Reason Detail\")\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 14, "n_words": 26, "vocab_size": 17, "complexity": 2, "nloc": 17, "token_counts": 78, "n_ast_nodes": 151, "n_identifiers": 7, "d_id": 14272, "documentation": { "docstring": "INSERT INTO `tabOpportunity Lost Reason Detail` SELECT * FROM `tabLost Reason Detail` WHERE `parenttype` = 'Opportunity'INSERT INTO `tabQuotation Lost Reason Detail` SELECT * FROM `tabLost Reason Detail` WHERE `parenttype` = 'Quotation'INSERT INTO `tabQuotation Lost Reason` (`name`, `creation`, `modified`, `modified_by`, `owner`, `docstatus`, `parent`, `parentfield`, `parenttype`, `idx`, `_comments`, `_assign`, `_user_tags`, `_liked_by`, `order_lost_reason`)\n SELECT o.`name`, o.`creation`, o.`modified`, o.`modified_by`, o.`owner`, o.`docstatus`, o.`parent`, o.`parentfield`, o.`parenttype`, o.`idx`, o.`_comments`, o.`_assign`, o.`_user_tags`, o.`_liked_by`, o.`lost_reason`\n FROM `tabOpportunity Lost Reason` o LEFT JOIN `tabQuotation Lost Reason` q ON q.name = o.name WHERE q.name IS NULL", "n_words": 85, "vocab_size": 56, "n_whitespaces": 106, "language": "en" } }, { "id": 290641, "commit_id": "435fc237375b86a5d6d8498ba5216c208b665ecc", "repo": "core", "path": "homeassistant/components/shelly/coordinator.py", "file_name": "coordinator.py", "fun_name": "_async_run_connected_events", "commit_message": "Add shelly ble scanner support (#82007)", "code": "async def 
_async_run_connected_events(self) -> None:\n \n await self._async_connect_ble_scanner()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 7, "token_counts": 14, "n_ast_nodes": 28, "n_identifiers": 3, "d_id": 89755, "documentation": { "docstring": "Run connected events.\n\n This will be executed on connect or when the config entry\n is updated.\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 37, "language": "en" } }, { "id": 276314, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/testing_infra/keras_doctest_lib.py", "file_name": "keras_doctest_lib.py", "fun_name": "_tf_tensor_numpy_output", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _tf_tensor_numpy_output(self, string):\n modified_string = self._NUMPY_OUTPUT_RE.sub(r\"\\1\", string)\n return modified_string, modified_string != string\n\n MESSAGE = textwrap.dedent(\n \n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 43, "n_words": 16, "vocab_size": 14, "complexity": 1, "nloc": 3, "token_counts": 26, "n_ast_nodes": 56, "n_identifiers": 9, "d_id": 81627, "documentation": { "docstring": "\\n\n #############################################################\n Check the documentation (go/testable-docstrings) on how to\n write testable docstrings.\n #############################################################", "n_words": 13, "vocab_size": 12, "n_whitespaces": 40, "language": "en" } }, { "id": 247752, "commit_id": "12d1f82db213603972d60be3f46f6a36c3c2330f", "repo": "synapse", "path": "scripts-dev/release.py", "file_name": "release.py", "fun_name": "cli", "commit_message": "Generate announcement links in release script (#12242)", "code": "def cli():\n \n\n\n@cli.command()", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "@cli.command()", "n_ast_errors": 1, "ast_levels": 6, "n_whitespaces": 5, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 1, "token_counts": 5, "n_ast_nodes": 23, "n_identifiers": 2, "d_id": 71897, "documentation": { "docstring": "An interactive script to walk through the parts of creating a release.\n\n Requires the dev dependencies be installed, which can be done via:\n\n pip install -e .[dev]\n\n Then to use:\n\n ./scripts-dev/release.py prepare\n\n # ... ask others to look at the changelog ...\n\n ./scripts-dev/release.py tag\n\n # ... 
wait for assets to build ...\n\n ./scripts-dev/release.py publish\n ./scripts-dev/release.py upload\n\n # Optional: generate some nice links for the announcement\n\n ./scripts-dev/release.py upload\n\n If the env var GH_TOKEN (or GITHUB_TOKEN) is set, or passed into the\n `tag`/`publish` command, then a new draft release will be created/published.\n ", "n_words": 90, "vocab_size": 68, "n_whitespaces": 168, "language": "en" } }, { "id": 66743, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/patches/v13_0/delete_orphaned_tables.py", "file_name": "delete_orphaned_tables.py", "fun_name": "check_for_new_doc_with_same_name_as_deleted_parent", "commit_message": "style: format code with black", "code": "def check_for_new_doc_with_same_name_as_deleted_parent(doc):\n\t\n\n\tparent_creation_time = frappe.db.get_value(doc[\"parenttype\"], doc[\"parent\"], \"creation\")\n\tchild_creation_time = doc[\"creation\"]\n\n\treturn getdate(parent_creation_time) > getdate(child_creation_time)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 10, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 4, "token_counts": 42, "n_ast_nodes": 74, "n_identifiers": 8, "d_id": 14315, "documentation": { "docstring": "\n\tCompares creation times of parent and child docs.\n\tSince Transaction Deletion Record resets the naming series after deletion,\n\tit allows the creation of new docs with the same names as the deleted ones.\n\t", "n_words": 33, "vocab_size": 28, "n_whitespaces": 30, "language": "en" } }, { "id": 107201, "commit_id": "b24acb7772e0534f4bcdb0b5b492d9d94954dd91", "repo": "matplotlib", "path": "lib/matplotlib/patches.py", "file_name": "patches.py", "fun_name": "set_capstyle", "commit_message": "DOC: Document default cap styles\n\n- remove '(default)' from cap style demo as this is only true for Line2D\n and the default rcParameters\n- document default cap styles for Line2D and Patch in their cap style\n setters\n- document default cap style for GraphicsContextBase in the same way as\n it's already done for joinstyle", "code": "def set_capstyle(self, s):\n \n cs = CapStyle(s)\n self._capstyle = cs\n self.stale = True\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 40, "n_words": 12, "vocab_size": 9, "complexity": 1, "nloc": 4, "token_counts": 24, "n_ast_nodes": 41, "n_identifiers": 7, "d_id": 22641, "documentation": { "docstring": "\n Set the `.CapStyle`.\n\n The default capstyle is 'round' for `.FancyArrowPatch` and 'butt' for\n all other patches.\n\n Parameters\n ----------\n s : `.CapStyle` or %(CapStyle)s\n ", "n_words": 23, "vocab_size": 22, "n_whitespaces": 73, "language": "en" } }, { "id": 153576, "commit_id": "605efa618e7994681f57b11d04d417f353ef8d50", "repo": "modin", "path": "modin/pandas/base.py", "file_name": "base.py", "fun_name": "mode", "commit_message": "DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333)\n\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: Alexander Myskov ", "code": "def mode(self, axis=0, numeric_only=False, dropna=True): # noqa: PR01, RT01, D200\n \n axis = self._get_axis_number(axis)\n return self.__constructor__(\n query_compiler=self._query_compiler.mode(\n axis=axis, numeric_only=numeric_only, dropna=dropna\n )\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 87, "n_words": 21, "vocab_size": 20, "complexity": 1, "nloc": 7, "token_counts": 52, "n_ast_nodes": 80, "n_identifiers": 9, "d_id": 35457, "documentation": { "docstring": "\n Get the mode(s) of each element along the selected axis.\n ", "n_words": 10, "vocab_size": 9, "n_whitespaces": 25, "language": "en" } }, { "id": 218296, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/importlib/metadata/__init__.py", "file_name": "__init__.py", "fun_name": "read_text", "commit_message": "add python 3.10.4 for windows", "code": "def read_text(self, filename):\n \n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 10, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 1, "token_counts": 8, "n_ast_nodes": 15, "n_identifiers": 3, "d_id": 55244, "documentation": { "docstring": "Attempt to load metadata file given by the name.\n\n :param filename: The name of the file in the distribution info.\n :return: The text if found, otherwise None.\n ", "n_words": 27, "vocab_size": 23, "n_whitespaces": 48, "language": "en" } }, { "id": 303600, "commit_id": "7cd4be1310b3f76398b4404d3f4ecb26b9533cee", "repo": "core", "path": "tests/components/hdmi_cec/test_switch.py", "file_name": "test_switch.py", "fun_name": "test_load_types", "commit_message": "Add tests for the HDMI-CEC integration (#75094)\n\n* Add basic tests to the HDMI-CEC component\r\n\r\n* Add tests for the HDMI-CEC switch component\r\n\r\n* Add test for watchdog code\r\n\r\n* Start adding tests for the HDMI-CEC media player platform\r\n\r\nAlso some cleanup and code move.\r\n\r\n* Add more tests for media_player\r\n\r\nAnd cleanup some switch tests.\r\n\r\n* Improve xfail message for features\r\n\r\n* Align test pyCEC dependency with main dependency\r\n\r\n* Make fixtures snake_case\r\n\r\n* Cleanup call asserts\r\n\r\n* Cleanup service tests\r\n\r\n* fix issues with media player tests\r\n\r\n* Cleanup MockHDMIDevice class\r\n\r\n* Cleanup watchdog tests\r\n\r\n* Add myself as code owner for the HDMI-CEC integration\r\n\r\n* Fix async fire time changed time jump\r\n\r\n* Fix event api sync context\r\n\r\n* Delint tests\r\n\r\n* Parametrize watchdog test\r\n\r\nCo-authored-by: Martin Hjelmare ", "code": "async def test_load_types(hass, create_hdmi_network, create_cec_entity):\n \n config = {\"platform\": \"media_player\", \"types\": {\"hdmi_cec.hdmi_3\": \"switch\"}}\n hdmi_network = await create_hdmi_network(config=config)\n mock_hdmi_device = MockHDMIDevice(logical_address=3)\n await create_cec_entity(hdmi_network, mock_hdmi_device)\n mock_hdmi_device.set_update_callback.assert_called_once()\n state = hass.states.get(\"media_player.hdmi_3\")\n assert state is None\n\n state = hass.states.get(\"switch.hdmi_3\")\n assert state is not None\n\n mock_hdmi_device = MockHDMIDevice(logical_address=4)\n await create_cec_entity(hdmi_network, mock_hdmi_device)\n mock_hdmi_device.set_update_callback.assert_called_once()\n state = hass.states.get(\"media_player.hdmi_4\")\n assert state is not None\n\n state = hass.states.get(\"switch.hdmi_4\")\n assert state is None\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 111, "n_words": 60, "vocab_size": 30, "complexity": 1, "nloc": 17, "token_counts": 136, "n_ast_nodes": 235, "n_identifiers": 14, "d_id": 102418, 
"documentation": { "docstring": "Test that switch entity is loaded when types is set.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 274464, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/legacy_tf_layers/variable_scope_shim.py", "file_name": "variable_scope_shim.py", "fun_name": "track_tf1_style_variables", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def track_tf1_style_variables(method):\n ", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "\"\"\"Wrapmodulethis decorator to capturestyle weights.\n\n Decorating a `tf.keras.Layer`'s or `tf.Module`'s methods withwill cause the layermodule to track weightsviaand by extensioninside the decorated method.\n\n In addition to tracking the weights themselves under theif the methodto a `tf.keras.Layer` then any regularization losses specified via`tf.compat.v1.layers` regularizer arguments willtracked by the layer under the standard `layer.losses` property.\n\n This tracking enables using large classes ofpropertymodelcode inside of KerasTF2 behaviors enabled.\n\n Example of capturingmodeling codea Keras```", "n_ast_errors": 20, "ast_levels": 12, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 5, "token_counts": 25, "n_ast_nodes": 244, "n_identifiers": 73, "d_id": 81205, "documentation": { "docstring": "Wrap layer & module methods in this decorator to capture tf1-style weights.\n\n Decorating a `tf.keras.Layer`'s or `tf.Module`'s methods with this\n decorator will cause the layer/module to track weights created/used\n via `tf.compat.v1.get_variable` (and by extension `tf.compat.v1.layers`)\n inside the decorated method.\n\n In addition to tracking the weights themselves under the standard\n `layer.variable`/`module.variable`/etc. 
properties, if the method belongs\n to a `tf.keras.Layer` then any regularization losses specified via the\n `get_variable` or `tf.compat.v1.layers` regularizer arguments will get\n tracked by the layer under the standard `layer.losses` property.\n\n This tracking enables using large classes of TF1-style model-forward-pass\n code inside of Keras layers or `tf.Modules` in TF2 with TF2 behaviors enabled.\n\n Example of capturing tf.compat.v1.layer-based modeling code as a Keras layer:\n\n ```python", "n_words": 114, "vocab_size": 81, "n_whitespaces": 153, "language": "en" } }, { "id": 61500, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/cachecontrol/heuristics.py", "file_name": "heuristics.py", "fun_name": "warning", "commit_message": "upd; format", "code": "def warning(self, response):\n \n return '110 - \"Response is Stale\"'\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 20, "n_identifiers": 3, "d_id": 12595, "documentation": { "docstring": "\n Return a valid 1xx warning header value describing the cache\n adjustments.\n\n The response is provided too allow warnings like 113\n http://tools.ietf.org/html/rfc7234#section-5.5.4 where we need\n to explicitly say response is over 24 hours old.\n ", "n_words": 33, "vocab_size": 31, "n_whitespaces": 76, "language": "en" } }, { "id": 337603, "commit_id": "f6ec2660f01e5bb37399407b3a01b72a43ceb328", "repo": "accelerate", "path": "src/accelerate/launchers.py", "file_name": "launchers.py", "fun_name": "debug_launcher", "commit_message": "Refactor version checking into a utility (#395)\n\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def debug_launcher(function, args=(), num_processes=2):\n \n if is_torch_version(\"<\", \"1.5.0\"):\n raise ImportError(\n \"Using `debug_launcher` for distributed training on GPUs require torch >= 1.5.0, got \"\n f\"{torch.__version__}.\"\n )\n\n from torch.multiprocessing import start_processes\n\n with tempfile.NamedTemporaryFile() as tmp_file:\n # torch.distributed will expect a few environment variable to be here. We set the ones common to each\n # process here (the other ones will be set be the launcher).\n with patch_environment(\n world_size=num_processes,\n master_addr=\"127.0.01\",\n master_port=\"29500\",\n mixed_precision=\"no\",\n accelerate_debug_rdv_file=tmp_file.name,\n use_cpu=\"yes\",\n ):\n launcher = PrepareForLaunch(function, debug=True)\n start_processes(launcher, args=args, nprocs=num_processes, start_method=\"fork\")\n", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 243, "n_words": 79, "vocab_size": 70, "complexity": 2, "nloc": 36, "token_counts": 102, "n_ast_nodes": 182, "n_identifiers": 26, "d_id": 121089, "documentation": { "docstring": "\n Launches a training function using several processes on CPU for debugging purposes.\n\n \n\n This function is provided for internal testing and debugging, but it's not intended for real trainings. 
It will\n only use the CPU.\n\n \n\n Args:\n function (`Callable`):\n The training function to execute.\n args (`Tuple`):\n Tuple of arguments to pass to the function (it will receive `*args`).\n num_processes (`int`, *optional*, defaults to 2):\n The number of processes to use for training.\n ", "n_words": 73, "vocab_size": 55, "n_whitespaces": 149, "language": "en" } }, { "id": 125305, "commit_id": "b87731c1b64988cea5ce80a6aec55207ef7efd6f", "repo": "ray", "path": "python/ray/_private/resource_spec.py", "file_name": "resource_spec.py", "fun_name": "_autodetect_num_gpus", "commit_message": "Windows gpu detection workaround with GPUtil (#25701)\n\nBecause [WMIC is now deprecated](https://docs.microsoft.com/en-us/windows/deployment/planning/windows-10-deprecated-features), #9300 may stop working on recent Windows systems. As a workaround this PR extends GPUtil to do GPU detection when installed on Windows systems.\r\n\r\nCo-authored-by: Matti Picus ", "code": "def _autodetect_num_gpus():\n \n result = 0\n if importlib.util.find_spec(\"GPUtil\"):\n gpu_list = GPUtil.getGPUs()\n result = len(gpu_list)\n elif sys.platform.startswith(\"linux\"):\n proc_gpus_path = \"/proc/driver/nvidia/gpus\"\n if os.path.isdir(proc_gpus_path):\n result = len(os.listdir(proc_gpus_path))\n elif sys.platform == \"win32\":\n props = \"AdapterCompatibility\"\n cmdargs = [\"WMIC\", \"PATH\", \"Win32_VideoController\", \"GET\", props]\n lines = subprocess.check_output(cmdargs).splitlines()[1:]\n result = len([x.rstrip() for x in lines if x.startswith(b\"NVIDIA\")])\n return result\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 136, "n_words": 51, "vocab_size": 35, "complexity": 7, "nloc": 15, "token_counts": 130, "n_ast_nodes": 227, "n_identifiers": 25, "d_id": 27830, "documentation": { "docstring": "Attempt to detect the number of GPUs on this machine.\n\n TODO(rkn): Only detects NVidia GPUs (except when using WMIC on windows)\n\n Returns:\n The number of GPUs if any were detected, otherwise 0.\n ", "n_words": 32, "vocab_size": 27, "n_whitespaces": 48, "language": "en" } }, { "id": 279206, "commit_id": "4f1308112f4188c4e14fdf3a59af8fe5f30db61f", "repo": "keras", "path": "keras/utils/losses_utils.py", "file_name": "losses_utils.py", "fun_name": "apply_valid_mask", "commit_message": "Update docs", "code": "def apply_valid_mask(losses, sw, mask, reduction):\n \n if mask is not None:\n mask = tf.cast(mask, losses.dtype)\n\n if reduction in (ReductionV2.AUTO, ReductionV2.SUM_OVER_BATCH_SIZE):\n # Valid entries have weight `total/valid`, while invalid ones\n # have 0. 
When summed over batch, they will be reduced to:\n #\n # mean(loss * sample_weight * total / valid)\n # = sum(loss * sample_weight * total / valid) / total\n # = sum(loss * sample_weight) / total * total / valid\n # = sum(loss * sample_weight) / valid\n\n total = tf.cast(tf.size(mask), losses.dtype)\n valid = tf.reduce_sum(mask)\n mask *= total / valid\n\n return apply_mask(losses, sw, mask)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 233, "n_words": 94, "vocab_size": 51, "complexity": 3, "nloc": 8, "token_counts": 83, "n_ast_nodes": 131, "n_identifiers": 16, "d_id": 82878, "documentation": { "docstring": "Redistribute sample weights considering only valid entries.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 101524, "commit_id": "dc18c74eea0c7837a820d27628cb12b0824fa30e", "repo": "faceswap", "path": "lib/gui/utils.py", "file_name": "utils.py", "fun_name": "modified_vars", "commit_message": "Bugfix: Preview for extract in batch mode", "code": "def modified_vars(self) -> Dict[str, \"tk.BooleanVar\"]:\n \n assert self.command_notebook is not None\n return self.command_notebook.modified_vars\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 33, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 26, "n_ast_nodes": 43, "n_identifiers": 5, "d_id": 20935, "documentation": { "docstring": " dict: The command notebook modified tkinter variables. ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 8, "language": "en" } }, { "id": 153666, "commit_id": "0c1a2129df64cf45bf1ff49c8ed92c510fdb1c82", "repo": "modin", "path": "modin/experimental/core/execution/native/implementations/omnisci_on_native/exchange/dataframe_protocol/dataframe.py", "file_name": "dataframe.py", "fun_name": "_is_zero_copy_possible", "commit_message": "FEAT-#4244: Implement dataframe exchange protocol for OmniSci (#4269)\n\nCo-authored-by: Yaroslav Igoshev \r\nCo-authored-by: Vasily Litvinov \r\nSigned-off-by: Dmitry Chigarev ", "code": "def _is_zero_copy_possible(self) -> bool:\n \n if self.__is_zero_copy_possible is None:\n if self._df._has_arrow_table():\n # If PyArrow table is already materialized then we can\n # retrieve data zero-copy\n self.__is_zero_copy_possible = True\n elif not self._df._can_execute_arrow():\n # When not able to execute the plan via PyArrow means\n # that we have to involve OmniSci, so no zero-copy.\n self.__is_zero_copy_possible = False\n else:\n # Check whether the plan for PyArrow can be executed zero-copy\n self.__is_zero_copy_possible = self._is_zero_copy_arrow_op(self._df._op)\n return self.__is_zero_copy_possible\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 245, "n_words": 71, "vocab_size": 50, "complexity": 4, "nloc": 19, "token_counts": 64, "n_ast_nodes": 112, "n_identifiers": 9, "d_id": 35528, "documentation": { "docstring": "\n Check whether it's possible to retrieve data from the DataFrame zero-copy.\n\n The 'zero-copy' term also means that no extra computations or data transers\n are needed to access the data.\n\n Returns\n -------\n bool\n ", "n_words": 32, "vocab_size": 29, "n_whitespaces": 82, "language": "en" } }, { "id": 259435, "commit_id": "75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc", "repo": "scikit-learn", "path": 
"sklearn/_loss/tests/test_loss.py", "file_name": "test_loss.py", "fun_name": "test_loss_of_perfect_prediction", "commit_message": "ENH migrate GLMs / TweedieRegressor to linear loss (#22548)\n\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Thomas J. Fan ", "code": "def test_loss_of_perfect_prediction(loss, sample_weight):\n \n if not loss.is_multiclass:\n # Use small values such that exp(value) is not nan.\n raw_prediction = np.array([-10, -0.1, 0, 0.1, 3, 10])\n # If link is identity, we must respect the interval of y_pred:\n if isinstance(loss.link, IdentityLink):\n eps = 1e-10\n low = loss.interval_y_pred.low\n if not loss.interval_y_pred.low_inclusive:\n low = low + eps\n high = loss.interval_y_pred.high\n if not loss.interval_y_pred.high_inclusive:\n high = high - eps\n raw_prediction = np.clip(raw_prediction, low, high)\n y_true = loss.link.inverse(raw_prediction)\n else:\n # HalfMultinomialLoss\n y_true = np.arange(loss.n_classes).astype(float)\n # raw_prediction with entries -exp(10), but +exp(10) on the diagonal\n # this is close enough to np.inf which would produce nan\n raw_prediction = np.full(\n shape=(loss.n_classes, loss.n_classes),\n fill_value=-np.exp(10),\n dtype=float,\n )\n raw_prediction.flat[:: loss.n_classes + 1] = np.exp(10)\n\n if sample_weight == \"range\":\n sample_weight = np.linspace(1, y_true.shape[0], num=y_true.shape[0])\n\n loss_value = loss.loss(\n y_true=y_true,\n raw_prediction=raw_prediction,\n sample_weight=sample_weight,\n )\n constant_term = loss.constant_to_optimal_zero(\n y_true=y_true, sample_weight=sample_weight\n )\n # Comparing loss_value + constant_term to zero would result in large\n # round-off errors.\n assert_allclose(loss_value, -constant_term, atol=1e-14, rtol=1e-15)\n\n\n@pytest.mark.parametrize(\"loss\", LOSS_INSTANCES, ids=loss_instance_name)\n@pytest.mark.parametrize(\"sample_weight\", [None, \"range\"])", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"loss\", LOSS_INSTANCES, ids=loss_instance_name)\n@pytest.mark.parametrize(\"sample_weight\", [None, \"range\"])", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 438, "n_words": 159, "vocab_size": 110, "complexity": 6, "nloc": 32, "token_counts": 266, "n_ast_nodes": 446, "n_identifiers": 43, "d_id": 75769, "documentation": { "docstring": "Test value of perfect predictions.\n\n Loss of y_pred = y_true plus constant_to_optimal_zero should sums up to\n zero.\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 26, "language": "en" } }, { "id": 138036, "commit_id": "edb17fd2069844f12237c85ba6607afae536401d", "repo": "ray", "path": "python/ray/air/execution/resources/request.py", "file_name": "request.py", "fun_name": "head_bundle_is_empty", "commit_message": "[air/tune] Internal resource management 1 - Ray AIR resource manager implementation (#30777)\n\nPrerequisite to #30016\r\n\r\nThis PR adds a new Ray AIR resource manager to replace the PlacementGroupManager of Ray Tune. 
Details can be found in #30016.\r\n\r\nSpecifically, this PR\r\n- Adds the main resource manager abstractions\r\n- Renames (and moves) PlacementGroupFactory to ResourceRequest\r\n- Adds implementations and tests for a placement group based manager and a budget based manager\r\n\r\nSigned-off-by: Kai Fricke \r\nSigned-off-by: Kai Fricke \r\nCo-authored-by: matthewdeng ", "code": "def head_bundle_is_empty(self):\n \n return self._head_bundle_is_empty\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 19, "n_identifiers": 3, "d_id": 31282, "documentation": { "docstring": "Returns True if head bundle is empty while child bundles\n need resources.\n\n This is considered an internal API within Tune.\n ", "n_words": 20, "vocab_size": 19, "n_whitespaces": 41, "language": "en" } }, { "id": 275502, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/optimizer_v2/optimizer_v2.py", "file_name": "optimizer_v2.py", "fun_name": "_transform_unaggregated_gradients", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _transform_unaggregated_gradients(self, grads_and_vars):\n \n return grads_and_vars\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 18, "n_identifiers": 3, "d_id": 81400, "documentation": { "docstring": "Called in `apply_gradients` before gradient aggregation.", "n_words": 6, "vocab_size": 6, "n_whitespaces": 5, "language": "en" } }, { "id": 66677, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/patches/v12_0/set_quotation_status.py", "file_name": "set_quotation_status.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute():\n\n\tfrappe.db.sql(\n\t\t\n\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 1, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 5, "token_counts": 12, "n_ast_nodes": 23, "n_identifiers": 4, "d_id": 14287, "documentation": { "docstring": " UPDATE `tabQuotation` set status = 'Open'\n\t\twhere docstatus = 1 and status = 'Submitted' ", "n_words": 14, "vocab_size": 11, "n_whitespaces": 14, "language": "en" } }, { "id": 53411, "commit_id": "1d4218a287ef343f32f1e32482592b471be5df1d", "repo": "prefect", "path": "src/prefect/utilities/testing.py", "file_name": "testing.py", "fun_name": "temporary_settings", "commit_message": "Move `prefect.settings` to `prefect.settings.from_env()`", "code": "def temporary_settings(**kwargs):\n \n old_env = os.environ.copy()\n old_settings = prefect.settings.from_env()\n\n try:\n for setting in kwargs:\n os.environ[setting] = str(kwargs[setting])\n\n assert old_env != os.environ, \"Environment did not change\"\n new_settings = prefect.settings.from_env()\n assert new_settings != old_settings, \"Temporary settings did not change values\"\n yield new_settings\n\n finally:\n for setting in kwargs:\n if old_env.get(setting):\n os.environ[setting] = old_env[setting]\n else:\n os.environ.pop(setting, None)\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": 
"Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 172, "n_words": 52, "vocab_size": 35, "complexity": 5, "nloc": 16, "token_counts": 109, "n_ast_nodes": 178, "n_identifiers": 15, "d_id": 10795, "documentation": { "docstring": "\n Temporarily override setting values. \n \n This will _not_ mutate values that have been already been accessed at module\n load time.\n\n This function should only be used for testing.\n\n Example:\n >>> import prefect.settings\n >>> with temporary_settings(PREFECT_ORION_HOST=\"foo\"):\n >>> assert prefect.settings.from_env().orion_host == \"foo\"\n >>> assert prefect.settings.from_env().orion_host is None\n ", "n_words": 44, "vocab_size": 37, "n_whitespaces": 99, "language": "en" } }, { "id": 98450, "commit_id": "4ffb52489e662029a08169351cd997d525977e88", "repo": "sentry", "path": "src/sentry/search/events/filter.py", "file_name": "filter.py", "fun_name": "parse_semver", "commit_message": "fix(events-search): Return helpful error message on semver filter (#33785)\n\n'IN' type queries currently raise an unhandled KeyError, raising an\r\nInvalidSearchQuery instead.", "code": "def parse_semver(version, operator) -> Optional[SemverFilter]:\n \n (operator, negated) = handle_operator_negation(operator)\n try:\n operator = OPERATOR_TO_DJANGO[operator]\n except KeyError:\n raise InvalidSearchQuery(\"Invalid operation 'IN' for semantic version filter.\")\n\n version = version if \"@\" in version else f\"{SEMVER_FAKE_PACKAGE}@{version}\"\n parsed = parse_release_relay(version)\n parsed_version = parsed.get(\"version_parsed\")\n if parsed_version:\n # Convert `pre` to always be a string\n prerelease = parsed_version[\"pre\"] if parsed_version[\"pre\"] else \"\"\n semver_filter = SemverFilter(\n operator,\n [\n parsed_version[\"major\"],\n parsed_version[\"minor\"],\n parsed_version[\"patch\"],\n parsed_version[\"revision\"],\n 0 if prerelease else 1,\n prerelease,\n ],\n negated=negated,\n )\n if parsed[\"package\"] and parsed[\"package\"] != SEMVER_FAKE_PACKAGE:\n semver_filter.package = parsed[\"package\"]\n return semver_filter\n else:\n # Try to parse as a wildcard match\n package, version = version.split(\"@\", 1)\n version_parts = []\n if version:\n for part in version.split(\".\", 3):\n if part in SEMVER_WILDCARDS:\n break\n try:\n # We assume all ints for a wildcard match - not handling prerelease as\n # part of these\n version_parts.append(int(part))\n except ValueError:\n raise InvalidSearchQuery(INVALID_SEMVER_MESSAGE)\n\n package = package if package and package != SEMVER_FAKE_PACKAGE else None\n return SemverFilter(\"exact\", version_parts, package, negated)\n\n\nkey_conversion_map: Mapping[\n str,\n Callable[[SearchFilter, str, Mapping[str, Union[int, str, datetime]]], Optional[Sequence[any]]],\n] = {\n \"environment\": _environment_filter_converter,\n \"message\": _message_filter_converter,\n TRANSACTION_STATUS_ALIAS: _transaction_status_filter_converter,\n \"issue.id\": _issue_id_filter_converter,\n USER_DISPLAY_ALIAS: _user_display_filter_converter,\n ERROR_UNHANDLED_ALIAS: _error_unhandled_filter_converter,\n \"error.handled\": _error_handled_filter_converter,\n TEAM_KEY_TRANSACTION_ALIAS: _team_key_transaction_filter_converter,\n RELEASE_STAGE_ALIAS: _release_stage_filter_converter,\n SEMVER_ALIAS: _semver_filter_converter,\n SEMVER_PACKAGE_ALIAS: _semver_package_filter_converter,\n SEMVER_BUILD_ALIAS: _semver_build_filter_converter,\n}\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", 
"ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 651, "n_words": 191, "vocab_size": 132, "complexity": 14, "nloc": 50, "token_counts": 224, "n_ast_nodes": 498, "n_identifiers": 55, "d_id": 19571, "documentation": { "docstring": "\n Attempts to parse a release version using our semver syntax. version should be in\n format `@` or ``, where package_name is a string and\n version is a version string matching semver format (https://semver.org/). We've\n slightly extended this format to allow up to 4 integers. EG\n - sentry@1.2.3.4\n - sentry@1.2.3.4-alpha\n - 1.2.3.4\n - 1.2.3.4-alpha\n - 1.*\n ", "n_words": 55, "vocab_size": 39, "n_whitespaces": 91, "language": "en" } }, { "id": 280865, "commit_id": "39b3c96181ab9f33a44b4fe591b348b5b48ecf76", "repo": "allennlp", "path": "tests/training/learning_rate_schedulers/cosine_test.py", "file_name": "cosine_test.py", "fun_name": "test_schedules_with_save_and_resume", "commit_message": "Dependabot GitHub Actions (#5640)\n\n* chore: Included githubactions in the dependabot config\r\n\r\nThis should help with keeping the GitHub actions updated on new releases. This will also help with keeping it secure.\r\n\r\nDependabot helps in keeping the supply chain secure https://docs.github.com/en/code-security/dependabot\r\n\r\nGitHub actions up to date https://docs.github.com/en/code-security/dependabot/working-with-dependabot/keeping-your-actions-up-to-date-with-dependabot\r\n\r\nhttps://github.com/ossf/scorecard/blob/main/docs/checks.md#dependency-update-tool\r\nSigned-off-by: naveensrinivasan <172697+naveensrinivasan@users.noreply.github.com>\r\n\r\n* floats need approximate math\r\n\r\nCo-authored-by: naveensrinivasan <172697+naveensrinivasan@users.noreply.github.com>", "code": "def test_schedules_with_save_and_resume(self):\n \n", "url": "https://github.com/allenai/allennlp.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 9, "n_words": 2, "vocab_size": 2, "complexity": 5, "nloc": 15, "token_counts": 130, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 83445, "documentation": { "docstring": "Make sure scheduler will resume with the right state.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 111232, "commit_id": "91acc3ea75d219ad07ed2b106e7b8bdcb01516dd", "repo": "spaCy", "path": "spacy/pipeline/legacy/entity_linker.py", "file_name": "entity_linker.py", "fun_name": "to_bytes", "commit_message": "Fix entity linker batching (#9669)\n\n* Partial fix of entity linker batching\r\n\r\n* Add import\r\n\r\n* Better name\r\n\r\n* Add `use_gold_ents` option, docs\r\n\r\n* Change to v2, create stub v1, update docs etc.\r\n\r\n* Fix error type\r\n\r\nHonestly no idea what the right type to use here is.\r\nConfigValidationError seems wrong. 
Maybe a NotImplementedError?\r\n\r\n* Make mypy happy\r\n\r\n* Add hacky fix for init issue\r\n\r\n* Add legacy pipeline entity linker\r\n\r\n* Fix references to class name\r\n\r\n* Add __init__.py for legacy\r\n\r\n* Attempted fix for loss issue\r\n\r\n* Remove placeholder V1\r\n\r\n* formatting\r\n\r\n* slightly more interesting train data\r\n\r\n* Handle batches with no usable examples\r\n\r\nThis adds a test for batches that have docs but not entities, and a\r\ncheck in the component that detects such cases and skips the update step\r\nas thought the batch were empty.\r\n\r\n* Remove todo about data verification\r\n\r\nCheck for empty data was moved further up so this should be OK now - the\r\ncase in question shouldn't be possible.\r\n\r\n* Fix gradient calculation\r\n\r\nThe model doesn't know which entities are not in the kb, so it generates\r\nembeddings for the context of all of them.\r\n\r\nHowever, the loss does know which entities aren't in the kb, and it\r\nignores them, as there's no sensible gradient.\r\n\r\nThis has the issue that the gradient will not be calculated for some of\r\nthe input embeddings, which causes a dimension mismatch in backprop.\r\nThat should have caused a clear error, but with numpyops it was causing\r\nnans to happen, which is another problem that should be addressed\r\nseparately.\r\n\r\nThis commit changes the loss to give a zero gradient for entities not in\r\nthe kb.\r\n\r\n* add failing test for v1 EL legacy architecture\r\n\r\n* Add nasty but simple working check for legacy arch\r\n\r\n* Clarify why init hack works the way it does\r\n\r\n* Clarify use_gold_ents use case\r\n\r\n* Fix use gold ents related handling\r\n\r\n* Add tests for no gold ents and fix other tests\r\n\r\n* Use aligned ents function (not working)\r\n\r\nThis doesn't actually work because the \"aligned\" ents are gold-only. 
But\r\nif I have a different function that returns the intersection, *then*\r\nthis will work as desired.\r\n\r\n* Use proper matching ent check\r\n\r\nThis changes the process when gold ents are not used so that the\r\nintersection of ents in the pred and gold is used.\r\n\r\n* Move get_matching_ents to Example\r\n\r\n* Use model attribute to check for legacy arch\r\n\r\n* Rename flag\r\n\r\n* bump spacy-legacy to lower 3.0.9\r\n\r\nCo-authored-by: svlandeg ", "code": "def to_bytes(self, *, exclude=tuple()):\n \n self._validate_serialization_attrs()\n serialize = {}\n if hasattr(self, \"cfg\") and self.cfg is not None:\n serialize[\"cfg\"] = lambda: srsly.json_dumps(self.cfg)\n serialize[\"vocab\"] = lambda: self.vocab.to_bytes(exclude=exclude)\n serialize[\"kb\"] = self.kb.to_bytes\n serialize[\"model\"] = self.model.to_bytes\n return util.to_bytes(serialize, exclude)\n", "url": "https://github.com/explosion/spaCy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 100, "n_words": 33, "vocab_size": 28, "complexity": 3, "nloc": 16, "token_counts": 99, "n_ast_nodes": 165, "n_identifiers": 14, "d_id": 24362, "documentation": { "docstring": "Serialize the pipe to a bytestring.\n\n exclude (Iterable[str]): String names of serialization fields to exclude.\n RETURNS (bytes): The serialized object.\n\n DOCS: https://spacy.io/api/entitylinker#to_bytes\n ", "n_words": 22, "vocab_size": 21, "n_whitespaces": 50, "language": "en" } }, { "id": 207316, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_scripts/tests.py", "file_name": "tests.py", "fun_name": "test_importable_project_name", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_importable_project_name(self):\n \n bad_name = \"os\"\n args = [\"startproject\", bad_name]\n testproject_dir = os.path.join(self.test_dir, bad_name)\n\n out, err = self.run_django_admin(args)\n self.assertOutput(\n err,\n \"CommandError: 'os' conflicts with the name of an existing \"\n \"Python module and cannot be used as a project name. Please try \"\n \"another name.\",\n )\n self.assertFalse(os.path.exists(testproject_dir))\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 146, "n_words": 46, "vocab_size": 42, "complexity": 1, "nloc": 12, "token_counts": 64, "n_ast_nodes": 112, "n_identifiers": 15, "d_id": 51927, "documentation": { "docstring": "\n startproject validates that project name doesn't clash with existing\n Python modules.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 33, "language": "en" } }, { "id": 220813, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/tasks.py", "file_name": "tasks.py", "fun_name": "_cancel_and_wait", "commit_message": "add python 3.10.4 for windows", "code": "async def _cancel_and_wait(fut, loop):\n \n\n waiter = loop.create_future()\n cb = functools.partial(_release_waiter, waiter)\n fut.add_done_callback(cb)\n\n try:\n fut.cancel()\n # We cannot wait on *fut* directly to make\n # sure _cancel_and_wait itself is reliably cancellable.\n await waiter\n finally:\n fut.remove_done_callback(cb)\n\n\n# This is *not* a @coroutine! 
It is just an iterator (yielding Futures).", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 100, "n_words": 47, "vocab_size": 41, "complexity": 2, "nloc": 9, "token_counts": 48, "n_ast_nodes": 87, "n_identifiers": 12, "d_id": 56126, "documentation": { "docstring": "Cancel the *fut* future or task and wait until it completes.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 264750, "commit_id": "4bb9b6ee2639db683b70d6ddbee055497e0a3647", "repo": "netbox", "path": "netbox/dcim/models/cables.py", "file_name": "cables.py", "fun_name": "from_db", "commit_message": "Extend Cable model to support multiple A/B terminations", "code": "def from_db(cls, db, field_names, values):\n \n instance = super().from_db(db, field_names, values)\n\n instance._orig_termination_a_type_id = instance.termination_a_type_id\n instance._orig_termination_a_ids = instance.termination_a_ids\n instance._orig_termination_b_type_id = instance.termination_b_type_id\n instance._orig_termination_b_ids = instance.termination_b_ids\n\n return instance\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 73, "n_words": 24, "vocab_size": 18, "complexity": 1, "nloc": 7, "token_counts": 56, "n_ast_nodes": 87, "n_identifiers": 15, "d_id": 77794, "documentation": { "docstring": "\n Cache the original A and B terminations of existing Cable instances for later reference inside clean().\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 31, "language": "en" } }, { "id": 157367, "commit_id": "ca86da3a30c4e080d4db8c25fca73de843663cb4", "repo": "stablediffusion", "path": "ldm/models/diffusion/dpm_solver/dpm_solver.py", "file_name": "dpm_solver.py", "fun_name": "expand_dims", "commit_message": "release more models", "code": "def expand_dims(v, dims):\n \n return v[(...,) + (None,) * (dims - 1)]", "url": "https://github.com/Stability-AI/stablediffusion.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 17, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 2, "token_counts": 27, "n_ast_nodes": 42, "n_identifiers": 3, "d_id": 36907, "documentation": { "docstring": "\n Expand the tensor `v` to the dim `dims`.\n Args:\n `v`: a PyTorch tensor with shape [N].\n `dim`: a `int`.\n Returns:\n a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.\n ", "n_words": 36, "vocab_size": 25, "n_whitespaces": 70, "language": "en" } }, { "id": 241651, "commit_id": "5b59c951e28ddc8bb884f044b1f46fb54c23a8b8", "repo": "lightning", "path": "tests/trainer/test_supporters.py", "file_name": "test_supporters.py", "fun_name": "test_combined_data_loader_with_max_size_cycle_and_ddp", "commit_message": "Deprecate `TrainerDataLoadingMixin` and move logic to `DataConnector` (#11282)\n\nCo-authored-by: Rohit Gupta \r\nCo-authored-by: Aki Nitta \r\nCo-authored-by: Carlos Mocholí ", "code": "def test_combined_data_loader_with_max_size_cycle_and_ddp(replace_sampler_ddp, tmpdir):\n \n trainer = Trainer(strategy=\"ddp\", accelerator=\"auto\", devices=2, replace_sampler_ddp=replace_sampler_ddp)\n\n dataloader = CombinedLoader(\n {\"a\": DataLoader(RandomDataset(32, 8), batch_size=1), \"b\": DataLoader(RandomDataset(32, 8), batch_size=1)},\n )\n dataloader = trainer._data_connector._prepare_dataloader(dataloader, shuffle=False)\n assert len(dataloader) == 4 if 
replace_sampler_ddp else 8\n\n for a_length in [6, 8, 10]:\n dataloader = CombinedLoader(\n {\n \"a\": DataLoader(range(a_length), batch_size=1),\n \"b\": DataLoader(range(8), batch_size=1),\n },\n mode=\"max_size_cycle\",\n )\n\n length = max(a_length, 8)\n assert len(dataloader) == length\n dataloader = trainer._data_connector._prepare_dataloader(dataloader, shuffle=False)\n assert len(dataloader) == length // 2 if replace_sampler_ddp else length\n if replace_sampler_ddp:\n last_batch = list(dataloader)[-1]\n if a_length == 6:\n assert last_batch == {\"a\": torch.tensor([0]), \"b\": torch.tensor([6])}\n elif a_length == 8:\n assert last_batch == {\"a\": torch.tensor([6]), \"b\": torch.tensor([6])}\n elif a_length == 10:\n assert last_batch == {\"a\": torch.tensor([8]), \"b\": torch.tensor([0])}\n", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 341, "n_words": 112, "vocab_size": 57, "complexity": 8, "nloc": 41, "token_counts": 399, "n_ast_nodes": 459, "n_identifiers": 26, "d_id": 69641, "documentation": { "docstring": "This test makes sure distributed sampler has been properly injected in dataloaders when using CombinedLoader\n with ddp and `max_size_cycle` mode.", "n_words": 20, "vocab_size": 20, "n_whitespaces": 22, "language": "en" } }, { "id": 9830, "commit_id": "933415bfa1f9eb89f935037014dfed816eb9815d", "repo": "jina", "path": "jina/parsers/__init__.py", "file_name": "__init__.py", "fun_name": "set_pea_parser", "commit_message": "feat: star routing (#3900)\n\n* feat(proto): adjust proto for star routing (#3844)\r\n\r\n* feat(proto): adjust proto for star routing\r\n\r\n* feat(proto): generate proto files\r\n\r\n* feat(grpc): refactor grpclet interface (#3846)\r\n\r\n* feat: refactor connection pool for star routing (#3872)\r\n\r\n* feat(k8s): add more labels to k8s deployments\r\n\r\n* feat(network): refactor connection pool\r\n\r\n* feat(network): refactor k8s pool\r\n\r\n* feat: star routing graph gateway (#3877)\r\n\r\n* feat: star routing - refactor grpc data runtime (#3887)\r\n\r\n* feat(runtimes): refactor grpc dataruntime\r\n\r\n* fix(tests): adapt worker runtime tests\r\n\r\n* fix(import): fix import\r\n\r\n* feat(proto): enable sending multiple lists (#3891)\r\n\r\n* feat: star routing gateway (#3893)\r\n\r\n* feat: star routing gateway all protocols (#3897)\r\n\r\n* test: add streaming and prefetch tests (#3901)\r\n\r\n* feat(head): new head runtime for star routing (#3899)\r\n\r\n* feat(head): new head runtime\r\n\r\n* feat(head): new head runtime\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(network): improve proto comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(worker): merge docs in worker runtime (#3905)\r\n\r\n* feat(worker): merge docs in worker runtime\r\n\r\n* feat(tests): assert after clean up\r\n\r\n* feat(tests): star routing runtime integration tests (#3908)\r\n\r\n* fix(tests): fix integration tests\r\n\r\n* test: test runtimes fast slow request (#3910)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table (#3915)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(zmq): adapt comment in dependency list\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix type tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: add test gateway to worker connection (#3921)\r\n\r\n* feat(pea): adapt peas for star routing (#3918)\r\n\r\n* feat(pea): adapt peas 
for star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(pea): add tests\r\n\r\n* feat(tests): add failing head pea test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(tests): integration tests for peas (#3923)\r\n\r\n* feat(tests): integration tests for peas\r\n\r\n* feat(pea): remove _inner_pea function\r\n\r\n* feat: star routing container pea (#3922)\r\n\r\n* test: rescue tests (#3942)\r\n\r\n* fix: fix streaming tests (#3945)\r\n\r\n* refactor: move docker run to run (#3948)\r\n\r\n* feat: star routing pods (#3940)\r\n\r\n* feat(pod): adapt pods for star routing\r\n\r\n* feat(pods): adapt basepod to star routing\r\n\r\n* feat(pod): merge pod and compound pod\r\n\r\n* feat(tests): fix tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(test): add container pea int test\r\n\r\n* feat(ci): remove more unnecessary tests\r\n\r\n* fix(tests): remove jinad runtime\r\n\r\n* feat(ci): remove latency tracking\r\n\r\n* fix(ci): fix ci def\r\n\r\n* fix(runtime): enable runtime to be exited\r\n\r\n* fix(tests): wrap runtime test in process\r\n\r\n* fix(runtimes): remove unused runtimes\r\n\r\n* feat(runtimes): improve cancel wait\r\n\r\n* fix(ci): build test pip again in ci\r\n\r\n* fix(tests): fix a test\r\n\r\n* fix(test): run async in its own process\r\n\r\n* feat(pod): include shard in activate msg\r\n\r\n* fix(pea): dont join\r\n\r\n* feat(pod): more debug out\r\n\r\n* feat(grpc): manage channels properly\r\n\r\n* feat(pods): remove exitfifo\r\n\r\n* feat(network): add simple send retry mechanism\r\n\r\n* fix(network): await pool close\r\n\r\n* fix(test): always close grpc server in worker\r\n\r\n* fix(tests): remove container pea from tests\r\n\r\n* fix(tests): reorder tests\r\n\r\n* fix(ci): split tests\r\n\r\n* fix(ci): allow alias setting\r\n\r\n* fix(test): skip a test\r\n\r\n* feat(pods): address comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: unblock skipped test (#3957)\r\n\r\n* feat: jinad pea (#3949)\r\n\r\n* feat: jinad pea\r\n\r\n* feat: jinad pea\r\n\r\n* test: remote peas\r\n\r\n* test: toplogy tests with jinad\r\n\r\n* ci: parallel jobs\r\n\r\n* feat(tests): add pod integration tests (#3958)\r\n\r\n* feat(tests): add pod integration tests\r\n\r\n* fix(tests): make tests less flaky\r\n\r\n* fix(test): fix test\r\n\r\n* test(pea): remote pea topologies (#3961)\r\n\r\n* test(pea): remote pea simple topology\r\n\r\n* test: remote pea topologies\r\n\r\n* refactor: refactor streamer result handling (#3960)\r\n\r\n* feat(k8s): adapt K8s Pod for StarRouting (#3964)\r\n\r\n* test: optimize k8s test\r\n\r\n* test: increase timeout and use different namespace\r\n\r\n* test: optimize k8s test\r\n\r\n* test: build and load image when needed\r\n\r\n* test: refactor k8s test\r\n\r\n* test: fix image name error\r\n\r\n* test: fix k8s image load\r\n\r\n* test: fix typoe port expose\r\n\r\n* test: update tests in connection pool and handling\r\n\r\n* test: remove unused fixture\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* feat(k8s): adapt k8s pod for star routing\r\n\r\n* fix(k8s): dont overwrite add/remove function in pool\r\n\r\n* fix(k8s): some fixes\r\n\r\n* fix(k8s): some more fixes\r\n\r\n* fix(k8s): linting\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix k8s unit tests\r\n\r\n* feat(k8s): complete k8s integration test\r\n\r\n* feat(k8s): finish k8s tests\r\n\r\n* feat(k8s): fix test\r\n\r\n* fix(tests): fix test with no name\r\n\r\n* 
feat(k8s): unify create/replace interface\r\n\r\n* feat(k8s): extract k8s port constants\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): wait for runtime being ready in tests\r\n\r\n* feat(k8s): address comments\r\n\r\nCo-authored-by: bwanglzu \r\n\r\n* feat(flow): adapt Flow for StarRouting (#3986)\r\n\r\n* feat(flow): add routes\r\n\r\n* feat(flow): adapt flow to star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(flow): handle empty topologies\r\n\r\n* feat(k8s): allow k8s pool disabling\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(test): fix test with mock\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(flow): clean up tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat: add plot function (#3994)\r\n\r\n* fix(tests): avoid hanging tests\r\n\r\n* feat(flow): add type hinting\r\n\r\n* fix(test): fix duplicate exec name in test\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): enable jinad test again\r\n\r\n* fix(tests): random port fixture\r\n\r\n* fix(style): replace quotes\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(ci): bring back ci (#3997)\r\n\r\n* feat(ci): enable ci again\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(ci): add latency tracking\r\n\r\n* feat(ci): bring back some tests\r\n\r\n* fix(tests): remove invalid port test\r\n\r\n* feat(ci): disable daemon and distributed tests\r\n\r\n* fix(tests): fix entrypoint in hub test\r\n\r\n* fix(tests): wait for gateway to be ready\r\n\r\n* fix(test): fix more tests\r\n\r\n* feat(flow): do rolling update and scale sequentially\r\n\r\n* fix(tests): fix more tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat: star routing hanging pods (#4011)\r\n\r\n* fix: try to handle hanging pods better\r\n\r\n* test: hanging pods test work\r\n\r\n* fix: fix topology graph problem\r\n\r\n* test: add unit test to graph\r\n\r\n* fix(tests): fix k8s tests\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s pool test\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s connection pool setting\r\n\r\n* fix(tests): make runtime test more reliable\r\n\r\n* fix(test): fix routes test\r\n\r\n* fix(tests): make rolling update test less flaky\r\n\r\n* feat(network): gurantee unique ports\r\n\r\n* feat(network): do round robin for shards\r\n\r\n* fix(ci): increase pytest timeout to 10 min\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix(ci): fix ci file\r\n\r\n* feat(daemon): jinad pod for star routing\r\n\r\n* Revert \"feat(daemon): jinad pod for star routing\"\r\n\r\nThis reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92.\r\n\r\n* feat(daemon): remote jinad pod support (#4042)\r\n\r\n* feat(daemon): add pod tests for star routing\r\n\r\n* feat(daemon): add remote pod test\r\n\r\n* test(daemon): add remote pod arguments test\r\n\r\n* test(daemon): add async scale test\r\n\r\n* test(daemon): add rolling update test\r\n\r\n* test(daemon): fix host\r\n\r\n* feat(proto): remove message proto (#4051)\r\n\r\n* feat(proto): remove message proto\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix some more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(proto): put docs back in data\r\n\r\n* fix(proto): clean up\r\n\r\n* feat(proto): clean up\r\n\r\n* fix(tests): skip latency tracking\r\n\r\n* fix(test): 
fix hub test\r\n\r\n* fix(tests): fix k8s test\r\n\r\n* fix(test): some test clean up\r\n\r\n* fix(style): clean up style issues\r\n\r\n* feat(proto): adjust for rebase\r\n\r\n* fix(tests): bring back latency tracking\r\n\r\n* fix(tests): fix merge accident\r\n\r\n* feat(proto): skip request serialization (#4074)\r\n\r\n* feat: add reduce to star routing (#4070)\r\n\r\n* feat: add reduce on shards to head runtime\r\n\r\n* test: add reduce integration tests with fixed order\r\n\r\n* feat: add reduce on needs\r\n\r\n* chore: get_docs_matrix_from_request becomes public\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* docs: remove undeterministic results warning\r\n\r\n* fix: fix uses_after\r\n\r\n* test: assert correct num docs after reducing in test_external_pod\r\n\r\n* test: correct asserts after reduce in test_rolling_update\r\n\r\n* fix: no reduce if uses_after_address is set\r\n\r\n* fix: get_docs_from_request only if needed\r\n\r\n* fix: fix tests after merge\r\n\r\n* refactor: move reduce from data_request_handler to head\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* chore: apply suggestions\r\n\r\n* fix: fix asserts\r\n\r\n* chore: minor test fix\r\n\r\n* chore: apply suggestions\r\n\r\n* test: remove flow tests with external executor (pea)\r\n\r\n* fix: fix test_expected_messages_routing\r\n\r\n* fix: fix test_func_joiner\r\n\r\n* test: adapt k8s test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix(k8s): fix static pool config\r\n\r\n* fix: use custom protoc doc generator image (#4088)\r\n\r\n* fix: use custom protoc doc generator image\r\n\r\n* fix(docs): minor doc improvement\r\n\r\n* fix(docs): use custom image\r\n\r\n* fix(docs): copy docarray\r\n\r\n* fix: doc building local only\r\n\r\n* fix: timeout doc building\r\n\r\n* fix: use updated args when building ContainerPea\r\n\r\n* test: add container PeaFactory test\r\n\r\n* fix: force pea close on windows (#4098)\r\n\r\n* fix: dont reduce if uses exist (#4099)\r\n\r\n* fix: dont use reduce if uses exist\r\n\r\n* fix: adjust reduce tests\r\n\r\n* fix: adjust more reduce tests\r\n\r\n* fix: fix more tests\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: ignore non jina resources (#4101)\r\n\r\n* feat(executor): enable async executors (#4102)\r\n\r\n* feat(daemon): daemon flow on star routing (#4096)\r\n\r\n* test(daemon): add remote flow test\r\n\r\n* feat(daemon): call scale in daemon\r\n\r\n* feat(daemon): remove tail args and identity\r\n\r\n* test(daemon): rename scalable executor\r\n\r\n* test(daemon): add a small delay in async test\r\n\r\n* feat(daemon): scale partial flow only\r\n\r\n* feat(daemon): call scale directly in partial flow store\r\n\r\n* test(daemon): use asyncio sleep\r\n\r\n* feat(daemon): enable flow level distributed tests\r\n\r\n* test(daemon): fix jinad env workspace config\r\n\r\n* test(daemon): fix pod test use new port rolling update\r\n\r\n* feat(daemon): enable distribuetd tests\r\n\r\n* test(daemon): remove duplicate tests and zed runtime test\r\n\r\n* test(daemon): fix stores unit test\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* test: correct test paths\r\n\r\n* test(daemon): add client test for remote flows\r\n\r\n* test(daemon): send a request with jina client\r\n\r\n* test(daemon): assert async generator\r\n\r\n* test(daemon): small interval between tests\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): fix 
executor name\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): use async client fetch result\r\n\r\n* test(daemon): finish container flow test\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): correct test paths\r\n\r\n* test(daemon): add small delay for async tests\r\n\r\n* fix: star routing fixes (#4100)\r\n\r\n* docs: update docs\r\n\r\n* fix: fix Request.__repr__\r\n\r\n* docs: update flow remarks\r\n\r\n* docs: fix typo\r\n\r\n* test: add non_empty_fields test\r\n\r\n* chore: remove non_empty_fields test\r\n\r\n* feat: polling per endpoint (#4111)\r\n\r\n* feat(polling): polling per endpoint configurable\r\n\r\n* fix: adjust tests\r\n\r\n* feat(polling): extend documentation\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: clean up\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: remove repeat from flaky test\r\n\r\n* fix: k8s test\r\n\r\n* feat(polling): address pr feedback\r\n\r\n* feat: improve docs\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(grpc): support connect grpc server via ssl tunnel (#4092)\r\n\r\n* feat(grpc): support ssl grpc connect if port is 443\r\n\r\n* fix(grpc): use https option instead of detect port automatically\r\n\r\n* chore: fix typo\r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* test(networking): add test for peapods networking\r\n\r\n* fix: address comments\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(polling): unify polling args (#4113)\r\n\r\n* fix: several issues for jinad pods (#4119)\r\n\r\n* fix: activate for jinad pods\r\n\r\n* fix: dont expose worker pod in partial daemon\r\n\r\n* fix: workspace setting\r\n\r\n* fix: containerized flows\r\n\r\n* fix: hub test\r\n\r\n* feat(daemon): remote peas on star routing (#4112)\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix sync async client test\r\n\r\n* test(daemon): enable remote peas test\r\n\r\n* test(daemon): replace send message to send request\r\n\r\n* test(daemon): declare pea tests in ci\r\n\r\n* test(daemon): use pea args fixture\r\n\r\n* test(daemon): head pea use default host\r\n\r\n* test(daemon): fix peas topologies\r\n\r\n* test(daemon): fix pseudo naming\r\n\r\n* test(daemon): use default host as host\r\n\r\n* test(daemon): fix executor path\r\n\r\n* test(daemon): add remote worker back\r\n\r\n* test(daemon): skip local remote remote topology\r\n\r\n* fix: jinad pea test setup\r\n\r\n* fix: jinad pea tests\r\n\r\n* fix: remove invalid assertion\r\n\r\nCo-authored-by: jacobowitz \r\n\r\n* feat: enable daemon tests again (#4132)\r\n\r\n* feat: enable daemon tests again\r\n\r\n* fix: remove bogy empty script file\r\n\r\n* fix: more jinad test fixes\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: scale and ru in jinad\r\n\r\n* fix: fix more jinad tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix: fix flow test\r\n\r\n* fix: improve pea tests reliability (#4136)\r\n\r\nCo-authored-by: Joan Fontanals \r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Deepankar Mahapatro \r\nCo-authored-by: bwanglzu 
\r\nCo-authored-by: AlaeddineAbdessalem \r\nCo-authored-by: Zhaofeng Miao <522856232@qq.com>", "code": "def set_pea_parser(parser=None):\n \n if not parser:\n from .base import set_base_parser\n\n parser = set_base_parser()\n\n from .peapods.base import mixin_base_ppr_parser\n from .peapods.runtimes.worker import mixin_worker_runtime_parser\n from .peapods.runtimes.container import mixin_container_runtime_parser\n from .peapods.runtimes.remote import mixin_remote_runtime_parser\n from .peapods.pea import mixin_pea_parser\n from .peapods.runtimes.distributed import mixin_distributed_feature_parser\n from .hubble.pull import mixin_hub_pull_options_parser\n\n mixin_base_ppr_parser(parser)\n mixin_worker_runtime_parser(parser)\n mixin_container_runtime_parser(parser)\n mixin_remote_runtime_parser(parser)\n mixin_distributed_feature_parser(parser)\n mixin_pea_parser(parser)\n mixin_hub_pull_options_parser(parser)\n mixin_head_parser(parser)\n\n return parser\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 118, "n_words": 50, "vocab_size": 35, "complexity": 2, "nloc": 20, "token_counts": 113, "n_ast_nodes": 193, "n_identifiers": 21, "d_id": 1716, "documentation": { "docstring": "Set the parser for the Pea\n\n :param parser: an optional existing parser to build upon\n :return: the parser\n ", "n_words": 18, "vocab_size": 14, "n_whitespaces": 27, "language": "en" } }, { "id": 204481, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/files/locks.py", "file_name": "locks.py", "fun_name": "_fd", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _fd(f):\n \n return f.fileno() if hasattr(f, \"fileno\") else f\n\n\nif os.name == \"nt\":\n import msvcrt\n from ctypes import (\n POINTER,\n Structure,\n Union,\n byref,\n c_int64,\n c_ulong,\n c_void_p,\n sizeof,\n windll,\n )\n from ctypes.wintypes import BOOL, DWORD, HANDLE\n\n LOCK_SH = 0 # the default\n LOCK_NB = 0x1 # LOCKFILE_FAIL_IMMEDIATELY\n LOCK_EX = 0x2 # LOCKFILE_EXCLUSIVE_LOCK\n\n # --- Adapted from the pyserial project ---\n # detect size of ULONG_PTR\n if sizeof(c_ulong) != sizeof(c_void_p):\n ULONG_PTR = c_int64\n else:\n ULONG_PTR = c_ulong\n PVOID = c_void_p\n\n # --- Union inside Structure by stackoverflow:3480240 ---", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 210, "n_words": 86, "vocab_size": 64, "complexity": 2, "nloc": 2, "token_counts": 21, "n_ast_nodes": 161, "n_identifiers": 26, "d_id": 50743, "documentation": { "docstring": "Get a filedescriptor from something which could be a file or an fd.", "n_words": 13, "vocab_size": 12, "n_whitespaces": 12, "language": "en" } }, { "id": 197104, "commit_id": "905eb426131ca9542a6b258462d9ae984e5b2563", "repo": "sympy", "path": "sympy/physics/units/quantities.py", "file_name": "quantities.py", "fun_name": "set_scale_factor", "commit_message": "Update the deprecation warnings in sympy.physics.units", "code": "def set_scale_factor(self, scale_factor, unit_system=\"SI\"):\n sympy_deprecation_warning(\n f,\n deprecated_since_version=\"1.5\",\n active_deprecations_target=\"deprecated-quantity-methods\",\n )\n from sympy.physics.units import UnitSystem\n unit_system = UnitSystem.get_unit_system(unit_system)\n unit_system.set_quantity_scale_factor(self, scale_factor)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", 
"ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 85, "n_words": 18, "vocab_size": 18, "complexity": 1, "nloc": 13, "token_counts": 49, "n_ast_nodes": 82, "n_identifiers": 13, "d_id": 48341, "documentation": { "docstring": "\n Quantity.set_scale_factor() is deprecated. Use either\n unit_system.set_quantity_scale_factors() or\n {self}.set_global_relative_scale_factor() instead.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 54, "language": "en" } }, { "id": 156750, "commit_id": "2820bae493a49cb1d0a6e376985c5473b8f04fa8", "repo": "dask", "path": "dask/array/core.py", "file_name": "core.py", "fun_name": "trace", "commit_message": "Don't include docs in ``Array`` methods, just refer to module docs (#9244)\n\nCo-authored-by: James Bourbeau ", "code": "def trace(self, offset=0, axis1=0, axis2=1, dtype=None):\n \n from dask.array.reductions import trace\n\n return trace(self, offset=offset, axis1=axis1, axis2=axis2, dtype=dtype)\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 37, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 3, "token_counts": 51, "n_ast_nodes": 72, "n_identifiers": 9, "d_id": 36760, "documentation": { "docstring": "Return the sum along diagonals of the array.\n\n Refer to :func:`dask.array.trace` for full documentation.\n\n See Also\n --------\n dask.array.trace : equivalent function\n ", "n_words": 21, "vocab_size": 20, "n_whitespaces": 56, "language": "en" } }, { "id": 312785, "commit_id": "8b38fa58aa45d1809f6900729b4046d6c02c2230", "repo": "core", "path": "tests/test_config.py", "file_name": "test_config.py", "fun_name": "test_ensure_config_exists_creates_config", "commit_message": "Bump pytest to 7.0.0 (#65981)", "code": "async def test_ensure_config_exists_creates_config(hass):\n \n assert not os.path.isfile(YAML_PATH)\n with patch(\"builtins.print\") as mock_print:\n await config_util.async_ensure_config_exists(hass)\n\n assert os.path.isfile(YAML_PATH)\n assert mock_print.called\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 38, "n_words": 16, "vocab_size": 13, "complexity": 1, "nloc": 6, "token_counts": 44, "n_ast_nodes": 80, "n_identifiers": 11, "d_id": 111422, "documentation": { "docstring": "Test that calling ensure_config_exists.\n\n If not creates a new config file.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 17, "language": "en" } }, { "id": 196182, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/permutations.py", "file_name": "permutations.py", "fun_name": "is_even", "commit_message": "Updated import locations", "code": "def is_even(self):\n \n return not self.is_odd\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 21, "n_identifiers": 3, "d_id": 47682, "documentation": { "docstring": "\n Checks if a permutation is even.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation\n >>> p = Permutation([0, 1, 2, 3])\n >>> p.is_even\n True\n >>> p = Permutation([3, 2, 1, 0])\n >>> p.is_even\n True\n\n See Also\n ========\n\n is_odd\n ", "n_words": 37, "vocab_size": 26, "n_whitespaces": 136, "language": "en" } }, { "id": 196844, "commit_id": 
"c5aa4e76c9642ebb2cf0fe105e46222b541966b2", "repo": "sympy", "path": "sympy/testing/pytest.py", "file_name": "pytest.py", "fun_name": "warns", "commit_message": "Fix the formatting of an error message", "code": "def warns(warningcls, *, match='', test_stacklevel=True):\n \n # Absorbs all warnings in warnrec\n with warnings.catch_warnings(record=True) as warnrec:\n # Hide all warnings but make sure that our warning is emitted\n warnings.simplefilter(\"ignore\")\n warnings.filterwarnings(\"always\", match, warningcls)\n # Now run the test\n yield warnrec\n\n # Raise if expected warning not found\n if not any(issubclass(w.category, warningcls) for w in warnrec):\n msg = ('Failed: DID NOT WARN.'\n ' No warnings of type %s was emitted.'\n ' The list of emitted warnings is: %s.'\n ) % (warningcls, [w.message for w in warnrec])\n raise Failed(msg)\n\n if test_stacklevel:\n for f in inspect.stack():\n thisfile = f.filename\n file = os.path.split(thisfile)[1]\n if file.startswith('test_'):\n break\n elif file == 'doctest.py':\n # skip the stacklevel testing in the doctests of this\n # function\n return\n else:\n raise RuntimeError(\"Could not find the file for the given warning to test the stacklevel\")\n for w in warnrec:\n if w.filename != thisfile:\n msg = f.replace('\\n', ' ')\n raise Failed(msg)\n\n if warningcls == SymPyDeprecationWarning:\n this_file = pathlib.Path(__file__)\n active_deprecations_file = (this_file.parent.parent.parent / 'doc' /\n 'src' / 'explanation' /\n 'active-deprecations.md')\n if not active_deprecations_file.exists():\n # We can only test that the active_deprecations_target works if we are\n # in the git repo.\n return\n targets = []\n for w in warnrec:\n targets.append(w.message.active_deprecations_target)\n with open(active_deprecations_file) as f:\n text = f.read()\n for target in targets:\n if f'({target})=' not in text:\n raise Failed(f\"The active deprecations target {target!r} does not appear to be a valid target in the active-deprecations.md file ({active_deprecations_file}).\")\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 718, "n_words": 229, "vocab_size": 139, "complexity": 15, "nloc": 44, "token_counts": 263, "n_ast_nodes": 485, "n_identifiers": 44, "d_id": 48214, "documentation": { "docstring": "\n Like raises but tests that warnings are emitted.\n\n >>> from sympy.testing.pytest import warns\n >>> import warnings\n\n >>> with warns(UserWarning):\n ... warnings.warn('deprecated', UserWarning, stacklevel=2)\n\n >>> with warns(UserWarning):\n ... pass\n Traceback (most recent call last):\n ...\n Failed: DID NOT WARN. No warnings of type UserWarning\\\n was emitted. The list of emitted warnings is: [].\n\n ``test_stacklevel`` makes it check that the ``stacklevel`` parameter to\n ``warn()`` is set so that the warning shows the user line of code (the\n code under the warns() context manager). Set this to False if this is\n ambiguous or if the context manager does not test the direct user code\n that emits the warning.\n\n If the warning is a ``SymPyDeprecationWarning``, this additionally tests that\n the ``active_deprecations_target`` is a real target in the\n ``active-deprecations.md`` file.\n\n \\\nFailed: Warning has the wrong stacklevel. 
The warning stacklevel needs to be\nset so that the line of code shown in the warning message is user code that\ncalls the deprecated code (the current stacklevel is showing code from\n{w.filename}, expected {thisfile})", "n_words": 168, "vocab_size": 101, "n_whitespaces": 232, "language": "en" } }, { "id": 221333, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/cmd.py", "file_name": "cmd.py", "fun_name": "precmd", "commit_message": "add python 3.10.4 for windows", "code": "def precmd(self, line):\n \n return line\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 18, "n_identifiers": 3, "d_id": 56352, "documentation": { "docstring": "Hook method executed just before the command line is\n interpreted, but after the input prompt is generated and issued.\n\n ", "n_words": 19, "vocab_size": 17, "n_whitespaces": 33, "language": "en" } }, { "id": 291337, "commit_id": "e1338adf1a27edbc0e3513fa67cd6690c7a8fbc0", "repo": "core", "path": "tests/common.py", "file_name": "common.py", "fun_name": "async_test_home_assistant", "commit_message": "Allow configuring country and language in core config (#81734)\n\n* Allow configuring country and language in core config\r\n\r\n* Add script for updating list of countries\r\n\r\n* Use black for formatting\r\n\r\n* Fix quoting\r\n\r\n* Move country codes to a separate file\r\n\r\n* Address review comments\r\n\r\n* Add generated/countries.py\r\n\r\n* Get default language from owner account\r\n\r\n* Remove unused variable\r\n\r\n* Add script to generate list of supported languages\r\n\r\n* Add tests\r\n\r\n* Fix stale docsring\r\n\r\n* Use format_python_namespace\r\n\r\n* Correct async_user_store\r\n\r\n* Improve typing\r\n\r\n* Fix with_store decorator\r\n\r\n* Initialize language in core store migration\r\n\r\n* Fix startup\r\n\r\n* Tweak\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Franck Nijhof \r\n\r\n* Update storage.py\r\n\r\nCo-authored-by: Franck Nijhof ", "code": "async def async_test_home_assistant(loop, load_registries=True):\n \n hass = ha.HomeAssistant()\n store = auth_store.AuthStore(hass)\n hass.auth = auth.AuthManager(hass, store, {}, {})\n ensure_auth_manager_loaded(hass.auth)\n INSTANCES.append(hass)\n\n orig_async_add_job = hass.async_add_job\n orig_async_add_executor_job = hass.async_add_executor_job\n orig_async_create_task = hass.async_create_task\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 54, "n_words": 27, "vocab_size": 22, "complexity": 2, "nloc": 54, "token_counts": 320, "n_ast_nodes": 114, "n_identifiers": 20, "d_id": 90446, "documentation": { "docstring": "Return a Home Assistant object pointing at test config dir.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 231482, "commit_id": "ab7ddd3e8beeb1e70ce46447d26a0715fc92a5b7", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/_yaxis.py", "file_name": "_yaxis.py", "fun_name": "shift", "commit_message": "bump plotly.js to 2.17", "code": "def shift(self):\n \n return self[\"shift\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 
4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 62935, "documentation": { "docstring": "\n Moves the axis a given number of pixels from where it would\n have been otherwise. Accepts both positive and negative values,\n which will shift the axis either right or left, respectively.\n If `autoshift` is set to true, then this defaults to a padding\n of -3 if `side` is set to \"left\". and defaults to +3 if `side`\n is set to \"right\". Defaults to 0 if `autoshift` is set to\n false. Only has an effect if `anchor` is set to \"free\".\n\n The 'shift' property is a number and may be specified as:\n - An int or float\n\n Returns\n -------\n int|float\n ", "n_words": 99, "vocab_size": 68, "n_whitespaces": 193, "language": "en" } }, { "id": 42535, "commit_id": "8a4cf5d94eb94b6427c5d1d7907ba07b119932c5", "repo": "nltk", "path": "nltk/corpus/reader/framenet.py", "file_name": "framenet.py", "fun_name": "frame", "commit_message": "Docstring tests (#3050)\n\n* fixed pytests\r\n\r\n* fixed more pytests\r\n\r\n* fixed more pytest and changed multiline pytest issues fixes for snowball.py and causal.py\r\n\r\n* fixed pytests (mainly multiline or rounding issues)\r\n\r\n* fixed treebank pytests, removed test for return_string=True (deprecated)\r\n\r\n* fixed destructive.py pytests, removed test for return_string=True (deprecated)\r\n\r\n* fixed pytest (rounding issues)\r\n\r\n* fixed pytest (initialised missing object)\r\n\r\n* fixed pytest (formatting issues)\r\n\r\n* fixed pytest (formatting issues)\r\n\r\n* fixed pytest (formatting issues)\r\n\r\n* added pytest +SKIP for deprecated module stanford\r\n\r\n* updated AUTHORS.md\r\n\r\n* changed docstring corrections by usage of ELLIPSIS and different roundings\r\n\r\n* fixed AUTHORS.md to be consistent\r\n\r\n* Fix framenet doctest formatting with pprint\r\n\r\n* Change docstring on MultiListBox.__init__\r\n\r\nI believe the original typo was misinterpreted and changed to something that was not originally intended.\r\n\r\nCo-authored-by: Jan Lennartz \r\nCo-authored-by: Tom Aarsen <37621491+tomaarsen@users.noreply.github.com>\r\nCo-authored-by: Tom Aarsen ", "code": "def frame(self, fn_fid_or_fname, ignorekeys=[]):\n \n\n # get the frame info by name or id number\n if isinstance(fn_fid_or_fname, str):\n f = self.frame_by_name(fn_fid_or_fname, ignorekeys)\n else:\n f = self.frame_by_id(fn_fid_or_fname, ignorekeys)\n\n return f\n", "url": "https://github.com/nltk/nltk.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 85, "n_words": 28, "vocab_size": 24, "complexity": 2, "nloc": 6, "token_counts": 45, "n_ast_nodes": 72, "n_identifiers": 9, "d_id": 7597, "documentation": { "docstring": "\n Get the details for the specified Frame using the frame's name\n or id number.\n\n Usage examples:\n\n >>> from nltk.corpus import framenet as fn\n >>> f = fn.frame(256)\n >>> f.name\n 'Medical_specialties'\n >>> f = fn.frame('Medical_specialties')\n >>> f.ID\n 256\n >>> # ensure non-ASCII character in definition doesn't trigger an encoding error:\n >>> fn.frame('Imposing_obligation') # doctest: +ELLIPSIS\n frame (1494): Imposing_obligation...\n\n\n The dict that is returned from this function will contain the\n following information about the Frame:\n\n - 'name' : the name of the Frame (e.g. 
'Birth', 'Apply_heat', etc.)\n - 'definition' : textual definition of the Frame\n - 'ID' : the internal ID number of the Frame\n - 'semTypes' : a list of semantic types for this frame\n - Each item in the list is a dict containing the following keys:\n - 'name' : can be used with the semtype() function\n - 'ID' : can be used with the semtype() function\n\n - 'lexUnit' : a dict containing all of the LUs for this frame.\n The keys in this dict are the names of the LUs and\n the value for each key is itself a dict containing\n info about the LU (see the lu() function for more info.)\n\n - 'FE' : a dict containing the Frame Elements that are part of this frame\n The keys in this dict are the names of the FEs (e.g. 'Body_system')\n and the values are dicts containing the following keys\n\n - 'definition' : The definition of the FE\n - 'name' : The name of the FE e.g. 'Body_system'\n - 'ID' : The id number\n - '_type' : 'fe'\n - 'abbrev' : Abbreviation e.g. 'bod'\n - 'coreType' : one of \"Core\", \"Peripheral\", or \"Extra-Thematic\"\n - 'semType' : if not None, a dict with the following two keys:\n - 'name' : name of the semantic type. can be used with\n the semtype() function\n - 'ID' : id number of the semantic type. can be used with\n the semtype() function\n - 'requiresFE' : if not None, a dict with the following two keys:\n - 'name' : the name of another FE in this frame\n - 'ID' : the id of the other FE in this frame\n - 'excludesFE' : if not None, a dict with the following two keys:\n - 'name' : the name of another FE in this frame\n - 'ID' : the id of the other FE in this frame\n\n - 'frameRelation' : a list of objects describing frame relations\n - 'FEcoreSets' : a list of Frame Element core sets for this frame\n - Each item in the list is a list of FE objects\n\n :param fn_fid_or_fname: The Framenet name or id number of the frame\n :type fn_fid_or_fname: int or str\n :param ignorekeys: The keys to ignore. These keys will not be\n included in the output. 
(optional)\n :type ignorekeys: list(str)\n :return: Information about a frame\n :rtype: dict\n ", "n_words": 466, "vocab_size": 160, "n_whitespaces": 1166, "language": "en" } }, { "id": 202418, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/csrf_tests/tests.py", "file_name": "tests.py", "fun_name": "_get_cookies_set", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _get_cookies_set(self, req, resp):\n \n raise NotImplementedError(\"This method must be implemented by a subclass.\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 27, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 27, "n_identifiers": 5, "d_id": 50120, "documentation": { "docstring": "\n Return a list of the cookie values passed to set_cookie() over the\n course of the request-response.\n ", "n_words": 16, "vocab_size": 13, "n_whitespaces": 38, "language": "en" } }, { "id": 304562, "commit_id": "58b9785485af4b49097707edb7fbcc00c72a3df0", "repo": "core", "path": "homeassistant/components/fido/sensor.py", "file_name": "sensor.py", "fun_name": "async_update", "commit_message": "Improve entity type hints [f] (#77143)", "code": "async def async_update(self) -> None:\n \n await self.fido_data.async_update()\n if (sensor_type := self.entity_description.key) == \"balance\":\n if self.fido_data.data.get(sensor_type) is not None:\n self._attr_native_value = round(self.fido_data.data[sensor_type], 2)\n else:\n if self.fido_data.data.get(self._number, {}).get(sensor_type) is not None:\n self._attr_native_value = round(\n self.fido_data.data[self._number][sensor_type], 2\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 149, "n_words": 35, "vocab_size": 27, "complexity": 4, "nloc": 11, "token_counts": 110, "n_ast_nodes": 175, "n_identifiers": 11, "d_id": 103369, "documentation": { "docstring": "Get the latest data from Fido and update the state.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 175319, "commit_id": "acf7403f9baea3ae1119fc6b4a3298522188bf96", "repo": "cpython", "path": "Lib/enum.py", "file_name": "enum.py", "fun_name": "_missing_", "commit_message": "bpo-40066: [Enum] update str() and format() output (GH-30582)\n\nUndo rejected PEP-663 changes:\r\n\r\n- restore `repr()` to its 3.10 status\r\n- restore `str()` to its 3.10 status\r\n\r\nNew changes:\r\n\r\n- `IntEnum` and `IntFlag` now leave `__str__` as the original `int.__str__` so that str() and format() return the same result\r\n- zero-valued flags without a name have a slightly changed repr(), e.g. `repr(Color(0)) == ''`\r\n- update `dir()` for mixed-in types to return all the methods and attributes of the mixed-in type\r\n- added `_numeric_repr_` to `Flag` to control display of unnamed values\r\n- enums without doc strings have a more comprehensive doc string added\r\n- `ReprEnum` added -- inheriting from this makes it so only `__repr__` is replaced, not `__str__` nor `__format__`; `IntEnum`, `IntFlag`, and `StrEnum` all inherit from `ReprEnum`", "code": "def _missing_(cls, value):\n \n if not isinstance(value, int):\n raise ValueError(\n \"%r is not a valid %s\" % (value, cls.__qualname__)\n )\n # check boundaries\n # - value must be in range (e.g. -16 <-> +15, i.e. 
~15 <-> 15)\n # - value must not include any skipped flags (e.g. if bit 2 is not\n # defined, then 0d10 is invalid)\n flag_mask = cls._flag_mask_\n all_bits = cls._all_bits_\n neg_value = None\n if (\n not ~all_bits <= value <= all_bits\n or value & (all_bits ^ flag_mask)\n ):\n if cls._boundary_ is STRICT:\n max_bits = max(value.bit_length(), flag_mask.bit_length())\n raise ValueError(\n \"%r invalid value %r\\n given %s\\n allowed %s\" % (\n cls, value, bin(value, max_bits), bin(flag_mask, max_bits),\n ))\n elif cls._boundary_ is CONFORM:\n value = value & flag_mask\n elif cls._boundary_ is EJECT:\n return value\n elif cls._boundary_ is KEEP:\n if value < 0:\n value = (\n max(all_bits+1, 2**(value.bit_length()))\n + value\n )\n else:\n raise ValueError(\n '%r unknown flag boundary %r' % (cls, cls._boundary_, )\n )\n if value < 0:\n neg_value = value\n value = all_bits + 1 + value\n # get members and unknown\n unknown = value & ~flag_mask\n member_value = value & flag_mask\n if unknown and cls._boundary_ is not KEEP:\n raise ValueError(\n '%s(%r) --> unknown values %r [%s]'\n % (cls.__name__, value, unknown, bin(unknown))\n )\n # normal Flag?\n __new__ = getattr(cls, '__new_member__', None)\n if cls._member_type_ is object and not __new__:\n # construct a singleton enum pseudo-member\n pseudo_member = object.__new__(cls)\n else:\n pseudo_member = (__new__ or cls._member_type_.__new__)(cls, value)\n if not hasattr(pseudo_member, '_value_'):\n pseudo_member._value_ = value\n if member_value:\n pseudo_member._name_ = '|'.join([\n m._name_ for m in cls._iter_member_(member_value)\n ])\n if unknown:\n pseudo_member._name_ += '|%s' % cls._numeric_repr_(unknown)\n else:\n pseudo_member._name_ = None\n # use setdefault in case another thread already created a composite\n # with this value, but only if all members are known\n # note: zero is a special case -- add it\n if not unknown:\n pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member)\n if neg_value is not None:\n cls._value2member_map_[neg_value] = pseudo_member\n return pseudo_member\n", "url": "https://github.com/python/cpython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 1215, "n_words": 312, "vocab_size": 166, "complexity": 21, "nloc": 62, "token_counts": 378, "n_ast_nodes": 611, "n_identifiers": 38, "d_id": 41600, "documentation": { "docstring": "\n Create a composite member containing all canonical members present in `value`.\n\n If non-member values are present, result depends on `_boundary_` setting.\n ", "n_words": 21, "vocab_size": 21, "n_whitespaces": 43, "language": "en" } }, { "id": 294423, "commit_id": "53245c65238e3009dd1f3412f7f9bef10385f64e", "repo": "core", "path": "homeassistant/components/alexa/capabilities.py", "file_name": "capabilities.py", "fun_name": "configuration", "commit_message": "Update pylint to 2.13.0 (#68656)", "code": "def configuration(self):\n \n return []\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 9, "n_ast_nodes": 18, "n_identifiers": 2, "d_id": 93460, "documentation": { "docstring": "Return the configuration object.\n\n Applicable to the ThermostatController, SecurityControlPanel, ModeController, RangeController,\n and EventDetectionSensor.\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 34, "language": "en" } }, { "id": 258280, "commit_id": 
"a15af7f8c3ca504ceaa7c34e8487be2915cd6dc7", "repo": "haystack", "path": "test/document_stores/test_memory.py", "file_name": "test_memory.py", "fun_name": "test_get_documents_by_id", "commit_message": "refactor: Move `InMemoryDocumentStore` tests to their own class (#3614)\n\n* move tests to their own class\r\n\r\n* move more tests\r\n\r\n* add specific job\r\n\r\n* fix test\r\n\r\n* Update test/document_stores/test_memory.py\r\n\r\nCo-authored-by: Sara Zan \r\n\r\nCo-authored-by: Sara Zan ", "code": "def test_get_documents_by_id(self, ds, documents):\n \n ds.write_documents(documents)\n ids = [doc.id for doc in documents]\n result = {doc.id for doc in ds.get_documents_by_id(ids)}\n assert set(ids) == result\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 58, "n_words": 23, "vocab_size": 18, "complexity": 3, "nloc": 5, "token_counts": 50, "n_ast_nodes": 78, "n_identifiers": 11, "d_id": 75213, "documentation": { "docstring": "\n The base test uses the batch_size param that's not supported\n here, so we override the test case\n ", "n_words": 17, "vocab_size": 15, "n_whitespaces": 39, "language": "en" } }, { "id": 130071, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "doc/source/ray-core/_examples/lm/ray_train.py", "file_name": "ray_train.py", "fun_name": "add_checkpoint_hook", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def add_checkpoint_hook(self, args):\n \n\n if args.cpu:\n original_n_cpus = args.distributed_world_size\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 33, "n_words": 8, "vocab_size": 8, "complexity": 2, "nloc": 8, "token_counts": 36, "n_ast_nodes": 31, "n_identifiers": 6, "d_id": 29088, "documentation": { "docstring": "Add a hook to the original save_checkpoint function.\n\n This checks if there are new computational resources available.\n If so, raise exception to restart the training process and\n make use of the new resources.\n ", "n_words": 33, "vocab_size": 29, "n_whitespaces": 61, "language": "en" } }, { "id": 178816, "commit_id": "ed2208484bde4bf78da0712d54ab18c192df7e2e", "repo": "Nuitka", "path": "nuitka/utils/WindowsResources.py", "file_name": "WindowsResources.py", "fun_name": "addResourceToFile", "commit_message": "Onefile: Attempt opening the binary for adding payload up to five seconds\n\n* This duplicates code from resource handling, where we needed to do\n this already, but due to hotfix intention, we don't want to make it\n as a reusable functionality yet.", "code": "def addResourceToFile(target_filename, data, resource_kind, lang_id, res_name, logger):\n max_attempts = 5\n\n for attempt in range(1, max_attempts + 1):\n update_handle = _openFileWindowsResources(target_filename)\n\n _updateWindowsResource(update_handle, resource_kind, res_name, lang_id, data)\n\n try:\n _closeFileWindowsResources(update_handle)\n except OSError as e:\n if e.errno in (110, 13):\n logger.warning(\n \n % (target_filename, attempt)\n )\n else:\n logger.warning(\n \n % (target_filename, attempt, e.errno)\n )\n\n time.sleep(1)\n continue\n else:\n if attempt != 1:\n logger.warning(\n \"Succeeded with resource update in attempt %d.\" % attempt\n )\n break\n else:\n logger.sysexit(\"Failed to update resources, the result is unusable.\")\n\n", "url": 
"https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 390, "n_words": 76, "vocab_size": 55, "complexity": 5, "nloc": 32, "token_counts": 131, "n_ast_nodes": 208, "n_identifiers": 21, "d_id": 42833, "documentation": { "docstring": "\nFailed to add resources to file %r in attempt %d.\nDisable Anti-Virus, e.g. Windows Defender for build folders. Retrying after a second of delay.\nFailed to add resources to file %r in attempt %d with error code %d.\nDisable Anti-Virus, e.g. Windows Defender for build folders. Retrying after a second of delay.", "n_words": 52, "vocab_size": 27, "n_whitespaces": 48, "language": "en" } }, { "id": 43251, "commit_id": "f3aacebe502c4ea5dc2b7d29373539296fa037eb", "repo": "airflow", "path": "airflow/providers/sftp/hooks/sftp.py", "file_name": "sftp.py", "fun_name": "test_connection", "commit_message": "Convert sftp hook to use paramiko instead of pysftp (#24512)", "code": "def test_connection(self) -> Tuple[bool, str]:\n \n try:\n conn = self.get_conn()\n conn.normalize('.')\n return True, \"Connection successfully tested\"\n except Exception as e:\n return False, str(e)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 87, "n_words": 22, "vocab_size": 21, "complexity": 2, "nloc": 8, "token_counts": 44, "n_ast_nodes": 78, "n_identifiers": 10, "d_id": 7889, "documentation": { "docstring": "Test the SFTP connection by calling path with directory", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 248661, "commit_id": "f1145563f662653e451525032b043d1a58998b6d", "repo": "synapse", "path": "tests/test_server.py", "file_name": "test_server.py", "fun_name": "test_redirect_exception", "commit_message": "Extra type annotations in `test_server` (#13124)", "code": "def test_redirect_exception(self) -> None:\n \n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 11, "n_words": 4, "vocab_size": 4, "complexity": 3, "nloc": 15, "token_counts": 95, "n_ast_nodes": 16, "n_identifiers": 2, "d_id": 72408, "documentation": { "docstring": "\n If the callback raises a RedirectException, it is turned into a 30x\n with the right location.\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 38, "language": "en" } }, { "id": 218526, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ipaddress.py", "file_name": "ipaddress.py", "fun_name": "__format__", "commit_message": "add python 3.10.4 for windows", "code": "def __format__(self, fmt):\n \n\n # Support string formatting\n if not fmt or fmt[-1] == 's':\n return format(str(self), fmt)\n\n # From here on down, support for 'bnXx'\n global _address_fmt_re\n if _address_fmt_re is None:\n import re\n _address_fmt_re = re.compile('(#?)(_?)([xbnX])')\n\n m = _address_fmt_re.fullmatch(fmt)\n if not m:\n return super().__format__(fmt)\n\n alternate, grouping, fmt_base = m.groups()\n\n # Set some defaults\n if fmt_base == 'n':\n if self._version == 4:\n fmt_base = 'b' # Binary is default for ipv4\n else:\n fmt_base = 'x' # Hex is default for ipv6\n\n if fmt_base == 'b':\n padlen = self._max_prefixlen\n else:\n padlen = self._max_prefixlen // 4\n\n if grouping:\n padlen += padlen // 4 - 1\n\n if alternate:\n padlen += 2 # 0b or 0x\n\n return format(int(self), 
f'{alternate}0{padlen}{grouping}{fmt_base}')\n\n\n@functools.total_ordering", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "@functools.total_ordering", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 369, "n_words": 115, "vocab_size": 70, "complexity": 10, "nloc": 25, "token_counts": 141, "n_ast_nodes": 277, "n_identifiers": 21, "d_id": 55367, "documentation": { "docstring": "Returns an IP address as a formatted string.\n\n Supported presentation types are:\n 's': returns the IP address as a string (default)\n 'b': converts to binary and returns a zero-padded string\n 'X' or 'x': converts to upper- or lower-case hex and returns a zero-padded string\n 'n': the same as 'b' for IPv4 and 'x' for IPv6\n\n For binary and hex presentation types, the alternate form specifier\n '#' and the grouping option '_' are supported.\n ", "n_words": 73, "vocab_size": 47, "n_whitespaces": 129, "language": "en" } }, { "id": 202322, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/contenttypes_tests/test_management.py", "file_name": "test_management.py", "fun_name": "test_interactive_true_with_dependent_objects", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_interactive_true_with_dependent_objects(self):\n \n post = Post.objects.create(title=\"post\", content_type=self.content_type)\n # A related object is needed to show that a custom collector with\n # can_fast_delete=False is needed.\n ModelWithNullFKToSite.objects.create(post=post)\n with mock.patch(\"builtins.input\", return_value=\"yes\"):\n with captured_stdout() as stdout:\n call_command(\"remove_stale_contenttypes\", verbosity=2, stdout=stdout)\n self.assertEqual(Post.objects.count(), 0)\n output = stdout.getvalue()\n self.assertIn(\"- Content type for contenttypes_tests.Fake\", output)\n self.assertIn(\"- 1 contenttypes_tests.Post object(s)\", output)\n self.assertIn(\"- 1 contenttypes_tests.ModelWithNullFKToSite\", output)\n self.assertIn(\"Deleting stale content type\", output)\n self.assertEqual(ContentType.objects.count(), self.before_count)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 178, "n_words": 61, "vocab_size": 50, "complexity": 1, "nloc": 13, "token_counts": 134, "n_ast_nodes": 233, "n_identifiers": 23, "d_id": 50068, "documentation": { "docstring": "\n interactive mode (the default) deletes stale content types and warns of\n dependent objects.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 35, "language": "en" } }, { "id": 81351, "commit_id": "2d310dc4e50c6f7cd298f9fb8af69da258cd9ea6", "repo": "awx", "path": "awx/main/tests/unit/api/serializers/test_activity_stream_serializer.py", "file_name": "test_activity_stream_serializer.py", "fun_name": "test_activity_stream_related", "commit_message": "Optimize object creation by getting fewer empty relationships (#12508)\n\nThis optimizes the ActivityStreamSerializer by only getting many-to-many\r\n relationships that are speculatively non-empty\r\n based on information we have in other fields\r\n\r\nWe run this every time we create an object as an on_commit action\r\n so it is expected this will have a major impact on response times for launching jobs", "code": "def test_activity_stream_related():\n \n serializer_related = set(\n ActivityStream._meta.get_field(field_name).related_model\n for field_name, stuff in 
ActivityStreamSerializer()._local_summarizable_fk_fields(None)\n if hasattr(ActivityStream, field_name)\n )\n\n models = set(activity_stream_registrar.models)\n models.remove(Setting)\n\n missing_models = models - serializer_related\n assert not missing_models\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 69, "n_words": 27, "vocab_size": 22, "complexity": 3, "nloc": 10, "token_counts": 62, "n_ast_nodes": 102, "n_identifiers": 17, "d_id": 17190, "documentation": { "docstring": "\n If this test failed with content in `missing_models`, that means that a\n model has been connected to the activity stream, but the model has not\n been added to the activity stream serializer.\n\n How to fix this:\n Ideally, all models should be in awx.api.serializers.SUMMARIZABLE_FK_FIELDS\n\n If, for whatever reason, the missing model should not generally be\n summarized from related resources, then a special case can be carved out in\n ActivityStreamSerializer._local_summarizable_fk_fields\n ", "n_words": 68, "vocab_size": 50, "n_whitespaces": 96, "language": "en" } }, { "id": 241755, "commit_id": "82c8875f33addb0becd7761c95e9674ccc98c7ee", "repo": "lightning", "path": "tests/trainer/optimization/test_optimizers.py", "file_name": "test_optimizers.py", "fun_name": "test_invalid_scheduler_missing_state_dict", "commit_message": "Add `LightningModule.lr_scheduler_step` (#10249)\n\nCo-authored-by: Carlos Mocholi ", "code": "def test_invalid_scheduler_missing_state_dict():\n \n", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 10, "token_counts": 48, "n_ast_nodes": 12, "n_identifiers": 1, "d_id": 69685, "documentation": { "docstring": "Test that custom lr scheduler raises an error if it's missing the state dict.", "n_words": 14, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 337272, "commit_id": "6ffab178accebda485295bddf8eb6bf436ff698f", "repo": "accelerate", "path": "src/accelerate/accelerator.py", "file_name": "accelerator.py", "fun_name": "register_for_checkpointing", "commit_message": "Implementation of saving and loading custom states (#270)", "code": "def register_for_checkpointing(self, *objects):\n \n invalid_objects = []\n for obj in objects:\n if not hasattr(obj, \"state_dict\") or not hasattr(obj, \"load_state_dict\"):\n invalid_objects.append(obj)\n if len(invalid_objects) > 0:\n err = \"All `objects` must include a `state_dict` and `load_state_dict` function to be stored. The following inputs are invalid:\"\n for index, obj in enumerate(invalid_objects):\n err += f\"\\n\\t- Item at index {index}, `{get_pretty_name(obj)}`\"\n raise ValueError(err)\n self._custom_objects.extend(objects)\n", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 167, "n_words": 58, "vocab_size": 50, "complexity": 6, "nloc": 11, "token_counts": 79, "n_ast_nodes": 149, "n_identifiers": 15, "d_id": 120989, "documentation": { "docstring": "\n Makes note of `objects` and will save or load them in during `save_state` or `load_state`.\n\n These should be utilized when the state is being loaded or saved in the same script. 
It is not designed to be\n used in different scripts\n\n Note: Every `object` must have a `load_state_dict` and `state_dict` function to be stored.\n ", "n_words": 54, "vocab_size": 44, "n_whitespaces": 90, "language": "en" } }, { "id": 161058, "commit_id": "b617a87ee40ab384767a27335313c2c65ee094ec", "repo": "MockingBird", "path": "ppg2mel/utils/nets_utils.py", "file_name": "nets_utils.py", "fun_name": "get_subsample", "commit_message": "Init ppg extractor and ppg2mel (#375)\n\n* Init ppg extractor and ppg2mel\r\n\r\n* add preprocess and training\r\n\r\n* FIx known issues\r\n\r\n* Update __init__.py\r\n\r\nAllow to gen audio\r\n\r\n* Fix length issue\r\n\r\n* Fix bug of preparing fid\r\n\r\n* Fix sample issues\r\n\r\n* Add UI usage of PPG-vc", "code": "def get_subsample(train_args, mode, arch):\n \n if arch == 'transformer':\n return np.array([1])\n\n elif mode == 'mt' and arch == 'rnn':\n # +1 means input (+1) and layers outputs (train_args.elayer)\n subsample = np.ones(train_args.elayers + 1, dtype=np.int)\n logging.warning('Subsampling is not performed for machine translation.')\n logging.info('subsample: ' + ' '.join([str(x) for x in subsample]))\n return subsample\n\n elif (mode == 'asr' and arch in ('rnn', 'rnn-t')) or \\\n (mode == 'mt' and arch == 'rnn') or \\\n (mode == 'st' and arch == 'rnn'):\n subsample = np.ones(train_args.elayers + 1, dtype=np.int)\n if train_args.etype.endswith(\"p\") and not train_args.etype.startswith(\"vgg\"):\n ss = train_args.subsample.split(\"_\")\n for j in range(min(train_args.elayers + 1, len(ss))):\n subsample[j] = int(ss[j])\n else:\n logging.warning(\n 'Subsampling is not performed for vgg*. It is performed in max pooling layers at CNN.')\n logging.info('subsample: ' + ' '.join([str(x) for x in subsample]))\n return subsample\n\n elif mode == 'asr' and arch == 'rnn_mix':\n subsample = np.ones(train_args.elayers_sd + train_args.elayers + 1, dtype=np.int)\n if train_args.etype.endswith(\"p\") and not train_args.etype.startswith(\"vgg\"):\n ss = train_args.subsample.split(\"_\")\n for j in range(min(train_args.elayers_sd + train_args.elayers + 1, len(ss))):\n subsample[j] = int(ss[j])\n else:\n logging.warning(\n 'Subsampling is not performed for vgg*. It is performed in max pooling layers at CNN.')\n logging.info('subsample: ' + ' '.join([str(x) for x in subsample]))\n return subsample\n\n elif mode == 'asr' and arch == 'rnn_mulenc':\n subsample_list = []\n for idx in range(train_args.num_encs):\n subsample = np.ones(train_args.elayers[idx] + 1, dtype=np.int)\n if train_args.etype[idx].endswith(\"p\") and not train_args.etype[idx].startswith(\"vgg\"):\n ss = train_args.subsample[idx].split(\"_\")\n for j in range(min(train_args.elayers[idx] + 1, len(ss))):\n subsample[j] = int(ss[j])\n else:\n logging.warning(\n 'Encoder %d: Subsampling is not performed for vgg*. 
'\n 'It is performed in max pooling layers at CNN.', idx + 1)\n logging.info('subsample: ' + ' '.join([str(x) for x in subsample]))\n subsample_list.append(subsample)\n return subsample_list\n\n else:\n raise ValueError('Invalid options: mode={}, arch={}'.format(mode, arch))\n\n", "url": "https://github.com/babysor/MockingBird.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 735, "n_words": 275, "vocab_size": 96, "complexity": 28, "nloc": 49, "token_counts": 534, "n_ast_nodes": 897, "n_identifiers": 33, "d_id": 38874, "documentation": { "docstring": "Parse the subsampling factors from the training args for the specified `mode` and `arch`.\n\n Args:\n train_args: argument Namespace containing options.\n mode: one of ('asr', 'mt', 'st')\n arch: one of ('rnn', 'rnn-t', 'rnn_mix', 'rnn_mulenc', 'transformer')\n\n Returns:\n np.ndarray / List[np.ndarray]: subsampling factors.\n ", "n_words": 40, "vocab_size": 35, "n_whitespaces": 77, "language": "en" } }, { "id": 62572, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/html5lib/treebuilders/__init__.py", "file_name": "__init__.py", "fun_name": "getTreeBuilder", "commit_message": "upd; format", "code": "def getTreeBuilder(treeType, implementation=None, **kwargs):\n \n\n treeType = treeType.lower()\n if treeType not in treeBuilderCache:\n if treeType == \"dom\":\n from . import dom\n # Come up with a sane default (pref. from the stdlib)\n if implementation is None:\n from xml.dom import minidom\n implementation = minidom\n # NEVER cache here, caching is done in the dom submodule\n return dom.getDomModule(implementation, **kwargs).TreeBuilder\n elif treeType == \"lxml\":\n from . import etree_lxml\n treeBuilderCache[treeType] = etree_lxml.TreeBuilder\n elif treeType == \"etree\":\n from . import etree\n if implementation is None:\n implementation = default_etree\n # NEVER cache here, caching is done in the etree submodule\n return etree.getETreeModule(implementation, **kwargs).TreeBuilder\n else:\n raise ValueError( % treeType)\n return treeBuilderCache.get(treeType)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 320, "n_words": 103, "vocab_size": 56, "complexity": 7, "nloc": 20, "token_counts": 123, "n_ast_nodes": 211, "n_identifiers": 17, "d_id": 12996, "documentation": { "docstring": "Get a TreeBuilder class for various types of trees with built-in support\n\n :arg treeType: the name of the tree type required (case-insensitive). Supported\n values are:\n\n * \"dom\" - A generic builder for DOM implementations, defaulting to a\n xml.dom.minidom based implementation.\n * \"etree\" - A generic builder for tree implementations exposing an\n ElementTree-like interface, defaulting to xml.etree.cElementTree if\n available and xml.etree.ElementTree if not.\n * \"lxml\" - A etree-based builder for lxml.etree, handling limitations\n of lxml's implementation.\n\n :arg implementation: (Currently applies to the \"etree\" and \"dom\" tree\n types). A module implementing the tree type e.g. 
xml.etree.ElementTree\n or xml.etree.cElementTree.\n\n :arg kwargs: Any additional options to pass to the TreeBuilder when\n creating it.\n\n Example:\n\n >>> from html5lib.treebuilders import getTreeBuilder\n >>> builder = getTreeBuilder('etree')\n\n Unrecognised treebuilder \"%s\" ", "n_words": 122, "vocab_size": 82, "n_whitespaces": 228, "language": "en" } }, { "id": 70057, "commit_id": "5ce964bac3a618229f593ad587cb704f783a470f", "repo": "glances", "path": "glances/stats.py", "file_name": "stats.py", "fun_name": "getAllExportsAsDict", "commit_message": "Remove the static exportable_plugins list from glances_export.py #1556\"\n Limiting data exported for economic storage #1443", "code": "def getAllExportsAsDict(self, plugin_list=None):\n \n if plugin_list is None:\n # All enabled plugins should be exported\n plugin_list = self.getPluginsList()\n return {p: self._plugins[p].get_export() for p in plugin_list}\n", "url": "https://github.com/nicolargo/glances.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 67, "n_words": 24, "vocab_size": 23, "complexity": 3, "nloc": 4, "token_counts": 41, "n_ast_nodes": 67, "n_identifiers": 7, "d_id": 15289, "documentation": { "docstring": "Return all the stats to be exported (list).\n\n Default behavior is to export all the stat\n if plugin_list is provided, only export stats of given plugin (list)\n ", "n_words": 27, "vocab_size": 21, "n_whitespaces": 48, "language": "en" } }, { "id": 13477, "commit_id": "de5942f19db46321d09d31ff62d60ac33e7e43d7", "repo": "jina", "path": "jina/parsers/client.py", "file_name": "client.py", "fun_name": "mixin_client_protocol_parser", "commit_message": "feat: allow multiple port and protocols for gateway (#5378)", "code": "def mixin_client_protocol_parser(parser):\n \n\n from jina.enums import GatewayProtocolType\n\n parser.add_argument(\n '--protocol',\n type=GatewayProtocolType.from_string,\n choices=list(GatewayProtocolType),\n default=GatewayProtocolType.GRPC,\n help='Communication protocol between server and client.',\n )\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 65, "n_words": 18, "vocab_size": 18, "complexity": 1, "nloc": 9, "token_counts": 42, "n_ast_nodes": 68, "n_identifiers": 13, "d_id": 2665, "documentation": { "docstring": "Add the arguments for the protocol to the client parser\n\n :param parser: the parser configure\n ", "n_words": 15, "vocab_size": 11, "n_whitespaces": 21, "language": "en" } }, { "id": 111575, "commit_id": "d61e742960ef230b423dfa157449b291a03bd119", "repo": "spaCy", "path": "spacy/tests/pipeline/test_entity_linker.py", "file_name": "test_entity_linker.py", "fun_name": "test_span_maker_forward_with_empty", "commit_message": "Handle Docs with no entities in EntityLinker (#11640)\n\n* Handle docs with no entities\r\n\r\nIf a whole batch contains no entities it won't make it to the model, but\r\nit's possible for individual Docs to have no entities. Before this\r\ncommit, those Docs would cause an error when attempting to concatenate\r\narrays because the dimensions didn't match.\r\n\r\nIt turns out the process of preparing the Ragged at the end of the span\r\nmaker forward was a little different from list2ragged, which just uses\r\nthe flatten function directly. 
Letting list2ragged do the conversion\r\navoids the dimension issue.\r\n\r\nThis did not come up before because in NEL demo projects it's typical\r\nfor data with no entities to be discarded before it reaches the NEL\r\ncomponent.\r\n\r\nThis includes a simple direct test that shows the issue and checks it's\r\nresolved. It doesn't check if there are any downstream changes, so a\r\nmore complete test could be added. A full run was tested by adding an\r\nexample with no entities to the Emerson sample project.\r\n\r\n* Add a blank instance to default training data in tests\r\n\r\nRather than adding a specific test, since not failing on instances with\r\nno entities is basic functionality, it makes sense to add it to the\r\ndefault set.\r\n\r\n* Fix without modifying architecture\r\n\r\nIf the architecture is modified this would have to be a new version, but\r\nthis change isn't big enough to merit that.", "code": "def test_span_maker_forward_with_empty():\n \n nlp = English()\n doc1 = nlp(\"a b c\")\n ent = doc1[0:1]\n ent.label_ = \"X\"\n doc1.ents = [ent]\n # no entities\n doc2 = nlp(\"x y z\")\n\n # just to get a model\n span_maker = build_span_maker()\n span_maker([doc1, doc2], False)\n", "url": "https://github.com/explosion/spaCy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 72, "n_words": 39, "vocab_size": 32, "complexity": 1, "nloc": 9, "token_counts": 57, "n_ast_nodes": 103, "n_identifiers": 10, "d_id": 24444, "documentation": { "docstring": "The forward pass of the span maker may have a doc with no entities.", "n_words": 14, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 158175, "commit_id": "b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2", "repo": "d2l-zh", "path": "d2l/mxnet.py", "file_name": "mxnet.py", "fun_name": "evaluate_accuracy_gpu", "commit_message": "[PaddlePaddle] Merge master into Paddle branch (#1186)\n\n* change 15.2 title in chinese version (#1109)\r\n\r\nchange title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 
情感分析:使用循环神经网络‘\r\n\r\n* 修改部分语义表述 (#1105)\r\n\r\n* Update r0.17.5 (#1120)\r\n\r\n* Bump versions in installation\r\n\r\n* 94行typo: (“bert.mall”)->(“bert.small”) (#1129)\r\n\r\n* line 313: \"bert.mall\" -> \"bert.small\" (#1130)\r\n\r\n* fix: update language as native reader (#1114)\r\n\r\n* Fix the translation of \"stride\" (#1115)\r\n\r\n* Update index.md (#1118)\r\n\r\n修改部分语义表述\r\n\r\n* Update self-attention-and-positional-encoding.md (#1133)\r\n\r\n依照本书的翻译习惯,将pooling翻译成汇聚\r\n\r\n* maybe a comment false (#1149)\r\n\r\n* maybe a little false\r\n\r\n* maybe a little false\r\n\r\n* A minor bug in the rcnn section (Chinese edition) (#1148)\r\n\r\n* Update bert.md (#1137)\r\n\r\n一个笔误\r\n# 假设batch_size=2,num_pred_positions=3\r\n# 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1]\r\n\r\n* Update calculus.md (#1135)\r\n\r\n* fix typo in git documentation (#1106)\r\n\r\n* fix: Update the Chinese translation in lr-scheduler.md (#1136)\r\n\r\n* Update lr-scheduler.md\r\n\r\n* Update chapter_optimization/lr-scheduler.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* fix translation for kaggle-house-price.md (#1107)\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\nSigned-off-by: sunhaizhou \r\n\r\n* Update weight-decay.md (#1150)\r\n\r\n* Update weight-decay.md\r\n\r\n关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解\r\n关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。\r\n并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释\r\n解释为何会增加复杂性以及为何需要细粒度工具。\r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Fix a spelling error (#1161)\r\n\r\n* Update gru.md (#1152)\r\n\r\nThe key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state.\r\n翻译错误\r\n\r\n* Unify the function naming (#1113)\r\n\r\nUnify naming of the function 'init_xavier()'.\r\n\r\n* Update mlp-concise.md (#1166)\r\n\r\n* Update mlp-concise.md\r\n\r\n语句不通顺\r\n\r\n* Update environment.md\r\n\r\n语序异常\r\n\r\n* Update config.ini\r\n\r\n* fix the imprecise description (#1168)\r\n\r\nCo-authored-by: yuande \r\n\r\n* fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175)\r\n\r\n* Fix some typos. (#1163)\r\n\r\n* Update batch-norm.md (#1170)\r\n\r\nfixing typos u->x in article\r\n\r\n* Update linear-regression.md (#1090)\r\n\r\nWe invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that\r\n\r\n原译文把who也直接翻译出来了。\r\n\r\n* Update mlp.md (#1117)\r\n\r\n* Update mlp.md\r\n\r\n修改部分语义表述\r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: goldmermaid \r\n\r\n* Correct a translation error. 
(#1091)\r\n\r\n* Correct a translation error.\r\n\r\n* Update chapter_computer-vision/image-augmentation.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update aws.md (#1121)\r\n\r\n* Update aws.md\r\n\r\n* Update chapter_appendix-tools-for-deep-learning/aws.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update image-augmentation.md (#1093)\r\n\r\n* Update anchor.md (#1088)\r\n\r\nfix a minor issue in code\r\n\r\n* Update anchor.md\r\n\r\n* Update image-augmentation.md\r\n\r\n* fix typo and improve translation in chapter_linear-networks\\softmax-regression.md (#1087)\r\n\r\n* Avoid `torch.meshgrid` user warning (#1174)\r\n\r\nAvoids the following user warning:\r\n```python\r\n~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.)\r\n return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\r\n```\r\n\r\n* bump to 2.0.0-beta1\r\n\r\n* Update sequence.md\r\n\r\n* bump beta1 on readme\r\n\r\n* Add latex code block background to config\r\n\r\n* BLD: Bump python support version 3.9 (#1183)\r\n\r\n* BLD: Bump python support version 3.9\r\n\r\n* Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4\r\n\r\n* BLD: Bump torch and tensorflow\r\n\r\n* Update Jenkinsfile\r\n\r\n* Update chapter_installation/index.md\r\n\r\n* Update chapter_installation/index.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update config.ini\r\n\r\n* Update INFO.md\r\n\r\n* Update INFO.md\r\n\r\n* Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187)\r\n\r\n* resolve the conflicts\r\n\r\n* revise from publisher (#1089)\r\n\r\n* revise from publisher\r\n\r\n* d2l api\r\n\r\n* post_latex\r\n\r\n* revise from publisher\r\n\r\n* revise ch11\r\n\r\n* Delete d2l-Copy1.bib\r\n\r\n* clear cache\r\n\r\n* rm d2lbook clear\r\n\r\n* debug anchor\r\n\r\n* keep original d2l doc\r\n\r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\n\r\n* 重复语句 (#1188)\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improve expression for chapter_preliminaries/pandas.md (#1184)\r\n\r\n* Update pandas.md\r\n\r\n* Improve expression\r\n\r\n* Improve expression\r\n\r\n* Update chapter_preliminaries/pandas.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improce expression for chapter_preliminaries/linear-algebra.md (#1185)\r\n\r\n* Improce expression\r\n\r\n* Improve code comments\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Fix multibox_detection bugs\r\n\r\n* Update d2l to 0.17.5 version\r\n\r\n* restore older version\r\n\r\n* Upgrade pandas\r\n\r\n* change to python3.8\r\n\r\n* Test warning log\r\n\r\n* relocate warning log\r\n\r\n* test logs filtering\r\n\r\n* Update gru.md\r\n\r\n* Add DeprecationWarning filter\r\n\r\n* Test warning log\r\n\r\n* Update attention mechanisms & computational performance\r\n\r\n* Update multilayer 
perceptron& linear & convolution networks & computer vision\r\n\r\n* Update recurrent&optimition&nlp pretraining & nlp applications\r\n\r\n* ignore warnings\r\n\r\n* Update index.md\r\n\r\n* Update linear networks\r\n\r\n* Update multilayer perceptrons&deep learning computation\r\n\r\n* Update preliminaries\r\n\r\n* Check and Add warning filter\r\n\r\n* Update kaggle-cifar10.md\r\n\r\n* Update object-detection-dataset.md\r\n\r\n* Update ssd.md fcn.md\r\n\r\n* Update hybridize.md\r\n\r\n* Update hybridize.md\r\n\r\nSigned-off-by: sunhaizhou \r\nCo-authored-by: zhou201505013 <39976863+zhou201505013@users.noreply.github.com>\r\nCo-authored-by: Xinwei Liu \r\nCo-authored-by: Anirudh Dagar \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: hugo_han <57249629+HugoHann@users.noreply.github.com>\r\nCo-authored-by: gyro永不抽风 <1247006353@qq.com>\r\nCo-authored-by: CanChengZheng \r\nCo-authored-by: linlin \r\nCo-authored-by: iuk \r\nCo-authored-by: yoos <49556860+liyunlongaaa@users.noreply.github.com>\r\nCo-authored-by: Mr. Justice Lawrence John Wargrave <65226618+RUCWargrave@users.noreply.github.com>\r\nCo-authored-by: Chiyuan Fu \r\nCo-authored-by: Sunhuashan <48636870+Sunhuashan@users.noreply.github.com>\r\nCo-authored-by: Haiker Sun \r\nCo-authored-by: Ming Liu \r\nCo-authored-by: goldmermaid \r\nCo-authored-by: silenceZheng66 <13754430639@163.com>\r\nCo-authored-by: Wenchao Yan <56541797+YWonchall@users.noreply.github.com>\r\nCo-authored-by: Kiki2049 <55939997+Kiki2049@users.noreply.github.com>\r\nCo-authored-by: Krahets \r\nCo-authored-by: friedmainfunction <73703265+friedmainfunction@users.noreply.github.com>\r\nCo-authored-by: Jameson \r\nCo-authored-by: P. Yao <12227516+YaoPengCN@users.noreply.github.com>\r\nCo-authored-by: Yulv-git <34329208+Yulv-git@users.noreply.github.com>\r\nCo-authored-by: Liu,Xiao <45966993+liuxiao916@users.noreply.github.com>\r\nCo-authored-by: YIN, Gang <1246410+yingang@users.noreply.github.com>\r\nCo-authored-by: Joe-HZ <58297431+Joe-HZ@users.noreply.github.com>\r\nCo-authored-by: lybloveyou <102609904+lybloveyou@users.noreply.github.com>\r\nCo-authored-by: VigourJiang \r\nCo-authored-by: zxhd863943427 <74853597+zxhd863943427@users.noreply.github.com>\r\nCo-authored-by: LYF <27893441+liyufan@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\nCo-authored-by: xiaotinghe \r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Holly-Max <60691735+Holly-Max@users.noreply.github.com>\r\nCo-authored-by: HinGwenWoong \r\nCo-authored-by: Shuai Zhang ", "code": "def evaluate_accuracy_gpu(net, data_iter, device=None):\n \n if not device: # Query the first device where the first parameter is on\n device = list(net.collect_params().values())[0].list_ctx()[0]\n # No. of correct predictions, no. 
of predictions\n metric = d2l.Accumulator(2)\n for X, y in data_iter:\n X, y = X.as_in_ctx(device), y.as_in_ctx(device)\n metric.add(d2l.accuracy(net(X), y), d2l.size(y))\n return metric[0] / metric[1]\n", "url": "https://github.com/d2l-ai/d2l-zh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 89, "n_words": 49, "vocab_size": 40, "complexity": 3, "nloc": 8, "token_counts": 105, "n_ast_nodes": 167, "n_identifiers": 17, "d_id": 37352, "documentation": { "docstring": "Compute the accuracy for a model on a dataset using a GPU.\n\n Defined in :numref:`sec_lenet`", "n_words": 15, "vocab_size": 13, "n_whitespaces": 17, "language": "en" } }, { "id": 131220, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tests/test_advanced_3.py", "file_name": "test_advanced_3.py", "fun_name": "test_k8s_cpu", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_k8s_cpu():\n \n\n # Some experimentally-obtained K8S CPU usage files for use in test_k8s_cpu.\n PROCSTAT1 = # noqa\n\n PROCSTAT2 = # noqa\n\n CPUACCTUSAGE1 = \"2268980984108\"\n\n CPUACCTUSAGE2 = \"2270120061999\"\n\n CPUSHARES = \"2048\"\n\n shares_file, cpu_file, proc_stat_file = [\n tempfile.NamedTemporaryFile(\"w+\") for _ in range(3)\n ]\n shares_file.write(CPUSHARES)\n cpu_file.write(CPUACCTUSAGE1)\n proc_stat_file.write(PROCSTAT1)\n for file in shares_file, cpu_file, proc_stat_file:\n file.flush()\n with mock.patch(\n \"ray._private.utils.os.environ\", {\"KUBERNETES_SERVICE_HOST\"}\n ), mock.patch(\"ray.dashboard.k8s_utils.CPU_USAGE_PATH\", cpu_file.name), mock.patch(\n \"ray.dashboard.k8s_utils.PROC_STAT_PATH\", proc_stat_file.name\n ), mock.patch(\n \"ray._private.utils.get_k8s_cpus.__defaults__\", (shares_file.name,)\n ):\n\n # Test helpers\n assert ray._private.utils.get_num_cpus() == 2\n assert k8s_utils._cpu_usage() == 2268980984108\n assert k8s_utils._system_usage() == 1551775030000000\n assert k8s_utils._host_num_cpus() == 8\n\n # No delta for first computation, return 0.\n assert k8s_utils.cpu_percent() == 0.0\n\n # Write new usage info obtained after 1 sec wait.\n for file in cpu_file, proc_stat_file:\n file.truncate(0)\n file.seek(0)\n cpu_file.write(CPUACCTUSAGE2)\n proc_stat_file.write(PROCSTAT2)\n for file in cpu_file, proc_stat_file:\n file.flush()\n\n # Files were extracted under 1 CPU of load on a 2 CPU pod\n assert 50 < k8s_utils.cpu_percent() < 60\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 363, "n_words": 142, "vocab_size": 93, "complexity": 5, "nloc": 66, "token_counts": 220, "n_ast_nodes": 375, "n_identifiers": 30, "d_id": 29486, "documentation": { "docstring": "Test all the functions in dashboard/k8s_utils.py.\n Also test ray._private.utils.get_num_cpus when running in a K8s pod.\n Files were obtained from within a K8s pod with 2 CPU request, CPU limit\n unset, with 1 CPU of stress applied.\n cpu 2945022 98 3329420 148744854 39522 0 118587 0 0 0\n cpu0 370299 14 413841 18589778 5304 0 15288 0 0 0\n cpu1 378637 10 414414 18589275 5283 0 14731 0 0 0\n cpu2 367328 8 420914 18590974 4844 0 14416 0 0 0\n cpu3 368378 11 423720 18572899 4948 0 14394 0 0 0\n cpu4 369051 13 414615 18607285 4736 0 14383 0 0 0\n cpu5 362958 10 415984 18576655 4590 0 16614 0 0 0\n cpu6 362536 13 414430 18605197 4785 0 14353 0 0 0\n cpu7 365833 15 411499 18612787 
5028 0 14405 0 0 0\n intr 1000694027 125 0 0 39 154 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1028 0 2160913 0 2779605 8 0 3981333 3665198 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n ctxt 1574979439\n btime 1615208601\n processes 857411\n procs_running 6\n procs_blocked 0\n softirq 524311775 0 230142964 27143 63542182 0 0 171 74042767 0 156556548\n cpu 2945152 98 3329436 148745483 39522 0 118587 0 0 0\n cpu0 370399 14 413841 18589778 5304 0 15288 0 0 0\n cpu1 378647 10 414415 18589362 5283 0 14731 0 0 0\n cpu2 367329 8 420916 18591067 4844 0 14416 0 0 0\n cpu3 368381 11 423724 18572989 4948 0 14395 0 0 0\n cpu4 369052 13 414618 18607374 4736 0 14383 0 0 0\n cpu5 362968 10 415986 18576741 4590 0 16614 0 0 0\n cpu6 362537 13 414432 18605290 4785 0 14353 0 0 0\n cpu7 365836 15 411502 18612878 5028 0 14405 0 0 0\n intr 1000700905 125 0 0 39 154 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1028 0 2160923 0 2779605 8 0 3981353 3665218 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n ctxt 1574988760\n btime 1615208601\n processes 857411\n procs_running 4\n procs_blocked 0\n softirq 524317451 0 230145523 27143 63542930 0 0 171 74043232 0 156558452\n ", "n_words": 1258, "vocab_size": 156, "n_whitespaces": 1369, "language": "en" } }, { "id": 147590, "commit_id": "0bb82f29b65dca348acf5aa516d21ef3f176a3e1", "repo": "ray", "path": "rllib/agents/alpha_star/league_builder.py", "file_name": "league_builder.py", "fun_name": "__getstate__", "commit_message": "[RLlib] AlphaStar polishing (fix logger.info bug). 
(#22281)", "code": "def __getstate__(self) -> Dict[str, Any]:\n \n return {}\n\n\n@ExperimentalAPI", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@ExperimentalAPI", "n_ast_errors": 1, "ast_levels": 6, "n_whitespaces": 21, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 7, "token_counts": 16, "n_ast_nodes": 31, "n_identifiers": 6, "d_id": 34015, "documentation": { "docstring": "Returns a state dict, mapping str keys to state variables.\n\n Returns:\n The current state dict of this LeagueBuilder.\n ", "n_words": 18, "vocab_size": 16, "n_whitespaces": 43, "language": "en" } }, { "id": 200597, "commit_id": "1d8576449e7ab757f13f49a1d33faed602aa88fb", "repo": "sympy", "path": "sympy/algebras/quaternion.py", "file_name": "quaternion.py", "fun_name": "from_euler", "commit_message": "implemented to_euler and from_euler", "code": "def from_euler(cls, angles, seq):\n \n\n if len(angles) != 3:\n raise ValueError(\"3 angles must be given.\")\n\n extrinsic = _check_sequence(seq)\n i, j, k = seq.lower()\n\n q1 = cls.from_axis_angle(_elementary_axis(i), angles[0])\n q2 = cls.from_axis_angle(_elementary_axis(j), angles[1])\n q3 = cls.from_axis_angle(_elementary_axis(k), angles[2])\n\n if extrinsic:\n return trigsimp(q3 * q2 * q1)\n else:\n return trigsimp(q1 * q2 * q3)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 145, "n_words": 49, "vocab_size": 38, "complexity": 3, "nloc": 12, "token_counts": 111, "n_ast_nodes": 176, "n_identifiers": 18, "d_id": 49726, "documentation": { "docstring": "Returns quaternion equivalent to Euler angles represented same in\n the sequence defined by `seq`.\n\n Parameters\n ==========\n\n angles : list, tuple or Matrix of 3 numbers\n The Euler angles (in radians).\n seq : string of length 3\n Represents the sequence of rotations.\n For intrinsic rotations, seq but be all lowercase and its elements\n must be from the set `['x', 'y', 'z']`\n For extrinsic rotations, seq but be all uppercase and its elements\n must be from the set `['X', 'Y', 'Z']`\n\n Returns\n =======\n\n Quaternion\n The normalized rotation quaternion calculated from the Euler angles\n in the given sequence.\n\n Examples\n ========\n\n >>> from sympy import Quaternion\n >>> from sympy import pi\n >>> q = Quaternion.from_euler([pi/2, 0, 0], 'xyz')\n >>> q\n sqrt(2)/2 + sqrt(2)/2*i + 0*j + 0*k\n\n >>> q = Quaternion.from_euler([0, pi/2, pi] , 'zyz')\n >>> q\n 0 + (-sqrt(2)/2)*i + 0*j + sqrt(2)/2*k\n\n >>> q = Quaternion.from_euler([0, pi/2, pi] , 'ZYZ')\n >>> q\n 0 + sqrt(2)/2*i + 0*j + sqrt(2)/2*k\n\n ", "n_words": 157, "vocab_size": 86, "n_whitespaces": 399, "language": "en" } }, { "id": 130285, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/_private/thirdparty/pathspec/util.py", "file_name": "util.py", "fun_name": "lookup_pattern", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def lookup_pattern(name):\n \n return _registered_patterns[name]\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 10, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 20, "n_identifiers": 3, "d_id": 29210, "documentation": { "docstring": "\n Lookups a registered pattern factory by name.\n\n *name* (:class:`str`) is 
the name of the pattern factory.\n\n Returns the registered pattern factory (:class:`~collections.abc.Callable`).\n If no pattern factory is registered, raises :exc:`KeyError`.\n ", "n_words": 30, "vocab_size": 21, "n_whitespaces": 46, "language": "en" } }, { "id": 77827, "commit_id": "96a0eb0fa0cc0e28bcf5616987d193f6b2fcea82", "repo": "wagtail", "path": "wagtail/admin/views/generic/models.py", "file_name": "models.py", "fun_name": "save_instance", "commit_message": "Move logging in generic CreateView and EditView to save_instance() method", "code": "def save_instance(self):\n \n instance = self.form.save()\n log(instance=instance, action=\"wagtail.create\")\n return instance\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 37, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 27, "n_ast_nodes": 48, "n_identifiers": 7, "d_id": 16712, "documentation": { "docstring": "\n Called after the form is successfully validated - saves the object to the db\n and returns the new object. Override this to implement custom save logic.\n ", "n_words": 26, "vocab_size": 22, "n_whitespaces": 48, "language": "en" } }, { "id": 290620, "commit_id": "1ded3ac51ebc1915a5026af1998eb119972f6117", "repo": "core", "path": "tests/components/homekit_controller/common.py", "file_name": "common.py", "fun_name": "poll_and_get_state", "commit_message": "Poll HomeKit Controller locks for state after lock operation (#82058)", "code": "async def poll_and_get_state(self) -> State:\n \n await time_changed(self.hass, 60)\n await time_changed(self.hass, DEBOUNCE_COOLDOWN)\n\n state = self.hass.states.get(self.entity_id)\n assert state is not None\n return state\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 63, "n_words": 21, "vocab_size": 17, "complexity": 1, "nloc": 7, "token_counts": 47, "n_ast_nodes": 77, "n_identifiers": 10, "d_id": 89734, "documentation": { "docstring": "Trigger a time based poll and return the current entity state.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 287415, "commit_id": "4894e2e5a43e80a2f64d8f9486c7eb215fcdaa6b", "repo": "core", "path": "tests/components/mqtt/test_common.py", "file_name": "test_common.py", "fun_name": "help_test_setup_manual_entity_from_yaml", "commit_message": "Refactor common MQTT tests to use modern schema (#77583)\n\n* Common tests availability\r\n\r\n* Common tests attributes\r\n\r\n* Common tests unique id\r\n\r\n* Common tests discovery\r\n\r\n* Common tests encoding\r\n\r\n* Common tests device info\r\n\r\n* Common tests entity_id updated\r\n\r\n* Common tests entity debug info\r\n\r\n* Common test entity category\r\n\r\n* Common tests setup reload unload+corrections\r\n\r\n* Cleanup sweep\r\n\r\n* Comments from curent change\r\n\r\n* Cleanup\r\n\r\n* Remove unused legacy config", "code": "async def help_test_setup_manual_entity_from_yaml(hass, config):\n \n calls = MagicMock()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 14, "token_counts": 106, "n_ast_nodes": 25, "n_identifiers": 5, "d_id": 86608, "documentation": { "docstring": "Help to test setup from yaml through configuration entry.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, 
"language": "en" } }, { "id": 176736, "commit_id": "2a05ccdb07cff88e56661dee8a9271859354027f", "repo": "networkx", "path": "networkx/generators/random_graphs.py", "file_name": "random_graphs.py", "fun_name": "random_kernel_graph", "commit_message": "Remove redundant py2 numeric conversions (#5661)\n\n* Remove redundant float conversion\r\n\r\n* Remove redundant int conversion\r\n\r\n* Use integer division\r\n\r\nCo-authored-by: Miroslav Šedivý <6774676+eumiro@users.noreply.github.com>", "code": "def random_kernel_graph(n, kernel_integral, kernel_root=None, seed=None):\n r\n if kernel_root is None:\n import scipy as sp\n import scipy.optimize # call as sp.optimize\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 40, "n_words": 20, "vocab_size": 18, "complexity": 4, "nloc": 74, "token_counts": 143, "n_ast_nodes": 47, "n_identifiers": 8, "d_id": 42069, "documentation": { "docstring": "Returns an random graph based on the specified kernel.\n\n The algorithm chooses each of the $[n(n-1)]/2$ possible edges with\n probability specified by a kernel $\\kappa(x,y)$ [1]_. The kernel\n $\\kappa(x,y)$ must be a symmetric (in $x,y$), non-negative,\n bounded function.\n\n Parameters\n ----------\n n : int\n The number of nodes\n kernel_integral : function\n Function that returns the definite integral of the kernel $\\kappa(x,y)$,\n $F(y,a,b) := \\int_a^b \\kappa(x,y)dx$\n kernel_root: function (optional)\n Function that returns the root $b$ of the equation $F(y,a,b) = r$.\n If None, the root is found using :func:`scipy.optimize.brentq`\n (this requires SciPy).\n seed : integer, random_state, or None (default)\n Indicator of random number generation state.\n See :ref:`Randomness`.\n\n Notes\n -----\n The kernel is specified through its definite integral which must be\n provided as one of the arguments. If the integral and root of the\n kernel integral can be found in $O(1)$ time then this algorithm runs in\n time $O(n+m)$ where m is the expected number of edges [2]_.\n\n The nodes are set to integers from $0$ to $n-1$.\n\n Examples\n --------\n Generate an Erdős–Rényi random graph $G(n,c/n)$, with kernel\n $\\kappa(x,y)=c$ where $c$ is the mean expected degree.\n\n >>> def integral(u, w, z):\n ... return c * (z - w)\n >>> def root(u, w, r):\n ... return r / c + w\n >>> c = 1\n >>> graph = nx.random_kernel_graph(1000, integral, root)\n\n See Also\n --------\n gnp_random_graph\n expected_degree_graph\n\n References\n ----------\n .. [1] Bollobás, Béla, Janson, S. and Riordan, O.\n \"The phase transition in inhomogeneous random graphs\",\n *Random Structures Algorithms*, 31, 3--122, 2007.\n\n .. [2] Hagberg A, Lemons N (2015),\n \"Fast Generation of Sparse Random Kernel Graphs\".\n PLoS ONE 10(9): e0135177, 2015. 
doi:10.1371/journal.pone.0135177\n ", "n_words": 266, "vocab_size": 179, "n_whitespaces": 464, "language": "en" } }, { "id": 123270, "commit_id": "258b10edc90d53c31225962dde6dcc80b0fc9ba9", "repo": "hosts", "path": "updateHostsFile.py", "file_name": "updateHostsFile.py", "fun_name": "prompt_for_move", "commit_message": "refactor: more containerization", "code": "def prompt_for_move(final_file, **move_params):\n \n\n skip_static_hosts = move_params[\"skipstatichosts\"]\n\n if move_params[\"replace\"] and not skip_static_hosts:\n move_file = True\n elif move_params[\"auto\"] or skip_static_hosts:\n move_file = False\n else:\n prompt = \"Do you want to replace your existing hosts file with the newly generated file?\"\n move_file = query_yes_no(prompt)\n\n if move_file:\n move_file = move_hosts_file_into_place(final_file)\n\n return move_file\n\n\n# End Prompt the User\n\n", "url": "https://github.com/StevenBlack/hosts.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 108, "n_words": 53, "vocab_size": 41, "complexity": 6, "nloc": 12, "token_counts": 60, "n_ast_nodes": 108, "n_identifiers": 8, "d_id": 27313, "documentation": { "docstring": "\n Prompt the user to move the newly created hosts file to its designated\n location in the OS.\n\n Parameters\n ----------\n final_file : file\n The file object that contains the newly created hosts data.\n move_params : kwargs\n Dictionary providing additional parameters for moving the hosts file\n into place. Currently, those fields are:\n\n 1) auto\n 2) replace\n 3) skipstatichosts\n\n Returns\n -------\n move_file : bool\n Whether or not the final hosts file was moved.\n ", "n_words": 70, "vocab_size": 53, "n_whitespaces": 150, "language": "en" } }, { "id": 83879, "commit_id": "44ecd66eaec6533778bdff3fbb31ceb0acc0419a", "repo": "zulip", "path": "zerver/tests/test_subs.py", "file_name": "test_subs.py", "fun_name": "test_api_fields", "commit_message": "types: Better types for API fields.\n\nSigned-off-by: Zixuan James Li <359101898@qq.com>", "code": "def test_api_fields(self) -> None:\n \n expected_fields = set(Stream.API_FIELDS) | {\"stream_id\"}\n expected_fields -= {\"id\"}\n\n stream_dict_fields = set(APIStreamDict.__annotations__.keys())\n computed_fields = set([\"is_announcement_only\", \"is_default\"])\n\n self.assertEqual(stream_dict_fields - computed_fields, expected_fields)\n\n expected_fields = set(Subscription.API_FIELDS)\n\n subscription_dict_fields = set(APISubscriptionDict.__annotations__.keys())\n computed_fields = set(\n [\"in_home_view\", \"email_address\", \"stream_weekly_traffic\", \"subscribers\"]\n )\n # `APISubscriptionDict` is a subclass of `APIStreamDict`, therefore having all the\n # fields in addition to the computed fields and `Subscription.API_FIELDS` that\n # need to be excluded here.\n self.assertEqual(\n subscription_dict_fields - computed_fields - stream_dict_fields,\n expected_fields,\n )\n\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 211, "n_words": 73, "vocab_size": 55, "complexity": 1, "nloc": 18, "token_counts": 104, "n_ast_nodes": 183, "n_identifiers": 15, "d_id": 17743, "documentation": { "docstring": "Verify that all the fields from `Stream.API_FIELDS` and `Subscription.API_FIELDS` present\n in `APIStreamDict` and `APISubscriptionDict`, respectively.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 29, "language": "en" } }, { "id": 184828, "commit_id": 
"ca9492ac569510ce0a7e5387f81e763a99c7359e", "repo": "textual", "path": "src/textual/message_pump.py", "file_name": "message_pump.py", "fun_name": "log", "commit_message": "layout docs", "code": "def log(self) -> Logger:\n \n return self.app._logger\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 7, "token_counts": 14, "n_ast_nodes": 25, "n_identifiers": 5, "d_id": 44823, "documentation": { "docstring": "Get a logger for this object.\n\n Returns:\n Logger: A logger.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 35, "language": "en" } }, { "id": 213059, "commit_id": "a5db070f446b7cfebdaa6ad2e3dcf78f6105a272", "repo": "serverless-application-model", "path": "samtranslator/third_party/py27hash/hash.py", "file_name": "hash.py", "fun_name": "fhash", "commit_message": "fix: Py27hash fix (#2182)\n\n* Add third party py27hash code\r\n\r\n* Add Py27UniStr and unit tests\r\n\r\n* Add py27hash_fix utils and tests\r\n\r\n* Add to_py27_compatible_template and tests\r\n\r\n* Apply py27hash fix to wherever it is needed\r\n\r\n* Apply py27hash fix, all tests pass except api_with_any_method_in_swagger\r\n\r\n* apply py27hash fix in openapi + run black\r\n\r\n* remove py27 testing\r\n\r\n* remove other py27 references\r\n\r\n* black fixes\r\n\r\n* fixes/typos\r\n\r\n* remove py27 from tox.ini\r\n\r\n* refactoring\r\n\r\n* third party notice\r\n\r\n* black\r\n\r\n* Fix py27hash fix to deal with null events\r\n\r\n* Fix Py27UniStr repr for unicode literals\r\n\r\n* black reformat\r\n\r\n* Update _template_has_api_resource to check data type more defensively\r\n\r\n* Apply py27Dict in _get_authorizers\r\n\r\n* Apply Py27Dict to authorizers and gateway responses which will go into swagger\r\n\r\n* Update to_py27_compatible_template to handle parameter_values; Add Py27LongInt class\r\n\r\n* Rename _convert_to_py27_dict to _convert_to_py27_type\r\n\r\n* Apply Py27UniStr to path param name\r\n\r\n* Handle HttpApi resource under to_py27_compatible_template\r\n\r\n* Fix InvalidDocumentException to not sort different exceptions\r\n\r\n* black reformat\r\n\r\n* Remove unnecessary test files\r\n\r\nCo-authored-by: Wing Fung Lau <4760060+hawflau@users.noreply.github.com>", "code": "def fhash(value):\n \n\n fpart = math.modf(value)\n if fpart[0] == 0.0:\n return hash(int(fpart[1]))\n\n v, e = math.frexp(value)\n\n # 2**31\n v *= 2147483648.0\n\n # Top 32 bits\n hipart = int(v)\n\n # Next 32 bits\n v = (v - float(hipart)) * 2147483648.0\n\n x = hipart + int(v) + (e << 15)\n if x == -1:\n x = -2\n\n # Convert to C long type\n return ctypes.c_long(x).value\n", "url": "https://github.com/aws/serverless-application-model.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 182, "n_words": 62, "vocab_size": 42, "complexity": 3, "nloc": 12, "token_counts": 105, "n_ast_nodes": 162, "n_identifiers": 15, "d_id": 53613, "documentation": { "docstring": "\n Returns a Python 2.7 hash for a float.\n\n Logic ported from the 2.7 Python branch: cpython/Objects/object.c\n Method: long _Py_HashDouble(double v)\n\n Args:\n value: input float\n\n Returns:\n Python 2.7 hash\n ", "n_words": 28, "vocab_size": 22, "n_whitespaces": 93, "language": "en" } }, { "id": 261852, "commit_id": "127118c6378168e3d36a1e5d19ede777fd20684f", "repo": "TTS", "path": "TTS/speaker_encoder/models/resnet.py", "file_name": "resnet.py", "fun_name": 
"forward", "commit_message": "Update TTS.tts formatters (#1228)\n\n* Return Dict from tts formatters\r\n\r\n* Make style", "code": "def forward(self, x, l2_norm=False):\n \n with torch.no_grad():\n with torch.cuda.amp.autocast(enabled=False):\n x.squeeze_(1)\n # if you torch spec compute it otherwise use the mel spec computed by the AP\n if self.use_torch_spec:\n x = self.torch_spec(x)\n\n if self.log_input:\n x = (x + 1e-6).log()\n x = self.instancenorm(x).unsqueeze(1)\n\n x = self.conv1(x)\n x = self.relu(x)\n x = self.bn1(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = x.reshape(x.size()[0], -1, x.size()[-1])\n\n w = self.attention(x)\n\n if self.encoder_type == \"SAP\":\n x = torch.sum(x * w, dim=2)\n elif self.encoder_type == \"ASP\":\n mu = torch.sum(x * w, dim=2)\n sg = torch.sqrt((torch.sum((x**2) * w, dim=2) - mu**2).clamp(min=1e-5))\n x = torch.cat((mu, sg), 1)\n\n x = x.view(x.size()[0], -1)\n x = self.fc(x)\n\n if l2_norm:\n x = torch.nn.functional.normalize(x, p=2, dim=1)\n return x\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 416, "n_words": 118, "vocab_size": 68, "complexity": 6, "nloc": 29, "token_counts": 320, "n_ast_nodes": 505, "n_identifiers": 43, "d_id": 77033, "documentation": { "docstring": "Forward pass of the model.\n\n Args:\n x (Tensor): Raw waveform signal or spectrogram frames. If input is a waveform, `torch_spec` must be `True`\n to compute the spectrogram on-the-fly.\n l2_norm (bool): Whether to L2-normalize the outputs.\n\n Shapes:\n - x: :math:`(N, 1, T_{in})` or :math:`(N, D_{spec}, T_{in})`\n ", "n_words": 45, "vocab_size": 38, "n_whitespaces": 114, "language": "en" } }, { "id": 101408, "commit_id": "1022651eb8a7741014f5d2ec7cbfe882120dfa5f", "repo": "faceswap", "path": "tools/preview/preview.py", "file_name": "preview.py", "fun_name": "_faces_from_frames", "commit_message": "Bugfix: convert - Gif Writer\n - Fix non-launch error on Gif Writer\n - convert plugins - linting\n - convert/fs_media/preview/queue_manager - typing\n - Change convert items from dict to Dataclass", "code": "def _faces_from_frames(self) -> None:\n \n logger.debug(\"Extracting faces from frames: Number images: %s\", len(self.source))\n if self.update_source:\n self._crop_source_faces()\n self._crop_destination_faces()\n logger.debug(\"Extracted faces from frames: %s\",\n {k: len(v) for k, v in self._faces.__dict__.items()})\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 94, "n_words": 28, "vocab_size": 24, "complexity": 3, "nloc": 8, "token_counts": 65, "n_ast_nodes": 110, "n_identifiers": 14, "d_id": 20822, "documentation": { "docstring": " Extract the preview faces from the source frames and apply the requisite padding. 
", "n_words": 13, "vocab_size": 11, "n_whitespaces": 14, "language": "en" } }, { "id": 83262, "commit_id": "b0ce4f1bce8031881addecb1e86073483517f392", "repo": "zulip", "path": "zerver/tests/test_push_notifications.py", "file_name": "test_push_notifications.py", "fun_name": "test_deleted_message", "commit_message": "docs: Fix many spelling mistakes.\n\nSigned-off-by: Anders Kaseorg ", "code": "def test_deleted_message(self) -> None:\n \n user_profile = self.example_user(\"hamlet\")\n message = self.get_message(Recipient.PERSONAL, type_id=1)\n UserMessage.objects.create(\n user_profile=user_profile,\n flags=UserMessage.flags.read,\n message=message,\n )\n missed_message = {\n \"message_id\": message.id,\n \"trigger\": \"private_message\",\n }\n # Now, delete the message the normal way\n do_delete_messages(user_profile.realm, [message])\n\n # This mock.patch() should be assertNoLogs once that feature\n # is added to Python.\n with mock.patch(\n \"zerver.lib.push_notifications.uses_notification_bouncer\"\n ) as mock_check, mock.patch(\"logging.error\") as mock_logging_error, mock.patch(\n \"zerver.lib.push_notifications.push_notifications_enabled\", return_value=True\n ) as mock_push_notifications:\n handle_push_notification(user_profile.id, missed_message)\n mock_push_notifications.assert_called_once()\n # Check we didn't proceed through and didn't log anything.\n mock_check.assert_not_called()\n mock_logging_error.assert_not_called()\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 308, "n_words": 78, "vocab_size": 65, "complexity": 1, "nloc": 23, "token_counts": 132, "n_ast_nodes": 229, "n_identifiers": 27, "d_id": 17643, "documentation": { "docstring": "Simulates the race where message is deleted before handling push notifications", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 22218, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/vendor/requirementslib/models/dependencies.py", "file_name": "dependencies.py", "fun_name": "get_abstract_dependencies", "commit_message": "Rename notpip to pip. 
Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def get_abstract_dependencies(reqs, sources=None, parent=None):\n \n\n deps = []\n from .requirements import Requirement\n\n for req in reqs:\n if isinstance(req, shims.InstallRequirement):\n requirement = Requirement.from_line(\"{0}{1}\".format(req.name, req.specifier))\n if req.link:\n requirement.req.link = req.link\n requirement.markers = req.markers\n requirement.req.markers = req.markers\n requirement.extras = req.extras\n requirement.req.extras = req.extras\n elif isinstance(req, Requirement):\n requirement = copy.deepcopy(req)\n else:\n requirement = Requirement.from_line(req)\n dep = AbstractDependency.from_requirement(requirement, parent=parent)\n deps.append(dep)\n return deps\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 225, "n_words": 56, "vocab_size": 40, "complexity": 5, "nloc": 19, "token_counts": 149, "n_ast_nodes": 238, "n_identifiers": 25, "d_id": 4264, "documentation": { "docstring": "Get all abstract dependencies for a given list of requirements.\n\n Given a set of requirements, convert each requirement to an Abstract Dependency.\n\n :param reqs: A list of Requirements\n :type reqs: list[:class:`~requirementslib.models.requirements.Requirement`]\n :param sources: Pipfile-formatted sources, defaults to None\n :param sources: list[dict], optional\n :param parent: The parent of this list of dependencies, defaults to None\n :param parent: :class:`~requirementslib.models.requirements.Requirement`, optional\n :return: A list of Abstract Dependencies\n :rtype: list[:class:`~requirementslib.models.dependency.AbstractDependency`]\n ", "n_words": 66, "vocab_size": 43, "n_whitespaces": 96, "language": "en" } }, { "id": 294023, "commit_id": "653305b998dd033365576db303b32dd5df3a6c54", "repo": "core", "path": "homeassistant/components/plex/media_browser.py", "file_name": "media_browser.py", "fun_name": "special_library_payload", "commit_message": "Support multiple Plex servers in media browser (#68321)", "code": "def special_library_payload(parent_payload, special_type):\n \n title = f\"{special_type} ({parent_payload.title})\"\n special_library_id = f\"{parent_payload.media_content_id}/{special_type}\"\n return BrowseMedia(\n title=title,\n media_class=parent_payload.media_class,\n media_content_id=special_library_id,\n media_content_type=parent_payload.media_content_type,\n can_play=False,\n can_expand=True,\n children_media_class=parent_payload.children_media_class,\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 84, "n_words": 20, "vocab_size": 19, "complexity": 1, "nloc": 12, "token_counts": 54, "n_ast_nodes": 102, "n_identifiers": 12, "d_id": 93067, "documentation": { "docstring": "Create response payload for special library folders.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 215759, "commit_id": "fb825aa760fa0585a2c8fdafc6e62be8aec8cecf", "repo": "salt", "path": "salt/modules/consul.py", "file_name": "consul.py", "fun_name": "acl_update", "commit_message": "[merge jam] Master port 49261 - consul modules (#58101)\n\n* add consul states and acl function present/absent\r\n\r\n* add consul to states doc index\r\n\r\n* refact/fix consul states\r\n\r\n* fix doc, fix states\r\n\r\n* fix name parameter for acl_changes\r\n\r\n* fixing pylint errors\r\n\r\n* small changes 
after review by @rallytime\r\n\r\n* fix header count\r\n\r\n* Update consul.py\r\n\r\n* fix acl_exists description, fix when both id and name are missing\r\n\r\n* Adding some tests for consul module and consul state module. Some additional fixes in the consul module.\r\n\r\n* Fixing tests.\r\n\r\n* Fixing failing tests on Windows.\r\n\r\n* Adding changelog.\r\n\r\n* Adding some tests for consul module and consul state module. Some additional fixes in the consul module.\r\n\r\n* moving tests to pytest.\r\n\r\n* manual black changes.\r\n\r\n* One more manual black change.\r\n\r\n* fixing formatting. Adding versionadded for state module.\r\n\r\nCo-authored-by: Rémi Jouannet \r\nCo-authored-by: Mike Place \r\nCo-authored-by: Daniel Wozniak \r\nCo-authored-by: Wayne Werner ", "code": "def acl_update(consul_url=None, token=None, **kwargs):\n \n ret = {}\n data = {}\n if not consul_url:\n consul_url = _get_config()\n if not consul_url:\n log.error(\"No Consul URL found.\")\n ret[\"message\"] = \"No Consul URL found.\"\n ret[\"res\"] = False\n return ret\n\n if \"id\" in kwargs:\n data[\"ID\"] = kwargs[\"id\"]\n else:\n ret[\"message\"] = 'Required parameter \"id\" is missing.'\n ret[\"res\"] = False\n return ret\n\n if \"name\" in kwargs:\n data[\"Name\"] = kwargs[\"name\"]\n else:\n raise SaltInvocationError('Required argument \"name\" is missing.')\n\n if \"type\" in kwargs:\n data[\"Type\"] = kwargs[\"type\"]\n\n if \"rules\" in kwargs:\n data[\"Rules\"] = kwargs[\"rules\"]\n\n function = \"acl/update\"\n res = _query(\n consul_url=consul_url, token=token, data=data, method=\"PUT\", function=function\n )\n\n if res[\"res\"]:\n ret[\"res\"] = True\n ret[\"message\"] = \"ACL {} created.\".format(kwargs[\"name\"])\n else:\n ret[\"res\"] = False\n ret[\"message\"] = \"Updating ACL {} failed.\".format(kwargs[\"name\"])\n\n return ret\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 313, "n_words": 116, "vocab_size": 63, "complexity": 8, "nloc": 35, "token_counts": 212, "n_ast_nodes": 390, "n_identifiers": 15, "d_id": 54153, "documentation": { "docstring": "\n Update an ACL token.\n\n :param consul_url: The Consul server URL.\n :param name: Meaningful indicator of the ACL's purpose.\n :param id: Unique identifier for the ACL to update.\n :param type: Type is either client or management. A management\n token is comparable to a root user and has the\n ability to perform any action including creating,\n modifying, and deleting ACLs.\n :param rules: The Consul server URL.\n :return: Boolean & message of success or failure.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' consul.acl_update\n\n ", "n_words": 80, "vocab_size": 63, "n_whitespaces": 166, "language": "en" } }, { "id": 159567, "commit_id": "e798bf049f036a5865c14d4343ed8a833864aabe", "repo": "rasa", "path": "rasa/shared/core/trackers.py", "file_name": "trackers.py", "fun_name": "_reset", "commit_message": "convert TrackerActiveLoop to a dataclass", "code": "def _reset(self) -> None:\n \n\n self._reset_slots()\n self._paused = False\n self.latest_action = {}\n self.latest_message = UserUttered.empty()\n self.latest_bot_utterance = BotUttered.empty()\n self.followup_action = ACTION_LISTEN_NAME\n self.active_loop = None\n", "url": "https://github.com/RasaHQ/rasa.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 79, "n_words": 23, "vocab_size": 18, "complexity": 1, "nloc": 9, "token_counts": 52, "n_ast_nodes": 89, "n_identifiers": 13, "d_id": 38339, "documentation": { "docstring": "Reset tracker to initial state - doesn't delete events though!.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 86529, "commit_id": "286bf2ae7ecfdd6698d8fb1cd4753f107159d4d2", "repo": "sentry", "path": "src/sentry/apidocs/spectacular_ports.py", "file_name": "spectacular_ports.py", "fun_name": "resolve_type_hint", "commit_message": "ref: use dict instead of OrderedDict since sentry is >python3.6 (#39695)\n\npartially automated (especially the fixtures) also via `\\(([^]+),\r\n(.*)\\),$` -> `\\1: \\2,`", "code": "def resolve_type_hint(hint) -> Any:\n \n origin, args = _get_type_hint_origin(hint)\n excluded_fields = get_override(hint, \"exclude_fields\", [])\n\n if origin is None and is_basic_type(hint, allow_none=False):\n return build_basic_type(hint)\n elif origin is None and inspect.isclass(hint) and issubclass(hint, tuple):\n # a convoluted way to catch NamedTuple. suggestions welcome.\n if get_type_hints(hint):\n properties = {k: resolve_type_hint(v) for k, v in get_type_hints(hint).items()}\n else:\n properties = {k: build_basic_type(OpenApiTypes.ANY) for k in hint._fields}\n return build_object_type(properties=properties, required=properties.keys())\n elif origin is list or hint is list:\n return build_array_type(\n resolve_type_hint(args[0]) if args else build_basic_type(OpenApiTypes.ANY)\n )\n elif origin is tuple:\n return build_array_type(\n schema=build_basic_type(args[0]),\n max_length=len(args),\n min_length=len(args),\n )\n elif origin is dict or origin is defaultdict:\n schema = build_basic_type(OpenApiTypes.OBJECT)\n if args and args[1] is not typing.Any:\n schema[\"additionalProperties\"] = resolve_type_hint(args[1])\n return schema\n elif origin is set:\n return build_array_type(resolve_type_hint(args[0]))\n elif origin is frozenset:\n return build_array_type(resolve_type_hint(args[0]))\n elif origin is Literal:\n # Literal only works for python >= 3.8 despite typing_extensions, because it\n # behaves slightly different w.r.t. 
__origin__\n schema = {\"enum\": list(args)}\n if all(type(args[0]) is type(choice) for choice in args):\n schema.update(build_basic_type(type(args[0])))\n return schema\n elif inspect.isclass(hint) and issubclass(hint, Enum):\n schema = {\"enum\": [item.value for item in hint]}\n mixin_base_types = [t for t in hint.__mro__ if is_basic_type(t)]\n if mixin_base_types:\n schema.update(build_basic_type(mixin_base_types[0]))\n return schema\n elif isinstance(hint, _TypedDictMeta):\n return build_object_type(\n properties={\n k: resolve_type_hint(v)\n for k, v in get_type_hints(hint).items()\n if k not in excluded_fields\n },\n description=inspect.cleandoc(hint.__doc__ or \"\"),\n required=[h for h in hint.__required_keys__ if h not in excluded_fields],\n )\n elif origin is Union:\n type_args = [arg for arg in args if arg is not type(None)] # noqa: E721\n if len(type_args) > 1:\n schema = {\"oneOf\": [resolve_type_hint(arg) for arg in type_args]}\n else:\n schema = resolve_type_hint(type_args[0])\n if type(None) in args:\n schema[\"nullable\"] = True\n return schema\n elif origin is collections.abc.Iterable:\n return build_array_type(resolve_type_hint(args[0]))\n elif isinstance(hint, typing._TypedDictMeta):\n raise UnableToProceedError(\"Wrong TypedDict class, please use typing_extensions.TypedDict\")\n else:\n raise UnableToProceedError(hint)\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 788, "n_words": 284, "vocab_size": 148, "complexity": 42, "nloc": 67, "token_counts": 569, "n_ast_nodes": 895, "n_identifiers": 63, "d_id": 18119, "documentation": { "docstring": "drf-spectacular library method modified as described above", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 47750, "commit_id": "c3d883a971a8e4e65ccc774891928daaaa0f4442", "repo": "airflow", "path": "tests/providers/cncf/kubernetes/operators/test_kubernetes_pod.py", "file_name": "test_kubernetes_pod.py", "fun_name": "test_mark_checked_unexpected_exception", "commit_message": "KubernetesPodOperator should patch \"already checked\" always (#22734)\n\nWhen not configured to delete pods, at end of task execution the current behavior is to patch the pod as \"already checked\", but only if pod not successful. 
We should also patch when successful so it isn't \"reattached\" to after a task clear.", "code": "def test_mark_checked_unexpected_exception(self, mock_patch_already_checked, mock_delete_pod):\n \n k = KubernetesPodOperator(\n namespace=\"default\",\n image=\"ubuntu:16.04\",\n name=\"test\",\n task_id=\"task\",\n is_delete_operator_pod=False,\n )\n self.await_pod_mock.side_effect = AirflowException(\"oops\")\n context = create_context(k)\n with pytest.raises(AirflowException):\n k.execute(context=context)\n mock_patch_already_checked.assert_called_once()\n mock_delete_pod.assert_not_called()\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 146, "n_words": 24, "vocab_size": 22, "complexity": 1, "nloc": 14, "token_counts": 77, "n_ast_nodes": 133, "n_identifiers": 21, "d_id": 9243, "documentation": { "docstring": "If we aren't deleting pods and have an exception, mark it so we don't reattach to it", "n_words": 17, "vocab_size": 15, "n_whitespaces": 16, "language": "en" } }, { "id": 131087, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tests/aws/test_aws_batch_tag_update.py", "file_name": "test_aws_batch_tag_update.py", "fun_name": "batch_test", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def batch_test(num_threads, delay):\n \n with mock.patch(\n \"ray.autoscaler._private.aws.node_provider.make_ec2_client\"\n ), mock.patch.object(AWSNodeProvider, \"_create_tags\", mock_create_tags):\n provider = AWSNodeProvider(\n provider_config={\"region\": \"nowhere\"}, cluster_name=\"default\"\n )\n provider.batch_counter = 0\n provider.tag_update_counter = 0\n provider.tag_cache = {str(x): {} for x in range(num_threads)}\n\n threads = []\n for x in range(num_threads):\n thread = threading.Thread(\n target=provider.set_node_tags, args=(str(x), {\"foo\": \"bar\"})\n )\n threads.append(thread)\n\n for thread in threads:\n thread.start()\n time.sleep(delay)\n for thread in threads:\n thread.join()\n\n return provider.batch_counter, provider.tag_update_counter\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 239, "n_words": 61, "vocab_size": 43, "complexity": 5, "nloc": 22, "token_counts": 154, "n_ast_nodes": 256, "n_identifiers": 29, "d_id": 29474, "documentation": { "docstring": "Run AWSNodeProvider.set_node_tags in several threads, with a\n specified delay between thread launches.\n\n Return the number of batches of tag updates and the number of tags\n updated.\n ", "n_words": 26, "vocab_size": 22, "n_whitespaces": 38, "language": "en" } }, { "id": 133790, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/agents/ppo/ddppo.py", "file_name": "ddppo.py", "fun_name": "validate_config", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def validate_config(self, config):\n \n # Call (base) PPO's config validation function first.\n # Note that this will not touch or check on the train_batch_size=-1\n # setting.\n super().validate_config(config)\n\n # Error if run on Win.\n if sys.platform in [\"win32\", \"cygwin\"]:\n raise ValueError(\n \"DD-PPO not supported on Win yet! 
\" \"Due to usage of torch.distributed.\"\n )\n\n # Auto-train_batch_size: Calculate from rollout len and\n # envs-per-worker.\n if config[\"train_batch_size\"] == -1:\n config[\"train_batch_size\"] = (\n config[\"rollout_fragment_length\"] * config[\"num_envs_per_worker\"]\n )\n # Users should not define `train_batch_size` directly (always -1).\n else:\n raise ValueError(\n \"Set rollout_fragment_length instead of train_batch_size \" \"for DDPPO.\"\n )\n\n # Only supported for PyTorch so far.\n if config[\"framework\"] != \"torch\":\n raise ValueError(\"Distributed data parallel is only supported for PyTorch\")\n if config[\"torch_distributed_backend\"] not in (\"gloo\", \"mpi\", \"nccl\"):\n raise ValueError(\n \"Only gloo, mpi, or nccl is supported for \"\n \"the backend of PyTorch distributed.\"\n )\n # `num_gpus` must be 0/None, since all optimization happens on Workers.\n if config[\"num_gpus\"]:\n raise ValueError(\n \"When using distributed data parallel, you should set \"\n \"num_gpus=0 since all optimization \"\n \"is happening on workers. Enable GPUs for workers by setting \"\n \"num_gpus_per_worker=1.\"\n )\n # `batch_mode` must be \"truncate_episodes\".\n if config[\"batch_mode\"] != \"truncate_episodes\":\n raise ValueError(\n \"Distributed data parallel requires truncate_episodes \" \"batch mode.\"\n )\n # DDPPO doesn't support KL penalties like PPO-1.\n # In order to support KL penalties, DDPPO would need to become\n # undecentralized, which defeats the purpose of the algorithm.\n # Users can still tune the entropy coefficient to control the\n # policy entropy (similar to controlling the KL penalty).\n if config[\"kl_coeff\"] != 0.0 or config[\"kl_target\"] != 0.0:\n raise ValueError(\"DDPPO doesn't support KL penalties like PPO-1\")\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 743, "n_words": 264, "vocab_size": 168, "complexity": 9, "nloc": 34, "token_counts": 152, "n_ast_nodes": 306, "n_identifiers": 7, "d_id": 30109, "documentation": { "docstring": "Validates the Trainer's config dict.\n\n Args:\n config (TrainerConfigDict): The Trainer's config to check.\n\n Raises:\n ValueError: In case something is wrong with the config.\n ", "n_words": 23, "vocab_size": 19, "n_whitespaces": 66, "language": "en" } }, { "id": 268902, "commit_id": "b96f9fdc2d5d0d375809ad9c16608830b01fc59a", "repo": "keras", "path": "keras/metrics/metrics.py", "file_name": "metrics.py", "fun_name": "binary_accuracy", "commit_message": "fix sample_weight for BinAcc", "code": "def binary_accuracy(y_true, y_pred, threshold=0.5):\n \n y_pred = tf.convert_to_tensor(y_pred)\n threshold = tf.cast(threshold, y_pred.dtype)\n y_pred = tf.cast(y_pred > threshold, y_pred.dtype)\n return tf.cast(tf.equal(y_true, y_pred), tf.int8)\n\n\n@keras_export('keras.metrics.categorical_accuracy')\n@tf.__internal__.dispatch.add_dispatch_support", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export('keras.metrics.categorical_accuracy')\n@tf.__internal__.dispatch.add_dispatch_support", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 26, "n_words": 23, "vocab_size": 19, "complexity": 1, "nloc": 5, "token_counts": 66, "n_ast_nodes": 121, "n_identifiers": 14, "d_id": 79763, "documentation": { "docstring": "Calculates how often predictions match binary labels.\n\n Standalone usage:\n >>> y_true = [[1], [1], [0], [0]]\n >>> y_pred = [[1], [1], [0], [0]]\n >>> m = 
tf.keras.metrics.binary_accuracy(y_true, y_pred)\n >>> assert m.shape == (4,)\n >>> m.numpy()\n array([1., 1., 1., 1.], dtype=float32)\n\n Args:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n threshold: (Optional) Float representing the threshold for deciding whether\n prediction values are 1 or 0.\n\n Returns:\n Binary accuracy values. shape = `[batch_size, d0, .. dN]`\n ", "n_words": 86, "vocab_size": 61, "n_whitespaces": 113, "language": "en" } }, { "id": 305137, "commit_id": "23090cb8a268b3f268aefa8477f30af88bf46051", "repo": "core", "path": "homeassistant/components/influxdb/sensor.py", "file_name": "sensor.py", "fun_name": "update", "commit_message": "Improve entity type hints [i] (#77529)", "code": "def update(self) -> None:\n \n self.data.update()\n if (value := self.data.value) is None:\n value = STATE_UNKNOWN\n if self._value_template is not None:\n value = self._value_template.render_with_possible_json_value(\n str(value), STATE_UNKNOWN\n )\n\n self._state = value\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 111, "n_words": 28, "vocab_size": 19, "complexity": 3, "nloc": 10, "token_counts": 59, "n_ast_nodes": 96, "n_identifiers": 9, "d_id": 103929, "documentation": { "docstring": "Get the latest data from Influxdb and updates the states.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 221146, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/bdb.py", "file_name": "bdb.py", "fun_name": "get_stack", "commit_message": "add python 3.10.4 for windows", "code": "def get_stack(self, f, t):\n \n stack = []\n if t and t.tb_frame is f:\n t = t.tb_next\n while f is not None:\n stack.append((f, f.f_lineno))\n if f is self.botframe:\n break\n f = f.f_back\n stack.reverse()\n i = max(0, len(stack) - 1)\n while t is not None:\n stack.append((t.tb_frame, t.tb_lineno))\n t = t.tb_next\n if f is None:\n i = max(0, len(stack) - 1)\n return stack, i\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 216, "n_words": 61, "vocab_size": 33, "complexity": 7, "nloc": 17, "token_counts": 124, "n_ast_nodes": 195, "n_identifiers": 16, "d_id": 56241, "documentation": { "docstring": "Return a list of (frame, lineno) in a stack trace and a size.\n\n List starts with original calling frame, if there is one.\n Size may be number of frames above or below f.\n ", "n_words": 33, "vocab_size": 30, "n_whitespaces": 54, "language": "en" } }, { "id": 176498, "commit_id": "f6755ffa00211b523c6c0bec5398bc6c3c43c8b1", "repo": "networkx", "path": "networkx/readwrite/graph6.py", "file_name": "graph6.py", "fun_name": "_generate_graph6_bytes", "commit_message": "Update black (#5438)\n\n* CI: sync up black dev requirements version with precommit\r\n\r\n* Run black\r\n\r\nCo-authored-by: Jarrod Millman ", "code": "def _generate_graph6_bytes(G, nodes, header):\n \n n = len(G)\n if n >= 2**36:\n raise ValueError(\n \"graph6 is only defined if number of nodes is less \" \"than 2 ** 36\"\n )\n if header:\n yield b\">>graph6<<\"\n for d in n_to_data(n):\n yield str.encode(chr(d + 63))\n # This generates the same as `(v in G[u] for u, v in combinations(G, 2))`,\n # but in \"column-major\" order instead of \"row-major\" order.\n bits = (nodes[j] in G[nodes[i]] for j 
in range(1, n) for i in range(j))\n chunk = list(islice(bits, 6))\n while chunk:\n d = sum(b << 5 - i for i, b in enumerate(chunk))\n yield str.encode(chr(d + 63))\n chunk = list(islice(bits, 6))\n yield b\"\\n\"\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 200, "n_words": 107, "vocab_size": 75, "complexity": 8, "nloc": 17, "token_counts": 147, "n_ast_nodes": 237, "n_identifiers": 22, "d_id": 41937, "documentation": { "docstring": "Yield bytes in the graph6 encoding of a graph.\n\n `G` is an undirected simple graph. `nodes` is the list of nodes for\n which the node-induced subgraph will be encoded; if `nodes` is the\n list of all nodes in the graph, the entire graph will be\n encoded. `header` is a Boolean that specifies whether to generate\n the header ``b'>>graph6<<'`` before the remaining data.\n\n This function generates `bytes` objects in the following order:\n\n 1. the header (if requested),\n 2. the encoding of the number of nodes,\n 3. each character, one-at-a-time, in the encoding of the requested\n node-induced subgraph,\n 4. a newline character.\n\n This function raises :exc:`ValueError` if the graph is too large for\n the graph6 format (that is, greater than ``2 ** 36`` nodes).\n\n ", "n_words": 122, "vocab_size": 77, "n_whitespaces": 167, "language": "en" } }, { "id": 107135, "commit_id": "ec4dfbc3c83866f487ff0bc9c87b0d43a1c02b22", "repo": "matplotlib", "path": "lib/matplotlib/figure.py", "file_name": "figure.py", "fun_name": "set_constrained_layout", "commit_message": "ENH: implement and use base layout_engine for more flexible layout.", "code": "def set_constrained_layout(self, constrained):\n \n if constrained is None:\n constrained = mpl.rcParams['figure.constrained_layout.use']\n _constrained = bool(constrained)\n _parameters = constrained if isinstance(constrained, dict) else {}\n if _constrained:\n self.set_layout_engine(ConstrainedLayoutEngine(**_parameters))\n self.stale = True\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 91, "n_words": 27, "vocab_size": 20, "complexity": 4, "nloc": 8, "token_counts": 58, "n_ast_nodes": 96, "n_identifiers": 13, "d_id": 22600, "documentation": { "docstring": "\n Set whether ``constrained_layout`` is used upon drawing. If None,\n :rc:`figure.constrained_layout.use` value will be used.\n\n When providing a dict containing the keys ``w_pad``, ``h_pad``\n the default ``constrained_layout`` paddings will be\n overridden. 
These pads are in inches and default to 3.0/72.0.\n ``w_pad`` is the width padding and ``h_pad`` is the height padding.\n\n See :doc:`/tutorials/intermediate/constrainedlayout_guide`.\n\n Parameters\n ----------\n constrained : bool or dict or None\n ", "n_words": 61, "vocab_size": 48, "n_whitespaces": 140, "language": "en" } }, { "id": 22541, "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", "repo": "Python", "path": "XORcipher/XOR_cipher.py", "file_name": "XOR_cipher.py", "fun_name": "decrypt_file", "commit_message": "refactor: clean code\n\nSigned-off-by: slowy07 ", "code": "def decrypt_file(self, file, key):\n \n\n # precondition\n assert isinstance(file, str) and isinstance(key, int)\n\n try:\n with open(file, \"r\") as fin:\n with open(\"decrypt.out\", \"w+\") as fout:\n # actual encrypt-process\n for line in fin:\n fout.write(self.decrypt_string(line, key))\n\n except:\n return False\n\n return True\n\n\n# Tests\n# crypt = XORCipher()\n# key = 67\n\n# # test enrcypt\n# print crypt.encrypt(\"hallo welt\",key)\n# # test decrypt\n# print crypt.decrypt(crypt.encrypt(\"hallo welt\",key), key)\n\n# # test encrypt_string\n# print crypt.encrypt_string(\"hallo welt\",key)\n\n# # test decrypt_string\n# print crypt.decrypt_string(crypt.encrypt_string(\"hallo welt\",key),key)\n\n# if (crypt.encrypt_file(\"test.txt\",key)):\n# \tprint \"encrypt successful\"\n# else:\n# \tprint \"encrypt unsuccessful\"\n\n# if (crypt.decrypt_file(\"encrypt.out\",key)):\n# \tprint \"decrypt successful\"\n# else:\n# \tprint \"decrypt unsuccessful\"\n", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 227, "n_words": 106, "vocab_size": 60, "complexity": 4, "nloc": 10, "token_counts": 70, "n_ast_nodes": 141, "n_identifiers": 13, "d_id": 4357, "documentation": { "docstring": "\n input: filename (str) and a key (int)\n output: returns true if decrypt process was\n successful otherwise false\n if key not passed the method uses the key by the constructor.\n otherwise key = 1\n ", "n_words": 33, "vocab_size": 26, "n_whitespaces": 76, "language": "en" } }, { "id": 271870, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training_utils_v1.py", "file_name": "training_utils_v1.py", "fun_name": "cast_if_floating_dtype_and_mismatch", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def cast_if_floating_dtype_and_mismatch(targets, outputs):\n \n if tf.is_tensor(targets):\n # There is one target, so output[0] should be the only output.\n return cast_single_tensor(targets, dtype=outputs[0].dtype)\n new_targets = []\n for target, out in zip(targets, outputs):\n if isinstance(target, np.ndarray):\n target = tf.convert_to_tensor(target)\n if target.dtype != out.dtype:\n new_targets.append(cast_single_tensor(target, dtype=out.dtype))\n else:\n new_targets.append(target)\n return new_targets\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 128, "n_words": 45, "vocab_size": 38, "complexity": 5, "nloc": 12, "token_counts": 98, "n_ast_nodes": 155, "n_identifiers": 16, "d_id": 80881, "documentation": { "docstring": "Returns target data tensors using correct datatype.\n\n Checks that each target and output pair are the same datatype. 
If not, casts\n the target to the output's datatype.\n\n Args:\n targets: tensor or list of targets.\n outputs: tensor or list of outputs.\n\n Returns:\n Targets in appropriate datatype.\n ", "n_words": 45, "vocab_size": 34, "n_whitespaces": 75, "language": "en" } }, { "id": 109212, "commit_id": "140257e3ac710450c97587f95532a84a28cc526c", "repo": "matplotlib", "path": "lib/matplotlib/textpath.py", "file_name": "textpath.py", "fun_name": "_get_font", "commit_message": "ENH: add font fallback support to svg", "code": "def _get_font(self, prop):\n \n filenames = _fontManager._find_fonts_by_props(prop)\n font = get_font(filenames)\n font.set_size(self.FONT_SCALE, self.DPI)\n return font\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 48, "n_words": 13, "vocab_size": 11, "complexity": 1, "nloc": 5, "token_counts": 36, "n_ast_nodes": 59, "n_identifiers": 11, "d_id": 23478, "documentation": { "docstring": "\n Find the `FT2Font` matching font properties *prop*, with its size set.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 26, "language": "en" } }, { "id": 285746, "commit_id": "5bc7bc070ed7c9051b1277d0db21299fd310d42b", "repo": "OpenBBTerminal", "path": "openbb_terminal/cryptocurrency/overview/overview_controller.py", "file_name": "overview_controller.py", "fun_name": "call_hm", "commit_message": "Tests over API Best Practices (#2344)\n\n* Started new tests\r\n\r\n* Added special function\r\n\r\n* Improved dict\r\n\r\n* Finished new test\r\n\r\n* Got bad functions to 75\r\n\r\n* Got bad func to 73\r\n\r\n* Bad func down to 60\r\n\r\n* Bad func down to 50\r\n\r\n* Bad func down to 35\r\n\r\n* Got bad func to 30\r\n\r\n* No more bad functions\r\n\r\n* Added tests\r\n\r\n* Added fix\r\n\r\n* Fixed some tests\r\n\r\n* Fixed some tests\r\n\r\n* Fixed some tests\r\n\r\n* Fixed some tests\r\n\r\n* Added tests to CI\r\n\r\n* Fixed CI tests\r\n\r\n* Fixed CI tests\r\n\r\n* Imrproved CI tests\r\n\r\n* Fixed reports\r\n\r\n* Fixed reports\r\n\r\n* Added stuff\r\n\r\n* Removed CI\r\n\r\n* Fixed\r\n\r\n* Fixed some typing'\r\n\r\nCo-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com>", "code": "def call_hm(self, other_args):\n \n parser = argparse.ArgumentParser(\n prog=\"hm\",\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description=,\n )\n\n parser.add_argument(\n \"-l\",\n \"--limit\",\n dest=\"limit\",\n type=int,\n help=\"Display N items\",\n default=10,\n )\n parser.add_argument(\n \"-c\",\n \"--category\",\n default=\"\",\n dest=\"category\",\n help=\"Category (e.g., stablecoins). 
Empty for no category\",\n )\n if other_args and not other_args[0][0] == \"-\":\n other_args.insert(0, \"-c\")\n\n ns_parser = self.parse_known_args_and_warn(\n parser, other_args, EXPORT_ONLY_FIGURES_ALLOWED\n )\n if ns_parser:\n pycoingecko_view.display_crypto_heatmap(\n category=ns_parser.category,\n limit=ns_parser.limit,\n export=ns_parser.export,\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 388, "n_words": 57, "vocab_size": 50, "complexity": 4, "nloc": 37, "token_counts": 139, "n_ast_nodes": 225, "n_identifiers": 26, "d_id": 85414, "documentation": { "docstring": "Process hm commandDisplay cryptocurrencies heatmap with daily percentage change [Source: https://coingecko.com]\n Accepts --category or -c to display only coins of a certain category\n (default no category to display all coins ranked by market cap).\n You can look on only top N number of records with --limit.\n ", "n_words": 46, "vocab_size": 39, "n_whitespaces": 90, "language": "en" } }, { "id": 86879, "commit_id": "941184cd24186324fd9f7f304b7f713041834726", "repo": "sentry", "path": "src/sentry/utils/audit.py", "file_name": "audit.py", "fun_name": "create_system_audit_entry", "commit_message": "chore(hybrid-cloud): AuditLogEntry is a control silo model now (#39890)\n\nIn the control silo, creating an audit log entry writes to the db\r\ndirectly, whilst in region silo mode creating an audit log entry will\r\ninstead push to a new kafka producer that consumes into the control silo\r\nasynchronously.", "code": "def create_system_audit_entry(transaction_id=None, logger=None, **kwargs):\n \n entry = AuditLogEntry(actor_label=\"Sentry\", **kwargs)\n if entry.event is not None:\n entry.save_or_write_to_kafka()\n\n extra = {\n \"organization_id\": entry.organization_id,\n \"object_id\": entry.target_object,\n \"entry_id\": entry.id,\n \"actor_label\": entry.actor_label,\n }\n if transaction_id is not None:\n extra[\"transaction_id\"] = transaction_id\n\n if logger:\n # Only use the api_name for the logger message when the entry\n # is a real AuditLogEntry record\n if entry.event is not None:\n logger.info(audit_log.get(entry.event).api_name, extra=extra)\n else:\n logger.info(entry, extra=extra)\n\n return entry\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 182, "n_words": 66, "vocab_size": 46, "complexity": 5, "nloc": 18, "token_counts": 123, "n_ast_nodes": 202, "n_identifiers": 17, "d_id": 18182, "documentation": { "docstring": "\n Creates an audit log entry for events that are triggered by Sentry's\n systems and do not have an associated Sentry user as the \"actor\".\n ", "n_words": 24, "vocab_size": 23, "n_whitespaces": 34, "language": "en" } }, { "id": 190076, "commit_id": "9d1f066d637cb15baea10e6907ab85efff8fb36f", "repo": "manim", "path": "manim/utils/tex_file_writing.py", "file_name": "tex_file_writing.py", "fun_name": "convert_to_svg", "commit_message": "Migrate more `os.path` to `pathlib` (#2980)\n\n* Migrate more `os.path` to `pathlib`\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fix type errors with recent pathlib code\r\n\r\n* pathlib fixes\r\n\r\n* more pathlib fixes\r\n\r\n* remove unused imports introduced by pathlib migration\r\n\r\n* convert `open()` calls to pathlib\r\n\r\n* [pre-commit.ci] auto fixes from 
pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Migrate tex_file_writing to pathlib\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* converted more old code to pathlib, and fixed a bug in module_ops\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fix test failures\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fix test failures\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Benjamin Hackl \r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Benjamin Hackl ", "code": "def convert_to_svg(dvi_file, extension, page=1):\n \n result = dvi_file.with_suffix(\".svg\")\n if not result.exists():\n commands = [\n \"dvisvgm\",\n \"--pdf\" if extension == \".pdf\" else \"\",\n \"-p \" + str(page),\n f'\"{dvi_file}\"',\n \"-n\",\n \"-v 0\",\n \"-o \" + f'\"{result}\"',\n \">\",\n os.devnull,\n ]\n os.system(\" \".join(commands))\n\n # if the file does not exist now, this means conversion failed\n if not result.exists():\n raise ValueError(\n f\"Your installation does not support converting {dvi_file.suffix} files to SVG.\"\n f\" Consider updating dvisvgm to at least version 2.4.\"\n f\" If this does not solve the problem, please refer to our troubleshooting guide at:\"\n f\" https://docs.manim.community/en/stable/installation/troubleshooting.html\",\n )\n\n return result\n\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 290, "n_words": 94, "vocab_size": 74, "complexity": 4, "nloc": 23, "token_counts": 101, "n_ast_nodes": 192, "n_identifiers": 15, "d_id": 46312, "documentation": { "docstring": "Converts a .dvi, .xdv, or .pdf file into an svg using dvisvgm.\n\n Parameters\n ----------\n dvi_file : :class:`Path`\n File name of the input file to be converted.\n extension : :class:`str`\n String containing the file extension and thus indicating the file type, e.g. 
``.dvi`` or ``.pdf``\n page : Optional[:class:`int`], optional\n Page to be converted if input file is multi-page.\n\n Returns\n -------\n :class:`Path`\n Path to generated SVG file.\n ", "n_words": 65, "vocab_size": 50, "n_whitespaces": 120, "language": "en" } }, { "id": 226746, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_frame.py", "file_name": "_frame.py", "fun_name": "group", "commit_message": "switch to black .22", "code": "def group(self):\n \n return self[\"group\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 58419, "documentation": { "docstring": "\n An identifier that specifies the group to which the frame\n belongs, used by animate to select a subset of frames.\n\n The 'group' property is a string and must be specified as:\n - A string\n - A number that will be converted to a string\n\n Returns\n -------\n str\n ", "n_words": 47, "vocab_size": 36, "n_whitespaces": 115, "language": "en" } }, { "id": 162533, "commit_id": "db2e129ca0c11de84d57b2298dffd5d87e852518", "repo": "yt-dlp", "path": "yt_dlp/options.py", "file_name": "options.py", "fun_name": "_match_long_opt", "commit_message": "[options] Better ambiguous option resolution\n\nEg: `--write-auto` no longer results in\n> ambiguous option: --write-auto (--write-auto-subs, --write-automatic-subs?)", "code": "def _match_long_opt(self, opt):\n \n try:\n return super()._match_long_opt(opt)\n except optparse.AmbiguousOptionError as e:\n if len(set(self._long_opt[p] for p in e.possibilities)) == 1:\n return e.possibilities[0]\n raise\n\n", "url": "https://github.com/yt-dlp/yt-dlp.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 90, "n_words": 21, "vocab_size": 20, "complexity": 4, "nloc": 7, "token_counts": 56, "n_ast_nodes": 91, "n_identifiers": 12, "d_id": 39211, "documentation": { "docstring": "Improve ambigious argument resolution by comparing option objects instead of argument strings", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 275142, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/mixed_precision/policy.py", "file_name": "policy.py", "fun_name": "global_policy", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def global_policy():\n \n if _global_policy is None:\n if base_layer_utils.v2_dtype_behavior_enabled():\n return Policy(backend.floatx())\n else:\n return Policy(\"_infer\")\n return _global_policy\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 60, "n_words": 15, "vocab_size": 11, "complexity": 3, "nloc": 7, "token_counts": 35, "n_ast_nodes": 64, "n_identifiers": 7, "d_id": 81317, "documentation": { "docstring": "Returns the global dtype policy.\n\n The global policy is the default `tf.keras.mixed_precision.Policy` used for\n layers, if no policy is passed to the layer constructor. 
If no policy has been\n set with `keras.mixed_precision.set_global_policy`, this will return a policy\n constructed from `tf.keras.backend.floatx()` (floatx defaults to float32).\n\n >>> tf.keras.mixed_precision.global_policy()\n \n >>> tf.keras.layers.Dense(10).dtype_policy # Defaults to the global policy\n \n\n If TensorFlow 2 behavior has been disabled with\n `tf.compat.v1.disable_v2_behavior()`, this will instead return a special\n \"_infer\" policy which infers the dtype from the dtype of the first input the\n first time the layer is called. This behavior matches the behavior that\n existed in TensorFlow 1.\n\n See `tf.keras.mixed_precision.Policy` for more information on policies.\n\n Returns:\n The global Policy.\n ", "n_words": 114, "vocab_size": 70, "n_whitespaces": 168, "language": "en" } }, { "id": 22659, "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", "repo": "Python", "path": "linear-algebra-python/src/lib.py", "file_name": "lib.py", "fun_name": "__eq__", "commit_message": "refactor: clean code\n\nSigned-off-by: slowy07 ", "code": "def __eq__(self, other):\n \n ans = True\n SIZE = self.size()\n if SIZE == other.size():\n for i in range(SIZE):\n if self.__components[i] != other.component(i):\n ans = False\n break\n else:\n ans = False\n return ans\n\n", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 148, "n_words": 31, "vocab_size": 22, "complexity": 4, "nloc": 11, "token_counts": 61, "n_ast_nodes": 101, "n_identifiers": 10, "d_id": 4388, "documentation": { "docstring": "\n returns true if the vectors are equal otherwise false.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 259040, "commit_id": "f1d3417b086550be670cbfbb5b3c1760ac99203f", "repo": "scikit-learn", "path": "sklearn/feature_extraction/text.py", "file_name": "text.py", "fun_name": "fit", "commit_message": "MNT Drops Python 3.7 in CI, wheel building, and docs (#22617)\n\n* MNT Drops Python 3.7\r\n\r\n* MNT Bump NumPy and SciPy\r\n\r\n* FIX Fix build\r\n\r\n* FIX Bump versions improved\r\n\r\n* DOC Fixes numpy version [pypy]\r\n\r\n* BLD [pypy] [icc-build]\r\n\r\n* Update docs\r\n\r\n* MAINT use scipy.optimize.LinearConstraint in test\r\n\r\n* MAINT scipy 1.1.0 related code clean-up\r\n\r\n* scipy>=1.3.2 in pyproject.toml's build deps\r\n\r\n* [cd build]\r\n\r\n* DOC Adds comment about pypy\r\n\r\n* MAINT remove _astype_copy_false\r\n\r\n* FIX Update check for python version in setup.py\r\n\r\nCo-authored-by: Olivier Grisel ", "code": "def fit(self, X, y=None):\n \n # large sparse data is not supported for 32bit platforms because\n # _document_frequency uses np.bincount which works on arrays of\n # dtype NPY_INTP which is int32 for 32bit platforms. 
See #20923\n X = self._validate_data(\n X, accept_sparse=(\"csr\", \"csc\"), accept_large_sparse=not _IS_32BIT\n )\n if not sp.issparse(X):\n X = sp.csr_matrix(X)\n dtype = X.dtype if X.dtype in FLOAT_DTYPES else np.float64\n\n if self.use_idf:\n n_samples, n_features = X.shape\n df = _document_frequency(X)\n df = df.astype(dtype, copy=False)\n\n # perform idf smoothing if required\n df += int(self.smooth_idf)\n n_samples += int(self.smooth_idf)\n\n # log+1 instead of log makes sure terms with zero idf don't get\n # suppressed entirely.\n idf = np.log(n_samples / df) + 1\n self._idf_diag = sp.diags(\n idf,\n offsets=0,\n shape=(n_features, n_features),\n format=\"csr\",\n dtype=dtype,\n )\n\n return self\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 407, "n_words": 119, "vocab_size": 87, "complexity": 4, "nloc": 22, "token_counts": 156, "n_ast_nodes": 249, "n_identifiers": 31, "d_id": 75536, "documentation": { "docstring": "Learn the idf vector (global term weights).\n\n Parameters\n ----------\n X : sparse matrix of shape n_samples, n_features)\n A matrix of term/token counts.\n\n y : None\n This parameter is not needed to compute tf-idf.\n\n Returns\n -------\n self : object\n Fitted transformer.\n ", "n_words": 40, "vocab_size": 36, "n_whitespaces": 129, "language": "en" } }, { "id": 66329, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/loan_management/doctype/loan_repayment/loan_repayment.py", "file_name": "loan_repayment.py", "fun_name": "get_penalty_details", "commit_message": "style: format code with black", "code": "def get_penalty_details(against_loan):\n\tpenalty_details = frappe.db.sql(\n\t\t,\n\t\t(against_loan, against_loan),\n\t)\n\n\tif penalty_details:\n\t\treturn penalty_details[0][0], flt(penalty_details[0][1])\n\telse:\n\t\treturn None, 0\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 9, "n_words": 18, "vocab_size": 17, "complexity": 2, "nloc": 13, "token_counts": 50, "n_ast_nodes": 77, "n_identifiers": 7, "d_id": 14167, "documentation": { "docstring": "\n\t\tSELECT posting_date, (penalty_amount - total_penalty_paid) as pending_penalty_amount\n\t\tFROM `tabLoan Repayment` where posting_date >= (SELECT MAX(posting_date) from `tabLoan Repayment`\n\t\twhere against_loan = %s) and docstatus = 1 and against_loan = %s\n\t", "n_words": 30, "vocab_size": 23, "n_whitespaces": 27, "language": "en" } }, { "id": 283210, "commit_id": "ab4de1dd70fba866930150e440a03e461a6ca6a8", "repo": "OpenBBTerminal", "path": "build/pyinstaller/vaderSentiment/vaderSentiment.py", "file_name": "vaderSentiment.py", "fun_name": "polarity_scores", "commit_message": "Create a packaged app bundle with Pyinstaller (#1525)\n\n* Add dashboard widget assets\r\n\r\n* Add ipywidgets and ipyflex to project\r\n\r\n* Add currencies dashboard notebook\r\n\r\n* Update docs and docstrings\r\n\r\n* Add pyinstaller to project deps\r\n\r\n* Add pyinstaller artifacts to gitignore\r\n\r\n* Fix linter errors in terminal.py\r\n\r\n* Update cspell hook and action with a pyinstaller specific word\r\n\r\n* Add pyinstaller specfile and artifacts\r\n\r\n* Add splashscreen image\r\n\r\n* Add app icon\r\n\r\n* adding splash screen support to terminal.spec and terminal.py\r\n\r\n* Restore the conda env build files\r\n\r\n* Sync deps\r\n\r\n* Add border to the splashscreen 
image\r\n\r\n* Clean up terminal launcher\r\n\r\n* Add support for default feature flags in packages apps\r\n\r\n* Fix types and linting\r\n\r\n* Add splashscreen management to app bootup\r\n\r\n* Check prediction feature flag when entering crypto/pred\r\n\r\n* Update pyinstaller spec file\r\n\r\n* fix .spec file to work for splash and icon - removed the \"..\"\r\n\r\n* Allows to export when using installer (#1568)\r\n\r\n* fix export for packaged apps\r\n\r\n* fix filename\r\n\r\n* Git : replace commit_hash when it is set in config_terminal\r\n\r\n* Add update of the git commit hash in gtff default during build\r\n\r\n* Add packaged app name and feature flag to logs\r\n\r\n* Add platform specific icon assignment\r\n\r\n* Add macOS build assets\r\n\r\n* Add tensorflow to hidden imports\r\n\r\n* Move LOGGING_COMMIT_HASH to gtff\r\n\r\n* Adding files/folders needed to .spec and pyinstaller folder. This will make certain commands work again.\r\n\r\n* Linting\r\n\r\n* Workflow : ignore ./build/pyinstaller from codespell\r\n\r\n* Workflow : exclude ./build/pyinstaller from flake8\r\n\r\n* Poetry + Workflow : add types-six\r\n\r\n* Pyinstaller : remove property_cached, user_agent and vaderSentiment\r\n\r\n* Revert \"Pyinstaller : remove property_cached, user_agent and vaderSentiment\"\r\n\r\nThis reverts commit dbb3e2b81086f97819ebd21457148c7160a4d703.\r\n\r\n* Clean up local paths in specfile\r\n\r\n* Validate deps have correct Jinja version (they do)\r\n\r\n* Fix logging commit hash to be set correctly for the logger to see it\r\n\r\nCo-authored-by: Andrew \r\nCo-authored-by: didierlopes.eth \r\nCo-authored-by: Chavithra PARANA ", "code": "def polarity_scores(self, text):\n \n # convert emojis to their textual descriptions\n text_no_emoji = \"\"\n prev_space = True\n for chr in text:\n if chr in self.emojis:\n # get the textual description\n description = self.emojis[chr]\n if not prev_space:\n text_no_emoji += \" \"\n text_no_emoji += description\n prev_space = False\n else:\n text_no_emoji += chr\n prev_space = chr == \" \"\n text = text_no_emoji.strip()\n\n sentitext = SentiText(text)\n\n sentiments = []\n words_and_emoticons = sentitext.words_and_emoticons\n for i, item in enumerate(words_and_emoticons):\n valence = 0\n # check for vader_lexicon words that may be used as modifiers or negations\n if item.lower() in BOOSTER_DICT:\n sentiments.append(valence)\n continue\n if (\n i < len(words_and_emoticons) - 1\n and item.lower() == \"kind\"\n and words_and_emoticons[i + 1].lower() == \"of\"\n ):\n sentiments.append(valence)\n continue\n\n sentiments = self.sentiment_valence(valence, sentitext, item, i, sentiments)\n\n sentiments = self._but_check(words_and_emoticons, sentiments)\n\n valence_dict = self.score_valence(sentiments, text)\n\n return valence_dict\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 539, "n_words": 131, "vocab_size": 82, "complexity": 9, "nloc": 33, "token_counts": 186, "n_ast_nodes": 309, "n_identifiers": 25, "d_id": 84470, "documentation": { "docstring": "\n Return a float for sentiment strength based on the input text.\n Positive values are positive valence, negative value are negative\n valence.\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 50, "language": "en" } }, { "id": 154155, "commit_id": "9bf8d57ca44e22fd69b0abc55793cf60c199ab4d", "repo": "modin", "path": 
"modin/core/execution/dask/implementations/pandas_on_dask/partitioning/virtual_partition.py", "file_name": "virtual_partition.py", "fun_name": "list_of_ips", "commit_message": "FIX-#4676: drain sub-virtual-partition call queues. (#4695)\n\nSigned-off-by: mvashishtha \n\nCo-authored-by: Alexey Prutskov ", "code": "def list_of_ips(self):\n \n # Defer draining call queue until we get the ip address\n result = [None] * len(self.list_of_block_partitions)\n for idx, partition in enumerate(self.list_of_block_partitions):\n partition.drain_call_queue()\n result[idx] = partition._ip_cache\n return result\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 86, "n_words": 29, "vocab_size": 27, "complexity": 2, "nloc": 6, "token_counts": 45, "n_ast_nodes": 75, "n_identifiers": 10, "d_id": 35815, "documentation": { "docstring": "\n Get the IPs holding the physical objects composing this partition.\n\n Returns\n -------\n List\n A list of IPs as ``distributed.Future`` or str.\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 68, "language": "en" } }, { "id": 266140, "commit_id": "0bcc59a1e99065c1c0a143983fed4d0828d744f4", "repo": "netbox", "path": "netbox/extras/models/models.py", "file_name": "models.py", "fun_name": "set_status", "commit_message": "#8366: Add started field to JobResult", "code": "def set_status(self, status):\n \n self.status = status\n if status in JobResultStatusChoices.TERMINAL_STATE_CHOICES:\n self.completed = timezone.now()\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 45, "n_words": 13, "vocab_size": 11, "complexity": 2, "nloc": 4, "token_counts": 29, "n_ast_nodes": 49, "n_identifiers": 8, "d_id": 78305, "documentation": { "docstring": "\n Helper method to change the status of the job result. 
If the target status is terminal, the completion\n time is also set.\n ", "n_words": 22, "vocab_size": 17, "n_whitespaces": 44, "language": "en" } }, { "id": 267380, "commit_id": "86779cc90376ea70bafa7044b12ce5132409fd63", "repo": "ansible", "path": "test/lib/ansible_test/_internal/encoding.py", "file_name": "encoding.py", "fun_name": "to_text", "commit_message": "ansible-test - Code cleanup.\n\nThis helps prepare for a future pylint upgrade.", "code": "def to_text(value, errors='strict'): # type: (t.AnyStr, str) -> str\n \n if isinstance(value, bytes):\n return value.decode(ENCODING, errors)\n\n if isinstance(value, str):\n return value\n\n raise Exception('value is not bytes or text: %s' % type(value))\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 57, "n_words": 30, "vocab_size": 27, "complexity": 3, "nloc": 6, "token_counts": 47, "n_ast_nodes": 79, "n_identifiers": 10, "d_id": 78870, "documentation": { "docstring": "Return the given value as text decoded using UTF-8 if not already text.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 194798, "commit_id": "9d77adb86f707967b88d93964d8056a8bd5c84ec", "repo": "ParlAI", "path": "parlai/nn/lr_scheduler.py", "file_name": "lr_scheduler.py", "fun_name": "_is_lr_warming_up", "commit_message": "Warmup updates bug for LR < 1 (#4384)\n\n* revert bug\r\n\r\n* relax restrictions\r\n\r\n* even more relaxed :/", "code": "def _is_lr_warming_up(self):\n \n return (\n self.warmup_scheduler is not None\n and self._number_training_updates < self.warmup_updates\n )\n", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 56, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 5, "token_counts": 23, "n_ast_nodes": 38, "n_identifiers": 5, "d_id": 47089, "documentation": { "docstring": "\n Check if we're warming up the learning rate.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 136465, "commit_id": "784e66b1f2265aeb9cb9a4e2404a8ac274abce3b", "repo": "ray", "path": "python/ray/serve/handle.py", "file_name": "handle.py", "fun_name": "is_same_loop", "commit_message": "[all_tests][python] Remove calling of get_event_loop from python version >= 3.10 (#29285)\n\nget_event_loop is deprecated in 3.10. This PR removes its invocation with python >= 3.10 by introducing a proxy function\r\nget_or_create_event_loop in utils.py. 
More details please see the function comments.\r\n\r\nIn the long run - we should refactor the event based code by either:\r\n\r\nusing asyncio.run as much as possible after deprecating python 3.6\r\ncreating and managing the event loops explicitly across threads and different contexts.\r\nThis PR only serves as a mitigation for not calling the deprecated function.\r\n\r\n\r\nSigned-off-by: rickyyx \r\nCo-authored-by: Chen Shen ", "code": "def is_same_loop(self) -> bool:\n \n return get_or_create_event_loop() == self.router._event_loop\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 6, "token_counts": 18, "n_ast_nodes": 32, "n_identifiers": 6, "d_id": 30926, "documentation": { "docstring": "Whether the caller's asyncio loop is the same loop for handle.\n\n This is only useful for async handles.\n ", "n_words": 18, "vocab_size": 14, "n_whitespaces": 32, "language": "en" } }, { "id": 207001, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_changelist/tests.py", "file_name": "tests.py", "fun_name": "test_computed_list_display_localization", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_computed_list_display_localization(self):\n \n self.client.force_login(self.superuser)\n event = Event.objects.create(date=datetime.date.today())\n response = self.client.get(reverse(\"admin:admin_changelist_event_changelist\"))\n self.assertContains(response, formats.localize(event.date))\n self.assertNotContains(response, str(event.date))\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 55, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 6, "token_counts": 75, "n_ast_nodes": 124, "n_identifiers": 20, "d_id": 51830, "documentation": { "docstring": "\n Regression test for #13196: output of functions should be localized\n in the changelist.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 36, "language": "en" } }, { "id": 243004, "commit_id": "11be1631433f252b816802aef1a3cd109bd308c7", "repo": "Pillow", "path": "src/PIL/Image.py", "file_name": "Image.py", "fun_name": "apply_transparency", "commit_message": "Added apply_transparency()", "code": "def apply_transparency(self):\n \n if self.mode != \"P\" or \"transparency\" not in self.info:\n return\n\n from . 
import ImagePalette\n\n palette = self.getpalette(\"RGBA\")\n transparency = self.info[\"transparency\"]\n if isinstance(transparency, bytes):\n for i, alpha in enumerate(transparency):\n palette[i * 4 + 3] = alpha\n else:\n palette[transparency * 4 + 3] = 0\n self.palette = ImagePalette.ImagePalette(\"RGBA\", bytes(palette))\n self.palette.dirty = 1\n\n del self.info[\"transparency\"]\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 172, "n_words": 54, "vocab_size": 41, "complexity": 5, "nloc": 14, "token_counts": 110, "n_ast_nodes": 186, "n_identifiers": 14, "d_id": 69952, "documentation": { "docstring": "\n If a P mode image has a \"transparency\" key in the info dictionary,\n remove the key and apply the transparency to the palette instead.\n ", "n_words": 24, "vocab_size": 19, "n_whitespaces": 46, "language": "en" } }, { "id": 203151, "commit_id": "890bfa368c33d6ae19fe45cf1eed7e2e8d63160e", "repo": "django", "path": "django/db/backends/base/creation.py", "file_name": "creation.py", "fun_name": "mark_expected_failures_and_skips", "commit_message": "Refs #20349 -- Avoided loading testing libraries when not needed.", "code": "def mark_expected_failures_and_skips(self):\n \n # Only load unittest if we're actually testing.\n from unittest import expectedFailure, skip\n for test_name in self.connection.features.django_test_expected_failures:\n test_case_name, _, test_method_name = test_name.rpartition('.')\n test_app = test_name.split('.')[0]\n # Importing a test app that isn't installed raises RuntimeError.\n if test_app in settings.INSTALLED_APPS:\n test_case = import_string(test_case_name)\n test_method = getattr(test_case, test_method_name)\n setattr(test_case, test_method_name, expectedFailure(test_method))\n for reason, tests in self.connection.features.django_test_skips.items():\n for test_name in tests:\n test_case_name, _, test_method_name = test_name.rpartition('.')\n test_app = test_name.split('.')[0]\n # Importing a test app that isn't installed raises RuntimeError.\n if test_app in settings.INSTALLED_APPS:\n test_case = import_string(test_case_name)\n test_method = getattr(test_case, test_method_name)\n setattr(test_case, test_method_name, skip(reason)(test_method))\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 344, "n_words": 92, "vocab_size": 48, "complexity": 6, "nloc": 17, "token_counts": 158, "n_ast_nodes": 256, "n_identifiers": 26, "d_id": 50238, "documentation": { "docstring": "\n Mark tests in Django's test suite which are expected failures on this\n database and test which should be skipped on this database.\n ", "n_words": 22, "vocab_size": 18, "n_whitespaces": 44, "language": "en" } }, { "id": 101394, "commit_id": "1022651eb8a7741014f5d2ec7cbfe882120dfa5f", "repo": "faceswap", "path": "scripts/fsmedia.py", "file_name": "fsmedia.py", "fun_name": "images_found", "commit_message": "Bugfix: convert - Gif Writer\n - Fix non-launch error on Gif Writer\n - convert plugins - linting\n - convert/fs_media/preview/queue_manager - typing\n - Change convert items from dict to Dataclass", "code": "def images_found(self) -> int:\n \n return self._images_found\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 
12, "n_ast_nodes": 22, "n_identifiers": 4, "d_id": 20809, "documentation": { "docstring": "int: The number of frames that exist in the video file, or the folder of images. ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 16, "language": "en" } }, { "id": 251372, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "mitmproxy/http.py", "file_name": "http.py", "fun_name": "constrain_encoding", "commit_message": "make it black!", "code": "def constrain_encoding(self) -> None:\n \n accept_encoding = self.headers.get(\"accept-encoding\")\n if accept_encoding:\n self.headers[\"accept-encoding\"] = \", \".join(\n e\n for e in {\"gzip\", \"identity\", \"deflate\", \"br\", \"zstd\"}\n if e in accept_encoding\n )\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 115, "n_words": 27, "vocab_size": 21, "complexity": 4, "nloc": 11, "token_counts": 52, "n_ast_nodes": 95, "n_identifiers": 7, "d_id": 73701, "documentation": { "docstring": "\n Limits the permissible Accept-Encoding values, based on what we can decode appropriately.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 27, "language": "en" } }, { "id": 205534, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/fields/__init__.py", "file_name": "__init__.py", "fun_name": "__reduce__", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def __reduce__(self):\n \n if not hasattr(self, \"model\"):\n # Fields are sometimes used without attaching them to models (for\n # example in aggregation). In this case give back a plain field\n # instance. The code below will create a new empty instance of\n # class self.__class__, then update its dict with self.__dict__\n # values - so, this is very close to normal pickle.\n state = self.__dict__.copy()\n # The _get_default cached_property can't be pickled due to lambda\n # usage.\n state.pop(\"_get_default\", None)\n return _empty, (self.__class__,), state\n return _load_field, (\n self.model._meta.app_label,\n self.model._meta.object_name,\n self.name,\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 260, "n_words": 89, "vocab_size": 76, "complexity": 2, "nloc": 10, "token_counts": 68, "n_ast_nodes": 115, "n_identifiers": 15, "d_id": 51144, "documentation": { "docstring": "\n Pickling should return the model._meta.fields instance of the field,\n not a new copy of that field. So, use the app registry to load the\n model and then the field back.\n ", "n_words": 30, "vocab_size": 25, "n_whitespaces": 59, "language": "en" } }, { "id": 226150, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_bar.py", "file_name": "_bar.py", "fun_name": "meta", "commit_message": "switch to black .22", "code": "def meta(self):\n \n return self[\"meta\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 57823, "documentation": { "docstring": "\n Assigns extra meta information associated with this trace that\n can be used in various text attributes. 
Attributes such as\n trace `name`, graph, axis and colorbar `title.text`, annotation\n `text` `rangeselector`, `updatemenues` and `sliders` `label`\n text all support `meta`. To access the trace `meta` values in\n an attribute in the same trace, simply use `%{meta[i]}` where\n `i` is the index or key of the `meta` item in question. To\n access trace `meta` in layout attributes, use\n `%{data[n[.meta[i]}` where `i` is the index or key of the\n `meta` and `n` is the trace index.\n\n The 'meta' property accepts values of any type\n\n Returns\n -------\n Any|numpy.ndarray\n ", "n_words": 102, "vocab_size": 69, "n_whitespaces": 208, "language": "en" } }, { "id": 100763, "commit_id": "3d8e674adc88b8f4cc206ebad6fb5b600e38fe14", "repo": "faceswap", "path": "lib/convert.py", "file_name": "convert.py", "fun_name": "process", "commit_message": "convert - Fix affine borders", "code": "def process(self, in_queue, out_queue):\n \n logger.debug(\"Starting convert process. (in_queue: %s, out_queue: %s)\",\n in_queue, out_queue)\n log_once = False\n while True:\n items = in_queue.get()\n if items == \"EOF\":\n logger.debug(\"EOF Received\")\n logger.debug(\"Patch queue finished\")\n # Signal EOF to other processes in pool\n logger.debug(\"Putting EOF back to in_queue\")\n in_queue.put(items)\n break\n\n if isinstance(items, dict):\n items = [items]\n for item in items:\n logger.trace(\"Patch queue got: '%s'\", item[\"filename\"])\n try:\n image = self._patch_image(item)\n except Exception as err: # pylint: disable=broad-except\n # Log error and output original frame\n logger.error(\"Failed to convert image: '%s'. Reason: %s\",\n item[\"filename\"], str(err))\n image = item[\"image\"]\n\n loglevel = logger.trace if log_once else logger.warning\n loglevel(\"Convert error traceback:\", exc_info=True)\n log_once = True\n # UNCOMMENT THIS CODE BLOCK TO PRINT TRACEBACK ERRORS\n import sys ; import traceback\n exc_info = sys.exc_info() ; traceback.print_exception(*exc_info)\n logger.trace(\"Out queue put: %s\", item[\"filename\"])\n out_queue.put((item[\"filename\"], image))\n logger.debug(\"Completed convert process\")\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 635, "n_words": 133, "vocab_size": 102, "complexity": 7, "nloc": 30, "token_counts": 201, "n_ast_nodes": 350, "n_identifiers": 26, "d_id": 20216, "documentation": { "docstring": " Main convert process.\n\n Takes items from the in queue, runs the relevant adjustments, patches faces to final frame\n and outputs patched frame to the out queue.\n\n Parameters\n ----------\n in_queue: :class:`queue.Queue`\n The output from :class:`scripts.convert.Predictor`. 
Contains detected faces from the\n Faceswap model as well as the frame to be patched.\n out_queue: :class:`queue.Queue`\n The queue to place patched frames into for writing by one of Faceswap's\n :mod:`plugins.convert.writer` plugins.\n ", "n_words": 66, "vocab_size": 50, "n_whitespaces": 160, "language": "en" } }, { "id": 63443, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py", "file_name": "pyparsing.py", "fun_name": "traceParseAction", "commit_message": "upd; format", "code": "def traceParseAction(f):\n ", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "\n \"\"\"Decorator for debugging parse actions.\n\n When the parse action is called, this decorator will print\n ``\">> entering method-name(line:, , )\"``.\n When the parse action completes, the decorator will print\n ``\"<<\"`` followed by the returned value, or any exception that the parse action raised.\n\n Example::followed by the returned valueor any exception that the parse action", "n_ast_errors": 3, "ast_levels": 8, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 2, "nloc": 8, "token_counts": 29, "n_ast_nodes": 49, "n_identifiers": 18, "d_id": 13305, "documentation": { "docstring": "Decorator for debugging parse actions.\n\n When the parse action is called, this decorator will print\n ``\">> entering method-name(line:, , )\"``.\n When the parse action completes, the decorator will print\n ``\"<<\"`` followed by the returned value, or any exception that the parse action raised.\n\n Example::\n\n wd = Word(alphas)\n", "n_words": 47, "vocab_size": 34, "n_whitespaces": 68, "language": "en" } }, { "id": 308880, "commit_id": "e222e1b6f05b630bef5aed73e307ca5072b6f286", "repo": "core", "path": "homeassistant/components/flux_led/number.py", "file_name": "number.py", "fun_name": "_pixels_and_segments_fit_in_music_mode", "commit_message": "Add device configuration entities to flux_led (#62786)\n\nCo-authored-by: Chris Talkington ", "code": "def _pixels_and_segments_fit_in_music_mode(self) -> bool:\n \n pixels_per_segment = self._device.pixels_per_segment\n segments = self._device.segments\n assert pixels_per_segment is not None\n assert segments is not None\n return bool(\n pixels_per_segment <= MUSIC_PIXELS_PER_SEGMENT_MAX\n and segments <= MUSIC_SEGMENTS_MAX\n and pixels_per_segment * segments <= MUSIC_PIXELS_MAX\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 118, "n_words": 36, "vocab_size": 22, "complexity": 3, "nloc": 14, "token_counts": 49, "n_ast_nodes": 78, "n_identifiers": 9, "d_id": 107608, "documentation": { "docstring": "Check if the base pixel and segment settings will fit for music mode.\n\n If they fit, they do not need to be configured.\n ", "n_words": 23, "vocab_size": 22, "n_whitespaces": 37, "language": "en" } }, { "id": 82839, "commit_id": "9ba53df5a19131e6926027b2e73aaa77cec17272", "repo": "examples", "path": "distributed/sharded_tensor/tensor_parallel.py", "file_name": "tensor_parallel.py", "fun_name": "_generate_sharding_spec", "commit_message": "Gh/fduwjj/2/base (#1007)\n\n* test ghstack\r\n\r\n[ghstack-poisoned]\r\n\r\n* Update base for Update on \"[PT-D] Add an example for Megatron-LM style example\"\r\n\r\n\r\n\r\n\r\n[ghstack-poisoned]\r\n\r\n* Update base for Update on \"[PT-D] Add an example for Megatron-LM style 
example\"\r\n\r\n\r\n\r\n\r\n[ghstack-poisoned]\r\n\r\n* Update base for Update on \"[PT-D] Add an example for Megatron-LM style example\"\r\n\r\n\r\n\r\n\r\n[ghstack-poisoned]\r\n\r\n* Update base for Update on \"[PT-D] Add an example for Megatron-LM style example\"\r\n\r\n\r\n\r\n\r\n[ghstack-poisoned]\r\n\r\n* [PT-D] Add an example for Megatron-LM style example (#1006)\r\n\r\n* [PT-D] Add an example for Megatron-LM style example\r\n\r\n[ghstack-poisoned]\r\n\r\n* Update on \"[PT-D] Add an example for Megatron-LM style example\"\r\n\r\n\r\n\r\n\r\n[ghstack-poisoned]", "code": "def _generate_sharding_spec(world_size):\n \n placements = [f\"rank:{idx}/cuda:{idx}\" for idx in range(world_size)]\n # Shard the first nn module's weight by dim 0.\n # (nn.Linear transposes the weight internally so dim 0 actually means column)\n colwise_spec = ChunkShardingSpec(\n dim=0,\n placements=placements,\n )\n # Shard the second nn module's weight by dim 1.\n rowwise_spec = ChunkShardingSpec(\n dim=1,\n placements=placements,\n )\n # The result from the second nn.linear layer needs aggregation by dim 0.\n output_spec = ChunkShardingSpec(\n dim=0,\n placements=placements,\n )\n return colwise_spec, rowwise_spec, output_spec\n\n", "url": "https://github.com/pytorch/examples.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 157, "n_words": 76, "vocab_size": 47, "complexity": 2, "nloc": 15, "token_counts": 64, "n_ast_nodes": 110, "n_identifiers": 10, "d_id": 17549, "documentation": { "docstring": "\n We first need to create a sharding spec for our sharding work.\n\n For now, we only support sharding on one dimension. So we use\n ``ChunkShardingSpec`` to chunk the size of the given sharding\n dim to equally split length. The behavior is similar to\n `torch.chunk`.\n\n We also need to create the output sharding spec for the second nn\n because we need to aggregate(reduce) the partial result after the\n second nn layer. 
So we have a new sharding spec to represent that\n how we store the aggregation result in a new sharded tensor.\n ", "n_words": 91, "vocab_size": 56, "n_whitespaces": 122, "language": "en" } }, { "id": 66581, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/patches/v11_0/update_total_qty_field.py", "file_name": "update_total_qty_field.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute():\n\tfrappe.reload_doc(\"buying\", \"doctype\", \"purchase_order\")\n\tfrappe.reload_doc(\"buying\", \"doctype\", \"supplier_quotation\")\n\tfrappe.reload_doc(\"selling\", \"doctype\", \"sales_order\")\n\tfrappe.reload_doc(\"selling\", \"doctype\", \"quotation\")\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"delivery_note\")\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"purchase_receipt\")\n\tfrappe.reload_doc(\"accounts\", \"doctype\", \"sales_invoice\")\n\tfrappe.reload_doc(\"accounts\", \"doctype\", \"purchase_invoice\")\n\n\tdoctypes = [\n\t\t\"Sales Order\",\n\t\t\"Sales Invoice\",\n\t\t\"Delivery Note\",\n\t\t\"Purchase Order\",\n\t\t\"Purchase Invoice\",\n\t\t\"Purchase Receipt\",\n\t\t\"Quotation\",\n\t\t\"Supplier Quotation\",\n\t]\n\n\tfor doctype in doctypes:\n\t\ttotal_qty = frappe.db.sql(\n\t\t\t.format(\n\t\t\t\tdoctype\n\t\t\t),\n\t\t\tas_dict=True,\n\t\t)\n\n\t\t# Query to update total_qty might become too big, Update in batches\n\t\t# batch_size is chosen arbitrarily, Don't try too hard to reason about it\n\t\tbatch_size = 100000\n\t\tfor i in range(0, len(total_qty), batch_size):\n\t\t\tbatch_transactions = total_qty[i : i + batch_size]\n\n\t\t\t# UPDATE with CASE for some reason cannot use PRIMARY INDEX,\n\t\t\t# causing all rows to be examined, leading to a very slow update\n\n\t\t\t# UPDATE with WHERE clause uses PRIMARY INDEX, but will lead to too many queries\n\n\t\t\t# INSERT with ON DUPLICATE KEY UPDATE uses PRIMARY INDEX\n\t\t\t# and can perform multiple updates per query\n\t\t\t# This is probably never used anywhere else as of now, but should be\n\t\t\tvalues = []\n\t\t\tfor d in batch_transactions:\n\t\t\t\tvalues.append(\"({0}, {1})\".format(frappe.db.escape(d.parent), d.qty))\n\t\t\tconditions = \",\".join(values)\n\t\t\tfrappe.db.sql(\n\t\t\t\t.format(\n\t\t\t\t\tdoctype, conditions\n\t\t\t\t)\n\t\t\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 142, "n_words": 188, "vocab_size": 126, "complexity": 4, "nloc": 48, "token_counts": 213, "n_ast_nodes": 388, "n_identifiers": 23, "d_id": 14229, "documentation": { "docstring": "\n\t\t\tSELECT\n\t\t\t\tparent, SUM(qty) as qty\n\t\t\tFROM\n\t\t\t\t`tab{0} Item`\n\t\t\twhere parenttype = '{0}'\n\t\t\tGROUP BY parent\n\t\t\n\t\t\t\tINSERT INTO `tab{}` (name, total_qty) VALUES {}\n\t\t\t\tON DUPLICATE KEY UPDATE name = VALUES(name), total_qty = VALUES(total_qty)\n\t\t\t", "n_words": 32, "vocab_size": 30, "n_whitespaces": 24, "language": "en" } }, { "id": 176520, "commit_id": "83cc6cd2811dbc6d20cfb3de809f21153b30e14e", "repo": "networkx", "path": "networkx/algorithms/tree/mst.py", "file_name": "mst.py", "fun_name": "prim_mst_edges", "commit_message": "Optimize prim for mst (#5455)\n\nCo-authored-by: Dylan ", "code": "def prim_mst_edges(G, minimum, weight=\"weight\", keys=True, data=True, ignore_nan=False):\n \n is_multigraph = G.is_multigraph()\n push = heappush\n pop = heappop\n\n nodes = set(G)\n c = count()\n\n sign = 1 if minimum else -1\n\n 
while nodes:\n u = nodes.pop()\n frontier = []\n visited = {u}\n if is_multigraph:\n for v, keydict in G.adj[u].items():\n for k, d in keydict.items():\n wt = d.get(weight, 1) * sign\n if isnan(wt):\n if ignore_nan:\n continue\n msg = f\"NaN found as an edge weight. Edge {(u, v, k, d)}\"\n raise ValueError(msg)\n push(frontier, (wt, next(c), u, v, k, d))\n else:\n for v, d in G.adj[u].items():\n wt = d.get(weight, 1) * sign\n if isnan(wt):\n if ignore_nan:\n continue\n msg = f\"NaN found as an edge weight. Edge {(u, v, d)}\"\n raise ValueError(msg)\n push(frontier, (wt, next(c), u, v, d))\n while nodes and frontier:\n if is_multigraph:\n W, _, u, v, k, d = pop(frontier)\n else:\n W, _, u, v, d = pop(frontier)\n if v in visited or v not in nodes:\n continue\n # Multigraphs need to handle edge keys in addition to edge data.\n if is_multigraph and keys:\n if data:\n yield u, v, k, d\n else:\n yield u, v, k\n else:\n if data:\n yield u, v, d\n else:\n yield u, v\n # update frontier\n visited.add(v)\n nodes.discard(v)\n if is_multigraph:\n for w, keydict in G.adj[v].items():\n if w in visited:\n continue\n for k2, d2 in keydict.items():\n new_weight = d2.get(weight, 1) * sign\n push(frontier, (new_weight, next(c), v, w, k2, d2))\n else:\n for w, d2 in G.adj[v].items():\n if w in visited:\n continue\n new_weight = d2.get(weight, 1) * sign\n push(frontier, (new_weight, next(c), v, w, d2))\n\n\nALGORITHMS = {\n \"boruvka\": boruvka_mst_edges,\n \"borůvka\": boruvka_mst_edges,\n \"kruskal\": kruskal_mst_edges,\n \"prim\": prim_mst_edges,\n}\n\n\n@not_implemented_for(\"directed\")", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@not_implemented_for(\"directed\")", "n_ast_errors": 1, "ast_levels": 20, "n_whitespaces": 1173, "n_words": 264, "vocab_size": 110, "complexity": 26, "nloc": 62, "token_counts": 453, "n_ast_nodes": 762, "n_identifiers": 44, "d_id": 41941, "documentation": { "docstring": "Iterate over edges of Prim's algorithm min/max spanning tree.\n\n Parameters\n ----------\n G : NetworkX Graph\n The graph holding the tree of interest.\n\n minimum : bool (default: True)\n Find the minimum (True) or maximum (False) spanning tree.\n\n weight : string (default: 'weight')\n The name of the edge attribute holding the edge weights.\n\n keys : bool (default: True)\n If `G` is a multigraph, `keys` controls whether edge keys ar yielded.\n Otherwise `keys` is ignored.\n\n data : bool (default: True)\n Flag for whether to yield edge attribute dicts.\n If True, yield edges `(u, v, d)`, where `d` is the attribute dict.\n If False, yield edges `(u, v)`.\n\n ignore_nan : bool (default: False)\n If a NaN is found as an edge weight normally an exception is raised.\n If `ignore_nan is True` then that edge is ignored instead.\n\n ", "n_words": 133, "vocab_size": 80, "n_whitespaces": 230, "language": "en" } }, { "id": 196160, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/combinatorics/permutations.py", "file_name": "permutations.py", "fun_name": "__add__", "commit_message": "Updated import locations", "code": "def __add__(self, other):\n \n rank = (self.rank() + other) % self.cardinality\n rv = self.unrank_lex(self.size, rank)\n rv._rank = rank\n return rv\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 54, "n_words": 19, "vocab_size": 15, "complexity": 1, "nloc": 5, "token_counts": 42, "n_ast_nodes": 68, "n_identifiers": 9, "d_id": 
47660, "documentation": { "docstring": "Return permutation that is other higher in rank than self.\n\n The rank is the lexicographical rank, with the identity permutation\n having rank of 0.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation\n >>> I = Permutation([0, 1, 2, 3])\n >>> a = Permutation([2, 1, 3, 0])\n >>> I + a.rank() == a\n True\n\n See Also\n ========\n\n __sub__, inversion_vector\n\n ", "n_words": 57, "vocab_size": 44, "n_whitespaces": 148, "language": "en" } }, { "id": 219712, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pydecimal.py", "file_name": "_pydecimal.py", "fun_name": "to_integral_exact", "commit_message": "add python 3.10.4 for windows", "code": "def to_integral_exact(self, a):\n \n a = _convert_other(a, raiseit=True)\n return a.to_integral_exact(context=self)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 27, "n_ast_nodes": 44, "n_identifiers": 6, "d_id": 55735, "documentation": { "docstring": "Rounds to an integer.\n\n When the operand has a negative exponent, the result is the same\n as using the quantize() operation using the given operand as the\n left-hand-operand, 1E+0 as the right-hand-operand, and the precision\n of the operand as the precision setting; Inexact and Rounded flags\n are allowed in this operation. The rounding mode is taken from the\n context.\n\n >>> ExtendedContext.to_integral_exact(Decimal('2.1'))\n Decimal('2')\n >>> ExtendedContext.to_integral_exact(Decimal('100'))\n Decimal('100')\n >>> ExtendedContext.to_integral_exact(Decimal('100.0'))\n Decimal('100')\n >>> ExtendedContext.to_integral_exact(Decimal('101.5'))\n Decimal('102')\n >>> ExtendedContext.to_integral_exact(Decimal('-101.5'))\n Decimal('-102')\n >>> ExtendedContext.to_integral_exact(Decimal('10E+5'))\n Decimal('1.0E+6')\n >>> ExtendedContext.to_integral_exact(Decimal('7.89E+77'))\n Decimal('7.89E+77')\n >>> ExtendedContext.to_integral_exact(Decimal('-Inf'))\n Decimal('-Infinity')\n ", "n_words": 83, "vocab_size": 56, "n_whitespaces": 245, "language": "en" } }, { "id": 260606, "commit_id": "84c6421a9067de7d1b54b7a6d8e21ce38e1f0eca", "repo": "scikit-learn", "path": "sklearn/utils/tests/test_estimator_html_repr.py", "file_name": "test_estimator_html_repr.py", "fun_name": "test_invalid_parameters_in_stacking", "commit_message": "FIX Show a HTML repr for meta-estimatosr with invalid parameters (#24015)\n\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>", "code": "def test_invalid_parameters_in_stacking():\n \n stacker = StackingClassifier(estimators=[])\n\n html_output = estimator_html_repr(stacker)\n assert html.escape(str(stacker)) in html_output\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 24, "n_words": 12, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 32, "n_ast_nodes": 56, "n_identifiers": 9, "d_id": 76372, "documentation": { "docstring": "Invalidate stacking configuration uses default repr.\n\n Non-regression test for #24009.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 16, "language": "en" } }, { "id": 118576, "commit_id": "704eab3478cf69847825b23dabf15813a8ac9fa2", "repo": "streamlit", "path": "lib/streamlit/session_data.py", "file_name": 
"session_data.py", "fun_name": "generate_new_id", "commit_message": "Rename and refactor `Report` machinery (#4141)\n\nThis refactor renames (almost) everything related to the outdated \"report\" concept with more precise concepts that we use throughout our code, primarily \"script run\", \"session\", and \"app\".", "code": "def generate_new_id() -> str:\n \n return base58.b58encode(uuid.uuid4().bytes).decode()\n\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 43, "n_identifiers": 8, "d_id": 26302, "documentation": { "docstring": "Randomly generate an ID representing this session's execution.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 247781, "commit_id": "9d21ecf7ceab55bc19c4457b8b07401b0b1623a7", "repo": "synapse", "path": "tests/push/test_push_rule_evaluator.py", "file_name": "test_push_rule_evaluator.py", "fun_name": "test_event_match_body", "commit_message": "Add type hints to tests files. (#12256)", "code": "def test_event_match_body(self) -> None:\n \n\n # if the key is `content.body`, the pattern matches substrings.\n\n # non-wildcards should match\n condition = {\n \"kind\": \"event_match\",\n \"key\": \"content.body\",\n \"pattern\": \"foobaz\",\n }\n self._assert_matches(\n condition,\n {\"body\": \"aaa FoobaZ zzz\"},\n \"patterns should match and be case-insensitive\",\n )\n self._assert_not_matches(\n condition,\n {\"body\": \"aa xFoobaZ yy\"},\n \"pattern should only match at word boundaries\",\n )\n self._assert_not_matches(\n condition,\n {\"body\": \"aa foobazx yy\"},\n \"pattern should only match at word boundaries\",\n )\n\n # wildcards should match\n condition = {\n \"kind\": \"event_match\",\n \"key\": \"content.body\",\n \"pattern\": \"f?o*baz\",\n }\n\n self._assert_matches(\n condition,\n {\"body\": \"aaa FoobarbaZ zzz\"},\n \"* should match string and pattern should be case-insensitive\",\n )\n self._assert_matches(\n condition, {\"body\": \"aa foobaz yy\"}, \"* should match 0 characters\"\n )\n self._assert_not_matches(\n condition, {\"body\": \"aa fobbaz yy\"}, \"? should not match 0 characters\"\n )\n self._assert_not_matches(\n condition, {\"body\": \"aa fiiobaz yy\"}, \"? should not match 2 characters\"\n )\n self._assert_not_matches(\n condition,\n {\"body\": \"aa xfooxbaz yy\"},\n \"pattern should only match at word boundaries\",\n )\n self._assert_not_matches(\n condition,\n {\"body\": \"aa fooxbazx yy\"},\n \"pattern should only match at word boundaries\",\n )\n\n # test backslashes\n condition = {\n \"kind\": \"event_match\",\n \"key\": \"content.body\",\n \"pattern\": r\"f\\oobaz\",\n }\n self._assert_matches(\n condition,\n {\"body\": r\"F\\oobaz\"},\n \"backslash should match itself\",\n )\n condition = {\n \"kind\": \"event_match\",\n \"key\": \"content.body\",\n \"pattern\": r\"f\\?obaz\",\n }\n self._assert_matches(\n condition,\n {\"body\": r\"F\\oobaz\"},\n r\"? 
after \\ should match any character\",\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 883, "n_words": 209, "vocab_size": 74, "complexity": 1, "nloc": 71, "token_counts": 239, "n_ast_nodes": 450, "n_identifiers": 5, "d_id": 71916, "documentation": { "docstring": "Check that event_match conditions on content.body work as expected", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 182034, "commit_id": "9c2a125c2412c5d011307a80f4552cf9824cc022", "repo": "textual", "path": "src/textual/view.py", "file_name": "view.py", "fun_name": "layout", "commit_message": "Ensuring we get and set Layout as set in view.styles everywhere", "code": "def layout(self) -> Layout:\n \n return self.styles.layout\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 6, "token_counts": 14, "n_ast_nodes": 25, "n_identifiers": 4, "d_id": 43737, "documentation": { "docstring": "Convenience property for accessing ``view.styles.layout``.\n\n Returns: The Layout associated with this view\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 26, "language": "en" } }, { "id": 209457, "commit_id": "b26f2283379d3bba48d575c1fffd1c3cdeaf64c2", "repo": "scapy", "path": "scapy/libs/rfc3961.py", "file_name": "rfc3961.py", "fun_name": "_zeropad", "commit_message": "Kerberos update (#3688)\n\n* Kerberos over TCP\r\n\r\n* Kerberos: add FAST & PKCA\r\n\r\n* Many user-friendly improvements\r\n\r\n* RFC3961 crypto\r\n\r\n* Summary, Sessions, Examples, Bugs\r\n\r\n* More tests, _n_fold edge case\r\n\r\n* Ignore potatoe (kerberos tests) from codespell", "code": "def _zeropad(s, padsize):\n \n return s + b\"\\x00\" * (-len(s) % padsize)\n\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 17, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 2, "token_counts": 23, "n_ast_nodes": 40, "n_identifiers": 4, "d_id": 52681, "documentation": { "docstring": "\n Return s padded with 0 bytes to a multiple of padsize.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 18, "language": "en" } }, { "id": 196394, "commit_id": "59d22b6bb7287613d598611027f640d068ca5748", "repo": "sympy", "path": "sympy/matrices/repmatrix.py", "file_name": "repmatrix.py", "fun_name": "row_swap", "commit_message": "Moved imports to higher level", "code": "def row_swap(self, i, j):\n \n for k in range(0, self.cols):\n self[i, k], self[j, k] = self[j, k], self[i, k]\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 43, "n_words": 18, "vocab_size": 14, "complexity": 2, "nloc": 3, "token_counts": 49, "n_ast_nodes": 69, "n_identifiers": 7, "d_id": 47894, "documentation": { "docstring": "Swap the two given rows of the matrix in-place.\n\n Examples\n ========\n\n >>> from sympy import Matrix\n >>> M = Matrix([[0, 1], [1, 0]])\n >>> M\n Matrix([\n [0, 1],\n [1, 0]])\n >>> M.row_swap(0, 1)\n >>> M\n Matrix([\n [1, 0],\n [0, 1]])\n\n See Also\n ========\n\n row\n col_swap\n ", "n_words": 45, "vocab_size": 31, "n_whitespaces": 171, "language": "en" } }, { "id": 177423, "commit_id": "6ef8b9986ad9a8bc79a4a6640a8f9ee285b67a7b", "repo": "networkx", "path": 
"networkx/algorithms/bipartite/tests/test_matching.py", "file_name": "test_matching.py", "fun_name": "setup_method", "commit_message": "Update pytest (#6165)", "code": "def setup_method(self):\n \n self.simple_graph = nx.complete_bipartite_graph(2, 3)\n self.simple_solution = {0: 2, 1: 3, 2: 0, 3: 1}\n\n edges = [(0, 7), (0, 8), (2, 6), (2, 9), (3, 8), (4, 8), (4, 9), (5, 11)]\n self.top_nodes = set(range(6))\n self.graph = nx.Graph()\n self.graph.add_nodes_from(range(12))\n self.graph.add_edges_from(edges)\n\n # Example bipartite graph from issue 2127\n G = nx.Graph()\n G.add_nodes_from(\n [\n (1, \"C\"),\n (1, \"B\"),\n (0, \"G\"),\n (1, \"F\"),\n (1, \"E\"),\n (0, \"C\"),\n (1, \"D\"),\n (1, \"I\"),\n (0, \"A\"),\n (0, \"D\"),\n (0, \"F\"),\n (0, \"E\"),\n (0, \"H\"),\n (1, \"G\"),\n (1, \"A\"),\n (0, \"I\"),\n (0, \"B\"),\n (1, \"H\"),\n ]\n )\n G.add_edge((1, \"C\"), (0, \"A\"))\n G.add_edge((1, \"B\"), (0, \"A\"))\n G.add_edge((0, \"G\"), (1, \"I\"))\n G.add_edge((0, \"G\"), (1, \"H\"))\n G.add_edge((1, \"F\"), (0, \"A\"))\n G.add_edge((1, \"F\"), (0, \"C\"))\n G.add_edge((1, \"F\"), (0, \"E\"))\n G.add_edge((1, \"E\"), (0, \"A\"))\n G.add_edge((1, \"E\"), (0, \"C\"))\n G.add_edge((0, \"C\"), (1, \"D\"))\n G.add_edge((0, \"C\"), (1, \"I\"))\n G.add_edge((0, \"C\"), (1, \"G\"))\n G.add_edge((0, \"C\"), (1, \"H\"))\n G.add_edge((1, \"D\"), (0, \"A\"))\n G.add_edge((1, \"I\"), (0, \"A\"))\n G.add_edge((1, \"I\"), (0, \"E\"))\n G.add_edge((0, \"A\"), (1, \"G\"))\n G.add_edge((0, \"A\"), (1, \"H\"))\n G.add_edge((0, \"E\"), (1, \"G\"))\n G.add_edge((0, \"E\"), (1, \"H\"))\n self.disconnected_graph = G\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 698, "n_words": 175, "vocab_size": 65, "complexity": 1, "nloc": 52, "token_counts": 576, "n_ast_nodes": 901, "n_identifiers": 17, "d_id": 42374, "documentation": { "docstring": "Creates a bipartite graph for use in testing matching algorithms.\n\n The bipartite graph has a maximum cardinality matching that leaves\n vertex 1 and vertex 10 unmatched. 
The first six numbers are the left\n vertices and the next six numbers are the right vertices.\n\n ", "n_words": 43, "vocab_size": 31, "n_whitespaces": 71, "language": "en" } }, { "id": 221115, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/bdb.py", "file_name": "bdb.py", "fun_name": "dispatch_call", "commit_message": "add python 3.10.4 for windows", "code": "def dispatch_call(self, frame, arg):\n \n # XXX 'arg' is no longer used\n if self.botframe is None:\n # First call of dispatch since reset()\n self.botframe = frame.f_back # (CT) Note that this may also be None!\n return self.trace_dispatch\n if not (self.stop_here(frame) or self.break_anywhere(frame)):\n # No need to trace this function\n return # None\n # Ignore call events in generator except when stepping.\n if self.stopframe and frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS:\n return self.trace_dispatch\n self.user_call(frame, arg)\n if self.quitting: raise BdbQuit\n return self.trace_dispatch\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 205, "n_words": 76, "vocab_size": 59, "complexity": 7, "nloc": 11, "token_counts": 83, "n_ast_nodes": 137, "n_identifiers": 16, "d_id": 56216, "documentation": { "docstring": "Invoke user function and return trace function for call event.\n\n If the debugger stops on this function call, invoke\n self.user_call(). Raise BdbQuit if self.quitting is set.\n Return self.trace_dispatch to continue tracing in this scope.\n ", "n_words": 34, "vocab_size": 31, "n_whitespaces": 62, "language": "en" } }, { "id": 108485, "commit_id": "031093e6f05496f55616a1fa2f39e573fea02828", "repo": "matplotlib", "path": "lib/matplotlib/testing/__init__.py", "file_name": "__init__.py", "fun_name": "subprocess_run_helper", "commit_message": "Tweak subprocess_run_helper.\n\nOn general grounds, an API like\n`subprocess_run_helper(func, *args, timeout, **extra_env)`\nis problematic because it prevents one from passing an environment\nvariable called \"timeout\".\n\nInstead, pass the extra environment variables as a dict, without\nunpacking.\n\n(Technically this has been released in 3.5.2 as public API, but 1) I'm\nnot really sure it should have been a public API to start with (should\nwe deprecate it and make it private?), and 2) hopefully tweaking that in\n3.5.3 with no deprecation is not going to disrupt anyone... I can still\nput in a changelog entry if that's preferred.)", "code": "def subprocess_run_helper(func, *args, timeout, extra_env=None):\n \n target = func.__name__\n module = func.__module__\n proc = subprocess.run(\n [sys.executable,\n \"-c\",\n f\"from {module} import {target}; {target}()\",\n *args],\n env={**os.environ, \"SOURCE_DATE_EPOCH\": \"0\", **(extra_env or {})},\n timeout=timeout, check=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n universal_newlines=True)\n return proc\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 116, "n_words": 35, "vocab_size": 32, "complexity": 2, "nloc": 14, "token_counts": 92, "n_ast_nodes": 151, "n_identifiers": 22, "d_id": 23212, "documentation": { "docstring": "\n Run a function in a sub-process.\n\n Parameters\n ----------\n func : function\n The function to be run. 
It must be in a module that is importable.\n *args : str\n Any additional command line arguments to be passed in\n the first argument to ``subprocess.run``.\n extra_env : dict[str, str]\n Any additional environment variables to be set for the subprocess.\n ", "n_words": 56, "vocab_size": 39, "n_whitespaces": 107, "language": "en" } }, { "id": 22421, "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", "repo": "Python", "path": "Eight_Puzzle_Solver/eight_puzzle.py", "file_name": "eight_puzzle.py", "fun_name": "getManhattanDistance", "commit_message": "refactor: clean code\n\nSigned-off-by: slowy07 ", "code": "def getManhattanDistance(self):\n \n ans = 0\n for i in range(self.size):\n for j in range(self.size):\n if self.state[i][j] != 0:\n ans = (\n ans\n + abs((self.state[i][j] - 1) % self.size - j)\n + abs((self.state[i][j] - 1) // self.size - i)\n )\n\n return ans\n", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 24, "n_whitespaces": 201, "n_words": 40, "vocab_size": 26, "complexity": 4, "nloc": 11, "token_counts": 89, "n_ast_nodes": 146, "n_identifiers": 9, "d_id": 4326, "documentation": { "docstring": "\n Parameters: State\n Returns: Manhattan Distance between Current State and Goal State\n Restrictions: State must be a self.size x self.size Array\n ", "n_words": 20, "vocab_size": 16, "n_whitespaces": 49, "language": "en" } }, { "id": 118779, "commit_id": "ee09d5da0986357dccdae9d0fff50c3dab3b40cf", "repo": "streamlit", "path": "lib/streamlit/app_session.py", "file_name": "app_session.py", "fun_name": "_on_source_file_changed", "commit_message": "ScriptRunner + AppSession type annotations (#4376)\n\nAdds missing type annotations in `script_runner.py` and `app_session.py`. No behavior changes.", "code": "def _on_source_file_changed(self) -> None:\n \n if self._run_on_save:\n self.request_rerun(self._client_state)\n else:\n self._enqueue_file_change_message()\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 52, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 6, "token_counts": 28, "n_ast_nodes": 50, "n_identifiers": 6, "d_id": 26419, "documentation": { "docstring": "One of our source files changed. 
Schedule a rerun if appropriate.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 200380, "commit_id": "24f1e7730119fe958cc8e28411f790c9a5ec04eb", "repo": "sympy", "path": "sympy/combinatorics/partitions.py", "file_name": "partitions.py", "fun_name": "__new__", "commit_message": "Fix various typos\n\nFound via `codespell -q 3 -L aboves,aline,ans,aother,arithmetics,assum,atleast,braket,clen,declar,declars,dorder,dum,enew,fo,fro,inout,iself,ist,ket,lamda,lightyear,lightyears,nd,numer,numers,orderd,ot,pring,rcall,rever,ro,ser,siz,splitted,sring,supercedes,te,tht,unequality,upto,vas,versin,whet`", "code": "def __new__(cls, partition, integer=None):\n \n if integer is not None:\n integer, partition = partition, integer\n if isinstance(partition, (dict, Dict)):\n _ = []\n for k, v in sorted(list(partition.items()), reverse=True):\n if not v:\n continue\n k, v = as_int(k), as_int(v)\n _.extend([k]*v)\n partition = tuple(_)\n else:\n partition = tuple(sorted(map(as_int, partition), reverse=True))\n sum_ok = False\n if integer is None:\n integer = sum(partition)\n sum_ok = True\n else:\n integer = as_int(integer)\n\n if not sum_ok and sum(partition) != integer:\n raise ValueError(\"Partition did not add to %s\" % integer)\n if any(i < 1 for i in partition):\n raise ValueError(\"All integer summands must be greater than one\")\n\n obj = Basic.__new__(cls, Integer(integer), Tuple(*partition))\n obj.partition = list(partition)\n obj.integer = integer\n return obj\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 374, "n_words": 109, "vocab_size": 69, "complexity": 10, "nloc": 27, "token_counts": 210, "n_ast_nodes": 337, "n_identifiers": 27, "d_id": 49610, "documentation": { "docstring": "\n Generates a new IntegerPartition object from a list or dictionary.\n\n Explanation\n ===========\n\n The partition can be given as a list of positive integers or a\n dictionary of (integer, multiplicity) items. If the partition is\n preceded by an integer an error will be raised if the partition\n does not sum to that given integer.\n\n Examples\n ========\n\n >>> from sympy.combinatorics.partitions import IntegerPartition\n >>> a = IntegerPartition([5, 4, 3, 1, 1])\n >>> a\n IntegerPartition(14, (5, 4, 3, 1, 1))\n >>> print(a)\n [5, 4, 3, 1, 1]\n >>> IntegerPartition({1:3, 2:1})\n IntegerPartition(5, (2, 1, 1, 1))\n\n If the value that the partition should sum to is given first, a check\n will be made to see n error will be raised if there is a discrepancy:\n\n >>> IntegerPartition(10, [5, 4, 3, 1])\n Traceback (most recent call last):\n ...\n ValueError: The partition is not valid\n\n ", "n_words": 138, "vocab_size": 80, "n_whitespaces": 307, "language": "en" } }, { "id": 41875, "commit_id": "26bf4b3b645edc405ca52b533b8d68273aeba7d1", "repo": "seaborn", "path": "seaborn/utils.py", "file_name": "utils.py", "fun_name": "_deprecate_ci", "commit_message": "Housekeeping on relational plot parameters (#2855)\n\n* Do some housekeeping on lineplot ci deprecation\r\n\r\n* Remove some unused parameters from scatterplot\r\n\r\n* Remove incorrect statement from relplot docstring\r\n\r\n* Update lineplot ci= deprecation test", "code": "def _deprecate_ci(errorbar, ci):\n \n if ci != \"deprecated\":\n if ci is None:\n errorbar = None\n elif ci == \"sd\":\n errorbar = \"sd\"\n else:\n errorbar = (\"ci\", ci)\n msg = (\n \"\\n\\nThe `ci` parameter is deprecated. 
\"\n f\"Use `errorbar={repr(errorbar)}` for the same effect.\\n\"\n )\n warnings.warn(msg, FutureWarning, stacklevel=3)\n\n return errorbar\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 153, "n_words": 47, "vocab_size": 37, "complexity": 4, "nloc": 14, "token_counts": 59, "n_ast_nodes": 117, "n_identifiers": 9, "d_id": 7451, "documentation": { "docstring": "\n Warn on usage of ci= and convert to appropriate errorbar= arg.\n\n ci was deprecated when errorbar was added in 0.12. It should not be removed\n completely for some time, but it can be moved out of function definitions\n (and extracted from kwargs) after one cycle.\n\n ", "n_words": 45, "vocab_size": 42, "n_whitespaces": 61, "language": "en" } }, { "id": 108142, "commit_id": "ec410abbb3a721e31f3aaa61e9e4f941467e35e1", "repo": "matplotlib", "path": "lib/matplotlib/backends/backend_svg.py", "file_name": "backend_svg.py", "fun_name": "start", "commit_message": "Deprecate functions in backends", "code": "def start(self, tag, attrib={}, **extra):\n \n self.__flush()\n tag = _escape_cdata(tag)\n self.__data = []\n self.__tags.append(tag)\n self.__write(self.__indentation[:len(self.__tags) - 1])\n self.__write(\"<%s\" % tag)\n for k, v in {**attrib, **extra}.items():\n if v:\n k = _escape_cdata(k)\n v = _quote_escape_attrib(v)\n self.__write(' %s=%s' % (k, v))\n self.__open = 1\n return len(self.__tags) - 1\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 171, "n_words": 45, "vocab_size": 37, "complexity": 3, "nloc": 14, "token_counts": 126, "n_ast_nodes": 206, "n_identifiers": 18, "d_id": 23076, "documentation": { "docstring": "\n Open a new element. Attributes can be given as keyword\n arguments, or as a string/string dictionary. The method returns\n an opaque identifier that can be passed to the :meth:`close`\n method, to close all open elements up to and including this one.\n\n Parameters\n ----------\n tag\n Element tag.\n attrib\n Attribute dictionary. 
Alternatively, attributes can be given as\n keyword arguments.\n\n Returns\n -------\n An element identifier.\n ", "n_words": 62, "vocab_size": 50, "n_whitespaces": 182, "language": "en" } }, { "id": 61340, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/utils/wheel.py", "file_name": "wheel.py", "fun_name": "parse_wheel", "commit_message": "upd; format", "code": "def parse_wheel(wheel_zip, name):\n # type: (ZipFile, str) -> Tuple[str, Message]\n \n try:\n info_dir = wheel_dist_info_dir(wheel_zip, name)\n metadata = wheel_metadata(wheel_zip, info_dir)\n version = wheel_version(metadata)\n except UnsupportedWheel as e:\n raise UnsupportedWheel(\"{} has an invalid wheel, {}\".format(name, str(e)))\n\n check_compatibility(version, name)\n\n return info_dir, metadata\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 85, "n_words": 39, "vocab_size": 35, "complexity": 2, "nloc": 9, "token_counts": 62, "n_ast_nodes": 103, "n_identifiers": 14, "d_id": 12522, "documentation": { "docstring": "Extract information from the provided wheel, ensuring it meets basic\n standards.\n\n Returns the name of the .dist-info directory and the parsed WHEEL metadata.\n ", "n_words": 23, "vocab_size": 20, "n_whitespaces": 32, "language": "en" } }, { "id": 337446, "commit_id": "5f433673e1bfc7588f8899b1ddf15c85bd630410", "repo": "accelerate", "path": "src/accelerate/utils.py", "file_name": "utils.py", "fun_name": "reduce", "commit_message": "Introduce reduce operator (#326)\n\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def reduce(tensor, reduction=\"mean\"):\n \n", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 6, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 3, "token_counts": 27, "n_ast_nodes": 20, "n_identifiers": 3, "d_id": 121056, "documentation": { "docstring": "\n Recursively reduce the tensors in a nested list/tuple/dictionary of lists of tensors across all processes by the\n mean of a given operation.\n\n Args:\n tensor (nested list/tuple/dictionary of `torch.Tensor`):\n The data to reduce.\n reduction (`str`, *optional*, defaults to `\"mean\"`):\n A reduction method. 
Can be of \"mean\", \"sum\", or \"none\"\n\n Returns:\n The same data structure as `data` with all the tensors reduced.\n ", "n_words": 60, "vocab_size": 45, "n_whitespaces": 119, "language": "en" } }, { "id": 53897, "commit_id": "dc0f9feb764c72620a68ca139eb56e43f6e5f068", "repo": "prefect", "path": "tests/test_task_runners.py", "file_name": "test_task_runners.py", "fun_name": "parameterize_with_task_runners", "commit_message": "Add service marks to task runner tests", "code": "def parameterize_with_task_runners(*values):\n \n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 2, "nloc": 8, "token_counts": 37, "n_ast_nodes": 15, "n_identifiers": 2, "d_id": 10949, "documentation": { "docstring": "\n Generates a `pytest.mark.parametrize` instance for the `task_runner` indirect\n fixture.\n\n Passes marks from the fixtures to the parameter so we can indicate required services\n on each task runner fixture.\n ", "n_words": 28, "vocab_size": 25, "n_whitespaces": 44, "language": "en" } }, { "id": 201206, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/auth_tests/test_decorators.py", "file_name": "test_decorators.py", "fun_name": "test_login_required", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_login_required(self, view_url=\"/login_required/\", login_url=None):\n \n if login_url is None:\n login_url = settings.LOGIN_URL\n response = self.client.get(view_url)\n self.assertEqual(response.status_code, 302)\n self.assertIn(login_url, response.url)\n self.login()\n response = self.client.get(view_url)\n self.assertEqual(response.status_code, 200)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 91, "n_words": 24, "vocab_size": 18, "complexity": 2, "nloc": 9, "token_counts": 79, "n_ast_nodes": 127, "n_identifiers": 14, "d_id": 49901, "documentation": { "docstring": "\n login_required works on a simple view wrapped in a login_required\n decorator.\n ", "n_words": 11, "vocab_size": 9, "n_whitespaces": 33, "language": "en" } }, { "id": 125333, "commit_id": "ac1d21027da8a8c002cc7c28b8d1dc89c0d72fcf", "repo": "ray", "path": "python/ray/train/sklearn/sklearn_checkpoint.py", "file_name": "sklearn_checkpoint.py", "fun_name": "get_estimator", "commit_message": "[AIR] Add framework-specific checkpoints (#26777)", "code": "def get_estimator(self) -> BaseEstimator:\n \n with self.as_directory() as checkpoint_path:\n estimator_path = os.path.join(checkpoint_path, MODEL_KEY)\n with open(estimator_path, \"rb\") as f:\n return cpickle.load(f)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 70, "n_words": 19, "vocab_size": 17, "complexity": 1, "nloc": 6, "token_counts": 46, "n_ast_nodes": 83, "n_identifiers": 14, "d_id": 27838, "documentation": { "docstring": "Retrieve the ``Estimator`` stored in this checkpoint.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 49781, "commit_id": "f4d6e64cdc132ae868699a0ba442f4ab1d304a14", "repo": "PaddleHub", "path": "modules/image/text_to_image/disco_diffusion_cnclip_vitb16/reverse_diffusion/model/gaussian_diffusion.py", "file_name": "gaussian_diffusion.py", "fun_name": "condition_score_with_grad", "commit_message": "add 
disco_diffusion_cnclip_vitb16 module", "code": "def condition_score_with_grad(self, cond_fn, p_mean_var, x, t, model_kwargs=None):\n \n alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)\n\n eps = self._predict_eps_from_xstart(x, t, p_mean_var[\"pred_xstart\"])\n eps = eps - (1 - alpha_bar).sqrt() * cond_fn(x, t, p_mean_var, **model_kwargs)\n\n out = p_mean_var.copy()\n out[\"pred_xstart\"] = self._predict_xstart_from_eps(x, t, eps)\n out[\"mean\"], _, _ = self.q_posterior_mean_variance(x_start=out[\"pred_xstart\"], x_t=x, t=t)\n return out\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 102, "n_words": 46, "vocab_size": 32, "complexity": 1, "nloc": 8, "token_counts": 124, "n_ast_nodes": 187, "n_identifiers": 21, "d_id": 9905, "documentation": { "docstring": "\n Compute what the p_mean_variance output would have been, should the\n model's score function be conditioned by cond_fn.\n\n See condition_mean() for details on cond_fn.\n\n Unlike condition_mean(), this instead uses the conditioning strategy\n from Song et al (2020).\n ", "n_words": 36, "vocab_size": 33, "n_whitespaces": 79, "language": "en" } }, { "id": 176593, "commit_id": "7cad29b3542ad867f1eb5b7b6a9087495f252749", "repo": "networkx", "path": "networkx/algorithms/components/strongly_connected.py", "file_name": "strongly_connected.py", "fun_name": "is_strongly_connected", "commit_message": "Added examples in connected and strongly connected functions (#5559)\n\n* added examples\r\n\r\n* Update networkx/algorithms/components/connected.py\r\n\r\nCo-authored-by: Ross Barnowski \r\n\r\nCo-authored-by: Ross Barnowski ", "code": "def is_strongly_connected(G):\n \n if len(G) == 0:\n raise nx.NetworkXPointlessConcept(\n \n )\n\n return len(list(strongly_connected_components(G))[0]) == len(G)\n\n\n@not_implemented_for(\"undirected\")", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@not_implemented_for(\"undirected\")", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 48, "n_words": 14, "vocab_size": 12, "complexity": 2, "nloc": 6, "token_counts": 40, "n_ast_nodes": 80, "n_identifiers": 8, "d_id": 41992, "documentation": { "docstring": "Test directed graph for strong connectivity.\n\n A directed graph is strongly connected if and only if every vertex in\n the graph is reachable from every other vertex.\n\n Parameters\n ----------\n G : NetworkX Graph\n A directed graph.\n\n Returns\n -------\n connected : bool\n True if the graph is strongly connected, False otherwise.\n\n Examples\n --------\n >>> G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 0), (2, 4), (4, 2)])\n >>> nx.is_strongly_connected(G)\n True\n >>> G.remove_edge(2, 3)\n >>> nx.is_strongly_connected(G)\n False\n\n Raises\n ------\n NetworkXNotImplemented\n If G is undirected.\n\n See Also\n --------\n is_weakly_connected\n is_semiconnected\n is_connected\n is_biconnected\n strongly_connected_components\n\n Notes\n -----\n For directed graphs only.\n Connectivity is undefined for the null graph.", "n_words": 104, "vocab_size": 73, "n_whitespaces": 211, "language": "en" } }, { "id": 93967, "commit_id": "e1b25d625b185588fc7c2834dff5ea5bb3a98ce0", "repo": "sentry", "path": "src/sentry/search/events/datasets/metrics.py", "file_name": "metrics.py", "fun_name": "_resolve_project_threshold_config", "commit_message": "fix(mep): Use project thresholds for apdex calculation (#37256)\n\n- Currently apdex is always based on the 
satisfaction tags in the\r\n transaction.duration metrics. This updates the apdex function so we\r\n read the threshold config, and use that to determine which metric we\r\n should read the satisfaction tags from instead", "code": "def _resolve_project_threshold_config(self) -> SelectType:\n \n org_id = self.builder.params.get(\"organization_id\")\n project_ids = self.builder.params.get(\"project_id\")\n\n project_threshold_configs = (\n ProjectTransactionThreshold.objects.filter(\n organization_id=org_id,\n project_id__in=project_ids,\n )\n .order_by(\"project_id\")\n .values_list(\"project_id\", \"threshold\", \"metric\")\n )\n\n transaction_threshold_configs = (\n ProjectTransactionThresholdOverride.objects.filter(\n organization_id=org_id,\n project_id__in=project_ids,\n )\n .order_by(\"project_id\")\n .values_list(\"transaction\", \"project_id\", \"threshold\", \"metric\")\n )\n\n num_project_thresholds = project_threshold_configs.count()\n sentry_sdk.set_tag(\"project_threshold.count\", num_project_thresholds)\n sentry_sdk.set_tag(\n \"project_threshold.count.grouped\",\n format_grouped_length(num_project_thresholds, [10, 100, 250, 500]),\n )\n\n num_transaction_thresholds = transaction_threshold_configs.count()\n sentry_sdk.set_tag(\"txn_threshold.count\", num_transaction_thresholds)\n sentry_sdk.set_tag(\n \"txn_threshold.count.grouped\",\n format_grouped_length(num_transaction_thresholds, [10, 100, 250, 500]),\n )\n\n if (\n num_project_thresholds + num_transaction_thresholds\n > constants.MAX_QUERYABLE_TRANSACTION_THRESHOLDS\n ):\n raise InvalidSearchQuery(\n f\"Exceeded {constants.MAX_QUERYABLE_TRANSACTION_THRESHOLDS} configured transaction thresholds limit, try with fewer Projects.\"\n )\n\n # Arrays need to have toUint64 casting because clickhouse will define the type as the narrowest possible type\n # that can store listed argument types, which means the comparison will fail because of mismatched types\n project_thresholds = {}\n project_threshold_config_keys = []\n project_threshold_config_values = []\n for project_id, threshold, metric in project_threshold_configs:\n metric = TRANSACTION_METRICS[metric]\n if (\n threshold == constants.DEFAULT_PROJECT_THRESHOLD\n and metric == constants.DEFAULT_PROJECT_THRESHOLD_METRIC\n ):\n # small optimization, if the configuration is equal to the default,\n # we can skip it in the final query\n continue\n\n project_thresholds[project_id] = (metric, threshold)\n project_threshold_config_keys.append(Function(\"toUInt64\", [project_id]))\n project_threshold_config_values.append((metric, threshold))\n\n project_threshold_override_config_keys = []\n project_threshold_override_config_values = []\n for transaction, project_id, threshold, metric in transaction_threshold_configs:\n metric = TRANSACTION_METRICS[metric]\n if (\n project_id in project_thresholds\n and threshold == project_thresholds[project_id][1]\n and metric == project_thresholds[project_id][0]\n ):\n # small optimization, if the configuration is equal to the project\n # configs, we can skip it in the final query\n continue\n\n elif (\n project_id not in project_thresholds\n and threshold == constants.DEFAULT_PROJECT_THRESHOLD\n and metric == constants.DEFAULT_PROJECT_THRESHOLD_METRIC\n ):\n # small optimization, if the configuration is equal to the default\n # and no project configs were set, we can skip it in the final query\n continue\n\n transaction_id = self.resolve_tag_value(transaction)\n # Don't add to the config if we can't resolve it\n if transaction_id is None:\n 
continue\n project_threshold_override_config_keys.append(\n (Function(\"toUInt64\", [project_id]), (Function(\"toUInt64\", [transaction_id])))\n )\n project_threshold_override_config_values.append((metric, threshold))\n\n project_threshold_config_index: SelectType = Function(\n \"indexOf\",\n [\n project_threshold_config_keys,\n self.builder.column(\"project_id\"),\n ],\n constants.PROJECT_THRESHOLD_CONFIG_INDEX_ALIAS,\n )\n\n project_threshold_override_config_index: SelectType = Function(\n \"indexOf\",\n [\n project_threshold_override_config_keys,\n (self.builder.column(\"project_id\"), self.builder.column(\"transaction\")),\n ],\n constants.PROJECT_THRESHOLD_OVERRIDE_CONFIG_INDEX_ALIAS,\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 1399, "n_words": 318, "vocab_size": 165, "complexity": 15, "nloc": 117, "token_counts": 513, "n_ast_nodes": 741, "n_identifiers": 48, "d_id": 19036, "documentation": { "docstring": "This is mostly duplicated code from the discover dataset version\n TODO: try to make this more DRY with the discover version\n ", "n_words": 21, "vocab_size": 18, "n_whitespaces": 35, "language": "en" } }, { "id": 108688, "commit_id": "e12db8dcf12d408cf8cc23e95ea16b99038a058a", "repo": "matplotlib", "path": "lib/matplotlib/figure.py", "file_name": "figure.py", "fun_name": "set_dpi", "commit_message": "Add get/set methods for DPI in SubFigure\n\nThis fixes the following error: \r\n\r\nmatplotlib\\lib\\text.py line 1489, dop = self.figure.get_dpi()/72. AttributeError: 'SubFigure' object has no attribute 'get_dpi'.\r\n\r\nEffect: in v3.5.2 it is not possible to save a figure with a subfigure to a PDF.", "code": "def set_dpi(self, val):\n \n self._parent.dpi = val\n self.stale = True\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 30, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 34, "n_identifiers": 6, "d_id": 23305, "documentation": { "docstring": "\n Set the resolution of parent figure in dots-per-inch.\n \n Parameters\n ----------\n val : float\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 57, "language": "en" } }, { "id": 222727, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/command/config.py", "file_name": "config.py", "fun_name": "try_cpp", "commit_message": "add python 3.10.4 for windows", "code": "def try_cpp(self, body=None, headers=None, include_dirs=None, lang=\"c\"):\n \n from distutils.ccompiler import CompileError\n self._check_compiler()\n ok = True\n try:\n self._preprocess(body, headers, include_dirs, lang)\n except CompileError:\n ok = False\n\n self._clean()\n return ok\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 105, "n_words": 27, "vocab_size": 24, "complexity": 2, "nloc": 10, "token_counts": 63, "n_ast_nodes": 100, "n_identifiers": 13, "d_id": 56711, "documentation": { "docstring": "Construct a source file from 'body' (a string containing lines\n of C/C++ code) and 'headers' (a list of header files to include)\n and run it through the preprocessor. 
Return true if the\n preprocessor succeeded, false if there were any errors.\n ('body' probably isn't of much use, but what the heck.)\n ", "n_words": 50, "vocab_size": 43, "n_whitespaces": 86, "language": "en" } }, { "id": 218929, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/lib2to3/tests/test_fixers.py", "file_name": "test_fixers.py", "fun_name": "test_weird_target_2", "commit_message": "add python 3.10.4 for windows", "code": "def test_weird_target_2(self):\n b = \n\n a = \n self.check(b, a)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 30, "n_words": 8, "vocab_size": 7, "complexity": 1, "nloc": 13, "token_counts": 19, "n_ast_nodes": 35, "n_identifiers": 5, "d_id": 55554, "documentation": { "docstring": "\n try:\n pass\n except Exception, a.foo:\n pass\n try:\n pass\n except Exception as xxx_todo_changeme:\n a.foo = xxx_todo_changeme\n pass", "n_words": 16, "vocab_size": 11, "n_whitespaces": 135, "language": "en" } }, { "id": 31310, "commit_id": "a72f1c9f5b907f96cbb7de3bbb02a1d431d34071", "repo": "transformers", "path": "tests/models/longt5/test_modeling_flax_longt5.py", "file_name": "test_modeling_flax_longt5.py", "fun_name": "test_summarization", "commit_message": "Add `LongT5` model (#16792)\n\n* Initial commit\r\n\r\n* Make some fixes\r\n\r\n* Make PT model full forward pass\r\n\r\n* Drop TF & Flax implementation, fix copies etc\r\n\r\n* Add Flax model and update some corresponding stuff\r\n\r\n* Drop some TF things\r\n\r\n* Update config and flax local attn\r\n\r\n* Add encoder_attention_type to config\r\n\r\n* .\r\n\r\n* Update docs\r\n\r\n* Do some cleansing\r\n\r\n* Fix some issues -> make style; add some docs\r\n\r\n* Fix position_bias + mask addition + Update tests\r\n\r\n* Fix repo consistency\r\n\r\n* Fix model consistency by removing flax operation over attn_mask\r\n\r\n* [WIP] Add PT TGlobal LongT5\r\n\r\n* .\r\n\r\n* [WIP] Add flax tglobal model\r\n\r\n* [WIP] Update flax model to use the right attention type in the encoder\r\n\r\n* Fix flax tglobal model forward pass\r\n\r\n* Make the use of global_relative_attention_bias\r\n\r\n* Add test suites for TGlobal model\r\n\r\n* Fix minor bugs, clean code\r\n\r\n* Fix pt-flax equivalence though not convinced with correctness\r\n\r\n* Fix LocalAttn implementation to match the original impl. 
+ update READMEs\r\n\r\n* Few updates\r\n\r\n* Update: [Flax] improve large model init and loading #16148\r\n\r\n* Add ckpt conversion script accoring to #16853 + handle torch device placement\r\n\r\n* Minor updates to conversion script.\r\n\r\n* Typo: AutoModelForSeq2SeqLM -> FlaxAutoModelForSeq2SeqLM\r\n\r\n* gpu support + dtype fix\r\n\r\n* Apply some suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\nCo-authored-by: Patrick von Platen \r\n\r\n* * Remove (de)parallelize stuff\r\n* Edit shape comments\r\n* Update README.md\r\n* make fix-copies\r\n\r\n* Remove caching logic for local & tglobal attention\r\n\r\n* Apply another batch of suggestions from code review\r\n\r\n* Add missing checkpoints\r\n* Format converting scripts\r\n* Drop (de)parallelize links from longT5 mdx\r\n\r\n* Fix converting script + revert config file change\r\n\r\n* Revert \"Remove caching logic for local & tglobal attention\"\r\n\r\nThis reverts commit 2a619828f6ddc3e65bd9bb1725a12b77fa883a46.\r\n\r\n* Stash caching logic in Flax model\r\n\r\n* Make side relative bias used always\r\n\r\n* Drop caching logic in PT model\r\n\r\n* Return side bias as it was\r\n\r\n* Drop all remaining model parallel logic\r\n\r\n* Remove clamp statements\r\n\r\n* Move test files to the proper place\r\n\r\n* Update docs with new version of hf-doc-builder\r\n\r\n* Fix test imports\r\n\r\n* Make some minor improvements\r\n\r\n* Add missing checkpoints to docs\r\n* Make TGlobal model compatible with torch.onnx.export\r\n* Replace some np.ndarray with jnp.ndarray\r\n\r\n* Fix TGlobal for ONNX conversion + update docs\r\n\r\n* fix _make_global_fixed_block_ids and masked neg value\r\n\r\n* update flax model\r\n\r\n* style and quality\r\n\r\n* fix imports\r\n\r\n* remove load_tf_weights_in_longt5 from init and fix copies\r\n\r\n* add slow test for TGlobal model\r\n\r\n* typo fix\r\n\r\n* Drop obsolete is_parallelizable and one warning\r\n\r\n* Update __init__ files to fix repo-consistency\r\n\r\n* fix pipeline test\r\n\r\n* Fix some device placements\r\n\r\n* [wip]: Update tests -- need to generate summaries to update expected_summary\r\n\r\n* Fix quality\r\n\r\n* Update LongT5 model card\r\n\r\n* Update (slow) summarization tests\r\n\r\n* make style\r\n\r\n* rename checkpoitns\r\n\r\n* finish\r\n\r\n* fix flax tests\r\n\r\nCo-authored-by: phungvanduy \r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\nCo-authored-by: Patrick von Platen \r\nCo-authored-by: patil-suraj ", "code": "def test_summarization(self):\n model = FlaxLongT5ForConditionalGeneration.from_pretrained(self.model_path)\n tok = AutoTokenizer.from_pretrained(self.model_path)\n\n ARTICLE = \n\n dct = tok(\n [ARTICLE],\n max_length=1024,\n padding=\"max_length\",\n truncation=True,\n return_tensors=\"np\",\n )\n\n hypotheses_batch = model.generate(\n **dct,\n num_beams=4,\n length_penalty=2.0,\n max_length=142,\n min_length=56,\n do_sample=False,\n early_stopping=True,\n ).sequences\n\n decoded = tok.batch_decode(hypotheses_batch, skip_special_tokens=True, clean_up_tokenization_spaces=False)\n self.assertListEqual(\n self.expected_summary(),\n decoded,\n )\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 263, "n_words": 39, "vocab_size": 33, "complexity": 1, "nloc": 79, "token_counts": 120, "n_ast_nodes": 250, "n_identifiers": 28, "d_id": 5722, "documentation": { "docstring": 
"coronary artery disease ( cad ) is the emerging cause of morbidity and mortality in developing world . \\n it provides an excellent resolution for visualization of the coronary arteries for catheter - based or operating interventions . \\n\n although the association of this technique with major complications such as mortality is highly uncommon , it is frequently associated with various cardiac and noncardiac complications . computed tomography ( ct ) coronary angiography is\n a promising technique for the evaluation of cad noninvasively . \\n it assesses disease within the coronary artery and provides qualitative and quantitative information about nonobstructive atherosclerotic plaque burden within the vessel\n wall . \\n thus , ct angiography - based disease evaluation may provide clinically more significant information than conventional angiography . the introduction of multi - slice computed tomography ( msct ) technology such as 64-slice , 12\n 8-slice , 256-slice , and now 320-slice msct has produced a high diagnostic accuracy of ct coronary angiography . \\n it has consistently showed to have a very high negative predictive value ( well above 90% ) in ruling out patients with s\n ignificant cad defined as coronary luminal stenosis of > 50% . \\n the american college of cardiology / american heart association recommends that coronary angiography should be performed before valve surgery in men aged > 40 years , women\n aged > 35 years with coronary risk factors and in postmenopausal women . \\n the prevalence of cad in patients undergoing valve replacement is 2040% in developed countries . in the previous studies , \\n the incidence of angiographically p\n roven cad in acquired valvular diseases has been shown to vary widely from 9% to 41% . in aortic stenosis , \\n we aimed to report the diagnostic performance of 128-slice ct coronary angiography in 50 patients undergoing for major noncoron\n ary cardiac surgery referred for diagnostic invasive coronary angiography to assess the extent and severity of coronary stenosis . \\n during january 2013 to december 2014 , we enrolled fifty major noncoronary cardiac surgery patients sche\n duled for invasive coronary angiography who fulfilled the following inclusion criteria of age 40 years , having low or intermediate probability of cad , left ventricular ejection fraction ( lvef ) > 35% , and patient giving informed conse\n nt for undergoing msct and conventional coronary angiography . \\n those having any contraindication for contrast injection , lvef < 35% , high pretest probability of cad , and hemodynamic instability were excluded from the study . \\n pati\n ents with heart rates of > 70 bpm received ( unless they had known overt heart failure or electrocardiogram ( ecg ) atrioventricular conduction abnormalities ) a single oral dose of 100 mg metoprolol 45 min before the scan . \\n patients w\n ith heart rates of > 80 bpm received an additional oral dose of metoprolol if not contraindicated . \\n all patients were scanned with a 128-slice ct scanner ( siemens , somatom definition as ) equipped with a new feature in msct technolog\n y , so - called z - axis flying - focus technology . \\n the central 32 detector rows acquire 0.6-mm slices , and the flying - focus spot switches back and forth between 2 z positions between each reading . 
\\n two slices per detector row a\n re acquired , which results in a higher oversampling rate in the z - axis , thereby reducing artifacts related to the spiral acquisition and improving spatial resolution down to 0.4 mm . \\n a bolus of 6580 ml contrast material ( omnipaque\n ) was injected through an arm vein at a flow rate of 5 ml / s . \\n a bolus tracking technique was used to synchronize the arrival of contrast in the coronary arteries with the initiation of the scan . to monitor the arrival of contrast m\n aterial , \\n axial scans were obtained at the level of the ascending aorta with a delay of 10 s after the start of the contrast injection . \\n the scan was automatically started when a threshold of 150 hounsfield units was reached in a re\n gion of interest positioned in the ascending aorta . \\n images were reconstructed with ecg gating to obtain optimal , motion - free image quality . \\n all scans were performed within 2 weeks of the msct coronary diagnostic angiogram . a s\n ingle observer unaware of the multi - slice ct results identified coronary lesion as a single vessel , double vessel , or triple vessel disease . \\n all lesion , regardless of size , were included for comparison with ct coronary angiograp\n hy . \\n lesions were classified as having nonsignificant disease ( luminal irregularities or < 50% stenosis ) or as having significant stenosis . \\n stenosis was evaluated in two orthogonal views and classified as significant if the mean\n lumen diameter reduction was 50% using a validated quantitative coronary angiography ( qca ) . \\n all scans were analyzed independently by a radiologist and a cardiologist who were unaware of the results of conventional coronary angiograp\n hy . \\n total calcium scores of all patients were calculated with dedicated software and expressed as agatston scores . \\n the agatston score is a commonly used scoring method that calculates the total amount of calcium on the basis of th\n e number , areas , and peak hounsfield units of the detected calcified lesions . \\n all available coronary segments were visually scored for the presence of > 50% considered as significant stenosis . \\n maximum intensity projections were\n used to identify coronary lesions and ( curved ) multiplanar reconstructions to classify lesions as significant or nonsignificant . \\n data were analyzed using statistical system spss version 20 software ( chicago , il , usa ) . \\n the di\n agnostic performance of ct coronary angiography for the detection of significant lesions in coronary arteries with qca as the standard of reference is presented as sensitivity , specificity , positive and negative predictive values , and\n positive and negative likelihood ratios with the corresponding exact 95% of confidence interval ( cis ) . \\n comparison between ct and conventional coronary angiography was performed on the two level vessel by vessel ( no or any disease p\n er vessel ) , and patient by patient ( no or any disease per patient ) . \\n all scans were performed within 2 weeks of the msct coronary diagnostic angiogram . a single observer unaware of the multi - slice ct results identified coronary\n lesion as a single vessel , double vessel , or triple vessel disease . \\n all lesion , regardless of size , were included for comparison with ct coronary angiography . \\n lesions were classified as having nonsignificant disease ( luminal\n irregularities or < 50% stenosis ) or as having significant stenosis . 
\\n stenosis was evaluated in two orthogonal views and classified as significant if the mean lumen diameter reduction was 50% using a validated quantitative coronary an\n giography ( qca ) . \\n all scans were analyzed independently by a radiologist and a cardiologist who were unaware of the results of conventional coronary angiography . \\n total calcium scores of all patients were calculated with dedicated\n software and expressed as agatston scores . \\n the agatston score is a commonly used scoring method that calculates the total amount of calcium on the basis of the number , areas , and peak hounsfield units of the detected calcified lesi\n ons . \\n all available coronary segments were visually scored for the presence of > 50% considered as significant stenosis . \\n maximum intensity projections were used to identify coronary lesions and ( curved ) multiplanar reconstruction\n s to classify lesions as significant or nonsignificant . \\n data were analyzed using statistical system spss version 20 software ( chicago , il , usa ) . \\n the diagnostic performance of ct coronary angiography for the detection of signif\n icant lesions in coronary arteries with qca as the standard of reference is presented as sensitivity , specificity , positive and negative predictive values , and positive and negative likelihood ratios with the corresponding exact 95% of\n confidence interval ( cis ) . \\n comparison between ct and conventional coronary angiography was performed on the two level vessel by vessel ( no or any disease per vessel ) , and patient by patient ( no or any disease per patient ) . \\n\n in this study , 29 ( 58% ) subjects were female , and 21 ( 42% ) were male showing an average age of 50.36 8.39 years . \\n of fifty patients 24 ( 48% ) , 13 ( 26% ) , eight ( 16% ) , and five ( 10% ) underwent mitral valve replacement ,\n double valve replacement ( dvr ) , aortic valve replacement , and other surgeries , respectively . \\n high distribution of cad risk factors such as hypertension ( 24% ) , smoking ( 22% ) , and dyslipidemia ( 18% ) was observed in the stu\n dy group . \\n the mean creatinine level was 0.766 0.17 and average dye used in conventional angiography was 48.5 26.6 whereas for ct angiography it was 72.8 6.32 . \\n average radiation dose in conventional coronary angiography and msct\n coronary angiography was 5.2 msv and 9.2 msv , respectively . \\n the majority of the patients had sinus rhythm ( 68% ) , whereas atrial fibrillation was found in 32% of the subjects . \\n patients included in the study had low to intermed\n iate probability of cad . in this study , three patients had complications after conventional angiography . \\n complications were of local site hematoma , acute kidney injury managed conservatively , and acute heart failure . \\n a patient\n who developed hematoma was obese female patients with body mass index > 30 kg / m . \\n the patient suffered from pseudoaneurysm , had hospitalized for 9 days , which leads to increased morbidity and cost of hospital stay . \\n the diagnos\n tic accuracy of ct coronary angiography was evaluated regarding true positive , true negative values and is presented in table 1 . the overall sensitivity and \\n specificity of ct angiography technique was 100% ( 95% ci : 39.76%100% ) and\n 91.30% ( 95% ci : 79.21%97.58% ) , respectively [ table 2 ] . 
\\n the positive predictive value ( 50% ; 95% ci : 15.70%84.30% ) and negative predictive value ( 100% ; 95% ci : 91.59%100% ) of ct angiography were also fairly high in these\n patients . \\n recent reports from multiple studies demonstrated that recent - generation msct scanners showed promise for noninvasive detection of coronary stenosis however , until now no studies were found regarding the clinical efficacy\n or prognostic value of 128-slice ct coronary angiography versus conventional invasive coronary angiography in the diagnosis of patients planned for major noncoronary surgeries such as dvr , bentall , atrial septal defect closure , etc .\n in our study , we reported 8% cad prevalence in patients planned for major noncoronary cardiac surgery . \\n we performed conventional and msct coronary angiography in all patients and the results showed that ct coronary angiography with i\n nvasive coronary angiography as the reference standard had a considerably high sensitivity ( 100% ) and specificity ( 95.65% ) . \\n the health economic model using invasive coronary angiography as the reference standard showed that at a p\n retest probability of cad of 70% or lower , ct coronary angiography resulted in lower cost per patient with a true positive diagnosis . at a pretest probability of cad of 70% or higher , invasive coronary angiography was associated with a\n lower cost per patient with a true positive diagnosis . in our study population , \\n two patients developed local site complications in the form of hematoma and pseudoaneurysm after conventional angiography . \\n hence , msct coronary ang\n iography will be more favorable in female obese patients with intermediate likelihood of cad . \\n hence , msct coronary angiography will be cost - effective in patients of valvular heart diseases . \\n however , ct angiography suffers from\n a drawback that average amount of dye used in msct coronary angiography were 72.8 6.32 ml which is higher than average amount of dye required for conventional angiography ( 48.6 26.6 ml ) . \\n hence , the use of ct coronary angiography\n could not be used in patients with known renal dysfunction , where reduction of contrast dye load is highly advocated . \\n our results show that 128-slice ct coronary angiography is a reliable technique to detect coronary stenosis in pat\n ients planned for noncoronary cardiac surgery . \\n although there has been important technological progress in the development of ct coronary angiography , its clinical application remains limited . \\n a study wth large numbers of patient\n s is required for the recommendation of only ct coronary angiography for the coronary evaluation in major non - cardiac surgeries . \\n mehta institute of cardiology and research center ( affiliated to bj medical college , ahmedabad , guja\n rat , india ) . \\n u.n . mehta institute of cardiology and research center ( affiliated to bj medical college , ahmedabad , gujarat , india ) . 
\\n ", "n_words": 2237, "vocab_size": 651, "n_whitespaces": 2837, "language": "en" } }, { "id": 286486, "commit_id": "aed683f44015cb5aa6cae9c2ce719c956cda7b46", "repo": "OpenBBTerminal", "path": "openbb_terminal/portfolio/attribution_model.py", "file_name": "attribution_model.py", "fun_name": "get_daily_sector_prices", "commit_message": "Feature/attribution toolkit (#3156)\n\n* add attribution toolkit\r\n\r\n* add attrib to test script for portfolio\r\n\r\n* removes yahooquery dependency and early rounding\r\n\r\n* Update _index.md\r\n\r\n* update feature to include raw and type flags, graph always shows, table output optional, one type of output at a time\r\n\r\n* Linting\r\n\r\n* Update index\r\n\r\n* Update index 2\r\n\r\n* Update tests\r\n\r\n* changes argument descriptions\r\n\r\n* Small fix\r\n\r\n* Formatting Black\r\n\r\nCo-authored-by: S3908818 \r\nCo-authored-by: Louise Platts (S3908818) <88080425+s3908818@users.noreply.github.com>\r\nCo-authored-by: Jeroen Bouma \r\nCo-authored-by: James Maslek \r\nCo-authored-by: Louise Amy <74476622+louiseamy4@users.noreply.github.com>\r\nCo-authored-by: Jeroen Bouma ", "code": "def get_daily_sector_prices(start_date, end_date):\n \n # sector ticker information\n sp500_tickers = {\n \"S&P 500 Materials (Sector)\": \"^SP500-15\",\n \"S&P 500 Industrials (Sector)\": \"^SP500-20\",\n \"S&P 500 Consumer Discretionary (Sector)\": \"^SP500-25\",\n \"S&P 500 Consumer Staples (Sector)\": \"^SP500-30\",\n \"S&P 500 Health Care (Sector)\": \"^SP500-35\",\n \"S&P 500 Financials (Sector)\": \"^SP500-40\",\n \"S&P 500 Information Technology (Sector)\": \"^SP500-45\",\n \"S&P 500 Telecommunication Services (Sector)\": \"^SP500-50\",\n \"S&P 500 Utilities (Sector)\": \"^SP500-55\",\n \"S&P 500 Real Estate (Sector)\": \"^SP500-60\",\n \"S&P 500 Energy (Sector)\": \"^GSPE\",\n }\n\n sp500_tickers_data = {} # to store data\n\n for (\n sector,\n sector_ticker,\n ) in sp500_tickers.items(): # iterate thru the sectors\n # load the data required from yfinance\n sp500_tickers_data[\n sector\n ] = { # builds a dictionary entry for the sector with adj close data\n \"sector_data\": yf.download(\n sector_ticker,\n start=start_date,\n end=end_date,\n progress=False,\n )[\"Adj Close\"]\n } # stores the data here\n\n return sp500_tickers_data\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 371, "n_words": 131, "vocab_size": 80, "complexity": 2, "nloc": 30, "token_counts": 107, "n_ast_nodes": 204, "n_identifiers": 13, "d_id": 85828, "documentation": { "docstring": "\n fetches daily sector prices for S&P500 for a fixed time period\n\n Parameters\n ----------\n start_date : str ('yyyy-mm-dd') or datetime.date\n start date for fetching data\n end_date : str ('yyyy-mm-dd') or datetime.date\n end date for fetching data\n\n Returns\n -------\n sp500_tickers_data : Dictionary\n dictionary of dataframes with SPY daily sector prices\n ", "n_words": 48, "vocab_size": 33, "n_whitespaces": 97, "language": "en" } }, { "id": 260619, "commit_id": "4f315db68bb190f0ac03d594f5b45d8fb4213f6f", "repo": "scikit-learn", "path": "sklearn/decomposition/tests/test_sparse_pca.py", "file_name": "test_sparse_pca.py", "fun_name": "test_equivalence_components_pca_spca", "commit_message": "FIX make SparsePCA components_ deterministic (#23935)", "code": "def test_equivalence_components_pca_spca(global_random_seed):\n \n rng = np.random.RandomState(global_random_seed)\n X = rng.randn(50, 4)\n\n 
n_components = 2\n pca = PCA(\n n_components=n_components,\n svd_solver=\"randomized\",\n random_state=0,\n ).fit(X)\n spca = SparsePCA(\n n_components=n_components,\n method=\"lars\",\n ridge_alpha=0,\n alpha=0,\n random_state=0,\n ).fit(X)\n\n assert_allclose(pca.components_, spca.components_)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 113, "n_words": 30, "vocab_size": 23, "complexity": 1, "nloc": 17, "token_counts": 91, "n_ast_nodes": 142, "n_identifiers": 21, "d_id": 76379, "documentation": { "docstring": "Check the equivalence of the components found by PCA and SparsePCA.\n\n Non-regression test for:\n https://github.com/scikit-learn/scikit-learn/issues/23932\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 24, "language": "en" } }, { "id": 256954, "commit_id": "ac5617e757e9ace6f30b7291686d9dbbc339f433", "repo": "haystack", "path": "haystack/telemetry.py", "file_name": "telemetry.py", "fun_name": "send_event_if_public_demo", "commit_message": "Add basic telemetry features (#2314)\n\n* add basic telemetry features\r\n\r\n* change pipeline_config to _component_config\r\n\r\n* Update Documentation & Code Style\r\n\r\n* add super().__init__() calls to error classes\r\n\r\n* make posthog mock work with python 3.7\r\n\r\n* Update Documentation & Code Style\r\n\r\n* update link to docs web page\r\n\r\n* log exceptions, send event for raised HaystackErrors, refactor Path(CONFIG_PATH)\r\n\r\n* add comment on send_event in BaseComponent.init() and fix mypy\r\n\r\n* mock NonPrivateParameters and fix pylint undefined-variable\r\n\r\n* Update Documentation & Code Style\r\n\r\n* check model path contains multiple /\r\n\r\n* add test for writing to file\r\n\r\n* add test for en-/disable telemetry\r\n\r\n* Update Documentation & Code Style\r\n\r\n* merge file deletion methods and ignore pylint global statement\r\n\r\n* Update Documentation & Code Style\r\n\r\n* set env variable in demo to activate telemetry\r\n\r\n* fix mock of HAYSTACK_TELEMETRY_ENABLED\r\n\r\n* fix mypy and linter\r\n\r\n* add CI as env variable to execution contexts\r\n\r\n* remove threading, add test for custom error event\r\n\r\n* Update Documentation & Code Style\r\n\r\n* simplify config/log file deletion\r\n\r\n* add test for final event being sent\r\n\r\n* force writing config file in test\r\n\r\n* make test compatible with python 3.7\r\n\r\n* switch to posthog production server\r\n\r\n* Update Documentation & Code Style\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def send_event_if_public_demo(func):\n \n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 4, "token_counts": 15, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 74971, "documentation": { "docstring": "\n Can be used as a decorator to send an event only if HAYSTACK_EXECUTION_CONTEXT is \"public_demo\"\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 22, "language": "en" } }, { "id": 276277, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/saving_utils_test.py", "file_name": "saving_utils_test.py", "fun_name": "test_model_with_fixed_input_dim", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def 
test_model_with_fixed_input_dim(self):\n \n model = test_utils.get_small_mlp(10, 3, 5)\n\n loss_object = keras.losses.MeanSquaredError()\n optimizer = gradient_descent.SGD()\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 41, "n_words": 13, "vocab_size": 11, "complexity": 1, "nloc": 14, "token_counts": 118, "n_ast_nodes": 57, "n_identifiers": 12, "d_id": 81611, "documentation": { "docstring": "Ensure that the batch_dim is removed when saving.\n\n When serving or retraining, it is important to reset the batch dim.\n This can be an issue inside of tf.function. See b/132783590 for context.\n ", "n_words": 32, "vocab_size": 30, "n_whitespaces": 53, "language": "en" } }, { "id": 96012, "commit_id": "2790a30b7f6a6cffa2cd1aa69c678327a41a0664", "repo": "sentry", "path": "tests/sentry/integrations/bitbucket/test_installed.py", "file_name": "test_installed.py", "fun_name": "test_installed_without_username", "commit_message": "fix(bitbucket): Fix domain name (#31536)\n\n* fix(bitbucket): Fix domain name", "code": "def test_installed_without_username(self):\n \n\n # Remove username to simulate privacy mode\n del self.user_data_from_bitbucket[\"principal\"][\"username\"]\n\n response = self.client.post(self.path, data=self.user_data_from_bitbucket)\n assert response.status_code == 200\n integration = Integration.objects.get(provider=self.provider, external_id=self.client_key)\n assert integration.name == self.user_display_name\n assert integration.metadata == self.user_metadata\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 87, "n_words": 31, "vocab_size": 26, "complexity": 1, "nloc": 7, "token_counts": 76, "n_ast_nodes": 122, "n_identifiers": 20, "d_id": 19263, "documentation": { "docstring": "Test a user (not team) installation where the user has hidden their username from public view", "n_words": 16, "vocab_size": 15, "n_whitespaces": 15, "language": "en" } }, { "id": 153043, "commit_id": "58bbcc37477866d19c8b092a0e1974a4f0baa586", "repo": "modin", "path": "modin/core/dataframe/algebra/reduce.py", "file_name": "reduce.py", "fun_name": "call", "commit_message": "REFACTOR-#2656: Update modin to fit algebra (code only) (#3717)\n\nCo-authored-by: Yaroslav Igoshev \r\nCo-authored-by: Vasily Litvinov \r\nCo-authored-by: Alexey Prutskov \r\nCo-authored-by: Devin Petersohn \r\nSigned-off-by: Rehan Durrani ", "code": "def call(cls, reduce_function, axis=None):\n \n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 11, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 3, "token_counts": 16, "n_ast_nodes": 20, "n_identifiers": 4, "d_id": 35227, "documentation": { "docstring": "\n Build Reduce operator that will be performed across rows/columns.\n\n It's used if `func` reduces the dimension of partitions in contrast to `Fold`.\n\n Parameters\n ----------\n reduce_function : callable(pandas.DataFrame) -> pandas.Series\n Source function.\n axis : int, optional\n Axis to apply function along.\n\n Returns\n -------\n callable\n Function that takes query compiler and executes Reduce function.\n ", "n_words": 52, "vocab_size": 47, "n_whitespaces": 156, "language": "en" } }, { "id": 126557, "commit_id": "46ed3557ba6b4f4f72c15ef960aba5270ada2a9c", "repo": "ray", "path": "python/ray/tune/tests/test_tune_restore.py", 
"file_name": "test_tune_restore.py", "fun_name": "test_resource_exhausted_info", "commit_message": "[tune] Fix test_resource_exhausted_info test (#27426)\n\n#27213 broke this test\r\n\r\nSigned-off-by: Kai Fricke ", "code": "def test_resource_exhausted_info(self):\n \n\n # generate some random data to be captured implicitly in training func.\n from sklearn.datasets import fetch_olivetti_faces\n\n a_large_array = []\n for i in range(50):\n a_large_array.append(fetch_olivetti_faces())\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 72, "n_words": 26, "vocab_size": 25, "complexity": 2, "nloc": 11, "token_counts": 51, "n_ast_nodes": 56, "n_identifiers": 9, "d_id": 28198, "documentation": { "docstring": "This is to test if helpful information is displayed when\n the objects captured in trainable/training function are too\n large and RESOURCES_EXHAUSTED error of gRPC is triggered.", "n_words": 26, "vocab_size": 24, "n_whitespaces": 39, "language": "en" } }, { "id": 110468, "commit_id": "df3d2ab53722d191bbbc667a5ac2f7cb7cdfee84", "repo": "matplotlib", "path": "lib/matplotlib/_api/__init__.py", "file_name": "__init__.py", "fun_name": "check_shape", "commit_message": "Improve argument checking for set_xticks().", "code": "def check_shape(_shape, **kwargs):\n \n target_shape = _shape\n for k, v in kwargs.items():\n data_shape = v.shape\n\n if len(target_shape) != len(data_shape) or any(\n t not in [s, None]\n for t, s in zip(target_shape, data_shape)\n ):\n dim_labels = iter(itertools.chain(\n 'MNLIJKLH',\n (f\"D{i}\" for i in itertools.count())))\n text_shape = \", \".join((str(n)\n if n is not None\n else next(dim_labels)\n for n in target_shape))\n if len(target_shape) == 1:\n text_shape += \",\"\n\n raise ValueError(\n f\"{k!r} must be {len(target_shape)}D \"\n f\"with shape ({text_shape}). \"\n f\"Your input has shape {v.shape}.\"\n )\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 390, "n_words": 80, "vocab_size": 62, "complexity": 9, "nloc": 22, "token_counts": 134, "n_ast_nodes": 244, "n_identifiers": 26, "d_id": 24175, "documentation": { "docstring": "\n For each *key, value* pair in *kwargs*, check that *value* has the shape\n *_shape*, if not, raise an appropriate ValueError.\n\n *None* in the shape is treated as a \"free\" size that can have any length.\n e.g. (None, 2) -> (N, 2)\n\n The values checked must be numpy arrays.\n\n Examples\n --------\n To check for (N, 2) shaped arrays\n\n >>> _api.check_shape((None, 2), arg=arg, other_arg=other_arg)\n ", "n_words": 62, "vocab_size": 54, "n_whitespaces": 93, "language": "en" } }, { "id": 22137, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/utils.py", "file_name": "utils.py", "fun_name": "get_unicode_from_response", "commit_message": "Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def get_unicode_from_response(r):\n \n warnings.warn(\n (\n \"In requests 3.0, get_unicode_from_response will be removed. For \"\n \"more information, please see the discussion on issue #2266. 
(This\"\n \" warning should only appear once.)\"\n ),\n DeprecationWarning,\n )\n\n tried_encodings = []\n\n # Try charset from content-type\n encoding = get_encoding_from_headers(r.headers)\n\n if encoding:\n try:\n return str(r.content, encoding)\n except UnicodeError:\n tried_encodings.append(encoding)\n\n # Fall back:\n try:\n return str(r.content, encoding, errors=\"replace\")\n except TypeError:\n return r.content\n\n\n# The unreserved URI characters (RFC 3986)\nUNRESERVED_SET = frozenset(\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\" + \"0123456789-._~\"\n)\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 212, "n_words": 78, "vocab_size": 67, "complexity": 4, "nloc": 20, "token_counts": 76, "n_ast_nodes": 150, "n_identifiers": 17, "d_id": 4209, "documentation": { "docstring": "Returns the requested content back in unicode.\n\n :param r: Response object to get unicode content from.\n\n Tried:\n\n 1. charset from content-type\n 2. fall back and replace all unicode characters\n\n :rtype: str\n ", "n_words": 31, "vocab_size": 28, "n_whitespaces": 49, "language": "en" } }, { "id": 209588, "commit_id": "495b21f2867e48286767085c8cf2918e4092e9dc", "repo": "scapy", "path": "scapy/contrib/automotive/scanner/graph.py", "file_name": "graph.py", "fun_name": "render", "commit_message": "Add Automotive Logger for all debug outputs of the automotive layer", "code": "def render(self, filename=\"SystemStateGraph.gv\", view=True):\n # type: (str, bool) -> None\n \n try:\n from graphviz import Digraph\n except ImportError:\n log_automotive.info(\"Please install graphviz.\")\n return\n\n ps = Digraph(name=\"SystemStateGraph\",\n node_attr={\"fillcolor\": \"lightgrey\",\n \"style\": \"filled\",\n \"shape\": \"box\"},\n graph_attr={\"concentrate\": \"true\"})\n for n in self.nodes:\n ps.node(str(n))\n\n for e, f in self.__transition_functions.items():\n try:\n desc = \"\" if f is None else f[1][\"desc\"]\n except (AttributeError, KeyError):\n desc = \"\"\n ps.edge(str(e[0]), str(e[1]), label=desc)\n\n ps.render(filename, view=view)\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 328, "n_words": 63, "vocab_size": 53, "complexity": 6, "nloc": 20, "token_counts": 152, "n_ast_nodes": 260, "n_identifiers": 26, "d_id": 52742, "documentation": { "docstring": "\n Renders this Graph as PDF, if `graphviz` is installed.\n\n :param filename: A filename for the rendered PDF.\n :param view: If True, rendered file will be opened.\n ", "n_words": 26, "vocab_size": 24, "n_whitespaces": 55, "language": "en" } }, { "id": 267587, "commit_id": "43153c58310d02223f2cb0964f4255ba1ac4ed53", "repo": "ansible", "path": "lib/ansible/playbook/task.py", "file_name": "task.py", "fun_name": "preprocess_data", "commit_message": "`FieldAttribute`s as descriptors (#73908)", "code": "def preprocess_data(self, ds):\n \n\n if not isinstance(ds, dict):\n raise AnsibleAssertionError('ds (%s) should be a dict but was a %s' % (ds, type(ds)))\n\n # the new, cleaned datastructure, which will have legacy\n # items reduced to a standard structure suitable for the\n # attributes of the task class\n new_ds = AnsibleMapping()\n if isinstance(ds, AnsibleBaseYAMLObject):\n new_ds.ansible_pos = ds.ansible_pos\n\n # since this affects the task action parsing, we have to resolve in preprocess instead of in typical validator\n default_collection = 
AnsibleCollectionConfig.default_collection\n\n collections_list = ds.get('collections')\n if collections_list is None:\n # use the parent value if our ds doesn't define it\n collections_list = self.collections\n else:\n # Validate this untemplated field early on to guarantee we are dealing with a list.\n # This is also done in CollectionSearch._load_collections() but this runs before that call.\n collections_list = self.get_validated_value('collections', self.fattributes.get('collections'), collections_list, None)\n\n if default_collection and not self._role: # FIXME: and not a collections role\n if collections_list:\n if default_collection not in collections_list:\n collections_list.insert(0, default_collection)\n else:\n collections_list = [default_collection]\n\n if collections_list and 'ansible.builtin' not in collections_list and 'ansible.legacy' not in collections_list:\n collections_list.append('ansible.legacy')\n\n if collections_list:\n ds['collections'] = collections_list\n\n # use the args parsing class to determine the action, args,\n # and the delegate_to value from the various possible forms\n # supported as legacy\n args_parser = ModuleArgsParser(task_ds=ds, collection_list=collections_list)\n try:\n (action, args, delegate_to) = args_parser.parse()\n except AnsibleParserError as e:\n # if the raises exception was created with obj=ds args, then it includes the detail\n # so we dont need to add it so we can just re raise.\n if e.obj:\n raise\n # But if it wasn't, we can add the yaml object now to get more detail\n raise AnsibleParserError(to_native(e), obj=ds, orig_exc=e)\n else:\n self.resolved_action = args_parser.resolved_action\n\n # the command/shell/script modules used to support the `cmd` arg,\n # which corresponds to what we now call _raw_params, so move that\n # value over to _raw_params (assuming it is empty)\n if action in C._ACTION_HAS_CMD:\n if 'cmd' in args:\n if args.get('_raw_params', '') != '':\n raise AnsibleError(\"The 'cmd' argument cannot be used when other raw parameters are specified.\"\n \" Please put everything in one or the other place.\", obj=ds)\n args['_raw_params'] = args.pop('cmd')\n\n new_ds['action'] = action\n new_ds['args'] = args\n new_ds['delegate_to'] = delegate_to\n\n # we handle any 'vars' specified in the ds here, as we may\n # be adding things to them below (special handling for includes).\n # When that deprecated feature is removed, this can be too.\n if 'vars' in ds:\n # _load_vars is defined in Base, and is used to load a dictionary\n # or list of dictionaries in a standard way\n new_ds['vars'] = self._load_vars(None, ds.get('vars'))\n else:\n new_ds['vars'] = dict()\n\n for (k, v) in ds.items():\n if k in ('action', 'local_action', 'args', 'delegate_to') or k == action or k == 'shell':\n # we don't want to re-assign these values, which were determined by the ModuleArgsParser() above\n continue\n elif k.startswith('with_') and k.replace(\"with_\", \"\") in lookup_loader:\n # transform into loop property\n self._preprocess_with_loop(ds, new_ds, k, v)\n elif C.INVALID_TASK_ATTRIBUTE_FAILED or k in self._valid_attrs:\n new_ds[k] = v\n else:\n display.warning(\"Ignoring invalid attribute: %s\" % k)\n\n return super(Task, self).preprocess_data(new_ds)\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 1282, "n_words": 491, "vocab_size": 278, "complexity": 26, "nloc": 54, "token_counts": 421, "n_ast_nodes": 737, "n_identifiers": 53, "d_id": 
78966, "documentation": { "docstring": "\n tasks are especially complex arguments so need pre-processing.\n keep it short.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 33, "language": "en" } }, { "id": 207588, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_custom_changelist", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_custom_changelist(self):\n \n # Insert some data\n post_data = {\"name\": \"First Gadget\"}\n response = self.client.post(reverse(\"admin:admin_views_gadget_add\"), post_data)\n self.assertEqual(response.status_code, 302) # redirect somewhere\n # Hit the page once to get messages out of the queue message list\n response = self.client.get(reverse(\"admin:admin_views_gadget_changelist\"))\n # Data is still not visible on the page\n response = self.client.get(reverse(\"admin:admin_views_gadget_changelist\"))\n self.assertNotContains(response, \"First Gadget\")\n\n\n@override_settings(ROOT_URLCONF=\"admin_views.urls\")", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "@override_settings(ROOT_URLCONF=\"admin_views.urls\")", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 123, "n_words": 53, "vocab_size": 40, "complexity": 1, "nloc": 7, "token_counts": 72, "n_ast_nodes": 146, "n_identifiers": 13, "d_id": 52011, "documentation": { "docstring": "\n Validate that a custom ChangeList class can be used (#9749)\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 200522, "commit_id": "27ff0c7bf7062f5b4b80ad12098e6422af5fcb44", "repo": "sympy", "path": "sympy/codegen/scipy_nodes.py", "file_name": "scipy_nodes.py", "fun_name": "fdiff", "commit_message": "more tests of cosm1, powm1 from scipy.special", "code": "def fdiff(self, argindex=1):\n \n if argindex == 1:\n return Pow(self.args[0], self.args[1])*self.args[1]/self.args[0]\n elif argindex == 2:\n return log(self.args[0])*Pow(*self.args)\n else:\n raise ArgumentIndexError(self, argindex)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 81, "n_words": 20, "vocab_size": 17, "complexity": 3, "nloc": 7, "token_counts": 78, "n_ast_nodes": 120, "n_identifiers": 7, "d_id": 49688, "documentation": { "docstring": "\n Returns the first derivative of this function.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 165949, "commit_id": "5531195f6f0d87817a704b288008809a3c98a304", "repo": "pandas", "path": "pandas/core/indexes/accessors.py", "file_name": "accessors.py", "fun_name": "isocalendar", "commit_message": "fix-ci-isocalendar (#46690)", "code": "def isocalendar(self):\n \n return self._get_values().isocalendar().set_index(self._parent.index)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 25, "n_ast_nodes": 44, "n_identifiers": 6, "d_id": 39746, "documentation": { "docstring": "\n Calculate year, week, and day according to the ISO 8601 standard.\n\n .. 
versionadded:: 1.1.0\n\n Returns\n -------\n DataFrame\n With columns year, week and day.\n\n See Also\n --------\n Timestamp.isocalendar : Function return a 3-tuple containing ISO year,\n week number, and weekday for the given Timestamp object.\n datetime.date.isocalendar : Return a named tuple object with\n three components: year, week and weekday.\n\n Examples\n --------\n >>> ser = pd.to_datetime(pd.Series([\"2010-01-01\", pd.NaT]))\n >>> ser.dt.isocalendar()\n year week day\n 0 2009 53 5\n 1 \n >>> ser.dt.isocalendar().week\n 0 53\n 1 \n Name: week, dtype: UInt32\n ", "n_words": 88, "vocab_size": 64, "n_whitespaces": 293, "language": "en" } }, { "id": 197436, "commit_id": "9a3ffc6781bd44c47cf49e128ef154389c32876a", "repo": "sympy", "path": "sympy/physics/vector/fieldfunctions.py", "file_name": "fieldfunctions.py", "fun_name": "is_solenoidal", "commit_message": "Some pep8 cleanup of sympy.physics.vector.", "code": "def is_solenoidal(field):\n \n\n # Field is solenoidal irrespective of frame\n # Take the first frame in the result of the separate method in Vector\n if field == Vector(0):\n return True\n frame = list(field.separate())[0]\n return divergence(field, frame).simplify() is S.Zero\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 62, "n_words": 37, "vocab_size": 28, "complexity": 2, "nloc": 5, "token_counts": 44, "n_ast_nodes": 75, "n_identifiers": 10, "d_id": 48544, "documentation": { "docstring": "\n Checks if a field is solenoidal.\n\n Parameters\n ==========\n\n field : Vector\n The field to check for solenoidal property\n\n Examples\n ========\n\n >>> from sympy.physics.vector import ReferenceFrame\n >>> from sympy.physics.vector import is_solenoidal\n >>> R = ReferenceFrame('R')\n >>> is_solenoidal(R[1]*R[2]*R.x + R[0]*R[2]*R.y + R[0]*R[1]*R.z)\n True\n >>> is_solenoidal(R[1] * R.y)\n False\n\n ", "n_words": 46, "vocab_size": 36, "n_whitespaces": 96, "language": "en" } }, { "id": 184630, "commit_id": "b22436933acc0d7440ec300f971a249bd6105a5b", "repo": "textual", "path": "src/textual/widget.py", "file_name": "widget.py", "fun_name": "_allow_scroll", "commit_message": "lots of docstrings", "code": "def _allow_scroll(self) -> bool:\n \n return self.allow_horizontal_scroll and self.allow_vertical_scroll\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 2, "nloc": 7, "token_counts": 16, "n_ast_nodes": 28, "n_identifiers": 5, "d_id": 44728, "documentation": { "docstring": "Check if both axis may be scrolled.\n\n Returns:\n bool: True if horizontal and vertical scrolling is enabled.\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 42, "language": "en" } }, { "id": 107162, "commit_id": "ec4dfbc3c83866f487ff0bc9c87b0d43a1c02b22", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_constrainedlayout.py", "file_name": "test_constrainedlayout.py", "fun_name": "test_align_labels", "commit_message": "ENH: implement and use base layout_engine for more flexible layout.", "code": "def test_align_labels():\n \n fig, (ax3, ax1, ax2) = plt.subplots(3, 1, layout=\"constrained\",\n figsize=(6.4, 8),\n gridspec_kw={\"height_ratios\": (1, 1,\n 0.7)})\n\n ax1.set_ylim(0, 1)\n ax1.set_ylabel(\"Label\")\n\n ax2.set_ylim(-1.5, 1.5)\n ax2.set_ylabel(\"Label\")\n\n ax3.set_ylim(0, 1)\n ax3.set_ylabel(\"Label\")\n\n fig.align_ylabels(axs=(ax3, ax1, 
ax2))\n\n fig.draw_without_rendering()\n after_align = [ax1.yaxis.label.get_window_extent(),\n ax2.yaxis.label.get_window_extent(),\n ax3.yaxis.label.get_window_extent()]\n # ensure labels are approximately aligned\n np.testing.assert_allclose([after_align[0].x0, after_align[2].x0],\n after_align[1].x0, rtol=0, atol=1e-05)\n # ensure labels do not go off the edge\n assert after_align[0].x0 >= 1\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 317, "n_words": 58, "vocab_size": 51, "complexity": 1, "nloc": 19, "token_counts": 200, "n_ast_nodes": 294, "n_identifiers": 25, "d_id": 22617, "documentation": { "docstring": "\n Tests for a bug in which constrained layout and align_ylabels on\n three unevenly sized subplots, one of whose y tick labels include\n negative numbers, drives the non-negative subplots' y labels off\n the edge of the plot\n ", "n_words": 36, "vocab_size": 31, "n_whitespaces": 52, "language": "en" } }, { "id": 264451, "commit_id": "7c105019d8ae9205051c302e7499b33a455f9176", "repo": "netbox", "path": "netbox/utilities/templatetags/builtins/tags.py", "file_name": "tags.py", "fun_name": "tag", "commit_message": "Closes #8600: Document built-in template tags & filters", "code": "def tag(value, viewname=None):\n \n return {\n 'tag': value,\n 'viewname': viewname,\n }\n\n\n@register.inclusion_tag('builtins/badge.html')", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "@register.inclusion_tag('builtins/badge.html')", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 33, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 5, "token_counts": 21, "n_ast_nodes": 51, "n_identifiers": 5, "d_id": 77737, "documentation": { "docstring": "\n Display a tag, optionally linked to a filtered list of objects.\n\n Args:\n value: A Tag instance\n viewname: If provided, the tag will be a hyperlink to the specified view's URL\n ", "n_words": 30, "vocab_size": 26, "n_whitespaces": 54, "language": "en" } }, { "id": 211049, "commit_id": "e6d4d2bc7ba5eb4aa543e3439fa4e24cdd68d028", "repo": "PaddleDetection", "path": "ppdet/modeling/backbones/swin_transformer.py", "file_name": "swin_transformer.py", "fun_name": "forward", "commit_message": "fix export_model for swin (#6399)", "code": "def forward(self, x, mask=None):\n \n B_, N, C = x.shape\n qkv = self.qkv(x).reshape(\n [-1, N, 3, self.num_heads, C // self.num_heads]).transpose(\n [2, 0, 3, 1, 4])\n q, k, v = qkv[0], qkv[1], qkv[2]\n\n q = q * self.scale\n attn = paddle.mm(q, k.transpose([0, 1, 3, 2]))\n\n index = self.relative_position_index.flatten()\n\n relative_position_bias = paddle.index_select(\n self.relative_position_bias_table, index)\n relative_position_bias = relative_position_bias.reshape([\n self.window_size[0] * self.window_size[1],\n self.window_size[0] * self.window_size[1], -1\n ]) # Wh*Ww,Wh*Ww,nH\n relative_position_bias = relative_position_bias.transpose(\n [2, 0, 1]) # nH, Wh*Ww, Wh*Ww\n attn = attn + relative_position_bias.unsqueeze(0)\n\n if mask is not None:\n nW = mask.shape[0]\n attn = attn.reshape([-1, nW, self.num_heads, N, N\n ]) + mask.unsqueeze(1).unsqueeze(0)\n attn = attn.reshape([-1, self.num_heads, N, N])\n attn = self.softmax(attn)\n else:\n attn = self.softmax(attn)\n\n attn = self.attn_drop(attn)\n\n # x = (attn @ v).transpose(1, 2).reshape([B_, N, C])\n x = paddle.mm(attn, v).transpose([0, 2, 1, 3]).reshape([-1, N, C])\n x = self.proj(x)\n x = 
self.proj_drop(x)\n return x\n\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 438, "n_words": 139, "vocab_size": 81, "complexity": 2, "nloc": 31, "token_counts": 337, "n_ast_nodes": 510, "n_identifiers": 32, "d_id": 53015, "documentation": { "docstring": " Forward function.\n Args:\n x: input features with shape of (num_windows*B, N, C)\n mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None\n ", "n_words": 23, "vocab_size": 20, "n_whitespaces": 60, "language": "en" } }, { "id": 204761, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/serializers/xml_serializer.py", "file_name": "xml_serializer.py", "fun_name": "handle_m2m_field", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def handle_m2m_field(self, obj, field):\n \n if field.remote_field.through._meta.auto_created:\n self._start_relational_field(field)\n if self.use_natural_foreign_keys and hasattr(\n field.remote_field.model, \"natural_key\"\n ):\n # If the objects in the m2m have a natural key, use it", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 104, "n_words": 27, "vocab_size": 25, "complexity": 5, "nloc": 16, "token_counts": 98, "n_ast_nodes": 71, "n_identifiers": 12, "d_id": 50875, "documentation": { "docstring": "\n Handle a ManyToManyField. Related objects are only serialized as\n references to the object's PK (i.e. the related *data* is not dumped,\n just the relation).\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 53, "language": "en" } }, { "id": 75351, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/images/tests/tests.py", "file_name": "tests.py", "fun_name": "test_get_with_extra_component", "commit_message": "Reformat with black", "code": "def test_get_with_extra_component(self):\n \n # Generate signature\n signature = generate_signature(self.image.id, \"fill-800x600\")\n\n # Get the image\n response = self.client.get(\n reverse(\n \"wagtailimages_serve\", args=(signature, self.image.id, \"fill-800x600\")\n )\n + \"test.png\"\n )\n\n # Check response\n self.assertEqual(response.status_code, 200)\n self.assertTrue(response.streaming)\n self.assertEqual(response[\"Content-Type\"], \"image/png\")\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 151, "n_words": 33, "vocab_size": 26, "complexity": 1, "nloc": 11, "token_counts": 76, "n_ast_nodes": 132, "n_identifiers": 15, "d_id": 16398, "documentation": { "docstring": "\n Test that a filename can be optionally added to the end of the URL.\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 29, "language": "en" } }, { "id": 60556, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/cli/req_command.py", "file_name": "req_command.py", "fun_name": "with_cleanup", "commit_message": "upd; format", "code": "def with_cleanup(func):\n # type: (Any) -> Any\n \n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 4, "token_counts": 12, "n_ast_nodes": 14, "n_identifiers": 2, "d_id": 12207, 
"documentation": { "docstring": "Decorator for common logic related to managing temporary\n directories.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 15, "language": "en" } }, { "id": 62143, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/util.py", "file_name": "util.py", "fun_name": "convert_path", "commit_message": "upd; format", "code": "def convert_path(pathname):\n \n if os.sep == '/':\n return pathname\n if not pathname:\n return pathname\n if pathname[0] == '/':\n raise ValueError(\"path '%s' cannot be absolute\" % pathname)\n if pathname[-1] == '/':\n raise ValueError(\"path '%s' cannot end with '/'\" % pathname)\n\n paths = pathname.split('/')\n while os.curdir in paths:\n paths.remove(os.curdir)\n if not paths:\n return os.curdir\n return os.path.join(*paths)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 122, "n_words": 53, "vocab_size": 32, "complexity": 7, "nloc": 15, "token_counts": 93, "n_ast_nodes": 163, "n_identifiers": 11, "d_id": 12879, "documentation": { "docstring": "Return 'pathname' as a name that will work on the native filesystem.\n\n The path is split on '/' and put back together again using the current\n directory separator. Needed because filenames in the setup script are\n always supplied in Unix style, and have to be converted to the local\n convention before we can actually use them in the filesystem. Raises\n ValueError on non-Unix-ish systems if 'pathname' either starts or\n ends with a slash.\n ", "n_words": 73, "vocab_size": 60, "n_whitespaces": 96, "language": "en" } }, { "id": 261326, "commit_id": "2a6703d9e8d1e54d22dd07f2bfff3c92adecd758", "repo": "scikit-learn", "path": "sklearn/utils/_set_output.py", "file_name": "_set_output.py", "fun_name": "_wrap_data_with_container", "commit_message": "ENH Introduces set_output API for pandas output (#23734)\n\n* Introduces set_output API for all transformers\r\n\r\n* TransformerMixin inherits from _SetOutputMixin\r\n\r\n* Adds tests\r\n\r\n* Adds whatsnew\r\n\r\n* Adds example on using set_output API\r\n\r\n* Adds developer docs for set_output", "code": "def _wrap_data_with_container(method, data_to_wrap, original_input, estimator):\n \n output_config = _get_output_config(method, estimator)\n\n if output_config[\"dense\"] == \"default\" or not _auto_wrap_is_configured(estimator):\n return data_to_wrap\n\n # dense_config == \"pandas\"\n return _wrap_in_pandas_container(\n data_to_wrap=data_to_wrap,\n index=getattr(original_input, \"index\", None),\n columns=estimator.get_feature_names_out,\n )\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 76, "n_words": 30, "vocab_size": 28, "complexity": 3, "nloc": 9, "token_counts": 61, "n_ast_nodes": 97, "n_identifiers": 13, "d_id": 76754, "documentation": { "docstring": "Wrap output with container based on an estimator's or global config.\n\n Parameters\n ----------\n method : {\"transform\"}\n Estimator's method to get container output for.\n\n data_to_wrap : {ndarray, dataframe}\n Data to wrap with container.\n\n original_input : {ndarray, dataframe}\n Original input of function.\n\n estimator : estimator instance\n Estimator with to get the output configuration from.\n\n Returns\n -------\n output : {ndarray, dataframe}\n If the output config 
is \"default\" or the estimator is not configured\n for wrapping return `data_to_wrap` unchanged.\n If the output config is \"pandas\", return `data_to_wrap` as a pandas\n DataFrame.\n ", "n_words": 87, "vocab_size": 55, "n_whitespaces": 173, "language": "en" } }, { "id": 184593, "commit_id": "c0a631ac492580c2d8a311cdd69385cbc95a7fc0", "repo": "textual", "path": "src/textual/geometry.py", "file_name": "geometry.py", "fun_name": "is_origin", "commit_message": "faster screenshots, docstrings", "code": "def is_origin(self) -> bool:\n \n return self == (0, 0)\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 8, "token_counts": 16, "n_ast_nodes": 27, "n_identifiers": 3, "d_id": 44695, "documentation": { "docstring": "Check if the point is at the origin (0, 0).\n\n Returns:\n bool: True if the offset is the origin.\n\n ", "n_words": 19, "vocab_size": 14, "n_whitespaces": 44, "language": "en" } }, { "id": 107485, "commit_id": "f156db08eee54d285ab0fb4e031e48d078ba6aa3", "repo": "matplotlib", "path": "lib/matplotlib/axis.py", "file_name": "axis.py", "fun_name": "get_text_heights", "commit_message": "DOC: More cleanup axes -> Axes", "code": "def get_text_heights(self, renderer):\n \n bbox, bbox2 = self.get_ticklabel_extents(renderer)\n # MGDTODO: Need a better way to get the pad\n pad_pixels = self.majorTicks[0].get_pad_pixels()\n\n above = 0.0\n if bbox2.height:\n above += bbox2.height + pad_pixels\n below = 0.0\n if bbox.height:\n below += bbox.height + pad_pixels\n\n if self.get_label_position() == 'top':\n above += self.label.get_window_extent(renderer).height + pad_pixels\n else:\n below += self.label.get_window_extent(renderer).height + pad_pixels\n return above, below\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 179, "n_words": 58, "vocab_size": 36, "complexity": 4, "nloc": 14, "token_counts": 107, "n_ast_nodes": 170, "n_identifiers": 15, "d_id": 22774, "documentation": { "docstring": "\n Return how much space should be reserved for text above and below the\n Axes, as a pair of floats.\n ", "n_words": 19, "vocab_size": 19, "n_whitespaces": 41, "language": "en" } }, { "id": 154161, "commit_id": "9bf8d57ca44e22fd69b0abc55793cf60c199ab4d", "repo": "modin", "path": "modin/core/execution/dask/implementations/pandas_on_dask/partitioning/virtual_partition.py", "file_name": "virtual_partition.py", "fun_name": "mask", "commit_message": "FIX-#4676: drain sub-virtual-partition call queues. 
(#4695)\n\nSigned-off-by: mvashishtha \n\nCo-authored-by: Alexey Prutskov ", "code": "def mask(self, row_indices, col_indices):\n \n return (\n self.force_materialization()\n .list_of_block_partitions[0]\n .mask(row_indices, col_indices)\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 65, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 6, "token_counts": 30, "n_ast_nodes": 47, "n_identifiers": 6, "d_id": 35821, "documentation": { "docstring": "\n Create (synchronously) a mask that extracts the indices provided.\n\n Parameters\n ----------\n row_indices : list-like, slice or label\n The row labels for the rows to extract.\n col_indices : list-like, slice or label\n The column labels for the columns to extract.\n\n Returns\n -------\n PandasOnDaskDataframeVirtualPartition\n A new ``PandasOnDaskDataframeVirtualPartition`` object,\n materialized.\n ", "n_words": 47, "vocab_size": 35, "n_whitespaces": 155, "language": "en" } }, { "id": 20231, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/platformdirs/unix.py", "file_name": "unix.py", "fun_name": "site_config_path", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def site_config_path(self) -> Path:\n \n return self._first_item_as_path_if_multipath(self.site_config_dir)\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 17, "n_ast_nodes": 30, "n_identifiers": 5, "d_id": 3283, "documentation": { "docstring": ":return: config path shared by the users. 
Only return first item, even if ``multipath`` is set to ``True``", "n_words": 18, "vocab_size": 18, "n_whitespaces": 17, "language": "en" } }, { "id": 27919, "commit_id": "67492396aa41d068cac82e8fa328f218b5951d13", "repo": "saleor", "path": "saleor/discount/tasks.py", "file_name": "tasks.py", "fun_name": "send_sale_toggle_notifications", "commit_message": "New event for starting and ending sales (#10110)\n\n* Add sale started and sale ended webhooks\r\n\r\n* Add started_notification_sent and ended_notification_sent flags to Sale model\r\n\r\n* Add sale_webhook_schedule\r\n\r\n* Add send_sale_started_and_sale_ended_notifications discount task\r\n\r\n* Add tests for discount tasks\r\n\r\n* Move sale task celery beat schedule to settings\r\n\r\n* Add tests for sale_webhook_schedule\r\n\r\n* Add sale_started and sale_ended methods to PluginSample\r\n\r\n* Update send_sale_started_and_sale_ended_notifications logging\r\n\r\n* Update SaleUpdate mutation - ensure the notification is sent and the flag is changed if needed\r\n\r\n* Update SaleCreate mutation - send sale_creatd and sale_ended notifications\r\n\r\n* Optimize fetch_catalogue_info\r\n\r\n* Clean up\r\n\r\n* Apply code review suggestions\r\n\r\n* Add SALE_TOGGLE webhook\r\n\r\n* Use sale_toggle webhook instead of sale_started and sale_ended\r\n\r\n* Delete sale_started and sale_eded wbhooks\r\n\r\n* Drop notification flags from Sale model\r\n\r\n* Add missing docstrings and comments\r\n\r\n* Fix failing tests\r\n\r\n* Update changelog\r\n\r\n* Add description for SaleToggle event type\r\n\r\n* Update discount task and webhook schedule\r\n\r\n* Set notification_sent_datetime to current date by default\r\n\r\n* Fix typo in comment", "code": "def send_sale_toggle_notifications():\n \n manager = get_plugins_manager()\n\n sales = get_sales_to_notify_about()\n\n catalogue_infos = fetch_catalogue_infos(sales)\n\n if not sales:\n return\n\n for sale in sales:\n catalogues = catalogue_infos.get(sale.id)\n manager.sale_toggle(sale, catalogues)\n\n sale_ids = \", \".join([str(sale.id) for sale in sales])\n sales.update(notification_sent_datetime=datetime.now(pytz.UTC))\n\n task_logger.info(\"The sale_toggle webhook sent for sales with ids: %s\", sale_ids)\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 91, "n_words": 43, "vocab_size": 33, "complexity": 4, "nloc": 12, "token_counts": 91, "n_ast_nodes": 153, "n_identifiers": 23, "d_id": 5140, "documentation": { "docstring": "Send the notification about starting or ending sales.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 218469, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/inspect.py", "file_name": "inspect.py", "fun_name": "isasyncgenfunction", "commit_message": "add python 3.10.4 for windows", "code": "def isasyncgenfunction(obj):\n \n return _has_code_flag(obj, CO_ASYNC_GENERATOR)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 11, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 23, "n_identifiers": 4, "d_id": 55329, "documentation": { "docstring": "Return true if the object is an asynchronous generator function.\n\n Asynchronous generator functions are defined with \"async def\"\n syntax and have \"yield\" expressions in their body.\n ", "n_words": 26, "vocab_size": 
25, "n_whitespaces": 35, "language": "en" } }, { "id": 195034, "commit_id": "4291c8a63a3ae9e7107dda0f90fff8da3b31d29b", "repo": "ParlAI", "path": "parlai/core/params.py", "file_name": "params.py", "fun_name": "parse_known_args", "commit_message": "python 3.8 parser fix on args_that_override (#4507)\n\n* single dash\r\n\r\n* handle args during parsing", "code": "def parse_known_args(self, args=None, namespace=None, nohelp=False):\n \n if args is None:\n # args default to the system args\n args = _sys.argv[1:]\n\n args = fix_underscores(args)\n # handle the single dash stuff. See _handle_single_dash_addarg for info\n actions = set()\n for action in self._actions:\n actions.update(action.option_strings)\n args = self._handle_single_dash_parsearg(args, actions)\n if nohelp:\n # ignore help\n args = [\n a\n for a in args\n if a != '-h' and a != '--help' and a != '--helpall' and a != '--h'\n ]\n return super().parse_known_args(args, namespace)\n", "url": "https://github.com/facebookresearch/ParlAI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 251, "n_words": 77, "vocab_size": 48, "complexity": 9, "nloc": 15, "token_counts": 107, "n_ast_nodes": 177, "n_identifiers": 17, "d_id": 47160, "documentation": { "docstring": "\n Parse known args to ignore help flag.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 44285, "commit_id": "640c0b67631c5f2c8ee866b0726fa7a8a452cd3c", "repo": "airflow", "path": "airflow/providers/google/cloud/operators/vertex_ai/custom_job.py", "file_name": "custom_job.py", "fun_name": "on_kill", "commit_message": "Create CustomJob and Datasets operators for Vertex AI service (#20077)", "code": "def on_kill(self) -> None:\n \n if self.hook:\n self.hook.cancel_job()\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 32, "n_words": 7, "vocab_size": 7, "complexity": 2, "nloc": 7, "token_counts": 20, "n_ast_nodes": 36, "n_identifiers": 4, "d_id": 8232, "documentation": { "docstring": "\n Callback called when the operator is killed.\n Cancel any running job.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 33, "language": "en" } }, { "id": 222616, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/cmd.py", "file_name": "cmd.py", "fun_name": "debug_print", "commit_message": "add python 3.10.4 for windows", "code": "def debug_print(self, msg):\n \n from distutils.debug import DEBUG\n if DEBUG:\n print(msg)\n sys.stdout.flush()\n\n\n # -- Option validation methods -------------------------------------\n # (these are very handy in writing the 'finalize_options()' method)\n #\n # NB. the general philosophy here is to ensure that a particular option\n # value meets certain type and value constraints. If not, we try to\n # force it into conformance (eg. if we expect a list but have a string,\n # split the string on comma and/or whitespace). If we can't force the\n # option into conformance, raise DistutilsOptionError. 
Thus, command\n # classes need do nothing more than (eg.)\n # self.ensure_string_list('foo')\n # and they can be guaranteed that thereafter, self.foo will be\n # a list of strings.\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 200, "n_words": 116, "vocab_size": 86, "complexity": 2, "nloc": 5, "token_counts": 28, "n_ast_nodes": 60, "n_identifiers": 10, "d_id": 56677, "documentation": { "docstring": "Print 'msg' to stdout if the global DEBUG (taken from the\n DISTUTILS_DEBUG environment variable) flag is true.\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 31, "language": "en" } }, { "id": 128168, "commit_id": "2b62bba7c4014c8d943b197bf8396df7dd0f82e3", "repo": "ray", "path": "python/ray/train/tests/test_base_trainer.py", "file_name": "test_base_trainer.py", "fun_name": "test_large_params", "commit_message": "[AIR] Support large checkpoints and other arguments (#28826)\n\nSigned-off-by: Amog Kamsetty amogkamsetty@yahoo.com\r\n\r\nPreviously the arguments passed to the Trainer would be captured in the Trainable context. For arguments that are very large in size, this would prevent the Trainable from being registered due to gRPC resource limits.\r\n\r\nInstead, we now always use tune.with_parameters to save the Trainer arguments in the object store rather than capturing it in the context.", "code": "def test_large_params(ray_start_4_cpus):\n \n array_size = int(1e8)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 11, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 6, "token_counts": 48, "n_ast_nodes": 23, "n_identifiers": 4, "d_id": 28617, "documentation": { "docstring": "Tests if large arguments are can be serialized by the Trainer.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 231525, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/_geo.py", "file_name": "_geo.py", "fun_name": "lataxis", "commit_message": "switch to black .22", "code": "def lataxis(self):\n \n return self[\"lataxis\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 62969, "documentation": { "docstring": "\n The 'lataxis' property is an instance of Lataxis\n that may be specified as:\n - An instance of :class:`plotly.graph_objs.layout.geo.Lataxis`\n - A dict of string/value properties that will be passed\n to the Lataxis constructor\n\n Supported dict properties:\n\n dtick\n Sets the graticule's longitude/latitude tick\n step.\n gridcolor\n Sets the graticule's stroke color.\n gridwidth\n Sets the graticule's stroke width (in px).\n range\n Sets the range of this axis (in degrees), sets\n the map's clipped coordinates.\n showgrid\n Sets whether or not graticule are shown on the\n map.\n tick0\n Sets the graticule's starting tick\n longitude/latitude.\n\n Returns\n -------\n plotly.graph_objs.layout.geo.Lataxis\n ", "n_words": 91, "vocab_size": 63, "n_whitespaces": 454, "language": "en" } }, { "id": 203186, "commit_id": "0dcd549bbe36c060f536ec270d34d9e7d4b8e6c7", "repo": "django", "path": "django/contrib/auth/tokens.py", "file_name": "tokens.py", "fun_name": 
"make_token", "commit_message": "Fixed #30360 -- Added support for secret key rotation.\n\nThanks Florian Apolloner for the implementation idea.\n\nCo-authored-by: Andreas Pelme \nCo-authored-by: Carlton Gibson \nCo-authored-by: Vuyisile Ndlovu ", "code": "def make_token(self, user):\n \n return self._make_token_with_timestamp(\n user,\n self._num_seconds(self._now()),\n self.secret,\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 63, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 6, "token_counts": 31, "n_ast_nodes": 49, "n_identifiers": 7, "d_id": 50245, "documentation": { "docstring": "\n Return a token that can be used once to do a password reset\n for the given user.\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 39, "language": "en" } }, { "id": 70279, "commit_id": "9614e2bb19c6bdd512fea5dafbed1250da0049d9", "repo": "glances", "path": "glances/plugins/glances_processlist.py", "file_name": "glances_processlist.py", "fun_name": "get_process_curses_data", "commit_message": "First version but UI should be improved and when user is in program mode, it did not work...", "code": "def get_process_curses_data(self, p, selected, args):\n \n ret = [self.curse_new_line()]\n\n # When a process is selected:\n # * display a special character at the beginning of the line\n # * underline the command name\n ret.append(self.curse_add_line(unicode_message('PROCESS_SELECTOR') if (selected and not args.disable_cursor) else ' ', 'SELECTED'))\n\n # CPU\n ret.append(self._get_process_curses_cpu(p, selected, args))\n\n # MEM\n ret.append(self._get_process_curses_mem(p, selected, args))\n ret.append(self._get_process_curses_vms(p, selected, args))\n ret.append(self._get_process_curses_rss(p, selected, args))\n\n # PID\n if not self.args.programs:\n # Display processes, so the PID should be displayed\n msg = self.layout_stat['pid'].format(p['pid'], width=self.__max_pid_size())\n else:\n # Display programs, so the PID should not be displayed\n # Instead displays the number of children\n msg = self.layout_stat['pid'].format(\n len(p['childrens']) if 'childrens' in p else '_', width=self.__max_pid_size()\n )\n ret.append(self.curse_add_line(msg))\n\n # USER\n ret.append(self._get_process_curses_username(p, selected, args))\n\n # TIME+\n ret.append(self._get_process_curses_time(p, selected, args))\n\n # THREAD\n ret.append(self._get_process_curses_thread(p, selected, args))\n\n # NICE\n ret.append(self._get_process_curses_nice(p, selected, args))\n\n # STATUS\n ret.append(self._get_process_curses_status(p, selected, args))\n\n # IO read/write\n ret.append(self._get_process_curses_io_read(p, selected, args))\n ret.append(self._get_process_curses_io_write(p, selected, args))\n\n # Command line\n # If no command line for the process is available, fallback to the bare process name instead\n bare_process_name = p['name']\n cmdline = p.get('cmdline', '?')\n\n try:\n process_decoration = 'PROCESS_SELECTED' if (selected and not args.disable_cursor) else 'PROCESS'\n if cmdline:\n path, cmd, arguments = split_cmdline(bare_process_name, cmdline)\n # Manage end of line in arguments (see #1692)\n arguments.replace('\\r\\n', ' ')\n arguments.replace('\\n', ' ')\n if os.path.isdir(path) and not args.process_short_name:\n msg = self.layout_stat['command'].format(path) + os.sep\n ret.append(self.curse_add_line(msg, splittable=True))\n ret.append(self.curse_add_line(cmd, decoration=process_decoration, 
splittable=True))\n else:\n msg = self.layout_stat['command'].format(cmd)\n ret.append(self.curse_add_line(msg, decoration=process_decoration, splittable=True))\n if arguments:\n msg = ' ' + self.layout_stat['command'].format(arguments)\n ret.append(self.curse_add_line(msg, splittable=True))\n else:\n msg = self.layout_stat['name'].format(bare_process_name)\n ret.append(self.curse_add_line(msg, decoration=process_decoration, splittable=True))\n except (TypeError, UnicodeEncodeError) as e:\n # Avoid crash after running fine for several hours #1335\n logger.debug(\"Can not decode command line '{}' ({})\".format(cmdline, e))\n ret.append(self.curse_add_line('', splittable=True))\n\n return ret\n", "url": "https://github.com/nicolargo/glances.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 935, "n_words": 268, "vocab_size": 147, "complexity": 12, "nloc": 46, "token_counts": 560, "n_ast_nodes": 920, "n_identifiers": 49, "d_id": 15483, "documentation": { "docstring": "Get curses data to display for a process.\n\n - p is the process to display\n - selected is a tag=True if p is the selected process\n ", "n_words": 26, "vocab_size": 16, "n_whitespaces": 47, "language": "en" } }, { "id": 300441, "commit_id": "11cc1feb853bcfd9633ebfc44eae142c10a7f983", "repo": "core", "path": "tests/components/template/test_switch.py", "file_name": "test_switch.py", "fun_name": "test_available_template_with_entities", "commit_message": "Tweak template switch tests (#71738)", "code": "async def test_available_template_with_entities(hass):\n \n await setup.async_setup_component(\n hass,\n \"switch\",\n {\n \"switch\": {\n \"platform\": \"template\",\n \"switches\": {\n \"test_template_switch\": {\n **OPTIMISTIC_SWITCH_CONFIG,\n \"value_template\": \"{{ 1 == 1 }}\",\n \"availability_template\": \"{{ is_state('availability_state.state', 'on') }}\",\n }\n },\n }\n },\n )\n\n await hass.async_block_till_done()\n await hass.async_start()\n await hass.async_block_till_done()\n\n hass.states.async_set(\"availability_state.state\", STATE_ON)\n await hass.async_block_till_done()\n\n assert hass.states.get(\"switch.test_template_switch\").state != STATE_UNAVAILABLE\n\n hass.states.async_set(\"availability_state.state\", STATE_OFF)\n await hass.async_block_till_done()\n\n assert hass.states.get(\"switch.test_template_switch\").state == STATE_UNAVAILABLE\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 293, "n_words": 55, "vocab_size": 34, "complexity": 1, "nloc": 26, "token_counts": 123, "n_ast_nodes": 224, "n_identifiers": 14, "d_id": 99301, "documentation": { "docstring": "Test availability templates with values from other entities.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 79300, "commit_id": "bf65fa94ea5aa17f3c42e5cb5401fb7d34a60b5e", "repo": "wagtail", "path": "wagtail/admin/tests/test_buttons_hooks.py", "file_name": "test_buttons_hooks.py", "fun_name": "test_delete_button_with_next_url", "commit_message": "fix issue with edit page header delete button showing an invalid next_url\n\n- fixes #9195\n- header button on edit page & page listing - unpublish now correctly includes the next url (was missing on page listing previously)\n- header button on edit page - delete button does not include next url (as this would be the edit page for what was deleted)\n- adds more robust unit tests for the page listing & page header more hooks, 
including separating the tests out to separate classes", "code": "def test_delete_button_with_next_url(self):\n \n\n # page_listing_more_button generator yields only `Delete button` with this permission set\n page_perms = DeleteOnlyPagePerms()\n page = self.root_page\n base_url = reverse(\"wagtailadmin_pages:delete\", args=[page.id])\n\n next_url = \"a/random/url/\"\n full_url = base_url + \"?\" + urlencode({\"next\": next_url})\n\n delete_button = next(\n page_listing_more_buttons(page, page_perms, next_url=next_url)\n )\n\n self.assertEqual(delete_button.url, full_url)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 124, "n_words": 43, "vocab_size": 36, "complexity": 1, "nloc": 10, "token_counts": 72, "n_ast_nodes": 124, "n_identifiers": 18, "d_id": 16914, "documentation": { "docstring": "\n Ensure that the built in delete button supports a next_url provided.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 26, "language": "en" } }, { "id": 126860, "commit_id": "dac7bf17d9214dd3b79238caf0c8ec76f40328c6", "repo": "ray", "path": "dashboard/modules/reporter/reporter_head.py", "file_name": "reporter_head.py", "fun_name": "get_cluster_status", "commit_message": "[serve] Make serve agent not blocking when GCS is down. (#27526)\n\nThis PR fixed several issue which block serve agent when GCS is down. We need to make sure serve agent is always alive and can make sure the external requests can be sent to the agent and check the status.\r\n\r\n- internal kv used in dashboard/agent blocks the agent. We use the async one instead\r\n- serve controller use ray.nodes which is a blocking call and blocking forever. change to use gcs client with timeout\r\n- agent use serve controller client which is a blocking call with max retries = -1. 
This blocks until controller is back.\r\n\r\nTo enable Serve HA, we also need to setup:\r\n\r\n- RAY_gcs_server_request_timeout_seconds=5\r\n- RAY_SERVE_KV_TIMEOUT_S=5\r\n\r\nwhich we should set in KubeRay.", "code": "async def get_cluster_status(self, req):\n \n\n (legacy_status, formatted_status_string, error) = await asyncio.gather(\n *[\n self._gcs_aio_client.internal_kv_get(\n key.encode(), namespace=None, timeout=GCS_RPC_TIMEOUT_SECONDS\n )\n for key in [\n DEBUG_AUTOSCALING_STATUS_LEGACY,\n DEBUG_AUTOSCALING_STATUS,\n DEBUG_AUTOSCALING_ERROR,\n ]\n ]\n )\n\n formatted_status = (\n json.loads(formatted_status_string.decode())\n if formatted_status_string\n else {}\n )\n return dashboard_optional_utils.rest_response(\n success=True,\n message=\"Got cluster status.\",\n autoscaling_status=legacy_status.decode() if legacy_status else None,\n autoscaling_error=error.decode() if error else None,\n cluster_status=formatted_status if formatted_status else None,\n )\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 352, "n_words": 57, "vocab_size": 43, "complexity": 6, "nloc": 25, "token_counts": 121, "n_ast_nodes": 180, "n_identifiers": 29, "d_id": 28284, "documentation": { "docstring": "Returns status information about the cluster.\n\n Currently contains two fields:\n autoscaling_status (str)-- a status message from the autoscaler.\n autoscaling_error (str)-- an error message from the autoscaler if\n anything has gone wrong during autoscaling.\n\n These fields are both read from the GCS, it's expected that the\n autoscaler writes them there.\n ", "n_words": 49, "vocab_size": 39, "n_whitespaces": 114, "language": "en" } }, { "id": 199141, "commit_id": "a226912a87198dac24e5cc9db4b2077422b021f0", "repo": "sympy", "path": "sympy/functions/elementary/piecewise.py", "file_name": "piecewise.py", "fun_name": "piecewise_exclusive", "commit_message": "Add piecewise_canonical function", "code": "def piecewise_exclusive(expr, *, skip_nan=False):\n \n if not expr.has(Piecewise):\n return expr\n if isinstance(expr, Piecewise):\n cumcond = false\n newargs = []\n for arg in expr.args:\n cancond = And(arg.cond, Not(cumcond)).simplify()\n cumcond = Or(arg.cond, cumcond).simplify()\n newargs.append(\n ExprCondPair(piecewise_exclusive(arg.expr, skip_nan=skip_nan),\n cancond))\n if not skip_nan and cumcond is not true:\n newargs.append(ExprCondPair(Undefined, Not(cumcond).simplify()))\n return Piecewise(*newargs, evaluate=False)\n return expr.func(*[piecewise_exclusive(arg, skip_nan=skip_nan)\n for arg in expr.args],\n evaluate=False)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 238, "n_words": 55, "vocab_size": 40, "complexity": 7, "nloc": 18, "token_counts": 160, "n_ast_nodes": 249, "n_identifiers": 23, "d_id": 49161, "documentation": { "docstring": "\n Return a :class:`Piecewise` with exclusive conditions, i.e., where exactly\n one condition is True.\n\n SymPy normally represents the condition in an \"if-elif\"-fashion, which\n leads to that more than one condition can be True. This is sometimes not\n wanted when representing the :class:`Piecewise` mathematically.\n\n Note that further manipulation of the resulting :class:`Piecewise`, e.g.\n simplifying it, will most likely make it non-exclusive. 
Hence, this is\n primarily a function to be used in conjunction with printing the Piecewise\n or if one would like to reorder the expression-condition pairs.\n\n ``piecewise_exclusive`` will also explicitly add a final\n :class:`~sympy.core.numbers.NaN` segment to the :class:`Piecewise`, unless\n all cases are covered. This can be avoided by passing ``skip_nan=True`` as\n a final argument. It can also be used in some situations where SymPy cannot\n determine that all cases are covered.\n\n Examples\n ========\n >>> from sympy import piecewise_exclusive, Symbol, Piecewise, S\n >>> x = Symbol('x', real=True)\n >>> p = Piecewise((0, x < 0), (S.Half, x <= 0), (1, True))\n >>> piecewise_exclusive(p)\n Piecewise((0, x < 0), (1/2, Eq(x, 0)), (1, x > 0))\n >>> piecewise_exclusive(Piecewise((2, x > 1)))\n Piecewise((2, x > 1), (nan, x <= 1))\n >>> piecewise_exclusive(Piecewise((2, x > 1)), skip_nan=True)\n Piecewise((2, x > 1))\n\n ", "n_words": 193, "vocab_size": 124, "n_whitespaces": 272, "language": "en" } }, { "id": 290231, "commit_id": "9a747bafa398185eb3d4fe041c52acfbb8264372", "repo": "core", "path": "homeassistant/components/zwave_js/climate.py", "file_name": "climate.py", "fun_name": "temperature_unit", "commit_message": "Use enums instead of deprecated constants (#81591)", "code": "def temperature_unit(self) -> str:\n \n if (\n self._unit_value\n and self._unit_value.metadata.unit\n and \"f\" in self._unit_value.metadata.unit.lower()\n ):\n return UnitOfTemperature.FAHRENHEIT\n return UnitOfTemperature.CELSIUS\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 90, "n_words": 18, "vocab_size": 16, "complexity": 4, "nloc": 9, "token_counts": 45, "n_ast_nodes": 75, "n_identifiers": 10, "d_id": 89349, "documentation": { "docstring": "Return the unit of measurement used by the platform.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 119021, "commit_id": "2c20d82776fea482aaf52e18ebad4f7fce5c3a81", "repo": "jax", "path": "jax/experimental/sparse/bcoo.py", "file_name": "bcoo.py", "fun_name": "_bcoo_todense_batching_rule", "commit_message": "[sparse] generalize metadata argument in BCOO primitives", "code": "def _bcoo_todense_batching_rule(batched_args, batch_dims, *, spinfo):\n data, indices = batched_args\n if any(b not in [0, None] for b in batch_dims):\n raise NotImplementedError(f\"batch_dims={batch_dims}. 
Only 0 and None are supported.\")\n if batch_dims[0] is None:\n data = data[None, ...]\n if batch_dims[1] is None:\n indices = indices[None, ...]\n new_spinfo = BCOOInfo(\n shape=(max(data.shape[0], indices.shape[0]), *spinfo.shape))\n return bcoo_todense(data, indices, spinfo=new_spinfo), 0\n\nad.defjvp(bcoo_todense_p, _bcoo_todense_jvp, None)\nad.primitive_transposes[bcoo_todense_p] = _bcoo_todense_transpose\nbatching.primitive_batchers[bcoo_todense_p] = _bcoo_todense_batching_rule\nxla.register_translation(bcoo_todense_p, xla.lower_fun(\n _bcoo_todense_impl, multiple_results=False, new_style=True))\n\n#--------------------------------------------------------------------\n# bcoo_fromdense\n\nbcoo_fromdense_p = core.Primitive('bcoo_fromdense')\nbcoo_fromdense_p.multiple_results = True\n\n_TRACED_NSE_ERROR = \n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 91, "n_words": 79, "vocab_size": 63, "complexity": 5, "nloc": 11, "token_counts": 114, "n_ast_nodes": 270, "n_identifiers": 32, "d_id": 26534, "documentation": { "docstring": "\nThe error arose for the nse argument of bcoo_fromdense. In order for BCOO.fromdense()\nto be used in traced/compiled code, you must pass a concrete value to the nse\n(number of specified elements) argument.\n", "n_words": 33, "vocab_size": 28, "n_whitespaces": 30, "language": "en" } }, { "id": 206236, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/template/defaultfilters.py", "file_name": "defaultfilters.py", "fun_name": "addslashes", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def addslashes(value):\n \n return value.replace(\"\\\\\", \"\\\\\\\\\").replace('\"', '\\\\\"').replace(\"'\", \"\\\\'\")\n\n\n@register.filter(is_safe=True)\n@stringfilter", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "@register.filter(is_safe=True)\n@stringfilter", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 13, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 29, "n_ast_nodes": 81, "n_identifiers": 7, "d_id": 51427, "documentation": { "docstring": "\n Add slashes before quotes. Useful for escaping strings in CSV, for\n example. Less useful for escaping JavaScript; use the ``escapejs``\n filter instead.\n ", "n_words": 22, "vocab_size": 19, "n_whitespaces": 35, "language": "en" } }, { "id": 267824, "commit_id": "3eb0485dd92c88cc92152d3656d94492db44b183", "repo": "ansible", "path": "test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py", "file_name": "__init__.py", "fun_name": "get_provider_plugins", "commit_message": "ansible-test - Use more native type hints. 
(#78435)\n\n* ansible-test - Use more native type hints.\r\n\r\nSimple search and replace to switch from comments to native type hints for return types of functions with no arguments.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of simple single-line function annotation type comments to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of single-line function annotation type comments with default values to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nManual conversion of type annotation comments for functions which have pylint directives.", "code": "def get_provider_plugins() -> t.Dict[str, t.Type[CloudProvider]]:\n \n return get_cloud_plugins()[0]\n\n\n@cache", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "@cache", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 13, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 26, "n_ast_nodes": 46, "n_identifiers": 8, "d_id": 79105, "documentation": { "docstring": "Return a dictionary of the available cloud provider plugins.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 215645, "commit_id": "07abfa2a7a70b0bfa23ae45172e090fc4b9c180c", "repo": "salt", "path": "tests/unit/modules/test_boto_elb.py", "file_name": "test_boto_elb.py", "fun_name": "test_get_elb_config", "commit_message": "adding test_get_elb_config function", "code": "def test_get_elb_config(self):\n \n conn_ec2 = boto.ec2.connect_to_region(region, **boto_conn_parameters)\n conn_elb = boto.ec2.elb.connect_to_region(region,\n **boto_conn_parameters)\n zones = [zone.name for zone in conn_ec2.get_all_zones()]\n elb_name = 'TestGetELBConfig'\n load_balancer = conn_elb.create_load_balancer(elb_name, zones,\n [(80, 80, 'http')])\n reservations = conn_ec2.run_instances('ami-08389d60', min_count=3)\n all_instance_ids = [instance.id for instance in reservations.instances]\n load_balancer.register_instances(all_instance_ids)\n\n # DescribeTags does not appear to be included in moto\n # so mock the _get_all_tags function. 
Ideally we wouldn't\n # need to mock this.\n with patch('salt.modules.boto_elb._get_all_tags',\n MagicMock(return_value=None)):\n ret = boto_elb.get_elb_config(elb_name, **conn_parameters)\n _expected_keys = ['subnets',\n 'availability_zones',\n 'canonical_hosted_zone_name_id',\n 'tags',\n 'dns_name',\n 'listeners',\n 'backends',\n 'policies',\n 'vpc_id',\n 'scheme',\n 'canonical_hosted_zone_name',\n 'security_groups']\n for key in _expected_keys:\n self.assertIn(key, ret)\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 666, "n_words": 90, "vocab_size": 72, "complexity": 4, "nloc": 28, "token_counts": 167, "n_ast_nodes": 281, "n_identifiers": 35, "d_id": 54068, "documentation": { "docstring": "\n tests that given an valid ids in the form of a list that the boto_elb\n deregister_instances all members of the given list\n ", "n_words": 22, "vocab_size": 16, "n_whitespaces": 44, "language": "en" } }, { "id": 296298, "commit_id": "36bb947cdf24cb74c4d4288ca61825226e1de5ff", "repo": "core", "path": "homeassistant/components/google/calendar.py", "file_name": "calendar.py", "fun_name": "offset_reached", "commit_message": "Fix bug in google calendar offset calculation (#70024)\n\nMove the offset reached computation outside of the update method so that it is\r\ncomputed when state updates occur rather than when data refreshes happen (which\r\nare throttled and happen at most every 15 minutes).\r\n\r\nIssue #69892", "code": "def offset_reached(self) -> bool:\n \n if self._event and self._offset_value:\n return is_offset_reached(\n self._event.start_datetime_local, self._offset_value\n )\n return False\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 73, "n_words": 15, "vocab_size": 14, "complexity": 3, "nloc": 7, "token_counts": 32, "n_ast_nodes": 52, "n_identifiers": 7, "d_id": 95285, "documentation": { "docstring": "Return whether or not the event offset was reached.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 86711, "commit_id": "4acb1834c41648180bbb41cbe248b50d65e5977d", "repo": "sentry", "path": "src/sentry/snuba/metrics/fields/base.py", "file_name": "base.py", "fun_name": "validate_can_orderby", "commit_message": "feat(metrics): Adds mqb query transform to MetricsQuery [TET-163] (#37652)\n\nSo far this PR has only test cases that shows expected output from MQB\r\n(input to metrics abstraction layer) and the final output that would be\r\npassed to metrics abstraction layer\r\n\r\nI have printed out queries spit out by MQB and coalesced them into the\r\ntest cases in this PR, and so should cover all queries made by\r\nperformance to metrics:\r\n- I have only listed a variation or two of the same functions for\r\nexample `p75(transaction.duration)` but I did not add\r\n`p50(transaction.duration)` because the logic would be the same so need\r\nto add this to these tests\r\n- Only thing missing is the recent `countIf` functions added for\r\nperformance which I will add later on listed here ->\r\nhttps://github.com/getsentry/sentry/blob/master/src/sentry/search/events/datasets/metrics.py#L179-L276\r\n\r\n### Changes to MQB output:-\r\n- Removed tags from select statement, as if they are listed in the\r\n`groupBy`, they will be returned by metrics abstraction layer\r\n- Having clauses are not supported \r\n- Transform functions are not supported\r\n- Removed ordering 
by `bucketed_time` as this behavior is handled post\r\nquery by metrics abstraction layer\r\n- Replaced metric ids/names with MRI as this is the naming contract we\r\ncan guarantee\r\n- Replaced tag values with their tag names because metrics abstraction\r\nlayer will handle the indexer resolving and reverse resolving\r\n- Replaced SnQL function definition with their corresponding derived\r\nmetrics so for example failure_rate, apdex, user_misery,\r\nteam_key_transactions, count_web_vitals and histogram functions\r\n\r\n\r\n### ToDo from me to get this test to pass\r\n\r\n- [x] `snuba-sdk` needs to support MRI as a column name in `Column`\r\n[TET-323]\r\n- [x] `MetricField` needs to support `args` and `alias` [TET-320,\r\nTET-322]\r\n- [x] Add `MetricGroupByField` for `groupBy` columns that accept an\r\n`alias` [TET-320]\r\n- [x] Aliasing functionality needs to be supported [TET-320]\r\n- [x] Add derived metric for `team_key_transaction` [TET-325]\r\n- [x] Add derived metric for `count_web_vital_measurements` [TET-161]\r\n- [x] Add derived metric for `rate` [TET-129]\r\n- [x] `MetricsQuery` accepts MRI rather than public facing names\r\n[TET-321]\r\n- [x] Support for tuples conditions [TET-319]\r\n- [x] Add derived metrics for the 3 `countIf` functions [TET-326]\r\n- [x] Transform MQB `Query` object to `MetricsQuery` (This PR)\r\n- [x] Figure out addition of Granularity processor [TET-327]\r\n- [x] Add Invalid test cases (This PR)\r\n- [ ] Discuss granularity differences/query bounds (Will be handled in\r\nsubsequent PR [TET-452])\r\n\r\n\r\n\r\n[TET-323]:\r\nhttps://getsentry.atlassian.net/browse/TET-323?atlOrigin=eyJpIjoiNWRkNTljNzYxNjVmNDY3MDlhMDU5Y2ZhYzA5YTRkZjUiLCJwIjoiZ2l0aHViLWNvbS1KU1cifQ", "code": "def validate_can_orderby(self) -> None:\n \n raise NotImplementedError\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 5, "token_counts": 10, "n_ast_nodes": 19, "n_identifiers": 3, "d_id": 18156, "documentation": { "docstring": "\n Validate that the expression can be used to order a query\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 26, "language": "en" } }, { "id": 101643, "commit_id": "a8f22cc019d56cec18ccd8223587d97dc4b37d04", "repo": "faceswap", "path": "plugins/extract/_config.py", "file_name": "_config.py", "fun_name": "set_globals", "commit_message": "Extract updates:\n - Default CPU detector to MTCNN\n - add basic Aligner false positive filters\n - Typing: align + plugins\n - Use specific AlignerBatch class for alignment\n -", "code": "def set_globals(self):\n \n logger.debug(\"Setting global config\")\n section = \"global\"\n self.add_section(title=section, info=\"Options that apply to all extraction plugins\")\n self.add_item(\n section=section,\n title=\"allow_growth\",\n datatype=bool,\n default=False,\n group=\"settings\",\n info=\"[Nvidia Only]. Enable the Tensorflow GPU `allow_growth` configuration option. \"\n \"This option prevents Tensorflow from allocating all of the GPU VRAM at launch \"\n \"but can lead to higher VRAM fragmentation and slower performance. Should only \"\n \"be enabled if you are having problems running extraction.\")\n self.add_item(\n section=section,\n title=\"aligner_min_scale\",\n datatype=float,\n min_max=(0.0, 1.0),\n rounding=2,\n default=0.05,\n group=\"filters\",\n info=\"Filters out faces below this size. 
This is a multiplier of the minimum \"\n \"dimension of the frame (i.e. 1280x720 = 720). If the original face extract \"\n \"box is smaller than the minimum dimension times this multiplier, it is \"\n \"considered a false positive and discarded. Faces which are found to be \"\n \"unusually smaller than the frame tend to be misaligned images, except in \"\n \"extreme long-shots. These can be usually be safely discarded.\")\n self.add_item(\n section=section,\n title=\"aligner_max_scale\",\n datatype=float,\n min_max=(0.0, 10.0),\n rounding=2,\n default=2.00,\n group=\"filters\",\n info=\"Filters out faces above this size. This is a multiplier of the minimum \"\n \"dimension of the frame (i.e. 1280x720 = 720). If the original face extract \"\n \"box is larger than the minimum dimension times this multiplier, it is \"\n \"considered a false positive and discarded. Faces which are found to be \"\n \"unusually larger than the frame tend to be misaligned images except in extreme \"\n \"close-ups. These can be usually be safely discarded.\")\n self.add_item(\n section=section,\n title=\"aligner_distance\",\n datatype=float,\n min_max=(0.0, 25.0),\n rounding=1,\n default=16,\n group=\"filters\",\n info=\"Filters out faces who's landmarks are above this distance from an 'average' \"\n \"face. Values above 16 tend to be fairly safe. Values above 10 will remove more \"\n \"false positives, but may also filter out some faces at extreme angles.\")\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 914, "n_words": 288, "vocab_size": 148, "complexity": 1, "nloc": 53, "token_counts": 206, "n_ast_nodes": 322, "n_identifiers": 16, "d_id": 21051, "documentation": { "docstring": "\n Set the global options for extract\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 21, "language": "en" } }, { "id": 30105, "commit_id": "fa2ad657482aca9dc628e6d7062b8badf2706bb6", "repo": "spotify-downloader", "path": "spotdl/types/saved.py", "file_name": "saved.py", "fun_name": "load", "commit_message": "v4 init", "code": "def load(cls):\n \n\n urls = cls.get_urls()\n\n # Remove songs without id\n # and create Song objects\n tracks = [Song.from_url(url) for url in urls]\n\n return cls(tracks)\n", "url": "https://github.com/spotDL/spotify-downloader.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 66, "n_words": 24, "vocab_size": 22, "complexity": 2, "nloc": 4, "token_counts": 32, "n_ast_nodes": 56, "n_identifiers": 8, "d_id": 5314, "documentation": { "docstring": "\n Loads saved tracks from Spotify.\n Will throw an exception if users is not logged in.\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 37, "language": "en" } }, { "id": 281209, "commit_id": "18c3a4e5f69de5909fd3f516e54855b938bda51f", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/cryptocurrency/overview/overview_controller.py", "file_name": "overview_controller.py", "fun_name": "print_help", "commit_message": "Feature (crypto): Altcoin season index (#1155)\n\n* adding blockchaincenter model\r\n\r\n* added altindex feature\r\n\r\n* fix tests name\r\n\r\n* added autocompletion and fixed chart\r\n\r\n* fixed help strings and chart issue\r\n\r\n* refactor for subplot\r\n\r\n* changed dates to more readable format", "code": "def print_help(self):\n \n help_text = \n\n print(help_text)\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 
0, "ast_levels": 7, "n_whitespaces": 27, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 39, "token_counts": 13, "n_ast_nodes": 27, "n_identifiers": 4, "d_id": 83615, "documentation": { "docstring": "Print help\nOverview Menu:\n\nCoinGecko:\n cgglobal global crypto market info\n cgnews last news available on CoinGecko\n cgdefi global DeFi market info\n cgstables stablecoins\n cgnft non fungible token market status\n cgnftday non fungible token of the day\n cgexchanges top crypto exchanges\n cgexrates coin exchange rates\n cgplatforms crypto financial platforms\n cgproducts crypto financial products\n cgindexes crypto indexes\n cgderivatives crypto derivatives\n cgcategories crypto categories\n cghold ethereum, bitcoin holdings overview statistics\nCoinPaprika:\n cpglobal global crypto market info\n cpinfo basic info about all coins available on CoinPaprika\n cpmarkets market related info about all coins available on CoinPaprika\n cpexchanges list all exchanges\n cpexmarkets all available markets on given exchange\n cpplatforms list blockchain platforms eg. ethereum, solana, kusama, terra\n cpcontracts all smart contracts for given platform\nCoinbase:\n cbpairs info about available trading pairs on Coinbase\nCryptoPanic:\n news recent crypto news from CryptoPanic aggregator\nWithdrawalFees:\n wf overall withdrawal fees\n ewf overall exchange withdrawal fees\n wfpe crypto withdrawal fees per exchange\nBlockchainCenter:\n altindex displays altcoin season index (if 75% of top 50 coins perform better than btc)\n", "n_words": 168, "vocab_size": 110, "n_whitespaces": 482, "language": "en" } }, { "id": 67421, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/selling/report/pending_so_items_for_purchase_request/pending_so_items_for_purchase_request.py", "file_name": "pending_so_items_for_purchase_request.py", "fun_name": "get_data", "commit_message": "style: format code with black", "code": "def get_data():\n\tsales_order_entry = frappe.db.sql(\n\t\t,\n\t\tas_dict=1,\n\t)\n\n\tsales_orders = [row.name for row in sales_order_entry]\n\tmr_records = frappe.get_all(\n\t\t\"Material Request Item\",\n\t\t{\"sales_order\": (\"in\", sales_orders), \"docstatus\": 1},\n\t\t[\"parent\", \"qty\", \"sales_order\", \"item_code\"],\n\t)\n\n\tbundled_item_map = get_packed_items(sales_orders)\n\n\titem_with_product_bundle = get_items_with_product_bundle(\n\t\t[row.item_code for row in sales_order_entry]\n\t)\n\n\tmaterials_request_dict = {}\n\n\tfor record in mr_records:\n\t\tkey = (record.sales_order, record.item_code)\n\t\tif key not in materials_request_dict:\n\t\t\tmaterials_request_dict.setdefault(key, {\"qty\": 0, \"material_requests\": [record.parent]})\n\n\t\tdetails = materials_request_dict.get(key)\n\t\tdetails[\"qty\"] += record.qty\n\n\t\tif record.parent not in details.get(\"material_requests\"):\n\t\t\tdetails[\"material_requests\"].append(record.parent)\n\n\tpending_so = []\n\tfor so in sales_order_entry:\n\t\tif so.item_code not in item_with_product_bundle:\n\t\t\tmaterial_requests_against_so = materials_request_dict.get((so.name, so.item_code)) or {}\n\t\t\t# check for pending sales order\n\t\t\tif flt(so.total_qty) > flt(material_requests_against_so.get(\"qty\")):\n\t\t\t\tso_record = {\n\t\t\t\t\t\"item_code\": so.item_code,\n\t\t\t\t\t\"item_name\": so.item_name,\n\t\t\t\t\t\"description\": so.description,\n\t\t\t\t\t\"sales_order_no\": so.name,\n\t\t\t\t\t\"date\": so.transaction_date,\n\t\t\t\t\t\"material_request\": 
\",\".join(material_requests_against_so.get(\"material_requests\", [])),\n\t\t\t\t\t\"customer\": so.customer,\n\t\t\t\t\t\"territory\": so.territory,\n\t\t\t\t\t\"so_qty\": so.total_qty,\n\t\t\t\t\t\"requested_qty\": material_requests_against_so.get(\"qty\"),\n\t\t\t\t\t\"pending_qty\": so.total_qty - flt(material_requests_against_so.get(\"qty\")),\n\t\t\t\t\t\"company\": so.company,\n\t\t\t\t}\n\t\t\t\tpending_so.append(so_record)\n\t\telse:\n\t\t\tfor item in bundled_item_map.get((so.name, so.item_code), []):\n\t\t\t\tmaterial_requests_against_so = materials_request_dict.get((so.name, item.item_code)) or {}\n\t\t\t\tif flt(item.qty) > flt(material_requests_against_so.get(\"qty\")):\n\t\t\t\t\tso_record = {\n\t\t\t\t\t\t\"item_code\": item.item_code,\n\t\t\t\t\t\t\"item_name\": item.item_name,\n\t\t\t\t\t\t\"description\": item.description,\n\t\t\t\t\t\t\"sales_order_no\": so.name,\n\t\t\t\t\t\t\"date\": so.transaction_date,\n\t\t\t\t\t\t\"material_request\": \",\".join(material_requests_against_so.get(\"material_requests\", [])),\n\t\t\t\t\t\t\"customer\": so.customer,\n\t\t\t\t\t\t\"territory\": so.territory,\n\t\t\t\t\t\t\"so_qty\": item.qty,\n\t\t\t\t\t\t\"requested_qty\": material_requests_against_so.get(\"qty\", 0),\n\t\t\t\t\t\t\"pending_qty\": item.qty - flt(material_requests_against_so.get(\"qty\", 0)),\n\t\t\t\t\t\t\"company\": so.company,\n\t\t\t\t\t}\n\t\t\t\t\tpending_so.append(so_record)\n\n\treturn pending_so\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 23, "n_whitespaces": 124, "n_words": 189, "vocab_size": 122, "complexity": 13, "nloc": 82, "token_counts": 501, "n_ast_nodes": 832, "n_identifiers": 40, "d_id": 14520, "documentation": { "docstring": "\n\t\tSELECT\n\t\t\tso_item.item_code,\n\t\t\tso_item.item_name,\n\t\t\tso_item.description,\n\t\t\tso.name,\n\t\t\tso.transaction_date,\n\t\t\tso.customer,\n\t\t\tso.territory,\n\t\t\tsum(so_item.qty) as total_qty,\n\t\t\tso.company\n\t\tFROM `tabSales Order` so, `tabSales Order Item` so_item\n\t\tWHERE\n\t\t\tso.docstatus = 1\n\t\t\tand so.name = so_item.parent\n\t\t\tand so.status not in (\"Closed\",\"Completed\",\"Cancelled\")\n\t\tGROUP BY\n\t\t\tso.name,so_item.item_code\n\t\t", "n_words": 36, "vocab_size": 33, "n_whitespaces": 20, "language": "en" } }, { "id": 26494, "commit_id": "aca6418d6c36956bc1ab530e6ef7e146ec9df90c", "repo": "saleor", "path": "saleor/plugins/webhook/tests/subscription_webhooks/test_create_deliveries_for_subscription.py", "file_name": "test_create_deliveries_for_subscription.py", "fun_name": "test_validate_subscription_query_valid_with_fragment", "commit_message": "Add Webhook payload via graphql subscriptions (#9394)\n\n* Add PoC of webhook subscriptions\r\n\r\n* add async webhooks subscription payloads feature\r\n\r\n* remove unneeded file\r\n\r\n* add translations subscription handling, fixes after review\r\n\r\n* remove todo\r\n\r\n* add descriptions\r\n\r\n* add descriptions, move subsrciption_payloads.py\r\n\r\n* refactor\r\n\r\n* fix imports, add changelog\r\n\r\n* check_document_is_single_subscription refactor\r\n\r\nCo-authored-by: Maciej Korycinski \r\nCo-authored-by: Marcin Gębala <5421321+maarcingebala@users.noreply.github.com>", "code": "def test_validate_subscription_query_valid_with_fragment():\n\n result = validate_subscription_query(TEST_VALID_SUBSCRIPTION_QUERY_WITH_FRAGMENT)\n assert result is True\n\n\nTEST_INVALID_MULTIPLE_QUERY_AND_SUBSCRIPTION = \n\n", "url": 
"https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 16, "n_words": 11, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 14, "n_ast_nodes": 31, "n_identifiers": 5, "d_id": 5023, "documentation": { "docstring": "\nquery{\n products(first:100){\n edges{\n node{\n id\n }\n }\n }\n}\nsubscription{\n event{\n ...on ProductUpdated{\n product{\n id\n }\n }\n }\n}", "n_words": 19, "vocab_size": 11, "n_whitespaces": 65, "language": "en" } }, { "id": 124753, "commit_id": "b383d987d161fee39fafe873c0822f4ea6ea02eb", "repo": "ray", "path": "rllib/evaluation/rollout_worker.py", "file_name": "rollout_worker.py", "fun_name": "sample_with_count", "commit_message": "[RLlib] Fix a bunch of issues related to connectors. (#26510)", "code": "def sample_with_count(self) -> Tuple[SampleBatchType, int]:\n \n batch = self.sample()\n return batch, batch.count\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 32, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 19, "token_counts": 26, "n_ast_nodes": 43, "n_identifiers": 8, "d_id": 27674, "documentation": { "docstring": "Same as sample() but returns the count as a separate value.\n\n Returns:\n A columnar batch of experiences (e.g., tensors) and the\n size of the collected batch.\n\n Examples:\n >>> import gym\n >>> from ray.rllib.evaluation.rollout_worker import RolloutWorker\n >>> from ray.rllib.algorithms.pg.pg_tf_policy import PGTF1Policy\n >>> worker = RolloutWorker( # doctest: +SKIP\n ... env_creator=lambda _: gym.make(\"CartPole-v0\"), # doctest: +SKIP\n ... policy_spec=PGTFPolicy) # doctest: +SKIP\n >>> print(worker.sample_with_count()) # doctest: +SKIP\n (SampleBatch({\"obs\": [...], \"action\": [...], ...}), 3)\n ", "n_words": 70, "vocab_size": 48, "n_whitespaces": 209, "language": "en" } }, { "id": 137274, "commit_id": "794cfd9725b4dc113aa50e60428367b15e921514", "repo": "ray", "path": "rllib/algorithms/algorithm_config.py", "file_name": "algorithm_config.py", "fun_name": "overrides", "commit_message": "[RLlib] `AlgorithmConfig.overrides()` to replace `multiagent->policies->config` and `evaluation_config` dicts. (#30879)", "code": "def overrides(cls, **kwargs):\n \n default_config = cls()\n config_overrides = {}\n for key, value in kwargs.items():\n if not hasattr(default_config, key):\n raise KeyError(\n f\"Invalid property name {key} for config class {cls.__name__}!\"\n )\n # Allow things like \"lambda\" as well.\n key = cls._translate_special_keys(key, warn_deprecated=True)\n config_overrides[key] = value\n\n return config_overrides\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 173, "n_words": 45, "vocab_size": 39, "complexity": 3, "nloc": 11, "token_counts": 64, "n_ast_nodes": 116, "n_identifiers": 13, "d_id": 31119, "documentation": { "docstring": "Generates and validates a set of config key/value pairs (passed via kwargs).\n\n Validation whether given config keys are valid is done immediately upon\n construction (by comparing against the properties of a default AlgorithmConfig\n object of this class).\n Allows combination with a full AlgorithmConfig object to yield a new\n AlgorithmConfig object.\n\n Used anywhere, we would like to enable the user to only define a few config\n settings that would change with respect to some main config, e.g. 
in multi-agent\n setups and evaluation configs.\n\n Examples:\n >>> from ray.rllib.algorithms.ppo import PPOConfig\n >>> from ray.rllib.policy.policy import PolicySpec\n >>> config = (\n ... PPOConfig()\n ... .multi_agent(\n ... policies={\n ... \"pol0\": PolicySpec(config=PPOConfig.overrides(lambda_=0.95))\n ... },\n ... )\n ... )\n\n >>> from ray.rllib.algorithms.algorithm_config import AlgorithmConfig\n >>> from ray.rllib.algorithms.pg import PGConfig\n >>> config = (\n ... PGConfig()\n ... .evaluation(\n ... evaluation_num_workers=1,\n ... evaluation_interval=1,\n ... evaluation_config=AlgorithmConfig.overrides(explore=False),\n ... )\n ... )\n\n Returns:\n A dict mapping valid config property-names to values.\n\n Raises:\n KeyError: In case a non-existing property name (kwargs key) is being\n passed in. Valid property names are taken from a default AlgorithmConfig\n object of `cls`.\n ", "n_words": 175, "vocab_size": 112, "n_whitespaces": 599, "language": "en" } }, { "id": 108976, "commit_id": "f34d0b9fb38b813eef3eb0de0d424860f9b3b102", "repo": "matplotlib", "path": "examples/axisartist/demo_floating_axes.py", "file_name": "demo_floating_axes.py", "fun_name": "setup_axes3", "commit_message": "Display grid in floating axes example.\n\nThis is the only full featured example with floating axes, so displaying\ngrids makes it easier to check that grids are indeed working.", "code": "def setup_axes3(fig, rect):\n \n\n # rotate a bit for better orientation\n tr_rotate = Affine2D().translate(-95, 0)\n\n # scale degree to radians\n tr_scale = Affine2D().scale(np.pi/180., 1.)\n\n tr = tr_rotate + tr_scale + PolarAxes.PolarTransform()\n\n grid_locator1 = angle_helper.LocatorHMS(4)\n tick_formatter1 = angle_helper.FormatterHMS()\n\n grid_locator2 = MaxNLocator(3)\n\n # Specify theta limits in degrees\n ra0, ra1 = 8.*15, 14.*15\n # Specify radial limits\n cz0, cz1 = 0, 14000\n grid_helper = floating_axes.GridHelperCurveLinear(\n tr, extremes=(ra0, ra1, cz0, cz1),\n grid_locator1=grid_locator1,\n grid_locator2=grid_locator2,\n tick_formatter1=tick_formatter1,\n tick_formatter2=None)\n\n ax1 = fig.add_subplot(\n rect, axes_class=floating_axes.FloatingAxes, grid_helper=grid_helper)\n\n # adjust axis\n ax1.axis[\"left\"].set_axis_direction(\"bottom\")\n ax1.axis[\"right\"].set_axis_direction(\"top\")\n\n ax1.axis[\"bottom\"].set_visible(False)\n ax1.axis[\"top\"].set_axis_direction(\"bottom\")\n ax1.axis[\"top\"].toggle(ticklabels=True, label=True)\n ax1.axis[\"top\"].major_ticklabels.set_axis_direction(\"top\")\n ax1.axis[\"top\"].label.set_axis_direction(\"top\")\n\n ax1.axis[\"left\"].label.set_text(r\"cz [km$^{-1}$]\")\n ax1.axis[\"top\"].label.set_text(r\"$\\alpha_{1950}$\")\n ax1.grid()\n\n # create a parasite axes whose transData in RA, cz\n aux_ax = ax1.get_aux_axes(tr)\n\n aux_ax.patch = ax1.patch # for aux_ax to have a clip path as in ax\n ax1.patch.zorder = 0.9 # but this has a side effect that the patch is\n # drawn twice, and possibly over some other\n # artists. 
So, we decrease the zorder a bit to\n # prevent this.\n\n return ax1, aux_ax\n\n\n##########################################################\nfig = plt.figure(figsize=(8, 4))\nfig.subplots_adjust(wspace=0.3, left=0.05, right=0.95)\n\nax1, aux_ax1 = setup_axes1(fig, 131)\naux_ax1.bar([0, 1, 2, 3], [3, 2, 1, 3])\n\nax2, aux_ax2 = setup_axes2(fig, 132)\ntheta = np.random.rand(10)*.5*np.pi\nradius = np.random.rand(10) + 1.\naux_ax2.scatter(theta, radius)\n\nax3, aux_ax3 = setup_axes3(fig, 133)\n\ntheta = (8 + np.random.rand(10)*(14 - 8))*15. # in degrees\nradius = np.random.rand(10)*14000.\naux_ax3.scatter(theta, radius)\n\nplt.show()\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 347, "n_words": 214, "vocab_size": 152, "complexity": 1, "nloc": 31, "token_counts": 293, "n_ast_nodes": 732, "n_identifiers": 67, "d_id": 23409, "documentation": { "docstring": "\n Sometimes, things like axis_direction need to be adjusted.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 15, "language": "en" } }, { "id": 3612, "commit_id": "91eff1dffdb04be968b6ee4ef8d8bbfeb2e882d0", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-s3/source_s3/s3file.py", "file_name": "s3file.py", "fun_name": "_setup_boto_session", "commit_message": "🐛 Source S3: Loading of files' metadata (#8252)", "code": "def _setup_boto_session(self) -> None:\n \n if self.use_aws_account:\n self._boto_session = boto3session.Session(\n aws_access_key_id=self._provider.get(\"aws_access_key_id\"),\n aws_secret_access_key=self._provider.get(\"aws_secret_access_key\"),\n )\n self._boto_s3_resource = make_s3_resource(self._provider, session=self._boto_session)\n else:\n self._boto_session = boto3session.Session()\n self._boto_s3_resource = make_s3_resource(self._provider, config=Config(signature_version=UNSIGNED), session=self._boto_session)\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 131, "n_words": 25, "vocab_size": 18, "complexity": 2, "nloc": 15, "token_counts": 96, "n_ast_nodes": 155, "n_identifiers": 17, "d_id": 497, "documentation": { "docstring": "\n Making a new Session at file level rather than stream level as boto3 sessions are NOT thread-safe.\n Currently grabbing last_modified across multiple files asynchronously and may implement more multi-threading in future.\n See https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html (anchor link broken, scroll to bottom)\n ", "n_words": 39, "vocab_size": 38, "n_whitespaces": 68, "language": "en" } }, { "id": 269611, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "transpose", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def transpose(x):\n \n return tf.compat.v1.transpose(x)\n\n\n@keras_export(\"keras.backend.gather\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.backend.gather\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 10, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 17, "n_ast_nodes": 59, "n_identifiers": 11, "d_id": 80230, 
"documentation": { "docstring": "Transposes a tensor and returns it.\n\n Args:\n x: Tensor or variable.\n\n Returns:\n A tensor.\n\n Examples:\n\n >>> var = tf.keras.backend.variable([[1, 2, 3], [4, 5, 6]])\n >>> tf.keras.backend.eval(var)\n array([[1., 2., 3.],\n [4., 5., 6.]], dtype=float32)\n >>> var_transposed = tf.keras.backend.transpose(var)\n >>> tf.keras.backend.eval(var_transposed)\n array([[1., 4.],\n [2., 5.],\n [3., 6.]], dtype=float32)\n >>> input = tf.keras.backend.placeholder((2, 3))\n >>> input\n \n >>> input_transposed = tf.keras.backend.transpose(input)\n >>> input_transposed\n \n ", "n_words": 69, "vocab_size": 51, "n_whitespaces": 168, "language": "en" } }, { "id": 202390, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/csrf_tests/tests.py", "file_name": "tests.py", "fun_name": "test_csrf_cookie_bad_or_missing_token", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_csrf_cookie_bad_or_missing_token(self):\n \n cases = [\n (None, None, REASON_CSRF_TOKEN_MISSING),\n (16 * \"a\", None, \"CSRF token from POST has incorrect length.\"),\n (64 * \"*\", None, \"CSRF token from POST has invalid characters.\"),\n (64 * \"a\", None, \"CSRF token from POST incorrect.\"),\n (\n None,\n 16 * \"a\",\n \"CSRF token from the 'X-Csrftoken' HTTP header has incorrect length.\",\n ),\n (\n None,\n 64 * \"*\",\n \"CSRF token from the 'X-Csrftoken' HTTP header has invalid characters.\",\n ),\n (\n None,\n 64 * \"a\",\n \"CSRF token from the 'X-Csrftoken' HTTP header incorrect.\",\n ),\n ]\n for post_token, meta_token, expected in cases:\n with self.subTest(post_token=post_token, meta_token=meta_token):\n self._check_bad_or_missing_token(\n expected,\n post_token=post_token,\n meta_token=meta_token,\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 471, "n_words": 100, "vocab_size": 49, "complexity": 2, "nloc": 29, "token_counts": 119, "n_ast_nodes": 184, "n_identifiers": 9, "d_id": 50101, "documentation": { "docstring": "\n If a CSRF cookie is present but the token is missing or invalid, the\n middleware rejects the incoming request.\n ", "n_words": 19, "vocab_size": 16, "n_whitespaces": 41, "language": "en" } }, { "id": 112291, "commit_id": "2d8f925b5ac558c45589bd90324efc86a568539e", "repo": "nni", "path": "nni/common/serializer.py", "file_name": "serializer.py", "fun_name": "get", "commit_message": "Bug fix of Retiarii hyperparameter mutation (#4751)", "code": "def get(self) -> Any:\n \n raise NotImplementedError()\n\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 5, "token_counts": 12, "n_ast_nodes": 23, "n_identifiers": 4, "d_id": 24626, "documentation": { "docstring": "\n Get the original object. Usually used together with ``trace_copy``.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 22233, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/vendor/requirementslib/models/requirements.py", "file_name": "requirements.py", "fun_name": "find_all_matches", "commit_message": "Rename notpip to pip. 
Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def find_all_matches(self, sources=None, finder=None):\n # type: (Optional[List[Dict[S, Union[S, bool]]]], Optional[PackageFinder]) -> List[InstallationCandidate]\n \n\n from .dependencies import find_all_matches, get_finder\n\n if not finder:\n _, finder = get_finder(sources=sources)\n return find_all_matches(finder, self.as_ireq())\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 73, "n_words": 27, "vocab_size": 27, "complexity": 2, "nloc": 5, "token_counts": 46, "n_ast_nodes": 77, "n_identifiers": 8, "d_id": 4277, "documentation": { "docstring": "Find all matching candidates for the current requirement.\n\n Consults a finder to find all matching candidates.\n\n :param sources: Pipfile-formatted sources, defaults to None\n :param sources: list[dict], optional\n :param PackageFinder finder: A **PackageFinder** instance from pip's repository implementation\n :return: A list of Installation Candidates\n :rtype: list[ :class:`~pipenv.patched.pip._internal.index.InstallationCandidate` ]\n ", "n_words": 47, "vocab_size": 40, "n_whitespaces": 96, "language": "en" } }, { "id": 207827, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_basic_add_GET", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_basic_add_GET(self):\n \n response = self.client.get(reverse(\"admin:admin_views_section_add\"))\n self.assertIsInstance(response, TemplateResponse)\n self.assertEqual(response.status_code, 200)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 37, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 4, "token_counts": 37, "n_ast_nodes": 63, "n_identifiers": 10, "d_id": 52119, "documentation": { "docstring": "\n A smoke test to ensure GET on the add_view works.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 48642, "commit_id": "8b2ccccbe53f855fd9ee9a06e7b7997270e26dda", "repo": "django-rest-framework", "path": "tests/test_fields.py", "file_name": "test_fields.py", "fun_name": "test_create_only_default_callable_sets_context", "commit_message": "Stop calling `set_context`, planned for 3.13 drop (#8589)\n\nPer the deprecation warnings (which have been raised since DRF 3.11),\r\n`set_context()` was planned not to be supported in DRF 3.13. I think we\r\ncan safely delete it, in favor of `requires_context`.\r\n\r\nFrom the 3.11 announcement:\r\n\r\n> Previous our approach to this was that implementations could include a\r\n> `set_context` method, which would be called prior to validation. However\r\n> this approach had issues with potential race conditions. We have now\r\n> move this approach into a pending deprecation state. 
It will continue to\r\n> function, but will be escalated to a deprecated state in 3.12, and\r\n> removed entirely in 3.13.\r\n\r\nWhy keep `RemovedInDRF313Warning` around?\r\n=========================================\r\nIt's a bit odd that version 3.13 includes an exception class describing\r\nthings which are to be deleted in 3.13, but I've opted to keep the (now\r\nunreferenced) class around, for fear of breaking others' setup.\r\n\r\n(For example, if projects have a `filterwarnings` setup meant to\r\nintercept `rest_framework.RemovedInDRF313Warning`, an error will be\r\nthrown due to an unresolvable reference).", "code": "def test_create_only_default_callable_sets_context(self):\n ", "url": "https://github.com/encode/django-rest-framework.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 9, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 9, "token_counts": 63, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 9552, "documentation": { "docstring": "\n CreateOnlyDefault instances with a callable default should set context\n on the callable if possible\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 36, "language": "en" } }, { "id": 320165, "commit_id": "f8ce6285df44cc580319c370a9d76149012615b1", "repo": "paperless-ngx", "path": "src/documents/tests/test_barcodes.py", "file_name": "test_barcodes.py", "fun_name": "test_barcode_splitter_legacy_fallback", "commit_message": "Allows using pdf2image instead of pikepdf if desired", "code": "def test_barcode_splitter_legacy_fallback(self):\n \n test_file = os.path.join(\n self.BARCODE_SAMPLE_DIR,\n \"patch-code-t-middle.pdf\",\n )\n tempdir = tempfile.mkdtemp(prefix=\"paperless-\", dir=settings.SCRATCH_DIR)\n\n pdf_file, separator_page_numbers = barcodes.scan_file_for_separating_barcodes(\n test_file,\n )\n\n self.assertEqual(test_file, pdf_file)\n self.assertTrue(len(separator_page_numbers) > 0)\n\n document_list = barcodes.separate_pages(test_file, separator_page_numbers)\n self.assertTrue(document_list)\n for document in document_list:\n barcodes.save_to_dir(document, target_dir=tempdir)\n\n target_file1 = os.path.join(tempdir, \"patch-code-t-middle_document_0.pdf\")\n target_file2 = os.path.join(tempdir, \"patch-code-t-middle_document_1.pdf\")\n\n self.assertTrue(os.path.isfile(target_file1))\n self.assertTrue(os.path.isfile(target_file2))\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 193, "n_words": 44, "vocab_size": 37, "complexity": 2, "nloc": 19, "token_counts": 148, "n_ast_nodes": 239, "n_identifiers": 29, "d_id": 117082, "documentation": { "docstring": "\n GIVEN:\n - File containing barcode\n - Legacy method of detection is enabled\n WHEN:\n - File is scanned for barcodes\n THEN:\n - Barcodes are properly detected\n ", "n_words": 25, "vocab_size": 20, "n_whitespaces": 98, "language": "en" } }, { "id": 226189, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_bar.py", "file_name": "_bar.py", "fun_name": "selected", "commit_message": "switch to black .22", "code": "def selected(self):\n \n return self[\"selected\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 
57862, "documentation": { "docstring": "\n The 'selected' property is an instance of Selected\n that may be specified as:\n - An instance of :class:`plotly.graph_objs.bar.Selected`\n - A dict of string/value properties that will be passed\n to the Selected constructor\n\n Supported dict properties:\n\n marker\n :class:`plotly.graph_objects.bar.selected.Marke\n r` instance or dict with compatible properties\n textfont\n :class:`plotly.graph_objects.bar.selected.Textf\n ont` instance or dict with compatible\n properties\n\n Returns\n -------\n plotly.graph_objs.bar.Selected\n ", "n_words": 56, "vocab_size": 39, "n_whitespaces": 264, "language": "en" } }, { "id": 261652, "commit_id": "758fe0d9c72ba343097003e7992c9239e58bfc63", "repo": "scikit-learn", "path": "sklearn/model_selection/tests/test_plot.py", "file_name": "test_plot.py", "fun_name": "test_learning_curve_display_default_usage", "commit_message": "FEA add LearningCurveDisplay to show plot learning curve (#24084)\n\nCo-authored-by: jeremie du boisberranger \r\nCo-authored-by: Arturo Amor <86408019+ArturoAmorQ@users.noreply.github.com>", "code": "def test_learning_curve_display_default_usage(pyplot, data):\n \n X, y = data\n estimator = DecisionTreeClassifier(random_state=0)\n\n train_sizes = [0.3, 0.6, 0.9]\n display = LearningCurveDisplay.from_estimator(\n estimator, X, y, train_sizes=train_sizes\n )\n\n import matplotlib as mpl\n\n assert display.errorbar_ is None\n\n assert isinstance(display.lines_, list)\n for line in display.lines_:\n assert isinstance(line, mpl.lines.Line2D)\n\n assert isinstance(display.fill_between_, list)\n for fill in display.fill_between_:\n assert isinstance(fill, mpl.collections.PolyCollection)\n assert fill.get_alpha() == 0.5\n\n assert display.score_name == \"Score\"\n assert display.ax_.get_xlabel() == \"Number of samples in the training set\"\n assert display.ax_.get_ylabel() == \"Score\"\n\n _, legend_labels = display.ax_.get_legend_handles_labels()\n assert legend_labels == [\"Testing metric\"]\n\n train_sizes_abs, train_scores, test_scores = learning_curve(\n estimator, X, y, train_sizes=train_sizes\n )\n\n assert_array_equal(display.train_sizes, train_sizes_abs)\n assert_allclose(display.train_scores, train_scores)\n assert_allclose(display.test_scores, test_scores)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 199, "n_words": 98, "vocab_size": 68, "complexity": 3, "nloc": 27, "token_counts": 211, "n_ast_nodes": 313, "n_identifiers": 39, "d_id": 76917, "documentation": { "docstring": "Check the default usage of the LearningCurveDisplay class.", "n_words": 8, "vocab_size": 7, "n_whitespaces": 7, "language": "en" } }, { "id": 153953, "commit_id": "a7354c9ca76525a265da98f2afe882c53f378840", "repo": "modin", "path": "modin/core/execution/dask/implementations/pandas_on_dask/dataframe/dataframe.py", "file_name": "dataframe.py", "fun_name": "_get_partition_size_along_axis", "commit_message": "FEAT-#4419: Extend virtual partitioning API to pandas on Dask (#4420)\n\nSigned-off-by: Rehan Durrani \r\n\r\nCo-authored-by: Mahesh Vashishtha ", "code": "def _get_partition_size_along_axis(self, partition, axis=0):\n \n if isinstance(partition, self._partition_mgr_cls._partition_class):\n return [\n partition.apply(\n lambda df: len(df) if not axis else len(df.columns)\n )._data\n ]\n elif partition.axis == axis:\n return [\n ptn.apply(lambda df: len(df) if not axis else len(df.columns))._data\n for 
ptn in partition.list_of_partitions_to_combine\n ]\n return [\n partition.list_of_partitions_to_combine[0]\n .apply(lambda df: len(df) if not axis else (len(df.columns)))\n ._data\n ]\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 243, "n_words": 52, "vocab_size": 33, "complexity": 7, "nloc": 17, "token_counts": 125, "n_ast_nodes": 193, "n_identifiers": 14, "d_id": 35721, "documentation": { "docstring": "\n Compute the length along the specified axis of the specified partition.\n\n Parameters\n ----------\n partition : ``PandasOnDaskDataframeVirtualPartition`` or ``PandasOnDaskDataframePartition``\n The partition whose size to compute.\n axis : int, default: 0\n The axis along which to compute size.\n\n Returns\n -------\n list\n A list of lengths along the specified axis that sum to the overall length of the partition\n along the specified axis.\n\n Notes\n -----\n This utility function is used to ensure that computation occurs asynchronously across all partitions\n whether the partitions are virtual or physical partitions.\n ", "n_words": 84, "vocab_size": 54, "n_whitespaces": 220, "language": "en" } }, { "id": 196456, "commit_id": "975df9b627556d176039ba3a0f3a2e3a3df9686c", "repo": "sympy", "path": "sympy/vector/vector.py", "file_name": "vector.py", "fun_name": "_projections", "commit_message": "Fixed removals not fully performed earlier", "code": "def _projections(self):\n \n\n from sympy.vector.operators import _get_coord_systems\n if isinstance(self, VectorZero):\n return (S.Zero, S.Zero, S.Zero)\n base_vec = next(iter(_get_coord_systems(self))).base_vectors()\n return tuple([self.dot(i) for i in base_vec])\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 68, "n_words": 22, "vocab_size": 21, "complexity": 3, "nloc": 6, "token_counts": 68, "n_ast_nodes": 106, "n_identifiers": 17, "d_id": 47938, "documentation": { "docstring": "\n Returns the components of this vector but the output includes\n also zero values components.\n\n Examples\n ========\n\n >>> from sympy.vector import CoordSys3D, Vector\n >>> C = CoordSys3D('C')\n >>> v1 = 3*C.i + 4*C.j + 5*C.k\n >>> v1._projections\n (3, 4, 5)\n >>> v2 = C.x*C.y*C.z*C.i\n >>> v2._projections\n (C.x*C.y*C.z, 0, 0)\n >>> v3 = Vector.zero\n >>> v3._projections\n (0, 0, 0)\n ", "n_words": 57, "vocab_size": 43, "n_whitespaces": 170, "language": "en" } }, { "id": 147565, "commit_id": "756d08cd31b71f3654b8ca732c961e8cd9afe71d", "repo": "ray", "path": "doc/source/custom_directives.py", "file_name": "custom_directives.py", "fun_name": "download_and_preprocess_ecosystem_docs", "commit_message": "[docs] Add support for external markdown (#23505)\n\nThis PR fixes the issue of diverging documentation between Ray Docs and ecosystem library readmes which live in separate repos (eg. xgboost_ray). This is achieved by adding an extra step before the docs build process starts that downloads the readmes of specified ecosystem libraries from their GitHub repositories. 
The files are then preprocessed by a very simple parser to allow for differences between GitHub and Docs markdowns.\r\n\r\nIn summary, this makes the markdown files in ecosystem library repositories single sources of truth and removes the need to manually keep the doc pages up to date, all the while allowing for differences between what's rendered on GitHub and in the Docs.\r\n\r\nSee ray-project/xgboost_ray#204 & https://ray--23505.org.readthedocs.build/en/23505/ray-more-libs/xgboost-ray.html for an example.\r\n\r\nNeeds ray-project/xgboost_ray#204 and ray-project/lightgbm_ray#30 to be merged first.", "code": "def download_and_preprocess_ecosystem_docs():\n \n\n import urllib.request\n import requests\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 15, "n_words": 6, "vocab_size": 5, "complexity": 2, "nloc": 8, "token_counts": 33, "n_ast_nodes": 22, "n_identifiers": 4, "d_id": 34004, "documentation": { "docstring": "\n This function downloads markdown readme files for various\n ecosystem libraries, saves them in specified locations and preprocesses\n them before sphinx build starts.\n\n If you have ecosystem libraries that live in a separate repo from Ray,\n adding them here will allow for their docs to be present in Ray docs\n without the need for duplicate files. For more details, see ``doc/README.md``.\n ", "n_words": 60, "vocab_size": 52, "n_whitespaces": 82, "language": "en" } }, { "id": 215394, "commit_id": "ab4803984bce4a4de7cc10910e7310c4babf557e", "repo": "salt", "path": "salt/transport/base.py", "file_name": "base.py", "fun_name": "close", "commit_message": "Start to add base class defs", "code": "def close(self):\n \n\n # XXX: Should have a connect too?\n # def connect(self):\n # \n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 34, "n_words": 13, "vocab_size": 10, "complexity": 1, "nloc": 1, "token_counts": 6, "n_ast_nodes": 18, "n_identifiers": 2, "d_id": 53945, "documentation": { "docstring": "\n Close the connection.\n \n # Connect to the server / broker.\n # ", "n_words": 11, "vocab_size": 9, "n_whitespaces": 39, "language": "en" } }, { "id": 42077, "commit_id": "6460a21555ba6557e1f6f06f4d677d9c19148169", "repo": "seaborn", "path": "seaborn/utils.py", "file_name": "utils.py", "fun_name": "adjust_legend_subtitles", "commit_message": "Workaround for matplotlib rc_context issue (#2925)\n\n* Workaround for matplotlib rc_context issue\r\n\r\nFixes #2914\r\n\r\n* Add some additional comments about this workaround", "code": "def adjust_legend_subtitles(legend):\n \n # Legend title not in rcParams until 3.0\n font_size = plt.rcParams.get(\"legend.title_fontsize\", None)\n hpackers = legend.findobj(mpl.offsetbox.VPacker)[0].get_children()\n for hpack in hpackers:\n draw_area, text_area = hpack.get_children()\n handles = draw_area.get_children()\n if not all(artist.get_visible() for artist in handles):\n draw_area.set_width(0)\n for text in text_area.get_children():\n if font_size is not None:\n text.set_size(font_size)\n\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 138, "n_words": 46, "vocab_size": 34, "complexity": 6, "nloc": 11, "token_counts": 100, "n_ast_nodes": 165, "n_identifiers": 22, "d_id": 7477, "documentation": { "docstring": "\n Make invisible-handle \"subtitles\" entries look 
more like titles.\n\n Note: This function is not part of the public API and may be changed or removed.\n\n ", "n_words": 24, "vocab_size": 24, "n_whitespaces": 34, "language": "en" } }, { "id": 232559, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/template/_data.py", "file_name": "_data.py", "fun_name": "ohlc", "commit_message": "switch to black .22", "code": "def ohlc(self):\n \n return self[\"ohlc\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 64003, "documentation": { "docstring": "\n The 'ohlc' property is a tuple of instances of\n Ohlc that may be specified as:\n - A list or tuple of instances of plotly.graph_objs.layout.template.data.Ohlc\n - A list or tuple of dicts of string/value properties that\n will be passed to the Ohlc constructor\n\n Supported dict properties:\n\n Returns\n -------\n tuple[plotly.graph_objs.layout.template.data.Ohlc]\n ", "n_words": 48, "vocab_size": 33, "n_whitespaces": 131, "language": "en" } }, { "id": 130277, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/_private/thirdparty/pathspec/util.py", "file_name": "util.py", "fun_name": "detailed_match_files", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def detailed_match_files(patterns, files, all_matches=None):\n \n all_files = files if isinstance(files, Collection) else list(files)\n return_files = {}\n for pattern in patterns:\n if pattern.include is not None:\n result_files = pattern.match(all_files)\n if pattern.include:\n # Add files and record pattern.\n for result_file in result_files:\n if result_file in return_files:\n if all_matches:\n return_files[result_file].patterns.append(pattern)\n else:\n return_files[result_file].patterns[0] = pattern\n else:\n return_files[result_file] = MatchDetail([pattern])\n\n else:\n # Remove files.\n for file in result_files:\n del return_files[file]\n\n return return_files\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 361, "n_words": 66, "vocab_size": 45, "complexity": 9, "nloc": 19, "token_counts": 121, "n_ast_nodes": 190, "n_identifiers": 17, "d_id": 29202, "documentation": { "docstring": "\n Matches the files to the patterns, and returns which patterns matched\n the files.\n\n *patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)\n contains the patterns to use.\n\n *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains\n the normalized file paths to be matched against *patterns*.\n\n *all_matches* (:class:`boot` or :data:`None`) is whether to return all\n matches patterns (:data:`True`), or only the last matched pattern\n (:data:`False`). 
Default is :data:`None` for :data:`False`.\n\n Returns the matched files (:class:`dict`) which maps each matched file\n (:class:`str`) to the patterns that matched in order (:class:`.MatchDetail`).\n ", "n_words": 79, "vocab_size": 52, "n_whitespaces": 116, "language": "en" } }, { "id": 228402, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_table.py", "file_name": "_table.py", "fun_name": "columnordersrc", "commit_message": "switch to black .22", "code": "def columnordersrc(self):\n \n return self[\"columnordersrc\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 60075, "documentation": { "docstring": "\n Sets the source reference on Chart Studio Cloud for\n `columnorder`.\n\n The 'columnordersrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n ", "n_words": 27, "vocab_size": 25, "n_whitespaces": 84, "language": "en" } }, { "id": 285143, "commit_id": "bd12c203a0585dab6ca3ff81c3b4500e088b41d6", "repo": "OpenBBTerminal", "path": "openbb_terminal/stocks/discovery/yahoofinance_model.py", "file_name": "yahoofinance_model.py", "fun_name": "get_ugs", "commit_message": "Fixed bad yfinance urls (#2282)", "code": "def get_ugs() -> pd.DataFrame:\n \n return get_df(\n \"https://finance.yahoo.com/screener/predefined/undervalued_growth_stocks\"\n )\n\n\n@log_start_end(log=logger)", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@log_start_end(log=logger)", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 24, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 12, "token_counts": 14, "n_ast_nodes": 40, "n_identifiers": 7, "d_id": 85185, "documentation": { "docstring": "Get stocks with earnings growth rates better than 25% and relatively low PE and PEG ratios.\n [Source: Yahoo Finance]\n\n Returns\n -------\n pd.DataFrame\n Undervalued stocks\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 46, "language": "en" } }, { "id": 109119, "commit_id": "2d918ba09155810194bb4ba136369082ad46c8c8", "repo": "matplotlib", "path": "lib/matplotlib/pyplot.py", "file_name": "pyplot.py", "fun_name": "ion", "commit_message": "Simplify impl. 
of functions optionally used as context managers.\n\nWe can actually just put the \"exit\" logic into an ExitStack callback.\nIf the return value is never `__enter__`'d via a \"with\" statement, it is\nnever `__exit__`'d either.", "code": "def ion():\n \n stack = ExitStack()\n stack.callback(ion if isinteractive() else ioff)\n matplotlib.interactive(True)\n install_repl_displayhook()\n return stack\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 32, "n_words": 14, "vocab_size": 13, "complexity": 2, "nloc": 6, "token_counts": 33, "n_ast_nodes": 59, "n_identifiers": 9, "d_id": 23442, "documentation": { "docstring": "\n Enable interactive mode.\n\n See `.pyplot.isinteractive` for more details.\n\n See Also\n --------\n ioff : Disable interactive mode.\n isinteractive : Whether interactive mode is enabled.\n show : Show all figures (and maybe block).\n pause : Show all figures, and block for a time.\n\n Notes\n -----\n For a temporary change, this can be used as a context manager::\n\n # if interactive mode is off\n # then figures will not be shown on creation\n plt.ioff()\n # This figure will not be shown immediately\n fig = plt.figure()\n\n with plt.ion():\n # interactive mode will be on\n # figures will automatically be shown\n fig2 = plt.figure()\n # ...\n\n To enable optional usage as a context manager, this function returns a\n `~contextlib.ExitStack` object, which is not intended to be stored or\n accessed by the user.\n ", "n_words": 127, "vocab_size": 82, "n_whitespaces": 259, "language": "en" } }, { "id": 108004, "commit_id": "075ff0952896f44d7d0b0b3318f0978ae53f84d7", "repo": "matplotlib", "path": "examples/misc/custom_projection.py", "file_name": "custom_projection.py", "fun_name": "format_coord", "commit_message": "Small style fixes.", "code": "def format_coord(self, lon, lat):\n \n lon, lat = np.rad2deg([lon, lat])\n ns = 'N' if lat >= 0.0 else 'S'\n ew = 'E' if lon >= 0.0 else 'W'\n return ('%f\\N{DEGREE SIGN}%s, %f\\N{DEGREE SIGN}%s'\n % (abs(lat), ns, abs(lon), ew))\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 87, "n_words": 37, "vocab_size": 29, "complexity": 3, "nloc": 6, "token_counts": 66, "n_ast_nodes": 102, "n_identifiers": 9, "d_id": 23009, "documentation": { "docstring": "\n Override this method to change how the values are displayed in\n the status bar.\n\n In this case, we want them to be displayed in degrees N/S/E/W.\n ", "n_words": 26, "vocab_size": 21, "n_whitespaces": 55, "language": "en" } }, { "id": 269092, "commit_id": "d56b634f711802ae88c277926b6634465f346275", "repo": "keras", "path": "keras/dtensor/utils.py", "file_name": "utils.py", "fun_name": "call_with_layout", "commit_message": "Remove the @tf.function for the dtensor run_with_layout().\n\nThis was creating one tf.function per initializer, and causing function retracing. 
We only need this currently for Identity initializer, since tf.function will convert the tf.MatrixDiag to tf.constant.\n\nPiperOrigin-RevId: 433516308", "code": "def call_with_layout(fn, layout, *args, **kwargs):\n \n if layout:\n with dtensor.run_on(layout):\n result = fn(*args, **kwargs)\n return dtensor.relayout(result, layout)\n return fn(*args, **kwargs)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 35, "n_words": 19, "vocab_size": 16, "complexity": 2, "nloc": 6, "token_counts": 53, "n_ast_nodes": 86, "n_identifiers": 9, "d_id": 79890, "documentation": { "docstring": "Invoke the function with inputs and relayout the result.\n\n Args:\n fn: the function to invoke.\n layout: if not None, the output of the fn will be relayout with this.\n *args: positional arguments to be called with fn.\n **kwargs: keyword arguments to be called with fn.\n\n Returns:\n The output of fn, with potential relayout with the layout specified.\n ", "n_words": 57, "vocab_size": 35, "n_whitespaces": 75, "language": "en" } }, { "id": 244298, "commit_id": "f3a451abab8fc89810b317ca0a88ee9fd12cb0c2", "repo": "mmdetection", "path": "tools/analysis_tools/analyze_results.py", "file_name": "analyze_results.py", "fun_name": "detection_evaluate", "commit_message": "[Feature] Support panoptic segmentation result analysis (#7922)\n\n* support analyze panoptic segmentation result\r\n\r\n* fix lint\r\n\r\n* update docstring\r\n\r\n* update docstring\r\n\r\n* set print_log=False by default\r\n\r\n* update\r\n\r\n* fix bug 8035", "code": "def detection_evaluate(self, dataset, results, topk=20, eval_fn=None):\n \n if eval_fn is None:\n eval_fn = bbox_map_eval\n else:\n assert callable(eval_fn)\n\n prog_bar = mmcv.ProgressBar(len(results))\n _mAPs = {}\n for i, (result, ) in enumerate(zip(results)):\n # self.dataset[i] should not call directly\n # because there is a risk of mismatch\n data_info = dataset.prepare_train_img(i)\n mAP = eval_fn(result, data_info['ann_info'])\n _mAPs[i] = mAP\n prog_bar.update()\n # descending select topk image\n _mAPs = list(sorted(_mAPs.items(), key=lambda kv: kv[1]))\n good_mAPs = _mAPs[-topk:]\n bad_mAPs = _mAPs[:topk]\n\n return good_mAPs, bad_mAPs\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 238, "n_words": 73, "vocab_size": 58, "complexity": 3, "nloc": 16, "token_counts": 136, "n_ast_nodes": 219, "n_identifiers": 28, "d_id": 70313, "documentation": { "docstring": "Evaluation for object detection.\n\n Args:\n dataset (Dataset): A PyTorch dataset.\n results (list): Object detection results from test\n results pkl file.\n topk (int): Number of the highest topk and\n lowest topk after evaluation index sorting. 
Default: 20.\n eval_fn (callable, optional): Eval function, Default: None.\n\n Returns:\n tuple: A tuple contains good samples and bad samples.\n good_mAPs (dict[int, float]): A dict contains good\n samples's indices in dataset and model's\n performance on them.\n bad_mAPs (dict[int, float]): A dict contains bad\n samples's indices in dataset and model's\n performance on them.\n ", "n_words": 85, "vocab_size": 58, "n_whitespaces": 297, "language": "en" } }, { "id": 83638, "commit_id": "327ff9ea0f5e4712a34d767fee55a549cc1d3f39", "repo": "zulip", "path": "zerver/tests/test_link_embed.py", "file_name": "test_link_embed.py", "fun_name": "test_page_with_og", "commit_message": "preview: Use a dataclass for the embed data.\n\nThis is significantly cleaner than passing around `Dict[str, Any]` all\nof the time.", "code": "def test_page_with_og(self) -> None:\n html = b\n\n parser = OpenGraphParser(html, \"text/html; charset=UTF-8\")\n result = parser.extract_data()\n self.assertEqual(result.title, \"The Rock\")\n self.assertEqual(result.description, \"The Rock film\")\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 56, "n_words": 22, "vocab_size": 19, "complexity": 1, "nloc": 14, "token_counts": 46, "n_ast_nodes": 79, "n_identifiers": 10, "d_id": 17698, "documentation": { "docstring": "\n \n \n \n \n \n \n \n ", "n_words": 27, "vocab_size": 18, "n_whitespaces": 96, "language": "en" } }, { "id": 48280, "commit_id": "5d8cda8c5be42c8daaaa904d29a1011833c0c699", "repo": "airflow", "path": "airflow/migrations/versions/0109_1de7bc13c950_add_index_for_event_in_log.py", "file_name": "0109_1de7bc13c950_add_index_for_event_in_log.py", "fun_name": "upgrade", "commit_message": "Add index for event column in log table (#23625)", "code": "def upgrade():\n \n op.create_index('idx_log_event', 'log', ['event'], unique=False)\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 21, "n_ast_nodes": 40, "n_identifiers": 4, "d_id": 9421, "documentation": { "docstring": "Apply Add index for ``event`` column in ``log`` table.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 205717, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/options.py", "file_name": "options.py", "fun_name": "get_fields", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_fields(self, include_parents=True, include_hidden=False):\n \n if include_parents is False:\n include_parents = PROXY_PARENTS\n return self._get_fields(\n include_parents=include_parents, include_hidden=include_hidden\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 66, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 6, "token_counts": 35, "n_ast_nodes": 55, "n_identifiers": 6, "d_id": 51174, "documentation": { "docstring": "\n Return a list of fields associated to the model. By default, include\n forward and reverse fields, fields derived from inheritance, but not\n hidden fields. 
The returned fields can be changed using the parameters:\n\n - include_parents: include fields derived from inheritance\n - include_hidden: include fields that have a related_name that\n starts with a \"+\"\n ", "n_words": 53, "vocab_size": 40, "n_whitespaces": 123, "language": "en" } }, { "id": 73205, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/modeladmin/tests/test_page_modeladmin.py", "file_name": "test_page_modeladmin.py", "fun_name": "test_location_present", "commit_message": "Reformat with black", "code": "def test_location_present(self):\n \n response = self.get(4)\n self.assertContains(response, \"The North Pole\", 1)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 42, "n_identifiers": 5, "d_id": 15989, "documentation": { "docstring": "\n The location should appear once, in the field listing\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 32820, "commit_id": "5cd40323684c183c30b34758aea1e877996a7ac9", "repo": "transformers", "path": "src/transformers/utils/hub.py", "file_name": "hub.py", "fun_name": "_patch_hf_hub_tqdm", "commit_message": "Use new huggingface_hub tools for download models (#18438)\n\n* Draft new cached_file\r\n\r\n* Initial draft for config and model\r\n\r\n* Small fixes\r\n\r\n* Fix first batch of tests\r\n\r\n* Look in cache when internet is down\r\n\r\n* Fix last tests\r\n\r\n* Bad black, not fixing all quality errors\r\n\r\n* Make diff less\r\n\r\n* Implement change for TF and Flax models\r\n\r\n* Add tokenizer and feature extractor\r\n\r\n* For compatibility with main\r\n\r\n* Add utils to move the cache and auto-do it at first use.\r\n\r\n* Quality\r\n\r\n* Deal with empty commit shas\r\n\r\n* Deal with empty etag\r\n\r\n* Address review comments", "code": "def _patch_hf_hub_tqdm():\n \n old_tqdm = huggingface_hub.file_download.tqdm\n huggingface_hub.file_download.tqdm = tqdm\n yield\n huggingface_hub.file_download.tqdm = old_tqdm\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 27, "n_words": 12, "vocab_size": 7, "complexity": 1, "nloc": 5, "token_counts": 27, "n_ast_nodes": 48, "n_identifiers": 5, "d_id": 5987, "documentation": { "docstring": "\n A context manager to make huggingface hub use the tqdm version of Transformers (which is controlled by some utils)\n in logging.\n ", "n_words": 21, "vocab_size": 21, "n_whitespaces": 31, "language": "en" } }, { "id": 3777, "commit_id": "a3aae8017a0a40ff2006e2567f71dccb04c997a5", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_async_job.py", "file_name": "test_async_job.py", "fun_name": "test_failed_execution", "commit_message": "🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805)\n\n* Facebook Marketing performance improvement\r\n\r\n* add comments and little refactoring\r\n\r\n* fix integration tests with the new config\r\n\r\n* improve job status handling, limit concurrency to 10\r\n\r\n* fix campaign jobs, refactor manager\r\n\r\n* big refactoring of async jobs, support random order of slices\r\n\r\n* update source _read_incremental to hook new state logic\r\n\r\n* fix issues with timeout\r\n\r\n* remove debugging and clean up, improve retry 
logic\r\n\r\n* merge changes from #8234\r\n\r\n* fix call super _read_increment\r\n\r\n* generalize batch execution, add use_batch flag\r\n\r\n* improve coverage, do some refactoring of spec\r\n\r\n* update test, remove overrides of source\r\n\r\n* add split by AdSet\r\n\r\n* add smaller insights\r\n\r\n* fix end_date < start_date case\r\n\r\n* add account_id to PK\r\n\r\n* add notes\r\n\r\n* fix new streams\r\n\r\n* fix reversed incremental stream\r\n\r\n* update spec.json for SAT\r\n\r\n* upgrade CDK and bump version\r\n\r\nCo-authored-by: Dmytro Rezchykov \r\nCo-authored-by: Eugene Kulak ", "code": "def test_failed_execution(self, api, started_job, batch):\n \n jobs = [started_job for _ in range(49)]\n batch.execute.side_effect = [batch, batch, None]\n\n update_in_batch(api=api, jobs=jobs)\n\n assert started_job.update_job.call_count == 49\n assert len(api.new_batch.return_value) == 49\n assert batch.execute.call_count == 3\n\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 80, "n_words": 31, "vocab_size": 25, "complexity": 2, "nloc": 7, "token_counts": 74, "n_ast_nodes": 111, "n_identifiers": 16, "d_id": 559, "documentation": { "docstring": "Should execute batch until there are no failed tasks", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 298435, "commit_id": "982e314de630de2fe8e379b6f1106ec9fa945335", "repo": "core", "path": "tests/components/logbook/test_init.py", "file_name": "test_init.py", "fun_name": "test_logbook_entity_matches_only_multiple", "commit_message": "Use recorder_mock in tests (#70363)\n\nCo-authored-by: Paulus Schoutsen ", "code": "async def test_logbook_entity_matches_only_multiple(hass, hass_client, recorder_mock):\n \n await async_setup_component(hass, \"logbook\", {})\n assert await async_setup_component(\n hass,\n \"switch\",\n {\n \"switch\": {\n \"platform\": \"template\",\n \"switches\": {\n \"test_template_switch\": {\n \"value_template\": \"{{ states.switch.test_state.state }}\",\n \"turn_on\": {\n \"service\": \"switch.turn_on\",\n \"entity_id\": \"switch.test_state\",\n },\n \"turn_off\": {\n \"service\": \"switch.turn_off\",\n \"entity_id\": \"switch.test_state\",\n },\n }\n },\n }\n },\n )\n await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)\n await hass.async_block_till_done()\n await hass.async_start()\n await hass.async_block_till_done()\n\n # Entity added (should not be logged)\n hass.states.async_set(\"switch.test_state\", STATE_ON)\n hass.states.async_set(\"light.test_state\", STATE_ON)\n\n await hass.async_block_till_done()\n\n # First state change (should be logged)\n hass.states.async_set(\"switch.test_state\", STATE_OFF)\n hass.states.async_set(\"light.test_state\", STATE_OFF)\n\n await hass.async_block_till_done()\n\n switch_turn_off_context = ha.Context(\n id=\"9c5bd62de45711eaaeb351041eec8dd9\",\n user_id=\"9400facee45711eaa9308bfd3d19e474\",\n )\n hass.states.async_set(\n \"switch.test_state\", STATE_ON, context=switch_turn_off_context\n )\n hass.states.async_set(\"light.test_state\", STATE_ON, context=switch_turn_off_context)\n await hass.async_block_till_done()\n\n await hass.async_add_executor_job(trigger_db_commit, hass)\n await hass.async_block_till_done()\n await hass.async_add_executor_job(hass.data[recorder.DATA_INSTANCE].block_till_done)\n\n client = await hass_client()\n\n # Today time 00:00:00\n start = dt_util.utcnow().date()\n start_date = 
datetime(start.year, start.month, start.day)\n\n # Test today entries with filter by end_time\n end_time = start + timedelta(hours=24)\n response = await client.get(\n f\"/api/logbook/{start_date.isoformat()}?end_time={end_time}&entity=switch.test_state,light.test_state&entity_matches_only\"\n )\n assert response.status == HTTPStatus.OK\n json_dict = await response.json()\n\n assert len(json_dict) == 4\n\n assert json_dict[0][\"entity_id\"] == \"switch.test_state\"\n\n assert json_dict[1][\"entity_id\"] == \"light.test_state\"\n\n assert json_dict[2][\"entity_id\"] == \"switch.test_state\"\n assert json_dict[2][\"context_user_id\"] == \"9400facee45711eaa9308bfd3d19e474\"\n\n assert json_dict[3][\"entity_id\"] == \"light.test_state\"\n assert json_dict[3][\"context_user_id\"] == \"9400facee45711eaa9308bfd3d19e474\"\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 684, "n_words": 174, "vocab_size": 100, "complexity": 1, "nloc": 62, "token_counts": 381, "n_ast_nodes": 680, "n_identifiers": 45, "d_id": 97379, "documentation": { "docstring": "Test the logbook view with a multiple entities and entity_matches_only.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 54599, "commit_id": "cfe630e97a5942c285b25d3bea5f1a7a47c4d9c5", "repo": "prefect", "path": "tests/utilities/test_asyncio.py", "file_name": "test_asyncio.py", "fun_name": "test_gather_is_robust_with_return_types_that_break_equality_checks", "commit_message": "Fix issue where gather can fail when a task returns a pandas object", "code": "async def test_gather_is_robust_with_return_types_that_break_equality_checks():\n \n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 6, "n_words": 3, "vocab_size": 3, "complexity": 3, "nloc": 7, "token_counts": 54, "n_ast_nodes": 13, "n_identifiers": 1, "d_id": 11107, "documentation": { "docstring": "\n Some libraries like pandas override the equality operator and can fail if gather\n performs an __eq__ check with the GatherIncomplete type\n ", "n_words": 21, "vocab_size": 20, "n_whitespaces": 31, "language": "en" } }, { "id": 111722, "commit_id": "a36dc07e8d39ec4438fd660c98f6f4551ff5f4a6", "repo": "nni", "path": "nni/retiarii/nn/pytorch/api.py", "file_name": "api.py", "fun_name": "_valuechoice_staticmethod_helper", "commit_message": "Composition of `ValueChoice` (#4435)", "code": "def _valuechoice_staticmethod_helper(orig_func):\n orig_func.__doc__ += \n return orig_func\n\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 9, "token_counts": 12, "n_ast_nodes": 22, "n_identifiers": 3, "d_id": 24473, "documentation": { "docstring": "\n Notes\n -----\n This function performs lazy evaluation.\n Only the expression will be recorded when the function is called.\n The real evaluation happens when the inner value choice has determined its final decision.\n If no value choice is contained in the parameter list, the evaluation will be intermediate.", "n_words": 47, "vocab_size": 35, "n_whitespaces": 89, "language": "en" } }, { "id": 5815, "commit_id": "2a157d452611d37cf50ccb7d56ff1a06e9790ecb", "repo": "InstaPy", "path": "instapy/unfollow_util.py", "file_name": "unfollow_util.py", "fun_name": "get_buttons_from_dialog", 
"commit_message": "PR - Fix `extract_text_from_element()`and `find_element*()` to `find_element()` (#6438)\n\n* Updated getUserData() and find_element*\r\nSigned-off-by: elulcao \r\n\r\nThanks @breuerfelix for reviewing, 🚀 \r\nPeople in this thread please let me know if something is not OK, IG changed a lot these days. 🤗 @her", "code": "def get_buttons_from_dialog(dialog, channel):\n \n\n buttons = None\n\n if channel == \"Follow\":\n # get follow buttons. This approach will find the follow buttons and\n # ignore the Unfollow/Requested buttons.\n buttons = dialog.find_elements(\n By.XPATH, read_xpath(get_buttons_from_dialog.__name__, \"follow_button\")\n )\n\n elif channel == \"Unfollow\":\n buttons = dialog.find_elements(\n By.XPATH, read_xpath(get_buttons_from_dialog.__name__, \"unfollow_button\")\n )\n\n return buttons\n\n", "url": "https://github.com/InstaPy/InstaPy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 126, "n_words": 47, "vocab_size": 31, "complexity": 3, "nloc": 11, "token_counts": 61, "n_ast_nodes": 105, "n_identifiers": 9, "d_id": 838, "documentation": { "docstring": "Gets buttons from the `Followers` or `Following` dialog boxes", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 196293, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/geometry/polygon.py", "file_name": "polygon.py", "fun_name": "circumcenter", "commit_message": "Updated import locations", "code": "def circumcenter(self):\n \n return self.center\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 19, "n_identifiers": 3, "d_id": 47793, "documentation": { "docstring": "\n Alias for center.\n\n Examples\n ========\n\n >>> from sympy import RegularPolygon, Point\n >>> rp = RegularPolygon(Point(0, 0), 5, 4)\n >>> rp.circumcenter\n Point2D(0, 0)\n ", "n_words": 22, "vocab_size": 20, "n_whitespaces": 79, "language": "en" } }, { "id": 176371, "commit_id": "28b3014d68d2b4e40d3e02219770296a827bd55c", "repo": "networkx", "path": "networkx/algorithms/matching.py", "file_name": "matching.py", "fun_name": "is_maximal_matching", "commit_message": "Update matching functions for error validation and speed (#4897)\n\n* First steps to update matching functions for #4644\r\n\r\nExpand tests\r\nChange API to raise NetworkXError when matching involves nodes not in G\r\nUpdate is_*_matching to 100+ times faster.\r\n\r\n* improve matching_dict_to_set and docs for min_weight_matching\r\n\r\n* fix sphinx error", "code": "def is_maximal_matching(G, matching):\n \n if isinstance(matching, dict):\n matching = matching_dict_to_set(matching)\n # If the given set is not a matching, then it is not a maximal matching.\n edges = set()\n nodes = set()\n for edge in matching:\n if len(edge) != 2:\n raise nx.NetworkXError(f\"matching has non-2-tuple edge {edge}\")\n u, v = edge\n if u not in G or v not in G:\n raise nx.NetworkXError(f\"matching contains edge {edge} with node not in G\")\n if u == v:\n return False\n if not G.has_edge(u, v):\n return False\n if u in nodes or v in nodes:\n return False\n nodes.update(edge)\n edges.add(edge)\n edges.add((v, u))\n # A matching is maximal if adding any new edge from G to it\n # causes the resulting set to match some node twice.\n # Be careful to check for adding selfloops\n for u, v in 
G.edges:\n if (u, v) not in edges:\n # could add edge (u, v) to edges and have a bigger matching\n if u not in nodes and v not in nodes and u != v:\n return False\n return True\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 371, "n_words": 169, "vocab_size": 84, "complexity": 15, "nloc": 25, "token_counts": 168, "n_ast_nodes": 276, "n_identifiers": 18, "d_id": 41857, "documentation": { "docstring": "Return True if ``matching`` is a maximal matching of ``G``\n\n A *maximal matching* in a graph is a matching in which adding any\n edge would cause the set to no longer be a valid matching.\n\n Parameters\n ----------\n G : NetworkX graph\n\n matching : dict or set\n A dictionary or set representing a matching. If a dictionary, it\n must have ``matching[u] == v`` and ``matching[v] == u`` for each\n edge ``(u, v)`` in the matching. If a set, it must have elements\n of the form ``(u, v)``, where ``(u, v)`` is an edge in the\n matching.\n\n Returns\n -------\n bool\n Whether the given set or dictionary represents a valid maximal\n matching in the graph.\n\n ", "n_words": 112, "vocab_size": 66, "n_whitespaces": 191, "language": "en" } }, { "id": 128028, "commit_id": "1dede1c296a29332171df87b31d9ba92c26b40f7", "repo": "ray", "path": "python/ray/air/checkpoint.py", "file_name": "checkpoint.py", "fun_name": "uri", "commit_message": "[air] Add `Checkpoint.uri` to return checkpoint URI, if available (#28731)\n\nA common ask is to retrieve the URI of a cloud checkpoint, e.g. after training. This PR introduces a property to the `Checkpoint` class that will return a URI if available and reachable from the local node (i.e. cloud storage or locally available file).\r\n\r\nIf accepted, we should then return URI checkpoints from Tune if syncing to cloud is enabled.\r\n\r\nSigned-off-by: Kai Fricke ", "code": "def uri(self) -> Optional[str]:\n \n if self._uri:\n return self._uri\n\n if self._local_path and Path(self._local_path).exists():\n return \"file://\" + self._local_path\n\n return None\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 68, "n_words": 18, "vocab_size": 14, "complexity": 4, "nloc": 29, "token_counts": 44, "n_ast_nodes": 74, "n_identifiers": 8, "d_id": 28588, "documentation": { "docstring": "Return checkpoint URI, if available.\n\n This will return a URI to cloud storage if this checkpoint is\n persisted on cloud, or a local ``file://`` URI if this checkpoint\n is persisted on local disk and available on the current node.\n\n In all other cases, this will return None. 
Users can then choose to\n persist to cloud with\n :meth:`Checkpoint.to_uri() `.\n\n Example:\n\n >>> from ray.air import Checkpoint\n >>> checkpoint = Checkpoint.from_uri(\"s3://some-bucket/some-location\")\n >>> assert checkpoint.uri == \"s3://some-bucket/some-location\"\n >>> checkpoint = Checkpoint.from_dict({\"data\": 1})\n >>> assert checkpoint.uri == None\n\n Returns:\n Checkpoint URI if this URI is reachable from the current node (e.g.\n cloud storage or locally available file URI).\n\n ", "n_words": 103, "vocab_size": 62, "n_whitespaces": 243, "language": "en" } }, { "id": 197011, "commit_id": "e0dc14eca132f37c5f49369eb4051eae37c9b119", "repo": "sympy", "path": "sympy/functions/combinatorial/numbers.py", "file_name": "numbers.py", "fun_name": "nD", "commit_message": "Refactored import ordering in functions", "code": "def nD(i=None, brute=None, *, n=None, m=None):\n \n from sympy.integrals.integrals import integrate\n from sympy.functions.special.polynomials import laguerre\n from sympy.abc import x", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 30, "n_words": 18, "vocab_size": 14, "complexity": 40, "nloc": 67, "token_counts": 562, "n_ast_nodes": 67, "n_identifiers": 14, "d_id": 48287, "documentation": { "docstring": "return the number of derangements for: ``n`` unique items, ``i``\n items (as a sequence or multiset), or multiplicities, ``m`` given\n as a sequence or multiset.\n\n Examples\n ========\n\n >>> from sympy.utilities.iterables import generate_derangements as enum\n >>> from sympy.functions.combinatorial.numbers import nD\n\n A derangement ``d`` of sequence ``s`` has all ``d[i] != s[i]``:\n\n >>> set([''.join(i) for i in enum('abc')])\n {'bca', 'cab'}\n >>> nD('abc')\n 2\n\n Input as iterable or dictionary (multiset form) is accepted:\n\n >>> assert nD([1, 2, 2, 3, 3, 3]) == nD({1: 1, 2: 2, 3: 3})\n\n By default, a brute-force enumeration and count of multiset permutations\n is only done if there are fewer than 9 elements. There may be cases when\n there is high multiplicty with few unique elements that will benefit\n from a brute-force enumeration, too. For this reason, the `brute`\n keyword (default None) is provided. When False, the brute-force\n enumeration will never be used. 
When True, it will always be used.\n\n >>> nD('1111222233', brute=True)\n 44\n\n For convenience, one may specify ``n`` distinct items using the\n ``n`` keyword:\n\n >>> assert nD(n=3) == nD('abc') == 2\n\n Since the number of derangments depends on the multiplicity of the\n elements and not the elements themselves, it may be more convenient\n to give a list or multiset of multiplicities using keyword ``m``:\n\n >>> assert nD('abc') == nD(m=(1,1,1)) == nD(m={1:3}) == 2\n\n ", "n_words": 217, "vocab_size": 140, "n_whitespaces": 304, "language": "en" } }, { "id": 314204, "commit_id": "90e1fb6ce2faadb9a35fdbe1774fce7b4456364f", "repo": "core", "path": "homeassistant/components/weather/__init__.py", "file_name": "__init__.py", "fun_name": "_temperature_unit", "commit_message": "Weather unit conversion (#73441)\n\nCo-authored-by: Erik ", "code": "def _temperature_unit(self) -> str:\n \n if (\n weather_option_temperature_unit := self._weather_option_temperature_unit\n ) is not None:\n return weather_option_temperature_unit\n\n return self._default_temperature_unit\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 67, "n_words": 17, "vocab_size": 15, "complexity": 2, "nloc": 10, "token_counts": 26, "n_ast_nodes": 43, "n_identifiers": 6, "d_id": 112812, "documentation": { "docstring": "Return the converted unit of measurement for temperature.\n\n Should not be set by integrations.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 28, "language": "en" } }, { "id": 197090, "commit_id": "807ed058b5804382971f0045fa1395f087ff12cb", "repo": "sympy", "path": "sympy/physics/mechanics/particle.py", "file_name": "particle.py", "fun_name": "set_potential_energy", "commit_message": "Update the set_potential_energy() deprecation", "code": "def set_potential_energy(self, scalar):\n sympy_deprecation_warning(\n ,\n deprecated_since_version=\"1.5\",\n active_deprecations_target=\"deprecated-set-potential-energy\",\n )\n self.potential_energy = scalar\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 56, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 12, "token_counts": 25, "n_ast_nodes": 43, "n_identifiers": 7, "d_id": 48332, "documentation": { "docstring": "\nThe sympy.physics.mechanics.Particle.set_potential_energy()\nmethod is deprecated. 
Instead use\n\n P.potential_energy = scalar\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 23, "language": "en" } }, { "id": 86320, "commit_id": "ec6965d597186ae0ecfba786472154f1c3cb7e42", "repo": "sentry", "path": "tests/sentry/middleware/test_api_gateway.py", "file_name": "test_api_gateway.py", "fun_name": "verify_request_params", "commit_message": "feat(api-gateway): Unit test helpers (#39424)\n\nThese functions will help with the creation of new test cases for the\r\nAPI gateway", "code": "def verify_request_params(params, headers):\n \n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 6, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 3, "token_counts": 12, "n_ast_nodes": 15, "n_identifiers": 3, "d_id": 18100, "documentation": { "docstring": "Wrapper for a callback function for responses.add_callback", "n_words": 7, "vocab_size": 6, "n_whitespaces": 6, "language": "en" } }, { "id": 20041, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/distro.py", "file_name": "distro.py", "fun_name": "id", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def id(self):\n # type: () -> str\n \n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 5, "nloc": 15, "token_counts": 82, "n_ast_nodes": 14, "n_identifiers": 2, "d_id": 3190, "documentation": { "docstring": "Return the distro ID of the OS distribution, as a string.\n\n For details, see :func:`distro.id`.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 29, "language": "en" } }, { "id": 201726, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/backends/oracle/tests.py", "file_name": "tests.py", "fun_name": "test_boolean_constraints", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_boolean_constraints(self):\n \n for field in (BooleanField(), BooleanField(null=True)):\n with self.subTest(field=field):\n field.set_attributes_from_name(\"is_nice\")\n self.assertIn('\"IS_NICE\" IN (0,1)', field.db_check(connection))\n\n\n@unittest.skipUnless(connection.vendor == \"oracle\", \"Oracle tests\")", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "@unittest.skipUnless(connection.vendor == \"oracle\", \"Oracle tests\")", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 73, "n_words": 19, "vocab_size": 19, "complexity": 2, "nloc": 5, "token_counts": 51, "n_ast_nodes": 113, "n_identifiers": 13, "d_id": 49982, "documentation": { "docstring": "Boolean fields have check constraints on their values.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 269911, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": 
"keras/callbacks.py", "file_name": "callbacks.py", "fun_name": "on_predict_batch_begin", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def on_predict_batch_begin(self, batch, logs=None):\n \n if self._should_call_predict_batch_hooks:\n self._call_batch_hook(ModeKeys.PREDICT, \"begin\", batch, logs=logs)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 35, "n_words": 10, "vocab_size": 9, "complexity": 2, "nloc": 3, "token_counts": 33, "n_ast_nodes": 52, "n_identifiers": 8, "d_id": 80325, "documentation": { "docstring": "Calls the `on_predict_batch_begin` methods of its callbacks.\n\n Args:\n batch: Integer, index of batch within the current epoch.\n logs: Dict, contains the return value of `model.predict_step`,\n it typically returns a dict with a key 'outputs' containing\n the model's outputs.\n ", "n_words": 38, "vocab_size": 32, "n_whitespaces": 100, "language": "en" } }, { "id": 117547, "commit_id": "3f1a5c30c2ccbd78b21f1f41b7dfdfca87bb7135", "repo": "mindsdb", "path": "tests/unit/test_project_structure.py", "file_name": "test_project_structure.py", "fun_name": "test_version_managing", "commit_message": "update and delete model version\nrenaming (predictor->model)", "code": "def test_version_managing(self, data_handler):\n # set up\n\n df = pd.DataFrame([\n {'a': 1, 'b': dt.datetime(2020, 1, 1)},\n {'a': 2, 'b': dt.datetime(2020, 1, 2)},\n {'a': 1, 'b': dt.datetime(2020, 1, 3)},\n ])\n self.set_handler(data_handler, name='pg', tables={'tasks': df})\n\n # ================= retrain cycles =====================\n\n # create folder\n self.run_sql('create database proj')\n\n # -- create model --\n self.run_sql(\n \n )\n self.wait_predictor('proj', 'task_model')\n\n assert data_handler().native_query.call_args[0][0] == 'select * from tasks'\n\n # tag works in create model\n ret = self.run_sql('select * from proj.models')\n assert ret['TAG'][0] == 'first'\n\n # use model\n ret = self.run_sql()\n\n assert len(ret) == 3\n assert ret.predicted[0] == 42\n\n # -- retrain predictor with tag --\n data_handler.reset_mock()\n self.run_sql(\n \n )\n self.wait_predictor('proj', 'task_model', {'tag': 'second'})\n\n # get current model\n ret = self.run_sql('select * from proj.models')\n\n # check target\n assert ret['PREDICT'][0] == 'b'\n\n # check label\n assert ret['TAG'][0] == 'second'\n\n # check integration sql\n assert data_handler().native_query.call_args[0][0] == 'select * from tasks where a=2'\n\n # use model\n ret = self.run_sql()\n assert ret.predicted[0] == 42\n\n # used model has tag 'second'\n models = self.get_models()\n model_id = ret.predictor_id[0]\n assert models[model_id].label == 'second'\n\n # -- retrain again with active=0 --\n data_handler.reset_mock()\n self.run_sql(\n \n )\n self.wait_predictor('proj', 'task_model', {'tag': 'third'})\n\n ret = self.run_sql('select * from proj.models')\n\n # check target is from previous retrain\n assert ret['PREDICT'][0] == 'b'\n\n # use model\n ret = self.run_sql()\n\n # used model has tag 'second' (previous)\n models = self.get_models()\n model_id = ret.predictor_id[0]\n assert models[model_id].label == 'second'\n\n # ================ working with inactive versions =================\n\n # run 3st version model and check used model version\n ret = self.run_sql()\n\n models = self.get_models()\n model_id = ret.predictor_id[0]\n assert models[model_id].label == 'third'\n\n # one-line query model by 
version\n ret = self.run_sql('SELECT * from proj.task_model.3 where a=1 and b=2')\n model_id = ret.predictor_id[0]\n assert models[model_id].label == 'third'\n\n # not existing version\n with pytest.raises(Exception) as exc_info:\n self.run_sql(\n 'SELECT * from proj.task_model.4 where a=1 and b=2',\n )\n assert 'does not exists' in str(exc_info.value)\n\n # ================== managing versions =========================\n\n # show models command\n # Show models where \n ret = self.run_sql('Show models')\n assert len(ret) == 1 and ret['NAME'][0] == 'task_model'\n\n ret = self.run_sql('Show models from proj')\n assert len(ret) == 1 and ret['NAME'][0] == 'task_model'\n\n ret = self.run_sql('Show models in proj')\n assert len(ret) == 1 and ret['NAME'][0] == 'task_model'\n\n ret = self.run_sql(\"Show models where name='task_model'\")\n assert len(ret) == 1 and ret['NAME'][0] == 'task_model'\n\n ret = self.run_sql(\"Show models from proj where name='xxx'\")\n assert len(ret) == 0\n\n # ----------------\n\n # See all versions\n ret = self.run_sql('select * from proj.models_versions')\n # we have all tags in versions\n assert set(ret['TAG']) == {'first', 'second', 'third'}\n\n # Set active selected version\n self.run_sql()\n\n # get active version\n ret = self.run_sql('select * from proj.models_versions where active = 1')\n assert ret['TAG'][0] == 'first'\n\n # use active version ?\n\n # Delete specific version\n self.run_sql()\n\n # deleted version not in list\n ret = self.run_sql('select * from proj.models_versions')\n assert len(ret) == 2\n assert 'second' not in ret['TAG']\n\n # try to use deleted version\n with pytest.raises(Exception) as exc_info:\n self.run_sql(\n 'SELECT * from proj.task_model.2 where a=1',\n )\n assert 'does not exists' in str(exc_info.value)\n\n # exception with deleting active version\n with pytest.raises(Exception) as exc_info:\n self.run_sql()\n assert 'is not found' in str(exc_info.value)\n\n # drop predictor and check model is deleted and no versions\n self.run_sql('drop predictor proj.task_model')\n ret = self.run_sql('select * from proj.models')\n assert len(ret) == 0\n\n ret = self.run_sql('select * from proj.models_versions')\n assert len(ret) == 0\n\n\n", "url": "https://github.com/mindsdb/mindsdb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 1445, "n_words": 536, "vocab_size": 173, "complexity": 5, "nloc": 130, "token_counts": 716, "n_ast_nodes": 1293, "n_identifiers": 31, "d_id": 26025, "documentation": { "docstring": "\n CREATE PREDICTOR proj.task_model\n from pg (select * from tasks)\n PREDICT a\n using engine='dummy_ml', tag = 'first'\n \n SELECT m.*\n FROM pg.tasks as t\n JOIN proj.task_model as m\n \n retrain proj.task_model\n from pg (select * from tasks where a=2)\n PREDICT b\n using tag = 'second'\n \n SELECT m.*\n FROM pg.tasks as t\n JOIN proj.task_model as m\n \n retrain proj.task_model\n from pg (select * from tasks where a=2)\n PREDICT a\n using tag='third', active=0\n \n SELECT m.*\n FROM pg.tasks as t\n JOIN proj.task_model as m\n \n SELECT m.*\n FROM pg.tasks as t\n JOIN proj.task_model.3 as m\n \n update proj.models_versions \n set active=1\n where version=1 and name='task_model' \n \n delete from proj.models_versions \n where version=2 \n and name='task_model'\n \n delete from proj.models_versions \n where version=3 \n and model='task_model'\n ", "n_words": 109, "vocab_size": 43, "n_whitespaces": 654, "language": "en" } }, { "id": 261258, "commit_id": 
"5ceb8a6a031ddff26a7ede413db1b53edb64166a", "repo": "scikit-learn", "path": "sklearn/ensemble/_hist_gradient_boosting/grower.py", "file_name": "grower.py", "fun_name": "_compute_interactions", "commit_message": "ENH FEA add interaction constraints to HGBT (#21020)\n\nCo-authored-by: Loïc Estève ", "code": "def _compute_interactions(self, node):\n r\n # Note:\n # - Case of no interactions is already captured before function call.\n # - This is for nodes that are already split and have a\n # node.split_info.feature_idx.\n allowed_features = set()\n interaction_cst_indices = []\n for i in node.interaction_cst_indices:\n if node.split_info.feature_idx in self.interaction_cst[i]:\n interaction_cst_indices.append(i)\n allowed_features.update(self.interaction_cst[i])\n return (\n np.fromiter(allowed_features, dtype=np.uint32, count=len(allowed_features)),\n interaction_cst_indices,\n )\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 193, "n_words": 56, "vocab_size": 47, "complexity": 3, "nloc": 37, "token_counts": 81, "n_ast_nodes": 128, "n_identifiers": 18, "d_id": 76716, "documentation": { "docstring": "Compute features allowed by interactions to be inherited by child nodes.\n\n Example: Assume constraints [{0, 1}, {1, 2}].\n 1 <- Both constraint groups could be applied from now on\n / \\\n 1 2 <- Left split still fulfills both constraint groups.\n / \\ / \\ Right split at feature 2 has only group {1, 2} from now on.\n\n LightGBM uses the same logic for overlapping groups. See\n https://github.com/microsoft/LightGBM/issues/4481 for details.\n\n Parameters:\n ----------\n node : TreeNode\n A node that might have children. Based on its feature_idx, the interaction\n constraints for possible child nodes are computed.\n\n Returns\n -------\n allowed_features : ndarray, dtype=uint32\n Indices of features allowed to split for children.\n interaction_cst_indices : list of ints\n Indices of the interaction sets that have to be applied on splits of\n child nodes. The fewer sets the stronger the constraint as fewer sets\n contain fewer features.\n ", "n_words": 141, "vocab_size": 90, "n_whitespaces": 333, "language": "en" } }, { "id": 233318, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/sankey/_node.py", "file_name": "_node.py", "fun_name": "groups", "commit_message": "switch to black .22", "code": "def groups(self):\n \n return self[\"groups\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 64762, "documentation": { "docstring": "\n Groups of nodes. Each group is defined by an array with the\n indices of the nodes it contains. 
Multiple groups can be\n specified.\n\n The 'groups' property is an info array that may be specified as:\n * a 2D list where:\n The 'groups[i][j]' property is a number and may be specified as:\n - An int or float\n\n Returns\n -------\n list\n ", "n_words": 59, "vocab_size": 44, "n_whitespaces": 141, "language": "en" } }, { "id": 42750, "commit_id": "4a5250774be8f48629294785801879277f42cc62", "repo": "airflow", "path": "tests/providers/apache/beam/operators/test_beam.py", "file_name": "test_beam.py", "fun_name": "test_exec_dataflow_runner", "commit_message": "Added missing project_id to the wait_for_job (#24020)", "code": "def test_exec_dataflow_runner(self, gcs_hook, dataflow_hook_mock, beam_hook_mock, persist_link_mock):\n \n dataflow_config = DataflowConfiguration()\n self.operator.runner = \"DataflowRunner\"\n self.operator.dataflow_config = dataflow_config\n gcs_provide_file = gcs_hook.return_value.provide_file\n self.operator.execute(None)\n job_name = dataflow_hook_mock.build_dataflow_job_name.return_value\n dataflow_hook_mock.assert_called_once_with(\n gcp_conn_id=dataflow_config.gcp_conn_id,\n delegate_to=dataflow_config.delegate_to,\n poll_sleep=dataflow_config.poll_sleep,\n impersonation_chain=dataflow_config.impersonation_chain,\n drain_pipeline=dataflow_config.drain_pipeline,\n cancel_timeout=dataflow_config.cancel_timeout,\n wait_until_finished=dataflow_config.wait_until_finished,\n )\n expected_options = {\n 'project': dataflow_hook_mock.return_value.project_id,\n 'job_name': job_name,\n 'staging_location': 'gs://test/staging',\n 'output': 'gs://test/output',\n 'labels': {'foo': 'bar', 'airflow-version': TEST_VERSION},\n 'region': 'us-central1',\n }\n gcs_provide_file.assert_called_once_with(object_url=PY_FILE)\n persist_link_mock.assert_called_once_with(\n self.operator,\n None,\n expected_options['project'],\n expected_options['region'],\n self.operator.dataflow_job_id,\n )\n beam_hook_mock.return_value.start_python_pipeline.assert_called_once_with(\n variables=expected_options,\n py_file=gcs_provide_file.return_value.__enter__.return_value.name,\n py_options=PY_OPTIONS,\n py_interpreter=PY_INTERPRETER,\n py_requirements=None,\n py_system_site_packages=False,\n process_line_callback=mock.ANY,\n )\n dataflow_hook_mock.return_value.wait_for_done.assert_called_once_with(\n job_id=self.operator.dataflow_job_id,\n job_name=job_name,\n location='us-central1',\n multiple_jobs=False,\n project_id=dataflow_config.project_id,\n )\n dataflow_hook_mock.return_value.provide_authorized_gcloud.assert_called_once_with()\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 538, "n_words": 75, "vocab_size": 66, "complexity": 1, "nloc": 49, "token_counts": 268, "n_ast_nodes": 416, "n_identifiers": 49, "d_id": 7719, "documentation": { "docstring": "Test DataflowHook is created and the right args are passed to\n start_python_dataflow.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 26, "language": "en" } }, { "id": 101722, "commit_id": "e2a77e7c6e84e81f642cb22f528e25e3f2d2dbc1", "repo": "faceswap", "path": "tools/alignments/jobs.py", "file_name": "jobs.py", "fun_name": "_get_no_faces", "commit_message": "Alignments Tool - Typing, Documentation + Re-org", "code": "def _get_no_faces(self) -> Generator[str, None, None]:\n \n self.output_message = \"Frames with no faces\"\n for frame in tqdm(cast(List[Dict[str, str]], self._items),\n desc=self.output_message,\n leave=False):\n logger.trace(frame) # 
type:ignore\n frame_name = frame[\"frame_fullname\"]\n if not self._alignments.frame_has_faces(frame_name):\n logger.debug(\"Returning: '%s'\", frame_name)\n yield frame_name\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 169, "n_words": 34, "vocab_size": 32, "complexity": 3, "nloc": 17, "token_counts": 86, "n_ast_nodes": 137, "n_identifiers": 19, "d_id": 21126, "documentation": { "docstring": " yield each frame that has no face match in alignments file\n\n Yields\n ------\n str\n The frame name of any frames which have no faces\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 64, "language": "en" } }, { "id": 262800, "commit_id": "3aad9af18641aa2181dd86cececc2aeb8a0dba06", "repo": "pyinstaller", "path": "PyInstaller/utils/win32/icon.py", "file_name": "icon.py", "fun_name": "CopyIcons", "commit_message": "Icon translation using PIL (#6697)\n\nConvert icons into the correct platform dependent format using PIL/Pillow if installed.", "code": "def CopyIcons(dstpath, srcpath):\n \n\n if isinstance(srcpath, str):\n # Just a single string, make it a one-element list.\n srcpath = [srcpath]\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 39, "n_words": 19, "vocab_size": 18, "complexity": 12, "nloc": 44, "token_counts": 376, "n_ast_nodes": 36, "n_identifiers": 5, "d_id": 77378, "documentation": { "docstring": "\n Called from building/api.py to handle icons. If the input was by --icon on the command line, srcpath is a single\n string. However, it is possible to modify the spec file adding icon=['foo.ico','bar.ico'] to the EXE() statement.\n In that case, srcpath is a list of strings.\n\n The string format is either path-to-.ico or path-to-.exe,n for n an integer resource index in the .exe. 
In either\n case, the path can be relative or absolute.\n ", "n_words": 72, "vocab_size": 56, "n_whitespaces": 91, "language": "en" } }, { "id": 133122, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/dask/callbacks.py", "file_name": "callbacks.py", "fun_name": "local_ray_callbacks", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def local_ray_callbacks(callbacks=None):\n \n global_callbacks = callbacks is None\n if global_callbacks:\n callbacks, RayDaskCallback.ray_active = (RayDaskCallback.ray_active, set())\n try:\n yield callbacks or ()\n finally:\n if global_callbacks:\n RayDaskCallback.ray_active = callbacks\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 72, "n_words": 25, "vocab_size": 18, "complexity": 5, "nloc": 9, "token_counts": 48, "n_ast_nodes": 82, "n_identifiers": 6, "d_id": 29930, "documentation": { "docstring": "\n Allows Dask-Ray callbacks to work with nested schedulers.\n\n Callbacks will only be used by the first started scheduler they encounter.\n This means that only the outermost scheduler will use global callbacks.\n ", "n_words": 31, "vocab_size": 27, "n_whitespaces": 44, "language": "en" } }, { "id": 248468, "commit_id": "2fc787c341ff540e5880932f116498ec0ed7a2c2", "repo": "synapse", "path": "tests/rest/media/test_media_retention.py", "file_name": "test_media_retention.py", "fun_name": "test_local_media_retention", "commit_message": "Add config options for media retention (#12732)", "code": "def test_local_media_retention(self) -> None:\n \n # Advance 31 days (in seconds)\n self.reactor.advance(31 * 24 * 60 * 60)\n\n # Check that media has been correctly purged.\n # Local media accessed <30 days ago should still exist.\n # Remote media should be unaffected.\n self._assert_if_mxc_uris_purged(\n purged=[\n (\n self.hs.config.server.server_name,\n self.local_not_recently_accessed_media,\n ),\n (self.hs.config.server.server_name, self.local_never_accessed_media),\n ],\n not_purged=[\n (self.hs.config.server.server_name, self.local_recently_accessed_media),\n (self.remote_server_name, self.remote_recently_accessed_media),\n (self.remote_server_name, self.remote_not_recently_accessed_media),\n ],\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 287, "n_words": 59, "vocab_size": 47, "complexity": 1, "nloc": 20, "token_counts": 106, "n_ast_nodes": 158, "n_identifiers": 17, "d_id": 72296, "documentation": { "docstring": "\n Tests that local media that have not been accessed recently is purged, while\n cached remote media is unaffected.\n ", "n_words": 18, "vocab_size": 15, "n_whitespaces": 40, "language": "en" } }, { "id": 198389, "commit_id": "7d773eb18daaef3c54f34d1ac6cbc5b83a5bb16c", "repo": "sympy", "path": "sympy/liealgebras/root_system.py", "file_name": "root_system.py", "fun_name": "add_simple_roots", "commit_message": "Cleanup loops and ranges", "code": "def add_simple_roots(self, root1, root2):\n \n\n alpha = self.simple_roots()\n if root1 > len(alpha) or root2 > len(alpha):\n raise ValueError(\"You've used a root that doesn't exist!\")\n a1 = alpha[root1]\n a2 = alpha[root2]\n newroot = [_a1 + _a2 for _a1, _a2 in zip(a1, a2)]\n return newroot\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", 
"ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 102, "n_words": 42, "vocab_size": 36, "complexity": 4, "nloc": 8, "token_counts": 69, "n_ast_nodes": 110, "n_identifiers": 14, "d_id": 48901, "documentation": { "docstring": "Add two simple roots together\n\n The function takes as input two integers, root1 and root2. It then\n uses these integers as keys in the dictionary of simple roots, and gets\n the corresponding simple roots, and then adds them together.\n\n Examples\n ========\n\n >>> from sympy.liealgebras.root_system import RootSystem\n >>> c = RootSystem(\"A3\")\n >>> newroot = c.add_simple_roots(1, 2)\n >>> newroot\n [1, 0, -1, 0]\n\n ", "n_words": 61, "vocab_size": 47, "n_whitespaces": 139, "language": "en" } }, { "id": 137593, "commit_id": "98fef7732852cdb3e9377cd87c1ee1085b894928", "repo": "ray", "path": "python/ray/tests/test_runtime_env.py", "file_name": "test_runtime_env.py", "fun_name": "test_get_release_wheel_url", "commit_message": "[runtime env] Support python 3.10 for runtime_env conda (#30970)\n\nSigned-off-by: Archit Kulkarni \r\n\r\nconda environments are isolated, so when runtime_env sets up a conda environment it must download the Ray wheel into the conda environment. It must download the wheel that matches the current Python and Ray version running, otherwise there will be incompatibility issues between the workers that use this runtime_env and the other workers and Ray processes.\r\n\r\nThis PR updates the wheel name format logic to support Python 3.10.", "code": "def test_get_release_wheel_url():\n \n # This should be a commit for which wheels have already been built for\n # all platforms and python versions at\n # `s3://ray-wheels/releases/2.2.0//`.\n test_commits = {\"2.2.0\": \"b6af0887ee5f2e460202133791ad941a41f15beb\"}\n for sys_platform in [\"darwin\", \"linux\", \"win32\"]:\n for py_version in ray_constants.RUNTIME_ENV_CONDA_PY_VERSIONS:\n for version, commit in test_commits.items():\n if sys_platform == \"win32\" and py_version == (3, 6):\n # Windows wheels are not built for py3.6 anymore\n continue\n url = get_release_wheel_url(commit, sys_platform, version, py_version)\n assert requests.head(url).status_code == 200, url\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 193, "n_words": 74, "vocab_size": 53, "complexity": 6, "nloc": 9, "token_counts": 80, "n_ast_nodes": 136, "n_identifiers": 14, "d_id": 31197, "documentation": { "docstring": "Test the code that generates the filenames of the `release` branch wheels.", "n_words": 12, "vocab_size": 10, "n_whitespaces": 11, "language": "en" } }, { "id": 186649, "commit_id": "7d9e9a49005de7961e84d2a7c608db57dbab3046", "repo": "certbot", "path": "certbot-apache/certbot_apache/_internal/http_01.py", "file_name": "http_01.py", "fun_name": "prepare_http01_modules", "commit_message": "Add typing to certbot.apache (#9071)\n\n* Add typing to certbot.apache\r\n\r\nCo-authored-by: Adrien Ferrand ", "code": "def prepare_http01_modules(self) -> None:\n \n\n if self.configurator.conf(\"handle-modules\"):\n needed_modules = [\"rewrite\"]\n if self.configurator.version < (2, 4):\n needed_modules.append(\"authz_host\")\n else:\n needed_modules.append(\"authz_core\")\n for mod in needed_modules:\n if mod + \"_module\" not in self.configurator.parser.modules:\n self.configurator.enable_mod(mod, temp=True)\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 152, "n_words": 30, "vocab_size": 26, "complexity": 5, "nloc": 11, "token_counts": 81, "n_ast_nodes": 139, "n_identifiers": 12, "d_id": 45558, "documentation": { "docstring": "Make sure that we have the needed modules available for http01", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 105694, "commit_id": "1ea4d091b7a4b83a85b2eeb8df65115d39af3766", "repo": "datasets", "path": "src/datasets/table.py", "file_name": "table.py", "fun_name": "to_reader", "commit_message": "Fast dataset iter (#5030)\n\n* Fast dataset iter\r\n\r\n* Final improvements + some minor fixes\r\n\r\n* Update src/datasets/arrow_dataset.py\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* Address comments\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>", "code": "def to_reader(self, *args, **kwargs):\n \n if config.PYARROW_VERSION.major < 8:\n raise NotImplementedError(\"`pyarrow>=8.0.0` is required to use this method\")\n return self.table.to_reader(*args, **kwargs)\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 51, "n_words": 19, "vocab_size": 19, "complexity": 2, "nloc": 4, "token_counts": 39, "n_ast_nodes": 65, "n_identifiers": 9, "d_id": 22190, "documentation": { "docstring": "\n Convert the Table to a RecordBatchReader.\n\n Note that this method is zero-copy, it merely exposes the same data under a different API.\n\n Args:\n max_chunksize (:obj:`int`, defaults to :obj:`None`)\n Maximum size for RecordBatch chunks. Individual chunks may be smaller depending\n on the chunk layout of individual columns.\n\n Returns:\n :obj:`pyarrow.RecordBatchReader`\n\n \n\n pyarrow >= 8.0.0 needs to be installed to use this method.\n\n \n ", "n_words": 62, "vocab_size": 54, "n_whitespaces": 171, "language": "en" } }, { "id": 83893, "commit_id": "708204290ecebd608a575f76892489a0caad5836", "repo": "zulip", "path": "zerver/tests/test_subs.py", "file_name": "test_subs.py", "fun_name": "test_you_must_be_realm_admin", "commit_message": "streams: Capitalize \"ID\" in invalid stream errors in API.\n\nThis commit changes the error message from \"Invalid stream id\"\nto \"Invalid stream ID\" for cases where invalid stream IDs are\npassed to API endpoints to make it consistent with other similar\nerror messages.", "code": "def test_you_must_be_realm_admin(self) -> None:\n \n user_profile = self.example_user(\"hamlet\")\n self.login_user(user_profile)\n\n other_realm = do_create_realm(string_id=\"other\", name=\"other\")\n stream = self.make_stream(\"other_realm_stream\", realm=other_realm)\n\n result = self.client_delete(\"/json/streams/\" + str(stream.id))\n self.assert_json_error(result, \"Invalid stream ID\")\n\n # Even becoming a realm admin doesn't help us for an out-of-realm\n # stream.\n do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR, acting_user=None)\n result = self.client_delete(\"/json/streams/\" + str(stream.id))\n self.assert_json_error(result, \"Invalid stream ID\")\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 135, "n_words": 51, "vocab_size": 37, "complexity": 1, "nloc": 13, "token_counts": 104, "n_ast_nodes": 182, "n_identifiers": 21, "d_id": 17744, "documentation": { "docstring": "\n You must be on the realm to create a stream.\n ", 
"n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 308567, "commit_id": "a2677983a2924366ea13eab416bf286996a64bdb", "repo": "core", "path": "homeassistant/components/unifiprotect/select.py", "file_name": "select.py", "fun_name": "_async_set_dynamic_options", "commit_message": "Add UniFi Protect select platform (#63337)", "code": "def _async_set_dynamic_options(self) -> None:\n \n if self.entity_description.ufp_options is not None:\n return\n\n if self.entity_description.key == _KEY_VIEWER:\n options = [\n {\"id\": item.id, \"name\": item.name}\n for item in self.data.api.bootstrap.liveviews.values()\n ]\n elif self.entity_description.key == _KEY_DOORBELL_TEXT:\n default_message = (\n self.data.api.bootstrap.nvr.doorbell_settings.default_message_text\n )\n messages = self.data.api.bootstrap.nvr.doorbell_settings.all_messages\n built_messages = (\n {\"id\": item.type.value, \"name\": item.text} for item in messages\n )\n\n options = [\n {\"id\": \"\", \"name\": f\"Default Message ({default_message})\"},\n *built_messages,\n ]\n elif self.entity_description.key == _KEY_PAIRED_CAMERA:\n options = [{\"id\": TYPE_EMPTY_VALUE, \"name\": \"Not Paired\"}]\n for camera in self.data.api.bootstrap.cameras.values():\n options.append({\"id\": camera.id, \"name\": camera.name})\n\n self._attr_options = [item[\"name\"] for item in options]\n self._hass_to_unifi_options = {item[\"name\"]: item[\"id\"] for item in options}\n self._unifi_to_hass_options = {item[\"id\"]: item[\"name\"] for item in options}\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 396, "n_words": 103, "vocab_size": 60, "complexity": 11, "nloc": 31, "token_counts": 252, "n_ast_nodes": 416, "n_identifiers": 34, "d_id": 107315, "documentation": { "docstring": "Options that do not actually update dynamically.\n\n This is due to possible downstream platforms dependencies on these options.\n ", "n_words": 18, "vocab_size": 18, "n_whitespaces": 32, "language": "en" } }, { "id": 203142, "commit_id": "efb4478e484ae61c5fc23563d4e9df1f8f49df49", "repo": "django", "path": "tests/messages_tests/test_cookie.py", "file_name": "test_cookie.py", "fun_name": "test_safedata", "commit_message": "Fixed #33458 -- Fixed encoding of messages with empty string as extra_tags.", "code": "def test_safedata(self):\n \n self.assertIsInstance(\n self.encode_decode(mark_safe('Hello Django!')).message,\n SafeData,\n )\n self.assertNotIsInstance(\n self.encode_decode('Hello Django!').message,\n SafeData,\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 91, "n_words": 12, "vocab_size": 10, "complexity": 1, "nloc": 9, "token_counts": 41, "n_ast_nodes": 70, "n_identifiers": 8, "d_id": 50235, "documentation": { "docstring": "\n A message containing SafeData is keeping its safe status when\n retrieved from the message storage.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 37, "language": "en" } }, { "id": 223024, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/tests/test_archive_util.py", "file_name": "test_archive_util.py", "fun_name": "test_make_tarball_extended", "commit_message": "add python 3.10.4 for windows", "code": "def test_make_tarball_extended(self):\n \n self.test_make_tarball('のアーカイブ') # japanese for archive\n", "url": "https://github.com/XX-net/XX-Net.git", 
"language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 26, "n_identifiers": 3, "d_id": 56854, "documentation": { "docstring": "\n Mirror test_make_tarball, except filename contains extended\n characters outside the latin charset.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 33, "language": "en" } }, { "id": 242898, "commit_id": "f77aabf28134d93e35ca2d5622759c856333beb9", "repo": "Pillow", "path": "src/PIL/Image.py", "file_name": "Image.py", "fun_name": "effect_mandelbrot", "commit_message": "Update Image.py docstrings.\n\nUpdate Image.py file with a typo in effect_mandelbrot method.\r\nThe Typo was in docstrings of the effect_mandelbrot method in Image module of PIL.", "code": "def effect_mandelbrot(size, extent, quality):\n \n return Image()._new(core.effect_mandelbrot(size, extent, quality))\n\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 14, "n_words": 8, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 28, "n_ast_nodes": 44, "n_identifiers": 7, "d_id": 69936, "documentation": { "docstring": "\n Generate a Mandelbrot set covering the given extent.\n\n :param size: The requested size in pixels, as a 2-tuple:\n (width, height).\n :param extent: The extent to cover, as a 4-tuple:\n (x0, y0, x1, y1).\n :param quality: Quality.\n ", "n_words": 36, "vocab_size": 30, "n_whitespaces": 64, "language": "en" } }, { "id": 63022, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pep517/wrappers.py", "file_name": "wrappers.py", "fun_name": "subprocess_runner", "commit_message": "upd; format", "code": "def subprocess_runner(self, runner):\n \n prev = self._subprocess_runner\n self._subprocess_runner = runner\n try:\n yield\n finally:\n self._subprocess_runner = prev\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 72, "n_words": 15, "vocab_size": 10, "complexity": 2, "nloc": 7, "token_counts": 28, "n_ast_nodes": 50, "n_identifiers": 5, "d_id": 13102, "documentation": { "docstring": "A context manager for temporarily overriding the default subprocess\n runner.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 24, "language": "en" } }, { "id": 124881, "commit_id": "09a6e5336ad6ab3c41e4a16e906c778aee2450bc", "repo": "ray", "path": "python/ray/serve/tests/fault_tolerance_tests/test_controller_recovery.py", "file_name": "test_controller_recovery.py", "fun_name": "test_recover_start_from_replica_actor_names", "commit_message": "[Serve][Part2] Migrate the tests to use deployment graph api (#26507)", "code": "def test_recover_start_from_replica_actor_names(serve_instance):\n \n # Test failed to deploy with total of 2 replicas,\n # but first constructor call fails.", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 27, "n_words": 18, "vocab_size": 17, "complexity": 14, "nloc": 62, "token_counts": 343, "n_ast_nodes": 15, "n_identifiers": 2, "d_id": 27710, "documentation": { "docstring": "Test controller is able to recover starting -> running replicas from\n actor names.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 19, "language": "en" } }, { "id": 
54856, "commit_id": "e11fd5aa4905c7c27dbdf6ec49442ee107daebac", "repo": "prefect", "path": "src/prefect/context.py", "file_name": "context.py", "fun_name": "get_profile_context", "commit_message": "Bug fix for PrefectHQ/orion#1383, contains test", "code": "def get_profile_context() -> ProfileContext:\n \n profile_ctx = ProfileContext.get()\n\n if not profile_ctx:\n raise MissingContextError(\"No profile context found.\")\n\n return profile_ctx\n\n\n_PROFILE_ENV_LOCK = threading.Lock()\n\n\n@contextmanager", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "@contextmanager", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 38, "n_words": 21, "vocab_size": 19, "complexity": 2, "nloc": 9, "token_counts": 25, "n_ast_nodes": 63, "n_identifiers": 9, "d_id": 11161, "documentation": { "docstring": "\n Returns a ProfileContext that contains the combination of user profile \n settings and environment variable settings present when the context was initialized\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 32, "language": "en" } }, { "id": 297253, "commit_id": "41d5256533ec6ef1c102af0a43c7b7f26b8e06fb", "repo": "core", "path": "tests/components/zwave_js/test_device_trigger.py", "file_name": "test_device_trigger.py", "fun_name": "test_no_controller_triggers", "commit_message": "Add via_device support to zwave_js (#83219)\n\nCo-authored-by: Paulus Schoutsen ", "code": "async def test_no_controller_triggers(hass, client, integration):\n \n dev_reg = async_get_dev_reg(hass)\n device = dev_reg.async_get_device(\n {get_device_id(client.driver, client.driver.controller.nodes[1])}\n )\n assert device\n assert (\n await async_get_device_automations(\n hass, DeviceAutomationType.TRIGGER, device.id\n )\n == []\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 87, "n_words": 27, "vocab_size": 22, "complexity": 1, "nloc": 12, "token_counts": 63, "n_ast_nodes": 98, "n_identifiers": 16, "d_id": 96222, "documentation": { "docstring": "Test that we do not get triggers for the controller.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 21410, "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py", "file_name": "tarfile.py", "fun_name": "is_tarfile", "commit_message": "Vendor in pip 22.1.2", "code": "def is_tarfile(name):\n \n try:\n t = open(name)\n t.close()\n return True\n except TarError:\n return False\n\nbltn_open = open\nopen = TarFile.open\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 54, "n_words": 19, "vocab_size": 15, "complexity": 2, "nloc": 7, "token_counts": 26, "n_ast_nodes": 60, "n_identifiers": 8, "d_id": 3815, "documentation": { "docstring": "Return True if name points to a tar archive that we\n are able to handle, else return False.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 27, "language": "en" } }, { "id": 19648, "commit_id": "9a3b3ce70621af6f9adaa9eeac9cf83fa149319c", "repo": "pipenv", "path": "pipenv/core.py", "file_name": "core.py", "fun_name": "fallback_which", "commit_message": "Issue 4993 Add standard pre commit hooks and apply linting. (#4994)\n\n* Add .pre-commit-config.yaml to the project and exclude tests (for now). 
This does not include the MyPy linting that pip does but does include everything else.", "code": "def fallback_which(command, location=None, allow_global=False, system=False):\n \n\n from .vendor.pythonfinder import Finder\n\n if not command:\n raise ValueError(\"fallback_which: Must provide a command to search for...\")\n if not isinstance(command, str):\n raise TypeError(f\"Provided command must be a string, received {command!r}\")\n global_search = system or allow_global\n if location is None:\n global_search = True\n finder = Finder(system=False, global_search=global_search, path=location)\n if is_python_command(command):\n result = find_python(finder, command)\n if result:\n return result\n result = finder.which(command)\n if result:\n return result.path.as_posix()\n return \"\"\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 156, "n_words": 70, "vocab_size": 51, "complexity": 8, "nloc": 18, "token_counts": 118, "n_ast_nodes": 196, "n_identifiers": 20, "d_id": 3046, "documentation": { "docstring": "\n A fallback implementation of the `which` utility command that relies exclusively on\n searching the path for commands.\n\n :param str command: The command to search for, optional\n :param str location: The search location to prioritize (prepend to path), defaults to None\n :param bool allow_global: Whether to search the global path, defaults to False\n :param bool system: Whether to use the system python instead of pipenv's python, defaults to False\n :raises ValueError: Raised if no command is provided\n :raises TypeError: Raised if the command provided is not a string\n :return: A path to the discovered command location\n :rtype: str\n ", "n_words": 97, "vocab_size": 58, "n_whitespaces": 131, "language": "en" } }, { "id": 289746, "commit_id": "6979cd95b0fe85c3ee8eca3dbc9881b8d05591e8", "repo": "core", "path": "homeassistant/components/sensor/__init__.py", "file_name": "__init__.py", "fun_name": "suggested_unit_of_measurement", "commit_message": "Add suggested_unit_of_measurement attribute to sensors (#80638)\n\n* Add suggested_unit_of_measurement attribute to sensors\r\n\r\n* Lazy calculation of initial entity options\r\n\r\n* Add type alias for entity options\r\n\r\n* Small tweak\r\n\r\n* Add tests\r\n\r\n* Store suggested_unit_of_measurement in its own option key\r\n\r\n* Adapt to renaming of IMPERIAL_SYSTEM\r\n\r\n* Fix rebase mistakes\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: epenet <6771947+epenet@users.noreply.github.com>\r\n\r\nCo-authored-by: epenet <6771947+epenet@users.noreply.github.com>", "code": "def suggested_unit_of_measurement(self) -> str | None:\n \n if hasattr(self, \"_attr_suggested_unit_of_measurement\"):\n return self._attr_suggested_unit_of_measurement\n if hasattr(self, \"entity_description\"):\n return self.entity_description.suggested_unit_of_measurement\n return None\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 68, "n_words": 18, "vocab_size": 14, "complexity": 3, "nloc": 22, "token_counts": 38, "n_ast_nodes": 65, "n_identifiers": 6, "d_id": 88882, "documentation": { "docstring": "Return the unit which should be used for the sensor's state.\n\n This can be used by integrations to override automatic unit conversion rules,\n for example to make a temperature sensor display in °C even if the configured\n unit system prefers °F.\n\n For 
sensors without a `unique_id`, this takes precedence over legacy\n temperature conversion rules only.\n\n For sensors with a `unique_id`, this is applied only if the unit is not set by the user,\n and takes precedence over automatic device-class conversion rules.\n\n Note:\n suggested_unit_of_measurement is stored in the entity registry the first time\n the entity is seen, and then never updated.\n ", "n_words": 100, "vocab_size": 65, "n_whitespaces": 185, "language": "en" } }, { "id": 133802, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "rllib/agents/ppo/tests/test_ppo.py", "file_name": "test_ppo.py", "fun_name": "test_ppo_exploration_setup", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_ppo_exploration_setup(self):\n \n config = copy.deepcopy(ppo.DEFAULT_CONFIG)\n config[\"num_workers\"] = 0 # Run locally.\n config[\"env_config\"] = {\"is_slippery\": False, \"map_name\": \"4x4\"}\n obs = np.array(0)\n\n # Test against all frameworks.\n for fw in framework_iterator(config):\n # Default Agent should be setup with StochasticSampling.\n trainer = ppo.PPOTrainer(config=config, env=\"FrozenLake-v1\")\n # explore=False, always expect the same (deterministic) action.\n a_ = trainer.compute_single_action(\n obs, explore=False, prev_action=np.array(2), prev_reward=np.array(1.0)\n )\n # Test whether this is really the argmax action over the logits.\n if fw != \"tf\":\n last_out = trainer.get_policy().model.last_output()\n if fw == \"torch\":\n check(a_, np.argmax(last_out.detach().cpu().numpy(), 1)[0])\n else:\n check(a_, np.argmax(last_out.numpy(), 1)[0])\n for _ in range(50):\n a = trainer.compute_single_action(\n obs,\n explore=False,\n prev_action=np.array(2),\n prev_reward=np.array(1.0),\n )\n check(a, a_)\n\n # With explore=True (default), expect stochastic actions.\n actions = []\n for _ in range(300):\n actions.append(\n trainer.compute_single_action(\n obs, prev_action=np.array(2), prev_reward=np.array(1.0)\n )\n )\n check(np.mean(actions), 1.5, atol=0.2)\n trainer.stop()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 629, "n_words": 126, "vocab_size": 87, "complexity": 6, "nloc": 33, "token_counts": 285, "n_ast_nodes": 446, "n_identifiers": 37, "d_id": 30114, "documentation": { "docstring": "Tests, whether PPO runs with different exploration setups.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 191814, "commit_id": "5c5a710f23d83ba5ff1dc9ab6fc23b28094560fb", "repo": "ydata-profiling", "path": "src/pandas_profiling/utils/compat.py", "file_name": "compat.py", "fun_name": "pandas_version_info", "commit_message": "feat: add support for Pandas 1.5 (#1076)", "code": "def pandas_version_info() -> Tuple[int, ...]:\n \n return tuple(int(s) for s in pd.__version__.split(\".\"))\n", "url": "https://github.com/ydataai/ydata-profiling.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 17, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 6, "token_counts": 31, "n_ast_nodes": 52, "n_identifiers": 8, "d_id": 46844, "documentation": { "docstring": "\n Get the Pandas version as a tuple of integers,\n akin to `sys.version_info` for the Python version.\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 26, "language": "en" } }, { "id": 20773, "commit_id": 
"f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/rich/live_render.py", "file_name": "live_render.py", "fun_name": "position_cursor", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def position_cursor(self) -> Control:\n \n if self._shape is not None:\n _, height = self._shape\n return Control(\n ControlType.CARRIAGE_RETURN,\n (ControlType.ERASE_IN_LINE, 2),\n *(\n (\n (ControlType.CURSOR_UP, 1),\n (ControlType.ERASE_IN_LINE, 2),\n )\n * (height - 1)\n )\n )\n return Control()\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 250, "n_words": 33, "vocab_size": 27, "complexity": 2, "nloc": 20, "token_counts": 70, "n_ast_nodes": 105, "n_identifiers": 10, "d_id": 3537, "documentation": { "docstring": "Get control codes to move cursor to beginning of live render.\n\n Returns:\n Control: A control instance that may be printed.\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 45, "language": "en" } }, { "id": 20715, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/rich/console.py", "file_name": "console.py", "fun_name": "_exit_buffer", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def _exit_buffer(self) -> None:\n \n self._buffer_index -= 1\n self._check_buffer()\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 18, "n_ast_nodes": 33, "n_identifiers": 4, "d_id": 3497, "documentation": { "docstring": "Leave buffer context, and render content if required.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 268977, "commit_id": "01c906c4178db5ae03b7eb2d298a052c952a0667", "repo": "keras", "path": "keras/layers/rnn/rnn_utils.py", "file_name": "rnn_utils.py", "fun_name": "is_multiple_state", "commit_message": "Reorganize RNN layers, cells and wrappers into smaller logically organized files hosted under an `rnn` directory.\n\nPiperOrigin-RevId: 428841673", "code": "def is_multiple_state(state_size):\n \n return (hasattr(state_size, '__len__') and\n not isinstance(state_size, tf.TensorShape))\n\n", "url": "https://github.com/keras-team/keras.git", "language": 
"Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 20, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 3, "token_counts": 25, "n_ast_nodes": 43, "n_identifiers": 6, "d_id": 79799, "documentation": { "docstring": "Check whether the state_size contains multiple states.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 62204, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/util.py", "file_name": "util.py", "fun_name": "remove", "commit_message": "upd; format", "code": "def remove(self, event, subscriber):\n \n subs = self._subscribers\n if event not in subs:\n raise ValueError('No subscribers: %r' % event)\n subs[event].remove(subscriber)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 58, "n_words": 19, "vocab_size": 19, "complexity": 2, "nloc": 5, "token_counts": 37, "n_ast_nodes": 61, "n_identifiers": 7, "d_id": 12898, "documentation": { "docstring": "\n Remove a subscriber for an event.\n\n :param event: The name of an event.\n :param subscriber: The subscriber to be removed.\n ", "n_words": 20, "vocab_size": 15, "n_whitespaces": 49, "language": "en" } }, { "id": 102079, "commit_id": "48c886b3dce3d3117ad16edaf35c8abd28dc51f5", "repo": "faceswap", "path": "lib/sysinfo.py", "file_name": "sysinfo.py", "fun_name": "_format_ram", "commit_message": "Allow decoding errors", "code": "def _format_ram(self):\n \n retval = []\n for name in (\"total\", \"available\", \"used\", \"free\"):\n value = getattr(self, f\"_ram_{name}\")\n value = int(value / (1024 * 1024))\n retval.append(f\"{name.capitalize()}: {value}MB\")\n return \", \".join(retval)\n\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 89, "n_words": 28, "vocab_size": 25, "complexity": 2, "nloc": 7, "token_counts": 58, "n_ast_nodes": 121, "n_identifiers": 10, "d_id": 21444, "documentation": { "docstring": " Format the RAM stats into Megabytes to make it more readable.\n\n Returns\n -------\n str\n The total, available, used and free RAM displayed in Megabytes\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 64, "language": "en" } }, { "id": 66259, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/hr/report/monthly_attendance_sheet/monthly_attendance_sheet.py", "file_name": "monthly_attendance_sheet.py", "fun_name": "get_holiday", "commit_message": "style: format code with black", "code": "def get_holiday(holiday_list, month):\n\tholiday_map = frappe._dict()\n\tfor d in holiday_list:\n\t\tif d:\n\t\t\tholiday_map.setdefault(\n\t\t\t\td,\n\t\t\t\tfrappe.db.sql(\n\t\t\t\t\t,\n\t\t\t\t\t(d, month),\n\t\t\t\t),\n\t\t\t)\n\n\treturn holiday_map\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 10, "n_words": 23, "vocab_size": 22, "complexity": 3, "nloc": 13, "token_counts": 47, "n_ast_nodes": 83, "n_identifiers": 11, "d_id": 14150, "documentation": { "docstring": "select day(holiday_date), weekly_off from `tabHoliday`\n\t\t\t\twhere parent=%s and month(holiday_date)=%s", "n_words": 9, "vocab_size": 9, "n_whitespaces": 7, "language": "en" } }, { "id": 160870, "commit_id": 
"6d77c591c59b5678f14ae5af2127eebb7d2415bc", "repo": "numpy", "path": "numpy/ma/core.py", "file_name": "core.py", "fun_name": "__sub__", "commit_message": "ENH: Adding __array_ufunc__ capability to MaskedArrays.\n\nThis enables any ufunc numpy operations that are called on a\nMaskedArray to use the masked version of that function automatically\nwithout needing to resort to np.ma.func() calls.", "code": "def __sub__(self, other):\n \n if self._delegate_binop(other):\n return NotImplemented\n return np.subtract(self, other)\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 42, "n_words": 10, "vocab_size": 9, "complexity": 2, "nloc": 4, "token_counts": 27, "n_ast_nodes": 44, "n_identifiers": 7, "d_id": 38770, "documentation": { "docstring": "\n Subtract other from self, and return a new masked array.\n\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 274647, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/metrics/base_metric_test.py", "file_name": "base_metric_test.py", "fun_name": "test_build_in_tf_function", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def test_build_in_tf_function(self):\n \n m = metrics.MeanTensor(dtype=tf.float64)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 11, "token_counts": 117, "n_ast_nodes": 32, "n_identifiers": 8, "d_id": 81253, "documentation": { "docstring": "Ensure that variables are created correctly in a tf function.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 185685, "commit_id": "0af9fed65969894d604e32a177120f0a03857265", "repo": "textual", "path": "src/textual/app.py", "file_name": "app.py", "fun_name": "isatty", "commit_message": "added constant, and docstrings", "code": "def isatty(self) -> bool:\n \n return True\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 7, "token_counts": 10, "n_ast_nodes": 19, "n_identifiers": 3, "d_id": 45109, "documentation": { "docstring": "Pretend to be a terminal.\n\n Returns:\n bool: True if this is a tty.\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 38, "language": "en" } }, { "id": 21607, "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/typing_extensions.py", "file_name": "typing_extensions.py", "fun_name": "is_typeddict", "commit_message": "Vendor in pip 22.1.2", "code": "def is_typeddict(tp):\n ", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "\"\"\"Check if an annotation is a TypedDict class\n\n For example::anFor", "n_ast_errors": 3, "ast_levels": 5, "n_whitespaces": 9, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 26, "n_identifiers": 9, "d_id": 3951, "documentation": { "docstring": "Check if an annotation is a TypedDict class\n\n For example::", "n_words": 10, "vocab_size": 10, "n_whitespaces": 16, "language": "en" } }, { "id": 248242, "commit_id": "d38d242411b8910dfacde1e61fd3a0ec5cbcaa66", "repo": "synapse", "path": "tests/config/test_cache.py", "file_name": "test_cache.py", 
"fun_name": "test_cache_with_asterisk_in_name", "commit_message": "Reload cache factors from disk on SIGHUP (#12673)", "code": "def test_cache_with_asterisk_in_name(self):\n \n\n config = {\n \"caches\": {\n \"per_cache_factors\": {\"*cache_a*\": 5, \"cache_b\": 6, \"cache_c\": 2}\n }\n }\n self.config._environ = {\n \"SYNAPSE_CACHE_FACTOR_CACHE_A\": \"2\",\n \"SYNAPSE_CACHE_FACTOR_CACHE_B\": 3,\n }\n self.config.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.config.resize_all_caches()\n\n cache_a = LruCache(100)\n add_resizable_cache(\"*cache_a*\", cache_resize_callback=cache_a.set_cache_factor)\n self.assertEqual(cache_a.max_size, 200)\n\n cache_b = LruCache(100)\n add_resizable_cache(\"*Cache_b*\", cache_resize_callback=cache_b.set_cache_factor)\n self.assertEqual(cache_b.max_size, 300)\n\n cache_c = LruCache(100)\n add_resizable_cache(\"*cache_c*\", cache_resize_callback=cache_c.set_cache_factor)\n self.assertEqual(cache_c.max_size, 200)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 220, "n_words": 49, "vocab_size": 38, "complexity": 1, "nloc": 21, "token_counts": 146, "n_ast_nodes": 250, "n_identifiers": 17, "d_id": 72173, "documentation": { "docstring": "Some caches have asterisks in their name, test that they are set correctly.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 241590, "commit_id": "650c710efacd633fa283955145342bb64063c883", "repo": "lightning", "path": "tests/strategies/test_tpu_spawn.py", "file_name": "test_tpu_spawn.py", "fun_name": "test_model_tpu_one_core", "commit_message": "Rename training plugin test files & names to strategy (#11303)", "code": "def test_model_tpu_one_core():\n \n trainer = Trainer(tpu_cores=1, fast_dev_run=True, strategy=TPUSpawnStrategy(debug=True))\n # assert training strategy attributes for device setting\n assert isinstance(trainer.strategy, TPUSpawnStrategy)\n assert not trainer.strategy.on_gpu\n assert trainer.strategy.on_tpu\n assert trainer.strategy.root_device == torch.device(\"xla\", index=1)\n model = BoringModelTPU()\n trainer.fit(model)\n assert \"PT_XLA_DEBUG\" not in os.environ\n", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 67, "n_words": 37, "vocab_size": 30, "complexity": 1, "nloc": 9, "token_counts": 83, "n_ast_nodes": 135, "n_identifiers": 20, "d_id": 69613, "documentation": { "docstring": "Tests if device/debug flag is set correctely when training and after teardown for TPUSpawnStrategy.", "n_words": 14, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 84790, "commit_id": "b0de5c0f364632feb1e0a662f9be49aaf3412770", "repo": "zulip", "path": "zerver/tests/test_subs.py", "file_name": "test_subs.py", "fun_name": "test_subscriptions_query_count", "commit_message": "streams: Set can_remove_subscribers_group while creating streams.\n\nThis commit sets can_remove_subscribers_group to admins system\ngroup while creating streams as it will be the default value\nof this setting. 
In further we would provide an option to set\nvalue of this setting to any user group while creating streams\nusing API or UI.", "code": "def test_subscriptions_query_count(self) -> None:\n \n user1 = self.example_user(\"cordelia\")\n user2 = self.example_user(\"iago\")\n new_streams = [\n \"query_count_stream_1\",\n \"query_count_stream_2\",\n \"query_count_stream_3\",\n ]\n\n # Test creating a public stream when realm does not have a notification stream.\n with queries_captured() as queries:\n self.common_subscribe_to_streams(\n self.test_user,\n [new_streams[0]],\n dict(principals=orjson.dumps([user1.id, user2.id]).decode()),\n )\n self.assert_length(queries, 37)\n\n # Test creating private stream.\n with queries_captured() as queries:\n self.common_subscribe_to_streams(\n self.test_user,\n [new_streams[1]],\n dict(principals=orjson.dumps([user1.id, user2.id]).decode()),\n invite_only=True,\n )\n self.assert_length(queries, 36)\n\n # Test creating a public stream with announce when realm has a notification stream.\n notifications_stream = get_stream(self.streams[0], self.test_realm)\n self.test_realm.notifications_stream_id = notifications_stream.id\n self.test_realm.save()\n with queries_captured() as queries:\n self.common_subscribe_to_streams(\n self.test_user,\n [new_streams[2]],\n dict(\n announce=\"true\",\n principals=orjson.dumps([user1.id, user2.id]).decode(),\n ),\n )\n self.assert_length(queries, 45)\n\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 519, "n_words": 98, "vocab_size": 59, "complexity": 1, "nloc": 39, "token_counts": 239, "n_ast_nodes": 387, "n_identifiers": 25, "d_id": 17875, "documentation": { "docstring": "\n Test database query count when creating stream with api/v1/users/me/subscriptions.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 82419, "commit_id": "c1290c9ff89cb00caa5469129fd527e9d82cd820", "repo": "django-cms", "path": "cms/tests/test_plugins.py", "file_name": "test_plugins.py", "fun_name": "test_meta_options_as_defaults", "commit_message": "ci: Added codespell (#7355)\n\nCo-authored-by: Christian Clauss \r\n\r\n* ci: codespell config taken from #7292", "code": "def test_meta_options_as_defaults(self):\n \n # this plugin relies on the base CMSPlugin and Model classes to\n # decide what the app_label and db_table should be\n\n plugin = TestPlugin.model\n self.assertEqual(plugin._meta.db_table, 'meta_testpluginmodel')\n self.assertEqual(plugin._meta.app_label, 'meta')\n", "url": "https://github.com/django-cms/django-cms.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 72, "n_words": 30, "vocab_size": 26, "complexity": 1, "nloc": 4, "token_counts": 35, "n_ast_nodes": 63, "n_identifiers": 9, "d_id": 17389, "documentation": { "docstring": " handling when a CMSPlugin meta options are computed defaults ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 10, "language": "en" } }, { "id": 257821, "commit_id": "621e1af74c9c7d04b79ca5f5826ddcc06e1237f0", "repo": "haystack", "path": "haystack/schema.py", "file_name": "schema.py", "fun_name": "to_dict", "commit_message": "refactor: improve support for dataclasses (#3142)\n\n* refactor: improve support for dataclasses\r\n\r\n* refactor: refactor class init\r\n\r\n* refactor: remove unused import\r\n\r\n* refactor: testing 3.7 diffs\r\n\r\n* refactor: checking meta where is Optional\r\n\r\n* refactor: reverting some changes on 
3.7\r\n\r\n* refactor: remove unused imports\r\n\r\n* build: manual pre-commit run\r\n\r\n* doc: run doc pre-commit manually\r\n\r\n* refactor: post initialization hack for 3.7-3.10 compat.\r\n\r\nTODO: investigate another method to improve 3.7 compatibility.\r\n\r\n* doc: force pre-commit\r\n\r\n* refactor: refactored for both Python 3.7 and 3.9\r\n\r\n* docs: manually run pre-commit hooks\r\n\r\n* docs: run api docs manually\r\n\r\n* docs: fix wrong comment\r\n\r\n* refactor: change no type-checked test code\r\n\r\n* docs: update primitives\r\n\r\n* docs: api documentation\r\n\r\n* docs: api documentation\r\n\r\n* refactor: minor test refactoring\r\n\r\n* refactor: remova unused enumeration on test\r\n\r\n* refactor: remove unneeded dir in gitignore\r\n\r\n* refactor: exclude all private fields and change meta def\r\n\r\n* refactor: add pydantic comment\r\n\r\n* refactor : fix for mypy on Python 3.7\r\n\r\n* refactor: revert custom init\r\n\r\n* docs: update docs to new pydoc-markdown style\r\n\r\n* Update test/nodes/test_generator.py\r\n\r\nCo-authored-by: Sara Zan ", "code": "def to_dict(self, field_map={}) -> Dict:\n \n inv_field_map = {v: k for k, v in field_map.items()}\n _doc: Dict[str, str] = {}\n for k, v in self.__dict__.items():\n # Exclude internal fields (Pydantic, ...) fields from the conversion process\n if k.startswith(\"__\"):\n continue\n if k == \"content\":\n # Convert pd.DataFrame to list of rows for serialization\n if self.content_type == \"table\" and isinstance(self.content, pd.DataFrame):\n v = [self.content.columns.tolist()] + self.content.values.tolist()\n k = k if k not in inv_field_map else inv_field_map[k]\n _doc[k] = v\n return _doc\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 232, "n_words": 78, "vocab_size": 55, "complexity": 8, "nloc": 24, "token_counts": 130, "n_ast_nodes": 210, "n_identifiers": 20, "d_id": 75140, "documentation": { "docstring": "\n Convert Document to dict. An optional field_map can be supplied to change the names of the keys in the\n resulting dict. This way you can work with standardized Document objects in Haystack, but adjust the format that\n they are serialized / stored in other places (e.g. 
elasticsearch)\n Example:\n | doc = Document(content=\"some text\", content_type=\"text\")\n | doc.to_dict(field_map={\"custom_content_field\": \"content\"})\n | >>> {\"custom_content_field\": \"some text\", content_type\": \"text\"}\n\n :param field_map: Dict with keys being the custom target keys and values being the standard Document attributes\n :return: dict with content of the Document\n ", "n_words": 88, "vocab_size": 65, "n_whitespaces": 159, "language": "en" } }, { "id": 279524, "commit_id": "be73ac1a1e25d9abd4d793cba9707098d7adf231", "repo": "keras", "path": "keras/optimizers/optimizer_v2/optimizer_v2.py", "file_name": "optimizer_v2.py", "fun_name": "_resource_apply_sparse", "commit_message": "Add f-string format and lint with flynt on the whole codebase", "code": "def _resource_apply_sparse(self, grad, handle, indices, apply_state):\n \n raise NotImplementedError(\n \"`_resource_apply_sparse` Must be implemented in subclasses.\"\n )\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 47, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 4, "token_counts": 19, "n_ast_nodes": 31, "n_identifiers": 7, "d_id": 83029, "documentation": { "docstring": "Add ops to apply sparse gradients to the variable `handle`.\n\n Similar to `_apply_sparse`, the `indices` argument to this method has\n been de-duplicated. Optimizers which deal correctly with non-unique\n indices may instead override `_resource_apply_sparse_duplicate_indices`\n to avoid this overhead.\n\n Args:\n grad: a `Tensor` representing the gradient for the affected indices.\n handle: a `Tensor` of dtype `resource` which points to the variable to\n be updated.\n indices: a `Tensor` of integral type representing the indices for\n which the gradient is nonzero. Indices are unique.\n apply_state: A dict which is used across multiple apply calls.\n\n Returns:\n An `Operation` which updates the value of the variable.\n ", "n_words": 100, "vocab_size": 68, "n_whitespaces": 216, "language": "en" } }, { "id": 224971, "commit_id": "a56ac6e0513bdea6860ed1fdc3debc10410638cd", "repo": "mkdocs", "path": "mkdocs/plugins.py", "file_name": "plugins.py", "fun_name": "on_shutdown", "commit_message": "Add plugin events that persist across builds in `mkdocs serve`\n\n\"One-time events\" `on_startup(command)`, `on_shutdown`.\n\nTheir presence also shows that a plugin *wants* to persist across builds. Otherwise they will be re-created, to not change any existing behavior.", "code": "def on_shutdown(self) -> None:\n \n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 11, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 16, "token_counts": 8, "n_ast_nodes": 16, "n_identifiers": 2, "d_id": 57435, "documentation": { "docstring": "\n The `shutdown` event runs once at the very end of an `mkdocs` invocation, before exiting.\n\n This event is relevant only for support of `mkdocs serve`, otherwise within a\n single build it's undistinguishable from `on_post_build`.\n\n New in MkDocs 1.4.\n\n The presence of an `on_shutdown` method (even if empty) migrates the plugin to the new\n system where the plugin object is kept across builds within one `mkdocs serve`.\n\n Note the `on_post_build` method is still preferred for cleanups, when possible, as it has\n a much higher chance of actually triggering. 
`on_shutdown` is \"best effort\" because it\n relies on detecting a graceful shutdown of MkDocs.\n ", "n_words": 101, "vocab_size": 78, "n_whitespaces": 172, "language": "en" } }, { "id": 278045, "commit_id": "b1105dca17670dcac229271e63d5073fe445b84c", "repo": "keras", "path": "keras/distribute/distributed_training_utils_v1.py", "file_name": "distributed_training_utils_v1.py", "fun_name": "_build_network_on_replica", "commit_message": "resolve line-too-long in distribute", "code": "def _build_network_on_replica(model, mode, inputs=None, targets=None):\n \n # Need to do imports here since we run into a circular dependency error.\n from keras import models # pylint: disable=g-import-not-at-top\n from keras.engine import sequential # pylint: disable=g-import-not-at-top\n\n # We rely on the internal methods to avoid having share_weights weights in\n # the public API.\n if isinstance(model, sequential.Sequential):\n updated_model = models._clone_sequential_model(\n model, input_tensors=inputs, layer_fn=models.share_weights\n )\n else:\n updated_model = models._clone_functional_model(\n model, input_tensors=inputs, layer_fn=models.share_weights\n )\n # Callable losses added directly to a functional Model need to be added\n # here.\n updated_model._callable_losses = model._callable_losses\n\n # Recast all low precision outputs back to float32 since we only casted the\n # inputs to bfloat16 and not targets. This is done so that we can preserve\n # precision when calculating the loss value.", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 228, "n_words": 122, "vocab_size": 88, "complexity": 6, "nloc": 33, "token_counts": 188, "n_ast_nodes": 133, "n_identifiers": 18, "d_id": 82343, "documentation": { "docstring": "Build an updated model on replicas.\n\n We create a new Keras model while sharing the variables from the old graph.\n Building a new sub-graph is required since the original keras model creates\n placeholders for the input and the output that are not accessible till we\n call iterator.get_next() inside the step_fn for `fit`/`evaluate`/`predict`.\n\n The sharing of weights and layers between the old and the new model\n guarantee that we're using Strategy variables and any updates on either\n model are reflected correctly in callbacks and loop iterations.\n\n We need to make sure we share the optimizers between the old and the new\n model as well so that optimizer state is not lost if the user is running fit\n multiple times.\n\n Args:\n model: Model to be replicated across Replicas\n mode: Which of fit/eval/predict is building the distributed network\n inputs: Input variables to be passed to the model\n targets: Target tensor to be passed to model.compile\n\n Returns:\n A new model with shared layers with the old model.\n ", "n_words": 163, "vocab_size": 103, "n_whitespaces": 227, "language": "en" } }, { "id": 101263, "commit_id": "5e73437be47f2410439a3c6716de96354e6a0c94", "repo": "faceswap", "path": "tools/manual/faceviewer/viewport.py", "file_name": "viewport.py", "fun_name": "_discard_tk_faces", "commit_message": "lib.align updates:\n - alignments.py\n - Add typed dicts for imported alignments\n - Explicitly check for presence of thumb value in alignments dict\n - linting\n - detected_face.py\n - Typing\n - Linting\n - Legacy support for pre-aligned face\n - Update dependencies to new property names", "code": "def _discard_tk_faces(self):\n \n keys = [f\"{pnt_x}_{pnt_y}\"\n for pnt_x, pnt_y 
in self._objects.visible_grid[:2].T.reshape(-1, 2)]\n for key in list(self._tk_faces):\n if key not in keys:\n del self._tk_faces[key]\n logger.trace(\"keys: %s allocated_faces: %s\", keys, len(self._tk_faces))\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 97, "n_words": 28, "vocab_size": 24, "complexity": 4, "nloc": 7, "token_counts": 74, "n_ast_nodes": 124, "n_identifiers": 15, "d_id": 20683, "documentation": { "docstring": " Remove any :class:`TKFace` objects from the cache that are not currently displayed. ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 13, "language": "en" } }, { "id": 207601, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_views/tests.py", "file_name": "tests.py", "fun_name": "test_i18n_language_non_english_fallback", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_i18n_language_non_english_fallback(self):\n \n with self.settings(LANGUAGE_CODE=\"fr\"), translation.override(\"none\"):\n response = self.client.get(reverse(\"admin:jsi18n\"))\n self.assertContains(response, \"Choisir une heure\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 48, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 44, "n_ast_nodes": 83, "n_identifiers": 11, "d_id": 52017, "documentation": { "docstring": "\n Makes sure that the fallback language is still working properly\n in cases where the selected language cannot be found.\n ", "n_words": 19, "vocab_size": 17, "n_whitespaces": 41, "language": "en" } }, { "id": 275520, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/optimizer_v2/optimizer_v2.py", "file_name": "optimizer_v2.py", "fun_name": "name_scope_only_in_function_or_graph", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def name_scope_only_in_function_or_graph(name):\n \n if not tf.executing_eagerly():\n return tf.name_scope(name)\n else:\n return NullContextmanager()\n\n\n@keras_export(\"keras.optimizers.Optimizer\", metaclass=abc.ABCMeta)", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.optimizers.Optimizer\", metaclass=abc.ABCMeta)", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 34, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 27, "n_ast_nodes": 68, "n_identifiers": 10, "d_id": 81411, "documentation": { "docstring": "Internal-only entry point for `name_scope*`.\n\n Enters a compat.v1.name_scope only when in a function or graph,\n not when running fully eagerly.\n\n Args:\n name: The name argument that is passed to the op function.\n\n Returns:\n `name_scope*` context manager.\n ", "n_words": 36, "vocab_size": 34, "n_whitespaces": 61, "language": "en" } }, { "id": 105102, "commit_id": "ab7d3045ac9154e9c1c2602d0869130defdc6dc7", "repo": "datasets", "path": "src/datasets/download/streaming_download_manager.py", "file_name": "streaming_download_manager.py", "fun_name": "name", "commit_message": "Support DataLoader with num_workers > 0 in streaming mode (#4375)\n\n* make TorchIterableDataset work in parallel\r\n- make it picklable\r\n- paralellize over the shards when num_workers is passed\r\n\r\n* start writing some tests\r\n\r\n* fix streaming extension and fsspec 
issues in subprocesses\r\n\r\n* fix some tests\r\n\r\n* fix more tests\r\n\r\n* fix import\r\n\r\n* fix and add tests\r\n\r\n* fix patch (handle successive patches and builtins)\r\n\r\n* revert unnecessary change to enriched_web_blg\r\n\r\n* style\r\n\r\n* use open locally to fix win permission errors\r\n\r\n* keep file opened in read_csv\r\n\r\n* fix compression for read_csv\r\n\r\n* consistency of read_csv: don't infer compression for file-like objects\r\n\r\n* stringify Path objects\r\n\r\n* comments + raise error if sharding is ambiguous\r\n\r\n* minor\r\n\r\n* Update src/datasets/iterable_dataset.py\r\n\r\nCo-authored-by: Mario Šaško \r\n\r\nCo-authored-by: Mario Šaško ", "code": "def name(self) -> PurePosixPath:\n \n return PurePosixPath(_as_posix(self).split(\"::\")[0]).name\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 10, "token_counts": 26, "n_ast_nodes": 46, "n_identifiers": 5, "d_id": 22070, "documentation": { "docstring": "Name function for argument of type :obj:`~pathlib.Path` that supports both local paths end remote URLs.\n\n Args:\n path (:obj:`~pathlib.Path`): Calling Path instance.\n\n Returns:\n :obj:`str`\n ", "n_words": 23, "vocab_size": 23, "n_whitespaces": 66, "language": "en" } }, { "id": 204854, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/backends/base/introspection.py", "file_name": "introspection.py", "fun_name": "django_table_names", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def django_table_names(self, only_existing=False, include_views=True):\n \n tables = set()\n for model in self.get_migratable_models():\n if not model._meta.managed:\n continue\n tables.add(model._meta.db_table)\n tables.update(\n f.m2m_db_table()\n for f in model._meta.local_many_to_many\n if f.remote_field.through._meta.managed\n )\n tables = list(tables)\n if only_existing:\n existing_tables = set(self.table_names(include_views=include_views))\n tables = [\n t for t in tables if self.identifier_converter(t) in existing_tables\n ]\n return tables\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 242, "n_words": 48, "vocab_size": 31, "complexity": 8, "nloc": 18, "token_counts": 117, "n_ast_nodes": 186, "n_identifiers": 23, "d_id": 50932, "documentation": { "docstring": "\n Return a list of all table names that have associated Django models and\n are in INSTALLED_APPS.\n\n If only_existing is True, include only the tables in the database.\n ", "n_words": 27, "vocab_size": 25, "n_whitespaces": 56, "language": "en" } }, { "id": 196289, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/geometry/polygon.py", "file_name": "polygon.py", "fun_name": "eulerline", "commit_message": "Updated import locations", "code": "def eulerline(self):\n \n if self.is_equilateral():\n return self.orthocenter\n return Line(self.orthocenter, self.circumcenter)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 41, "n_words": 9, "vocab_size": 8, "complexity": 2, "nloc": 4, "token_counts": 28, "n_ast_nodes": 47, "n_identifiers": 6, "d_id": 47789, "documentation": { "docstring": "The Euler line of the triangle.\n\n The line which passes through 
circumcenter, centroid and orthocenter.\n\n Returns\n =======\n\n eulerline : Line (or Point for equilateral triangles in which case all\n centers coincide)\n\n Examples\n ========\n\n >>> from sympy import Point, Triangle\n >>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)\n >>> t = Triangle(p1, p2, p3)\n >>> t.eulerline\n Line2D(Point2D(0, 0), Point2D(1/2, 1/2))\n\n ", "n_words": 62, "vocab_size": 51, "n_whitespaces": 165, "language": "en" } }, { "id": 221238, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/calendar.py", "file_name": "calendar.py", "fun_name": "itermonthdays2", "commit_message": "add python 3.10.4 for windows", "code": "def itermonthdays2(self, year, month):\n \n for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday):\n yield d, i % 7\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 41, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 3, "token_counts": 37, "n_ast_nodes": 57, "n_identifiers": 9, "d_id": 56285, "documentation": { "docstring": "\n Like itermonthdates(), but will yield (day number, weekday number)\n tuples. For days outside the specified month the day number is 0.\n ", "n_words": 21, "vocab_size": 20, "n_whitespaces": 43, "language": "en" } }, { "id": 271837, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training_utils.py", "file_name": "training_utils.py", "fun_name": "get_input_shape_and_dtype", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def get_input_shape_and_dtype(layer):\n \n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 4, "nloc": 9, "token_counts": 55, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 80858, "documentation": { "docstring": "Retrieves input shape and input dtype of layer if applicable.\n\n Args:\n layer: Layer (or model) instance.\n\n Returns:\n Tuple (input_shape, input_dtype). Both could be None if the layer\n does not have a defined input shape.\n\n Raises:\n ValueError: in case an empty Sequential or Functional model is passed.\n ", "n_words": 46, "vocab_size": 42, "n_whitespaces": 80, "language": "en" } }, { "id": 133505, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/workflow/workflow_storage.py", "file_name": "workflow_storage.py", "fun_name": "load_workflow_meta", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def load_workflow_meta(self) -> Optional[WorkflowMetaData]:\n \n\n try:\n metadata = asyncio_run(self._get(self._key_workflow_metadata(), True))\n return WorkflowMetaData(status=WorkflowStatus(metadata[\"status\"]))\n except KeyNotFoundError:\n return None\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 69, "n_words": 15, "vocab_size": 14, "complexity": 2, "nloc": 12, "token_counts": 48, "n_ast_nodes": 81, "n_identifiers": 11, "d_id": 30038, "documentation": { "docstring": "Load the metadata of the current workflow.\n\n Returns:\n The metadata of the current workflow. 
If it doesn't exist,\n return None.\n ", "n_words": 20, "vocab_size": 14, "n_whitespaces": 56, "language": "en" } }, { "id": 208651, "commit_id": "d11e987f174a15f1640f8006c86f58d884c3faa4", "repo": "ipython", "path": "IPython/core/interactiveshell.py", "file_name": "interactiveshell.py", "fun_name": "_update_code_co_name", "commit_message": "Set co_name for cells run line by line. Fixes https://github.com/ipython/ipykernel/issues/841", "code": "def _update_code_co_name(self, code):\n \n if not hasattr(code, \"replace\"):\n # It may not be available on older versions of Python (only\n # available for 3.8 onwards).\n return code\n try:\n first_real_line = next(dis.findlinestarts(code))[1]\n except StopIteration:\n return code\n return code.replace(co_name=\"\" % (first_real_line,))\n", "url": "https://github.com/ipython/ipython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 130, "n_words": 40, "vocab_size": 34, "complexity": 3, "nloc": 8, "token_counts": 54, "n_ast_nodes": 93, "n_identifiers": 11, "d_id": 52437, "documentation": { "docstring": "Python 3.10 changed the behaviour so that whenever a code object\n is assembled in the compile(ast) the co_firstlineno would be == 1.\n\n This makes pydevd/debugpy think that all cells invoked are the same\n since it caches information based on (co_firstlineno, co_name, co_filename).\n\n Given that, this function changes the code 'co_name' to be unique\n based on the first real lineno of the code (which also has a nice\n side effect of customizing the name so that it's not always ).\n\n See: https://github.com/ipython/ipykernel/issues/841\n ", "n_words": 81, "vocab_size": 64, "n_whitespaces": 137, "language": "en" } }, { "id": 111621, "commit_id": "d5ed88e4e7f9aa78f06922dce8219a82e3b52682", "repo": "nni", "path": "nni/retiarii/integration.py", "file_name": "integration.py", "fun_name": "send_trial", "commit_message": "Retiarii serializer user experience improvements (#4437)", "code": "def send_trial(self, parameters, placement_constraint=None):\n \n self.parameters_count += 1\n if placement_constraint is None:\n placement_constraint = {\n 'type': 'None',\n 'gpus': []\n }\n self._validate_placement_constraint(placement_constraint)\n new_trial = {\n 'parameter_id': self.parameters_count,\n 'parameters': parameters,\n 'parameter_source': 'algorithm',\n 'placement_constraint': placement_constraint\n }\n _logger.debug('New trial sent: %s', new_trial)\n\n try:\n send_payload = nni.dump(new_trial, pickle_size_limit=int(os.getenv('PICKLE_SIZE_LIMIT', 64 * 1024)))\n except PayloadTooLarge:\n raise ValueError(\n 'Serialization failed when trying to dump the model because payload too large (larger than 64 KB). '\n 'This is usually caused by pickling large objects (like datasets) by mistake. '\n 'See the full error traceback for details and https://nni.readthedocs.io/en/stable/NAS/Serialization.html '\n 'for how to resolve such issue. 
'\n )\n\n # trial parameters can be super large, disable pickle size limit here\n # nevertheless, there could still be blocked by pipe / nni-manager\n send(CommandType.NewTrialJob, send_payload)\n\n if self.send_trial_callback is not None:\n self.send_trial_callback(parameters) # pylint: disable=not-callable\n return self.parameters_count\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 432, "n_words": 133, "vocab_size": 108, "complexity": 4, "nloc": 28, "token_counts": 132, "n_ast_nodes": 233, "n_identifiers": 22, "d_id": 24455, "documentation": { "docstring": "\n Send parameters to NNI.\n\n Parameters\n ----------\n parameters : Any\n Any payload.\n\n Returns\n -------\n int\n Parameter ID that is assigned to this parameter,\n which will be used for identification in future.\n ", "n_words": 30, "vocab_size": 27, "n_whitespaces": 120, "language": "en" } }, { "id": 13668, "commit_id": "e143ea3092ebae68f8c2cf7f784f86296cae68d7", "repo": "jina", "path": "jina/serve/runtimes/gateway/grpc/gateway.py", "file_name": "gateway.py", "fun_name": "dry_run", "commit_message": "refactor: use stream_docs from streamer (#5438)", "code": "async def dry_run(self, empty, context) -> jina_pb2.StatusProto:\n \n from docarray import DocumentArray, Document\n\n from jina.serve.executors import __dry_run_endpoint__\n\n da = DocumentArray([Document()])\n try:", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "async def dry_run(self, empty, context) -> jina_pb2.StatusProto:\n \"\"\"\n Process the call requested by having a dry run call to every Executor in the graph\n\n :param empty: The service expects an empty protobuf message\n :param context: grpc context\n :returns: the response request\n \"\"\"\n from docarray import DocumentArray, Document\n\n from jina.serve.executors import __dry_run_endpoint__\n\n da = DocumentArray([Document()])\n try:", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 55, "n_words": 20, "vocab_size": 18, "complexity": 3, "nloc": 23, "token_counts": 103, "n_ast_nodes": 63, "n_identifiers": 14, "d_id": 2721, "documentation": { "docstring": "\n Process the call requested by having a dry run call to every Executor in the graph\n\n :param empty: The service expects an empty protobuf message\n :param context: grpc context\n :returns: the response request\n ", "n_words": 33, "vocab_size": 29, "n_whitespaces": 69, "language": "en" } }, { "id": 108635, "commit_id": "bf3a554ccd1299bc260647029811758aeaf577b1", "repo": "matplotlib", "path": "lib/matplotlib/axes/_base.py", "file_name": "_base.py", "fun_name": "get_yaxis_transform", "commit_message": "Add tests, improve error messages, and use argument checks to simplify code", "code": "def get_yaxis_transform(self, which='grid'):\n \n if which == 'grid':\n return self._yaxis_transform\n elif which == 'tick1':\n # for cartesian projection, this is bottom spine\n return self.spines.left.get_spine_transform()\n elif which == 'tick2':\n # for cartesian projection, this is top spine\n return self.spines.right.get_spine_transform()\n else:\n raise ValueError(f'unknown value for which: {which!r}')\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 145, "n_words": 44, "vocab_size": 29, "complexity": 4, "nloc": 9, "token_counts": 57, "n_ast_nodes": 109, "n_identifiers": 9, "d_id": 23279, "documentation": { "docstring": "\n 
Get the transformation used for drawing y-axis labels, ticks\n and gridlines. The x-direction is in axis coordinates and the\n y-direction is in data coordinates.\n\n .. note::\n\n This transformation is primarily used by the\n `~matplotlib.axis.Axis` class, and is meant to be\n overridden by new kinds of projections that may need to\n place axis elements in different locations.\n ", "n_words": 56, "vocab_size": 42, "n_whitespaces": 137, "language": "en" } }, { "id": 69552, "commit_id": "34e4903ed7936c35176d6031a16d1a27654dcb40", "repo": "erpnext", "path": "erpnext/buying/doctype/request_for_quotation/request_for_quotation.py", "file_name": "request_for_quotation.py", "fun_name": "get_rfq_containing_supplier", "commit_message": "refactor: search queries (#33004)\n\n- guard clauses for readability\r\n- use values or format", "code": "def get_rfq_containing_supplier(doctype, txt, searchfield, start, page_len, filters):\n\tconditions = \"\"\n\tif txt:\n\t\tconditions += \"and rfq.name like '%%\" + txt + \"%%' \"\n\n\tif filters.get(\"transaction_date\"):\n\t\tconditions += \"and rfq.transaction_date = '{0}'\".format(filters.get(\"transaction_date\"))\n\n\trfq_data = frappe.db.sql(\n\t\tf,\n\t\t{\n\t\t\t\"page_len\": page_len,\n\t\t\t\"start\": start,\n\t\t\t\"company\": filters.get(\"company\"),\n\t\t\t\"supplier\": filters.get(\"supplier\"),\n\t\t},\n\t\tas_dict=1,\n\t)\n\n\treturn rfq_data\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 32, "n_words": 49, "vocab_size": 38, "complexity": 3, "nloc": 30, "token_counts": 96, "n_ast_nodes": 169, "n_identifiers": 15, "d_id": 15063, "documentation": { "docstring": "\n\t\tselect\n\t\t\tdistinct rfq.name, rfq.transaction_date,\n\t\t\trfq.company\n\t\tfrom\n\t\t\t`tabRequest for Quotation` rfq, `tabRequest for Quotation Supplier` rfq_supplier\n\t\twhere\n\t\t\trfq.name = rfq_supplier.parent\n\t\t\tand rfq_supplier.supplier = %(supplier)s\n\t\t\tand rfq.docstatus = 1\n\t\t\tand rfq.company = %(company)s\n\t\t\t{conditions}\n\t\torder by rfq.transaction_date ASC\n\t\tlimit %(page_len)s offset %(start)s ", "n_words": 40, "vocab_size": 32, "n_whitespaces": 28, "language": "en" } }, { "id": 296246, "commit_id": "0df30782a623204be2941da74ddee5bb110dd03b", "repo": "core", "path": "homeassistant/components/androidtv/media_player.py", "file_name": "media_player.py", "fun_name": "async_update", "commit_message": "Bump androidtv to 0.0.67 (improve connect attempt logging) (#69721)", "code": "async def async_update(self):\n \n # Check if device is disconnected.\n if not self._attr_available:\n # Try to connect\n if await self.aftv.adb_connect(log_errors=self._failed_connect_count == 0):\n self._failed_connect_count = 0\n self._attr_available = True\n else:\n self._failed_connect_count += 1\n\n # If the ADB connection is not intact, don't update.\n if not self.available:\n return\n\n # Get the updated state and attributes.\n (\n state,\n self._attr_app_id,\n running_apps,\n _,\n self._attr_is_volume_muted,\n self._attr_volume_level,\n self._attr_extra_state_attributes[ATTR_HDMI_INPUT],\n ) = await self.aftv.update(self._get_sources)\n\n self._attr_state = ANDROIDTV_STATES.get(state)\n if self._attr_state is None:\n self._attr_available = False\n\n if running_apps:\n self._attr_source = self._attr_app_name = self._app_id_to_name.get(\n self._attr_app_id, self._attr_app_id\n )\n sources = [\n self._app_id_to_name.get(\n app_id, app_id if not self._exclude_unnamed_apps 
else None\n )\n for app_id in running_apps\n ]\n self._attr_source_list = [source for source in sources if source]\n else:\n self._attr_source_list = None\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 520, "n_words": 114, "vocab_size": 74, "complexity": 10, "nloc": 34, "token_counts": 184, "n_ast_nodes": 288, "n_identifiers": 29, "d_id": 95241, "documentation": { "docstring": "Update the device state and, if necessary, re-connect.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 199618, "commit_id": "e875bdb804b0285e4a9bd8de0158436e792c03cb", "repo": "sympy", "path": "sympy/polys/appellseqs.py", "file_name": "appellseqs.py", "fun_name": "euler_poly", "commit_message": "Initial definition of Appell sequences", "code": "def euler_poly(n, x=None, polys=False):\n \n return appell_poly(n, [[1], [1, QQ(-1,2)]], 1, lambda p, i: -p / 2, QQ, x, polys)\n\n\n@public", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "@public", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 25, "n_words": 20, "vocab_size": 20, "complexity": 1, "nloc": 2, "token_counts": 55, "n_ast_nodes": 81, "n_identifiers": 9, "d_id": 49296, "documentation": { "docstring": "Generates the Euler polynomial of degree `n` in `x`.\n\n Parameters\n ==========\n\n n : int\n Degree of the polynomial.\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n ", "n_words": 35, "vocab_size": 29, "n_whitespaces": 67, "language": "en" } }, { "id": 101716, "commit_id": "e2a77e7c6e84e81f642cb22f528e25e3f2d2dbc1", "repo": "faceswap", "path": "tools/alignments/jobs.py", "file_name": "jobs.py", "fun_name": "_get_output_folder", "commit_message": "Alignments Tool - Typing, Documentation + Re-org", "code": "def _get_output_folder(self) -> str:\n \n if self._is_video and self._type == \"frames\":\n return os.path.dirname(self._source_dir)\n return self._source_dir\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 46, "n_words": 14, "vocab_size": 13, "complexity": 3, "nloc": 12, "token_counts": 34, "n_ast_nodes": 58, "n_identifiers": 9, "d_id": 21120, "documentation": { "docstring": " Return output folder. Needs to be in the root if input is a video and processing\n frames\n\n Returns\n -------\n str\n Full path to the output folder\n ", "n_words": 26, "vocab_size": 23, "n_whitespaces": 73, "language": "en" } }, { "id": 21319, "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/cachecontrol/controller.py", "file_name": "controller.py", "fun_name": "update_cached_response", "commit_message": "Vendor in pip 22.1.2", "code": "def update_cached_response(self, request, response):\n \n cache_url = self.cache_url(request.url)\n\n cached_response = self.serializer.loads(request, self.cache.get(cache_url))\n\n if not cached_response:\n # we didn't have a cached response\n return response\n\n # Lets update our headers with the headers from the new request:\n # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1\n #\n # The server isn't supposed to send headers that would make\n # the cached body invalid. But... 
just in case, we'll be sure\n # to strip out ones we know that might be problmatic due to\n # typical assumptions.\n excluded_headers = [\"content-length\"]\n\n cached_response.headers.update(\n dict(\n (k, v)\n for k, v in response.headers.items()\n if k.lower() not in excluded_headers\n )\n )\n\n # we want a 200 b/c we have content via the cache\n cached_response.status = 200\n\n # update our cache\n self._cache_set(cache_url, request, cached_response)\n\n return cached_response\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 342, "n_words": 120, "vocab_size": 79, "complexity": 4, "nloc": 16, "token_counts": 103, "n_ast_nodes": 172, "n_identifiers": 21, "d_id": 3761, "documentation": { "docstring": "On a 304 we will get a new set of headers that we want to\n update our cached value with, assuming we have one.\n\n This should only ever be called when we've sent an ETag and\n gotten a 304 as the response.\n ", "n_words": 42, "vocab_size": 37, "n_whitespaces": 70, "language": "en" } }, { "id": 132910, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/util/client/api.py", "file_name": "api.py", "fun_name": "remote", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def remote(self, *args, **kwargs):\n \n # Delayed import to avoid a cyclic import\n from ray.util.client.common import remote_decorator\n\n if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):\n # This is the case where the decorator is just @ray.remote.\n return remote_decorator(options=None)(args[0])\n error_string = (\n \"The @ray.remote decorator must be applied either \"\n \"with no arguments and no parentheses, for example \"\n \"'@ray.remote', or it must be applied using some of \"\n \"the arguments 'num_returns', 'num_cpus', 'num_gpus', \"\n \"'memory', 'object_store_memory', 'resources', \"\n \"'max_calls', or 'max_restarts', like \"\n \"'@ray.remote(num_returns=2, \"\n 'resources={\"CustomResource\": 1})\\'.'\n )\n assert len(args) == 0 and len(kwargs) > 0, error_string\n return remote_decorator(options=kwargs)\n\n # TODO(mwtian): consider adding _internal_ prefix to call_remote /\n # call_release / call_retain.", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 285, "n_words": 113, "vocab_size": 81, "complexity": 5, "nloc": 16, "token_counts": 93, "n_ast_nodes": 162, "n_identifiers": 13, "d_id": 29866, "documentation": { "docstring": "remote is the hook stub passed on to replace `ray.remote`.\n\n This sets up remote functions or actors, as the decorator,\n but does not execute them.\n\n Args:\n args: opaque arguments\n kwargs: opaque keyword arguments\n ", "n_words": 33, "vocab_size": 29, "n_whitespaces": 83, "language": "en" } }, { "id": 105091, "commit_id": "1c1eaf96d5ef4623e36c9124d49e88ab476dd655", "repo": "datasets", "path": "datasets/quickdraw/quickdraw.py", "file_name": "quickdraw.py", "fun_name": "process_struct", "commit_message": "Add QuickDraw dataset (#3592)\n\n* Add QuickDraw dataset\r\n\r\n* Style\r\n\r\n* Add infos file, dummy data, improve script\r\n\r\n* Add info and dummy data\r\n\r\n* Test readme\r\n\r\n* Finish readme\r\n\r\n* Delete generate_dummy.py\r\n\r\n* Remove whitespace", "code": "def process_struct(fileobj):\n \n (key_id,) = struct.unpack(\"Q\", fileobj.read(8))\n 
(country_code,) = struct.unpack(\"2s\", fileobj.read(2))\n (recognized,) = struct.unpack(\"b\", fileobj.read(1))\n (timestamp,) = struct.unpack(\"I\", fileobj.read(4))\n (n_strokes,) = struct.unpack(\"H\", fileobj.read(2))\n drawing = []\n for _ in range(n_strokes):\n (n_points,) = struct.unpack(\"H\", fileobj.read(2))\n fmt = str(n_points) + \"B\"\n x = struct.unpack(fmt, fileobj.read(n_points))\n y = struct.unpack(fmt, fileobj.read(n_points))\n drawing.append({\"x\": list(x), \"y\": list(y)})\n\n return {\n \"key_id\": str(key_id),\n \"recognized\": recognized,\n \"timestamp\": datetime.fromtimestamp(timestamp),\n \"countrycode\": country_code.decode(\"utf-8\"),\n \"drawing\": drawing,\n }\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 163, "n_words": 63, "vocab_size": 49, "complexity": 2, "nloc": 20, "token_counts": 220, "n_ast_nodes": 365, "n_identifiers": 23, "d_id": 22068, "documentation": { "docstring": "\n Process a struct from a binary file object.\n\n The code for this function is borrowed from the following link:\n https://github.com/googlecreativelab/quickdraw-dataset/blob/f0f3beef0fc86393b3771cdf1fc94828b76bc89b/examples/binary_file_parser.py#L19\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 33, "language": "en" } }, { "id": 211029, "commit_id": "c84153a355d9855fe55cf51d203b8b24e7d884e5", "repo": "PaddleDetection", "path": "deploy/pptracking/python/mot/tracker/ocsort_tracker.py", "file_name": "ocsort_tracker.py", "fun_name": "convert_x_to_bbox", "commit_message": "[MOT] Add OC_SORT tracker (#6272)\n\n* add ocsort tracker\r\n\r\n* add ocsort deploy\r\n\r\n* merge develop\r\n\r\n* fix ocsort tracker codes\r\n\r\n* fix doc, test=document_fix\r\n\r\n* fix doc, test=document_fix", "code": "def convert_x_to_bbox(x, score=None):\n \n w = np.sqrt(x[2] * x[3])\n h = x[2] / w\n if (score == None):\n return np.array(\n [x[0] - w / 2., x[1] - h / 2., x[0] + w / 2.,\n x[1] + h / 2.]).reshape((1, 4))\n else:\n score = np.array([score])\n return np.array([\n x[0] - w / 2., x[1] - h / 2., x[0] + w / 2., x[1] + h / 2., score\n ]).reshape((1, 5))\n\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 146, "n_words": 69, "vocab_size": 31, "complexity": 2, "nloc": 12, "token_counts": 167, "n_ast_nodes": 233, "n_identifiers": 9, "d_id": 53004, "documentation": { "docstring": "\n Takes a bounding box in the centre form [x,y,s,r] and returns it in the form\n [x1,y1,x2,y2] where x1,y1 is the top left and x2,y2 is the bottom right\n ", "n_words": 28, "vocab_size": 21, "n_whitespaces": 40, "language": "en" } }, { "id": 272694, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/layers/merging/multiply.py", "file_name": "multiply.py", "fun_name": "multiply", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def multiply(inputs, **kwargs):\n \n return Multiply(**kwargs)(inputs)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 11, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 18, "n_ast_nodes": 32, "n_identifiers": 4, "d_id": 81038, "documentation": { "docstring": "Functional interface to the `Multiply` layer.\n\n Example:\n\n >>> x1 = np.arange(3.0)\n >>> x2 = np.arange(3.0)\n 
>>> tf.keras.layers.multiply([x1, x2])\n \n\n Usage in a functional model:\n\n >>> input1 = tf.keras.layers.Input(shape=(16,))\n >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1) #shape=(None, 8)\n >>> input2 = tf.keras.layers.Input(shape=(32,))\n >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2) #shape=(None, 8)\n >>> out = tf.keras.layers.multiply([x1,x2]) #shape=(None, 8)\n >>> out = tf.keras.layers.Dense(4)(out)\n >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)\n\n Args:\n inputs: A list of input tensors.\n **kwargs: Standard layer keyword arguments.\n\n Returns:\n A tensor, the element-wise product of the inputs.\n ", "n_words": 89, "vocab_size": 59, "n_whitespaces": 158, "language": "en" } }, { "id": 175928, "commit_id": "b97d27d2e916025f65fed751d54c089d4d4bd022", "repo": "autokeras", "path": "autokeras/auto_model.py", "file_name": "auto_model.py", "fun_name": "export_model", "commit_message": "clean up imports", "code": "def export_model(self):\n \n return self.tuner.get_best_model()\n", "url": "https://github.com/keras-team/autokeras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 14, "n_ast_nodes": 26, "n_identifiers": 4, "d_id": 41662, "documentation": { "docstring": "Export the best Keras Model.\n\n # Returns\n keras.Model instance. The best model found during the search, loaded\n with trained weights.\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 56, "language": "en" } }, { "id": 288840, "commit_id": "f23b1750e85f07091eb896a0b12b8f95e5646338", "repo": "core", "path": "homeassistant/components/homekit_controller/connection.py", "file_name": "connection.py", "fun_name": "async_remove_legacy_device_serial_numbers", "commit_message": "Migrate HomeKit Controller to use stable identifiers (#80064)", "code": "def async_remove_legacy_device_serial_numbers(self) -> None:\n \n _LOGGER.debug(\n \"Removing legacy serial numbers from device registry entries for pairing %s\",\n self.unique_id,\n )\n\n device_registry = dr.async_get(self.hass)\n for accessory in self.entity_map.accessories:\n identifiers = {\n (\n IDENTIFIER_ACCESSORY_ID,\n f\"{self.unique_id}:aid:{accessory.aid}\",\n )\n }\n legacy_serial_identifier = (\n IDENTIFIER_SERIAL_NUMBER,\n accessory.serial_number,\n )\n\n device = device_registry.async_get_device(identifiers=identifiers)\n if not device or legacy_serial_identifier not in device.identifiers:\n continue\n\n device_registry.async_update_device(device.id, new_identifiers=identifiers)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 300, "n_words": 53, "vocab_size": 41, "complexity": 4, "nloc": 27, "token_counts": 93, "n_ast_nodes": 160, "n_identifiers": 23, "d_id": 87989, "documentation": { "docstring": "Migrate remove legacy serial numbers from devices.\n\n We no longer use serial numbers as device identifiers\n since they are not reliable, and the HomeKit spec\n does not require them to be stable.\n ", "n_words": 32, "vocab_size": 29, "n_whitespaces": 60, "language": "en" } }, { "id": 181601, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tests/driver_tests.py", "file_name": "driver_tests.py", "fun_name": "test_print_args", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit 
bd9629c40e01241766197119b581a99409b07068.", "code": "def test_print_args(self):\n \n args_list = [\n 'tests/tests.csv',\n '-is', ','\n ]\n args = self.parser.parse_args(args_list)\n with captured_output() as (out, err):\n _print_args(args)\n output = out.getvalue()\n expected_output = \n self.assertEqual(_sort_lines(expected_output), _sort_lines(output))\n\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 115, "n_words": 25, "vocab_size": 22, "complexity": 1, "nloc": 38, "token_counts": 64, "n_ast_nodes": 115, "n_identifiers": 15, "d_id": 43390, "documentation": { "docstring": "Assert that _print_args prints correct values for all parameters in default settings.\nTPOT settings:\nCHECKPOINT_FOLDER = None\nCONFIG_FILE = None\nCROSSOVER_RATE = 0.1\nEARLY_STOP = None\nGENERATIONS = 100\nINPUT_FILE = tests/tests.csv\nINPUT_SEPARATOR = ,\nLOG = None\nMAX_EVAL_MINS = 5\nMAX_TIME_MINS = None\nMEMORY = None\nMUTATION_RATE = 0.9\nNUM_CV_FOLDS = 5\nNUM_JOBS = 1\nOFFSPRING_SIZE = 100\nOUTPUT_FILE = None\nPOPULATION_SIZE = 100\nRANDOM_STATE = None\nSCORING_FN = accuracy\nSUBSAMPLE = 1.0\nTARGET_NAME = class\nTEMPLATE = None\nTPOT_MODE = classification\nVERBOSITY = 1\n\n", "n_words": 86, "vocab_size": 51, "n_whitespaces": 348, "language": "en" } }, { "id": 100731, "commit_id": "afec52309326304f4323029039e49bfcf928ef43", "repo": "faceswap", "path": "lib/logger.py", "file_name": "logger.py", "fun_name": "crash_log", "commit_message": "Bugfixes:\n - Stats graph - Handle NaNs in data\n - logger - de-elevate matplotlib font messages", "code": "def crash_log():\n \n original_traceback = traceback.format_exc().encode(\"utf-8\")\n path = os.path.dirname(os.path.realpath(sys.argv[0]))\n filename = os.path.join(path, datetime.now().strftime(\"crash_report.%Y.%m.%d.%H%M%S%f.log\"))\n freeze_log = [line.encode(\"utf-8\") for line in _DEBUG_BUFFER]\n try:\n from lib.sysinfo import sysinfo # pylint:disable=import-outside-toplevel\n except Exception: # pylint:disable=broad-except\n sysinfo = (\"\\n\\nThere was an error importing System Information from lib.sysinfo. This is \"\n f\"probably a bug which should be fixed:\\n{traceback.format_exc()}\")\n with open(filename, \"wb\") as outfile:\n outfile.writelines(freeze_log)\n outfile.write(original_traceback)\n outfile.write(sysinfo.encode(\"utf-8\"))\n return filename\n\n\n_OLD_FACTORY = logging.getLogRecordFactory()\n\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 145, "n_words": 64, "vocab_size": 55, "complexity": 3, "nloc": 15, "token_counts": 127, "n_ast_nodes": 249, "n_identifiers": 29, "d_id": 20186, "documentation": { "docstring": " On a crash, write out the contents of :func:`_DEBUG_BUFFER` containing the last 100 lines\n of debug messages to a crash report in the root Faceswap folder.\n\n Returns\n -------\n str\n The filename of the file that contains the crash report\n ", "n_words": 39, "vocab_size": 30, "n_whitespaces": 62, "language": "en" } }, { "id": 242978, "commit_id": "46a80d144a16836af304a7aaa8e620962d91ac23", "repo": "Pillow", "path": "src/PIL/Image.py", "file_name": "Image.py", "fun_name": "remap_palette", "commit_message": "Update transparency when remapping the palette", "code": "def remap_palette(self, dest_map, source_palette=None):\n \n from . 
import ImagePalette\n\n if self.mode not in (\"L\", \"P\"):\n raise ValueError(\"illegal image mode\")\n\n if source_palette is None:\n if self.mode == \"P\":\n self.load()\n source_palette = self.im.getpalette(\"RGB\")[:768]\n else: # L-mode\n source_palette = bytearray(i // 3 for i in range(768))\n\n palette_bytes = b\"\"\n new_positions = [0] * 256\n\n # pick only the used colors from the palette\n for i, oldPosition in enumerate(dest_map):\n palette_bytes += source_palette[oldPosition * 3 : oldPosition * 3 + 3]\n new_positions[oldPosition] = i\n\n # replace the palette color id of all pixel with the new id\n\n # Palette images are [0..255], mapped through a 1 or 3\n # byte/color map. We need to remap the whole image\n # from palette 1 to palette 2. New_positions is\n # an array of indexes into palette 1. Palette 2 is\n # palette 1 with any holes removed.\n\n # We're going to leverage the convert mechanism to use the\n # C code to remap the image from palette 1 to palette 2,\n # by forcing the source image into 'L' mode and adding a\n # mapping 'L' mode palette, then converting back to 'L'\n # sans palette thus converting the image bytes, then\n # assigning the optimized RGB palette.\n\n # perf reference, 9500x4000 gif, w/~135 colors\n # 14 sec prepatch, 1 sec postpatch with optimization forced.\n\n mapping_palette = bytearray(new_positions)\n\n m_im = self.copy()\n m_im.mode = \"P\"\n\n m_im.palette = ImagePalette.ImagePalette(\"RGB\", palette=mapping_palette * 3)\n # possibly set palette dirty, then\n # m_im.putpalette(mapping_palette, 'L') # converts to 'P'\n # or just force it.\n # UNDONE -- this is part of the general issue with palettes\n m_im.im.putpalette(\"RGB;L\", m_im.palette.tobytes())\n\n m_im = m_im.convert(\"L\")\n\n # Internally, we require 768 bytes for a palette.\n new_palette_bytes = palette_bytes + (768 - len(palette_bytes)) * b\"\\x00\"\n m_im.putpalette(new_palette_bytes)\n m_im.palette = ImagePalette.ImagePalette(\"RGB\", palette=palette_bytes)\n\n if \"transparency\" in self.info:\n m_im.info[\"transparency\"] = new_positions[self.info[\"transparency\"]]\n\n return m_im\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 680, "n_words": 299, "vocab_size": 177, "complexity": 6, "nloc": 27, "token_counts": 231, "n_ast_nodes": 425, "n_identifiers": 27, "d_id": 69947, "documentation": { "docstring": "\n Rewrites the image to reorder the palette.\n\n :param dest_map: A list of indexes into the original palette.\n e.g. 
``[1,0]`` would swap a two item palette, and ``list(range(256))``\n is the identity transform.\n :param source_palette: Bytes or None.\n :returns: An :py:class:`~PIL.Image.Image` object.\n\n ", "n_words": 40, "vocab_size": 35, "n_whitespaces": 97, "language": "en" } }, { "id": 242437, "commit_id": "a0e1fde1eddf45f26653e2ff6080d31e177adbec", "repo": "Pillow", "path": "src/PIL/ImageFile.py", "file_name": "ImageFile.py", "fun_name": "decode", "commit_message": "Added PyEncoder", "code": "def decode(self, buffer):\n \n raise NotImplementedError()\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 22, "n_identifiers": 4, "d_id": 69859, "documentation": { "docstring": "\n Override to perform the decoding process.\n\n :param buffer: A bytes object with the data to be decoded.\n :returns: A tuple of ``(bytes consumed, errcode)``.\n If finished with decoding return 0 for the bytes consumed.\n Err codes are from :data:`.ImageFile.ERRORS`.\n ", "n_words": 39, "vocab_size": 32, "n_whitespaces": 90, "language": "en" } }, { "id": 139970, "commit_id": "55d039af320caaab7fe11d404585bd402e66d393", "repo": "ray", "path": "rllib/utils/annotations.py", "file_name": "annotations.py", "fun_name": "DeveloperAPI", "commit_message": "Annotate datasources and add API annotation check script (#24999)\n\nWhy are these changes needed?\r\nAdd API stability annotations for datasource classes, and add a linter to check all data classes have appropriate annotations.", "code": "def DeveloperAPI(obj):\n \n\n _mark_annotated(obj)\n return obj\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 14, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 3, "token_counts": 12, "n_ast_nodes": 23, "n_identifiers": 3, "d_id": 31815, "documentation": { "docstring": "Decorator for documenting developer APIs.\n\n Developer APIs are classes and methods explicitly exposed to developers\n for the purposes of building custom algorithms or advanced training\n strategies on top of RLlib internals. You can generally expect these APIs\n to be stable sans minor changes (but less stable than public APIs).\n\n Subclasses that inherit from a ``@DeveloperAPI`` base class can be\n assumed part of the RLlib developer API as well.\n\n Examples:\n >>> # Indicates that the `TorchPolicy` class is exposed to end users\n >>> # of RLlib and will remain (relatively) stable across RLlib\n >>> # releases.\n >>> from ray.rllib.policy import Policy\n >>> @DeveloperAPI # doctest: +SKIP\n ... class TorchPolicy(Policy): # doctest: +SKIP\n ... ... 
# doctest: +SKIP\n ", "n_words": 116, "vocab_size": 78, "n_whitespaces": 193, "language": "en" } }, { "id": 50724, "commit_id": "a6790a651a12eb391060e533868bf0ba197f6f7e", "repo": "PaddleHub", "path": "modules/image/text_to_image/stable_diffusion/diffusers/models/resnet.py", "file_name": "resnet.py", "fun_name": "upsample_2d", "commit_message": "Add stable diffusion module", "code": "def upsample_2d(x, k=None, factor=2, gain=1):\n r\n assert isinstance(factor, int) and factor >= 1\n if k is None:\n k = [1] * factor\n\n k = np.asarray(k, dtype=np.float32)\n if k.ndim == 1:\n k = np.outer(k, k)\n k /= np.sum(k)\n\n k = k * (gain * (factor**2))\n p = k.shape[0] - factor\n return upfirdn2d_native(x, paddle.to_tensor(k), up=factor, pad=((p + 1) // 2 + factor - 1, p // 2))\n\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 105, "n_words": 65, "vocab_size": 45, "complexity": 4, "nloc": 27, "token_counts": 130, "n_ast_nodes": 209, "n_identifiers": 21, "d_id": 10204, "documentation": { "docstring": "Upsample2D a batch of 2D images with the given filter.\n\n Args:\n Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and upsamples each image with the given\n filter. The filter is normalized so that if the input pixels are constant, they will be scaled by the specified\n `gain`. Pixels outside the image are assumed to be zero, and the filter is padded with zeros so that its shape is a:\n multiple of the upsampling factor.\n x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W,\n C]`.\n k: FIR filter of the shape `[firH, firW]` or `[firN]`\n (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling.\n factor: Integer upsampling factor (default: 2). 
gain: Scaling factor for signal magnitude (default: 1.0).\n\n Returns:\n Tensor of the shape `[N, C, H * factor, W * factor]`\n ", "n_words": 148, "vocab_size": 89, "n_whitespaces": 215, "language": "en" } }, { "id": 251362, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "mitmproxy/flow.py", "file_name": "flow.py", "fun_name": "revert", "commit_message": "make it black!", "code": "def revert(self):\n \n if self._backup:\n self.set_state(self._backup)\n self._backup = None\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 44, "n_words": 8, "vocab_size": 8, "complexity": 2, "nloc": 4, "token_counts": 24, "n_ast_nodes": 42, "n_identifiers": 4, "d_id": 73697, "documentation": { "docstring": "\n Revert to the last backed up state.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 260492, "commit_id": "1ac8ea14847cad8bec5ac49a01013beef4361f79", "repo": "scikit-learn", "path": "sklearn/metrics/cluster/_supervised.py", "file_name": "_supervised.py", "fun_name": "homogeneity_completeness_v_measure", "commit_message": "DOC Ensure homogeneity_completeness_v_measure passes numpydoc validation (#23942)", "code": "def homogeneity_completeness_v_measure(labels_true, labels_pred, *, beta=1.0):\n \n labels_true, labels_pred = check_clusterings(labels_true, labels_pred)\n\n if len(labels_true) == 0:\n return 1.0, 1.0, 1.0\n\n entropy_C = entropy(labels_true)\n entropy_K = entropy(labels_pred)\n\n contingency = contingency_matrix(labels_true, labels_pred, sparse=True)\n MI = mutual_info_score(None, None, contingency=contingency)\n\n homogeneity = MI / (entropy_C) if entropy_C else 1.0\n completeness = MI / (entropy_K) if entropy_K else 1.0\n\n if homogeneity + completeness == 0.0:\n v_measure_score = 0.0\n else:\n v_measure_score = (\n (1 + beta)\n * homogeneity\n * completeness\n / (beta * homogeneity + completeness)\n )\n\n return homogeneity, completeness, v_measure_score\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 191, "n_words": 83, "vocab_size": 48, "complexity": 5, "nloc": 20, "token_counts": 151, "n_ast_nodes": 205, "n_identifiers": 17, "d_id": 76288, "documentation": { "docstring": "Compute the homogeneity and completeness and V-Measure scores at once.\n\n Those metrics are based on normalized conditional entropy measures of\n the clustering labeling to evaluate given the knowledge of a Ground\n Truth class labels of the same samples.\n\n A clustering result satisfies homogeneity if all of its clusters\n contain only data points which are members of a single class.\n\n A clustering result satisfies completeness if all the data points\n that are members of a given class are elements of the same cluster.\n\n Both scores have positive values between 0.0 and 1.0, larger values\n being desirable.\n\n Those 3 metrics are independent of the absolute values of the labels:\n a permutation of the class or cluster label values won't change the\n score values in any way.\n\n V-Measure is furthermore symmetric: swapping ``labels_true`` and\n ``label_pred`` will give the same score. This does not hold for\n homogeneity and completeness. 
V-Measure is identical to\n :func:`normalized_mutual_info_score` with the arithmetic averaging\n method.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n labels_true : int array, shape = [n_samples]\n Ground truth class labels to be used as a reference.\n\n labels_pred : array-like of shape (n_samples,)\n Gluster labels to evaluate.\n\n beta : float, default=1.0\n Ratio of weight attributed to ``homogeneity`` vs ``completeness``.\n If ``beta`` is greater than 1, ``completeness`` is weighted more\n strongly in the calculation. If ``beta`` is less than 1,\n ``homogeneity`` is weighted more strongly.\n\n Returns\n -------\n homogeneity : float\n Score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling.\n\n completeness : float\n Score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling.\n\n v_measure : float\n Harmonic mean of the first two.\n\n See Also\n --------\n homogeneity_score : Homogeneity metric of cluster labeling.\n completeness_score : Completeness metric of cluster labeling.\n v_measure_score : V-Measure (NMI with arithmetic mean option).\n ", "n_words": 292, "vocab_size": 166, "n_whitespaces": 457, "language": "en" } }, { "id": 113538, "commit_id": "8f454f3bf29e2c3cd0d359231a46edd8ee768d42", "repo": "nni", "path": "nni/mutable/symbol.py", "file_name": "symbol.py", "fun_name": "leaf_symbols", "commit_message": "Mutable V3 (Stage 2) - Symbolic execution engine (#5195)", "code": "def leaf_symbols(self) -> Iterable[Symbol]:\n \n for arg in self.arguments:\n if isinstance(arg, SymbolicExpression):\n yield from arg.leaf_symbols()\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 54, "n_words": 14, "vocab_size": 14, "complexity": 3, "nloc": 10, "token_counts": 33, "n_ast_nodes": 54, "n_identifiers": 8, "d_id": 24940, "documentation": { "docstring": "\n Return a generator of all leaf symbols.\n\n Useful for when you want to inspect when the symbols come from.\n No deduplication even if the symbols has duplicates.\n ", "n_words": 27, "vocab_size": 24, "n_whitespaces": 56, "language": "en" } }, { "id": 64926, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/doctype/payment_request/payment_request.py", "file_name": "payment_request.py", "fun_name": "get_existing_payment_request_amount", "commit_message": "style: format code with black", "code": "def get_existing_payment_request_amount(ref_dt, ref_dn):\n\t\n\texisting_payment_request_amount = frappe.db.sql(\n\t\t,\n\t\t(ref_dt, ref_dn),\n\t)\n\treturn flt(existing_payment_request_amount[0][0]) if existing_payment_request_amount else 0\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 10, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 16, "token_counts": 40, "n_ast_nodes": 62, "n_identifiers": 8, "d_id": 13755, "documentation": { "docstring": "\n\tGet the existing payment request which are unpaid or partially paid for payment channel other than Phone\n\tand get the summation of existing paid payment request for Phone payment channel.\n\t\n\t\tselect sum(grand_total)\n\t\tfrom `tabPayment Request`\n\t\twhere\n\t\t\treference_doctype = %s\n\t\t\tand reference_name = %s\n\t\t\tand docstatus = 1\n\t\t\tand (status != 'Paid'\n\t\t\tor (payment_channel = 'Phone'\n\t\t\t\tand status = 'Paid'))\n\t", "n_words": 59, "vocab_size": 40, 
"n_whitespaces": 48, "language": "en" } }, { "id": 166613, "commit_id": "244f747bb63f45c1c439193f0672c6162853b168", "repo": "pandas", "path": "pandas/core/series.py", "file_name": "series.py", "fun_name": "argsort", "commit_message": "make series axis parameter docs consistent (#47109)\n\n* make series docs consistent\r\n\r\nadd series unused param info to DF docs\r\n\r\n* fix trailing whitespace\r\n\r\n* fix docs build\r\n\r\n* add unused\r\n\r\n* add or update docs for all series methods\r\n\r\n* small fix\r\n\r\n* fix line length\r\n\r\n* fix param order\r\n\r\n* fix param order\r\n\r\n* add\r\n\r\n* add backticks to None and fix space\r\n\r\nCo-authored-by: uncjackg ", "code": "def argsort(self, axis=0, kind=\"quicksort\", order=None) -> Series:\n \n values = self._values\n mask = isna(values)\n\n if mask.any():\n result = np.full(len(self), -1, dtype=np.intp)\n notmask = ~mask\n result[notmask] = np.argsort(values[notmask], kind=kind)\n else:\n result = np.argsort(values, kind=kind)\n\n res = self._constructor(result, index=self.index, name=self.name, dtype=np.intp)\n return res.__finalize__(self, method=\"argsort\")\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 134, "n_words": 41, "vocab_size": 32, "complexity": 2, "nloc": 37, "token_counts": 131, "n_ast_nodes": 203, "n_identifiers": 24, "d_id": 39842, "documentation": { "docstring": "\n Return the integer indices that would sort the Series values.\n\n Override ndarray.argsort. Argsorts the value, omitting NA/null values,\n and places the result in the same locations as the non-NA values.\n\n Parameters\n ----------\n axis : {0 or 'index'}\n Unused. Parameter needed for compatibility with DataFrame.\n kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort'\n Choice of sorting algorithm. See :func:`numpy.sort` for more\n information. 
'mergesort' and 'stable' are the only stable algorithms.\n order : None\n Has no effect but is accepted for compatibility with numpy.\n\n Returns\n -------\n Series[np.intp]\n Positions of values within the sort order with -1 indicating\n nan values.\n\n See Also\n --------\n numpy.ndarray.argsort : Returns the indices that would sort this array.\n ", "n_words": 110, "vocab_size": 82, "n_whitespaces": 282, "language": "en" } }, { "id": 317428, "commit_id": "d989e4373d576c403790c9a7e5eb7a29d08e3c47", "repo": "core", "path": "tests/components/androidtv/test_media_player.py", "file_name": "test_media_player.py", "fun_name": "test_get_image_disabled", "commit_message": "Remove websocket_api send_big_result (#75452)", "code": "async def test_get_image_disabled(hass):\n \n patch_key, entity_id, config_entry = _setup(CONFIG_ANDROIDTV_DEFAULT)\n config_entry.add_to_hass(hass)\n hass.config_entries.async_update_entry(\n config_entry, options={CONF_SCREENCAP: False}\n )\n\n with patchers.patch_connect(True)[patch_key], patchers.patch_shell(\n SHELL_RESPONSE_OFF\n )[patch_key]:\n assert await hass.config_entries.async_setup(config_entry.entry_id)\n await hass.async_block_till_done()\n\n with patchers.patch_shell(\"11\")[patch_key]:\n await async_update_entity(hass, entity_id)\n\n media_player_name = \"media_player.\" + slugify(\n CONFIG_ANDROIDTV_DEFAULT[TEST_ENTITY_NAME]\n )\n state = hass.states.get(media_player_name)\n assert \"entity_picture_local\" not in state.attributes\n assert \"entity_picture\" not in state.attributes\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 130, "n_words": 49, "vocab_size": 38, "complexity": 1, "nloc": 19, "token_counts": 130, "n_ast_nodes": 218, "n_identifiers": 27, "d_id": 115995, "documentation": { "docstring": "Test that the screencap option can disable entity_picture.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 19575, "commit_id": "3387881a6d4fc2d8bdc0f05c484cb2f7222acfb8", "repo": "pipenv", "path": "pipenv/utils/shell.py", "file_name": "shell.py", "fun_name": "find_python", "commit_message": "Code reorg utils into utils module reduces complexity (#4990)\n\n* Split apart the massive utils.py into a utils module", "code": "def find_python(finder, line=None):\n \n\n if line and not isinstance(line, str):\n raise TypeError(\n f\"Invalid python search type: expected string, received {line!r}\"\n )\n if line and os.path.isabs(line):\n if os.name == \"nt\":\n line = make_posix(line)\n return line\n if not finder:\n from pipenv.vendor.pythonfinder import Finder\n finder = Finder(global_search=True)\n if not line:\n result = next(iter(finder.find_all_python_versions()), None)\n elif line and line[0].isdigit() or re.match(r'[\\d\\.]+', line):\n result = finder.find_python_version(line)\n else:\n result = finder.find_python_version(name=line)\n if not result:\n result = finder.which(line)\n if not result and not line.startswith(\"python\"):\n line = f\"python{line}\"\n result = find_python(finder, line)\n\n if result:\n if not isinstance(result, str):\n return result.path.as_posix()\n return result\n return\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 258, "n_words": 94, "vocab_size": 53, "complexity": 16, "nloc": 28, "token_counts": 194, "n_ast_nodes": 326, "n_identifiers": 27, "d_id": 3027, "documentation": { "docstring": "\n 
Given a `pythonfinder.Finder` instance and an optional line, find a corresponding python\n\n :param finder: A :class:`pythonfinder.Finder` instance to use for searching\n :type finder: :class:pythonfinder.Finder`\n :param str line: A version, path, name, or nothing, defaults to None\n :return: A path to python\n :rtype: str\n ", "n_words": 43, "vocab_size": 33, "n_whitespaces": 65, "language": "en" } }, { "id": 163656, "commit_id": "3510b1fd2a9cf752638f4af751bdeb33496db766", "repo": "pandas", "path": "pandas/tests/series/indexing/test_setitem.py", "file_name": "test_setitem.py", "fun_name": "val", "commit_message": "BUG: setting pd.NA into Series casts to object (#45431)", "code": "def val(self, request):\n \n return request.param\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 21, "n_identifiers": 4, "d_id": 39479, "documentation": { "docstring": "\n NA values that should generally be valid_na for *all* dtypes.\n\n Include both python float NaN and np.float64; only np.float64 has a\n `dtype` attribute.\n ", "n_words": 23, "vocab_size": 23, "n_whitespaces": 52, "language": "en" } }, { "id": 269614, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "arange", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def arange(start, stop=None, step=1, dtype=\"int32\"):\n \n # Match the behavior of numpy and Theano by returning an empty sequence.\n if stop is None and start < 0:\n start = 0\n result = tf.range(start, limit=stop, delta=step, name=\"arange\")\n if dtype != \"int32\":\n result = cast(result, dtype)\n return result\n\n\n@keras_export(\"keras.backend.tile\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.backend.tile\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 77, "n_words": 48, "vocab_size": 41, "complexity": 4, "nloc": 7, "token_counts": 65, "n_ast_nodes": 134, "n_identifiers": 18, "d_id": 80233, "documentation": { "docstring": "Creates a 1D tensor containing a sequence of integers.\n\n The function arguments use the same convention as\n Theano's arange: if only one argument is provided,\n it is in fact the \"stop\" argument and \"start\" is 0.\n\n The default type of the returned tensor is `'int32'` to\n match TensorFlow's default.\n\n Args:\n start: Start value.\n stop: Stop value.\n step: Difference between two successive values.\n dtype: Integer dtype to use.\n\n Returns:\n An integer tensor.\n\n Example:\n\n >>> tf.keras.backend.arange(start=0, stop=10, step=1.5)\n \n\n\n\n ", "n_words": 91, "vocab_size": 77, "n_whitespaces": 178, "language": "en" } }, { "id": 248583, "commit_id": "fe1daad67237c2154a3d8d8cdf6c603f0d33682e", "repo": "synapse", "path": "tests/util/test_macaroons.py", "file_name": "test_macaroons.py", "fun_name": "test_guest_access_token", "commit_message": "Move the \"email unsubscribe\" resource, refactor the macaroon generator & simplify the access token verification logic. 
(#12986)\n\nThis simplifies the access token verification logic by removing the `rights`\r\nparameter which was only ever used for the unsubscribe link in email\r\nnotifications. The latter has been moved under the `/_synapse` namespace,\r\nsince it is not a standard API.\r\n\r\nThis also makes the email verification link more secure, by embedding the\r\napp_id and pushkey in the macaroon and verifying it. This prevents the user\r\nfrom tampering the query parameters of that unsubscribe link.\r\n\r\nMacaroon generation is refactored:\r\n\r\n- Centralised all macaroon generation and verification logic to the\r\n `MacaroonGenerator`\r\n- Moved to `synapse.utils`\r\n- Changed the constructor to require only a `Clock`, hostname, and a secret key\r\n (instead of a full `Homeserver`).\r\n- Added tests for all methods.", "code": "def test_guest_access_token(self):\n \n token = self.macaroon_generator.generate_guest_access_token(\"@user:tesths\")\n user_id = self.macaroon_generator.verify_guest_token(token)\n self.assertEqual(user_id, \"@user:tesths\")\n\n # Raises with another secret key\n with self.assertRaises(MacaroonVerificationFailedException):\n self.other_macaroon_generator.verify_guest_token(token)\n\n # Check that an old access token without the guest caveat does not work\n macaroon = self.macaroon_generator._generate_base_macaroon(\"access\")\n macaroon.add_first_party_caveat(f\"user_id = {user_id}\")\n macaroon.add_first_party_caveat(\"nonce = 0123456789abcdef\")\n token = macaroon.serialize()\n\n with self.assertRaises(MacaroonVerificationFailedException):\n self.macaroon_generator.verify_guest_token(token)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 154, "n_words": 48, "vocab_size": 36, "complexity": 1, "nloc": 12, "token_counts": 96, "n_ast_nodes": 177, "n_identifiers": 15, "d_id": 72364, "documentation": { "docstring": "Test the generation and verification of guest access tokens", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 13330, "commit_id": "f5a362f0ffc5070c104c840ab7833689d39b7bdb", "repo": "jina", "path": "jina/orchestrate/pods/container_helper.py", "file_name": "container_helper.py", "fun_name": "get_docker_network", "commit_message": "chore: add pragma no cover to TYPE_CHECKING branch (#5299)", "code": "def get_docker_network(client) -> Optional[str]:\n \n import docker\n\n if TYPE_CHECKING: # pragma: no cover\n from docker.models.containers import Container\n\n container: 'Container' = None\n try:\n hostname = socket.gethostname()\n container = client.containers.get(hostname)\n except docker.errors.NotFound:\n try:\n # https://stackoverflow.com/a/52988227/15683245\n with open('/proc/1/cpuset') as f:\n hostname = os.path.basename(f.read().rstrip())\n container = client.containers.get(hostname)\n except Exception:\n return None\n try:\n networks = container.attrs['NetworkSettings']['Networks']\n if networks:\n net_mode = list(networks.keys())[0]\n return networks[net_mode]['NetworkID']\n else:\n return None\n except Exception:\n return None\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 243, "n_words": 64, "vocab_size": 41, "complexity": 6, "nloc": 32, "token_counts": 144, "n_ast_nodes": 249, "n_identifiers": 29, "d_id": 2601, "documentation": { "docstring": "Do a best-effort guess if the caller is already in a docker network\n\n Check if 
`hostname` exists in list of docker containers.\n If a container is found, check its network id\n\n :param client: docker client object\n :return: network id if exists\n ", "n_words": 41, "vocab_size": 29, "n_whitespaces": 56, "language": "en" } }, { "id": 228639, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/bar/_error_x.py", "file_name": "_error_x.py", "fun_name": "valueminus", "commit_message": "switch to black .22", "code": "def valueminus(self):\n \n return self[\"valueminus\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 60312, "documentation": { "docstring": "\n Sets the value of either the percentage (if `type` is set to\n \"percent\") or the constant (if `type` is set to \"constant\")\n corresponding to the lengths of the error bars in the bottom\n (left) direction for vertical (horizontal) bars\n\n The 'valueminus' property is a number and may be specified as:\n - An int or float in the interval [0, inf]\n\n Returns\n -------\n int|float\n ", "n_words": 63, "vocab_size": 46, "n_whitespaces": 136, "language": "en" } }, { "id": 159357, "commit_id": "dc762814317ce46873a5226ee09033031a7d3604", "repo": "rasa", "path": "rasa/nlu/classifiers/logistic_regression_classifier.py", "file_name": "logistic_regression_classifier.py", "fun_name": "required_packages", "commit_message": "Add Logistic Regression to our NLU classifiers. (#10650)\n\n* added-logistic-regression\r\n\r\n* added\r\n\r\n* d0h! gotta copy the imports correctly\r\n\r\n* run black\r\n\r\n* black issues fixed\r\n\r\n* stash\r\n\r\n* added tolerance hyperparam\r\n\r\n* added random seed\r\n\r\n* fixed testing path\r\n\r\n* ran black\r\n\r\n* use joblib directly\r\n\r\n* insurance against sklearn changes\r\n\r\n* added try except\r\n\r\n* ran black\r\n\r\n* make code more DRY\r\n\r\n* flake8\r\n\r\n* added type information\r\n\r\n* add train -> persists -> load -> load\r\n\r\n* add to test_train.py\r\n\r\n* fixed style issues\r\n\r\n* actually persist model\r\n\r\n* persist, i insist\r\n\r\n* fixed-bug\r\n\r\n* added-documentation\r\n\r\n* black\r\n\r\n* added changelog\r\n\r\n* added\r\n\r\n* moar-whitespace\r\n\r\n* removed stale param\r\n\r\n* added comments", "code": "def required_packages() -> List[Text]:\n \n return [\"sklearn\"]\n", "url": "https://github.com/RasaHQ/rasa.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 14, "n_ast_nodes": 27, "n_identifiers": 3, "d_id": 38212, "documentation": { "docstring": "Any extra python dependencies required for this component to run.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 67625, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/doctype/item/item.py", "file_name": "item.py", "fun_name": "get_uom_conv_factor", "commit_message": "style: format code with black", "code": "def get_uom_conv_factor(uom, stock_uom):\n\t\n\tif uom == stock_uom:\n\t\treturn 1.0\n\n\tfrom_uom, to_uom = uom, stock_uom # renaming for readability\n\n\texact_match = frappe.db.get_value(\n\t\t\"UOM Conversion Factor\", {\"to_uom\": to_uom, \"from_uom\": from_uom}, [\"value\"], 
as_dict=1\n\t)\n\tif exact_match:\n\t\treturn exact_match.value\n\n\tinverse_match = frappe.db.get_value(\n\t\t\"UOM Conversion Factor\", {\"to_uom\": from_uom, \"from_uom\": to_uom}, [\"value\"], as_dict=1\n\t)\n\tif inverse_match:\n\t\treturn 1 / inverse_match.value\n\n\t# This attempts to try and get conversion from intermediate UOM.\n\t# case:\n\t# \t\t\t g -> mg = 1000\n\t# \t\t\t g -> kg = 0.001\n\t# therefore\t kg -> mg = 1000 / 0.001 = 1,000,000\n\tintermediate_match = frappe.db.sql(\n\t\t,\n\t\t{\"to_uom\": to_uom, \"from_uom\": from_uom},\n\t\tas_dict=1,\n\t)\n\n\tif intermediate_match:\n\t\treturn intermediate_match[0].value\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 82, "n_words": 105, "vocab_size": 64, "complexity": 5, "nloc": 30, "token_counts": 131, "n_ast_nodes": 229, "n_identifiers": 15, "d_id": 14581, "documentation": { "docstring": "Get UOM conversion factor from uom to stock_uom\n\te.g. uom = \"Kg\", stock_uom = \"Gram\" then returns 1000.0\n\t\n\t\t\tselect (first.value / second.value) as value\n\t\t\tfrom `tabUOM Conversion Factor` first\n\t\t\tjoin `tabUOM Conversion Factor` second\n\t\t\t\ton first.from_uom = second.from_uom\n\t\t\twhere\n\t\t\t\tfirst.to_uom = %(to_uom)s\n\t\t\t\tand second.to_uom = %(from_uom)s\n\t\t\tlimit 1\n\t\t\t", "n_words": 48, "vocab_size": 38, "n_whitespaces": 38, "language": "en" } }, { "id": 269422, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/applications/resnet.py", "file_name": "resnet.py", "fun_name": "stack1", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def stack1(x, filters, blocks, stride1=2, name=None):\n \n x = block1(x, filters, stride=stride1, name=name + \"_block1\")\n for i in range(2, blocks + 1):\n x = block1(\n x, filters, conv_shortcut=False, name=name + \"_block\" + str(i)\n )\n return x\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 72, "n_words": 35, "vocab_size": 26, "complexity": 2, "nloc": 7, "token_counts": 73, "n_ast_nodes": 109, "n_identifiers": 12, "d_id": 80070, "documentation": { "docstring": "A set of stacked residual blocks.\n\n Args:\n x: input tensor.\n filters: integer, filters of the bottleneck layer in a block.\n blocks: integer, blocks in the stacked blocks.\n stride1: default 2, stride of the first layer in the first block.\n name: string, stack label.\n\n Returns:\n Output tensor for the stacked blocks.\n ", "n_words": 50, "vocab_size": 34, "n_whitespaces": 89, "language": "en" } }, { "id": 199955, "commit_id": "f68e8de4252200cfc74b9433d00f77c4510ac68d", "repo": "sympy", "path": "sympy/core/facts.py", "file_name": "facts.py", "fun_name": "print_rules", "commit_message": "refactor", "code": "def print_rules(self) -> Iterator[str]:\n \n yield from self._defined_facts_lines()\n yield ''\n yield ''\n yield from self._full_implications_lines()\n yield ''\n yield ''\n yield from self._prereq_lines()\n yield ''\n yield ''\n yield from self._beta_rules_lines()\n yield ''\n yield ''\n yield \"generated_assumptions = {'defined_facts': defined_facts, 'full_implications': full_implications,\"\n yield \" 'prereq': prereq, 'beta_rules': beta_rules, 'beta_triggers': beta_triggers}\"\n yield ''\n yield ''\n\n", "url": "https://github.com/sympy/sympy.git", "language": 
"Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 184, "n_words": 51, "vocab_size": 24, "complexity": 1, "nloc": 18, "token_counts": 63, "n_ast_nodes": 140, "n_identifiers": 8, "d_id": 49448, "documentation": { "docstring": " Returns a generator with lines to represent the facts and rules ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 12, "language": "en" } }, { "id": 101239, "commit_id": "5e73437be47f2410439a3c6716de96354e6a0c94", "repo": "faceswap", "path": "plugins/extract/align/_base.py", "file_name": "_base.py", "fun_name": "_process_input", "commit_message": "lib.align updates:\n - alignments.py\n - Add typed dicts for imported alignments\n - Explicitly check for presence of thumb value in alignments dict\n - linting\n - detected_face.py\n - Typing\n - Linting\n - Legacy support for pre-aligned face\n - Update dependencies to new property names", "code": "def _process_input(self, batch):\n \n if not self._additional_keys:\n existing_keys = list(batch.keys())\n\n original_boxes = np.array([(face.left, face.top, face.width, face.height)\n for face in batch[\"detected_faces\"]])\n adjusted_boxes = self._get_adjusted_boxes(original_boxes)\n retval = {}\n for bounding_boxes in adjusted_boxes:\n for face, box in zip(batch[\"detected_faces\"], bounding_boxes):\n face.left, face.top, face.width, face.height = box\n\n result = self.process_input(batch)\n if not self._additional_keys:\n self._additional_keys = [key for key in result if key not in existing_keys]\n for key in self._additional_keys:\n retval.setdefault(key, []).append(batch[key])\n del batch[key]\n\n # Place the original bounding box back to detected face objects\n for face, box in zip(batch[\"detected_faces\"], original_boxes):\n face.left, face.top, face.width, face.height = box\n\n batch.update(retval)\n return batch\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 323, "n_words": 93, "vocab_size": 53, "complexity": 10, "nloc": 20, "token_counts": 205, "n_ast_nodes": 314, "n_identifiers": 27, "d_id": 20659, "documentation": { "docstring": " Process the input to the aligner model multiple times based on the user selected\n `re-feed` command line option. 
This adjusts the bounding box for the face to be fed\n into the model by a random amount within 0.05 pixels of the detected face's shortest axis.\n\n References\n ----------\n https://studios.disneyresearch.com/2020/06/29/high-resolution-neural-face-swapping-for-visual-effects/\n\n Parameters\n ----------\n batch: dict\n Contains the batch that is currently being passed through the plugin process\n\n Returns\n -------\n dict\n The batch with input processed\n ", "n_words": 72, "vocab_size": 58, "n_whitespaces": 179, "language": "en" } }, { "id": 279867, "commit_id": "47a4cfe06faf54e271ab50e6d0aae73b06a35f86", "repo": "keras", "path": "keras/engine/training.py", "file_name": "training.py", "fun_name": "reduce_per_replica", "commit_message": "Update training.py", "code": "def reduce_per_replica(values, strategy, reduction):\n \n\n if reduction == \"auto\":\n reduction = \"first\" if backend.is_tpu_strategy(strategy) else \"sum\"\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 28, "n_words": 15, "vocab_size": 13, "complexity": 3, "nloc": 5, "token_counts": 40, "n_ast_nodes": 49, "n_identifiers": 6, "d_id": 83158, "documentation": { "docstring": "Attempt to reduce the structure `values` to single values.\n\n Given `values` (a `tf.Tensor` or a `PerReplica` structure),\n which represents the values across all the replicas, `reduce_per_replica`\n attempts to \"reduce\" those values and returns the corresponding structure\n that represents only single values.\n\n Currently, `reduce_per_replica` is only used for reducing the metric results\n from `tf.distribute.Strategy.run()`. Depending on the underlying\n `Strategy` implementation, `values` may be a `PerReplica` object,\n which can be thought of as a collection of values across the replicas,\n or a `tf.Tensor`, if the strategy has already conducted the reduction\n for the downstream library.\n\n There are five possible outcomes of reduction:\n\n 1) if the `values` is a structure of simple `tf.Tensor`s, meaning that\n reduction is not actually needed, `reduce_per_replica` returns the\n structure as-is.\n 2) else, if `reduction=\"auto\"`, then it assumes \"first\" if running\n under `TPUStrategy`, and \"sum\" otherwise. This should only be used\n for training cases (`fit()`).\n 3) else, if `reduction=\"first\"`, then `reduce_per_replica`\n returns the values of the first replica. This is used in the case of\n training and evaluation, where `values` is expected to hold the same\n value across the replicas as a result of `Strategy`'s synchronization\n across the replicas.\n `reduce_per_replica` does not synchronize the values.\n 4) else, if `reduction=\"sum\"`, then `reduce_per_replica` returns the sum\n of values for all replicas. This is used in the custom training loop\n case, where each replica contain different values which are not\n synchronized.\n 5) else, if `reduction=\"concat\"`, then `reduce_per_replica`\n returns the concatenation of the values across the replicas, along the\n axis of dimension 0. This is used in the inference case (`predict()`).\n\n Args:\n values: Structure of `PerReplica` objects or `tf.Tensor`s. 
`tf.Tensor`s\n are returned as-is.\n strategy: `tf.distribute.Strategy` object.\n reduction: One of `\"auto\"`, `\"first\"`, `\"concat\"`, or `\"sum\"`.\n `\"auto\"` will select `\"first\"` when used under a TPUStrategy, or\n `\"sum\"` otherwise.\n\n Returns:\n Structure of `Tensor`s, representing the result of reduction.\n\n Raises:\n ValueError: if the reduction method is not supported.\n ", "n_words": 311, "vocab_size": 161, "n_whitespaces": 502, "language": "en" } }, { "id": 260010, "commit_id": "f5871a39f445d84b55c5d7897c875a86d590408e", "repo": "scikit-learn", "path": "sklearn/metrics/_ranking.py", "file_name": "_ranking.py", "fun_name": "auc", "commit_message": "DOC Ensures that sklearn.metrics._ranking.auc passes numpydoc validation (#23433)", "code": "def auc(x, y):\n \n check_consistent_length(x, y)\n x = column_or_1d(x)\n y = column_or_1d(y)\n\n if x.shape[0] < 2:\n raise ValueError(\n \"At least 2 points are needed to compute area under curve, but x.shape = %s\"\n % x.shape\n )\n\n direction = 1\n dx = np.diff(x)\n if np.any(dx < 0):\n if np.all(dx <= 0):\n direction = -1\n else:\n raise ValueError(\"x is neither increasing nor decreasing : {}.\".format(x))\n\n area = direction * np.trapz(y, x)\n if isinstance(area, np.memmap):\n # Reductions such as .sum used internally in np.trapz do not return a\n # scalar by default for numpy.memmap instances contrary to\n # regular numpy.ndarray instances.\n area = area.dtype.type(area)\n return area\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 235, "n_words": 102, "vocab_size": 79, "complexity": 5, "nloc": 20, "token_counts": 126, "n_ast_nodes": 209, "n_identifiers": 20, "d_id": 76030, "documentation": { "docstring": "Compute Area Under the Curve (AUC) using the trapezoidal rule.\n\n This is a general function, given points on a curve. For computing the\n area under the ROC-curve, see :func:`roc_auc_score`. For an alternative\n way to summarize a precision-recall curve, see\n :func:`average_precision_score`.\n\n Parameters\n ----------\n x : ndarray of shape (n,)\n X coordinates. 
These must be either monotonic increasing or monotonic\n decreasing.\n y : ndarray of shape, (n,)\n Y coordinates.\n\n Returns\n -------\n auc : float\n Area Under the Curve.\n\n See Also\n --------\n roc_auc_score : Compute the area under the ROC curve.\n average_precision_score : Compute average precision from prediction scores.\n precision_recall_curve : Compute precision-recall pairs for different\n probability thresholds.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn import metrics\n >>> y = np.array([1, 1, 2, 2])\n >>> pred = np.array([0.1, 0.4, 0.35, 0.8])\n >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)\n >>> metrics.auc(fpr, tpr)\n 0.75\n ", "n_words": 143, "vocab_size": 103, "n_whitespaces": 258, "language": "en" } }, { "id": 224035, "commit_id": "e7f07cc82ab2be920ab426ba07456d8b2592714d", "repo": "mkdocs", "path": "mkdocs/structure/files.py", "file_name": "files.py", "fun_name": "_get_stem", "commit_message": "Remove spaces at the ends of docstrings, normalize quotes", "code": "def _get_stem(self):\n \n filename = os.path.basename(self.src_path)\n stem, ext = os.path.splitext(filename)\n return 'index' if stem in ('index', 'README') else stem\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 46, "n_words": 18, "vocab_size": 16, "complexity": 2, "nloc": 4, "token_counts": 42, "n_ast_nodes": 73, "n_identifiers": 10, "d_id": 57182, "documentation": { "docstring": "Return the name of the file without it's extension.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 217640, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/heapq.py", "file_name": "heapq.py", "fun_name": "heappop", "commit_message": "add python 3.10.4 for windows", "code": "def heappop(heap):\n \n lastelt = heap.pop() # raises appropriate IndexError if heap is empty\n if heap:\n returnitem = heap[0]\n heap[0] = lastelt\n _siftup(heap, 0)\n return returnitem\n return lastelt\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 70, "n_words": 27, "vocab_size": 19, "complexity": 2, "nloc": 8, "token_counts": 38, "n_ast_nodes": 64, "n_identifiers": 6, "d_id": 54859, "documentation": { "docstring": "Pop the smallest item off the heap, maintaining the heap invariant.", "n_words": 11, "vocab_size": 9, "n_whitespaces": 10, "language": "en" } }, { "id": 299828, "commit_id": "29bda196b5e0a90a2bea7e1797742236114afc1c", "repo": "core", "path": "tests/components/history/test_init.py", "file_name": "test_init.py", "fun_name": "test_get_significant_states_only", "commit_message": "Break apart recorder into tasks and core modules (#71222)", "code": "def test_get_significant_states_only(hass_history):\n \n hass = hass_history\n entity_id = \"sensor.test\"\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 17, "n_words": 8, "vocab_size": 7, "complexity": 2, "nloc": 36, "token_counts": 264, "n_ast_nodes": 25, "n_identifiers": 4, "d_id": 98730, "documentation": { "docstring": "Test significant states when significant_states_only is set.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 322232, "commit_id": "7b455cce47204d4d664deea9661670a838ec8d35", "repo": "PaddleNLP", "path": 
"paddlenlp/datasets/conll2002.py", "file_name": "conll2002.py", "fun_name": "get_labels", "commit_message": "feat: add conll2002 dataset (#1561)\n\nCo-authored-by: Zeyu Chen ", "code": "def get_labels(self):\n \n return [\"O\", \"B-PER\", \"I-PER\", \"B-ORG\", \"I-ORG\", \"B-LOC\", \"I-LOC\", \"B-MISC\", \"I-MISC\"], \\\n self.BUILDER_CONFIGS[self.name]['pos_tags']\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 42, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 3, "token_counts": 39, "n_ast_nodes": 72, "n_identifiers": 4, "d_id": 118099, "documentation": { "docstring": "\n Returns labels of ner tags and pos tags.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 65658, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/controllers/queries.py", "file_name": "queries.py", "fun_name": "supplier_query", "commit_message": "style: format code with black", "code": "def supplier_query(doctype, txt, searchfield, start, page_len, filters):\n\tsupp_master_name = frappe.defaults.get_user_default(\"supp_master_name\")\n\n\tif supp_master_name == \"Supplier Name\":\n\t\tfields = [\"name\", \"supplier_group\"]\n\telse:\n\t\tfields = [\"name\", \"supplier_name\", \"supplier_group\"]\n\n\tfields = get_fields(\"Supplier\", fields)\n\n\treturn frappe.db.sql(\n\t\t.format(\n\t\t\t**{\"field\": \", \".join(fields), \"key\": searchfield, \"mcond\": get_match_cond(doctype)}\n\t\t),\n\t\t{\"txt\": \"%%%s%%\" % txt, \"_txt\": txt.replace(\"%\", \"\"), \"start\": start, \"page_len\": page_len},\n\t)\n\n\n@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()\n@frappe.validate_and_sanitize_search_inputs", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 39, "n_words": 54, "vocab_size": 43, "complexity": 2, "nloc": 24, "token_counts": 119, "n_ast_nodes": 228, "n_identifiers": 21, "d_id": 13978, "documentation": { "docstring": "select {field} from `tabSupplier`\n\t\twhere docstatus < 2\n\t\t\tand ({key} like %(txt)s\n\t\t\tor supplier_name like %(txt)s) and disabled=0\n\t\t\tand (on_hold = 0 or (on_hold = 1 and CURDATE() > release_date))\n\t\t\t{mcond}\n\t\torder by\n\t\t\tif(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),\n\t\t\tif(locate(%(_txt)s, supplier_name), locate(%(_txt)s, supplier_name), 99999),\n\t\t\tidx desc,\n\t\t\tname, supplier_name\n\t\tlimit %(start)s, %(page_len)s ", "n_words": 50, "vocab_size": 37, "n_whitespaces": 39, "language": "en" } }, { "id": 196019, "commit_id": "498015021131af4dbb07eb110e5badaba8250c7b", "repo": "sympy", "path": "sympy/algebras/quaternion.py", "file_name": "quaternion.py", "fun_name": "_eval_evalf", "commit_message": "Updated import locations", "code": "def _eval_evalf(self, prec):\n \n\n return Quaternion(*[arg.evalf(n=prec_to_dps(prec)) for arg in self.args])\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 23, "n_words": 9, "vocab_size": 9, "complexity": 2, "nloc": 2, "token_counts": 32, "n_ast_nodes": 52, "n_identifiers": 9, "d_id": 47519, "documentation": { "docstring": "Returns the floating point approximations (decimal numbers) of the quaternion.\n\n Returns\n =======\n\n Quaternion\n Floating point approximations of quaternion(self)\n\n Examples\n 
========\n\n >>> from sympy import Quaternion\n >>> from sympy import sqrt\n >>> q = Quaternion(1/sqrt(1), 1/sqrt(2), 1/sqrt(3), 1/sqrt(4))\n >>> q.evalf()\n 1.00000000000000\n + 0.707106781186547*i\n + 0.577350269189626*j\n + 0.500000000000000*k\n\n ", "n_words": 46, "vocab_size": 32, "n_whitespaces": 155, "language": "en" } }, { "id": 153543, "commit_id": "97769988a6f19e4b76f34238c97bf159ee7626a5", "repo": "modin", "path": "modin/core/io/column_stores/column_store_dispatcher.py", "file_name": "column_store_dispatcher.py", "fun_name": "call_deploy", "commit_message": "REFACTOR-#3853: interacting with Dask interface through 'DaskWrapper' class (#3854)\n\nCo-authored-by: Devin Petersohn \r\nCo-authored-by: Dmitry Chigarev \r\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: Anatoly Myachev ", "code": "def call_deploy(cls, fname, col_partitions, **kwargs):\n \n return np.array(\n [\n cls.deploy(\n cls.parse,\n num_returns=NPartitions.get() + 2,\n fname=fname,\n columns=cols,\n num_splits=NPartitions.get(),\n **kwargs,\n )\n for cols in col_partitions\n ]\n ).T\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 226, "n_words": 24, "vocab_size": 24, "complexity": 2, "nloc": 14, "token_counts": 65, "n_ast_nodes": 96, "n_identifiers": 16, "d_id": 35432, "documentation": { "docstring": "\n Deploy remote tasks to the workers with passed parameters.\n\n Parameters\n ----------\n fname : str, path object or file-like object\n Name of the file to read.\n col_partitions : list\n List of arrays with columns names that should be read\n by each partition.\n **kwargs : dict\n Parameters of deploying read_* function.\n\n Returns\n -------\n np.ndarray\n Array with references to the task deploy result for each partition.\n ", "n_words": 63, "vocab_size": 49, "n_whitespaces": 189, "language": "en" } }, { "id": 9812, "commit_id": "933415bfa1f9eb89f935037014dfed816eb9815d", "repo": "jina", "path": "daemon/api/dependencies.py", "file_name": "dependencies.py", "fun_name": "load_and_dump", "commit_message": "feat: star routing (#3900)\n\n* feat(proto): adjust proto for star routing (#3844)\r\n\r\n* feat(proto): adjust proto for star routing\r\n\r\n* feat(proto): generate proto files\r\n\r\n* feat(grpc): refactor grpclet interface (#3846)\r\n\r\n* feat: refactor connection pool for star routing (#3872)\r\n\r\n* feat(k8s): add more labels to k8s deployments\r\n\r\n* feat(network): refactor connection pool\r\n\r\n* feat(network): refactor k8s pool\r\n\r\n* feat: star routing graph gateway (#3877)\r\n\r\n* feat: star routing - refactor grpc data runtime (#3887)\r\n\r\n* feat(runtimes): refactor grpc dataruntime\r\n\r\n* fix(tests): adapt worker runtime tests\r\n\r\n* fix(import): fix import\r\n\r\n* feat(proto): enable sending multiple lists (#3891)\r\n\r\n* feat: star routing gateway (#3893)\r\n\r\n* feat: star routing gateway all protocols (#3897)\r\n\r\n* test: add streaming and prefetch tests (#3901)\r\n\r\n* feat(head): new head runtime for star routing (#3899)\r\n\r\n* feat(head): new head runtime\r\n\r\n* feat(head): new head runtime\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(network): improve proto comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(worker): merge docs in worker runtime (#3905)\r\n\r\n* feat(worker): merge docs in worker runtime\r\n\r\n* feat(tests): assert after clean up\r\n\r\n* feat(tests): star routing runtime integration tests (#3908)\r\n\r\n* fix(tests): 
fix integration tests\r\n\r\n* test: test runtimes fast slow request (#3910)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table (#3915)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(zmq): adapt comment in dependency list\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix type tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: add test gateway to worker connection (#3921)\r\n\r\n* feat(pea): adapt peas for star routing (#3918)\r\n\r\n* feat(pea): adapt peas for star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(pea): add tests\r\n\r\n* feat(tests): add failing head pea test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(tests): integration tests for peas (#3923)\r\n\r\n* feat(tests): integration tests for peas\r\n\r\n* feat(pea): remove _inner_pea function\r\n\r\n* feat: star routing container pea (#3922)\r\n\r\n* test: rescue tests (#3942)\r\n\r\n* fix: fix streaming tests (#3945)\r\n\r\n* refactor: move docker run to run (#3948)\r\n\r\n* feat: star routing pods (#3940)\r\n\r\n* feat(pod): adapt pods for star routing\r\n\r\n* feat(pods): adapt basepod to star routing\r\n\r\n* feat(pod): merge pod and compound pod\r\n\r\n* feat(tests): fix tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(test): add container pea int test\r\n\r\n* feat(ci): remove more unnecessary tests\r\n\r\n* fix(tests): remove jinad runtime\r\n\r\n* feat(ci): remove latency tracking\r\n\r\n* fix(ci): fix ci def\r\n\r\n* fix(runtime): enable runtime to be exited\r\n\r\n* fix(tests): wrap runtime test in process\r\n\r\n* fix(runtimes): remove unused runtimes\r\n\r\n* feat(runtimes): improve cancel wait\r\n\r\n* fix(ci): build test pip again in ci\r\n\r\n* fix(tests): fix a test\r\n\r\n* fix(test): run async in its own process\r\n\r\n* feat(pod): include shard in activate msg\r\n\r\n* fix(pea): dont join\r\n\r\n* feat(pod): more debug out\r\n\r\n* feat(grpc): manage channels properly\r\n\r\n* feat(pods): remove exitfifo\r\n\r\n* feat(network): add simple send retry mechanism\r\n\r\n* fix(network): await pool close\r\n\r\n* fix(test): always close grpc server in worker\r\n\r\n* fix(tests): remove container pea from tests\r\n\r\n* fix(tests): reorder tests\r\n\r\n* fix(ci): split tests\r\n\r\n* fix(ci): allow alias setting\r\n\r\n* fix(test): skip a test\r\n\r\n* feat(pods): address comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: unblock skipped test (#3957)\r\n\r\n* feat: jinad pea (#3949)\r\n\r\n* feat: jinad pea\r\n\r\n* feat: jinad pea\r\n\r\n* test: remote peas\r\n\r\n* test: toplogy tests with jinad\r\n\r\n* ci: parallel jobs\r\n\r\n* feat(tests): add pod integration tests (#3958)\r\n\r\n* feat(tests): add pod integration tests\r\n\r\n* fix(tests): make tests less flaky\r\n\r\n* fix(test): fix test\r\n\r\n* test(pea): remote pea topologies (#3961)\r\n\r\n* test(pea): remote pea simple topology\r\n\r\n* test: remote pea topologies\r\n\r\n* refactor: refactor streamer result handling (#3960)\r\n\r\n* feat(k8s): adapt K8s Pod for StarRouting (#3964)\r\n\r\n* test: optimize k8s test\r\n\r\n* test: increase timeout and use different namespace\r\n\r\n* test: optimize k8s test\r\n\r\n* test: build and load image when needed\r\n\r\n* test: refactor k8s test\r\n\r\n* test: fix image name error\r\n\r\n* test: fix k8s image load\r\n\r\n* test: fix typoe port expose\r\n\r\n* test: update tests in connection pool and handling\r\n\r\n* test: remove unused fixture\r\n\r\n* 
test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* feat(k8s): adapt k8s pod for star routing\r\n\r\n* fix(k8s): dont overwrite add/remove function in pool\r\n\r\n* fix(k8s): some fixes\r\n\r\n* fix(k8s): some more fixes\r\n\r\n* fix(k8s): linting\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix k8s unit tests\r\n\r\n* feat(k8s): complete k8s integration test\r\n\r\n* feat(k8s): finish k8s tests\r\n\r\n* feat(k8s): fix test\r\n\r\n* fix(tests): fix test with no name\r\n\r\n* feat(k8s): unify create/replace interface\r\n\r\n* feat(k8s): extract k8s port constants\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): wait for runtime being ready in tests\r\n\r\n* feat(k8s): address comments\r\n\r\nCo-authored-by: bwanglzu \r\n\r\n* feat(flow): adapt Flow for StarRouting (#3986)\r\n\r\n* feat(flow): add routes\r\n\r\n* feat(flow): adapt flow to star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(flow): handle empty topologies\r\n\r\n* feat(k8s): allow k8s pool disabling\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(test): fix test with mock\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(flow): clean up tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat: add plot function (#3994)\r\n\r\n* fix(tests): avoid hanging tests\r\n\r\n* feat(flow): add type hinting\r\n\r\n* fix(test): fix duplicate exec name in test\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): enable jinad test again\r\n\r\n* fix(tests): random port fixture\r\n\r\n* fix(style): replace quotes\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(ci): bring back ci (#3997)\r\n\r\n* feat(ci): enable ci again\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(ci): add latency tracking\r\n\r\n* feat(ci): bring back some tests\r\n\r\n* fix(tests): remove invalid port test\r\n\r\n* feat(ci): disable daemon and distributed tests\r\n\r\n* fix(tests): fix entrypoint in hub test\r\n\r\n* fix(tests): wait for gateway to be ready\r\n\r\n* fix(test): fix more tests\r\n\r\n* feat(flow): do rolling update and scale sequentially\r\n\r\n* fix(tests): fix more tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat: star routing hanging pods (#4011)\r\n\r\n* fix: try to handle hanging pods better\r\n\r\n* test: hanging pods test work\r\n\r\n* fix: fix topology graph problem\r\n\r\n* test: add unit test to graph\r\n\r\n* fix(tests): fix k8s tests\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s pool test\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s connection pool setting\r\n\r\n* fix(tests): make runtime test more reliable\r\n\r\n* fix(test): fix routes test\r\n\r\n* fix(tests): make rolling update test less flaky\r\n\r\n* feat(network): gurantee unique ports\r\n\r\n* feat(network): do round robin for shards\r\n\r\n* fix(ci): increase pytest timeout to 10 min\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix(ci): fix ci file\r\n\r\n* feat(daemon): jinad pod for star routing\r\n\r\n* Revert \"feat(daemon): jinad pod for star routing\"\r\n\r\nThis reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92.\r\n\r\n* feat(daemon): remote jinad pod support (#4042)\r\n\r\n* feat(daemon): add pod tests for star routing\r\n\r\n* feat(daemon): add remote pod test\r\n\r\n* test(daemon): add remote pod arguments test\r\n\r\n* test(daemon): add async scale 
test\r\n\r\n* test(daemon): add rolling update test\r\n\r\n* test(daemon): fix host\r\n\r\n* feat(proto): remove message proto (#4051)\r\n\r\n* feat(proto): remove message proto\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix some more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(proto): put docs back in data\r\n\r\n* fix(proto): clean up\r\n\r\n* feat(proto): clean up\r\n\r\n* fix(tests): skip latency tracking\r\n\r\n* fix(test): fix hub test\r\n\r\n* fix(tests): fix k8s test\r\n\r\n* fix(test): some test clean up\r\n\r\n* fix(style): clean up style issues\r\n\r\n* feat(proto): adjust for rebase\r\n\r\n* fix(tests): bring back latency tracking\r\n\r\n* fix(tests): fix merge accident\r\n\r\n* feat(proto): skip request serialization (#4074)\r\n\r\n* feat: add reduce to star routing (#4070)\r\n\r\n* feat: add reduce on shards to head runtime\r\n\r\n* test: add reduce integration tests with fixed order\r\n\r\n* feat: add reduce on needs\r\n\r\n* chore: get_docs_matrix_from_request becomes public\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* docs: remove undeterministic results warning\r\n\r\n* fix: fix uses_after\r\n\r\n* test: assert correct num docs after reducing in test_external_pod\r\n\r\n* test: correct asserts after reduce in test_rolling_update\r\n\r\n* fix: no reduce if uses_after_address is set\r\n\r\n* fix: get_docs_from_request only if needed\r\n\r\n* fix: fix tests after merge\r\n\r\n* refactor: move reduce from data_request_handler to head\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* chore: apply suggestions\r\n\r\n* fix: fix asserts\r\n\r\n* chore: minor test fix\r\n\r\n* chore: apply suggestions\r\n\r\n* test: remove flow tests with external executor (pea)\r\n\r\n* fix: fix test_expected_messages_routing\r\n\r\n* fix: fix test_func_joiner\r\n\r\n* test: adapt k8s test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix(k8s): fix static pool config\r\n\r\n* fix: use custom protoc doc generator image (#4088)\r\n\r\n* fix: use custom protoc doc generator image\r\n\r\n* fix(docs): minor doc improvement\r\n\r\n* fix(docs): use custom image\r\n\r\n* fix(docs): copy docarray\r\n\r\n* fix: doc building local only\r\n\r\n* fix: timeout doc building\r\n\r\n* fix: use updated args when building ContainerPea\r\n\r\n* test: add container PeaFactory test\r\n\r\n* fix: force pea close on windows (#4098)\r\n\r\n* fix: dont reduce if uses exist (#4099)\r\n\r\n* fix: dont use reduce if uses exist\r\n\r\n* fix: adjust reduce tests\r\n\r\n* fix: adjust more reduce tests\r\n\r\n* fix: fix more tests\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: ignore non jina resources (#4101)\r\n\r\n* feat(executor): enable async executors (#4102)\r\n\r\n* feat(daemon): daemon flow on star routing (#4096)\r\n\r\n* test(daemon): add remote flow test\r\n\r\n* feat(daemon): call scale in daemon\r\n\r\n* feat(daemon): remove tail args and identity\r\n\r\n* test(daemon): rename scalable executor\r\n\r\n* test(daemon): add a small delay in async test\r\n\r\n* feat(daemon): scale partial flow only\r\n\r\n* feat(daemon): call scale directly in partial flow store\r\n\r\n* test(daemon): use asyncio sleep\r\n\r\n* feat(daemon): enable flow level distributed tests\r\n\r\n* test(daemon): fix jinad env workspace config\r\n\r\n* test(daemon): fix pod test use new port rolling update\r\n\r\n* feat(daemon): enable distribuetd tests\r\n\r\n* test(daemon): remove duplicate tests 
and zed runtime test\r\n\r\n* test(daemon): fix stores unit test\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* test: correct test paths\r\n\r\n* test(daemon): add client test for remote flows\r\n\r\n* test(daemon): send a request with jina client\r\n\r\n* test(daemon): assert async generator\r\n\r\n* test(daemon): small interval between tests\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): use async client fetch result\r\n\r\n* test(daemon): finish container flow test\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): correct test paths\r\n\r\n* test(daemon): add small delay for async tests\r\n\r\n* fix: star routing fixes (#4100)\r\n\r\n* docs: update docs\r\n\r\n* fix: fix Request.__repr__\r\n\r\n* docs: update flow remarks\r\n\r\n* docs: fix typo\r\n\r\n* test: add non_empty_fields test\r\n\r\n* chore: remove non_empty_fields test\r\n\r\n* feat: polling per endpoint (#4111)\r\n\r\n* feat(polling): polling per endpoint configurable\r\n\r\n* fix: adjust tests\r\n\r\n* feat(polling): extend documentation\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: clean up\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: remove repeat from flaky test\r\n\r\n* fix: k8s test\r\n\r\n* feat(polling): address pr feedback\r\n\r\n* feat: improve docs\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(grpc): support connect grpc server via ssl tunnel (#4092)\r\n\r\n* feat(grpc): support ssl grpc connect if port is 443\r\n\r\n* fix(grpc): use https option instead of detect port automatically\r\n\r\n* chore: fix typo\r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* test(networking): add test for peapods networking\r\n\r\n* fix: address comments\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(polling): unify polling args (#4113)\r\n\r\n* fix: several issues for jinad pods (#4119)\r\n\r\n* fix: activate for jinad pods\r\n\r\n* fix: dont expose worker pod in partial daemon\r\n\r\n* fix: workspace setting\r\n\r\n* fix: containerized flows\r\n\r\n* fix: hub test\r\n\r\n* feat(daemon): remote peas on star routing (#4112)\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix sync async client test\r\n\r\n* test(daemon): enable remote peas test\r\n\r\n* test(daemon): replace send message to send request\r\n\r\n* test(daemon): declare pea tests in ci\r\n\r\n* test(daemon): use pea args fixture\r\n\r\n* test(daemon): head pea use default host\r\n\r\n* test(daemon): fix peas topologies\r\n\r\n* test(daemon): fix pseudo naming\r\n\r\n* test(daemon): use default host as host\r\n\r\n* test(daemon): fix executor path\r\n\r\n* test(daemon): add remote worker back\r\n\r\n* test(daemon): skip local remote remote topology\r\n\r\n* fix: jinad pea test setup\r\n\r\n* fix: jinad pea tests\r\n\r\n* fix: remove invalid assertion\r\n\r\nCo-authored-by: jacobowitz 
\r\n\r\n* feat: enable daemon tests again (#4132)\r\n\r\n* feat: enable daemon tests again\r\n\r\n* fix: remove bogy empty script file\r\n\r\n* fix: more jinad test fixes\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: scale and ru in jinad\r\n\r\n* fix: fix more jinad tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix: fix flow test\r\n\r\n* fix: improve pea tests reliability (#4136)\r\n\r\nCo-authored-by: Joan Fontanals \r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Deepankar Mahapatro \r\nCo-authored-by: bwanglzu \r\nCo-authored-by: AlaeddineAbdessalem \r\nCo-authored-by: Zhaofeng Miao <522856232@qq.com>", "code": "def load_and_dump(self) -> None:\n \n with ExitStack() as stack:\n # set env vars\n stack.enter_context(change_env('JINA_FULL_CLI', 'true'))\n\n # change directory to `workspace`\n stack.enter_context(change_cwd(get_workspace_path(self.workspace_id)))\n\n # load and build\n f: Flow = Flow.load_config(\n str(self.localpath()), substitute=True, context=self.envs\n ).build()\n # get & set the ports mapping, set `runs_in_docker`\n port_mapping = []\n port_mapping.append(\n PortMapping(\n pod_name='gateway',\n pea_name='gateway',\n ports=Ports(port_expose=f.port_expose),\n )\n )\n for pod_name, pod in f._pod_nodes.items():\n runtime_cls = update_runtime_cls(pod.args, copy=True).runtime_cls\n if runtime_cls in ['WorkerRuntime'] + list(\n GATEWAY_RUNTIME_DICT.values()\n ):\n pod.args.runs_in_docker = True\n current_ports = Ports()\n for port_name in Ports.__fields__:\n setattr(\n current_ports,\n port_name,\n getattr(pod.args, port_name, None),\n )\n\n port_mapping.append(\n PortMapping(pod_name=pod_name, pea_name='', ports=current_ports)\n )\n elif (\n runtime_cls in ['ContainerRuntime']\n and hasattr(pod.args, 'replicas')\n and pod.args.replicas > 1\n ):\n for pea_args in [pod.peas_args['head']]:\n self._update_port_mapping(pea_args, pod_name, port_mapping)\n\n self.ports = port_mapping\n # save to a new file & set it for partial-daemon\n f.save_config(filename=self.newfile)\n self.params.uses = self.newname\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 858, "n_words": 128, "vocab_size": 94, "complexity": 8, "nloc": 59, "token_counts": 273, "n_ast_nodes": 446, "n_identifiers": 53, "d_id": 1704, "documentation": { "docstring": "\n every Flow created inside JinaD lives inside a container. It is important to know the\n list of ports to be published with localhost before actually starting the container.\n\n 1. `load` the flow yaml here.\n - yaml is stored in `workspace` directory, so we'll `cd` there\n - yaml might include env vars. so we'll set them (passed via query params)\n 2. `build` the Flow so that `gateway` gets added.\n - get the list of ports to be published (port_expose, port_in, port_out, port_ctrl)\n - ports need to be published for gateway & executors that are not `ContainerRuntime` or `JinadRuntime` based\n - Pod level args for ports are enough, as we don't need to publish Pea ports\n - all the above Pods also run in docker, hence we set `runs_in_docker`\n 3. `save` the Flow config.\n - saves port configs of all `executors` into the new yaml.\n - set `JINA_FULL_CLI` envvar, so that `gateway` args are also added.\n - save the config into a new file.\n 4. 
pass this new file as filename to `partial-daemon` to start the Flow\n ", "n_words": 175, "vocab_size": 109, "n_whitespaces": 324, "language": "en" } }, { "id": 287828, "commit_id": "81abeac83ed85c5753cb8f2ac317caf079cf1868", "repo": "core", "path": "homeassistant/components/netatmo/climate.py", "file_name": "climate.py", "fun_name": "hvac_action", "commit_message": "Netatmo refactor to use pyatmo 7.0.1 (#73482) (#78523)\n\nCo-authored-by: Robert Svensson ", "code": "def hvac_action(self) -> HVACAction:\n \n if self._model != NA_VALVE and self._boilerstatus is not None:\n return CURRENT_HVAC_MAP_NETATMO[self._boilerstatus]\n # Maybe it is a valve\n if (\n heating_req := getattr(self._room, \"heating_power_request\", 0)\n ) is not None and heating_req > 0:\n return HVACAction.HEATING\n return HVACAction.IDLE\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 115, "n_words": 40, "vocab_size": 32, "complexity": 5, "nloc": 9, "token_counts": 60, "n_ast_nodes": 95, "n_identifiers": 12, "d_id": 87015, "documentation": { "docstring": "Return the current running hvac operation if supported.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 134048, "commit_id": "d1aa5608979891e3dd859c07fa919fa01cfead5f", "repo": "ray", "path": "ci/run/bazel_sharding/tests/test_bazel_sharding.py", "file_name": "test_bazel_sharding.py", "fun_name": "test_actual_timeouts", "commit_message": "[CI] Make bazel sharding for parallel buildkite more intelligent (#29221)\n\nThis PR implements two changes to our `bazel-sharding.py` script, used for determining which bazel tests to run on each instance when buildkite parallelism is used:\r\n* An ability to filter tests before they are sharded, using the same logic as `bazel test`. This is done by specifying the `--tag_filters` argument, eg. `--tag_filters=air,-gpu`. If we filter tests with `bazel test` *after* they are sharded, we can end up with imbalanced shards as eg. all tests we want to filter out are assigned to one shard. This feature is enabled for Serve tests and it will be required for the changes I want to make to AIR CI.\r\n* A new algorithm to balance the shards, finally implementing what that comment was asking for all this time. Instead of trying to assign the same number of tests (which have variable timeouts) to each shard, the new algorithm tries to make sure each shard will finish in more or less the same time. This is achieved through a simple but good enough heuristic. The old algorithm can still be accessed through the `--sharding_strategy` argument.\r\n\r\nThose two changes do cause the complexity of the script to increase, necessitating proper testing. 
In order to facilitate that, this PR also adds a basic buildkite test harness for CI tools/scripts.\r\n\r\nAfter this PR is merged, the next step will be to move most of our manually parallelized jobs to use buildkite parallelism with the new logic here.\r\n\r\nSigned-off-by: Antoni Baum ", "code": "def test_actual_timeouts(mock_build_dir):\n \n query = bazel_sharding.get_target_expansion_query(\n [\"...\"], tests_only=False, exclude_manual=False\n )\n xml_output = bazel_sharding.run_bazel_query(query, debug=False)\n rules = set(bazel_sharding.extract_rules_from_xml(xml_output))\n expected_timeouts = {\n \"test_default\": 60 * 5,\n \"test_small\": 60,\n \"test_medium\": 60 * 5,\n \"test_large\": 60 * 15,\n \"test_enormous\": 60 * 60,\n \"test_short\": 60,\n \"test_moderate\": 60 * 5,\n \"test_long\": 60 * 15,\n \"test_eternal\": 60 * 60,\n \"test_both_size_and_timeout\": 60 * 15,\n }\n assert len(rules) == len(expected_timeouts)\n assert (rule.actual_timeout_s == expected_timeouts[rule.name] for rule in rules)\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 172, "n_words": 68, "vocab_size": 42, "complexity": 2, "nloc": 20, "token_counts": 134, "n_ast_nodes": 218, "n_identifiers": 18, "d_id": 30180, "documentation": { "docstring": "Test that size and timeout attrs are mapped to seconds correctly.\n\n Assert that each of the fake rules is mapped correctly.\n ", "n_words": 21, "vocab_size": 18, "n_whitespaces": 27, "language": "en" } }, { "id": 270641, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/dtensor/test_util.py", "file_name": "test_util.py", "fun_name": "configTestMesh", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def configTestMesh(device_type_mesh_map): # pylint: disable=invalid-name\n \n reset_context()\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 21, "n_words": 6, "vocab_size": 6, "complexity": 2, "nloc": 12, "token_counts": 75, "n_ast_nodes": 20, "n_identifiers": 3, "d_id": 80502, "documentation": { "docstring": "Configs corresponding mesh given test context.\n\n If runs on a CPU mesh, set virtual device on CPU.\n If runs on a GPU mesh, sets virtual device on GPU with proper memory limits.\n if runs on a TPU mesh, initializes TPU system.\n\n Args:\n device_type_mesh_map: A dictionary containing device_type -> mesh mapping.\n\n Returns:\n A properly configured mesh for use in test.\n ", "n_words": 59, "vocab_size": 41, "n_whitespaces": 119, "language": "en" } }, { "id": 294272, "commit_id": "dbef90654f3693401a2df88fa00afbbffbdffcd2", "repo": "core", "path": "tests/components/hue/test_light_v2.py", "file_name": "test_light_v2.py", "fun_name": "test_lights", "commit_message": "Add effects feature to Hue lights (#68567)", "code": "async def test_lights(hass, mock_bridge_v2, v2_resources_test_data):\n \n await mock_bridge_v2.api.load_test_data(v2_resources_test_data)\n\n await setup_platform(hass, mock_bridge_v2, \"light\")\n # there shouldn't have been any requests at this point\n assert len(mock_bridge_v2.mock_requests) == 0\n # 6 entities should be created from test data (grouped_lights are disabled by default)\n assert len(hass.states.async_all()) == 6\n\n # test light which supports color and color temperature\n light_1 = 
hass.states.get(\"light.hue_light_with_color_and_color_temperature_1\")\n assert light_1 is not None\n assert (\n light_1.attributes[\"friendly_name\"]\n == \"Hue light with color and color temperature 1\"\n )\n assert light_1.state == \"on\"\n assert light_1.attributes[\"brightness\"] == int(46.85 / 100 * 255)\n assert light_1.attributes[\"mode\"] == \"normal\"\n assert light_1.attributes[\"color_mode\"] == COLOR_MODE_XY\n assert set(light_1.attributes[\"supported_color_modes\"]) == {\n COLOR_MODE_COLOR_TEMP,\n COLOR_MODE_XY,\n }\n assert light_1.attributes[\"xy_color\"] == (0.5614, 0.4058)\n assert light_1.attributes[\"min_mireds\"] == 153\n assert light_1.attributes[\"max_mireds\"] == 500\n assert light_1.attributes[\"dynamics\"] == \"dynamic_palette\"\n assert light_1.attributes[\"effect_list\"] == [\"None\", \"candle\", \"fire\"]\n assert light_1.attributes[\"effect\"] == \"None\"\n\n # test light which supports color temperature only\n light_2 = hass.states.get(\"light.hue_light_with_color_temperature_only\")\n assert light_2 is not None\n assert (\n light_2.attributes[\"friendly_name\"] == \"Hue light with color temperature only\"\n )\n assert light_2.state == \"off\"\n assert light_2.attributes[\"mode\"] == \"normal\"\n assert light_2.attributes[\"supported_color_modes\"] == [COLOR_MODE_COLOR_TEMP]\n assert light_2.attributes[\"min_mireds\"] == 153\n assert light_2.attributes[\"max_mireds\"] == 454\n assert light_2.attributes[\"dynamics\"] == \"none\"\n assert light_2.attributes[\"effect_list\"] == [\"None\", \"candle\", \"sunrise\"]\n\n # test light which supports color only\n light_3 = hass.states.get(\"light.hue_light_with_color_only\")\n assert light_3 is not None\n assert light_3.attributes[\"friendly_name\"] == \"Hue light with color only\"\n assert light_3.state == \"on\"\n assert light_3.attributes[\"brightness\"] == 128\n assert light_3.attributes[\"mode\"] == \"normal\"\n assert light_3.attributes[\"supported_color_modes\"] == [COLOR_MODE_XY]\n assert light_3.attributes[\"color_mode\"] == COLOR_MODE_XY\n assert light_3.attributes[\"dynamics\"] == \"dynamic_palette\"\n\n # test light which supports on/off only\n light_4 = hass.states.get(\"light.hue_on_off_light\")\n assert light_4 is not None\n assert light_4.attributes[\"friendly_name\"] == \"Hue on/off light\"\n assert light_4.state == \"off\"\n assert light_4.attributes[\"mode\"] == \"normal\"\n assert light_4.attributes[\"supported_color_modes\"] == []\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 458, "n_words": 264, "vocab_size": 124, "complexity": 1, "nloc": 52, "token_counts": 423, "n_ast_nodes": 729, "n_identifiers": 22, "d_id": 93309, "documentation": { "docstring": "Test if all v2 lights get created with correct features.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 304501, "commit_id": "8b1713a691bd0c90824261be785f1998ad89f66f", "repo": "core", "path": "tests/components/lutron_caseta/__init__.py", "file_name": "__init__.py", "fun_name": "get_devices", "commit_message": "Add support for non-serialized devices (light, switch, cover, fan in RA3 Zones) (#75323)\n\nCo-authored-by: J. 
Nick Koston ", "code": "def get_devices(self) -> dict[str, dict]:\n \n return self.devices\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 17, "n_ast_nodes": 28, "n_identifiers": 5, "d_id": 103308, "documentation": { "docstring": "Will return all known devices connected to the Smart Bridge.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 70498, "commit_id": "4248d406c011d6ba6207bb0e0e9b885813d961be", "repo": "wagtail", "path": "wagtail/search/backends/database/__init__.py", "file_name": "__init__.py", "fun_name": "SearchBackend", "commit_message": "Test for presence of fts5 extension in sqlite backend initialisation and migration", "code": "def SearchBackend(params):\n \n if connection.vendor == 'postgresql':\n from .postgres.postgres import PostgresSearchBackend\n return PostgresSearchBackend(params)\n elif connection.vendor == 'mysql':\n from .mysql.mysql import MySQLSearchBackend\n return MySQLSearchBackend(params)\n elif connection.vendor == 'sqlite':\n from .sqlite.utils import fts5_available\n if fts5_available():\n from .sqlite.sqlite import SQLiteSearchBackend\n return SQLiteSearchBackend(params)\n else:\n from .fallback import DatabaseSearchBackend\n return DatabaseSearchBackend(params)\n else:\n from .fallback import DatabaseSearchBackend\n return DatabaseSearchBackend(params)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 174, "n_words": 52, "vocab_size": 28, "complexity": 5, "nloc": 18, "token_counts": 99, "n_ast_nodes": 177, "n_identifiers": 14, "d_id": 15513, "documentation": { "docstring": "\n Returns the appropriate search backend for the current 'default' database system\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 18, "language": "en" } }, { "id": 153617, "commit_id": "605efa618e7994681f57b11d04d417f353ef8d50", "repo": "modin", "path": "modin/pandas/base.py", "file_name": "base.py", "fun_name": "at_time", "commit_message": "DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333)\n\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: Alexander Myskov ", "code": "def at_time(self, time, asof=False, axis=None): # noqa: PR01, RT01, D200\n \n axis = self._get_axis_number(axis)\n idx = self.index if axis == 0 else self.columns\n indexer = pandas.Series(index=idx).at_time(time, asof=asof).index\n return self.loc[indexer] if axis == 0 else self.loc[:, indexer]\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 71, "n_words": 35, "vocab_size": 27, "complexity": 3, "nloc": 5, "token_counts": 78, "n_ast_nodes": 118, "n_identifiers": 13, "d_id": 35498, "documentation": { "docstring": "\n Select values at particular time of day (e.g., 9:30AM).\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 167386, "commit_id": "4bb1fd50a63badd38b5d96d9c4323dae7bc36d8d", "repo": "pandas", "path": "pandas/plotting/_misc.py", "file_name": "_misc.py", "fun_name": "autocorrelation_plot", "commit_message": "TYP: Missing return annotations in util/tseries/plotting (#47510)\n\n* TYP: Missing return annotations in util/tseries/plotting\r\n\r\n* the more tricky parts", "code": "def autocorrelation_plot(series, ax=None, **kwargs) -> Axes:\n \n 
plot_backend = _get_plot_backend(\"matplotlib\")\n return plot_backend.autocorrelation_plot(series=series, ax=ax, **kwargs)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 22, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 32, "token_counts": 37, "n_ast_nodes": 60, "n_identifiers": 7, "d_id": 39990, "documentation": { "docstring": "\n Autocorrelation plot for time series.\n\n Parameters\n ----------\n series : Time series\n ax : Matplotlib axis object, optional\n **kwargs\n Options to pass to matplotlib plotting method.\n\n Returns\n -------\n class:`matplotlib.axis.Axes`\n\n Examples\n --------\n\n The horizontal lines in the plot correspond to 95% and 99% confidence bands.\n\n The dashed line is 99% confidence band.\n\n .. plot::\n :context: close-figs\n\n >>> spacing = np.linspace(-9 * np.pi, 9 * np.pi, num=1000)\n >>> s = pd.Series(0.7 * np.random.rand(1000) + 0.3 * np.sin(spacing))\n >>> pd.plotting.autocorrelation_plot(s)\n \n ", "n_words": 79, "vocab_size": 64, "n_whitespaces": 167, "language": "en" } }, { "id": 197309, "commit_id": "65be461082dda54c8748922f9c29a19af1279fe1", "repo": "sympy", "path": "sympy/core/basic.py", "file_name": "basic.py", "fun_name": "_atomic", "commit_message": "Remove abbreviations in documentation", "code": "def _atomic(e, recursive=False):\n \n pot = _preorder_traversal(e)\n seen = set()\n if isinstance(e, Basic):\n free = getattr(e, \"free_symbols\", None)\n if free is None:\n return {e}\n else:\n return set()\n from .symbol import Symbol\n from .function import Derivative, Function\n atoms = set()\n for p in pot:\n if p in seen:\n pot.skip()\n continue\n seen.add(p)\n if isinstance(p, Symbol) and p in free:\n atoms.add(p)\n elif isinstance(p, (Derivative, Function)):\n if not recursive:\n pot.skip()\n atoms.add(p)\n return atoms\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 228, "n_words": 68, "vocab_size": 46, "complexity": 9, "nloc": 24, "token_counts": 140, "n_ast_nodes": 232, "n_identifiers": 20, "d_id": 48452, "documentation": { "docstring": "Return atom-like quantities as far as substitution is\n concerned: Derivatives, Functions and Symbols. 
Do not\n return any 'atoms' that are inside such quantities unless\n they also appear outside, too, unless `recursive` is True.\n\n Examples\n ========\n\n >>> from sympy import Derivative, Function, cos\n >>> from sympy.abc import x, y\n >>> from sympy.core.basic import _atomic\n >>> f = Function('f')\n >>> _atomic(x + y)\n {x, y}\n >>> _atomic(x + f(y))\n {x, f(y)}\n >>> _atomic(Derivative(f(x), x) + cos(x) + y)\n {y, cos(x), Derivative(f(x), x)}\n\n ", "n_words": 80, "vocab_size": 60, "n_whitespaces": 128, "language": "en" } }, { "id": 198418, "commit_id": "bd9f607176c58dfba01e27c05c2b7d49ff97c901", "repo": "sympy", "path": "sympy/solvers/deutils.py", "file_name": "deutils.py", "fun_name": "ode_order", "commit_message": "Improve loop performance in solvers", "code": "def ode_order(expr, func):\n \n a = Wild('a', exclude=[func])\n if expr.match(a):\n return 0\n\n if isinstance(expr, Derivative):\n if expr.args[0] == func:\n return len(expr.variables)\n else:\n return max(ode_order(arg, func) for arg in expr.args[0].args) + len(expr.variables)\n else:\n return max(ode_order(arg, func) for arg in expr.args)\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 103, "n_words": 38, "vocab_size": 26, "complexity": 6, "nloc": 11, "token_counts": 103, "n_ast_nodes": 161, "n_identifiers": 14, "d_id": 48925, "documentation": { "docstring": "\n Returns the order of a given differential\n equation with respect to func.\n\n This function is implemented recursively.\n\n Examples\n ========\n\n >>> from sympy import Function\n >>> from sympy.solvers.deutils import ode_order\n >>> from sympy.abc import x\n >>> f, g = map(Function, ['f', 'g'])\n >>> ode_order(f(x).diff(x, 2) + f(x).diff(x)**2 +\n ... 
f(x).diff(x), f(x))\n 2\n >>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), f(x))\n 2\n >>> ode_order(f(x).diff(x, 2) + g(x).diff(x, 3), g(x))\n 3\n\n ", "n_words": 67, "vocab_size": 46, "n_whitespaces": 119, "language": "en" } }, { "id": 153194, "commit_id": "8d1004fdbdaa05700613c8e6287641a732acf606", "repo": "modin", "path": "modin/core/execution/ray/implementations/pandas_on_ray/partitioning/virtual_partition.py", "file_name": "virtual_partition.py", "fun_name": "add_to_apply_calls", "commit_message": "FIX-#3675: Expand virtual partitioning utility (#3886)\n\nCo-authored-by: mvashishtha \r\nCo-authored-by: jeffreykennethli \r\nCo-authored-by: Anatoly Myachev \r\nCo-authored-by: Vasily Litvinov \r\nCo-authored-by: Alexey Prutskov \r\nCo-authored-by: Mahesh Vashishtha \r\nCo-authored-by: Naren Krishna <92325366+naren-ponder@users.noreply.github.com>\r\nCo-authored-by: Yaroslav Igoshev \r\nCo-authored-by: Dmitry Chigarev <62142979+dchigarev@users.noreply.github.com>\r\nCo-authored-by: Yaroslav Igoshev \r\nCo-authored-by: Doris Lee \r\nCo-authored-by: Aditya Parameswaran \r\nCo-authored-by: Rehan Sohail Durrani \r\nCo-authored-by: Susmit Vengurlekar \r\nSigned-off-by: Devin Petersohn ", "code": "def add_to_apply_calls(self, func, *args, **kwargs):\n \n return type(self)(\n self.list_of_partitions_to_combine,\n full_axis=self.full_axis,\n call_queue=self.call_queue + [(func, args, kwargs)],\n )\n\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 69, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 6, "token_counts": 47, "n_ast_nodes": 69, "n_identifiers": 9, "d_id": 35294, "documentation": { "docstring": "\n Add a function to the call queue.\n\n Parameters\n ----------\n func : callable or ray.ObjectRef\n Function to be added to the call queue.\n *args : iterable\n Additional positional arguments to be passed in `func`.\n **kwargs : dict\n Additional keyword arguments to be passed in `func`.\n\n Returns\n -------\n PandasOnRayDataframeVirtualPartition\n A new ``PandasOnRayDataframeVirtualPartition`` object.\n\n Notes\n -----\n It does not matter if `func` is callable or an ``ray.ObjectRef``. Ray will\n handle it correctly either way. 
The keyword arguments are sent as a dictionary.\n ", "n_words": 79, "vocab_size": 58, "n_whitespaces": 222, "language": "en" } }, { "id": 159565, "commit_id": "e798bf049f036a5865c14d4343ed8a833864aabe", "repo": "rasa", "path": "rasa/shared/core/trackers.py", "file_name": "trackers.py", "fun_name": "is_active_loop_rejected", "commit_message": "convert TrackerActiveLoop to a dataclass", "code": "def is_active_loop_rejected(self) -> bool:\n \n return self.active_loop is not None and self.active_loop.rejected\n", "url": "https://github.com/RasaHQ/rasa.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 25, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 3, "token_counts": 21, "n_ast_nodes": 35, "n_identifiers": 5, "d_id": 38337, "documentation": { "docstring": "Return True if there is an active loop and it's rejected.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 220548, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/locks.py", "file_name": "locks.py", "fun_name": "clear", "commit_message": "add python 3.10.4 for windows", "code": "def clear(self):\n \n self._value = False\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 21, "n_identifiers": 3, "d_id": 56047, "documentation": { "docstring": "Reset the internal flag to false. Subsequently, coroutines calling\n wait() will block until set() is called to set the internal flag\n to true again.", "n_words": 24, "vocab_size": 19, "n_whitespaces": 37, "language": "en" } }, { "id": 44593, "commit_id": "fded2ca0b9c995737b401896b89e5c9fd7f24c91", "repo": "airflow", "path": "airflow/models/baseoperator.py", "file_name": "baseoperator.py", "fun_name": "unmap", "commit_message": "Rewrite decorated task mapping (#21328)", "code": "def unmap(self) -> BaseOperator:\n \n dag = self.get_dag()\n if not dag:\n raise RuntimeError(\"Cannot unmap a task unless it has a DAG\")\n dag._remove_task(self.task_id)\n return self.create_unmapped_operator(dag)\n\n\n# TODO: Deprecate for Airflow 3.0\nChainable = Union[DependencyMixin, Sequence[DependencyMixin]]\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 77, "n_words": 33, "vocab_size": 31, "complexity": 2, "nloc": 7, "token_counts": 39, "n_ast_nodes": 85, "n_identifiers": 13, "d_id": 8308, "documentation": { "docstring": "Get the \"normal\" Operator after applying the current mapping", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 70994, "commit_id": "de3fcba9e95818e9634ab7de6bfcb1f4221f2775", "repo": "wagtail", "path": "wagtail/contrib/modeladmin/options.py", "file_name": "options.py", "fun_name": "get_admin_urls_for_registration", "commit_message": "Fix warnings from flake8-comprehensions.", "code": "def get_admin_urls_for_registration(self):\n \n urls = ()\n for instance in self.modeladmin_instances:\n urls += instance.get_admin_urls_for_registration()\n return urls\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 53, "n_words": 14, "vocab_size": 12, "complexity": 2, "nloc": 5, "token_counts": 26, "n_ast_nodes": 45, "n_identifiers": 5, "d_id": 
15593, "documentation": { "docstring": "\n Utilised by Wagtail's 'register_admin_urls' hook to register urls for\n used by any associated ModelAdmin instances\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 37, "language": "en" } }, { "id": 281535, "commit_id": "82747072c511beb1b2672846ae2ee4aec53eb562", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/stocks/backtesting/bt_controller.py", "file_name": "bt_controller.py", "fun_name": "print_help", "commit_message": "Terminal Wide Rich (#1161)\n\n* My idea for how we handle Rich moving forward\r\n\r\n* remove independent consoles\r\n\r\n* FIxed pylint issues\r\n\r\n* add a few vars\r\n\r\n* Switched print to console\r\n\r\n* More transitions\r\n\r\n* Changed more prints\r\n\r\n* Replaced all prints\r\n\r\n* Fixing tabulate\r\n\r\n* Finished replace tabulate\r\n\r\n* Finished removing rich from Tabulate\r\n\r\n* add Panel around menu\r\n\r\n* add GST watermark under feature flag\r\n\r\n* Fixed 46 tests\r\n\r\n* Delete test_screener[False].yaml\r\n\r\n* Delete test_screener[True].yaml\r\n\r\n* Fixed the rest of the tests\r\n\r\n* add help and source color vars and use rgb\r\n\r\n* rich on stocks/options\r\n\r\n* update rich on disc, dps, sia\r\n\r\n* rich in gov, ins and scr menus\r\n\r\n* ba and ca menus with rich\r\n\r\n* Fixed import issue\r\n\r\n* Fixed some tests\r\n\r\n* removed termcolor\r\n\r\n* Removed prettytable\r\n\r\n* add rich to remaining stocks menus\r\n\r\n* FIxed linting issue\r\n\r\n* Added James' changes\r\n\r\n* Updated dependencies\r\n\r\n* Add rich to cryptocurrency menu\r\n\r\n* refactor economy and forex\r\n\r\n* refactor etf with rich\r\n\r\n* refactor mfunds\r\n\r\n* refactor rich rest\r\n\r\n* not specify style so default color works well on any background\r\n\r\n* Fixing mypy issues\r\n\r\n* Updated tests\r\n\r\n* More test fixes\r\n\r\n* James' test fixes\r\n\r\n* Updating tests : stocks/screener - fix cassettes using BR\r\n\r\n* Updating tests : crypto\r\n\r\n* Updating tests : disable DEBUG_MODE\r\n\r\n* Updating tests : stocks/fa/yfinance\r\n\r\n* minor fixes that escape\r\n\r\n* Improve the rich table function (that replaces tabulate :D )\r\n\r\n* Fixed bad code\r\n\r\n* delete rogue file + dcf fix + NoConsole\r\n\r\n* sia mypy\r\n\r\n* fuck you linter\r\n\r\n* fuck you linter pt 2\r\n\r\n* skip hehe\r\n\r\n* i hate the black linter\r\n\r\n* ubuntu mypy attempt\r\n\r\n* Update : rich_config + gtff\r\n\r\n* Updating tests : conftest\r\n\r\n* Updating tests : stocks\r\n\r\n* Update : rich_config\r\n\r\n* Updating : rich_config\r\n\r\n* make panel configurable for Theodore :b\r\n\r\n* colors update\r\n\r\n* Merged\r\n\r\n* Updating : rich_config + feature_flags\r\n\r\n* Updating : rich_config\r\n\r\n* Updating tests : stocks\r\n\r\n* Updating : feature_flags\r\n\r\nCo-authored-by: DidierRLopes \r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: james \r\nCo-authored-by: jose-donato ", "code": "def print_help(self):\n \n help_text = f\n console.print(text=help_text, menu=\"Stocks - Backtesting\")\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 11, "token_counts": 22, "n_ast_nodes": 54, "n_identifiers": 9, "d_id": 83833, "documentation": { "docstring": "Print help\n[param]Ticker: [/param]{self.ticker.upper()}[cmds]\n\n whatif what if you had bought X shares on day Y\n\n ema buy when price exceeds EMA(l)\n ema_cross buy when 
EMA(short) > EMA(long)\n rsi buy when RSI < low and sell when RSI > high[/cmds]\n ", "n_words": 39, "vocab_size": 32, "n_whitespaces": 80, "language": "en" } }, { "id": 25584, "commit_id": "4e6dca3085479e0ed0c471fe64dbd4ccd7a77a12", "repo": "saleor", "path": "saleor/graphql/app/types.py", "file_name": "types.py", "fun_name": "resolve_url", "commit_message": "Add new type of target and include openAs option", "code": "def resolve_url(root, info):\n \n open_as = root.get(\"open_as\", AppExtensionOpenAs.POPUP)\n app_url = root[\"app_url\"]\n url = root[\"url\"]\n if url.startswith(\"/\") and app_url and open_as == AppExtensionOpenAs.POPUP:\n parsed_url = urlparse(app_url)\n new_path = urljoin(parsed_url.path, url[1:])\n return parsed_url._replace(path=new_path).geturl()\n return url\n\n", "url": "https://github.com/saleor/saleor.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 107, "n_words": 32, "vocab_size": 23, "complexity": 4, "nloc": 9, "token_counts": 83, "n_ast_nodes": 139, "n_identifiers": 17, "d_id": 4908, "documentation": { "docstring": "Return an extension url.\n\n Apply url stitching when these 3 conditions are met:\n - url starts with /\n - openAs == \"POPUP\"\n - appUrl is defined\n ", "n_words": 26, "vocab_size": 23, "n_whitespaces": 73, "language": "en" } }, { "id": 323137, "commit_id": "44a290e94d1becd1f09fddc3d873f9e19c9d6919", "repo": "PaddleNLP", "path": "paddlenlp/trainer/trainer_base.py", "file_name": "trainer_base.py", "fun_name": "get_train_dataloader", "commit_message": "[Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. (#1761)\n\n* add some datasets for finetune.\r\n\r\n* support fine tune for all tastks.\r\n\r\n* add trainer prototype.\r\n\r\n* init verison for paddlenlp trainer.\r\n\r\n* refine trainer.\r\n\r\n* update for some details.\r\n\r\n* support multi-cards training evaluation.\r\n\r\n* support load from ckpt.\r\n\r\n* support for export inference model.\r\n\r\n* first version of trainer.\r\n\r\n* seq cls support clue.\r\n\r\n* trainer support for token classification and question answersing tasks.\r\n\r\n* fix as reviews.\r\n\r\nCo-authored-by: Zeyu Chen ", "code": "def get_train_dataloader(self):\n \n if self.train_dataset is None:\n raise ValueError(\"Trainer: training requires a train_dataset.\")\n\n train_dataset = self.train_dataset\n\n train_sampler = self._get_train_sampler()\n\n return DataLoader(\n train_dataset,\n batch_sampler=train_sampler,\n collate_fn=self.data_collator,\n num_workers=self.args.dataloader_num_workers, )\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 115, "n_words": 25, "vocab_size": 23, "complexity": 2, "nloc": 10, "token_counts": 54, "n_ast_nodes": 87, "n_identifiers": 13, "d_id": 118380, "documentation": { "docstring": "\n Returns the training [`~paddle.io.DataLoader`].\n\n Will use no sampler if `self.train_dataset` does not implement `__len__`, a random sampler (adapted to\n distributed training if necessary) otherwise.\n\n Subclass and override this method if you want to inject some custom behavior.\n ", "n_words": 37, "vocab_size": 32, "n_whitespaces": 73, "language": "en" } }, { "id": 210019, "commit_id": "0a3d768ce3464fca945ba58f0742fbe003930ec7", "repo": "PaddleDetection", "path": "ppdet/data/transform/batch_operators.py", "file_name": "batch_operators.py", "fun_name": "__call__", "commit_message": "[dev] update 
assigner and tood_head (#5169)", "code": "def __call__(self, samples, context=None):\n \n coarsest_stride = self.pad_to_stride\n\n # multi scale input is nested list\n if isinstance(samples,\n typing.Sequence) and len(samples) > 0 and isinstance(\n samples[0], typing.Sequence):\n inner_samples = samples[0]\n else:\n inner_samples = samples\n\n max_shape = np.array(\n [data['image'].shape for data in inner_samples]).max(axis=0)\n if coarsest_stride > 0:\n max_shape[1] = int(\n np.ceil(max_shape[1] / coarsest_stride) * coarsest_stride)\n max_shape[2] = int(\n np.ceil(max_shape[2] / coarsest_stride) * coarsest_stride)\n\n for data in inner_samples:\n im = data['image']\n im_c, im_h, im_w = im.shape[:]\n padding_im = np.zeros(\n (im_c, max_shape[1], max_shape[2]), dtype=np.float32)\n padding_im[:, :im_h, :im_w] = im\n data['image'] = padding_im\n if 'semantic' in data and data['semantic'] is not None:\n semantic = data['semantic']\n padding_sem = np.zeros(\n (1, max_shape[1], max_shape[2]), dtype=np.float32)\n padding_sem[:, :im_h, :im_w] = semantic\n data['semantic'] = padding_sem\n if 'gt_segm' in data and data['gt_segm'] is not None:\n gt_segm = data['gt_segm']\n padding_segm = np.zeros(\n (gt_segm.shape[0], max_shape[1], max_shape[2]),\n dtype=np.uint8)\n padding_segm[:, :im_h, :im_w] = gt_segm\n data['gt_segm'] = padding_segm\n\n if 'gt_rbox2poly' in data and data['gt_rbox2poly'] is not None:\n # ploy to rbox\n polys = data['gt_rbox2poly']\n rbox = bbox_utils.poly2rbox(polys)\n data['gt_rbox'] = rbox\n\n return samples\n\n\n@register_op", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "@register_op", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 699, "n_words": 166, "vocab_size": 85, "complexity": 13, "nloc": 40, "token_counts": 363, "n_ast_nodes": 575, "n_identifiers": 38, "d_id": 52850, "documentation": { "docstring": "\n Args:\n samples (list): a batch of sample, each is dict.\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 36, "language": "en" } }, { "id": 169777, "commit_id": "0106c26529900bad0561efb9c9180f7f016365b0", "repo": "pandas", "path": "pandas/core/dtypes/common.py", "file_name": "common.py", "fun_name": "is_categorical", "commit_message": "REVERT caching in find_stack_level (#49053)\n\nRevert \"PERF cache find_stack_level (#48023)\"\r\n\r\nThis reverts commit 2f8d0a36703e81e4dca52ca9fe4f58c910c1b304.\r\n\r\nCo-authored-by: MarcoGorelli <>", "code": "def is_categorical(arr) -> bool:\n \n warnings.warn(\n \"is_categorical is deprecated and will be removed in a future version. \"\n \"Use is_categorical_dtype instead.\",\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n return isinstance(arr, ABCCategorical) or is_categorical_dtype(arr)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 68, "n_words": 28, "vocab_size": 28, "complexity": 2, "nloc": 39, "token_counts": 36, "n_ast_nodes": 62, "n_identifiers": 11, "d_id": 40464, "documentation": { "docstring": "\n Check whether an array-like is a Categorical instance.\n\n .. 
deprecated:: 1.1.0\n Use ``is_categorical_dtype`` instead.\n\n Parameters\n ----------\n arr : array-like\n The array-like to check.\n\n Returns\n -------\n boolean\n Whether or not the array-like is of a Categorical instance.\n\n Examples\n --------\n >>> is_categorical([1, 2, 3])\n False\n\n Categoricals, Series Categoricals, and CategoricalIndex will return True.\n\n >>> cat = pd.Categorical([1, 2, 3])\n >>> is_categorical(cat)\n True\n >>> is_categorical(pd.Series(cat))\n True\n >>> is_categorical(pd.CategoricalIndex([1, 2, 3]))\n True\n ", "n_words": 68, "vocab_size": 51, "n_whitespaces": 153, "language": "en" } }, { "id": 7068, "commit_id": "e65f74e87e8e29922f4e9f9d839978ffb2c5b029", "repo": "ludwig", "path": "ludwig/utils/calibration.py", "file_name": "calibration.py", "fun_name": "regularization_terms", "commit_message": "Adds mechanism for calibrating probabilities for category and binary features (#1949)\n\n* Started adding files for calibration implementation.\r\n\r\n* Adds option to return logits and labels in predictor.\r\n\r\n* Pre-commit fixes\r\n\r\n* First pass temperature scaling working.\r\n\r\n* Fixes calibration for categorical feature.\r\n\r\n* Separate calibrated logits from logits.\r\n\r\n* Adds option to revert temperature scaling.\r\n\r\n* Refactoring, move binary prediction logic into calibration class.\r\n\r\n* Reverted accidental commit to simple_model_training.py\r\n\r\n* Adds checks and comments.\r\n\r\n* Fixes matrix scaling, convert pandas series to numpy arrays.\r\n\r\n* Fixes number of classes for categorical features.\r\n\r\n* Adds structured calibration result, unit tests.\r\n\r\n* Make create_calibration_module not abstract, default implementation returns None.\r\n\r\n* Relax precision requirement for calibration test.\r\n\r\n* Save weights after calibration, so calibration results are included in save file.\r\n\r\n* Implemented dirichlet scaling with l2 off-diagonal regularization.\r\n\r\n* Adds masked_select off_diagonal method.\r\n\r\n* Change back to matrix scaling.\r\n\r\n* Updates test expectations to reflect learning rate settings.\r\n\r\n* Tuned default regularization weight.\r\n\r\n* Comments.\r\n\r\n* Set random seed, testing to see if that makes a difference.\r\n\r\n* Remove checks for exact NLL, ECE values post calibration.\r\n\r\n* Restored LOGITS to EXCLUDE_PRED_SET, added another option to return logits in batch_predict.\r\n\r\n* Factor calibration method out of Trainer into Calibrator\r\n\r\n* Removed horovod argument from calibrator.\r\n\r\n* Return batch_size if eval_batch_size not specified.\r\n\r\n* Fix calibration_module docstring.\r\n\r\n* Updates comment, adds fallback method of calibrating on training set if no validation set available.\r\n\r\n* Adds calibration registry, replaces if statements for instantiating calibration.\r\n\r\n* Raise ValueError if unsupported calibration method specified.\r\n\r\n* Remove calibrate method from Trainer\r\n\r\n* f string\r\n\r\n* Use backend to create predictor for calibration.\r\n\r\n* Moves saving out of calibrator\r\n\r\n* Fix comment.\r\n\r\n* Adds ray test of calibration.\r\n\r\n* Implements collect_logits in ray predictor.\r\n\r\n* First pass implementation of collect_labels.\r\n\r\n* Implements collect_logits and collect_labels in ray backend.\r\n\r\n* Merge predictions and labels in ray backend\r\n\r\n* Reverts collect_labels, get labels from dataset in calibrate.\r\n\r\n* Allow overriding EXCLUDE_PRED_SET when getting preds.\r\n\r\n* Changes 'calibration' config option to 
binary.\r\n\r\n* Test both binary and category output features in ray test.\r\n\r\n* Comments/\r\n\r\n* Adds type hints.\r\n\r\nCo-authored-by: Daniel Treiman ", "code": "def regularization_terms(self) -> torch.Tensor:\n \n off_diagonal_entries = torch.masked_select(self.w, ~torch.eye(self.num_classes, dtype=bool))\n weight_matrix_loss = self.off_diagonal_l2 * torch.linalg.vector_norm(off_diagonal_entries)\n bias_vector_loss = self.mu * torch.linalg.vector_norm(self.b, 2)\n return bias_vector_loss + weight_matrix_loss\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 59, "n_words": 24, "vocab_size": 19, "complexity": 1, "nloc": 11, "token_counts": 70, "n_ast_nodes": 110, "n_identifiers": 18, "d_id": 1113, "documentation": { "docstring": "Off-Diagonal and Intercept Regularisation (ODIR).\n\n Described in \"Beyond temperature scaling: Obtaining well-calibrated multiclass probabilities with Dirichlet\n calibration\"\n https://proceedings.neurips.cc/paper/2019/file/8ca01ea920679a0fe3728441494041b9-Paper.pdf\n ", "n_words": 18, "vocab_size": 18, "n_whitespaces": 46, "language": "en" } }, { "id": 179091, "commit_id": "2be32787538f1b0ef83f648ee60d2d4d4868d3fd", "repo": "DeepFaceLive", "path": "xlib/api/win32/dshow/helper.py", "file_name": "helper.py", "fun_name": "get_video_input_devices_names", "commit_message": "update xlib.api.win32", "code": "def get_video_input_devices_names() -> List[str]:\n \n # based on https://docs.microsoft.com/ru-ru/windows/win32/directshow/selecting-a-capture-device\n\n names = []\n sys_dev_enum = strmif.ICreateDevEnum()\n if ole32.CoCreateInstance(uuids.CLSID_SystemDeviceEnum, None, ole32.CLSCTX.CLSCTX_INPROC_SERVER, strmif.ICreateDevEnum.IID, sys_dev_enum) == wintypes.ERROR.SUCCESS:\n pEnumCat = objidl.IEnumMoniker()\n\n if sys_dev_enum.CreateClassEnumerator(uuids.CLSID_VideoInputDeviceCategory, pEnumCat, 0) == wintypes.ERROR.SUCCESS:\n\n moniker = objidl.IMoniker()\n\n while pEnumCat.Next(1, moniker, None) == wintypes.ERROR.SUCCESS:\n\n prop_bag = oaidl.IPropertyBag()\n if moniker.BindToStorage(None, None, oaidl.IPropertyBag.IID, prop_bag) == wintypes.ERROR.SUCCESS:\n var = wintypes.VARIANT()\n\n hr = prop_bag.Read(wintypes.LPCOLESTR('Description'), var, None )\n if hr != wintypes.ERROR.SUCCESS:\n hr = prop_bag.Read(wintypes.LPCOLESTR('FriendlyName'), var, None )\n\n names.append(var.value.bstrVal.value if hr == wintypes.ERROR.SUCCESS else 'unnamed')\n\n prop_bag.Release()\n moniker.Release()\n pEnumCat.Release()\n sys_dev_enum.Release()\n\n return names", "url": "https://github.com/iperov/DeepFaceLive.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 317, "n_words": 82, "vocab_size": 55, "complexity": 7, "nloc": 25, "token_counts": 230, "n_ast_nodes": 363, "n_identifiers": 38, "d_id": 42899, "documentation": { "docstring": "\n returns a list of available names of VideoInputDevice's\n\n ole32 should be initialized before use\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 24, "language": "en" } }, { "id": 96697, "commit_id": "71583b888a5c079749333875a0bbb277188ef693", "repo": "sentry", "path": "src/sentry/db/postgres/decorators.py", "file_name": "decorators.py", "fun_name": "more_better_error_messages", "commit_message": "ref(lang): 🙊 (#32292)", "code": "def more_better_error_messages(func):\n \n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, 
"ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 4, "token_counts": 15, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 19339, "documentation": { "docstring": "\n Wraps functions where the first param is a SQL statement and enforces\n any exceptions thrown will also contain the statement in the message.\n ", "n_words": 23, "vocab_size": 20, "n_whitespaces": 33, "language": "en" } }, { "id": 156257, "commit_id": "7471eb3d1e9ccf085b70b219413aa891c8c2c167", "repo": "dask", "path": "dask/array/utils.py", "file_name": "utils.py", "fun_name": "meta_from_array", "commit_message": "masked scalars input to da.from_array (#8895)", "code": "def meta_from_array(x, ndim=None, dtype=None):\n \n # If using x._meta, x must be a Dask Array, some libraries (e.g. zarr)\n # implement a _meta attribute that are incompatible with Dask Array._meta\n if hasattr(x, \"_meta\") and isinstance(x, Array):\n x = x._meta\n\n if dtype is None and x is None:\n raise ValueError(\"You must specify the meta or dtype of the array\")\n\n if np.isscalar(x):\n x = np.array(x)\n\n if x is None:\n x = np.ndarray\n elif dtype is None and hasattr(x, \"dtype\"):\n dtype = x.dtype\n\n if isinstance(x, type):\n x = x(shape=(0,) * (ndim or 0), dtype=dtype)\n\n if isinstance(x, list) or isinstance(x, tuple):\n ndims = [\n 0\n if isinstance(a, numbers.Number)\n else a.ndim\n if hasattr(a, \"ndim\")\n else len(a)\n for a in x\n ]\n a = [a if nd == 0 else meta_from_array(a, nd) for a, nd in zip(x, ndims)]\n return a if isinstance(x, list) else tuple(x)\n\n if (\n not hasattr(x, \"shape\")\n or not hasattr(x, \"dtype\")\n or not isinstance(x.shape, tuple)\n ):\n return x\n\n if ndim is None:\n ndim = x.ndim\n\n try:\n meta = x[tuple(slice(0, 0, None) for _ in range(x.ndim))]\n if meta.ndim != ndim:\n if ndim > x.ndim:\n meta = meta[(Ellipsis,) + tuple(None for _ in range(ndim - meta.ndim))]\n meta = meta[tuple(slice(0, 0, None) for _ in range(meta.ndim))]\n elif ndim == 0:\n meta = meta.sum()\n else:\n meta = meta.reshape((0,) * ndim)\n if meta is np.ma.masked:\n meta = np.ma.array(np.empty((0,) * ndim, dtype=dtype or x.dtype), mask=True)\n except Exception:\n meta = np.empty((0,) * ndim, dtype=dtype or x.dtype)\n\n if np.isscalar(meta):\n meta = np.array(meta)\n\n if dtype and meta.dtype != dtype:\n try:\n meta = meta.astype(dtype)\n except ValueError as e:\n if (\n any(\n s in str(e)\n for s in [\n \"invalid literal\",\n \"could not convert string to float\",\n ]\n )\n and meta.dtype.kind in \"SU\"\n ):\n meta = np.array([]).astype(dtype)\n else:\n raise e\n\n return meta\n\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 887, "n_words": 287, "vocab_size": 149, "complexity": 40, "nloc": 66, "token_counts": 524, "n_ast_nodes": 816, "n_identifiers": 42, "d_id": 36621, "documentation": { "docstring": "Normalize an array to appropriate meta object\n\n Parameters\n ----------\n x: array-like, callable\n Either an object that looks sufficiently like a Numpy array,\n or a callable that accepts shape and dtype keywords\n ndim: int\n Number of dimensions of the array\n dtype: Numpy dtype\n A valid input for ``np.dtype``\n\n Returns\n -------\n array-like with zero elements of the correct dtype\n ", "n_words": 57, "vocab_size": 45, "n_whitespaces": 112, "language": "en" } }, { "id": 205280, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/migrations/autodetector.py", 
"file_name": "autodetector.py", "fun_name": "generate_deleted_models", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def generate_deleted_models(self):\n \n new_keys = self.new_model_keys | self.new_unmanaged_keys\n deleted_models = self.old_model_keys - new_keys\n deleted_unmanaged_models = self.old_unmanaged_keys - new_keys\n all_deleted_models = chain(\n sorted(deleted_models), sorted(deleted_unmanaged_models)\n )\n for app_label, model_name in all_deleted_models:\n model_state = self.from_state.models[app_label, model_name]\n # Gather related fields\n related_fields = {}\n for field_name, field in model_state.fields.items():\n if field.remote_field:\n if field.remote_field.model:\n related_fields[field_name] = field\n if getattr(field.remote_field, \"through\", None):\n related_fields[field_name] = field\n # Generate option removal first\n unique_together = model_state.options.pop(\"unique_together\", None)\n index_together = model_state.options.pop(\"index_together\", None)\n if unique_together:\n self.add_operation(\n app_label,\n operations.AlterUniqueTogether(\n name=model_name,\n unique_together=None,\n ),\n )\n if index_together:\n self.add_operation(\n app_label,\n operations.AlterIndexTogether(\n name=model_name,\n index_together=None,\n ),\n )\n # Then remove each related field\n for name in sorted(related_fields):\n self.add_operation(\n app_label,\n operations.RemoveField(\n model_name=model_name,\n name=name,\n ),\n )\n # Finally, remove the model.\n # This depends on both the removal/alteration of all incoming fields\n # and the removal of all its own related fields, and if it's\n # a through model the field that references it.\n dependencies = []\n relations = self.from_state.relations\n for (\n related_object_app_label,\n object_name,\n ), relation_related_fields in relations[app_label, model_name].items():\n for field_name, field in relation_related_fields.items():\n dependencies.append(\n (related_object_app_label, object_name, field_name, False),\n )\n if not field.many_to_many:\n dependencies.append(\n (\n related_object_app_label,\n object_name,\n field_name,\n \"alter\",\n ),\n )\n\n for name in sorted(related_fields):\n dependencies.append((app_label, model_name, name, False))\n # We're referenced in another field's through=\n through_user = self.through_users.get((app_label, model_state.name_lower))\n if through_user:\n dependencies.append(\n (through_user[0], through_user[1], through_user[2], False)\n )\n # Finally, make the operation, deduping any dependencies\n self.add_operation(\n app_label,\n operations.DeleteModel(\n name=model_state.name,\n ),\n dependencies=list(set(dependencies)),\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 1559, "n_words": 223, "vocab_size": 130, "complexity": 14, "nloc": 75, "token_counts": 389, "n_ast_nodes": 593, "n_identifiers": 49, "d_id": 51062, "documentation": { "docstring": "\n Find all deleted models (managed and unmanaged) and make delete\n operations for them as well as separate operations to delete any\n foreign key or M2M relationships (these are optimized later, if\n possible).\n\n Also bring forward removal of any model options that refer to\n collections of fields - the inverse of generate_created_models().\n ", "n_words": 51, "vocab_size": 43, "n_whitespaces": 101, "language": "en" } }, { "id": 6557, "commit_id": "23a33eef3bc7ea3ba33ec56dc9b56ba38462648a", "repo": "ludwig", "path": 
"scripts/extract_schema.py", "file_name": "extract_schema.py", "fun_name": "all_subclasses", "commit_message": "feat: Modify Trainer to use marshmallow_dataclass syntax for handling hyperparameters. Add basic scripting for docstring extraction to marshmallow schema. Fix some existing marshmallow issues. (#1606)", "code": "def all_subclasses(cls):\n \n return set(cls.__subclasses__()).union([s for c in cls.__subclasses__() for s in all_subclasses(c)])\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 18, "n_words": 12, "vocab_size": 10, "complexity": 3, "nloc": 2, "token_counts": 37, "n_ast_nodes": 61, "n_identifiers": 7, "d_id": 1029, "documentation": { "docstring": "Returns recursively-generated list of all children classes inheriting from given `cls`.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 248621, "commit_id": "99d3931974e65865d1102ee79d7b7e2b017a3180", "repo": "synapse", "path": "tests/rest/client/test_upgrade_room.py", "file_name": "test_upgrade_room.py", "fun_name": "test_second_upgrade_after_delay", "commit_message": "Add more tests for room upgrades (#13074)\n\nSigned-off-by: Sean Quah ", "code": "def test_second_upgrade_after_delay(self) -> None:\n \n channel1 = self._upgrade_room()\n self.assertEqual(200, channel1.code, channel1.result)\n\n channel2 = self._upgrade_room(expire_cache=True)\n self.assertEqual(200, channel2.code, channel2.result)\n\n self.assertNotEqual(\n channel1.json_body[\"replacement_room\"],\n channel2.json_body[\"replacement_room\"],\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 91, "n_words": 20, "vocab_size": 18, "complexity": 1, "nloc": 10, "token_counts": 72, "n_ast_nodes": 115, "n_identifiers": 11, "d_id": 72379, "documentation": { "docstring": "A second room upgrade is not deduplicated after some time has passed.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 300937, "commit_id": "a4c1bcefb9d2a6f2aa0bc189fca496d46c78e3b0", "repo": "core", "path": "tests/components/recorder/test_util.py", "file_name": "test_util.py", "fun_name": "test_setup_connection_for_dialect_sqlite", "commit_message": "Tune sqlite based on configured settings (#72016)", "code": "def test_setup_connection_for_dialect_sqlite(sqlite_version, db_supports_row_number):\n \n instance_mock = MagicMock(_db_supports_row_number=True)\n execute_args = []\n close_mock = MagicMock()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 24, "n_words": 12, "vocab_size": 10, "complexity": 1, "nloc": 22, "token_counts": 143, "n_ast_nodes": 44, "n_identifiers": 8, "d_id": 99791, "documentation": { "docstring": "Test setting up the connection for a sqlite dialect.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 46963, "commit_id": "04082ac091e92587b22c8323170ebe38bc68a19a", "repo": "airflow", "path": "airflow/providers/cncf/kubernetes/operators/kubernetes_pod.py", "file_name": "kubernetes_pod.py", "fun_name": "dry_run", "commit_message": "Cleanup dup code now that k8s provider requires 2.3.0+ (#22845)", "code": "def dry_run(self) -> None:\n \n pod = self.build_pod_request_obj()\n print(yaml.dump(prune_dict(pod.to_dict(), mode='strict')))\n\n", "url": 
"https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 8, "token_counts": 35, "n_ast_nodes": 62, "n_identifiers": 10, "d_id": 9046, "documentation": { "docstring": "\n Prints out the pod definition that would be created by this operator.\n Does not include labels specific to the task instance (since there isn't\n one in a dry_run) and excludes all empty elements.\n ", "n_words": 33, "vocab_size": 32, "n_whitespaces": 62, "language": "en" } }, { "id": 36650, "commit_id": "5b40a37bc4da9dc6cd33876ce9bb3f7f48450a03", "repo": "transformers", "path": "src/transformers/models/vit_mae/modeling_tf_vit_mae.py", "file_name": "modeling_tf_vit_mae.py", "fun_name": "get_2d_sincos_pos_embed", "commit_message": "Add TF ViT MAE (#16255)\n\n* ported TFViTMAEIntermediate and TFViTMAEOutput.\r\n\r\n* added TFViTMAEModel and TFViTMAEDecoder.\r\n\r\n* feat: added a noise argument in the implementation for reproducibility.\r\n\r\n* feat: vit mae models with an additional noise argument for reproducibility.\r\n\r\nCo-authored-by: ariG23498 \r\nCo-authored-by: ydshieh ", "code": "def get_2d_sincos_pos_embed(embed_dim, grid_size, add_cls_token=False):\n \n grid_h = tf.range(grid_size, dtype=tf.float32)\n grid_w = tf.range(grid_size, dtype=tf.float32)\n grid = tf.meshgrid(grid_w, grid_h) # here w goes first\n grid = tf.stack(grid, axis=0)\n\n grid = tf.reshape(grid, [2, 1, grid_size, grid_size])\n pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)\n if add_cls_token:\n pos_embed = tf.concat([tf.zeros((1, embed_dim)), pos_embed], axis=0)\n return pos_embed\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 81, "n_words": 46, "vocab_size": 32, "complexity": 2, "nloc": 10, "token_counts": 118, "n_ast_nodes": 176, "n_identifiers": 19, "d_id": 6659, "documentation": { "docstring": "\n Create 2D sin/cos positional embeddings.\n\n Args:\n embed_dim (`int`):\n Embedding dimension.\n grid_size (`int`):\n The grid height and width.\n add_cls_token (`bool`, *optional*, defaults to `False`):\n Whether or not to add a classification (CLS) token.\n\n Returns:\n (`tf.Tensor` of shape (grid_size*grid_size, embed_dim) or (1+grid_size*grid_size, embed_dim): the position\n embeddings (with or without classification token)\n ", "n_words": 49, "vocab_size": 44, "n_whitespaces": 130, "language": "en" } }, { "id": 100688, "commit_id": "5a8b5d7b3c6b0b413fe2b4d9247b9dd0cd692fa0", "repo": "faceswap", "path": "plugins/convert/writer/ffmpeg.py", "file_name": "ffmpeg.py", "fun_name": "_test_for_audio_stream", "commit_message": "bugfix: ffmpeg writer - prevent crash if no audio in source", "code": "def _test_for_audio_stream(self) -> bool:\n \n exe = im_ffm.get_ffmpeg_exe()\n cmd = [exe, \"-hide_banner\", \"-i\", self._source_video, \"-f\", \"ffmetadata\", \"-\"]\n\n try:\n out = check_output(cmd, stderr=STDOUT)\n except CalledProcessError as err:\n out = err.output.decode(errors=\"ignore\")\n raise ValueError(\"Error checking audio stream. 
Status: \"\n f\"{err.returncode}\\n{out}\") from err\n\n retval = False\n for line in out.splitlines():\n if not line.strip().startswith(b\"Stream #\"):\n continue\n logger.debug(\"scanning Stream line: %s\", line.decode(errors=\"ignore\").strip())\n if b\"Audio\" in line:\n retval = True\n break\n logger.debug(\"Audio found: %s\", retval)\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 271, "n_words": 69, "vocab_size": 57, "complexity": 5, "nloc": 34, "token_counts": 138, "n_ast_nodes": 251, "n_identifiers": 26, "d_id": 20144, "documentation": { "docstring": " Check whether the source video file contains an audio stream.\n\n If we attempt to mux audio from a source video that does not contain an audio stream\n ffmpeg will crash faceswap in a fairly ugly manner.\n\n Returns\n -------\n bool\n ``True if an audio stream is found in the source video file, otherwise ``False``\n\n Raises\n ------\n RuntimeError\n If a subprocess error is raised scanning the input video file\n ", "n_words": 67, "vocab_size": 48, "n_whitespaces": 153, "language": "en" } }, { "id": 125444, "commit_id": "b856daebbdc923a216ce412be477c61e6cc5707e", "repo": "ray", "path": "python/ray/serve/scripts.py", "file_name": "scripts.py", "fun_name": "process_dict_for_yaml_dump", "commit_message": "[Serve] Fix Formatting of Error Messages printed in `serve status` (#26578)", "code": "def process_dict_for_yaml_dump(data):\n \n\n for k, v in data.items():\n if isinstance(v, dict):\n data[k] = process_dict_for_yaml_dump(v)\n elif isinstance(v, str):\n data[k] = remove_ansi_escape_sequences(v)\n\n return data\n\n\n@click.group(help=\"CLI for managing Serve instances on a Ray cluster.\")", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@click.group(help=\"CLI for managing Serve instances on a Ray cluster.\")", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 74, "n_words": 30, "vocab_size": 26, "complexity": 4, "nloc": 7, "token_counts": 53, "n_ast_nodes": 102, "n_identifiers": 12, "d_id": 27872, "documentation": { "docstring": "\n Removes ANSI escape sequences recursively for all strings in dict.\n\n We often need to use yaml.dump() to print dictionaries that contain exception\n tracebacks, which can contain ANSI escape sequences that color printed text. 
However\n yaml.dump() will format the tracebacks incorrectly if ANSI escape sequences are\n present, so we need to remove them before dumping.\n ", "n_words": 54, "vocab_size": 42, "n_whitespaces": 73, "language": "en" } }, { "id": 52989, "commit_id": "86956bde0a7efe9699703c5a318afdc76a59efab", "repo": "prefect", "path": "tests/orion/database/test_dependencies.py", "file_name": "test_dependencies.py", "fun_name": "test_inject_db", "commit_message": "Expand on regression test description", "code": "async def test_inject_db(db):\n \n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 6, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 5, "token_counts": 25, "n_ast_nodes": 14, "n_identifiers": 2, "d_id": 10682, "documentation": { "docstring": "\n Regression test for async-mangling behavior of inject_db() decorator.\n\n Previously, when wrapping a coroutine function, the decorator returned\n that function's coroutine object, instead of the coroutine function.\n\n This worked fine in most cases because both a coroutine function and a\n coroutine object can be awaited, but it broke our Pytest setup because\n we were auto-marking coroutine functions as async, and any async test\n wrapped by inject_db() was no longer a coroutine function, but instead\n a coroutine object, so we skipped marking it.\n ", "n_words": 81, "vocab_size": 59, "n_whitespaces": 109, "language": "en" } }, { "id": 137755, "commit_id": "8e680c483ce326cefc62e44f68ab1a6948b1c3d2", "repo": "ray", "path": "rllib/env/wrappers/kaggle_wrapper.py", "file_name": "kaggle_wrapper.py", "fun_name": "build_agent_spaces", "commit_message": "[RLlib] gymnasium support (new `Env.reset()/step()/seed()/render()` APIs). (#28369)", "code": "def build_agent_spaces(self) -> Tuple[Space, Space]:\n # noqa: E501\n action_space = Discrete(19)\n # The football field's corners are [+-1., +-0.42]. However, the players\n # and balls may get out of the field. 
Thus we multiply those limits by\n # a factor of 2.\n xlim = 1.0 * 2\n ylim = 0.42 * 2\n num_players: int = 11\n xy_space = Box(\n np.array([-xlim, -ylim], dtype=np.float32),\n np.array([xlim, ylim], dtype=np.float32),\n )\n xyz_space = Box(\n np.array([-xlim, -ylim, 0], dtype=np.float32),\n np.array([xlim, ylim, np.inf], dtype=np.float32),\n )\n observation_space = DictSpace(\n {\n \"controlled_players\": Discrete(2),\n \"players_raw\": TupleSpace(\n [\n DictSpace(\n {\n # ball information\n \"ball\": xyz_space,\n \"ball_direction\": Box(-np.inf, np.inf, (3,)),\n \"ball_rotation\": Box(-np.inf, np.inf, (3,)),\n \"ball_owned_team\": Discrete(3),\n \"ball_owned_player\": Discrete(num_players + 1),\n # left team\n \"left_team\": TupleSpace([xy_space] * num_players),\n \"left_team_direction\": TupleSpace(\n [xy_space] * num_players\n ),\n \"left_team_tired_factor\": Box(0.0, 1.0, (num_players,)),\n \"left_team_yellow_card\": MultiBinary(num_players),\n \"left_team_active\": MultiBinary(num_players),\n \"left_team_roles\": MultiDiscrete([10] * num_players),\n # right team\n \"right_team\": TupleSpace([xy_space] * num_players),\n \"right_team_direction\": TupleSpace(\n [xy_space] * num_players\n ),\n \"right_team_tired_factor\": Box(\n 0.0, 1.0, (num_players,)\n ),\n \"right_team_yellow_card\": MultiBinary(num_players),\n \"right_team_active\": MultiBinary(num_players),\n \"right_team_roles\": MultiDiscrete([10] * num_players),\n # controlled player information\n \"active\": Discrete(num_players),\n \"designated\": Discrete(num_players),\n \"sticky_actions\": MultiBinary(10),\n # match state\n \"score\": Box(-np.inf, np.inf, (2,)),\n \"steps_left\": Box(0, np.inf, (1,)),\n \"game_mode\": Discrete(7),\n }\n )\n ]\n ),\n }\n )\n return action_space, observation_space\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 1613, "n_words": 192, "vocab_size": 129, "complexity": 1, "nloc": 62, "token_counts": 408, "n_ast_nodes": 627, "n_identifiers": 23, "d_id": 31237, "documentation": { "docstring": "Construct the action and observation spaces\n\n Description of actions and observations:\n https://github.com/google-research/football/blob/master/gfootball/doc/\n observation.md\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 41, "language": "en" } }, { "id": 104761, "commit_id": "445107bae3fcd6ac9eeae503232960fa4ba8ccfd", "repo": "datasets", "path": "src/datasets/arrow_dataset.py", "file_name": "arrow_dataset.py", "fun_name": "column_names", "commit_message": "Add code examples to API docs (#4168)\n\n* add code examples for functions related to the base dataset class\r\n\r\n* ✨ make style\r\n\r\n* 🖍 make each code example fully reproducible where applicable\r\n\r\n* 🖍 show parameter usage for some functions\r\n\r\n* 🖍 add examples for DatasetInfo functions", "code": "def column_names(self) -> List[str]:\n \n return self._data.column_names\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 13, "token_counts": 17, "n_ast_nodes": 29, "n_identifiers": 5, "d_id": 21956, "documentation": { "docstring": "Names of the columns in the dataset.\n\n Example:\n\n ```py\n >>> from datasets import load_dataset\n >>> ds = load_dataset(\"rotten_tomatoes\", split=\"validation\")\n >>> ds.column_names\n ['text', 'label']\n ```\n ", "n_words": 24, "vocab_size": 21, 
"n_whitespaces": 80, "language": "en" } }, { "id": 745, "commit_id": "2260fe77c69381a2c815a7213562115969cbf8a3", "repo": "PySyft", "path": "packages/syft/src/syft/core/store/storeable_object.py", "file_name": "storeable_object.py", "fun_name": "clean_copy", "commit_message": "syft: integrate upload to s3 in send method\n- data proxy property in storable object class\n- add method to get presigned GET url in ProxyDataClass\n- update .get method to support s3 presigned url in case of proxy data class\n\nCo-authored-by: IonesioJunior ", "code": "def clean_copy(self) -> \"StorableObject\":\n \n if self.is_proxy:\n self._data.generate_presigned_url()\n return StorableObject(\n id=self.id,\n data=self._data,\n tags=self.tags,\n description=self.description,\n )\n else:\n return StorableObject(\n id=self.id, data=self.data, tags=self.tags, description=self.description\n )\n", "url": "https://github.com/OpenMined/PySyft.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 173, "n_words": 22, "vocab_size": 17, "complexity": 2, "nloc": 17, "token_counts": 77, "n_ast_nodes": 119, "n_identifiers": 10, "d_id": 110, "documentation": { "docstring": "\n This method return a copy of self, but clean up the search_permissions and\n read_permissions attributes.\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 37, "language": "en" } }, { "id": 128591, "commit_id": "e142be077f0c727ab11ba51ecaba9a98b7bfe474", "repo": "ray", "path": "python/ray/tune/tests/test_cluster.py", "file_name": "test_cluster.py", "fun_name": "test_cluster_rllib_restore", "commit_message": "[tune] Store sync config/checkpoint config in experiment, trial (#29019)\n\nThis is some clean-up required for future changes to the syncing/checkpointing behavior. At the moment we pass single attributes of these configs to the Experiment class, and then subsequently to the Trial class, from which it is passed on to the trainable. If we extend the configurability in the future (e.g. provide fallback mechanisms in the checkpoint config, or make retry wait times configurable in the sync config), we would have to add more and more attributes to these intermediate classes. 
Instead, we should just pass and store the full config.\r\n\r\nAs a next follow-up, we can pass these configs to the Trainable.\r\n\r\nSigned-off-by: Kai Fricke ", "code": "def test_cluster_rllib_restore(start_connected_cluster, tmpdir):\n cluster = start_connected_cluster\n dirpath = str(tmpdir)\n script = .format(\n address=cluster.address, checkpoint_dir=dirpath\n )\n run_string_as_driver_nonblocking(script)\n # Wait until the right checkpoint is saved.\n # The trainable returns every 0.5 seconds, so this should not miss\n # the checkpoint.\n local_checkpoint_dir = os.path.join(dirpath, \"experiment\")\n for i in range(100):\n if TrialRunner.checkpoint_exists(local_checkpoint_dir):\n # Inspect the internal trialrunner\n runner = TrialRunner(\n resume=\"LOCAL\", local_checkpoint_dir=local_checkpoint_dir\n )\n trials = runner.get_trials()\n last_res = trials[0].last_result\n if last_res and last_res.get(\"training_iteration\"):\n break\n time.sleep(0.3)\n\n if not TrialRunner.checkpoint_exists(local_checkpoint_dir):\n raise RuntimeError(\"Checkpoint file didn't appear.\")\n\n ray.shutdown()\n cluster.shutdown()\n cluster = _start_new_cluster()\n cluster.wait_for_nodes()\n\n # Restore properly from checkpoint\n trials2 = tune.run_experiments(\n {\n \"experiment\": {\n \"run\": \"PG\",\n \"checkpoint_config\": CheckpointConfig(checkpoint_frequency=1),\n \"local_dir\": dirpath,\n }\n },\n resume=True,\n )\n assert all(t.status == Trial.TERMINATED for t in trials2)\n ray.shutdown()\n cluster.shutdown()\n\n\n# TODO(ujvl): Fix test.\n@pytest.mark.skip(reason=\"Not very consistent.\")", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@pytest.mark.skip(reason=\"Not very consistent.\")", "n_ast_errors": 1, "ast_levels": 15, "n_whitespaces": 396, "n_words": 124, "vocab_size": 95, "complexity": 7, "nloc": 55, "token_counts": 204, "n_ast_nodes": 365, "n_identifiers": 47, "d_id": 28755, "documentation": { "docstring": "\nimport time\nimport ray\nfrom ray import tune\n\nray.init(address=\"{address}\")\n\n\ntune.run(\n \"PG\",\n name=\"experiment\",\n config=dict(env=\"CartPole-v1\", framework=\"tf\"),\n stop=dict(training_iteration=10),\n local_dir=\"{checkpoint_dir}\",\n checkpoint_freq=1,\n max_failures=1,\n dict(experiment=kwargs),\n raise_on_failed_trial=False)\n", "n_words": 20, "vocab_size": 17, "n_whitespaces": 42, "language": "en" } }, { "id": 158223, "commit_id": "b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2", "repo": "d2l-zh", "path": "d2l/mxnet.py", "file_name": "mxnet.py", "fun_name": "predict_snli", "commit_message": "[PaddlePaddle] Merge master into Paddle branch (#1186)\n\n* change 15.2 title in chinese version (#1109)\r\n\r\nchange title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 
情感分析:使用循环神经网络‘\r\n\r\n* 修改部分语义表述 (#1105)\r\n\r\n* Update r0.17.5 (#1120)\r\n\r\n* Bump versions in installation\r\n\r\n* 94行typo: (“bert.mall”)->(“bert.small”) (#1129)\r\n\r\n* line 313: \"bert.mall\" -> \"bert.small\" (#1130)\r\n\r\n* fix: update language as native reader (#1114)\r\n\r\n* Fix the translation of \"stride\" (#1115)\r\n\r\n* Update index.md (#1118)\r\n\r\n修改部分语义表述\r\n\r\n* Update self-attention-and-positional-encoding.md (#1133)\r\n\r\n依照本书的翻译习惯,将pooling翻译成汇聚\r\n\r\n* maybe a comment false (#1149)\r\n\r\n* maybe a little false\r\n\r\n* maybe a little false\r\n\r\n* A minor bug in the rcnn section (Chinese edition) (#1148)\r\n\r\n* Update bert.md (#1137)\r\n\r\n一个笔误\r\n# 假设batch_size=2,num_pred_positions=3\r\n# 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1]\r\n\r\n* Update calculus.md (#1135)\r\n\r\n* fix typo in git documentation (#1106)\r\n\r\n* fix: Update the Chinese translation in lr-scheduler.md (#1136)\r\n\r\n* Update lr-scheduler.md\r\n\r\n* Update chapter_optimization/lr-scheduler.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* fix translation for kaggle-house-price.md (#1107)\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\nSigned-off-by: sunhaizhou \r\n\r\n* Update weight-decay.md (#1150)\r\n\r\n* Update weight-decay.md\r\n\r\n关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解\r\n关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。\r\n并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释\r\n解释为何会增加复杂性以及为何需要细粒度工具。\r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Fix a spelling error (#1161)\r\n\r\n* Update gru.md (#1152)\r\n\r\nThe key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state.\r\n翻译错误\r\n\r\n* Unify the function naming (#1113)\r\n\r\nUnify naming of the function 'init_xavier()'.\r\n\r\n* Update mlp-concise.md (#1166)\r\n\r\n* Update mlp-concise.md\r\n\r\n语句不通顺\r\n\r\n* Update environment.md\r\n\r\n语序异常\r\n\r\n* Update config.ini\r\n\r\n* fix the imprecise description (#1168)\r\n\r\nCo-authored-by: yuande \r\n\r\n* fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175)\r\n\r\n* Fix some typos. (#1163)\r\n\r\n* Update batch-norm.md (#1170)\r\n\r\nfixing typos u->x in article\r\n\r\n* Update linear-regression.md (#1090)\r\n\r\nWe invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that\r\n\r\n原译文把who也直接翻译出来了。\r\n\r\n* Update mlp.md (#1117)\r\n\r\n* Update mlp.md\r\n\r\n修改部分语义表述\r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: goldmermaid \r\n\r\n* Correct a translation error. 
(#1091)\r\n\r\n* Correct a translation error.\r\n\r\n* Update chapter_computer-vision/image-augmentation.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update aws.md (#1121)\r\n\r\n* Update aws.md\r\n\r\n* Update chapter_appendix-tools-for-deep-learning/aws.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update image-augmentation.md (#1093)\r\n\r\n* Update anchor.md (#1088)\r\n\r\nfix a minor issue in code\r\n\r\n* Update anchor.md\r\n\r\n* Update image-augmentation.md\r\n\r\n* fix typo and improve translation in chapter_linear-networks\\softmax-regression.md (#1087)\r\n\r\n* Avoid `torch.meshgrid` user warning (#1174)\r\n\r\nAvoids the following user warning:\r\n```python\r\n~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.)\r\n return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\r\n```\r\n\r\n* bump to 2.0.0-beta1\r\n\r\n* Update sequence.md\r\n\r\n* bump beta1 on readme\r\n\r\n* Add latex code block background to config\r\n\r\n* BLD: Bump python support version 3.9 (#1183)\r\n\r\n* BLD: Bump python support version 3.9\r\n\r\n* Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4\r\n\r\n* BLD: Bump torch and tensorflow\r\n\r\n* Update Jenkinsfile\r\n\r\n* Update chapter_installation/index.md\r\n\r\n* Update chapter_installation/index.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update config.ini\r\n\r\n* Update INFO.md\r\n\r\n* Update INFO.md\r\n\r\n* Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187)\r\n\r\n* resolve the conflicts\r\n\r\n* revise from publisher (#1089)\r\n\r\n* revise from publisher\r\n\r\n* d2l api\r\n\r\n* post_latex\r\n\r\n* revise from publisher\r\n\r\n* revise ch11\r\n\r\n* Delete d2l-Copy1.bib\r\n\r\n* clear cache\r\n\r\n* rm d2lbook clear\r\n\r\n* debug anchor\r\n\r\n* keep original d2l doc\r\n\r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\n\r\n* 重复语句 (#1188)\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improve expression for chapter_preliminaries/pandas.md (#1184)\r\n\r\n* Update pandas.md\r\n\r\n* Improve expression\r\n\r\n* Improve expression\r\n\r\n* Update chapter_preliminaries/pandas.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improce expression for chapter_preliminaries/linear-algebra.md (#1185)\r\n\r\n* Improce expression\r\n\r\n* Improve code comments\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Fix multibox_detection bugs\r\n\r\n* Update d2l to 0.17.5 version\r\n\r\n* restore older version\r\n\r\n* Upgrade pandas\r\n\r\n* change to python3.8\r\n\r\n* Test warning log\r\n\r\n* relocate warning log\r\n\r\n* test logs filtering\r\n\r\n* Update gru.md\r\n\r\n* Add DeprecationWarning filter\r\n\r\n* Test warning log\r\n\r\n* Update attention mechanisms & computational performance\r\n\r\n* Update multilayer 
perceptron& linear & convolution networks & computer vision\r\n\r\n* Update recurrent&optimition&nlp pretraining & nlp applications\r\n\r\n* ignore warnings\r\n\r\n* Update index.md\r\n\r\n* Update linear networks\r\n\r\n* Update multilayer perceptrons&deep learning computation\r\n\r\n* Update preliminaries\r\n\r\n* Check and Add warning filter\r\n\r\n* Update kaggle-cifar10.md\r\n\r\n* Update object-detection-dataset.md\r\n\r\n* Update ssd.md fcn.md\r\n\r\n* Update hybridize.md\r\n\r\n* Update hybridize.md\r\n\r\nSigned-off-by: sunhaizhou \r\nCo-authored-by: zhou201505013 <39976863+zhou201505013@users.noreply.github.com>\r\nCo-authored-by: Xinwei Liu \r\nCo-authored-by: Anirudh Dagar \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: hugo_han <57249629+HugoHann@users.noreply.github.com>\r\nCo-authored-by: gyro永不抽风 <1247006353@qq.com>\r\nCo-authored-by: CanChengZheng \r\nCo-authored-by: linlin \r\nCo-authored-by: iuk \r\nCo-authored-by: yoos <49556860+liyunlongaaa@users.noreply.github.com>\r\nCo-authored-by: Mr. Justice Lawrence John Wargrave <65226618+RUCWargrave@users.noreply.github.com>\r\nCo-authored-by: Chiyuan Fu \r\nCo-authored-by: Sunhuashan <48636870+Sunhuashan@users.noreply.github.com>\r\nCo-authored-by: Haiker Sun \r\nCo-authored-by: Ming Liu \r\nCo-authored-by: goldmermaid \r\nCo-authored-by: silenceZheng66 <13754430639@163.com>\r\nCo-authored-by: Wenchao Yan <56541797+YWonchall@users.noreply.github.com>\r\nCo-authored-by: Kiki2049 <55939997+Kiki2049@users.noreply.github.com>\r\nCo-authored-by: Krahets \r\nCo-authored-by: friedmainfunction <73703265+friedmainfunction@users.noreply.github.com>\r\nCo-authored-by: Jameson \r\nCo-authored-by: P. Yao <12227516+YaoPengCN@users.noreply.github.com>\r\nCo-authored-by: Yulv-git <34329208+Yulv-git@users.noreply.github.com>\r\nCo-authored-by: Liu,Xiao <45966993+liuxiao916@users.noreply.github.com>\r\nCo-authored-by: YIN, Gang <1246410+yingang@users.noreply.github.com>\r\nCo-authored-by: Joe-HZ <58297431+Joe-HZ@users.noreply.github.com>\r\nCo-authored-by: lybloveyou <102609904+lybloveyou@users.noreply.github.com>\r\nCo-authored-by: VigourJiang \r\nCo-authored-by: zxhd863943427 <74853597+zxhd863943427@users.noreply.github.com>\r\nCo-authored-by: LYF <27893441+liyufan@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\nCo-authored-by: xiaotinghe \r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Holly-Max <60691735+Holly-Max@users.noreply.github.com>\r\nCo-authored-by: HinGwenWoong \r\nCo-authored-by: Shuai Zhang ", "code": "def predict_snli(net, vocab, premise, hypothesis):\n \n premise = np.array(vocab[premise], ctx=d2l.try_gpu())\n hypothesis = np.array(vocab[hypothesis], ctx=d2l.try_gpu())\n label = np.argmax(net([premise.reshape((1, -1)),\n hypothesis.reshape((1, -1))]), axis=1)\n return 'entailment' if label == 0 else 'contradiction' if label == 1 \\\n else 'neutral'\n\nd2l.DATA_HUB['ml-100k'] = (\n 'https://files.grouplens.org/datasets/movielens/ml-100k.zip',\n 'cd4dcac4241c8a4ad7badc7ca635da8a69dddb83')\n", "url": "https://github.com/d2l-ai/d2l-zh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 97, "n_words": 40, "vocab_size": 31, "complexity": 3, "nloc": 7, "token_counts": 104, "n_ast_nodes": 183, "n_identifiers": 15, "d_id": 37391, "documentation": { "docstring": "Predict the logical relationship between the premise and hypothesis.\n\n Defined in :numref:`sec_natural-language-inference-attention`", "n_words": 12, "vocab_size": 11, 
"n_whitespaces": 14, "language": "en" } }, { "id": 36379, "commit_id": "4975002df50c472cbb6f8ac3580e475f570606ab", "repo": "transformers", "path": "src/transformers/utils/doc.py", "file_name": "doc.py", "fun_name": "_prepare_output_docstrings", "commit_message": "Reorganize file utils (#16264)\n\n* Split file_utils in several submodules\r\n\r\n* Fixes\r\n\r\n* Add back more objects\r\n\r\n* More fixes\r\n\r\n* Who exactly decided to import that from there?\r\n\r\n* Second suggestion to code with code review\r\n\r\n* Revert wront move\r\n\r\n* Fix imports\r\n\r\n* Adapt all imports\r\n\r\n* Adapt all imports everywhere\r\n\r\n* Revert this import, will fix in a separate commit", "code": "def _prepare_output_docstrings(output_type, config_class, min_indent=None):\n \n output_docstring = output_type.__doc__\n\n # Remove the head of the docstring to keep the list of args only\n lines = output_docstring.split(\"\\n\")\n i = 0\n while i < len(lines) and re.search(r\"^\\s*(Args|Parameters):\\s*$\", lines[i]) is None:\n i += 1\n if i < len(lines):\n params_docstring = \"\\n\".join(lines[(i + 1) :])\n params_docstring = _convert_output_args_doc(params_docstring)\n\n # Add the return introduction\n full_output_type = f\"{output_type.__module__}.{output_type.__name__}\"\n intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith(\"TF\") else PT_RETURN_INTRODUCTION\n intro = intro.format(full_output_type=full_output_type, config_class=config_class)\n result = intro + params_docstring\n\n # Apply minimum indent if necessary\n if min_indent is not None:\n lines = result.split(\"\\n\")\n # Find the indent of the first nonempty line\n i = 0\n while len(lines[i]) == 0:\n i += 1\n indent = len(_get_indent(lines[i]))\n # If too small, add indentation to all nonempty lines\n if indent < min_indent:\n to_add = \" \" * (min_indent - indent)\n lines = [(f\"{to_add}{line}\" if len(line) > 0 else line) for line in lines]\n result = \"\\n\".join(lines)\n\n return result\n\n\nPT_TOKEN_CLASSIFICATION_SAMPLE = r\n\nPT_QUESTION_ANSWERING_SAMPLE = r\n\nPT_SEQUENCE_CLASSIFICATION_SAMPLE = r\n\n\nPT_MASKED_LM_SAMPLE = r\n\nPT_BASE_MODEL_SAMPLE = r\n\nPT_MULTIPLE_CHOICE_SAMPLE = r\n\nPT_CAUSAL_LM_SAMPLE = r\n\nPT_SPEECH_BASE_MODEL_SAMPLE = r\n\nPT_SPEECH_CTC_SAMPLE = r\n\nPT_SPEECH_SEQ_CLASS_SAMPLE = r\n\n\nPT_SPEECH_FRAME_CLASS_SAMPLE = r\n\n\nPT_SPEECH_XVECTOR_SAMPLE = r\n\nPT_VISION_BASE_MODEL_SAMPLE = r\n\nPT_VISION_SEQ_CLASS_SAMPLE = r\n\n\nPT_SAMPLE_DOCSTRINGS = {\n \"SequenceClassification\": PT_SEQUENCE_CLASSIFICATION_SAMPLE,\n \"QuestionAnswering\": PT_QUESTION_ANSWERING_SAMPLE,\n \"TokenClassification\": PT_TOKEN_CLASSIFICATION_SAMPLE,\n \"MultipleChoice\": PT_MULTIPLE_CHOICE_SAMPLE,\n \"MaskedLM\": PT_MASKED_LM_SAMPLE,\n \"LMHead\": PT_CAUSAL_LM_SAMPLE,\n \"BaseModel\": PT_BASE_MODEL_SAMPLE,\n \"SpeechBaseModel\": PT_SPEECH_BASE_MODEL_SAMPLE,\n \"CTC\": PT_SPEECH_CTC_SAMPLE,\n \"AudioClassification\": PT_SPEECH_SEQ_CLASS_SAMPLE,\n \"AudioFrameClassification\": PT_SPEECH_FRAME_CLASS_SAMPLE,\n \"AudioXVector\": PT_SPEECH_XVECTOR_SAMPLE,\n \"VisionBaseModel\": PT_VISION_BASE_MODEL_SAMPLE,\n \"ImageClassification\": PT_VISION_SEQ_CLASS_SAMPLE,\n}\n\n\nTF_TOKEN_CLASSIFICATION_SAMPLE = r\n\nTF_QUESTION_ANSWERING_SAMPLE = r\n\nTF_SEQUENCE_CLASSIFICATION_SAMPLE = r\n\nTF_MASKED_LM_SAMPLE = r\n\nTF_BASE_MODEL_SAMPLE = r\n\nTF_MULTIPLE_CHOICE_SAMPLE = r\n\nTF_CAUSAL_LM_SAMPLE = r\n\nTF_SAMPLE_DOCSTRINGS = {\n \"SequenceClassification\": TF_SEQUENCE_CLASSIFICATION_SAMPLE,\n \"QuestionAnswering\": 
TF_QUESTION_ANSWERING_SAMPLE,\n \"TokenClassification\": TF_TOKEN_CLASSIFICATION_SAMPLE,\n \"MultipleChoice\": TF_MULTIPLE_CHOICE_SAMPLE,\n \"MaskedLM\": TF_MASKED_LM_SAMPLE,\n \"LMHead\": TF_CAUSAL_LM_SAMPLE,\n \"BaseModel\": TF_BASE_MODEL_SAMPLE,\n}\n\n\nFLAX_TOKEN_CLASSIFICATION_SAMPLE = r\n\nFLAX_QUESTION_ANSWERING_SAMPLE = r\n\nFLAX_SEQUENCE_CLASSIFICATION_SAMPLE = r\n\nFLAX_MASKED_LM_SAMPLE = r\n\nFLAX_BASE_MODEL_SAMPLE = r\n\nFLAX_MULTIPLE_CHOICE_SAMPLE = r\n\nFLAX_CAUSAL_LM_SAMPLE = r\n\nFLAX_SAMPLE_DOCSTRINGS = {\n \"SequenceClassification\": FLAX_SEQUENCE_CLASSIFICATION_SAMPLE,\n \"QuestionAnswering\": FLAX_QUESTION_ANSWERING_SAMPLE,\n \"TokenClassification\": FLAX_TOKEN_CLASSIFICATION_SAMPLE,\n \"MultipleChoice\": FLAX_MULTIPLE_CHOICE_SAMPLE,\n \"MaskedLM\": FLAX_MASKED_LM_SAMPLE,\n \"BaseModel\": FLAX_BASE_MODEL_SAMPLE,\n \"LMHead\": FLAX_CAUSAL_LM_SAMPLE,\n}\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 513, "n_words": 304, "vocab_size": 165, "complexity": 10, "nloc": 24, "token_counts": 209, "n_ast_nodes": 784, "n_identifiers": 59, "d_id": 6603, "documentation": { "docstring": "\n Prepares the return part of the docstring using `output_type`.\n \n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import torch\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> labels = torch.tensor([1] * inputs[\"input_ids\"].size(1)).unsqueeze(0) # Batch size 1\n\n >>> outputs = model(**inputs, labels=labels)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import torch\n\n >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> question, text = \"Who was Jim Henson?\", \"Jim Henson was a nice puppet\"\n >>> inputs = tokenizer(question, text, return_tensors=\"pt\")\n >>> start_positions = torch.tensor([1])\n >>> end_positions = torch.tensor([3])\n\n >>> outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions)\n >>> loss = outputs.loss\n >>> round(loss.item(), 2)\n {expected_loss}\n\n >>> start_scores = outputs.start_logits\n >>> list(start_scores.shape)\n {expected_output}\n\n >>> end_scores = outputs.end_logits\n >>> list(end_scores.shape)\n {expected_output}\n ```\n\n Example of single-label classification:\n\n ```python\n >>> import torch\n >>> from transformers import {processor_class}, {model_class}\n\n >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\", num_labels=2)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> labels = torch.tensor([1]).unsqueeze(0) # Batch size 1\n >>> outputs = model(**inputs, labels=labels)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n >>> list(logits.shape)\n {expected_output}\n ```\n\n Example of multi-label classification:\n\n ```python\n >>> import torch\n >>> from transformers import {processor_class}, {model_class}\n\n >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT\n\n >>> tokenizer = 
{processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\", problem_type=\"multi_label_classification\", num_labels=2)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> labels = torch.tensor([[1, 1]], dtype=torch.float) # need dtype=float for BCEWithLogitsLoss\n >>> outputs = model(**inputs, labels=labels)\n >>> loss = outputs.loss\n >>> list(logits.shape)\n {expected_output}\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import torch\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"The capital of France is {mask}.\", return_tensors=\"pt\")\n >>> labels = tokenizer(\"The capital of France is Paris.\", return_tensors=\"pt\")[\"input_ids\"]\n\n >>> outputs = model(**inputs, labels=labels)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import torch\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> last_hidden_states = outputs.last_hidden_state\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import torch\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> prompt = \"In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.\"\n >>> choice0 = \"It is eaten with a fork and a knife.\"\n >>> choice1 = \"It is eaten while held in the hand.\"\n >>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1\n\n >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=\"pt\", padding=True)\n >>> outputs = model(**{{k: v.unsqueeze(0) for k, v in encoding.items()}}, labels=labels) # batch size is 1\n\n >>> # the linear classifier still needs to be trained\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> import torch\n >>> from transformers import {processor_class}, {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs, labels=inputs[\"input_ids\"])\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import torch\n >>> from datasets import load_dataset\n\n >>> dataset = load_dataset(\"hf-internal-testing/librispeech_asr_demo\", \"clean\", split=\"validation\")\n >>> dataset = dataset.sort(\"id\")\n >>> sampling_rate = dataset.features[\"audio\"].sampling_rate\n\n >>> processor = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> # audio file is decoded on the fly\n >>> inputs = processor(dataset[0][\"audio\"][\"array\"], sampling_rate=sampling_rate, return_tensors=\"pt\")\n >>> with torch.no_grad():\n ... 
outputs = model(**inputs)\n\n >>> last_hidden_states = outputs.last_hidden_state\n >>> list(last_hidden_states.shape)\n {expected_output}\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> from datasets import load_dataset\n >>> import torch\n\n >>> dataset = load_dataset(\"hf-internal-testing/librispeech_asr_demo\", \"clean\", split=\"validation\")\n >>> dataset = dataset.sort(\"id\")\n >>> sampling_rate = dataset.features[\"audio\"].sampling_rate\n\n >>> processor = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> # audio file is decoded on the fly\n >>> inputs = processor(dataset[0][\"audio\"][\"array\"], sampling_rate=sampling_rate, return_tensors=\"pt\")\n >>> with torch.no_grad():\n ... logits = model(**inputs).logits\n >>> predicted_ids = torch.argmax(logits, dim=-1)\n\n >>> # transcribe speech\n >>> transcription = processor.batch_decode(predicted_ids)\n >>> transcription[0]\n {expected_output}\n ```\n\n ```python\n >>> with processor.as_target_processor():\n ... inputs[\"labels\"] = processor(dataset[0][\"text\"], return_tensors=\"pt\").input_ids\n\n >>> # compute loss\n >>> loss = model(**inputs).loss\n >>> round(loss.item(), 2)\n {expected_loss}\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> from datasets import load_dataset\n >>> import torch\n\n >>> dataset = load_dataset(\"hf-internal-testing/librispeech_asr_demo\", \"clean\", split=\"validation\")\n >>> dataset = dataset.sort(\"id\")\n >>> sampling_rate = dataset.features[\"audio\"].sampling_rate\n\n >>> feature_extractor = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> # audio file is decoded on the fly\n >>> inputs = feature_extractor(dataset[0][\"audio\"][\"array\"], sampling_rate=sampling_rate, return_tensors=\"pt\")\n\n >>> with torch.no_grad():\n ... logits = model(**inputs).logits\n\n >>> predicted_class_ids = torch.argmax(logits, dim=-1).item()\n >>> predicted_label = model.config.id2label[predicted_class_ids]\n >>> predicted_label\n {expected_output}\n ```\n\n ```python\n >>> # compute loss - target_label is e.g. \"down\"\n >>> target_label = model.config.id2label[0]\n >>> inputs[\"labels\"] = torch.tensor([model.config.label2id[target_label]])\n >>> loss = model(**inputs).loss\n >>> round(loss.item(), 2)\n {expected_loss}\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> from datasets import load_dataset\n >>> import torch\n\n >>> dataset = load_dataset(\"hf-internal-testing/librispeech_asr_demo\", \"clean\", split=\"validation\")\n >>> dataset = dataset.sort(\"id\")\n >>> sampling_rate = dataset.features[\"audio\"].sampling_rate\n\n >>> feature_extractor = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> # audio file is decoded on the fly\n >>> inputs = feature_extractor(dataset[0][\"audio\"][\"array\"], return_tensors=\"pt\", sampling_rate=sampling_rate)\n >>> with torch.no_grad():\n ... 
logits = model(**inputs).logits\n\n >>> probabilities = torch.sigmoid(logits[0])\n >>> # labels is a one-hot array of shape (num_frames, num_speakers)\n >>> labels = (probabilities > 0.5).long()\n >>> labels[0].tolist()\n {expected_output}\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> from datasets import load_dataset\n >>> import torch\n\n >>> dataset = load_dataset(\"hf-internal-testing/librispeech_asr_demo\", \"clean\", split=\"validation\")\n >>> dataset = dataset.sort(\"id\")\n >>> sampling_rate = dataset.features[\"audio\"].sampling_rate\n\n >>> feature_extractor = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> # audio file is decoded on the fly\n >>> inputs = feature_extractor(\n ... [d[\"array\"] for d in dataset[:2][\"audio\"]], sampling_rate=sampling_rate, return_tensors=\"pt\", padding=True\n ... )\n >>> with torch.no_grad():\n ... embeddings = model(**inputs).embeddings\n\n >>> embeddings = torch.nn.functional.normalize(embeddings, dim=-1).cpu()\n\n >>> # the resulting embeddings can be used for cosine similarity-based retrieval\n >>> cosine_sim = torch.nn.CosineSimilarity(dim=-1)\n >>> similarity = cosine_sim(embeddings[0], embeddings[1])\n >>> threshold = 0.7 # the optimal threshold is dataset-dependent\n >>> if similarity < threshold:\n ... print(\"Speakers are not the same!\")\n >>> round(similarity.item(), 2)\n {expected_output}\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import torch\n >>> from datasets import load_dataset\n\n >>> dataset = load_dataset(\"huggingface/cats-image\")\n >>> image = dataset[\"test\"][\"image\"][0]\n\n >>> feature_extractor = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = feature_extractor(image, return_tensors=\"pt\")\n\n >>> with torch.no_grad():\n ... outputs = model(**inputs)\n\n >>> last_hidden_states = outputs.last_hidden_state\n >>> list(last_hidden_states.shape)\n {expected_output}\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import torch\n >>> from datasets import load_dataset\n\n >>> dataset = load_dataset(\"huggingface/cats-image\")\n >>> image = dataset[\"test\"][\"image\"][0]\n\n >>> feature_extractor = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = feature_extractor(image, return_tensors=\"pt\")\n\n >>> with torch.no_grad():\n ... logits = model(**inputs).logits\n\n >>> # model predicts one of the 1000 ImageNet classes\n >>> predicted_label = logits.argmax(-1).item()\n >>> print(model.config.id2label[predicted_label])\n {expected_output}\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import tensorflow as tf\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"tf\")\n >>> input_ids = inputs[\"input_ids\"]\n >>> inputs[\"labels\"] = tf.reshape(\n ... 
tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))\n >>> ) # Batch size 1\n\n >>> outputs = model(inputs)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import tensorflow as tf\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> question, text = \"Who was Jim Henson?\", \"Jim Henson was a nice puppet\"\n >>> input_dict = tokenizer(question, text, return_tensors=\"tf\")\n >>> outputs = model(input_dict)\n >>> start_logits = outputs.start_logits\n >>> end_logits = outputs.end_logits\n\n >>> all_tokens = tokenizer.convert_ids_to_tokens(input_dict[\"input_ids\"].numpy()[0])\n >>> answer = \" \".join(all_tokens[tf.math.argmax(start_logits, 1)[0] : tf.math.argmax(end_logits, 1)[0] + 1])\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import tensorflow as tf\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"tf\")\n >>> inputs[\"labels\"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1\n\n >>> outputs = model(inputs)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import tensorflow as tf\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"The capital of France is {mask}.\", return_tensors=\"tf\")\n >>> inputs[\"labels\"] = tokenizer(\"The capital of France is Paris.\", return_tensors=\"tf\")[\"input_ids\"]\n\n >>> outputs = model(inputs)\n >>> loss = outputs.loss\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import tensorflow as tf\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"tf\")\n >>> outputs = model(inputs)\n\n >>> last_hidden_states = outputs.last_hidden_state\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import tensorflow as tf\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> prompt = \"In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.\"\n >>> choice0 = \"It is eaten with a fork and a knife.\"\n >>> choice1 = \"It is eaten while held in the hand.\"\n\n >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=\"tf\", padding=True)\n >>> inputs = {{k: tf.expand_dims(v, 0) for k, v in encoding.items()}}\n >>> outputs = model(inputs) # batch size is 1\n\n >>> # the linear classifier still needs to be trained\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n >>> import tensorflow as tf\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"tf\")\n >>> outputs = 
model(inputs)\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"jax\")\n\n >>> outputs = model(**inputs)\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> question, text = \"Who was Jim Henson?\", \"Jim Henson was a nice puppet\"\n >>> inputs = tokenizer(question, text, return_tensors=\"jax\")\n\n >>> outputs = model(**inputs)\n >>> start_scores = outputs.start_logits\n >>> end_scores = outputs.end_logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"jax\")\n\n >>> outputs = model(**inputs)\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"The capital of France is {mask}.\", return_tensors=\"jax\")\n\n >>> outputs = model(**inputs)\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"jax\")\n >>> outputs = model(**inputs)\n\n >>> last_hidden_states = outputs.last_hidden_state\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> prompt = \"In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced.\"\n >>> choice0 = \"It is eaten with a fork and a knife.\"\n >>> choice1 = \"It is eaten while held in the hand.\"\n\n >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors=\"jax\", padding=True)\n >>> outputs = model(**{{k: v[None, :] for k, v in encoding.items()}})\n\n >>> logits = outputs.logits\n ```\n\n Example:\n\n ```python\n >>> from transformers import {processor_class}, {model_class}\n\n >>> tokenizer = {processor_class}.from_pretrained(\"{checkpoint}\")\n >>> model = {model_class}.from_pretrained(\"{checkpoint}\")\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"np\")\n >>> outputs = model(**inputs)\n\n >>> # retrieve logts for next token\n >>> next_token_logits = outputs.logits[:, -1]\n ```\n", "n_words": 1837, "vocab_size": 302, "n_whitespaces": 3163, "language": "en" } }, { "id": 249805, "commit_id": "a3623af74e0af0d2f6cbd37b47dc54a1acd314d5", "repo": "synapse", "path": "tests/rest/admin/test_user.py", "file_name": "test_user.py", "fun_name": "test_medium_does_not_exist", "commit_message": "Add an Admin API endpoint for looking up users based on 3PID (#14405)", "code": "def 
test_medium_does_not_exist(self) -> None:\n \n # test for unknown medium\n url = \"/_synapse/admin/v1/threepid/publickey/users/unknown-key\"\n\n channel = self.make_request(\n \"GET\",\n url,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(404, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.NOT_FOUND, channel.json_body[\"errcode\"])\n\n # test for unknown user with a known medium\n url = \"/_synapse/admin/v1/threepid/email/users/unknown\"\n\n channel = self.make_request(\n \"GET\",\n url,\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(404, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.NOT_FOUND, channel.json_body[\"errcode\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 205, "n_words": 48, "vocab_size": 28, "complexity": 1, "nloc": 19, "token_counts": 110, "n_ast_nodes": 178, "n_identifiers": 13, "d_id": 73140, "documentation": { "docstring": "Tests that both a lookup for a medium that does not exist and a user that\n doesn't exist with that third party ID returns a 404", "n_words": 26, "vocab_size": 19, "n_whitespaces": 32, "language": "en" } }, { "id": 308638, "commit_id": "8a8ffa1c0844106a8827dd28b1d42792b366c5ee", "repo": "core", "path": "homeassistant/components/sonos/binary_sensor.py", "file_name": "binary_sensor.py", "fun_name": "_async_poll", "commit_message": "Add support for Sonos microphone binary_sensor (#63097)\n\nCo-authored-by: J. Nick Koston ", "code": "async def _async_poll(self) -> None:\n \n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 12, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 8, "n_ast_nodes": 17, "n_identifiers": 2, "d_id": 107383, "documentation": { "docstring": "Stub for abstract class implementation. Not a pollable attribute.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 160087, "commit_id": "95a7bb4746197a05fd23dbe39c7b3dbb105a18d9", "repo": "numpy", "path": "numpy/core/fromnumeric.py", "file_name": "fromnumeric.py", "fun_name": "argpartition", "commit_message": "DOC: typo corrected in numpy.argpartition (#21201)\n\n* DOC: numpy.argpartition typo corrected\r\n\r\nCo-authored-by: Matti Picus ", "code": "def argpartition(a, kth, axis=-1, kind='introselect', order=None):\n \n return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 19, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 2, "token_counts": 42, "n_ast_nodes": 63, "n_identifiers": 7, "d_id": 38481, "documentation": { "docstring": "\n Perform an indirect partition along the given axis using the\n algorithm specified by the `kind` keyword. It returns an array of\n indices of the same shape as `a` that index data along the given\n axis in partitioned order.\n\n .. versionadded:: 1.8.0\n\n Parameters\n ----------\n a : array_like\n Array to sort.\n kth : int or sequence of ints\n Element index to partition by. The k-th element will be in its\n final sorted position and all smaller elements will be moved\n before it and all larger elements behind it. The order all\n elements in the partitions is undefined. 
If provided with a\n sequence of k-th it will partition all of them into their sorted\n position at once.\n\n .. deprecated:: 1.22.0\n Passing booleans as index is deprecated.\n axis : int or None, optional\n Axis along which to sort. The default is -1 (the last axis). If\n None, the flattened array is used.\n kind : {'introselect'}, optional\n Selection algorithm. Default is 'introselect'\n order : str or list of str, optional\n When `a` is an array with fields defined, this argument\n specifies which fields to compare first, second, etc. A single\n field can be specified as a string, and not all fields need be\n specified, but unspecified fields will still be used, in the\n order in which they come up in the dtype, to break ties.\n\n Returns\n -------\n index_array : ndarray, int\n Array of indices that partition `a` along the specified axis.\n If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`.\n More generally, ``np.take_along_axis(a, index_array, axis)``\n always yields the partitioned `a`, irrespective of dimensionality.\n\n See Also\n --------\n partition : Describes partition algorithms used.\n ndarray.partition : Inplace partition.\n argsort : Full indirect sort.\n take_along_axis : Apply ``index_array`` from argpartition\n to an array as if by calling partition.\n\n Notes\n -----\n See `partition` for notes on the different selection algorithms.\n\n Examples\n --------\n One dimensional array:\n\n >>> x = np.array([3, 4, 2, 1])\n >>> x[np.argpartition(x, 3)]\n array([2, 1, 3, 4])\n >>> x[np.argpartition(x, (1, 3))]\n array([1, 2, 3, 4])\n\n >>> x = [3, 4, 2, 1]\n >>> np.array(x)[np.argpartition(x, 3)]\n array([2, 1, 3, 4])\n\n Multi-dimensional array:\n\n >>> x = np.array([[3, 4, 2], [1, 3, 1]])\n >>> index_array = np.argpartition(x, kth=1, axis=-1)\n >>> np.take_along_axis(x, index_array, axis=-1) # same as np.partition(x, kth=1)\n array([[2, 3, 4],\n [1, 1, 3]])\n\n ", "n_words": 374, "vocab_size": 213, "n_whitespaces": 681, "language": "en" } }, { "id": 107579, "commit_id": "c6e43ff4cfd3cb583b30f9882d6228041edc0fd6", "repo": "matplotlib", "path": "lib/matplotlib/axes/_base.py", "file_name": "_base.py", "fun_name": "get_ylim", "commit_message": "Fix ambiguous link targets in docs.", "code": "def get_ylim(self):\n \n return tuple(self.viewLim.intervaly)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 27, "n_identifiers": 5, "d_id": 22826, "documentation": { "docstring": "\n Return the y-axis view limits.\n\n Returns\n -------\n bottom, top : (float, float)\n The current y-axis limits in data coordinates.\n\n See Also\n --------\n .Axes.set_ylim\n set_ybound, get_ybound\n invert_yaxis, yaxis_inverted\n\n Notes\n -----\n The y-axis may be inverted, in which case the *bottom* value\n will be greater than the *top* value.\n ", "n_words": 47, "vocab_size": 40, "n_whitespaces": 157, "language": "en" } }, { "id": 63709, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/resolvelib/structs.py", "file_name": "structs.py", "fun_name": "remove", "commit_message": "upd; format", "code": "def remove(self, key):\n \n self._vertices.remove(key)\n for f in self._forwards.pop(key):\n self._backwards[f].remove(key)\n for t in self._backwards.pop(key):\n self._forwards[t].remove(key)\n", 
"url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 64, "n_words": 14, "vocab_size": 12, "complexity": 3, "nloc": 6, "token_counts": 62, "n_ast_nodes": 98, "n_identifiers": 9, "d_id": 13480, "documentation": { "docstring": "Remove a vertex from the graph, disconnecting all edges from/to it.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 111911, "commit_id": "b52f7756fbcf6669dbe92e97e11415c4084cf881", "repo": "nni", "path": "nni/algorithms/hpo/tpe_tuner.py", "file_name": "tpe_tuner.py", "fun_name": "adaptive_parzen_normal", "commit_message": "HPO doc (#4579)", "code": "def adaptive_parzen_normal(args, history_mus, prior_mu, prior_sigma):\n \n mus = np.append(history_mus, prior_mu)\n order = np.argsort(mus)\n mus = mus[order]\n prior_index = np.searchsorted(mus, prior_mu)\n\n if len(mus) == 1:\n sigmas = np.asarray([prior_sigma])\n elif len(mus) == 2:\n sigmas = np.asarray([prior_sigma * 0.5, prior_sigma * 0.5])\n sigmas[prior_index] = prior_sigma\n else:\n l_delta = mus[1:-1] - mus[:-2]\n r_delta = mus[2:] - mus[1:-1]\n sigmas_mid = np.maximum(l_delta, r_delta)\n sigmas = np.concatenate([[mus[1] - mus[0]], sigmas_mid, [mus[-1] - mus[-2]]])\n sigmas[prior_index] = prior_sigma\n # \"magic formula\" in official implementation\n n = min(100, len(mus) + 1)\n sigmas = np.clip(sigmas, prior_sigma / n, prior_sigma)\n\n weights = np.append(linear_forgetting_weights(args, len(mus)), args.prior_weight)\n weights = weights[order]\n\n return weights / np.sum(weights), mus, sigmas\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 199, "n_words": 101, "vocab_size": 65, "complexity": 3, "nloc": 21, "token_counts": 249, "n_ast_nodes": 373, "n_identifiers": 27, "d_id": 24508, "documentation": { "docstring": "\n The \"Adaptive Parzen Estimator\" described in paper section 4.2, for normal distribution.\n\n Because TPE internally only supports categorical and normal distributed space (domain),\n this function is used for everything other than \"choice\" and \"randint\".\n\n Parameters\n ----------\n args: TpeArguments\n Algorithm arguments.\n history_mus: 1-d array of float\n Parameter values evaluated in history.\n These are the \"observations\" in paper section 4.2. 
(\"placing density in the vicinity of K observations\")\n prior_mu: float\n µ value of normal search space.\n piror_sigma: float\n σ value of normal search space.\n\n Returns\n -------\n Tuple of three 1-d float arrays: (weight, µ, σ).\n\n The tuple represents N+1 \"vicinity of observations\" and each one's weight,\n calculated from \"N\" history and \"1\" user provided prior.\n\n The result is sorted by µ.\n ", "n_words": 119, "vocab_size": 91, "n_whitespaces": 203, "language": "en" } }, { "id": 241747, "commit_id": "97710406210a64f94b135500742165d40ef69cf8", "repo": "lightning", "path": "tests/callbacks/test_tqdm_progress_bar.py", "file_name": "test_tqdm_progress_bar.py", "fun_name": "test_tqdm_progress_bar_print_no_train", "commit_message": "Add typing to `TQDMProgressBar` (#11369)", "code": "def test_tqdm_progress_bar_print_no_train(tqdm_write, tmpdir):\n \n model = PrintModel()\n bar = TQDMProgressBar()\n trainer = Trainer(\n default_root_dir=tmpdir,\n num_sanity_val_steps=0,\n limit_val_batches=1,\n limit_test_batches=1,\n limit_predict_batches=1,\n max_steps=1,\n callbacks=[bar],\n )\n\n trainer.validate(model)\n trainer.test(model)\n trainer.predict(model)\n assert tqdm_write.call_args_list == [\n call(\"validation_step\", file=sys.stderr),\n call(\"test_step\"),\n call(\"predict_step\"),\n ]\n\n\n@mock.patch(\"builtins.print\")\n@mock.patch(\"pytorch_lightning.callbacks.progress.tqdm_progress.Tqdm.write\")", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "@mock.patch(\"builtins.print\")\n@mock.patch(\"pytorch_lightning.callbacks.progress.tqdm_progress.Tqdm.write\")", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 132, "n_words": 34, "vocab_size": 32, "complexity": 1, "nloc": 20, "token_counts": 99, "n_ast_nodes": 183, "n_identifiers": 26, "d_id": 69681, "documentation": { "docstring": "Test that printing in the LightningModule redirects arguments to the progress bar without training.", "n_words": 14, "vocab_size": 13, "n_whitespaces": 13, "language": "en" } }, { "id": 248735, "commit_id": "21eeacc99551febcddcef21db96a2bd82166fc7e", "repo": "synapse", "path": "synapse/storage/databases/main/appservice.py", "file_name": "appservice.py", "fun_name": "get_appservice_last_pos", "commit_message": "Federation Sender & Appservice Pusher Stream Optimisations (#13251)\n\n* Replace `get_new_events_for_appservice` with `get_all_new_events_stream`\r\n\r\nThe functions were near identical and this brings the AS worker closer\r\nto the way federation senders work which can allow for multiple workers\r\nto handle AS traffic.\r\n\r\n* Pull received TS alongside events when processing the stream\r\n\r\nThis avoids an extra query -per event- when both federation sender\r\nand appservice pusher process events.", "code": "async def get_appservice_last_pos(self) -> int:\n \n\n return await self.db_pool.simple_select_one_onecol(\n table=\"appservice_stream_position\",\n retcol=\"stream_ordering\",\n keyvalues={},\n desc=\"get_appservice_last_pos\",\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 78, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 10, "token_counts": 34, "n_ast_nodes": 60, "n_identifiers": 9, "d_id": 72433, "documentation": { "docstring": "\n Get the last stream ordering position for the appservice process.\n ", "n_words": 10, "vocab_size": 9, "n_whitespaces": 25, "language": "en" } }, { "id": 261659, "commit_id": 
"40d7d880eddaf3a9a5e37ba2a8206caf22744926", "repo": "scikit-learn", "path": "sklearn/metrics/_plot/tests/test_predict_error_display.py", "file_name": "test_predict_error_display.py", "fun_name": "test_from_estimator_not_fitted", "commit_message": "FEA add PredictionErrorDisplay (#18020)\n\nCo-authored-by: jeremie du boisberranger \r\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Christian Lorentzen ", "code": "def test_from_estimator_not_fitted(pyplot):\n \n regressor = Ridge()\n with pytest.raises(NotFittedError, match=\"is not fitted yet.\"):\n PredictionErrorDisplay.from_estimator(regressor, X, y)\n\n\n@pytest.mark.parametrize(\"class_method\", [\"from_estimator\", \"from_predictions\"])\n@pytest.mark.parametrize(\"kind\", [\"actual_vs_predicted\", \"residual_vs_predicted\"])", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"class_method\", [\"from_estimator\", \"from_predictions\"])\n@pytest.mark.parametrize(\"kind\", [\"actual_vs_predicted\", \"residual_vs_predicted\"])", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 34, "n_words": 20, "vocab_size": 20, "complexity": 1, "nloc": 4, "token_counts": 33, "n_ast_nodes": 113, "n_identifiers": 14, "d_id": 76920, "documentation": { "docstring": "Check that we raise a `NotFittedError` when the passed regressor is not\n fit.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 15, "language": "en" } }, { "id": 246307, "commit_id": "df36945ff0e4a293a9dac0da07e2c94256835b32", "repo": "synapse", "path": "tests/rest/client/test_relations.py", "file_name": "test_relations.py", "fun_name": "test_pagination_from_sync_and_messages", "commit_message": "Support pagination tokens from /sync and /messages in the relations API. (#11952)", "code": "def test_pagination_from_sync_and_messages(self):\n \n channel = self._send_relation(RelationTypes.ANNOTATION, \"m.reaction\", \"A\")\n self.assertEquals(200, channel.code, channel.json_body)\n annotation_id = channel.json_body[\"event_id\"]\n # Send an event after the relation events.\n self.helper.send(self.room, body=\"Latest event\", tok=self.user_token)\n\n # Request /sync, limiting it such that only the latest event is returned\n # (and not the relation).\n filter = urllib.parse.quote_plus(\n '{\"room\": {\"timeline\": {\"limit\": 1}}}'.encode()\n )\n channel = self.make_request(\n \"GET\", f\"/sync?filter={filter}\", access_token=self.user_token\n )\n self.assertEquals(200, channel.code, channel.json_body)\n room_timeline = channel.json_body[\"rooms\"][\"join\"][self.room][\"timeline\"]\n sync_prev_batch = room_timeline[\"prev_batch\"]\n self.assertIsNotNone(sync_prev_batch)\n # Ensure the relation event is not in the batch returned from /sync.\n self.assertNotIn(\n annotation_id, [ev[\"event_id\"] for ev in room_timeline[\"events\"]]\n )\n\n # Request /messages, limiting it such that only the latest event is\n # returned (and not the relation).\n channel = self.make_request(\n \"GET\",\n f\"/rooms/{self.room}/messages?dir=b&limit=1\",\n access_token=self.user_token,\n )\n self.assertEquals(200, channel.code, channel.json_body)\n messages_end = channel.json_body[\"end\"]\n self.assertIsNotNone(messages_end)\n # Ensure the relation event is not in the chunk returned from /messages.\n self.assertNotIn(\n annotation_id, [ev[\"event_id\"] for ev in channel.json_body[\"chunk\"]]\n )\n\n # Request /relations with the pagination tokens received from both the\n # /sync and /messages responses above, in turn.\n #\n # This is a 
tiny bit silly since the client wouldn't know the parent ID\n # from the requests above; consider the parent ID to be known from a\n # previous /sync.\n for from_token in (sync_prev_batch, messages_end):\n channel = self.make_request(\n \"GET\",\n f\"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?from={from_token}\",\n access_token=self.user_token,\n )\n self.assertEquals(200, channel.code, channel.json_body)\n\n # The relation should be in the returned chunk.\n self.assertIn(\n annotation_id, [ev[\"event_id\"] for ev in channel.json_body[\"chunk\"]]\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 681, "n_words": 226, "vocab_size": 111, "complexity": 5, "nloc": 39, "token_counts": 289, "n_ast_nodes": 505, "n_identifiers": 32, "d_id": 71142, "documentation": { "docstring": "Pagination tokens from /sync and /messages can be used to paginate /relations.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 61401, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/vcs/versioncontrol.py", "file_name": "versioncontrol.py", "fun_name": "make_vcs_requirement_url", "commit_message": "upd; format", "code": "def make_vcs_requirement_url(repo_url, rev, project_name, subdir=None):\n # type: (str, str, str, Optional[str]) -> str\n \n egg_project_name = project_name.replace(\"-\", \"_\")\n req = f'{repo_url}@{rev}#egg={egg_project_name}'\n if subdir:\n req += f'&subdirectory={subdir}'\n\n return req\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 52, "n_words": 27, "vocab_size": 23, "complexity": 2, "nloc": 6, "token_counts": 37, "n_ast_nodes": 80, "n_identifiers": 8, "d_id": 12547, "documentation": { "docstring": "\n Return the URL for a VCS requirement.\n\n Args:\n repo_url: the remote VCS url, with any needed VCS prefix (e.g. 
\"git+\").\n project_name: the (unescaped) project name.\n ", "n_words": 25, "vocab_size": 21, "n_whitespaces": 45, "language": "en" } }, { "id": 80146, "commit_id": "ad65741b94f36fbe793cf15f0ab002482070cdb6", "repo": "wagtail", "path": "wagtail/tests/streamfield_migrations/test_simple_structures.py", "file_name": "test_simple_structures.py", "fun_name": "test_alter_value", "commit_message": "Add tests for streamfield migration helpers\n\nCurrently failing due to wagtail-factories being broken on Wagtail 4.1: https://github.com/wagtail/wagtail-factories/issues/65", "code": "def test_alter_value(self):\n \n\n altered_raw_data = apply_changes_to_raw_data(\n raw_data=self.raw_data,\n block_path_str=\"char1\",\n operation=AlterBlockValueOperation(new_value=\"foo\"),\n streamfield=models.SampleModel.content,\n )\n\n self.assertEqual(altered_raw_data[0][\"value\"], \"foo\")\n self.assertEqual(altered_raw_data[1][\"value\"], self.raw_data[1][\"value\"])\n self.assertEqual(altered_raw_data[2][\"value\"], \"foo\")\n self.assertEqual(altered_raw_data[3][\"value\"], self.raw_data[3][\"value\"])\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 111, "n_words": 18, "vocab_size": 17, "complexity": 1, "nloc": 11, "token_counts": 110, "n_ast_nodes": 181, "n_identifiers": 14, "d_id": 17024, "documentation": { "docstring": "Change the value of each `char1` block to `foo`\n\n Check whether the value of each `char1` block has changed to `foo`.\n Check whether the values of other blocks are intact.\n ", "n_words": 30, "vocab_size": 19, "n_whitespaces": 51, "language": "en" } }, { "id": 45122, "commit_id": "6e5c9c845f7f0975178dbeb76d4ccfe95d0ed803", "repo": "airflow", "path": "airflow/migrations/versions/30867afad44a_rename_concurrency_column_in_dag_table_.py", "file_name": "30867afad44a_rename_concurrency_column_in_dag_table_.py", "fun_name": "upgrade", "commit_message": "Fix some migrations (#21670)\n\nIn the xcom migration, there's a bad join. The clauses need to be wrapped in and_. 
And in both, for sqlite we need to temporarily suspend FK enforcement before dropping the tables.", "code": "def upgrade():\n \n conn = op.get_bind()\n is_sqlite = bool(conn.dialect.name == \"sqlite\")\n\n if is_sqlite:\n op.execute(\"PRAGMA foreign_keys=off\")\n with op.batch_alter_table('dag') as batch_op:\n batch_op.alter_column(\n 'concurrency',\n new_column_name='max_active_tasks',\n type_=sa.Integer(),\n nullable=False,\n )\n if is_sqlite:\n op.execute(\"PRAGMA foreign_keys=on\")\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 118, "n_words": 28, "vocab_size": 24, "complexity": 3, "nloc": 14, "token_counts": 75, "n_ast_nodes": 137, "n_identifiers": 17, "d_id": 8488, "documentation": { "docstring": "Apply Rename concurrency column in dag table to max_active_tasks", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 281227, "commit_id": "006b3570b795215a17c64841110b649b03db9a98", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/decorators.py", "file_name": "decorators.py", "fun_name": "try_except", "commit_message": "Baseclass (#1141)\n\n* A working decorator\r\n\r\n* Basic intro\r\n\r\n* Added more\r\n\r\n* Refactor\r\n\r\n* Refactor\r\n\r\n* Cleaned code\r\n\r\n* Simplified function (thanks Chavi)\r\n\r\n* Small change\r\n\r\n* Updating tests : fix issue with mock\r\n\r\n* Updating tests : fix remaining mocks after merging\r\n\r\n* Updating tests : black\r\n\r\n* Cleaned up\r\n\r\n* Finished base cases\r\n\r\n* Notes\r\n\r\n* Slight changes\r\n\r\n* Added dynamic options handling, error persists\r\n\r\n* Fixed pylint issues\r\n\r\n* Fixed mock\r\n\r\n* fix decorator with dynamic dictionary of args\r\n\r\n* move choices from dynamic to const in crypto/ov\r\n\r\n* Updated var names\r\n\r\n* Check\r\n\r\n* Moved decorators\r\n\r\n* Fixed import issues\r\n\r\n* Fixed tests, update payoff controller\r\n\r\n* Fixed tests\r\n\r\n* Fixed pylint\r\n\r\n* Updated files\r\n\r\n* Added base class\r\n\r\n* Added reset\r\n\r\n* Improved base class\r\n\r\n* For James\r\n\r\n* More menues converted\r\n\r\n* Added contexts\r\n\r\n* 24 controllers left\r\n\r\n* 18 Controllers left\r\n\r\n* Changes choices\r\n\r\n* 9 controllers left\r\n\r\n* Added all controllers\r\n\r\n* Fixed glitch\r\n\r\n* Replaced all improper callings of class\r\n\r\n* Removed menu decorator\r\n\r\n* refactored try_except\r\n\r\n* Last commit\r\n\r\n* Black fix\r\n\r\n* Bug fix\r\n\r\n* Added James' new menus\r\n\r\n* Fixed tests\r\n\r\n* Fixed 8 tests\r\n\r\n* Fixing mypy issue\r\n\r\n* Updating tests : stocks/options\r\n\r\n* Fixed options\r\n\r\n* Fixed tests\r\n\r\n* Updating tests : stocks/options\r\n\r\n* Fixed tests\r\n\r\n* More test fixes\r\n\r\n* Updating tests : stocks/ba\r\n\r\n* Fixed options test\r\n\r\n* More bug fixes\r\n\r\n* Fixed tests\r\n\r\n* fixed pylint\r\n\r\n* Skipped test_call_load\r\n\r\n* Add typings to base class\r\n\r\n* Fix issue with appending auto completer options + bugfixes\r\n\r\n* Add typings to base class\r\n\r\n* Terminal throws error for bad path\r\n\r\n* sexy solution to auto completer in runtime\r\n\r\n* more sexy reset with reset_level stored\r\n\r\n* no so sexy jump between indirect menus\r\n\r\n* Removing choices argument\r\n\r\n* refactor custom_reset\r\n\r\n* Fixed tests\r\n\r\n* Theo fixes\r\n\r\n* Added back function\r\n\r\n* Fixed tests\r\n\r\nCo-authored-by: Chavithra PARANA \r\nCo-authored-by: DidierRLopes ", "code": "def try_except(f):\n \n # 
pylint: disable=inconsistent-return-statements", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 11, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 4, "token_counts": 17, "n_ast_nodes": 14, "n_identifiers": 2, "d_id": 83631, "documentation": { "docstring": "Adds a try except block if the user is not in development mode\n\n Parameters\n ----------\n f: function\n The function to be wrapped\n ", "n_words": 22, "vocab_size": 21, "n_whitespaces": 41, "language": "en" } }, { "id": 160177, "commit_id": "f404e9e92e87a3990712d723d5c562a89300ac01", "repo": "numpy", "path": "numpy/distutils/ccompiler_opt.py", "file_name": "ccompiler_opt.py", "fun_name": "feature_extra_checks", "commit_message": "Add space after argument name", "code": "def feature_extra_checks(self, name):\n \n assert isinstance(name, str)\n d = self.feature_supported[name]\n extra_checks = d.get(\"extra_checks\", [])\n if not extra_checks:\n return []\n\n self.dist_log(\"Testing extra checks for feature '%s'\" % name, extra_checks)\n flags = self.feature_flags(name)\n available = []\n not_available = []\n for chk in extra_checks:\n test_path = os.path.join(\n self.conf_check_path, \"extra_%s.c\" % chk.lower()\n )\n if not os.path.exists(test_path):\n self.dist_fatal(\"extra check file does not exist\", test_path)\n\n is_supported = self.dist_test(test_path, flags + self.cc_flags[\"werror\"])\n if is_supported:\n available.append(chk)\n else:\n not_available.append(chk)\n\n if not_available:\n self.dist_log(\"testing failed for checks\", not_available, stderr=True)\n return available\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 311, "n_words": 79, "vocab_size": 59, "complexity": 6, "nloc": 24, "token_counts": 162, "n_ast_nodes": 267, "n_identifiers": 28, "d_id": 38549, "documentation": { "docstring": "\n Return a list of supported extra checks after testing them against\n the compiler.\n\n Parameters\n ----------\n names : str\n CPU feature name in uppercase.\n ", "n_words": 23, "vocab_size": 23, "n_whitespaces": 77, "language": "en" } }, { "id": 176547, "commit_id": "5a7985fc41bc0c686c035de43c66cf4fb5fcc94f", "repo": "networkx", "path": "networkx/algorithms/tournament.py", "file_name": "tournament.py", "fun_name": "is_reachable", "commit_message": "Added examples in tournament and tree functions (#5536)\n\n* examples\r\n\r\n* examples\r\n\r\n* examples\r\n\r\n* Example changed\r\n\r\n* improved styling\r\n\r\n* revised\r\n\r\n* edge labels\r\n\r\n* improved styling\r\n\r\n* spacing\r\n\r\n* error testing\r\n\r\n* examples\r\n\r\n* styling\r\n\r\n* add_nodes removed\r\n\r\n* spaceing\r\n\r\n* spacing\r\n\r\n* spacing\r\n\r\n* added examples\r\n\r\n* removed random_tournament example\r\n\r\n* added examples in branching and aborescence\r\n\r\n* error removed", "code": "def is_reachable(G, s, t):\n \n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 7, "n_words": 4, "vocab_size": 4, "complexity": 5, "nloc": 5, "token_counts": 54, "n_ast_nodes": 17, "n_identifiers": 4, "d_id": 41956, "documentation": { "docstring": "Decides whether there is a path from `s` to `t` in the\n tournament.\n\n This function is more theoretically efficient than the reachability\n checks than the shortest path algorithms in\n 
:mod:`networkx.algorithms.shortest_paths`.\n\n The given graph **must** be a tournament, otherwise this function's\n behavior is undefined.\n\n Parameters\n ----------\n G : NetworkX graph\n A directed graph representing a tournament.\n\n s : node\n A node in the graph.\n\n t : node\n A node in the graph.\n\n Returns\n -------\n bool\n Whether there is a path from `s` to `t` in `G`.\n\n Examples\n --------\n >>> from networkx.algorithms import tournament\n >>> G = nx.DiGraph([(1, 0), (1, 3), (1, 2), (2, 3), (2, 0), (3, 0)])\n >>> tournament.is_reachable(G, 1, 3)\n True\n >>> tournament.is_reachable(G, 3, 2)\n False\n\n Notes\n -----\n Although this function is more theoretically efficient than the\n generic shortest path functions, a speedup requires the use of\n parallelism. Though it may in the future, the current implementation\n does not use parallelism, thus you may not see much of a speedup.\n\n This algorithm comes from [1].\n\n References\n ----------\n .. [1] Tantau, Till.\n \"A note on the complexity of the reachability problem for\n tournaments.\"\n *Electronic Colloquium on Computational Complexity*. 2001.\n \n ", "n_words": 190, "vocab_size": 119, "n_whitespaces": 357, "language": "en" } }, { "id": 50263, "commit_id": "ffcde21305c61d950a9f93e57e6180c9a9665b87", "repo": "PaddleHub", "path": "modules/image/text_to_image/disco_diffusion_ernievil_base/vit_b_16x/ernievil2/transformers/ernie_modeling.py", "file_name": "ernie_modeling.py", "fun_name": "forward", "commit_message": "add disco_diffusion_ernievil_base", "code": "def forward(self, inputs, labels):\n \n\n logits = self.nsp(inputs)\n loss = F.cross_entropy(logits, labels)\n return loss\n\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 41, "n_words": 13, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 30, "n_ast_nodes": 48, "n_identifiers": 9, "d_id": 10073, "documentation": { "docstring": "\n Args:\n start_pos (optional, `Variable` of shape [batch_size]):\n token index of start of answer span in `context`\n end_pos (optional, `Variable` of shape [batch_size]):\n token index of end of answer span in `context`\n Returns:\n loss (`Variable` of shape []):\n Cross entropy loss mean over batch and time, ignore positions where label == -100\n if labels not set, returns None\n start_logits (`Variable` of shape [batch_size, hidden_size]):\n output logits of start position\n end_logits (`Variable` of shape [batch_size, hidden_size]):\n output logits of end position\n ", "n_words": 79, "vocab_size": 46, "n_whitespaces": 246, "language": "en" } }, { "id": 266878, "commit_id": "8b2e6285650ec42ec4a19075a8567047e8304ba2", "repo": "ansible", "path": "lib/ansible/galaxy/dependency_resolution/providers.py", "file_name": "providers.py", "fun_name": "identify", "commit_message": "galaxy - Clean up type hints and imports.", "code": "def identify(self, requirement_or_candidate):\n # type: (Candidate | Requirement) -> str\n \n return requirement_or_candidate.canonical_package_id\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 33, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 22, "n_identifiers": 4, "d_id": 78637, "documentation": { "docstring": "Given requirement or candidate, return an identifier for it.\n\n This is used to identify a requirement or candidate, e.g.\n whether two 
requirements should have their specifier parts\n (version ranges or pins) merged, whether two candidates would\n conflict with each other (because they have same name but\n different versions).\n ", "n_words": 48, "vocab_size": 41, "n_whitespaces": 90, "language": "en" } }, { "id": 20864, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/rich/text.py", "file_name": "text.py", "fun_name": "detect_indentation", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def detect_indentation(self) -> int:\n \n\n _indentations = {\n len(match.group(1))\n for match in re.finditer(r\"^( *)(.*)$\", self.plain, flags=re.MULTILINE)\n }\n\n try:\n indentation = (\n reduce(gcd, [indent for indent in _indentations if not indent % 2]) or 1\n )\n except TypeError:\n indentation = 1\n\n return indentation\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 153, "n_words": 41, "vocab_size": 32, "complexity": 6, "nloc": 17, "token_counts": 74, "n_ast_nodes": 116, "n_identifiers": 17, "d_id": 3602, "documentation": { "docstring": "Auto-detect indentation of code.\n\n Returns:\n int: Number of spaces used to indent code.\n ", "n_words": 13, "vocab_size": 11, "n_whitespaces": 38, "language": "en" } }, { "id": 280200, "commit_id": "e6f739a31247c43a86c37c33b0b8b2ba6be6a5f6", "repo": "keras", "path": "keras/saving/experimental/saving_lib.py", "file_name": "saving_lib.py", "fun_name": "save_model", "commit_message": "- Add standalone weights file saving/loading functionality.\n- Switch to in-memory, single write / single read archive saving for better performance.\n- Remove ability to pick between zipping or not zipping a Keras saved artifact: it's always a zip archive now.\n\nPiperOrigin-RevId: 483705728", "code": "def save_model(model, filepath, weights_format=\"h5\"):\n \n if not filepath.endswith(\".keras\"):\n raise ValueError(\n \"Invalid filename: expected a `.keras` extension. \"\n f\"Received: filepath={filepath}\"\n )\n if weights_format == \"h5\" and h5py is None:\n raise ImportError(\"h5py must be installed in order to save a model.\")\n\n if not model.built:\n warnings.warn(\n \"You are saving a model that has not yet been built. \"\n \"It might not contain any weights yet. 
\"\n \"Consider building the model first by calling it \"\n \"on some data.\",\n stacklevel=2,\n )\n saving_v3_enabled_value = getattr(_SAVING_V3_ENABLED, \"value\", False)\n _SAVING_V3_ENABLED.value = True\n\n serialized_model_dict = serialize_keras_object(model)\n config_json = json.dumps(serialized_model_dict)\n metadata_json = json.dumps(\n {\n \"keras_version\": keras.__version__,\n \"date_saved\": datetime.datetime.now().strftime(\"%Y-%m-%d@%H:%M:%S\"),\n }\n )\n try:\n with zipfile.ZipFile(filepath, \"w\") as zf:\n\n with zf.open(_METADATA_FILENAME, \"w\") as f:\n f.write(metadata_json.encode())\n with zf.open(_CONFIG_FILENAME, \"w\") as f:\n f.write(config_json.encode())\n\n if weights_format == \"h5\":\n weights_store = H5IOStore(\n _VARS_FNAME + \".h5\", archive=zf, mode=\"w\"\n )\n elif weights_format == \"npz\":\n weights_store = NpzIOStore(\n _VARS_FNAME + \".npz\", archive=zf, mode=\"w\"\n )\n else:\n raise ValueError(\n \"Unknown weights_format. Expected 'h5' or 'npz'. \"\n f\"Received: {weights_format}\"\n )\n\n asset_store = DiskIOStore(_ASSETS_DIRNAME, archive=zf, mode=\"w\")\n\n _save_state(\n model,\n weights_handler=weights_store,\n assets_handler=asset_store,\n inner_path=\"\",\n visited_trackables=set(),\n )\n weights_store.close()\n asset_store.close()\n\n except Exception as e:\n raise e\n finally:\n _SAVING_V3_ENABLED.value = saving_v3_enabled_value\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 770, "n_words": 181, "vocab_size": 129, "complexity": 9, "nloc": 59, "token_counts": 291, "n_ast_nodes": 521, "n_identifiers": 54, "d_id": 83285, "documentation": { "docstring": "Save a zip-archive representing a Keras model to the given filepath.\n\n The zip-based archive contains the following structure:\n\n - JSON-based configuration file (config.json): Records of model, layer, and\n other trackables' configuration.\n - NPZ-based trackable state files, found in respective directories, such as\n model/states.npz, model/dense_layer/states.npz, etc.\n - Metadata file.\n\n The states of Keras trackables (layers, optimizers, loss, and metrics) are\n automatically saved as long as they can be discovered through the attributes\n returned by `dir(Model)`. Typically, the state includes the variables\n associated with the trackable, but some specially purposed layers may\n contain more such as the vocabularies stored in the hashmaps. 
The trackables\n define how their states are saved by exposing `save_state()` and\n `load_state()` APIs.\n\n For the case of layer states, the variables will be visited as long as\n they are either 1) referenced via layer attributes, or 2) referenced via a\n container (list, tuple, or dict), and the container is referenced via a\n layer attribute.\n ", "n_words": 155, "vocab_size": 106, "n_whitespaces": 217, "language": "en" } }, { "id": 64731, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/deferred_revenue.py", "file_name": "deferred_revenue.py", "fun_name": "process_deferred_accounting", "commit_message": "style: format code with black", "code": "def process_deferred_accounting(posting_date=None):\n\t\n\n\tif not posting_date:\n\t\tposting_date = today()\n\n\tif not cint(\n\t\tfrappe.db.get_singles_value(\n\t\t\t\"Accounts Settings\", \"automatically_process_deferred_accounting_entry\"\n\t\t)\n\t):\n\t\treturn\n\n\tstart_date = add_months(today(), -1)\n\tend_date = add_days(today(), -1)\n\n\tcompanies = frappe.get_all(\"Company\")\n\n\tfor company in companies:\n\t\tfor record_type in (\"Income\", \"Expense\"):\n\t\t\tdoc = frappe.get_doc(\n\t\t\t\tdict(\n\t\t\t\t\tdoctype=\"Process Deferred Accounting\",\n\t\t\t\t\tcompany=company.name,\n\t\t\t\t\tposting_date=posting_date,\n\t\t\t\t\tstart_date=start_date,\n\t\t\t\t\tend_date=end_date,\n\t\t\t\t\ttype=record_type,\n\t\t\t\t)\n\t\t\t)\n\n\t\t\tdoc.insert()\n\t\t\tdoc.submit()\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 28, "n_words": 54, "vocab_size": 43, "complexity": 5, "nloc": 26, "token_counts": 124, "n_ast_nodes": 207, "n_identifiers": 23, "d_id": 13709, "documentation": { "docstring": "Converts deferred income/expense into income/expense\n\tExecuted via background jobs on every month end", "n_words": 13, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 249779, "commit_id": "cc3a52b33df72bb4230367536b924a6d1f510d36", "repo": "synapse", "path": "tests/rest/client/test_auth.py", "file_name": "test_auth.py", "fun_name": "test_disabled", "commit_message": "Support OIDC backchannel logouts (#11414)\n\nIf configured an OIDC IdP can log a user's session out of\r\nSynapse when they log out of the identity provider.\r\n\r\nThe IdP sends a request directly to Synapse (and must be\r\nconfigured with an endpoint) when a user logs out.", "code": "def test_disabled(self) -> None:\n \n fake_oidc_server = self.helper.fake_oidc_server()\n user = \"john\"\n\n login_resp, grant = self.helper.login_via_oidc(\n fake_oidc_server, user, with_sid=True\n )\n access_token: str = login_resp[\"access_token\"]\n self.helper.whoami(access_token, expect_code=HTTPStatus.OK)\n\n # Logging out shouldn't work\n logout_token = fake_oidc_server.generate_logout_token(grant)\n channel = self.submit_logout_token(logout_token)\n self.assertEqual(channel.code, 400)\n\n # And the token should still be valid\n self.helper.whoami(access_token, expect_code=HTTPStatus.OK)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 149, "n_words": 47, "vocab_size": 39, "complexity": 1, "nloc": 15, "token_counts": 100, "n_ast_nodes": 163, "n_identifiers": 21, "d_id": 73122, "documentation": { "docstring": "\n Receiving a logout token should do nothing if it is disabled in the config\n ", "n_words": 14, "vocab_size": 
14, "n_whitespaces": 29, "language": "en" } }, { "id": 161979, "commit_id": "0294ef37a19ecd995823678462faedbe10a09b22", "repo": "numba", "path": "numba/tests/test_slices.py", "file_name": "test_slices.py", "fun_name": "test_literal_slice_boxing", "commit_message": "support for boxing SliceLiteral type", "code": "def test_literal_slice_boxing(self):\n ", "url": "https://github.com/numba/numba.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 9, "n_words": 2, "vocab_size": 2, "complexity": 2, "nloc": 13, "token_counts": 77, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 39117, "documentation": { "docstring": "\n Tests that a literal slice can be used\n as an argument to a JIT function.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 45, "language": "en" } }, { "id": 270679, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/base_layer.py", "file_name": "base_layer.py", "fun_name": "_expects_training_arg", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _expects_training_arg(self):\n \n return self._call_spec.expects_training_arg\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 22, "n_identifiers": 4, "d_id": 80522, "documentation": { "docstring": "Whether the call function uses 'training' as a parameter.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 153621, "commit_id": "605efa618e7994681f57b11d04d417f353ef8d50", "repo": "modin", "path": "modin/pandas/base.py", "file_name": "base.py", "fun_name": "__array_wrap__", "commit_message": "DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333)\n\nCo-authored-by: Yaroslav Igoshev \r\nSigned-off-by: Alexander Myskov ", "code": "def __array_wrap__(self, result, context=None):\n \n # TODO: This is very inefficient. 
__array__ and as_matrix have been\n # changed to call the more efficient to_numpy, but this has been left\n # unchanged since we are not sure of its purpose.\n return self._default_to_pandas(\"__array_wrap__\", result, context=context)\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 77, "n_words": 42, "vocab_size": 38, "complexity": 1, "nloc": 2, "token_counts": 25, "n_ast_nodes": 43, "n_identifiers": 5, "d_id": 35502, "documentation": { "docstring": "\n Get called after a ufunc and other functions.\n\n Parameters\n ----------\n result : np.ndarray\n The result of the ufunc or other function called on the NumPy array\n returned by __array__.\n context : tuple of (func, tuple, int), optional\n This parameter is returned by ufuncs as a 3-element tuple: (name of the\n ufunc, arguments of the ufunc, domain of the ufunc), but is not set by\n other NumPy functions.\n\n Returns\n -------\n BasePandasDataset\n Wrapped Modin object.\n ", "n_words": 73, "vocab_size": 51, "n_whitespaces": 203, "language": "en" } }, { "id": 203224, "commit_id": "c5cd8783825b5f6384417dac5f3889b4210b7d08", "repo": "django", "path": "django/db/backends/oracle/introspection.py", "file_name": "introspection.py", "fun_name": "get_primary_key_column", "commit_message": "Refs #33476 -- Refactored problematic code before reformatting by Black.\n\nIn these cases Black produces unexpected results, e.g.\r\n\r\ndef make_random_password(\r\n self,\r\n length=10,\r\n allowed_chars='abcdefghjkmnpqrstuvwxyz' 'ABCDEFGHJKLMNPQRSTUVWXYZ' '23456789',\r\n):\r\n\r\nor\r\n\r\ncursor.execute(\"\"\"\r\nSELECT ...\r\n\"\"\",\r\n [table name],\r\n)", "code": "def get_primary_key_column(self, cursor, table_name):\n cursor.execute(\n ,\n [table_name],\n )\n row = cursor.fetchone()\n return self.identifier_converter(row[0]) if row else None\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 66, "n_words": 17, "vocab_size": 16, "complexity": 2, "nloc": 18, "token_counts": 41, "n_ast_nodes": 63, "n_identifiers": 8, "d_id": 50258, "documentation": { "docstring": "\n SELECT\n cols.column_name\n FROM\n user_constraints,\n user_cons_columns cols\n WHERE\n user_constraints.constraint_name = cols.constraint_name AND\n user_constraints.constraint_type = 'P' AND\n user_constraints.table_name = UPPER(%s) AND\n cols.position = 1\n ", "n_words": 22, "vocab_size": 17, "n_whitespaces": 172, "language": "en" } }, { "id": 53193, "commit_id": "a452d8b8917000774302411a7aeb949f7e326814", "repo": "prefect", "path": "src/prefect/utilities/settings.py", "file_name": "settings.py", "fun_name": "get_extra_loggers", "commit_message": "Strip logger name to prevent accidental spaces", "code": "def get_extra_loggers(self) -> List[str]:\n \n return (\n [name.strip() for name in self.extra_loggers.split(\",\")]\n if self.extra_loggers\n else []\n )\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 70, "n_words": 16, "vocab_size": 16, "complexity": 3, "nloc": 9, "token_counts": 39, "n_ast_nodes": 64, "n_identifiers": 8, "d_id": 10735, "documentation": { "docstring": "\n Parse the `extra_loggers` CSV and trim whitespace from logger names\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 25, "language": "en" } }, { "id": 270328, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", 
"repo": "keras", "path": "keras/distribute/distributed_training_utils_v1.py", "file_name": "distributed_training_utils_v1.py", "fun_name": "_get_input_from_iterator", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _get_input_from_iterator(iterator, model):\n \n next_element = iterator.get_next()\n\n # `len(nest.flatten(x))` is going to not count empty elements such as {}.\n # len(nest.flatten([[0,1,2], {}])) is 3 and not 4. The `next_element` is\n # going to get flattened in `_prepare_feed_values` to work around that. Empty\n # elements are going to get filtered out as part of the flattening.\n if len(tf.nest.flatten(next_element)) == len(model.inputs):\n x = next_element\n y = None\n sample_weights = None\n elif len(tf.nest.flatten(next_element)) == (\n len(model.inputs) + len(model.outputs)\n ):\n x, y = next_element\n sample_weights = None\n else:\n x, y, sample_weights = next_element\n\n # Validate that all the elements in x and y are of the same type and shape.\n validate_distributed_dataset_inputs(\n model._distribution_strategy, x, y, sample_weights\n )\n return x, y, sample_weights\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 215, "n_words": 115, "vocab_size": 67, "complexity": 3, "nloc": 17, "token_counts": 108, "n_ast_nodes": 176, "n_identifiers": 16, "d_id": 80431, "documentation": { "docstring": "Get elements from the iterator and verify the input shape and type.", "n_words": 12, "vocab_size": 10, "n_whitespaces": 11, "language": "en" } }, { "id": 130910, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/serve/http_state.py", "file_name": "http_state.py", "fun_name": "_get_target_nodes", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def _get_target_nodes(self) -> List[Tuple[str, str]]:\n \n location = self._config.location\n target_nodes = get_all_node_ids()\n\n if location == DeploymentMode.NoServer:\n return []\n\n if location == DeploymentMode.HeadOnly:\n head_node_resource_key = get_current_node_resource_key()\n return [\n (node_id, node_resource)\n for node_id, node_resource in target_nodes\n if node_resource == head_node_resource_key\n ][:1]\n\n if location == DeploymentMode.FixedNumber:\n num_replicas = self._config.fixed_number_replicas\n if num_replicas > len(target_nodes):\n logger.warning(\n \"You specified fixed_number_replicas=\"\n f\"{num_replicas} but there are only \"\n f\"{len(target_nodes)} total nodes. Serve will start one \"\n \"HTTP proxy per node.\"\n )\n num_replicas = len(target_nodes)\n\n # Seed the random state so sample is deterministic.\n # i.e. 
it will always return the same set of nodes.\n random.seed(self._config.fixed_number_selection_seed)\n return random.sample(sorted(target_nodes), k=num_replicas)\n\n return target_nodes\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 426, "n_words": 101, "vocab_size": 72, "complexity": 7, "nloc": 26, "token_counts": 137, "n_ast_nodes": 235, "n_identifiers": 28, "d_id": 29422, "documentation": { "docstring": "Return the list of (id, resource_key) to deploy HTTP servers on.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 43650, "commit_id": "c20ad79b40ea2b213f6dca221221c6dbd55bd08f", "repo": "airflow", "path": "airflow/jobs/triggerer_job.py", "file_name": "triggerer_job.py", "fun_name": "cancel_triggers", "commit_message": "Rename `to_delete` to `to_cancel` in TriggerRunner (#20658)\n\nThe queue's purpose is to track triggers that need to be canceled. The language `to_delete` was a bit confusing because for one it does not actually delete them but cancel them. The deletion work is actually in `cleanup_finished_triggers`. It seems that this method will usually not do anything and it's only for cancelling triggers that are currently running but for whatever reason no longer should be. E.g. when a task is killed and therefore the trigger is no longer needed, or some multi-triggerer scenarios. So putting cancel in the name also highlights that this is about stopping running triggers, not e.g. purging completed ones.", "code": "async def cancel_triggers(self):\n \n while self.to_cancel:\n trigger_id = self.to_cancel.popleft()\n if trigger_id in self.triggers:\n # We only delete if it did not exit already\n self.triggers[trigger_id][\"task\"].cancel()\n await asyncio.sleep(0)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 102, "n_words": 25, "vocab_size": 23, "complexity": 3, "nloc": 6, "token_counts": 47, "n_ast_nodes": 83, "n_identifiers": 9, "d_id": 8018, "documentation": { "docstring": "\n Drain the to_cancel queue and ensure all triggers that are not in the\n DB are cancelled, so the cleanup job deletes them.\n ", "n_words": 22, "vocab_size": 19, "n_whitespaces": 44, "language": "en" } }, { "id": 73844, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/core/models/__init__.py", "file_name": "__init__.py", "fun_name": "set_url_path", "commit_message": "Reformat with black", "code": "def set_url_path(self, parent):\n \n if parent:\n self.url_path = parent.url_path + self.slug + \"/\"\n else:\n # a page without a parent is the tree root, which always has a url_path of '/'\n self.url_path = \"/\"\n\n return self.url_path\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 96, "n_words": 35, "vocab_size": 28, "complexity": 2, "nloc": 6, "token_counts": 35, "n_ast_nodes": 63, "n_identifiers": 5, "d_id": 16139, "documentation": { "docstring": "\n Populate the url_path field based on this page's slug and the specified parent page.\n (We pass a parent in here, rather than retrieving it via get_parent, so that we can give\n new unsaved pages a meaningful URL when previewing them; at that point the page has not\n been assigned a position in the tree, as far as treebeard is concerned.\n ", "n_words": 60, "vocab_size": 51, 
"n_whitespaces": 96, "language": "en" } }, { "id": 200253, "commit_id": "8fc835bcd86ea080644783a363e47adca6dff3a7", "repo": "sympy", "path": "sympy/ntheory/elliptic_curve.py", "file_name": "elliptic_curve.py", "fun_name": "order", "commit_message": "Remove redundant list calls", "code": "def order(self):\n \n if self.characteristic == 0:\n raise NotImplementedError(\"Still not implemented\")\n return len(self.points())\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 44, "n_words": 12, "vocab_size": 12, "complexity": 2, "nloc": 4, "token_counts": 27, "n_ast_nodes": 49, "n_identifiers": 6, "d_id": 49567, "documentation": { "docstring": "\n Number of points in Finite field.\n\n Examples\n ========\n\n >>> from sympy.ntheory.elliptic_curve import EllipticCurve\n >>> e2 = EllipticCurve(1, 0, modulus=19)\n >>> e2.order\n 19\n\n ", "n_words": 22, "vocab_size": 20, "n_whitespaces": 79, "language": "en" } }, { "id": 288579, "commit_id": "5d7756885be0fd044d86e60ec0d2639f9d114ea3", "repo": "core", "path": "tests/components/energy/test_websocket_api.py", "file_name": "test_websocket_api.py", "fun_name": "test_fossil_energy_consumption", "commit_message": "Normalize to kWh when handling WS energy/fossil_energy_consumption (#79649)\n\n* Normalize to kWh when handling WS energy/fossil_energy_consumption\r\n\r\n* Improve test", "code": "async def test_fossil_energy_consumption(hass, hass_ws_client, recorder_mock):\n \n now = dt_util.utcnow()\n later = dt_util.as_utc(dt_util.parse_datetime(\"2022-09-01 00:00:00\"))\n\n await async_setup_component(hass, \"history\", {})\n await async_setup_component(hass, \"sensor\", {})\n await async_recorder_block_till_done(hass)\n\n period1 = dt_util.as_utc(dt_util.parse_datetime(\"2021-09-01 00:00:00\"))\n period2 = dt_util.as_utc(dt_util.parse_datetime(\"2021-09-30 23:00:00\"))\n period2_day_start = dt_util.as_utc(dt_util.parse_datetime(\"2021-09-30 00:00:00\"))\n period3 = dt_util.as_utc(dt_util.parse_datetime(\"2021-10-01 00:00:00\"))\n period4 = dt_util.as_utc(dt_util.parse_datetime(\"2021-10-31 23:00:00\"))\n period4_day_start = dt_util.as_utc(dt_util.parse_datetime(\"2021-10-31 00:00:00\"))\n\n external_energy_statistics_1 = (\n {\n \"start\": period1,\n \"last_reset\": None,\n \"state\": 0,\n \"sum\": 2,\n },\n {\n \"start\": period2,\n \"last_reset\": None,\n \"state\": 1,\n \"sum\": 3,\n },\n {\n \"start\": period3,\n \"last_reset\": None,\n \"state\": 2,\n \"sum\": 4,\n },\n {\n \"start\": period4,\n \"last_reset\": None,\n \"state\": 3,\n \"sum\": 5,\n },\n )\n external_energy_metadata_1 = {\n \"has_mean\": False,\n \"has_sum\": True,\n \"name\": \"Total imported energy\",\n \"source\": \"test\",\n \"statistic_id\": \"test:total_energy_import_tariff_1\",\n \"unit_of_measurement\": \"kWh\",\n }\n external_energy_statistics_2 = (\n {\n \"start\": period1,\n \"last_reset\": None,\n \"state\": 0,\n \"sum\": 20000,\n },\n {\n \"start\": period2,\n \"last_reset\": None,\n \"state\": 1,\n \"sum\": 30000,\n },\n {\n \"start\": period3,\n \"last_reset\": None,\n \"state\": 2,\n \"sum\": 40000,\n },\n {\n \"start\": period4,\n \"last_reset\": None,\n \"state\": 3,\n \"sum\": 50000,\n },\n )\n external_energy_metadata_2 = {\n \"has_mean\": False,\n \"has_sum\": True,\n \"name\": \"Total imported energy\",\n \"source\": \"test\",\n \"statistic_id\": \"test:total_energy_import_tariff_2\",\n \"unit_of_measurement\": \"Wh\",\n }\n external_co2_statistics = (\n {\n \"start\": 
period1,\n \"last_reset\": None,\n \"mean\": 10,\n },\n {\n \"start\": period2,\n \"last_reset\": None,\n \"mean\": 30,\n },\n {\n \"start\": period3,\n \"last_reset\": None,\n \"mean\": 60,\n },\n {\n \"start\": period4,\n \"last_reset\": None,\n \"mean\": 90,\n },\n )\n external_co2_metadata = {\n \"has_mean\": True,\n \"has_sum\": False,\n \"name\": \"Fossil percentage\",\n \"source\": \"test\",\n \"statistic_id\": \"test:fossil_percentage\",\n \"unit_of_measurement\": \"%\",\n }\n\n async_add_external_statistics(\n hass, external_energy_metadata_1, external_energy_statistics_1\n )\n async_add_external_statistics(\n hass, external_energy_metadata_2, external_energy_statistics_2\n )\n async_add_external_statistics(hass, external_co2_metadata, external_co2_statistics)\n await async_wait_recording_done(hass)\n\n client = await hass_ws_client()\n await client.send_json(\n {\n \"id\": 1,\n \"type\": \"energy/fossil_energy_consumption\",\n \"start_time\": now.isoformat(),\n \"end_time\": later.isoformat(),\n \"energy_statistic_ids\": [\n \"test:total_energy_import_tariff_1\",\n \"test:total_energy_import_tariff_2\",\n ],\n \"co2_statistic_id\": \"test:fossil_percentage\",\n \"period\": \"hour\",\n }\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {\n period2.isoformat(): pytest.approx((33.0 - 22.0) * 0.3),\n period3.isoformat(): pytest.approx((44.0 - 33.0) * 0.6),\n period4.isoformat(): pytest.approx((55.0 - 44.0) * 0.9),\n }\n\n await client.send_json(\n {\n \"id\": 2,\n \"type\": \"energy/fossil_energy_consumption\",\n \"start_time\": now.isoformat(),\n \"end_time\": later.isoformat(),\n \"energy_statistic_ids\": [\n \"test:total_energy_import_tariff_1\",\n \"test:total_energy_import_tariff_2\",\n ],\n \"co2_statistic_id\": \"test:fossil_percentage\",\n \"period\": \"day\",\n }\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {\n period2_day_start.isoformat(): pytest.approx((33.0 - 22.0) * 0.3),\n period3.isoformat(): pytest.approx((44.0 - 33.0) * 0.6),\n period4_day_start.isoformat(): pytest.approx((55.0 - 44.0) * 0.9),\n }\n\n await client.send_json(\n {\n \"id\": 3,\n \"type\": \"energy/fossil_energy_consumption\",\n \"start_time\": now.isoformat(),\n \"end_time\": later.isoformat(),\n \"energy_statistic_ids\": [\n \"test:total_energy_import_tariff_1\",\n \"test:total_energy_import_tariff_2\",\n ],\n \"co2_statistic_id\": \"test:fossil_percentage\",\n \"period\": \"month\",\n }\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == {\n period1.isoformat(): pytest.approx((33.0 - 22.0) * 0.3),\n period3.isoformat(): pytest.approx(\n ((44.0 - 33.0) * 0.6) + ((55.0 - 44.0) * 0.9)\n ),\n }\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 1806, "n_words": 397, "vocab_size": 139, "complexity": 1, "nloc": 183, "token_counts": 904, "n_ast_nodes": 1475, "n_identifiers": 33, "d_id": 87736, "documentation": { "docstring": "Test fossil_energy_consumption with co2 sensor data.", "n_words": 6, "vocab_size": 6, "n_whitespaces": 5, "language": "en" } }, { "id": 246233, "commit_id": "64ec45fc1b0856dc7daacca7d3ab75d50bd89f84", "repo": "synapse", "path": "tests/handlers/test_appservice.py", "file_name": "test_appservice.py", "fun_name": "test_application_services_receive_bursts_of_to_device", "commit_message": "Send to-device 
messages to application services (#11215)\n\nCo-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>", "code": "def test_application_services_receive_bursts_of_to_device(self):\n \n # Register two application services with exclusive interest in a user\n interested_appservices = []\n for _ in range(2):\n appservice = self._register_application_service(\n namespaces={\n ApplicationService.NS_USERS: [\n {\n \"regex\": \"@exclusive_as_user:.+\",\n \"exclusive\": True,\n }\n ],\n },\n )\n interested_appservices.append(appservice)\n\n # ...and an application service which does not have any user interest.\n self._register_application_service()\n\n to_device_message_content = {\n \"some key\": \"some interesting value\",\n }\n\n # We need to send a large burst of to-device messages. We also would like to\n # include them all in the same application service transaction so that we can\n # test large transactions.\n #\n # To do this, we can send a single to-device message to many user devices at\n # once.\n #\n # We insert number_of_messages - 1 messages into the database directly. We'll then\n # send a final to-device message to the real device, which will also kick off\n # an AS transaction (as just inserting messages into the DB won't).\n number_of_messages = 150\n fake_device_ids = [f\"device_{num}\" for num in range(number_of_messages - 1)]\n messages = {\n self.exclusive_as_user: {\n device_id: to_device_message_content for device_id in fake_device_ids\n }\n }\n\n # Create a fake device per message. We can't send to-device messages to\n # a device that doesn't exist.\n self.get_success(\n self.hs.get_datastore().db_pool.simple_insert_many(\n desc=\"test_application_services_receive_burst_of_to_device\",\n table=\"devices\",\n keys=(\"user_id\", \"device_id\"),\n values=[\n (\n self.exclusive_as_user,\n device_id,\n )\n for device_id in fake_device_ids\n ],\n )\n )\n\n # Seed the device_inbox table with our fake messages\n self.get_success(\n self.hs.get_datastore().add_messages_to_device_inbox(messages, {})\n )\n\n # Now have local_user send a final to-device message to exclusive_as_user. 
All unsent\n # to-device messages should be sent to any application services\n # interested in exclusive_as_user.\n chan = self.make_request(\n \"PUT\",\n \"/_matrix/client/r0/sendToDevice/m.room_key_request/4\",\n content={\n \"messages\": {\n self.exclusive_as_user: {\n self.exclusive_as_user_device_id: to_device_message_content\n }\n }\n },\n access_token=self.local_user_token,\n )\n self.assertEqual(chan.code, 200, chan.result)\n\n self.send_mock.assert_called()\n\n # Count the total number of to-device messages that were sent out per-service.\n # Ensure that we only sent to-device messages to interested services, and that\n # each interested service received the full count of to-device messages.\n service_id_to_message_count: Dict[str, int] = {}\n\n for call in self.send_mock.call_args_list:\n service, _events, _ephemeral, to_device_messages = call[0]\n\n # Check that this was made to an interested service\n self.assertIn(service, interested_appservices)\n\n # Add to the count of messages for this application service\n service_id_to_message_count.setdefault(service.id, 0)\n service_id_to_message_count[service.id] += len(to_device_messages)\n\n # Assert that each interested service received the full count of messages\n for count in service_id_to_message_count.values():\n self.assertEqual(count, number_of_messages)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 1357, "n_words": 373, "vocab_size": 199, "complexity": 7, "nloc": 64, "token_counts": 308, "n_ast_nodes": 514, "n_identifiers": 54, "d_id": 71114, "documentation": { "docstring": "\n Test that when a user sends >100 to-device messages at once, any\n interested AS's will receive them in separate transactions.\n\n Also tests that uninterested application services do not receive messages.\n ", "n_words": 30, "vocab_size": 28, "n_whitespaces": 59, "language": "en" } }, { "id": 231700, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/_smith.py", "file_name": "_smith.py", "fun_name": "imaginaryaxis", "commit_message": "switch to black .22", "code": "def imaginaryaxis(self):\n \n return self[\"imaginaryaxis\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 63144, "documentation": { "docstring": "\n The 'imaginaryaxis' property is an instance of Imaginaryaxis\n that may be specified as:\n - An instance of :class:`plotly.graph_objs.layout.smith.Imaginaryaxis`\n - A dict of string/value properties that will be passed\n to the Imaginaryaxis constructor\n\n Supported dict properties:\n\n color\n Sets default for all colors associated with\n this axis all at once: line, font, tick, and\n grid colors. Grid color is lightened by\n blending this with the plot background\n Individual pieces can override this.\n gridcolor\n Sets the color of the grid lines.\n gridwidth\n Sets the width (in px) of the grid lines.\n hoverformat\n Sets the hover text formatting rule using d3\n formatting mini-languages which are very\n similar to those in Python. For numbers, see: h\n ttps://github.com/d3/d3-format/tree/v1.4.5#d3-f\n ormat. And for dates see:\n https://github.com/d3/d3-time-\n format/tree/v2.2.3#locale_format. 
We add two\n items to d3's date formatter: \"%h\" for half of\n the year as a decimal number as well as \"%{n}f\"\n for fractional seconds with n digits. For\n example, *2016-10-13 09:15:23.456* with\n tickformat \"%H~%M~%S.%2f\" would display\n \"09~15~23.46\"\n layer\n Sets the layer on which this axis is displayed.\n If *above traces*, this axis is displayed above\n all the subplot's traces If *below traces*,\n this axis is displayed below all the subplot's\n traces, but above the grid lines. Useful when\n used together with scatter-like traces with\n `cliponaxis` set to False to show markers\n and/or text nodes above this axis.\n linecolor\n Sets the axis line color.\n linewidth\n Sets the width (in px) of the axis line.\n showgrid\n Determines whether or not grid lines are drawn.\n If True, the grid lines are drawn at every tick\n mark.\n showline\n Determines whether or not a line bounding this\n axis is drawn.\n showticklabels\n Determines whether or not the tick labels are\n drawn.\n showtickprefix\n If \"all\", all tick labels are displayed with a\n prefix. If \"first\", only the first tick is\n displayed with a prefix. If \"last\", only the\n last tick is displayed with a suffix. If\n \"none\", tick prefixes are hidden.\n showticksuffix\n Same as `showtickprefix` but for tick suffixes.\n tickcolor\n Sets the tick color.\n tickfont\n Sets the tick font.\n tickformat\n Sets the tick label formatting rule using d3\n formatting mini-languages which are very\n similar to those in Python. For numbers, see: h\n ttps://github.com/d3/d3-format/tree/v1.4.5#d3-f\n ormat. And for dates see:\n https://github.com/d3/d3-time-\n format/tree/v2.2.3#locale_format. We add two\n items to d3's date formatter: \"%h\" for half of\n the year as a decimal number as well as \"%{n}f\"\n for fractional seconds with n digits. For\n example, *2016-10-13 09:15:23.456* with\n tickformat \"%H~%M~%S.%2f\" would display\n \"09~15~23.46\"\n ticklen\n Sets the tick length (in px).\n tickprefix\n Sets a tick label prefix.\n ticks\n Determines whether ticks are drawn or not. If\n \"\", this axis' ticks are not drawn. If\n \"outside\" (\"inside\"), this axis' are drawn\n outside (inside) the axis lines.\n ticksuffix\n Sets a tick label suffix.\n tickvals\n Sets the values at which ticks on this axis\n appear. Defaults to `realaxis.tickvals` plus\n the same as negatives and zero.\n tickvalssrc\n Sets the source reference on Chart Studio Cloud\n for `tickvals`.\n tickwidth\n Sets the tick width (in px).\n visible\n A single toggle to hide the axis while\n preserving interaction like dragging. Default\n is true when a cheater plot is present on the\n axis, otherwise false\n\n Returns\n -------\n plotly.graph_objs.layout.smith.Imaginaryaxis\n ", "n_words": 517, "vocab_size": 232, "n_whitespaces": 2370, "language": "en" } }, { "id": 177286, "commit_id": "a796f526c7ce6a7f182aee4b81b8499feabe1a45", "repo": "networkx", "path": "networkx/algorithms/isomorphism/vf2pp.py", "file_name": "vf2pp.py", "fun_name": "vf2pp_is_isomorphic", "commit_message": "VF2++ for Directed Graphs (#5972)\n\nModify vf2pp implementation to support directed graphs. 
Updates all helper\r\nfunctions and state/parameter objects to account for in/out degree.\r\n\r\nIncludes other changes such as renaming the keyword argument from\r\nnode_labels to node_label to better reflect the fact that the label kwarg expects\r\na single value.\r\n\r\nCo-authored-by: Ross Barnowski \r\nCo-authored-by: Dan Schult ", "code": "def vf2pp_is_isomorphic(G1, G2, node_label=None, default_label=None):\n \n if vf2pp_isomorphism(G1, G2, node_label, default_label) is not None:\n return True\n return False\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 33, "n_words": 17, "vocab_size": 15, "complexity": 2, "nloc": 4, "token_counts": 35, "n_ast_nodes": 51, "n_identifiers": 6, "d_id": 42325, "documentation": { "docstring": "Examines whether G1 and G2 are isomorphic.\n\n Parameters\n ----------\n G1, G2 : NetworkX Graph or MultiGraph instances.\n The two graphs to check for isomorphism.\n\n node_label : str, optional\n The name of the node attribute to be used when comparing nodes.\n The default is `None`, meaning node attributes are not considered\n in the comparison. Any node that doesn't have the `node_label`\n attribute uses `default_label` instead.\n\n default_label : scalar\n Default value to use when a node doesn't have an attribute\n named `node_label`. Default is `None`.\n\n Returns\n -------\n bool\n True if the two graphs are isomorphic, False otherwise.\n ", "n_words": 95, "vocab_size": 71, "n_whitespaces": 178, "language": "en" } }, { "id": 46790, "commit_id": "4ffd4f09532fceb67675fce4c1f5cd383eff992e", "repo": "airflow", "path": "dev/breeze/src/airflow_breeze/utils/run_utils.py", "file_name": "run_utils.py", "fun_name": "commit_sha", "commit_message": "Prepare Breeze2 for prime time :) (#22713)\n\nThis is a review and clean-up for all the parameters and\r\ncommands for Breeze2 in order to prepare it for being\r\nused by the contribugors.\r\n\r\nThere are various small fixes here and there, removal\r\nof duplicated code, refactoring and moving code around\r\nas well as cleanup and review all the parameters used\r\nfor all implemented commands.\r\n\r\nThe parameters, default values and their behaviours were\r\nupdated to match \"new\" life of Breeze rather than old\r\none.\r\n\r\nSome improvements are made to the autocomplete and\r\nclick help messages printed. Full list of choices is\r\nalways displayed, parameters are groups according to\r\ntheir target audience, and they were sorted according\r\nto importance and frequency of use.\r\n\r\nVarious messages have been colourised according to their\r\nmeaning - warnings as yellow, errors as red and\r\ninformational messages as bright_blue.\r\n\r\nThe `dry-run` option has been added to just show what\r\nwould have been run without actually running some\r\npotentially \"write\" commands (read commands are still\r\nexecuted) so that you can easily verify and manually\r\ncopy and execute the commands with option to modify\r\nthem before. 
The `dry_run` and `verbose` options are\r\nnow used for all commands.\r\n\r\nThe \"main\" command now runs \"shell\" by default similarly\r\nas the original Breeze.\r\n\r\nAll \"shortcut\" parameters have been standardized - i.e\r\ncommon options (verbose/dry run/help) have one and all\r\ncommon flags that are likely to be used often have an\r\nassigned shortcute.\r\n\r\nThe \"stop\" and \"cleanup\" command have been added\r\nas they are necessary for average user to complete the\r\nregular usage cycle.\r\n\r\nDocumentation for all the important methods have been\r\nupdated.", "code": "def commit_sha():\n \n return run_command(\n ['git', 'rev-parse', 'HEAD'], capture_output=True, text=True, check=False\n ).stdout.strip()\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 27, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 34, "n_ast_nodes": 58, "n_identifiers": 7, "d_id": 8994, "documentation": { "docstring": "Returns commit SHA of current repo. Cached for various usages.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 42927, "commit_id": "ec84ffe71cfa8246155b9b4cb10bf2167e75adcf", "repo": "airflow", "path": "airflow/providers/google/cloud/transfers/gcs_to_gcs.py", "file_name": "gcs_to_gcs.py", "fun_name": "_copy_source_without_wildcard", "commit_message": "Fix GCSToGCSOperator cannot copy a single file/folder without copying other files/folders with that prefix (#24039)", "code": "def _copy_source_without_wildcard(self, hook, prefix):\n \n objects = hook.list(self.source_bucket, prefix=prefix, delimiter=self.delimiter)\n\n if not self.replace:\n # If we are not replacing, ignore files already existing in source buckets\n objects = self._ignore_existing_files(hook, prefix, objects=objects, delimiter=self.delimiter)\n\n # If objects is empty and we have prefix, let's check if prefix is a blob\n # and copy directly\n if len(objects) == 0 and prefix:\n if hook.exists(self.source_bucket, prefix):\n self._copy_single_object(\n hook=hook, source_object=prefix, destination_object=self.destination_object\n )\n elif self.source_object_required:\n msg = f\"{prefix} does not exist in bucket {self.source_bucket}\"\n self.log.warning(msg)\n raise AirflowException(msg)\n\n for source_obj in objects:\n if self.exact_match and (source_obj != prefix or not source_obj.endswith(prefix)):\n continue\n if self.destination_object is None:\n destination_object = source_obj\n else:\n destination_object = source_obj.replace(prefix, self.destination_object, 1)\n self._copy_single_object(\n hook=hook, source_object=source_obj, destination_object=destination_object\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 413, "n_words": 111, "vocab_size": 77, "complexity": 11, "nloc": 23, "token_counts": 185, "n_ast_nodes": 296, "n_identifiers": 23, "d_id": 7766, "documentation": { "docstring": "\n For source_objects with no wildcard, this operator would first list\n all files in source_objects, using provided delimiter if any. 
Then copy\n files from source_objects to destination_object and rename each source\n file.\n\n Example 1:\n\n\n The following Operator would copy all the files from ``a/``folder\n (i.e a/a.csv, a/b.csv, a/c.csv)in ``data`` bucket to the ``b/`` folder in\n the ``data_backup`` bucket (b/a.csv, b/b.csv, b/c.csv) ::\n\n copy_files = GCSToGCSOperator(\n task_id='copy_files_without_wildcard',\n source_bucket='data',\n source_objects=['a/'],\n destination_bucket='data_backup',\n destination_object='b/',\n gcp_conn_id=google_cloud_conn_id\n )\n\n Example 2:\n\n\n The following Operator would copy all avro files from ``a/``folder\n (i.e a/a.avro, a/b.avro, a/c.avro)in ``data`` bucket to the ``b/`` folder in\n the ``data_backup`` bucket (b/a.avro, b/b.avro, b/c.avro) ::\n\n copy_files = GCSToGCSOperator(\n task_id='copy_files_without_wildcard',\n source_bucket='data',\n source_objects=['a/'],\n destination_bucket='data_backup',\n destination_object='b/',\n delimiter='.avro',\n gcp_conn_id=google_cloud_conn_id\n )\n ", "n_words": 112, "vocab_size": 68, "n_whitespaces": 443, "language": "en" } }, { "id": 22102, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/models.py", "file_name": "models.py", "fun_name": "_encode_files", "commit_message": "Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def _encode_files(files, data):\n \n if not files:\n raise ValueError(\"Files must be provided.\")\n elif isinstance(data, basestring):\n raise ValueError(\"Data must not be a string.\")\n\n new_fields = []\n fields = to_key_val_list(data or {})\n files = to_key_val_list(files or {})\n\n for field, val in fields:\n if isinstance(val, basestring) or not hasattr(val, \"__iter__\"):\n val = [val]\n for v in val:\n if v is not None:\n # Don't call str() on bytestrings: in Py3 it all goes wrong.\n if not isinstance(v, bytes):\n v = str(v)\n\n new_fields.append(\n (\n field.decode(\"utf-8\")\n if isinstance(field, bytes)\n else field,\n v.encode(\"utf-8\") if isinstance(v, str) else v,\n )\n )\n\n for (k, v) in files:\n # support for explicit filename\n ft = None\n fh = None\n if isinstance(v, (tuple, list)):\n if len(v) == 2:\n fn, fp = v\n elif len(v) == 3:\n fn, fp, ft = v\n else:\n fn, fp, ft, fh = v\n else:\n fn = guess_filename(v) or k\n fp = v\n\n if isinstance(fp, (str, bytes, bytearray)):\n fdata = fp\n elif hasattr(fp, \"read\"):\n fdata = fp.read()\n elif fp is None:\n continue\n else:\n fdata = fp\n\n rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)\n rf.make_multipart(content_type=ft)\n new_fields.append(rf)\n\n body, content_type = encode_multipart_formdata(new_fields)\n\n return body, content_type\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 905, "n_words": 184, "vocab_size": 106, "complexity": 21, "nloc": 49, "token_counts": 313, "n_ast_nodes": 497, "n_identifiers": 39, "d_id": 4180, "documentation": { "docstring": "Build the body for a multipart/form-data request.\n\n Will successfully encode files when passed as a dict or a list of\n tuples. 
Order is retained if data is a list of tuples but arbitrary\n if parameters are supplied as a dict.\n The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype)\n or 4-tuples (filename, fileobj, contentype, custom_headers).\n ", "n_words": 57, "vocab_size": 43, "n_whitespaces": 99, "language": "en" } }, { "id": 231632, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/_polar.py", "file_name": "_polar.py", "fun_name": "sector", "commit_message": "switch to black .22", "code": "def sector(self):\n \n return self[\"sector\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 63076, "documentation": { "docstring": "\n Sets angular span of this polar subplot with two angles (in\n degrees). Sector are assumed to be spanned in the\n counterclockwise direction with 0 corresponding to rightmost\n limit of the polar subplot.\n\n The 'sector' property is an info array that may be specified as:\n\n * a list or tuple of 2 elements where:\n (0) The 'sector[0]' property is a number and may be specified as:\n - An int or float\n (1) The 'sector[1]' property is a number and may be specified as:\n - An int or float\n\n Returns\n -------\n list\n ", "n_words": 90, "vocab_size": 58, "n_whitespaces": 237, "language": "en" } }, { "id": 264877, "commit_id": "3a461d02793e6f9d41c2b1a92647e691de1abaac", "repo": "netbox", "path": "netbox/dcim/api/views.py", "file_name": "views.py", "fun_name": "paths", "commit_message": "Update Cable instantiations to match new signature", "code": "def paths(self, request, pk):\n \n obj = get_object_or_404(self.queryset, pk=pk)\n cablepaths = CablePath.objects.filter(_nodes__contains=obj).prefetch_related('origin', 'destination')\n serializer = serializers.CablePathSerializer(cablepaths, context={'request': request}, many=True)\n\n return Response(serializer.data)\n\n\n#\n# Regions\n#\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 56, "n_words": 24, "vocab_size": 20, "complexity": 1, "nloc": 5, "token_counts": 68, "n_ast_nodes": 113, "n_identifiers": 20, "d_id": 77892, "documentation": { "docstring": "\n Return all CablePaths which traverse a given pass-through port.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 260476, "commit_id": "3bb4bad1425ee7add6001a32f0d83cb459ffa30c", "repo": "scikit-learn", "path": "sklearn/linear_model/tests/test_coordinate_descent.py", "file_name": "test_coordinate_descent.py", "fun_name": "test_read_only_buffer", "commit_message": "MNT Replaced `np.ndarray` with memview where applicable in `linear_model/_cd_fast.pyx` (#23147)\n\nCo-authored-by: Thomas J. 
Fan ", "code": "def test_read_only_buffer():\n \n\n rng = np.random.RandomState(0)\n clf = ElasticNet(alpha=0.1, copy_X=True, random_state=rng)\n X = np.asfortranarray(rng.uniform(size=(100, 10)))\n X.setflags(write=False)\n\n y = rng.rand(100)\n clf.fit(X, y)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 41, "n_words": 20, "vocab_size": 17, "complexity": 1, "nloc": 7, "token_counts": 76, "n_ast_nodes": 118, "n_identifiers": 19, "d_id": 76274, "documentation": { "docstring": "Test that sparse coordinate descent works for read-only buffers", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 255147, "commit_id": "83fa57c74edfd13ddac9548b8a12f9e3e2ed05bd", "repo": "onnx", "path": "onnx/backend/test/runner/__init__.py", "file_name": "__init__.py", "fun_name": "test_cases", "commit_message": "Use Python type annotations rather than comments (#3962)\n\n* These have been supported since Python 3.5.\r\n\r\nONNX doesn't support Python < 3.6, so we can use the annotations.\r\n\r\nDiffs generated by https://pypi.org/project/com2ann/.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Remove MYPY conditional logic in gen_proto.py\r\n\r\nIt breaks the type annotations and shouldn't be needed.\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* Get rid of MYPY bool from more scripts\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* move Descriptors class above where its referenced in type annotation\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fixes\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* remove extra blank line\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotations\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix type annotation in gen_docs\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix Operators.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix TestCoverage.md\r\n\r\nSigned-off-by: Gary Miguel \r\n\r\n* fix protoc-gen-mypy.py\r\n\r\nSigned-off-by: Gary Miguel ", "code": "def test_cases(self) -> Dict[str, Type[unittest.TestCase]]:\n \n test_cases = {}\n for category, items_map in self._filtered_test_items.items():\n test_case_name = str('OnnxBackend{}Test').format(category)\n test_case = self._get_test_case(test_case_name)\n for name, item in sorted(items_map.items()):\n setattr(test_case, name, item.func)\n test_cases[test_case_name] = test_case\n return test_cases\n", "url": "https://github.com/onnx/onnx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 119, "n_words": 32, "vocab_size": 24, "complexity": 3, "nloc": 14, "token_counts": 86, "n_ast_nodes": 137, "n_identifiers": 20, "d_id": 74735, "documentation": { "docstring": "\n List of test cases to be applied on the parent scope\n Example usage:\n globals().update(BackendTest(backend).test_cases)\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 47, "language": "en" } }, { "id": 212147, "commit_id": "1b3e6acd6eebd352106cc5ecf5e12dbf90e0607c", "repo": "bokeh", "path": "bokeh/core/property/bases.py", "file_name": "bases.py", "fun_name": "_may_have_unstable_default", "commit_message": "Add Init signatures to Bokeh models (#12035)\n\n* Add signatures to Bokeh Model initializers\r\n\r\n* use explicit type for override default\r\n\r\n* move InstanceDefault to bokeh.core.properties\r\n\r\n* enable assertions", "code": "def _may_have_unstable_default(self) -> bool:\n \n return callable(self._default)\n", "url": "https://github.com/bokeh/bokeh.git", "language": "Python", 
"ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 7, "token_counts": 15, "n_ast_nodes": 27, "n_identifiers": 5, "d_id": 53177, "documentation": { "docstring": " False if we have a default that is immutable, and will be the\n same every time (some defaults are generated on demand by a function\n to be called).\n\n ", "n_words": 28, "vocab_size": 26, "n_whitespaces": 50, "language": "en" } }, { "id": 245712, "commit_id": "d915740fa8228cf57741b27d9e5d66e358456b8e", "repo": "mmdetection", "path": "mmdet/models/task_modules/assigners/iou2d_calculator.py", "file_name": "iou2d_calculator.py", "fun_name": "__call__", "commit_message": "[Refactor] Refactor anchor head and base head with boxlist (#8625)\n\n* Refactor anchor head\r\n\r\n* Update\r\n\r\n* Update\r\n\r\n* Update\r\n\r\n* Add a series of boxes tools\r\n\r\n* Fix box type to support n x box_dim boxes\r\n\r\n* revert box type changes\r\n\r\n* Add docstring\r\n\r\n* refactor retina_head\r\n\r\n* Update\r\n\r\n* Update\r\n\r\n* Fix comments\r\n\r\n* modify docstring of coder and ioucalculator\r\n\r\n* Replace with_boxlist with use_box_type", "code": "def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):\n \n bboxes1 = get_box_tensor(bboxes1)\n bboxes2 = get_box_tensor(bboxes2)\n assert bboxes1.size(-1) in [0, 4, 5]\n assert bboxes2.size(-1) in [0, 4, 5]\n if bboxes2.size(-1) == 5:\n bboxes2 = bboxes2[..., :4]\n if bboxes1.size(-1) == 5:\n bboxes1 = bboxes1[..., :4]\n\n if self.dtype == 'fp16':\n # change tensor type to save cpu and cuda memory and keep speed\n bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype)\n bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype)\n overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)\n if not overlaps.is_cuda and overlaps.dtype == torch.float16:\n # resume cpu float32\n overlaps = overlaps.float()\n return overlaps\n\n return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 275, "n_words": 94, "vocab_size": 54, "complexity": 6, "nloc": 17, "token_counts": 183, "n_ast_nodes": 279, "n_identifiers": 17, "d_id": 70858, "documentation": { "docstring": "Calculate IoU between 2D bboxes.\n\n Args:\n bboxes1 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4)\n in format, or shape (m, 5) in format.\n bboxes2 (Tensor or :obj:`BaseBoxes`): bboxes have shape (m, 4)\n in format, shape (m, 5) in format, or be empty. If ``is_aligned `` is ``True``,\n then m and n must be equal.\n mode (str): \"iou\" (intersection over union), \"iof\" (intersection\n over foreground), or \"giou\" (generalized intersection over\n union).\n is_aligned (bool, optional): If True, then m and n must be equal.\n Default False.\n\n Returns:\n Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)\n ", "n_words": 115, "vocab_size": 64, "n_whitespaces": 311, "language": "en" } }, { "id": 22153, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/utils.py", "file_name": "utils.py", "fun_name": "to_key_val_list", "commit_message": "Rename notpip to pip. 
Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def to_key_val_list(value):\n \n if value is None:\n return None\n\n if isinstance(value, (str, bytes, bool, int)):\n raise ValueError(\"cannot encode objects that are not 2-tuples\")\n\n if isinstance(value, Mapping):\n value = value.items()\n\n return list(value)\n\n\n# From mitsuhiko/werkzeug (used with permission).", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 71, "n_words": 36, "vocab_size": 31, "complexity": 4, "nloc": 8, "token_counts": 54, "n_ast_nodes": 88, "n_identifiers": 11, "d_id": 4224, "documentation": { "docstring": "Take an object and test to see if it can be represented as a\n dictionary. If it can be, return a list of tuples, e.g.,\n\n ::\n\n >>> to_key_val_list([('key', 'val')])\n [('key', 'val')]\n >>> to_key_val_list({'key': 'val'})\n [('key', 'val')]\n >>> to_key_val_list('string')\n Traceback (most recent call last):\n ...\n ValueError: cannot encode objects that are not 2-tuples\n\n :rtype: list\n ", "n_words": 54, "vocab_size": 46, "n_whitespaces": 122, "language": "en" } }, { "id": 34307, "commit_id": "ac227093e41cecb07c7e0f2fc9a504850907bd06", "repo": "transformers", "path": "src/transformers/models/vilt/modeling_vilt.py", "file_name": "modeling_vilt.py", "fun_name": "_set_gradient_checkpointing", "commit_message": "Add ViLT (#14895)\n\n* First commit\r\n\r\n* Add conversion script\r\n\r\n* Make conversion script work for base model\r\n\r\n* More improvements\r\n\r\n* Update conversion script, works for vqa\r\n\r\n* Add indexing argument to meshgrid\r\n\r\n* Make conversion script work for ViltForPreTraining\r\n\r\n* Add ViltForPreTraining to docs\r\n\r\n* Fix device issue\r\n\r\n* Add processor\r\n\r\n* Add MinMaxResize to feature extractor\r\n\r\n* Implement call method of ViltProcessor\r\n\r\n* Fix tests\r\n\r\n* Add integration test\r\n\r\n* Add loss calculation for VQA\r\n\r\n* Improve tests\r\n\r\n* Improve some more tests\r\n\r\n* Debug tests\r\n\r\n* Small improvements\r\n\r\n* Add support for attention_mask\r\n\r\n* Remove mask_it\r\n\r\n* Add pixel_mask\r\n\r\n* Add tests for ViltFeatureExtractor\r\n\r\n* Improve tests\r\n\r\n* Add ViltForNaturalLanguageVisualReasoning\r\n\r\n* Add ViltForNaturalLanguageVisualReasoning to conversion script\r\n\r\n* Minor fixes\r\n\r\n* Add support for image_embeds, update docstrings to markdown\r\n\r\n* Update docs to markdown\r\n\r\n* Improve conversion script\r\n\r\n* Rename ViltForPreTraining to ViltForMaskedLM\r\n\r\n* Improve conversion script\r\n\r\n* Convert docstrings to markdown\r\n\r\n* Fix code example of retrieval model\r\n\r\n* Properly convert masked language model\r\n\r\n* Add integration test for nlvr\r\n\r\n* Fix code quality\r\n\r\n* Apply suggestions from code review\r\n\r\n* Add copied from statements\r\n\r\n* Fix pretrained_config_archive_map\r\n\r\n* Fix docs\r\n\r\n* Add model to README\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n* Apply more suggestions from code review\r\n\r\n* Make code more readable\r\n\r\n* Add ViltForNaturalLanguageVisualReasoning to the tests\r\n\r\n* Rename ViltForVisualQuestionAnswering to ViltForQuestionAnswering\r\n\r\n* Replace pixel_values_2 by single tensor\r\n\r\n* Add hidden_states and attentions\r\n\r\n* Fix one more test\r\n\r\n* Fix all tests\r\n\r\n* Update year\r\n\r\n* Fix rebase issues\r\n\r\n* Fix another 
rebase issue\r\n\r\n* Remove ViltForPreTraining from auto mapping\r\n\r\n* Rename ViltForImageRetrievalTextRetrieval to ViltForImageAndTextRetrieval\r\n\r\n* Make it possible to use BertTokenizerFast in the processor\r\n\r\n* Use BertTokenizerFast by default\r\n\r\n* Rename ViltForNaturalLanguageVisualReasoning, define custom model output\r\n\r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, ViltEncoder):\n module.gradient_checkpointing = value\n\n\nVILT_START_DOCSTRING = r\n\nVILT_INPUTS_DOCSTRING = r\n\nVILT_IMAGES_AND_TEXT_CLASSIFICATION_INPUTS_DOCSTRING = r\n\n\n@add_start_docstrings(\n \"The bare ViLT Model transformer outputting raw hidden-states without any specific head on top.\",\n VILT_START_DOCSTRING,\n)", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "@add_start_docstrings(\n \"The bare ViLT Model transformer outputting raw hidden-states without any specific head on top.\",\n VILT_START_DOCSTRING,\n)", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 54, "n_words": 36, "vocab_size": 31, "complexity": 2, "nloc": 3, "token_counts": 24, "n_ast_nodes": 71, "n_identifiers": 11, "d_id": 6254, "documentation": { "docstring": "\n This model is a PyTorch `torch.nn.Module `_ subclass. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ViltConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`BertTokenizer`]. See\n [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input\n IDs?](../glossary#input-ids)\n\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n [What are attention masks?](../glossary#attention-mask)\n\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n [What are token type IDs?](../glossary#token-type-ids)\n\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`ViltFeatureExtractor`]. See\n [`ViltFeatureExtractor.__call__`] for details.\n\n pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):\n Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:\n\n - 1 for pixels that are real (i.e. **not masked**),\n - 0 for pixels that are padding (i.e. **masked**).\n `What are attention masks? <../glossary.html#attention-mask>`__\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. 
Mask values selected in `[0, 1]`:\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n\n image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):\n Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `pixel_values` into patch embeddings.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n\n Args:\n input_ids (`torch.LongTensor` of shape `({0})`):\n Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`BertTokenizer`]. See\n [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input\n IDs?](../glossary#input-ids)\n\n attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n [What are attention masks?](../glossary#attention-mask)\n\n token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n 1]`:\n - 0 corresponds to a *sentence A* token,\n - 1 corresponds to a *sentence B* token.\n [What are token type IDs?](../glossary#token-type-ids)\n\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_images, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`ViltFeatureExtractor`]. See\n [`ViltFeatureExtractor.__call__`] for details.\n\n pixel_mask (`torch.LongTensor` of shape `(batch_size, num_images, height, width)`, *optional*):\n Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`:\n\n - 1 for pixels that are real (i.e. **not masked**),\n - 0 for pixels that are padding (i.e. **masked**).\n `What are attention masks? <../glossary.html#attention-mask>`__\n\n head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):\n Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n - 1 indicates the head is **not masked**,\n - 0 indicates the head is **masked**.\n\n inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):\n Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This\n is useful if you want more control over how to convert `input_ids` indices into associated vectors than the\n model's internal embedding lookup matrix.\n\n image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):\n Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.\n This is useful if you want more control over how to convert `pixel_values` into patch embeddings.\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n", "n_words": 802, "vocab_size": 200, "n_whitespaces": 1685, "language": "en" } }, { "id": 217981, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/imaplib.py", "file_name": "imaplib.py", "fun_name": "expunge", "commit_message": "add python 3.10.4 for windows", "code": "def expunge(self):\n \n name = 'EXPUNGE'\n typ, dat = self._simple_command(name)\n return self._untagged_response(typ, dat, name)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 41, "n_words": 13, "vocab_size": 12, "complexity": 1, "nloc": 4, "token_counts": 30, "n_ast_nodes": 51, "n_identifiers": 7, "d_id": 55053, "documentation": { "docstring": "Permanently remove deleted items from selected mailbox.\n\n Generates 'EXPUNGE' response for each deleted message.\n\n (typ, [data]) = .expunge()\n\n 'data' is list of 'EXPUNGE'd message numbers in order received.\n ", "n_words": 28, "vocab_size": 27, "n_whitespaces": 56, "language": "en" } }, { "id": 47565, "commit_id": "49e336ae0302b386a2f47269a6d13988382d975f", "repo": "airflow", "path": "tests/models/test_dag.py", "file_name": "test_dag.py", "fun_name": "test_next_dagrun_after_auto_align", "commit_message": "Replace usage of `DummyOperator` with `EmptyOperator` (#22974)\n\n* Replace usage of `DummyOperator` with `EmptyOperator`", "code": "def test_next_dagrun_after_auto_align(self):\n \n dag = DAG(\n dag_id='test_scheduler_auto_align_1',\n start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),\n schedule_interval=\"4 5 * * *\",\n )\n EmptyOperator(task_id='dummy', dag=dag, owner='airflow')\n\n next_info = dag.next_dagrun_info(None)\n assert next_info and next_info.logical_date == timezone.datetime(2016, 1, 2, 5, 4)\n\n dag = DAG(\n dag_id='test_scheduler_auto_align_2',\n start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),\n schedule_interval=\"10 10 * * *\",\n )\n EmptyOperator(task_id='dummy', dag=dag, owner='airflow')\n\n next_info = dag.next_dagrun_info(None)\n assert next_info and next_info.logical_date == timezone.datetime(2016, 1, 1, 10, 10)\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 209, "n_words": 66, "vocab_size": 32, "complexity": 3, "nloc": 17, "token_counts": 156, "n_ast_nodes": 235, "n_identifiers": 15, "d_id": 9160, "documentation": { "docstring": "\n Test if the schedule_interval will be auto aligned with the start_date\n such that if the start_date coincides with the schedule the first\n 
execution_date will be start_date, otherwise it will be start_date +\n interval.\n ", "n_words": 33, "vocab_size": 21, "n_whitespaces": 69, "language": "en" } }, { "id": 227484, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_ohlc.py", "file_name": "_ohlc.py", "fun_name": "tickwidth", "commit_message": "switch to black .22", "code": "def tickwidth(self):\n \n return self[\"tickwidth\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 59157, "documentation": { "docstring": "\n Sets the width of the open/close tick marks relative to the \"x\"\n minimal interval.\n\n The 'tickwidth' property is a number and may be specified as:\n - An int or float in the interval [0, 0.5]\n\n Returns\n -------\n int|float\n ", "n_words": 38, "vocab_size": 35, "n_whitespaces": 97, "language": "en" } }, { "id": 156986, "commit_id": "142de2608df2494bf11e08038aadddb544b4500c", "repo": "dask", "path": "dask/dataframe/core.py", "file_name": "core.py", "fun_name": "median_approximate", "commit_message": "Add `DataFrame` and `Series` `median` method (#9483)", "code": "def median_approximate(self, method=\"default\"):\n \n return self.quantile(q=0.5, method=method)\n", "url": "https://github.com/dask/dask.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 25, "n_ast_nodes": 39, "n_identifiers": 5, "d_id": 36823, "documentation": { "docstring": "Return the approximate median of the values over the requested axis.\n\n Parameters\n ----------\n method : {'default', 'tdigest', 'dask'}, optional\n What method to use. By default will use Dask's internal custom\n algorithm (``\"dask\"``). If set to ``\"tdigest\"`` will use tdigest\n for floats and ints and fallback to the ``\"dask\"`` otherwise.\n ", "n_words": 49, "vocab_size": 40, "n_whitespaces": 111, "language": "en" } }, { "id": 257543, "commit_id": "4d8f40425bc4e7346359b7609720a50ac10b8af9", "repo": "haystack", "path": "test/nodes/test_summarizer.py", "file_name": "test_summarizer.py", "fun_name": "add_metadata_summerizer", "commit_message": "Passing the meta-data in the summerizer response (#2179)\n\n* Passing the all the meta-data in the summerizer\r\n\r\n* Disable metadata forwarding if `generate_single_summary` is `True`\r\n\r\n* Update Documentation & Code Style\r\n\r\n* simplify tests\r\n\r\n* Update Documentation & Code Style\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def add_metadata_summerizer():\n docs = [\n Document(\n content=,\n meta={\n \"sub_content\": \"Pegasus Example\",\n \"topic\": \"California's Electricity\",\n \"context\": \"Dummy - PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires.\",\n },\n ),\n Document(\n content=,\n meta={\"sub_content\": \"Paris best tour best tour\", \"topic\": \"Eiffel tower\"},\n ),\n ]\n # Original input is overwrote after the \"predict\". 
So adding the same input as check_output to assess the output\n check_output = deepcopy(docs)\n\n summarizer = TransformersSummarizer(model_name_or_path=\"google/pegasus-xsum\")\n summary = summarizer.predict(documents=docs)\n\n assert len(summary[0].meta) == len(check_output[0].meta)\n assert len(summary[1].meta) - 1 == len(check_output[1].meta)\n assert (\n summary[0].meta[\"context\"]\n == \n )\n\n summary = summarizer.predict(documents=docs, generate_single_summary=True)\n\n assert len(summary) == 1\n assert not summary[0].meta # Metadata is not returned in case of a single summary\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 304, "n_words": 122, "vocab_size": 88, "complexity": 1, "nloc": 27, "token_counts": 162, "n_ast_nodes": 273, "n_identifiers": 15, "d_id": 75094, "documentation": { "docstring": "PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct.PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. 
Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.", "n_words": 222, "vocab_size": 117, "n_whitespaces": 221, "language": "en" } }, { "id": 176140, "commit_id": "e59a77b36afa41b93518b8bc4128e6e90da08fda", "repo": "edgedb", "path": "edb/ir/scopetree.py", "file_name": "scopetree.py", "fun_name": "dump_full", "commit_message": "Add a scopetree method to dump the root but to highlight the current node (#3330)", "code": "def dump_full(self) -> None:\n \n styles = {}\n if term.supports_colors(sys.stdout.fileno()):\n styles[self] = term.Style16(color='magenta', bold=True)\n print(self.root.pdebugformat(styles=styles))\n", "url": "https://github.com/edgedb/edgedb.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 53, "n_words": 14, "vocab_size": 13, "complexity": 2, "nloc": 6, "token_counts": 56, "n_ast_nodes": 94, "n_identifiers": 14, "d_id": 41716, "documentation": { "docstring": "Do a debug dump of the root but hilight the current node.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 303763, "commit_id": "ebbff7b60e43f17d65ead811d314602b9daddfc4", "repo": "core", "path": "tests/components/awair/conftest.py", "file_name": "conftest.py", "fun_name": "local_devices_fixture", "commit_message": "Add Awair Local API support (#75535)", "code": "def local_devices_fixture():\n \n return json.loads(load_fixture(\"awair/local_devices.json\"))\n\n\n@pytest.fixture(name=\"gen1_data\", scope=\"session\")", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture(name=\"gen1_data\", scope=\"session\")", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 11, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 54, "n_identifiers": 8, "d_id": 102572, "documentation": { "docstring": "Fixture representing devices returned by Awair local API.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 227179, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_icicle.py", "file_name": "_icicle.py", "fun_name": "root", "commit_message": "switch to black .22", "code": "def root(self):\n \n return self[\"root\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 58852, "documentation": { "docstring": "\n The 'root' property is an instance of Root\n that may be specified as:\n - An instance of :class:`plotly.graph_objs.icicle.Root`\n - A dict of string/value properties that will be passed\n to the Root constructor\n\n Supported dict properties:\n\n color\n sets the color of the root node for a\n sunburst/treemap/icicle trace. 
this has no\n effect when a colorscale is used to set the\n markers.\n\n Returns\n -------\n plotly.graph_objs.icicle.Root\n ", "n_words": 63, "vocab_size": 47, "n_whitespaces": 237, "language": "en" } }, { "id": 3829, "commit_id": "a3aae8017a0a40ff2006e2567f71dccb04c997a5", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_base_insight_streams.py", "file_name": "test_base_insight_streams.py", "fun_name": "test_stream_slices_no_state_close_to_now", "commit_message": "🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805)\n\n* Facebook Marketing performance improvement\r\n\r\n* add comments and little refactoring\r\n\r\n* fix integration tests with the new config\r\n\r\n* improve job status handling, limit concurrency to 10\r\n\r\n* fix campaign jobs, refactor manager\r\n\r\n* big refactoring of async jobs, support random order of slices\r\n\r\n* update source _read_incremental to hook new state logic\r\n\r\n* fix issues with timeout\r\n\r\n* remove debugging and clean up, improve retry logic\r\n\r\n* merge changes from #8234\r\n\r\n* fix call super _read_increment\r\n\r\n* generalize batch execution, add use_batch flag\r\n\r\n* improve coverage, do some refactoring of spec\r\n\r\n* update test, remove overrides of source\r\n\r\n* add split by AdSet\r\n\r\n* add smaller insights\r\n\r\n* fix end_date < start_date case\r\n\r\n* add account_id to PK\r\n\r\n* add notes\r\n\r\n* fix new streams\r\n\r\n* fix reversed incremental stream\r\n\r\n* update spec.json for SAT\r\n\r\n* upgrade CDK and bump version\r\n\r\nCo-authored-by: Dmytro Rezchykov \r\nCo-authored-by: Eugene Kulak ", "code": "def test_stream_slices_no_state_close_to_now(self, api, async_manager_mock, recent_start_date):\n \n start_date = recent_start_date\n end_date = pendulum.now()\n stream = AdsInsights(api=api, start_date=start_date, end_date=end_date)\n async_manager_mock.completed_jobs.return_value = [1, 2, 3]\n\n slices = list(stream.stream_slices(stream_state=None, sync_mode=SyncMode.incremental))\n\n assert slices == [{\"insight_job\": 1}, {\"insight_job\": 2}, {\"insight_job\": 3}]\n async_manager_mock.assert_called_once()\n args, kwargs = async_manager_mock.call_args\n generated_jobs = list(kwargs[\"jobs\"])\n assert len(generated_jobs) == (end_date - start_date).days + 1\n assert generated_jobs[0].interval.start == start_date.date()\n assert generated_jobs[1].interval.start == start_date.date() + duration(days=1)\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 151, "n_words": 60, "vocab_size": 44, "complexity": 1, "nloc": 13, "token_counts": 165, "n_ast_nodes": 259, "n_identifiers": 31, "d_id": 574, "documentation": { "docstring": "Stream will use start_date when there is not state and start_date within 28d from now", "n_words": 15, "vocab_size": 14, "n_whitespaces": 14, "language": "en" } }, { "id": 66831, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/patches/v13_0/update_returned_qty_in_pr_dn.py", "file_name": "update_returned_qty_in_pr_dn.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute():\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"purchase_receipt\")\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"purchase_receipt_item\")\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"delivery_note\")\n\tfrappe.reload_doc(\"stock\", \"doctype\", 
\"delivery_note_item\")\n\tfrappe.reload_doc(\"stock\", \"doctype\", \"stock_settings\")\n\n\tdef update_from_return_docs(doctype):\n\t\tfor return_doc in frappe.get_all(\n\t\t\tdoctype, filters={\"is_return\": 1, \"docstatus\": 1, \"return_against\": (\"!=\", \"\")}\n\t\t):\n\t\t\t# Update original receipt/delivery document from return\n\t\t\treturn_doc = frappe.get_cached_doc(doctype, return_doc.name)\n\t\t\ttry:\n\t\t\t\treturn_doc.update_prevdoc_status()\n\t\t\texcept OverAllowanceError:\n\t\t\t\tfrappe.db.rollback()\n\t\t\t\tcontinue\n\n\t\t\treturn_against = frappe.get_doc(doctype, return_doc.return_against)\n\t\t\treturn_against.update_billing_status()\n\t\t\tfrappe.db.commit()\n\n\t# Set received qty in stock uom in PR, as returned qty is checked against it\n\tfrappe.db.sql(\n\t\t\n\t)\n\n\tfor doctype in (\"Purchase Receipt\", \"Delivery Note\"):\n\t\tupdate_from_return_docs(doctype)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 56, "n_words": 81, "vocab_size": 63, "complexity": 2, "nloc": 14, "token_counts": 77, "n_ast_nodes": 297, "n_identifiers": 19, "d_id": 14353, "documentation": { "docstring": " update `tabPurchase Receipt Item`\n\t\tset received_stock_qty = received_qty * conversion_factor\n\t\twhere docstatus = 1 ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 13, "language": "en" } }, { "id": 176489, "commit_id": "f6755ffa00211b523c6c0bec5398bc6c3c43c8b1", "repo": "networkx", "path": "networkx/generators/geometric.py", "file_name": "geometric.py", "fun_name": "geometric_edges", "commit_message": "Update black (#5438)\n\n* CI: sync up black dev requirements version with precommit\r\n\r\n* Run black\r\n\r\nCo-authored-by: Jarrod Millman ", "code": "def geometric_edges(G, radius, p):\n \n nodes_pos = G.nodes(data=\"pos\")\n try:\n import scipy as sp\n import scipy.spatial # call as sp.spatial\n except ImportError:\n # no scipy KDTree so compute by for-loop\n radius_p = radius**p\n edges = [\n (u, v)\n for (u, pu), (v, pv) in combinations(nodes_pos, 2)\n if sum(abs(a - b) ** p for a, b in zip(pu, pv)) <= radius_p\n ]\n return edges\n # scipy KDTree is available\n nodes, coords = list(zip(*nodes_pos))\n kdtree = sp.spatial.cKDTree(coords) # Cannot provide generator.\n edge_indexes = kdtree.query_pairs(radius, p)\n edges = [(nodes[u], nodes[v]) for u, v in sorted(edge_indexes)]\n return edges\n\n\n@py_random_state(5)\n@nodes_or_number(0)", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "@py_random_state(5)\n@nodes_or_number(0)", "n_ast_errors": 1, "ast_levels": 18, "n_whitespaces": 206, "n_words": 94, "vocab_size": 70, "complexity": 6, "nloc": 18, "token_counts": 151, "n_ast_nodes": 252, "n_identifiers": 32, "d_id": 41932, "documentation": { "docstring": "Returns edge list of node pairs within `radius` of each other.\n\n Parameters\n ----------\n G : networkx graph\n The graph from which to generate the edge list. The nodes in `G` should\n have an attribute ``pos`` corresponding to the node position, which is\n used to compute the distance to other nodes.\n radius : scalar\n The distance threshold. 
Edges are included in the edge list if the\n distance between the two nodes is less than `radius`.\n p : scalar\n The `Minkowski distance metric\n `_ use to compute\n distances.\n\n Returns\n -------\n edges : list\n List of edges whose distances are less than `radius`\n\n Notes\n -----\n Radius uses Minkowski distance metric `p`.\n If scipy is available, `scipy.spatial.cKDTree` is used to speed computation.\n\n Examples\n --------\n Create a graph with nodes that have a \"pos\" attribute representing 2D\n coordinates.\n\n >>> G = nx.Graph()\n >>> G.add_nodes_from([\n ... (0, {\"pos\": (0, 0)}),\n ... (1, {\"pos\": (3, 0)}),\n ... (2, {\"pos\": (8, 0)}),\n ... ])\n >>> p = 2 # Euclidean distance\n >>> nx.geometric_edges(G, radius=1, p=p)\n []\n >>> nx.geometric_edges(G, radius=4, p=p)\n [(0, 1)]\n >>> nx.geometric_edges(G, radius=6, p=p)\n [(0, 1), (1, 2)]\n >>> nx.geometric_edges(G, radius=9, p=p)\n [(0, 1), (0, 2), (1, 2)]\n ", "n_words": 192, "vocab_size": 112, "n_whitespaces": 364, "language": "en" } }, { "id": 216481, "commit_id": "c78f1ee4f49df35ab04e921a45de0878716d8bf5", "repo": "salt", "path": "salt/client/mixins.py", "file_name": "mixins.py", "fun_name": "_proc_function_remote", "commit_message": "Implement ``__getstate__`` and ``__setstate__`` instead of using ``classmethod``\n\nSigned-off-by: Pedro Algarvio ", "code": "def _proc_function_remote(self, *, fun, low, user, tag, jid, daemonize=True):\n \n if daemonize and not salt.utils.platform.is_windows():\n # Shutdown the multiprocessing before daemonizing\n salt.log.setup.shutdown_multiprocessing_logging()\n\n salt.utils.process.daemonize()\n\n # Reconfigure multiprocessing logging after daemonizing\n salt.log.setup.setup_multiprocessing_logging()\n\n # pack a few things into low\n low[\"__jid__\"] = jid\n low[\"__user__\"] = user\n low[\"__tag__\"] = tag\n\n try:\n return self.cmd_sync(low)\n except salt.exceptions.EauthAuthenticationError as exc:\n log.error(exc)\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 186, "n_words": 53, "vocab_size": 47, "complexity": 4, "nloc": 12, "token_counts": 105, "n_ast_nodes": 175, "n_identifiers": 22, "d_id": 54603, "documentation": { "docstring": "\n Run this method in a multiprocess target to execute the function on the\n master and fire the return data on the event bus\n ", "n_words": 23, "vocab_size": 19, "n_whitespaces": 45, "language": "en" } }, { "id": 321569, "commit_id": "623b06bc3dabfd53f637e611ec8d3e4feb521189", "repo": "qutebrowser", "path": "tests/unit/keyinput/test_keyutils.py", "file_name": "test_keyutils.py", "fun_name": "test_text_qtest", "commit_message": "Fix remaining enum/flag issues", "code": "def test_text_qtest(self, qtest_key, qtbot, key_tester):\n \n with qtbot.wait_signal(key_tester.got_text):\n qtbot.keyPress(key_tester, qtest_key.member)\n\n info = keyutils.KeyInfo(qtest_key.member)\n assert info.text() == key_tester.text.lower()\n\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 55, "n_words": 16, "vocab_size": 16, "complexity": 1, "nloc": 5, "token_counts": 56, "n_ast_nodes": 91, "n_identifiers": 14, "d_id": 117802, "documentation": { "docstring": "Make sure KeyInfo.text() lines up with QTest::keyToAscii.\n\n See key_data.py for inputs and expected values.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 28, "language": "en" } }, { "id": 11483, 
"commit_id": "c7ad27e5614dfb2b1684f4718c5508840cd55de0", "repo": "jina", "path": "jina/parsers/orchestrate/runtimes/head.py", "file_name": "head.py", "fun_name": "mixin_head_parser", "commit_message": "refactor: add disable_reduce args (#4424)", "code": "def mixin_head_parser(parser):\n \n\n gp = add_arg_group(parser, title='Head')\n\n gp.add_argument(\n '--uses-before-address',\n type=str,\n help='The address of the uses-before runtime',\n )\n\n gp.add_argument(\n '--uses-after-address',\n type=str,\n help='The address of the uses-before runtime',\n )\n\n gp.add_argument(\n '--connection-list',\n type=str,\n help='dictionary JSON with a list of connections to configure',\n )\n\n gp.add_argument(\n '--disable-reduce',\n action='store_true',\n default=False,\n help='Disable the built-in reduce mechanism, set this if the reduction is to be handled by the Executor connected to this Head',\n )\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 186, "n_words": 65, "vocab_size": 44, "complexity": 1, "nloc": 23, "token_counts": 80, "n_ast_nodes": 137, "n_identifiers": 11, "d_id": 2054, "documentation": { "docstring": "Mixing in arguments required by head pods and runtimes into the given parser.\n :param parser: the parser instance to which we add arguments\n ", "n_words": 23, "vocab_size": 21, "n_whitespaces": 29, "language": "en" } }, { "id": 32329, "commit_id": "99eb9b523f9b9ea6096323ce5610ce6633acc88a", "repo": "transformers", "path": "examples/pytorch/test_accelerate_examples.py", "file_name": "test_accelerate_examples.py", "fun_name": "test_run_clm_no_trainer", "commit_message": "Fix `no_trainer` CI (#18242)\n\n* Fix all tests", "code": "def test_run_clm_no_trainer(self):\n tmp_dir = self.get_auto_remove_tmp_dir()\n testargs = f.split()\n\n if torch.cuda.device_count() > 1:\n # Skipping because there are not enough batches to train the model + would need a drop_last to work.\n return\n\n run_command(self._launch_args + testargs)\n result = get_results(tmp_dir)\n self.assertLess(result[\"perplexity\"], 100)\n self.assertTrue(os.path.exists(os.path.join(tmp_dir, \"epoch_0\")))\n self.assertTrue(os.path.exists(os.path.join(tmp_dir, \"clm_no_trainer\")))\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 121, "n_words": 44, "vocab_size": 39, "complexity": 2, "nloc": 22, "token_counts": 101, "n_ast_nodes": 180, "n_identifiers": 20, "d_id": 5903, "documentation": { "docstring": "\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n ", "n_words": 20, "vocab_size": 18, "n_whitespaces": 149, "language": "en" } }, { "id": 118727, "commit_id": "72703b38029f9358a0ec7ca5ed875a6b438ece19", "repo": "streamlit", "path": "lib/streamlit/elements/bokeh_chart.py", "file_name": "bokeh_chart.py", "fun_name": "bokeh_chart", "commit_message": "Replace static apps with live Cloud apps (#4317)\n\nCo-authored-by: kajarenc ", "code": "def bokeh_chart(self, figure, use_container_width=False):\n \n import bokeh\n\n if bokeh.__version__ != ST_BOKEH_VERSION:\n raise StreamlitAPIException(\n 
f\"Streamlit only supports Bokeh version {ST_BOKEH_VERSION}, \"\n f\"but you have version {bokeh.__version__} installed. Please \"\n f\"run `pip install --force-reinstall --no-deps bokeh==\"\n f\"{ST_BOKEH_VERSION}` to install the correct version.\"\n )\n\n # Generate element ID from delta path\n delta_path = self.dg._get_delta_path_str()\n element_id = hashlib.md5(delta_path.encode()).hexdigest()\n\n bokeh_chart_proto = BokehChartProto()\n marshall(bokeh_chart_proto, figure, use_container_width, element_id)\n return self.dg._enqueue(\"bokeh_chart\", bokeh_chart_proto)\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 208, "n_words": 63, "vocab_size": 57, "complexity": 2, "nloc": 14, "token_counts": 84, "n_ast_nodes": 153, "n_identifiers": 20, "d_id": 26384, "documentation": { "docstring": "Display an interactive Bokeh chart.\n\n Bokeh is a charting library for Python. The arguments to this function\n closely follow the ones for Bokeh's `show` function. You can find\n more about Bokeh at https://bokeh.pydata.org.\n\n Parameters\n ----------\n figure : bokeh.plotting.figure.Figure\n A Bokeh figure to plot.\n\n use_container_width : bool\n If True, set the chart width to the column width. This takes\n precedence over Bokeh's native `width` value.\n\n To show Bokeh charts in Streamlit, call `st.bokeh_chart`\n wherever you would call Bokeh's `show`.\n\n Example\n -------\n >>> import streamlit as st\n >>> from bokeh.plotting import figure\n >>>\n >>> x = [1, 2, 3, 4, 5]\n >>> y = [6, 7, 2, 4, 5]\n >>>\n >>> p = figure(\n ... title='simple line example',\n ... x_axis_label='x',\n ... y_axis_label='y')\n ...\n >>> p.line(x, y, legend_label='Trend', line_width=2)\n >>>\n >>> st.bokeh_chart(p, use_container_width=True)\n\n .. 
output::\n https://share.streamlit.io/streamlit/docs/main/python/api-examples-source/charts.bokeh_chart.py\n height: 700px\n\n ", "n_words": 135, "vocab_size": 102, "n_whitespaces": 389, "language": "en" } }, { "id": 258915, "commit_id": "1fc86b6aacd89da44a3b4e8abf7c3e2ba4336ffe", "repo": "scikit-learn", "path": "sklearn/metrics/_classification.py", "file_name": "_classification.py", "fun_name": "matthews_corrcoef", "commit_message": "MNT Update black to stable version (#22474)", "code": "def matthews_corrcoef(y_true, y_pred, *, sample_weight=None):\n \n y_type, y_true, y_pred = _check_targets(y_true, y_pred)\n check_consistent_length(y_true, y_pred, sample_weight)\n if y_type not in {\"binary\", \"multiclass\"}:\n raise ValueError(\"%s is not supported\" % y_type)\n\n lb = LabelEncoder()\n lb.fit(np.hstack([y_true, y_pred]))\n y_true = lb.transform(y_true)\n y_pred = lb.transform(y_pred)\n\n C = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)\n t_sum = C.sum(axis=1, dtype=np.float64)\n p_sum = C.sum(axis=0, dtype=np.float64)\n n_correct = np.trace(C, dtype=np.float64)\n n_samples = p_sum.sum()\n cov_ytyp = n_correct * n_samples - np.dot(t_sum, p_sum)\n cov_ypyp = n_samples**2 - np.dot(p_sum, p_sum)\n cov_ytyt = n_samples**2 - np.dot(t_sum, t_sum)\n\n if cov_ypyp * cov_ytyt == 0:\n return 0.0\n else:\n return cov_ytyp / np.sqrt(cov_ytyt * cov_ypyp)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 168, "n_words": 93, "vocab_size": 62, "complexity": 3, "nloc": 21, "token_counts": 218, "n_ast_nodes": 336, "n_identifiers": 30, "d_id": 75481, "documentation": { "docstring": "Compute the Matthews correlation coefficient (MCC).\n\n The Matthews correlation coefficient is used in machine learning as a\n measure of the quality of binary and multiclass classifications. It takes\n into account true and false positives and negatives and is generally\n regarded as a balanced measure which can be used even if the classes are of\n very different sizes. The MCC is in essence a correlation coefficient value\n between -1 and +1. A coefficient of +1 represents a perfect prediction, 0\n an average random prediction and -1 an inverse prediction. The statistic\n is also known as the phi coefficient. [source: Wikipedia]\n\n Binary and multiclass labels are supported. Only in the binary case does\n this relate to information about true and false positives and negatives.\n See references below.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n y_true : array, shape = [n_samples]\n Ground truth (correct) target values.\n\n y_pred : array, shape = [n_samples]\n Estimated targets as returned by a classifier.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n .. versionadded:: 0.18\n\n Returns\n -------\n mcc : float\n The Matthews correlation coefficient (+1 represents a perfect\n prediction, 0 an average random prediction and -1 and inverse\n prediction).\n\n References\n ----------\n .. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the\n accuracy of prediction algorithms for classification: an overview\n `_.\n\n .. [2] `Wikipedia entry for the Matthews Correlation Coefficient\n `_.\n\n .. [3] `Gorodkin, (2004). Comparing two K-category assignments by a\n K-category correlation coefficient\n `_.\n\n .. [4] `Jurman, Riccadonna, Furlanello, (2012). 
A Comparison of MCC and CEN\n Error Measures in MultiClass Prediction\n `_.\n\n Examples\n --------\n >>> from sklearn.metrics import matthews_corrcoef\n >>> y_true = [+1, +1, +1, -1]\n >>> y_pred = [+1, -1, +1, +1]\n >>> matthews_corrcoef(y_true, y_pred)\n -0.33...\n ", "n_words": 283, "vocab_size": 177, "n_whitespaces": 482, "language": "en" } }, { "id": 247085, "commit_id": "7754af24ab163a3666bc04c7df409e59ace0d763", "repo": "synapse", "path": "tests/handlers/test_room_summary.py", "file_name": "test_room_summary.py", "fun_name": "test_fed_filtering", "commit_message": "Remove the unstable `/spaces` endpoint. (#12073)\n\n...and various code supporting it.\r\n\r\nThe /spaces endpoint was from an old version of MSC2946 and included\r\nboth a Client-Server and Server-Server API. Note that the unstable\r\n/hierarchy endpoint (from the final version of MSC2946) is not yet\r\nremoved.", "code": "def test_fed_filtering(self):\n \n fed_hostname = self.hs.hostname + \"2\"\n subspace = \"#subspace:\" + fed_hostname\n\n # Create a few rooms which will have different properties.\n public_room = \"#public:\" + fed_hostname\n knock_room = \"#knock:\" + fed_hostname\n not_invited_room = \"#not_invited:\" + fed_hostname\n invited_room = \"#invited:\" + fed_hostname\n restricted_room = \"#restricted:\" + fed_hostname\n restricted_accessible_room = \"#restricted_accessible:\" + fed_hostname\n world_readable_room = \"#world_readable:\" + fed_hostname\n joined_room = self.helper.create_room_as(self.user, tok=self.token)\n\n # Poke an invite over federation into the database.\n self._poke_fed_invite(invited_room, \"@remote:\" + fed_hostname)\n\n # Note that these entries are brief, but should contain enough info.\n children_rooms = (\n (\n public_room,\n {\n \"room_id\": public_room,\n \"world_readable\": False,\n \"join_rules\": JoinRules.PUBLIC,\n },\n ),\n (\n knock_room,\n {\n \"room_id\": knock_room,\n \"world_readable\": False,\n \"join_rules\": JoinRules.KNOCK,\n },\n ),\n (\n not_invited_room,\n {\n \"room_id\": not_invited_room,\n \"world_readable\": False,\n \"join_rules\": JoinRules.INVITE,\n },\n ),\n (\n invited_room,\n {\n \"room_id\": invited_room,\n \"world_readable\": False,\n \"join_rules\": JoinRules.INVITE,\n },\n ),\n (\n restricted_room,\n {\n \"room_id\": restricted_room,\n \"world_readable\": False,\n \"join_rules\": JoinRules.RESTRICTED,\n \"allowed_room_ids\": [],\n },\n ),\n (\n restricted_accessible_room,\n {\n \"room_id\": restricted_accessible_room,\n \"world_readable\": False,\n \"join_rules\": JoinRules.RESTRICTED,\n \"allowed_room_ids\": [self.room],\n },\n ),\n (\n world_readable_room,\n {\n \"room_id\": world_readable_room,\n \"world_readable\": True,\n \"join_rules\": JoinRules.INVITE,\n },\n ),\n (\n joined_room,\n {\n \"room_id\": joined_room,\n \"world_readable\": False,\n \"join_rules\": JoinRules.INVITE,\n },\n ),\n )\n\n subspace_room_entry = _RoomEntry(\n subspace,\n {\n \"room_id\": subspace,\n \"world_readable\": True,\n },\n # Place each room in the sub-space.\n [\n {\n \"type\": EventTypes.SpaceChild,\n \"room_id\": subspace,\n \"state_key\": room_id,\n \"content\": {\"via\": [fed_hostname]},\n }\n for room_id, _ in children_rooms\n ],\n )\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 1598, "n_words": 218, "vocab_size": 104, "complexity": 2, "nloc": 129, "token_counts": 484, "n_ast_nodes": 544, "n_identifiers": 33, "d_id": 71495, "documentation": { 
"docstring": "\n Rooms returned over federation should be properly filtered to only include\n rooms the user has access to.\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 39, "language": "en" } }, { "id": 259434, "commit_id": "75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc", "repo": "scikit-learn", "path": "sklearn/_loss/tests/test_loss.py", "file_name": "test_loss.py", "fun_name": "test_tweedie_log_identity_consistency", "commit_message": "ENH migrate GLMs / TweedieRegressor to linear loss (#22548)\n\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Thomas J. Fan ", "code": "def test_tweedie_log_identity_consistency(p):\n \n half_tweedie_log = HalfTweedieLoss(power=p)\n half_tweedie_identity = HalfTweedieLossIdentity(power=p)\n n_samples = 10\n y_true, raw_prediction = random_y_true_raw_prediction(\n loss=half_tweedie_log, n_samples=n_samples, seed=42\n )\n y_pred = half_tweedie_log.link.inverse(raw_prediction) # exp(raw_prediction)\n\n # Let's compare the loss values, up to some constant term that is dropped\n # in HalfTweedieLoss but not in HalfTweedieLossIdentity.\n loss_log = half_tweedie_log.loss(\n y_true=y_true, raw_prediction=raw_prediction\n ) + half_tweedie_log.constant_to_optimal_zero(y_true)\n loss_identity = half_tweedie_identity.loss(\n y_true=y_true, raw_prediction=y_pred\n ) + half_tweedie_identity.constant_to_optimal_zero(y_true)\n # Note that HalfTweedieLoss ignores different constant terms than\n # HalfTweedieLossIdentity. Constant terms means terms not depending on\n # raw_prediction. By adding these terms, `constant_to_optimal_zero`, both losses\n # give the same values.\n assert_allclose(loss_log, loss_identity)\n\n # For gradients and hessians, the constant terms do not matter. We have, however,\n # to account for the chain rule, i.e. 
with x=raw_prediction\n # gradient_log(x) = d/dx loss_log(x)\n # = d/dx loss_identity(exp(x))\n # = exp(x) * gradient_identity(exp(x))\n # Similarly,\n # hessian_log(x) = exp(x) * gradient_identity(exp(x))\n # + exp(x)**2 * hessian_identity(x)\n gradient_log, hessian_log = half_tweedie_log.gradient_hessian(\n y_true=y_true, raw_prediction=raw_prediction\n )\n gradient_identity, hessian_identity = half_tweedie_identity.gradient_hessian(\n y_true=y_true, raw_prediction=y_pred\n )\n assert_allclose(gradient_log, y_pred * gradient_identity)\n assert_allclose(\n hessian_log, y_pred * gradient_identity + y_pred**2 * hessian_identity\n )\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 383, "n_words": 174, "vocab_size": 109, "complexity": 1, "nloc": 25, "token_counts": 155, "n_ast_nodes": 255, "n_identifiers": 25, "d_id": 75768, "documentation": { "docstring": "Test for identical losses when only the link function is different.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 65075, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/doctype/subscription/subscription.py", "file_name": "subscription.py", "fun_name": "restart_subscription", "commit_message": "style: format code with black", "code": "def restart_subscription(name):\n\t\n\tsubscription = frappe.get_doc(\"Subscription\", name)\n\tsubscription.restart_subscription()\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 4, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 21, "n_ast_nodes": 50, "n_identifiers": 6, "d_id": 13786, "documentation": { "docstring": "\n\tRestarts a cancelled `Subscription`. 
The `Subscription` will 'forget' the history of\n\tall invoices it has generated\n\t", "n_words": 16, "vocab_size": 16, "n_whitespaces": 14, "language": "en" } }, { "id": 220514, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/futures.py", "file_name": "futures.py", "fun_name": "set_exception", "commit_message": "add python 3.10.4 for windows", "code": "def set_exception(self, exception):\n \n if self._state != _PENDING:\n raise exceptions.InvalidStateError(f'{self._state}: {self!r}')\n if isinstance(exception, type):\n exception = exception()\n if type(exception) is StopIteration:\n raise TypeError(\"StopIteration interacts badly with generators \"\n \"and cannot be raised into a Future\")\n self._exception = exception\n self._state = _FINISHED\n self.__schedule_callbacks()\n self.__log_traceback = True\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 160, "n_words": 44, "vocab_size": 36, "complexity": 4, "nloc": 12, "token_counts": 70, "n_ast_nodes": 132, "n_identifiers": 15, "d_id": 56024, "documentation": { "docstring": "Mark the future done and set an exception.\n\n If the future is already done when this method is called, raises\n InvalidStateError.\n ", "n_words": 21, "vocab_size": 17, "n_whitespaces": 42, "language": "en" } }, { "id": 189467, "commit_id": "902e7eb4f0147b5882a613b67467e38a1d47f01e", "repo": "manim", "path": "manim/mobject/svg/svg_mobject.py", "file_name": "svg_mobject.py", "fun_name": "_move_into_position", "commit_message": "Hide more private methods from the docs. (#2468)\n\n* hide privs from text_mobject.py\r\n\r\n* hide privs from tex_mobject.py\r\n\r\n* hide privs from code_mobject.py\r\n\r\n* hide privs from svg_mobject.py\r\n\r\n* remove SVGPath and utils from __init__.py\r\n\r\n* don't import string_to_numbers\r\n\r\n* hide privs from geometry.py\r\n\r\n* hide privs from matrix.py\r\n\r\n* hide privs from numbers.py\r\n\r\n* hide privs from three_dimensions.py\r\n\r\n* forgot underscore under set_stroke_width_from_length\r\n\r\n* there were more i missed\r\n\r\n* unhidea method that was used in docs\r\n\r\n* forgot other text2hash\r\n\r\n* remove svg_path from docs", "code": "def _move_into_position(self, width, height):\n \n if self.should_center:\n self.center()\n if height is not None:\n self.height = height\n if width is not None:\n self.width = width\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 84, "n_words": 23, "vocab_size": 15, "complexity": 4, "nloc": 7, "token_counts": 42, "n_ast_nodes": 68, "n_identifiers": 6, "d_id": 46075, "documentation": { "docstring": "Uses the SVGMobject's config dictionary to set the Mobject's\n width, height, and/or center it. 
Use ``width``, ``height``, and\n ``should_center`` respectively to modify this.\n ", "n_words": 23, "vocab_size": 21, "n_whitespaces": 44, "language": "en" } }, { "id": 45007, "commit_id": "66c342d033bd3cb959b4dc4e7e4b8aad597aab63", "repo": "airflow", "path": "airflow/migrations/versions/8646922c8a04_change_default_pool_slots_to_1.py", "file_name": "8646922c8a04_change_default_pool_slots_to_1.py", "fun_name": "upgrade", "commit_message": "Support generating SQL script for upgrades (#20962)\n\nThis PR attempts to add support for generating sql scripts for upgrade.\r\nExample command:\r\n`airflow db upgrade --revision-range e8d98d8ss99:78daisdu38d`\r\n`airflow db upgrade --range 2.0.0:2.2.3`", "code": "def upgrade():\n \n with op.batch_alter_table(\"task_instance\", schema=None) as batch_op:\n batch_op.alter_column(\"pool_slots\", existing_type=sa.Integer, nullable=False, server_default='1')\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 24, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 3, "token_counts": 39, "n_ast_nodes": 70, "n_identifiers": 11, "d_id": 8439, "documentation": { "docstring": "Change default pool_slots to 1 and make pool_slots not nullable", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 304817, "commit_id": "8896229ea641a558161d8caed796895e9a78f457", "repo": "core", "path": "tests/components/foobot/test_sensor.py", "file_name": "test_sensor.py", "fun_name": "test_setup_temporary_error", "commit_message": "Improve type hint in foobot sensor entity (#77164)", "code": "async def test_setup_temporary_error(hass, aioclient_mock):\n \n fake_async_add_entities = MagicMock()\n\n errors = [HTTPStatus.TOO_MANY_REQUESTS, HTTPStatus.INTERNAL_SERVER_ERROR]\n for error in errors:\n aioclient_mock.get(re.compile(\"api.foobot.io/v2/owner/.*\"), status=error)\n with pytest.raises(PlatformNotReady):\n await foobot.async_setup_platform(\n hass, VALID_CONFIG, fake_async_add_entities\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 88, "n_words": 25, "vocab_size": 23, "complexity": 2, "nloc": 9, "token_counts": 63, "n_ast_nodes": 104, "n_identifiers": 20, "d_id": 103612, "documentation": { "docstring": "Expected failures caused by temporary errors in API response.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 107161, "commit_id": "ec4dfbc3c83866f487ff0bc9c87b0d43a1c02b22", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_constrainedlayout.py", "file_name": "test_constrainedlayout.py", "fun_name": "test_constrained_layout22", "commit_message": "ENH: implement and use base layout_engine for more flexible layout.", "code": "def test_constrained_layout22():\n \n fig, ax = plt.subplots(layout=\"constrained\")\n\n fig.draw_without_rendering()\n extents0 = np.copy(ax.get_position().extents)\n\n fig.suptitle(\"Suptitle\", y=0.5)\n fig.draw_without_rendering()\n extents1 = np.copy(ax.get_position().extents)\n\n np.testing.assert_allclose(extents0, extents1)\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 42, "n_words": 18, "vocab_size": 14, "complexity": 1, "nloc": 8, "token_counts": 77, "n_ast_nodes": 129, "n_identifiers": 17, "d_id": 22616, "documentation": { "docstring": "#11035: suptitle should not 
be include in CL if manually positioned", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 161775, "commit_id": "ebc5d2797e7bfb595183fe61aac50be58c9a5174", "repo": "rich", "path": "tests/test_syntax.py", "file_name": "test_syntax.py", "fun_name": "test_syntax_highlight_ranges", "commit_message": "[syntax] add a `highlight_ranges` optional arg to the Syntax ctor\n\nWith this new API we can apply a style from (LINE A, COLUMN A) to (LINE B, COLUMN B) - which is something we will need to be able to add arbitrary ranges to Syntax", "code": "def test_syntax_highlight_ranges():\n syntax = Syntax(\n CODE,\n lexer=\"python\",\n line_numbers=True,\n word_wrap=False,\n highlight_ranges=[\n SyntaxHighlightRange(\n # overline the 2nd char of the 1st line:\n start=SyntaxPosition(1, 1),\n end=SyntaxPosition(1, 2),\n style=Style(overline=True),\n ),\n SyntaxHighlightRange(\n start=SyntaxPosition(1, len(\"def loop_\")),\n end=SyntaxPosition(1, len(\"def loop_first_last\")),\n style=Style(underline=True),\n ),\n SyntaxHighlightRange(\n start=SyntaxPosition(1, len(\"def loop_first\")),\n end=SyntaxPosition(3, len(\" iter_values = iter\")),\n style=Style(bold=True),\n ),\n SyntaxHighlightRange(\n start=SyntaxPosition(9, len(\" for \")),\n end=SyntaxPosition(9, len(\" for value in\")),\n style=Style(strike=True),\n ),\n SyntaxHighlightRange(\n start=SyntaxPosition(6, len(\" except \")),\n end=SyntaxPosition(6, len(\" except StopIteration\")),\n style=Style(reverse=True),\n ),\n # Those should be out of range, and have no impact:\n SyntaxHighlightRange(\n start=SyntaxPosition(1, 100), # `column_index` is out of range\n end=SyntaxPosition(2, 2),\n style=Style(bold=True),\n ),\n SyntaxHighlightRange(\n start=SyntaxPosition(1, 1),\n end=SyntaxPosition(30, 2), # `line_number` is out of range\n style=Style(bold=True),\n ),\n ],\n )\n rendered_syntax = render(syntax, True)\n print(repr(rendered_syntax))\n expected = '\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 1 \\x1b[0m\\x1b[38;2;102;217;239;48;2;39;40;34md\\x1b[0m\\x1b[53;38;2;102;217;239;48;2;39;40;34me\\x1b[0m\\x1b[38;2;102;217;239;48;2;39;40;34mf\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;166;226;46;48;2;39;40;34mloop_\\x1b[0m\\x1b[4;38;2;166;226;46;48;2;39;40;34mfirst\\x1b[0m\\x1b[1;4;38;2;166;226;46;48;2;39;40;34m_last\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34m(\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34mvalues\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34m:\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34mIterable\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34m[\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34mT\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34m]\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34m)\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[1;38;2;249;38;114;48;2;39;40;34m-\\x1b[0m\\x1b[1;38;2;249;38;114;48;2;39;40;34m>\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34mIterable\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34m[\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34mTuple\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34m[\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34mbool\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34m,\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34mbool\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34m,\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34m 
\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34mT\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34m]\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34m]\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34m:\\x1b[0m\\n\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 2 \\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[1;38;2;230;219;116;48;2;39;40;34m\\x1b[0m\\n\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 3 \\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34miter_values\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[1;38;2;249;38;114;48;2;39;40;34m=\\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[1;38;2;248;248;242;48;2;39;40;34miter\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m(\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34mvalues\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m)\\x1b[0m\\n\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 4 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;102;217;239;48;2;39;40;34mtry\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m:\\x1b[0m\\n\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 5 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;249;38;114;48;2;39;40;34m=\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34mnext\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m(\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34miter_values\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m)\\x1b[0m\\n\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 6 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;102;217;239;48;2;39;40;34mexcept\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[7;38;2;166;226;46;48;2;39;40;34mStopIteration\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m:\\x1b[0m\\n\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 7 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;102;217;239;48;2;39;40;34mreturn\\x1b[0m\\n\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 8 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34mfirst\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;249;38;114;48;2;39;40;34m=\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;102;217;239;48;2;39;40;34mTrue\\x1b[0m\\n\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m 9 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;102;217;239;48;2;39;40;34mfor\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[9;38;2;248;248;242;48;2;39;40;34mvalue\\x1b[0m\\x1b[9;38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[9;38;2;249;38;114;48;2;39;40;34min\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34miter_values\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m:\\x1b[0m\\n\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m10 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;102;217;239;48;2;39;40;34myield\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34mfirst\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m,\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m 
\\x1b[0m\\x1b[38;2;102;217;239;48;2;39;40;34mFalse\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m,\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\\x1b[0m\\n\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m11 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34mfirst\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;249;38;114;48;2;39;40;34m=\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;102;217;239;48;2;39;40;34mFalse\\x1b[0m\\n\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m12 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;249;38;114;48;2;39;40;34m=\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34mvalue\\x1b[0m\\n\\x1b[1;38;2;227;227;221;48;2;39;40;34m \\x1b[0m\\x1b[38;2;101;102;96;48;2;39;40;34m13 \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;102;217;239;48;2;39;40;34myield\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34mfirst\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m,\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;102;217;239;48;2;39;40;34mTrue\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m,\\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34m \\x1b[0m\\x1b[38;2;248;248;242;48;2;39;40;34mprevious_value\\x1b[0m\\n'\n assert rendered_syntax == expected\n\n", "url": "https://github.com/Textualize/rich.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 833, "n_words": 193, "vocab_size": 121, "complexity": 1, "nloc": 48, "token_counts": 291, "n_ast_nodes": 728, "n_identifiers": 25, "d_id": 39061, "documentation": { "docstring": "Iterate and generate a tuple with a flag for first and last value.", "n_words": 13, "vocab_size": 11, "n_whitespaces": 12, "language": "en" } }, { "id": 57811, "commit_id": "36d9870433a22fff3944fa07f8e2feeb1b622bd9", "repo": "prefect", "path": "src/prefect/cli/deployment.py", "file_name": "deployment.py", "fun_name": "str_presenter", "commit_message": "Working YAML generation with lots of bells and whistles", "code": "def str_presenter(dumper, data):\n \n if len(data.splitlines()) > 1: # check for multiline string\n return dumper.represent_scalar(\"tag:yaml.org,2002:str\", data, style=\"|\")\n return dumper.represent_scalar(\"tag:yaml.org,2002:str\", data)\n\n\nyaml.add_representer(str, str_presenter)\nyaml.representer.SafeRepresenter.add_representer(str, str_presenter)\n\ndeployment_app = PrefectTyper(\n name=\"deployment\", help=\"Commands for working with deployments.\"\n)\napp.add_typer(deployment_app)\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 49, "n_words": 34, "vocab_size": 30, "complexity": 2, "nloc": 4, "token_counts": 42, "n_ast_nodes": 135, "n_identifiers": 18, "d_id": 11712, "documentation": { "docstring": "\n configures yaml for dumping multiline strings\n Ref: https://stackoverflow.com/questions/8640959/how-can-i-control-what-scalar-form-pyyaml-uses-for-my-data\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 18, "language": "en" } }, { "id": 207137, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_filters/tests.py", 
"file_name": "tests.py", "fun_name": "test_lookup_using_custom_divider", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_lookup_using_custom_divider(self):\n \n jane = Employee.objects.create(name=\"Jane,Green\", department=self.design)\n modeladmin = EmployeeCustomDividerFilterAdmin(Employee, site)\n employees = [jane, self.jack]\n\n request = self.request_factory.get(\n \"/\", {\"name__in\": \"|\".join(e.name for e in employees)}\n )\n # test for lookup with custom divider\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n # Make sure the correct queryset is returned\n queryset = changelist.get_queryset(request)\n self.assertEqual(list(queryset), employees)\n\n # test for lookup with comma in the lookup string\n request = self.request_factory.get(\"/\", {\"name\": jane.name})\n request.user = self.alfred\n changelist = modeladmin.get_changelist_instance(request)\n # Make sure the correct queryset is returned\n queryset = changelist.get_queryset(request)\n self.assertEqual(list(queryset), [jane])\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 229, "n_words": 85, "vocab_size": 48, "complexity": 2, "nloc": 16, "token_counts": 156, "n_ast_nodes": 259, "n_identifiers": 27, "d_id": 51875, "documentation": { "docstring": "\n Filter __in lookups with a custom divider.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 110264, "commit_id": "9b6abd0b4933811e0a45c2535ab8fd107db65dd9", "repo": "matplotlib", "path": "lib/matplotlib/colors.py", "file_name": "colors.py", "fun_name": "blend_soft_light", "commit_message": "DOC: improve grammar and consistency", "code": "def blend_soft_light(self, rgb, intensity):\n \n return 2 * intensity * rgb + (1 - 2 * intensity) * rgb**2\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 32, "n_words": 18, "vocab_size": 14, "complexity": 1, "nloc": 2, "token_counts": 28, "n_ast_nodes": 44, "n_identifiers": 4, "d_id": 24006, "documentation": { "docstring": "\n Combine an RGB image with an intensity map using \"soft light\" blending,\n using the \"pegtop\" formula.\n\n Parameters\n ----------\n rgb : ndarray\n An MxNx3 RGB array of floats ranging from 0 to 1 (color image).\n intensity : ndarray\n An MxNx1 array of floats ranging from 0 to 1 (grayscale image).\n\n Returns\n -------\n ndarray\n An MxNx3 RGB array representing the combined images.\n ", "n_words": 60, "vocab_size": 38, "n_whitespaces": 164, "language": "en" } }, { "id": 108461, "commit_id": "686c9e5a413e31c46bb049407d5eca285bcab76d", "repo": "matplotlib", "path": "lib/matplotlib/cm.py", "file_name": "cm.py", "fun_name": "__call__", "commit_message": "Fix spelling errors", "code": "def __call__(self):\n \n return list(self)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 21, "n_identifiers": 3, "d_id": 23201, "documentation": { "docstring": "\n Return a list of the registered colormap names.\n\n This exists only for backward-compatibility in `.pyplot` which had a\n ``plt.colormaps()`` method. 
The recommended way to get this list is\n now ``list(colormaps)``.\n ", "n_words": 30, "vocab_size": 28, "n_whitespaces": 66, "language": "en" } }, { "id": 271050, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/compile_utils.py", "file_name": "compile_utils.py", "fun_name": "get_custom_object_name", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def get_custom_object_name(obj):\n \n if hasattr(obj, \"name\"): # Accept `Loss` instance as `Metric`.\n return obj.name\n elif hasattr(obj, \"__name__\"): # Function.\n return obj.__name__\n elif hasattr(obj, \"__class__\"): # Class instance.\n return generic_utils.to_snake_case(obj.__class__.__name__)\n else: # Unrecognized object.\n return None\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 81, "n_words": 34, "vocab_size": 25, "complexity": 4, "nloc": 9, "token_counts": 53, "n_ast_nodes": 95, "n_identifiers": 8, "d_id": 80685, "documentation": { "docstring": "Returns the name to use for a custom loss or metric callable.\n\n Args:\n obj: Custom loss of metric callable\n\n Returns:\n Name to use, or `None` if the object was not recognized.\n ", "n_words": 31, "vocab_size": 26, "n_whitespaces": 50, "language": "en" } }, { "id": 269576, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "permute_dimensions", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def permute_dimensions(x, pattern):\n \n return tf.compat.v1.transpose(x, perm=pattern)\n\n\n@keras_export(\"keras.backend.resize_images\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.backend.resize_images\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 12, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 2, "token_counts": 23, "n_ast_nodes": 66, "n_identifiers": 14, "d_id": 80199, "documentation": { "docstring": "Permutes axes in a tensor.\n\n Args:\n x: Tensor or variable.\n pattern: A tuple of\n dimension indices, e.g. 
`(0, 2, 1)`.\n\n Returns:\n A tensor.\n\n Example:\n\n >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])\n >>> a\n \n >>> tf.keras.backend.permute_dimensions(a, pattern=(1, 0))\n \n\n ", "n_words": 87, "vocab_size": 57, "n_whitespaces": 238, "language": "en" } }, { "id": 66913, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/payroll/doctype/payroll_entry/payroll_entry.py", "file_name": "payroll_entry.py", "fun_name": "get_sal_struct", "commit_message": "style: format code with black", "code": "def get_sal_struct(company, currency, salary_slip_based_on_timesheet, condition):\n\treturn frappe.db.sql_list(\n\t\t.format(\n\t\t\tcondition=condition\n\t\t),\n\t\t{\n\t\t\t\"company\": company,\n\t\t\t\"currency\": currency,\n\t\t\t\"salary_slip_based_on_timesheet\": salary_slip_based_on_timesheet,\n\t\t},\n\t)\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 8, "n_words": 19, "vocab_size": 17, "complexity": 1, "nloc": 20, "token_counts": 43, "n_ast_nodes": 68, "n_identifiers": 9, "d_id": 14378, "documentation": { "docstring": "\n\t\tselect\n\t\t\tname from `tabSalary Structure`\n\t\twhere\n\t\t\tdocstatus = 1 and\n\t\t\tis_active = 'Yes'\n\t\t\tand company = %(company)s\n\t\t\tand currency = %(currency)s and\n\t\t\tifnull(salary_slip_based_on_timesheet,0) = %(salary_slip_based_on_timesheet)s\n\t\t\t{condition}", "n_words": 26, "vocab_size": 19, "n_whitespaces": 17, "language": "en" } }, { "id": 49732, "commit_id": "f4d6e64cdc132ae868699a0ba442f4ab1d304a14", "repo": "PaddleHub", "path": "modules/image/text_to_image/disco_diffusion_cnclip_vitb16/cn_clip/clip/modeling_bert.py", "file_name": "modeling_bert.py", "fun_name": "gelu_new", "commit_message": "add disco_diffusion_cnclip_vitb16 module", "code": "def gelu_new(x):\n \n return 0.5 * x * (1 + paddle.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * paddle.pow(x, 3))))\n\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 25, "n_words": 19, "vocab_size": 15, "complexity": 1, "nloc": 2, "token_counts": 49, "n_ast_nodes": 72, "n_identifiers": 8, "d_id": 9898, "documentation": { "docstring": " Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT).\n Also see https://arxiv.org/abs/1606.08415\n ", "n_words": 18, "vocab_size": 18, "n_whitespaces": 29, "language": "en" } }, { "id": 136720, "commit_id": "073e7bc04d989607848552537f9f5ac91fa07d85", "repo": "ray", "path": "python/ray/_private/metrics_agent.py", "file_name": "metrics_agent.py", "fun_name": "clean_stale_components", "commit_message": "[Dashboard] Remove opencensus from agent proxy export (#30469)\n\nThis PR removes the Opencensus usage on proxy export. Previously, OpenCensus APIs we are using for proxy export deepcopies the whole data {labels -> data} whenever there's a new export which causes O(N^2) write on metrics record. See the below section for more details on removing Opencensus.\r\n\r\nInstead of using their APIs, we will store the aggregation data in memory and export them using a custom Prometheus exporter (0 deepcopies, purely done by lock). Below is the flamegraph for the same workload (100 actors + submitting 1000 tasks per second + 1 second metrics export). Before this fix, the CPU usage was > 100% all the time. 
With this fix, the CPU usage is only about 10~15% with the same workload.", "code": "def clean_stale_components(self):\n \n with self._components_lock:\n stale_components = []\n stale_component_ids = []\n for id, component in self._components.items():\n elapsed = time.monotonic() - component.last_reported_time\n if elapsed > self._component_timeout_s:\n stale_component_ids.append(id)\n logger.info(\n \"Metrics from a worker ({}) is cleaned up due to \"\n \"timeout. Time since last report {}s\".format(id, elapsed)\n )\n for id in stale_component_ids:\n stale_components.append(self._components.pop(id))\n return stale_components\n\n # TODO(sang): add start and end timestamp", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 279, "n_words": 59, "vocab_size": 52, "complexity": 4, "nloc": 15, "token_counts": 90, "n_ast_nodes": 154, "n_identifiers": 19, "d_id": 30979, "documentation": { "docstring": "Clean up stale components.\n\n Stale means the component is dead or unresponsive.\n\n Stale components won't be reported to Prometheus anymore.\n ", "n_words": 20, "vocab_size": 19, "n_whitespaces": 41, "language": "en" } }, { "id": 101251, "commit_id": "5e73437be47f2410439a3c6716de96354e6a0c94", "repo": "faceswap", "path": "tools/alignments/jobs.py", "file_name": "jobs.py", "fun_name": "_annotate_pose", "commit_message": "lib.align updates:\n - alignments.py\n - Add typed dicts for imported alignments\n - Explicitly check for presence of thumb value in alignments dict\n - linting\n - detected_face.py\n - Typing\n - Linting\n - Legacy support for pre-aligned face\n - Update dependencies to new property names", "code": "def _annotate_pose(cls, image, face):\n \n center = np.array((face.aligned.size / 2,\n face.aligned.size / 2)).astype(\"int32\").reshape(1, 2)\n center = np.rint(face.aligned.transform_points(center, invert=True)).astype(\"int32\")\n points = face.aligned.pose.xyz_2d * face.aligned.size\n points = np.rint(face.aligned.transform_points(points, invert=True)).astype(\"int32\")\n cv2.line(image, tuple(center), tuple(points[1]), (0, 255, 0), 2)\n cv2.line(image, tuple(center), tuple(points[0]), (255, 0, 0), 2)\n cv2.line(image, tuple(center), tuple(points[2]), (0, 0, 255), 2)\n\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 129, "n_words": 47, "vocab_size": 29, "complexity": 1, "nloc": 9, "token_counts": 196, "n_ast_nodes": 291, "n_identifiers": 20, "d_id": 20671, "documentation": { "docstring": " Annotate the pose onto the frame.\n\n Parameters\n ----------\n image: :class:`numpy.ndarray`\n The frame that pose is to be annotated on to\n face: :class:`lib.align.AlignedFace`\n The aligned face loaded for head centering\n ", "n_words": 29, "vocab_size": 25, "n_whitespaces": 87, "language": "en" } }, { "id": 268868, "commit_id": "a449efe29b092e658a29cd847e0494979a47d252", "repo": "keras", "path": "keras/tests/keras_doctest.py", "file_name": "keras_doctest.py", "fun_name": "filter_on_submodules", "commit_message": "Add a keras doctest modeled on tensorflow doctest\n\nPiperOrigin-RevId: 424672415", "code": "def filter_on_submodules(all_modules, submodule):\n \n\n filtered_modules = [\n mod for mod in all_modules if PACKAGE + submodule in mod.__name__\n ]\n return filtered_modules\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, 
"ast_levels": 10, "n_whitespaces": 29, "n_words": 20, "vocab_size": 17, "complexity": 3, "nloc": 5, "token_counts": 27, "n_ast_nodes": 43, "n_identifiers": 7, "d_id": 79737, "documentation": { "docstring": "Filters all the modules based on the module flag.\n\n The module flag has to be relative to the core package imported.\n For example, if `submodule=keras.layers` then, this function will return\n all the modules in the submodule.\n\n Args:\n all_modules: All the modules in the core package.\n submodule: Submodule to filter from all the modules.\n\n Returns:\n All the modules in the submodule.\n ", "n_words": 60, "vocab_size": 38, "n_whitespaces": 75, "language": "en" } }, { "id": 259702, "commit_id": "69132ebbd39f070590ca01813340b5b12c0d02ab", "repo": "scikit-learn", "path": "sklearn/decomposition/_nmf.py", "file_name": "_nmf.py", "fun_name": "transform", "commit_message": "FEA Online implementation of non-negative matrix factorization (#16948)\n\nCo-authored-by: Tom Dupré la Tour \r\nCo-authored-by: jeremie du boisberranger \r\nCo-authored-by: Thomas J. Fan \r\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>", "code": "def transform(self, X):\n \n check_is_fitted(self)\n X = self._validate_data(\n X, accept_sparse=(\"csr\", \"csc\"), dtype=[np.float64, np.float32], reset=False\n )\n\n W = self._solve_W(X, self.components_, self._transform_max_iter)\n\n return W\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 74, "n_words": 21, "vocab_size": 19, "complexity": 1, "nloc": 7, "token_counts": 62, "n_ast_nodes": 96, "n_identifiers": 15, "d_id": 75877, "documentation": { "docstring": "Transform the data X according to the fitted MiniBatchNMF model.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n Data matrix to be transformed by the model.\n\n Returns\n -------\n W : ndarray of shape (n_samples, n_components)\n Transformed data.\n ", "n_words": 40, "vocab_size": 31, "n_whitespaces": 111, "language": "en" } }, { "id": 53269, "commit_id": "23365cf7727c45f38ad983d610ffec5c15ceca21", "repo": "prefect", "path": "src/prefect/cli/orion.py", "file_name": "orion.py", "fun_name": "kubernetes_manifest", "commit_message": "Add kubernetes manifest commands", "code": "def kubernetes_manifest():\n \n\n template = Template(\n (prefect.__module_path__ / \"cli\" / \"templates\" / \"kubernetes.yaml\").read_text()\n )\n manifest = template.substitute(\n {\n \"image_name\": get_prefect_image_name(),\n }\n )\n print(manifest)\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 72, "n_words": 22, "vocab_size": 18, "complexity": 1, "nloc": 10, "token_counts": 44, "n_ast_nodes": 83, "n_identifiers": 10, "d_id": 10764, "documentation": { "docstring": "\n Generates a kubernetes manifest for to deploy Orion to a cluster.\n\n Example:\n $ prefect orion kubernetes-manifest | kubectl apply -f -\n ", "n_words": 21, "vocab_size": 19, "n_whitespaces": 38, "language": "en" } }, { "id": 80945, "commit_id": "f52ef6e9677b01c111b012a8725da43a2580d8f1", "repo": "awx", "path": "awx/main/managers.py", "file_name": "managers.py", "fun_name": "active_count", "commit_message": "Fixes case sensitive host count", "code": "def active_count(self):\n \n return 
self.order_by().exclude(inventory_sources__source='controller').values(name_lower=Lower('name')).distinct().count()\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 37, "n_ast_nodes": 68, "n_identifiers": 10, "d_id": 17116, "documentation": { "docstring": "Return count of active, unique hosts for licensing.\n Construction of query involves:\n - remove any ordering specified in model's Meta\n - Exclude hosts sourced from another Tower\n - Restrict the query to only return the name column\n - Only consider results that are unique\n - Return the count of this query\n ", "n_words": 51, "vocab_size": 37, "n_whitespaces": 105, "language": "en" } }, { "id": 260298, "commit_id": "7f0b57e626d36a7c6d8f417261c6bbfe05376a98", "repo": "scikit-learn", "path": "sklearn/linear_model/tests/test_sgd.py", "file_name": "test_sgd.py", "fun_name": "test_partial_fit_weight_class_balanced", "commit_message": "MAINT parameter validation in SGD*, PassiveAgressive* and Perceptron (#23521)\n\nCo-authored-by: jeremie du boisberranger \r\nCo-authored-by: Guillaume Lemaitre \r\nCo-authored-by: Meekail Zain <34613774+Micky774@users.noreply.github.com>", "code": "def test_partial_fit_weight_class_balanced(klass):\n # partial_fit with class_weight='balanced' not supported", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"klass\", [SGDClassifier, SparseSGDClassifier])", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 10, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 13, "token_counts": 59, "n_ast_nodes": 121, "n_identifiers": 18, "d_id": 76168, "documentation": { "docstring": "\n regex = (\n r\"class_weight 'balanced' is not supported for \"\n r\"partial_fit\\. In order to use 'balanced' weights, \"\n r\"use compute_class_weight\\('balanced', classes=classes, y=y\\). \"\n r\"In place of y you can use a large enough sample \"\n r\"of the full training set target to properly \"\n r\"estimate the class frequency distributions\\. 
\"\n r\"Pass the resulting weights as the class_weight \"\n r\"parameter\\.\"\n )\n with pytest.raises(ValueError, match=regex):\n klass(class_weight=\"balanced\").partial_fit(X, Y, classes=np.unique(Y))\n\n\n@pytest.mark.parametrize(\"klass\", [SGDClassifier, SparseSGDClassifier])", "n_words": 69, "vocab_size": 57, "n_whitespaces": 140, "language": "en" } }, { "id": 177019, "commit_id": "b2f91c34a23058dd70b41784af0d87890216026a", "repo": "networkx", "path": "networkx/algorithms/tests/test_lowest_common_ancestors.py", "file_name": "test_lowest_common_ancestors.py", "fun_name": "test_naive_all_pairs_lowest_common_ancestor6", "commit_message": "Naive lowest common ancestor implementation (#5736)\n\n* Add naive lca methods\r\n\r\n* Naive algorithm implementation for LCA\r\n\r\n* Modify naive lca functions\r\n\r\n* Correct parameters of nx.ancestors\r\n\r\n* Update lowest_common_ancestors.py\r\n\r\n* Parametrize tests\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Yield instead of append\r\n\r\n* Tests for naive lca\r\n\r\n* Correct test cases for naive lca algorithms\r\n\r\n* Apply suggestions from code review\r\n\r\nCo-authored-by: Mridul Seth \r\n\r\n* Fix function name -when calling\r\n\r\n* Make requested changes\r\n\r\n* Inlining _get_a_lowest_common_ancestor\r\n\r\nCo-authored-by: dtuncturk \r\nCo-authored-by: Dan Schult \r\nCo-authored-by: Mridul Seth ", "code": "def test_naive_all_pairs_lowest_common_ancestor6(self):\n \n G = self.DG.copy()\n G.add_node(-1)\n gen = naive_all_pairs_lca(G, [(-1, -1), (-1, 0)])\n assert dict(gen) == {(-1, -1): -1}\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 54, "n_words": 19, "vocab_size": 18, "complexity": 1, "nloc": 5, "token_counts": 63, "n_ast_nodes": 101, "n_identifiers": 9, "d_id": 42231, "documentation": { "docstring": "Test that pairs with no LCA specified emits nothing.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 66968, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/payroll/report/salary_register/salary_register.py", "file_name": "salary_register.py", "fun_name": "get_columns", "commit_message": "style: format code with black", "code": "def get_columns(salary_slips):\n\t\n\tcolumns = [\n\t\t_(\"Salary Slip ID\") + \":Link/Salary Slip:150\",\n\t\t_(\"Employee\") + \":Link/Employee:120\",\n\t\t_(\"Employee Name\") + \"::140\",\n\t\t_(\"Date of Joining\") + \"::80\",\n\t\t_(\"Branch\") + \":Link/Branch:-1\",\n\t\t_(\"Department\") + \":Link/Department:-1\",\n\t\t_(\"Designation\") + \":Link/Designation:120\",\n\t\t_(\"Company\") + \":Link/Company:120\",\n\t\t_(\"Start Date\") + \"::80\",\n\t\t_(\"End Date\") + \"::80\",\n\t\t_(\"Leave Without Pay\") + \":Float:50\",\n\t\t_(\"Payment Days\") + \":Float:120\",\n\t]\n\n\tsalary_components = {_(\"Earning\"): [], _(\"Deduction\"): []}\n\n\tfor component in frappe.db.sql(\n\t\t\n\t\t% (\", \".join([\"%s\"] * len(salary_slips))),\n\t\ttuple([d.name for d in salary_slips]),\n\t\tas_dict=1,\n\t):\n\t\tsalary_components[_(component.type)].append(component.salary_component)\n\n\tcolumns = (\n\t\tcolumns\n\t\t+ [(e + \":Currency:120\") for e in salary_components[_(\"Earning\")]]\n\t\t+ [_(\"Gross Pay\") + \":Currency:120\"]\n\t\t+ [(d + \":Currency:120\") for d in salary_components[_(\"Deduction\")]]\n\t\t+ [\n\t\t\t_(\"Loan Repayment\") + \":Currency:120\",\n\t\t\t_(\"Total 
Deduction\") + \":Currency:120\",\n\t\t\t_(\"Net Pay\") + \":Currency:120\",\n\t\t]\n\t)\n\n\treturn columns, salary_components[_(\"Earning\")], salary_components[_(\"Deduction\")]\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 87, "n_words": 121, "vocab_size": 79, "complexity": 5, "nloc": 37, "token_counts": 267, "n_ast_nodes": 483, "n_identifiers": 19, "d_id": 14394, "documentation": { "docstring": "\n\tcolumns = [\n\t _(\"Salary Slip ID\") + \":Link/Salary Slip:150\",\n\t _(\"Employee\") + \":Link/Employee:120\",\n\t _(\"Employee Name\") + \"::140\",\n\t _(\"Date of Joining\") + \"::80\",\n\t _(\"Branch\") + \":Link/Branch:120\",\n\t _(\"Department\") + \":Link/Department:120\",\n\t _(\"Designation\") + \":Link/Designation:120\",\n\t _(\"Company\") + \":Link/Company:120\",\n\t _(\"Start Date\") + \"::80\",\n\t _(\"End Date\") + \"::80\",\n\t _(\"Leave Without Pay\") + \":Float:130\",\n\t _(\"Payment Days\") + \":Float:120\",\n\t _(\"Currency\") + \":Link/Currency:80\"\n\t]\n\tselect distinct sd.salary_component, sc.type\n\t\tfrom `tabSalary Detail` sd, `tabSalary Component` sc\n\t\twhere sc.name=sd.salary_component and sd.amount != 0 and sd.parent in (%s)", "n_words": 75, "vocab_size": 58, "n_whitespaces": 161, "language": "en" } }, { "id": 274614, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/metrics/__init__.py", "file_name": "__init__.py", "fun_name": "serialize", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def serialize(metric):\n \n return serialize_keras_object(metric)\n\n\n@keras_export(\"keras.metrics.deserialize\")", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.metrics.deserialize\")", "n_ast_errors": 1, "ast_levels": 7, "n_whitespaces": 10, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 32, "n_identifiers": 4, "d_id": 81240, "documentation": { "docstring": "Serializes metric function or `Metric` instance.\n\n Args:\n metric: A Keras `Metric` instance or a metric function.\n\n Returns:\n Metric configuration dictionary.\n ", "n_words": 20, "vocab_size": 17, "n_whitespaces": 39, "language": "en" } }, { "id": 64417, "commit_id": "3936d8b70e4847dddd49bf467fcbc6e2fcd106c5", "repo": "erpnext", "path": "erpnext/payroll/report/income_tax_deductions/income_tax_deductions.py", "file_name": "income_tax_deductions.py", "fun_name": "get_data", "commit_message": "refactor: remove India specific code", "code": "def get_data(filters):\n\n\tdata = []\n\n\tcomponent_types = frappe.db.sql()\n\n\tcomponent_types = [comp_type[0] for comp_type in component_types]\n\n\tif not len(component_types):\n\t\treturn []\n\n\tconditions = get_conditions(filters)\n\n\tentry = frappe.db.sql( % (conditions , \", \".join(['%s']*len(component_types))), tuple(component_types), as_dict=1)\n\n\tfor d in entry:\n\t\tdata.append({\n\t\t\t\"employee\": d.employee,\n\t\t\t\"employee_name\": d.employee_name,\n\t\t\t\"it_comp\": d.salary_component,\n\t\t\t\"posting_date\": d.posting_date,\n\t\t\t\"it_amount\": d.amount,\n\t\t\t\"gross_pay\": d.gross_pay\n\t\t})\n\n\treturn data\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 35, "n_words": 53, "vocab_size": 43, "complexity": 4, "nloc": 26, "token_counts": 133, 
"n_ast_nodes": 220, "n_identifiers": 23, "d_id": 13631, "documentation": { "docstring": " select name from `tabSalary Component`\n\t\twhere is_income_tax_component = 1 select sal.employee, sal.employee_name, sal.posting_date, ded.salary_component, ded.amount,sal.gross_pay\n\t\tfrom `tabSalary Slip` sal, `tabSalary Detail` ded\n\t\twhere sal.name = ded.parent\n\t\tand ded.parentfield = 'deductions'\n\t\tand ded.parenttype = 'Salary Slip'\n\t\tand sal.docstatus = 1 %s\n\t\tand ded.salary_component in (%s)\n\t", "n_words": 44, "vocab_size": 31, "n_whitespaces": 38, "language": "en" } }, { "id": 197803, "commit_id": "d37a3c05b98c8144d401fa264af687a525b5e39c", "repo": "sympy", "path": "sympy/polys/numberfields/modules.py", "file_name": "modules.py", "fun_name": "generator", "commit_message": "Improve printing for `PrimeIdeal`\n\n* Support latex printing\n* Rename `_pretty()` --> `repr()` since this is not 2D printing.\n* Provide a `__str__()` method, which prints less info than the `__repr__()` method.", "code": "def generator(self):\n \n K = self.module.number_field\n return K.ext.alias if K and K.ext.is_aliased else self.T.gen\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 34, "n_words": 13, "vocab_size": 12, "complexity": 3, "nloc": 3, "token_counts": 33, "n_ast_nodes": 53, "n_identifiers": 10, "d_id": 48701, "documentation": { "docstring": "\n Return a :py:class:`~.Symbol` to be used when expressing this element\n as a polynomial.\n\n If we have an associated :py:class:`~.AlgebraicField` whose primitive\n element has an alias symbol, we use that. Otherwise we use the variable\n of the minimal polynomial defining the power basis to which we belong.\n ", "n_words": 46, "vocab_size": 36, "n_whitespaces": 89, "language": "en" } }, { "id": 247568, "commit_id": "ef3619e61d84493d98470eb2a69131d15eb1166b", "repo": "synapse", "path": "tests/storage/test_background_update.py", "file_name": "test_background_update.py", "fun_name": "test_disabling_background_update_sleep", "commit_message": "Add config settings for background update parameters (#11980)", "code": "def test_disabling_background_update_sleep(self):\n \n\n self.get_success(\n self.store.db_pool.simple_insert(\n \"background_updates\",\n values={\"update_name\": \"test_update\", \"progress_json\": '{\"my_key\": 1}'},\n )\n )\n\n self.update_handler.side_effect = self.update\n self.update_handler.reset_mock()\n self.updates.start_doing_background_updates(),\n\n # 2: advance the reactor very little\n self.reactor.pump([0.025])\n # check that an update has run\n self.update_handler.assert_called()\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 155, "n_words": 33, "vocab_size": 31, "complexity": 1, "nloc": 12, "token_counts": 77, "n_ast_nodes": 133, "n_identifiers": 16, "d_id": 71746, "documentation": { "docstring": "\n Test that disabling sleep in the config results in bg update not sleeping\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 28, "language": "en" } }, { "id": 247972, "commit_id": "85ca963c1add5ca12f59238a50dfc63df4846bb7", "repo": "synapse", "path": "tests/module_api/test_account_data_manager.py", "file_name": "test_account_data_manager.py", "fun_name": "test_get_global_no_mutability", "commit_message": "Add Module API for reading and writing global account data. 
(#12391)", "code": "def test_get_global_no_mutability(self) -> None:\n \n # First add some account data to set up the test.\n self.get_success(\n self._store.add_account_data_for_user(\n self.user_id, \"test.data\", {\"wombat\": True}\n )\n )\n\n # Now request that data and then mutate it (out of negligence or otherwise).\n the_data = self.get_success(\n self._account_data_mgr.get_global(self.user_id, \"test.data\")\n )\n with self.assertRaises(TypeError):\n # This throws an exception because it's a frozen dict.\n the_data[\"wombat\"] = False\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 184, "n_words": 58, "vocab_size": 51, "complexity": 1, "nloc": 15, "token_counts": 64, "n_ast_nodes": 114, "n_identifiers": 11, "d_id": 72031, "documentation": { "docstring": "\n Tests that modules can't introduce bugs into Synapse by mutating the result\n of `get_global`.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 36, "language": "en" } }, { "id": 184573, "commit_id": "6ee4d41bb7a39238a18949f5648773562c6a1c9b", "repo": "textual", "path": "src/textual/geometry.py", "file_name": "geometry.py", "fun_name": "size", "commit_message": "docs", "code": "def size(self) -> Size:\n \n return Size(self.width, self.height)\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 8, "token_counts": 19, "n_ast_nodes": 32, "n_identifiers": 5, "d_id": 44676, "documentation": { "docstring": "Get the size of the region.\n\n Returns:\n Size: Size of the region.\n\n ", "n_words": 12, "vocab_size": 8, "n_whitespaces": 37, "language": "en" } }, { "id": 185774, "commit_id": "9748850657337ba31f220387e4a7777a87ec019a", "repo": "textual", "path": "tests/test_widget_removing.py", "file_name": "test_widget_removing.py", "fun_name": "test_remove_order", "commit_message": "Add a unit test for removal ordering via Widget.remove", "code": "async def test_remove_order():\n \n\n removals: list[str] = []\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 10, "token_counts": 87, "n_ast_nodes": 27, "n_identifiers": 4, "d_id": 45174, "documentation": { "docstring": "The removal of a top-level widget should cause bottom-first removal.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 178696, "commit_id": "6b317645a6edf73a8628229c540555142725478d", "repo": "Nuitka", "path": "nuitka/plugins/standard/PySidePyQtPlugin.py", "file_name": "PySidePyQtPlugin.py", "fun_name": "createPreModuleLoadCode", "commit_message": "Plugins: Minor cleanups", "code": "def createPreModuleLoadCode(self, module):\n \n\n # This is only relevant on standalone mode for Windows\n if not isStandaloneMode():\n return\n\n full_name = module.getFullName()\n\n if full_name == self.binding_name and isWin32Windows():\n code = \n yield (\n code,\n \"Adding binary folder to runtime 'PATH' environment variable for proper Qt loading.\",\n )\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 154, "n_words": 44, "vocab_size": 40, "complexity": 4, "nloc": 14, "token_counts": 43, "n_ast_nodes": 78, "n_identifiers": 9, "d_id": 42792, 
"documentation": { "docstring": "Method called when a module is being imported.\n\n Notes:\n If full name equals to the binding we insert code to include the dist\n folder in the 'PATH' environment variable (on Windows only).\n\n Args:\n module: the module object\n Returns:\n Code to insert and descriptive text (tuple), or (None, None).\n import os\npath = os.environ.get(\"PATH\", \"\")\nif not path.startswith(__nuitka_binary_dir):\n os.environ[\"PATH\"] = __nuitka_binary_dir + \";\" + path\n", "n_words": 64, "vocab_size": 54, "n_whitespaces": 136, "language": "en" } }, { "id": 261798, "commit_id": "9a90af51510c0722ab880061107e5cfdcf09192f", "repo": "scikit-learn", "path": "sklearn/covariance/_shrunk_covariance.py", "file_name": "_shrunk_covariance.py", "fun_name": "ledoit_wolf", "commit_message": "MAINT Parameters validation for covariance.ledoit_wolf (#24870)\n\nCo-authored-by: Guillaume Lemaitre \r\nCo-authored-by: jeremiedbb ", "code": "def ledoit_wolf(X, *, assume_centered=False, block_size=1000):\n \n estimator = LedoitWolf(\n assume_centered=assume_centered,\n block_size=block_size,\n store_precision=False,\n ).fit(X)\n\n return estimator.covariance_, estimator.shrinkage_\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 48, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 7, "token_counts": 46, "n_ast_nodes": 70, "n_identifiers": 10, "d_id": 77003, "documentation": { "docstring": "Estimate the shrunk Ledoit-Wolf covariance matrix.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Data from which to compute the covariance estimate.\n\n assume_centered : bool, default=False\n If True, data will not be centered before computation.\n Useful to work with data whose mean is significantly equal to\n zero but is not exactly zero.\n If False, data will be centered before computation.\n\n block_size : int, default=1000\n Size of blocks into which the covariance matrix will be split.\n This is purely a memory optimization and does not affect results.\n\n Returns\n -------\n shrunk_cov : ndarray of shape (n_features, n_features)\n Shrunk covariance.\n\n shrinkage : float\n Coefficient in the convex combination used for the computation\n of the shrunk estimate.\n\n Notes\n -----\n The regularized (shrunk) covariance is:\n\n (1 - shrinkage) * cov + shrinkage * mu * np.identity(n_features)\n\n where mu = trace(cov) / n_features\n ", "n_words": 145, "vocab_size": 103, "n_whitespaces": 263, "language": "en" } }, { "id": 61169, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/utils/direct_url_helpers.py", "file_name": "direct_url_helpers.py", "fun_name": "dist_get_direct_url", "commit_message": "upd; format", "code": "def dist_get_direct_url(dist):\n # type: (Distribution) -> Optional[DirectUrl]\n \n if not dist.has_metadata(DIRECT_URL_METADATA_NAME):\n return None\n try:\n return DirectUrl.from_json(dist.get_metadata(DIRECT_URL_METADATA_NAME))\n except (\n DirectUrlValidationError,\n json.JSONDecodeError,\n UnicodeDecodeError,\n ) as e:\n logger.warning(\n \"Error parsing %s for %s: %s\",\n DIRECT_URL_METADATA_NAME,\n dist.project_name,\n e,\n )\n return None\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 154, 
"n_words": 36, "vocab_size": 32, "complexity": 3, "nloc": 17, "token_counts": 62, "n_ast_nodes": 100, "n_identifiers": 15, "d_id": 12422, "documentation": { "docstring": "Obtain a DirectUrl from a pkg_resource.Distribution.\n\n Returns None if the distribution has no `direct_url.json` metadata,\n or if `direct_url.json` is invalid.\n ", "n_words": 20, "vocab_size": 17, "n_whitespaces": 29, "language": "en" } }, { "id": 226571, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_cone.py", "file_name": "_cone.py", "fun_name": "cmax", "commit_message": "switch to black .22", "code": "def cmax(self):\n \n return self[\"cmax\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 58244, "documentation": { "docstring": "\n Sets the upper bound of the color domain. Value should have the\n same units as u/v/w norm and if set, `cmin` must be set as\n well.\n\n The 'cmax' property is a number and may be specified as:\n - An int or float\n\n Returns\n -------\n int|float\n ", "n_words": 45, "vocab_size": 40, "n_whitespaces": 111, "language": "en" } }, { "id": 209126, "commit_id": "20ac1d00389d0735e6d8cd1347f0a53f478144ba", "repo": "scapy", "path": "scapy/layers/inet.py", "file_name": "inet.py", "fun_name": "tcp_pseudoheader", "commit_message": "Support TCP-MD5 and TCP-AO (#3358)\n\nSupport TCP-MD5 and TCP-AO", "code": "def tcp_pseudoheader(tcp):\n # type: (TCP) -> bytes\n \n if isinstance(tcp.underlayer, IP):\n plen = len(bytes(tcp))\n return in4_pseudoheader(socket.IPPROTO_TCP, tcp.underlayer, plen)\n elif conf.ipv6_enabled and _is_ipv6_layer(tcp.underlayer):\n plen = len(bytes(tcp))\n return raw(scapy.layers.inet6.in6_pseudoheader(\n socket.IPPROTO_TCP, tcp.underlayer, plen))\n else:\n raise ValueError(\"TCP packet does not have IP or IPv6 underlayer\")\n\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 101, "n_words": 40, "vocab_size": 35, "complexity": 4, "nloc": 10, "token_counts": 88, "n_ast_nodes": 142, "n_identifiers": 20, "d_id": 52615, "documentation": { "docstring": "Pseudoheader of a TCP packet as bytes\n\n Requires underlayer to be either IP or IPv6\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 21, "language": "en" } }, { "id": 266587, "commit_id": "43e55db20821a1341d21ffa1e4e7e6185b244105", "repo": "ansible", "path": "lib/ansible/galaxy/api.py", "file_name": "api.py", "fun_name": "get_collection_version_metadata", "commit_message": "ansible-galaxy - add signature verification of the MANIFEST.json (#76681)\n\n* ansible-galaxy collection install|verify:\r\n\r\n - Support verifying the origin of the MANIFEST.json when the Galaxy server has provided signatures.\r\n - Allow supplemental signatures to use during verification on the CLI/requirements file.\r\n\r\n* ansible-galaxy collection install:\r\n\r\n - Support disabling signature verification. This silences the warning provided by ansible-galaxy if the Galaxy server provided signatures it cannot use because no keyring is configured.\r\n - Store Galaxy server metadata alongside installed collections for provenance. 
This is used by 'ansible-galaxy collection verify --offline'.\r\n\r\n* Add unit tests for method that gets signatures from a Galaxy server\r\n\r\n* Add integration tests for user-provided signature sources\r\n\r\n- Test CLI option combinations\r\n- Test installing collections with valid/invalid signature sources\r\n- Test disabling GPG verification when installing collections\r\n- Test verifying collections with valid/invalid signature sources\r\n\r\n* Make signature verification advisory-by-default if signatures are provided by the Galaxy server\r\n\r\n- Make the default keyring None\r\n- Warn if the keyring is None but the Galaxy server provided signatures\r\n- Error if the keyring is None but the user supplied signatures\r\n- Error if the keyring is not None but is invalid\r\n\r\n* changelog\r\n\r\n* add ansible-galaxy user documentation for new options\r\n\r\nCo-authored-by: Matt Martz \r\nCo-authored-by: Sviatoslav Sydorenko \r\nCo-authored-by: Martin Krizek \r\nCo-authored-by: Sandra McCann \r\nCo-authored-by: Andy Mott \r\nCo-authored-by: John R Barker ", "code": "def get_collection_version_metadata(self, namespace, name, version):\n \n api_path = self.available_api_versions.get('v3', self.available_api_versions.get('v2'))\n url_paths = [self.api_server, api_path, 'collections', namespace, name, 'versions', version, '/']\n\n n_collection_url = _urljoin(*url_paths)\n error_context_msg = 'Error when getting collection version metadata for %s.%s:%s from %s (%s)' \\\n % (namespace, name, version, self.name, self.api_server)\n data = self._call_galaxy(n_collection_url, error_context_msg=error_context_msg, cache=True)\n self._set_cache()\n\n signatures = data.get('signatures') or []\n\n return CollectionVersionMetadata(data['namespace']['name'], data['collection']['name'], data['version'],\n data['download_url'], data['artifact']['sha256'],\n data['metadata']['dependencies'], data['href'], signatures)\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 232, "n_words": 62, "vocab_size": 53, "complexity": 2, "nloc": 12, "token_counts": 163, "n_ast_nodes": 262, "n_identifiers": 19, "d_id": 78486, "documentation": { "docstring": "\n Gets the collection information from the Galaxy server about a specific Collection version.\n\n :param namespace: The collection namespace.\n :param name: The collection name.\n :param version: Version of the collection to get the information for.\n :return: CollectionVersionMetadata about the collection at the version requested.\n ", "n_words": 43, "vocab_size": 29, "n_whitespaces": 86, "language": "en" } }, { "id": 20897, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/typing_extensions.py", "file_name": "typing_extensions.py", "fun_name": "get_args", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def get_args(tp):\n \n if isinstance(tp, 
_AnnotatedAlias):\n return (tp.__origin__,) + tp.__metadata__\n if isinstance(tp, (typing._GenericAlias, GenericAlias)):\n if getattr(tp, \"_special\", False):\n return ()\n res = tp.__args__\n if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:\n res = (list(res[:-1]), res[-1])\n return res\n return ()\n\n\n# 3.10+\nif hasattr(typing, 'TypeAlias'):\n TypeAlias = typing.TypeAlias\n# 3.9\nelif sys.version_info[:2] >= (3, 9):", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 166, "n_words": 54, "vocab_size": 39, "complexity": 6, "nloc": 11, "token_counts": 101, "n_ast_nodes": 203, "n_identifiers": 22, "d_id": 3611, "documentation": { "docstring": "Get type arguments with all substitutions performed.\n\n For unions, basic simplifications used by Union constructor are performed.\n Examples::\n get_args(Dict[str, int]) == (str, int)\n get_args(int) == ()\n get_args(Union[int, Union[T, int], str][int]) == (int, str)\n get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])\n get_args(Callable[[], T][int]) == ([], int)\n ", "n_words": 45, "vocab_size": 36, "n_whitespaces": 121, "language": "en" } }, { "id": 43883, "commit_id": "2fdc23333909096d427171002582e2906f8bbc0a", "repo": "airflow", "path": "tests/models/test_dag.py", "file_name": "test_dag.py", "fun_name": "test_following_previous_schedule", "commit_message": "Fix remaining mypy issues in \"core\" Airflow (#20795)\n\nCo-authored-by: Josh Fell \r\nCo-authored-by: Tzu-ping Chung \r\nCo-authored-by: Jarek Potiuk ", "code": "def test_following_previous_schedule(self):\n \n local_tz = pendulum.timezone('Europe/Zurich')\n start = local_tz.convert(datetime.datetime(2018, 10, 28, 2, 55), dst_rule=pendulum.PRE_TRANSITION)\n assert start.isoformat() == \"2018-10-28T02:55:00+02:00\", \"Pre-condition: start date is in DST\"\n\n utc = timezone.convert_to_utc(start)\n assert utc.isoformat() == \"2018-10-28T00:55:00+00:00\", \"Pre-condition: correct DST->UTC conversion\"\n\n dag = DAG('tz_dag', start_date=start, schedule_interval='*/5 * * * *')\n _next = dag.following_schedule(utc)\n next_local = local_tz.convert(_next)\n\n assert _next.isoformat() == \"2018-10-28T01:00:00+00:00\"\n assert next_local.isoformat() == \"2018-10-28T02:00:00+01:00\"\n\n prev = dag.previous_schedule(utc)\n prev_local = local_tz.convert(prev)\n\n assert prev_local.isoformat() == \"2018-10-28T02:50:00+02:00\"\n\n prev = dag.previous_schedule(_next)\n prev_local = local_tz.convert(prev)\n\n assert prev_local.isoformat() == \"2018-10-28T02:55:00+02:00\"\n assert prev == utc\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 207, "n_words": 81, "vocab_size": 50, "complexity": 1, "nloc": 18, "token_counts": 167, "n_ast_nodes": 284, "n_identifiers": 23, "d_id": 8084, "documentation": { "docstring": "\n Make sure DST transitions are properly observed\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 9376, "commit_id": "7375ee364e0df2a417f92593e09557f1b2a3575a", "repo": "insightface", "path": "reconstruction/ostec/external/stylegan2/dnnlib/submission/run_context.py", "file_name": "run_context.py", "fun_name": "close", "commit_message": "initialize ostec", "code": "def close(self) -> None:\n \n if not self.has_closed:\n # update the run.txt with stopping time\n self.run_txt_data[\"stop_time\"] = datetime.datetime.now().isoformat(sep=\" 
\")\n with open(os.path.join(self.submit_config.run_dir, \"run.txt\"), \"w\") as f:\n pprint.pprint(self.run_txt_data, stream=f, indent=4, width=200, compact=False)\n self.has_closed = True\n\n # detach the global singleton\n global _run_context\n if _run_context is self:\n _run_context = None\n", "url": "https://github.com/deepinsight/insightface.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 167, "n_words": 46, "vocab_size": 37, "complexity": 3, "nloc": 11, "token_counts": 97, "n_ast_nodes": 163, "n_identifiers": 21, "d_id": 1590, "documentation": { "docstring": "Close the context and clean up.\n Should only be called once.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 17, "language": "en" } }, { "id": 178724, "commit_id": "98badaaafd4e56529378947358acae489035fa1e", "repo": "Nuitka", "path": "nuitka/utils/Execution.py", "file_name": "Execution.py", "fun_name": "wrapCommandForDebuggerForExec", "commit_message": "Windows: Make running in debugger work with cmd files as well", "code": "def wrapCommandForDebuggerForExec(*args):\n \n\n gdb_path = getExecutablePath(\"gdb\")\n\n # Windows extra ball, attempt the downloaded one.\n if isWin32Windows() and gdb_path is None:\n from nuitka.Options import assumeYesForDownloads\n\n mingw64_gcc_path = getCachedDownloadedMinGW64(\n target_arch=getArchitecture(),\n assume_yes_for_downloads=assumeYesForDownloads(),\n )\n\n with withEnvironmentPathAdded(\"PATH\", os.path.dirname(mingw64_gcc_path)):\n lldb_path = getExecutablePath(\"lldb\")\n\n if gdb_path is None and lldb_path is None:\n lldb_path = getExecutablePath(\"lldb\")\n\n if lldb_path is None:\n general.sysexit(\"Error, no 'gdb' or 'lldb' binary found in path.\")\n\n if gdb_path is not None:\n args = (gdb_path, \"gdb\", \"-ex=run\", \"-ex=where\", \"-ex=quit\", \"--args\") + args\n else:\n args = (lldb_path, \"lldb\", \"-o\", \"run\", \"-o\", \"bt\", \"-o\", \"quit\", \"--\") + args\n\n return args\n\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 214, "n_words": 90, "vocab_size": 60, "complexity": 7, "nloc": 19, "token_counts": 142, "n_ast_nodes": 254, "n_identifiers": 20, "d_id": 42804, "documentation": { "docstring": "Wrap a command for system debugger to call exec\n\n Args:\n args: (list of str) args for call to be debugged\n Returns:\n args tuple with debugger command inserted\n\n Notes:\n Currently only gdb and lldb are supported, but adding more\n debuggers would be very welcome.\n ", "n_words": 43, "vocab_size": 36, "n_whitespaces": 83, "language": "en" } }, { "id": 24501, "commit_id": "ddaa2c2552e19635cd6cdf38619f1f176c358f89", "repo": "PaddleOCR", "path": "ppstructure/table/table_master_match.py", "file_name": "table_master_match.py", "fun_name": "sort_bbox", "commit_message": "add SLANet", "code": "def sort_bbox(end2end_xywh_bboxes, no_match_end2end_indexes):\n \n groups = []\n bbox_groups = []\n for index, end2end_xywh_bbox in zip(no_match_end2end_indexes,\n end2end_xywh_bboxes):\n this_bbox = end2end_xywh_bbox\n if len(groups) == 0:\n groups.append([index])\n bbox_groups.append([this_bbox])\n else:\n flag = False\n for g, bg in zip(groups, bbox_groups):\n # this_bbox is belong to bg's row or not\n if is_abs_lower_than_threshold(this_bbox, bg[0]):\n g.append(index)\n bg.append(this_bbox)\n flag = True\n break\n if not flag:\n # this_bbox is not belong to bg's row, create a row.\n groups.append([index])\n 
bbox_groups.append([this_bbox])\n\n # sorted bboxes in a group\n tmp_groups, tmp_bbox_groups = [], []\n for g, bg in zip(groups, bbox_groups):\n g_sorted, bg_sorted = sort_line_bbox(g, bg)\n tmp_groups.append(g_sorted)\n tmp_bbox_groups.append(bg_sorted)\n\n # sorted groups, sort by coord y's value.\n sorted_groups = [None] * len(tmp_groups)\n sorted_bbox_groups = [None] * len(tmp_bbox_groups)\n ys = [bg[0][1] for bg in tmp_bbox_groups]\n sorted_ys = sorted(ys)\n for g, bg in zip(tmp_groups, tmp_bbox_groups):\n idx = sorted_ys.index(bg[0][1])\n sorted_groups[idx] = g\n sorted_bbox_groups[idx] = bg\n\n # flatten, get final result\n end2end_sorted_idx_list, end2end_sorted_bbox_list \\\n = flatten(sorted_groups, sorted_bbox_groups)\n\n # check sorted\n #img = cv2.imread('/data_0/yejiaquan/data/TableRecognization/singleVal/PMC3286376_004_00.png')\n #img = drawBboxAfterSorted(img, sorted_groups, sorted_bbox_groups)\n\n return end2end_sorted_idx_list, end2end_sorted_bbox_list, sorted_groups, sorted_bbox_groups\n\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 534, "n_words": 162, "vocab_size": 98, "complexity": 9, "nloc": 36, "token_counts": 260, "n_ast_nodes": 411, "n_identifiers": 29, "d_id": 4748, "documentation": { "docstring": "\n This function will group the render end2end bboxes in row.\n :param end2end_xywh_bboxes:\n :param no_match_end2end_indexes:\n :return:\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 31, "language": "en" } }, { "id": 62044, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/manifest.py", "file_name": "manifest.py", "fun_name": "process_directive", "commit_message": "upd; format", "code": "def process_directive(self, directive):\n \n # Parse the line: split it up, make sure the right number of words\n # is there, and return the relevant words. 'action' is always\n # defined: it's the first word of the line. 
Which of the other\n # three are defined depends on the action; it'll be either\n # patterns, (dir and patterns), or (dirpattern).\n action, patterns, thedir, dirpattern = self._parse_directive(directive)\n\n # OK, now we know that the action is valid and we have the\n # right number of words on the line for that action -- so we\n # can proceed with minimal error-checking.\n if action == 'include':\n for pattern in patterns:\n if not self._include_pattern(pattern, anchor=True):\n logger.warning('no files found matching %r', pattern)\n\n elif action == 'exclude':\n for pattern in patterns:\n found = self._exclude_pattern(pattern, anchor=True)\n #if not found:\n # logger.warning('no previously-included files '\n # 'found matching %r', pattern)\n\n elif action == 'global-include':\n for pattern in patterns:\n if not self._include_pattern(pattern, anchor=False):\n logger.warning('no files found matching %r '\n 'anywhere in distribution', pattern)\n\n elif action == 'global-exclude':\n for pattern in patterns:\n found = self._exclude_pattern(pattern, anchor=False)\n #if not found:\n # logger.warning('no previously-included files '\n # 'matching %r found anywhere in '\n # 'distribution', pattern)\n\n elif action == 'recursive-include':\n for pattern in patterns:\n if not self._include_pattern(pattern, prefix=thedir):\n logger.warning('no files found matching %r '\n 'under directory %r', pattern, thedir)\n\n elif action == 'recursive-exclude':\n for pattern in patterns:\n found = self._exclude_pattern(pattern, prefix=thedir)\n #if not found:\n # logger.warning('no previously-included files '\n # 'matching %r found under directory %r',\n # pattern, thedir)\n\n elif action == 'graft':\n if not self._include_pattern(None, prefix=dirpattern):\n logger.warning('no directories found matching %r',\n dirpattern)\n\n elif action == 'prune':\n if not self._exclude_pattern(None, prefix=dirpattern):\n logger.warning('no previously-included directories found '\n 'matching %r', dirpattern)\n else: # pragma: no cover\n # This should never happen, as it should be caught in\n # _parse_template_line\n raise DistlibException(\n 'invalid action %r' % action)\n\n #\n # Private API\n #\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 1158, "n_words": 307, "vocab_size": 136, "complexity": 20, "nloc": 36, "token_counts": 247, "n_ast_nodes": 437, "n_identifiers": 17, "d_id": 12851, "documentation": { "docstring": "\n Process a directive which either adds some files from ``allfiles`` to\n ``files``, or removes some files from ``files``.\n\n :param directive: The directive to process. 
This should be in a format\n compatible with distutils ``MANIFEST.in`` files:\n\n http://docs.python.org/distutils/sourcedist.html#commands\n ", "n_words": 36, "vocab_size": 30, "n_whitespaces": 105, "language": "en" } }, { "id": 184969, "commit_id": "6f7d3b5ad711aa7df62ca6b3fca5cd638dcec665", "repo": "textual", "path": "src/textual/color.py", "file_name": "color.py", "fun_name": "hex6", "commit_message": "text color", "code": "def hex6(self) -> str:\n \n r, g, b, a = self.clamped\n return f\"#{r:02X}{g:02X}{b:02X}\"\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 33, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 9, "token_counts": 22, "n_ast_nodes": 54, "n_identifiers": 8, "d_id": 44871, "documentation": { "docstring": "The color in CSS hex form, with 6 digits for RGB. Alpha is ignored.\n\n Returns:\n str: A CSS hex-style color, e.g. \"#46b3de\"\n\n ", "n_words": 22, "vocab_size": 21, "n_whitespaces": 47, "language": "en" } }, { "id": 176367, "commit_id": "28b3014d68d2b4e40d3e02219770296a827bd55c", "repo": "networkx", "path": "networkx/algorithms/matching.py", "file_name": "matching.py", "fun_name": "is_matching", "commit_message": "Update matching functions for error validation and speed (#4897)\n\n* First steps to update matching functions for #4644\r\n\r\nExpand tests\r\nChange API to raise NetworkXError when matching involves nodes not in G\r\nUpdate is_*_matching to 100+ times faster.\r\n\r\n* improve matching_dict_to_set and docs for min_weight_matching\r\n\r\n* fix sphinx error", "code": "def is_matching(G, matching):\n \n if isinstance(matching, dict):\n matching = matching_dict_to_set(matching)\n\n nodes = set()\n for edge in matching:\n if len(edge) != 2:\n raise nx.NetworkXError(f\"matching has non-2-tuple edge {edge}\")\n u, v = edge\n if u not in G or v not in G:\n raise nx.NetworkXError(f\"matching contains edge {edge} with node not in G\")\n if u == v:\n return False\n if not G.has_edge(u, v):\n return False\n if u in nodes or v in nodes:\n return False\n nodes.update(edge)\n return True\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 201, "n_words": 75, "vocab_size": 44, "complexity": 10, "nloc": 18, "token_counts": 111, "n_ast_nodes": 185, "n_identifiers": 16, "d_id": 41853, "documentation": { "docstring": "Return True if ``matching`` is a valid matching of ``G``\n\n A *matching* in a graph is a set of edges in which no two distinct\n edges share a common endpoint. Each node is incident to at most one\n edge in the matching. The edges are said to be independent.\n\n Parameters\n ----------\n G : NetworkX graph\n\n matching : dict or set\n A dictionary or set representing a matching. If a dictionary, it\n must have ``matching[u] == v`` and ``matching[v] == u`` for each\n edge ``(u, v)`` in the matching. 
If a set, it must have elements\n of the form ``(u, v)``, where ``(u, v)`` is an edge in the\n matching.\n\n Returns\n -------\n bool\n Whether the given set or dictionary represents a valid matching\n in the graph.\n\n Raises\n ------\n NetworkXError\n If the proposed matching has an edge to a node not in G.\n Or if the matching is not a collection of 2-tuple edges.\n\n ", "n_words": 152, "vocab_size": 86, "n_whitespaces": 257, "language": "en" } }, { "id": 5061, "commit_id": "d4f8b25b8e3e109db866352cf1dcec0d73c92cbd", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-google-ads/unit_tests/test_source.py", "file_name": "test_source.py", "fun_name": "test_google_type_conversion", "commit_message": "Source Google Ads: Improve unit and integration tests (#12651)\n\n* #12650 source Googel ads: tests\r\n\r\n* #12650 source google ads: add changelog item\r\n\r\n* #12650 source google ads: add comments to tests\r\n\r\n* auto-bump connector version\r\n\r\nCo-authored-by: Octavia Squidington III ", "code": "def test_google_type_conversion(mock_fields_meta_data):\n \n desired_mapping = {\n \"accessible_bidding_strategy.target_impression_share.location\": \"string\", # \"ENUM\"\n \"campaign.name\": [\"string\", \"null\"], # STRING\n \"campaign.end_date\": [\"string\", \"null\"], # DATE\n \"campaign.optimization_score\": [\"number\", \"null\"], # DOUBLE\n \"campaign.resource_name\": [\"string\", \"null\"], # RESOURCE_NAME\n \"campaign.shopping_setting.campaign_priority\": [\"integer\", \"null\"], # INT32\n \"campaign.shopping_setting.merchant_id\": [\"integer\", \"null\"], # INT64\n \"campaign_budget.explicitly_shared\": [\"boolean\", \"null\"], # BOOLEAN\n \"bidding_strategy.enhanced_cpc\": [\"string\", \"null\"], # MESSAGE\n \"segments.date\": [\"string\", \"null\"], # autoadded, should be DATE\n }\n\n # query is select field of each type\n query = \n instance = stream_instance(query=query, api_mock=mock_fields_meta_data)\n final_schema = instance.get_json_schema()\n schema_properties = final_schema.get(\"properties\")\n for prop, value in schema_properties.items():\n assert desired_mapping[prop] == value.get(\"type\"), f\"{prop} should be {value}\"\n\n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 206, "n_words": 91, "vocab_size": 60, "complexity": 2, "nloc": 31, "token_counts": 142, "n_ast_nodes": 285, "n_identifiers": 14, "d_id": 714, "documentation": { "docstring": "\n query may be invalid (fields incompatibility did not checked).\n But we are just testing types, without submitting the query and further steps.\n Doing that with all possible types.\n \n SELECT\n accessible_bidding_strategy.target_impression_share.location,\n campaign.name,\n campaign.end_date,\n campaign.optimization_score,\n campaign.resource_name,\n campaign.shopping_setting.campaign_priority,\n campaign.shopping_setting.merchant_id,\n campaign_budget.explicitly_shared,\n bidding_strategy.enhanced_cpc\n FROM campaign\n ", "n_words": 40, "vocab_size": 39, "n_whitespaces": 174, "language": "en" } }, { "id": 118557, "commit_id": "704eab3478cf69847825b23dabf15813a8ac9fa2", "repo": "streamlit", "path": "lib/streamlit/forward_msg_cache.py", "file_name": "forward_msg_cache.py", "fun_name": "has_refs", "commit_message": "Rename and refactor `Report` machinery (#4141)\n\nThis refactor renames (almost) everything related to the outdated \"report\" concept with more precise concepts that we use throughout our code, 
primarily \"script run\", \"session\", and \"app\".", "code": "def has_refs(self) -> bool:\n \n return len(self._session_report_run_counts) > 0\n", "url": "https://github.com/streamlit/streamlit.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 6, "token_counts": 17, "n_ast_nodes": 30, "n_identifiers": 5, "d_id": 26290, "documentation": { "docstring": "True if this Entry has references from any AppSession.\n\n If not, it can be removed from the cache.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 40, "language": "en" } }, { "id": 320360, "commit_id": "55ef0d4a1b62c3abe8500cad97ddeecf9f746b84", "repo": "paperless-ngx", "path": "src/paperless_tesseract/tests/test_checks.py", "file_name": "test_checks.py", "fun_name": "test_multi_part_language_bad_format", "commit_message": "Fixes language code checks around two part languages", "code": "def test_multi_part_language_bad_format(self, m):\n \n m.return_value = [\"chi_sim\", \"eng\"]\n\n msgs = check_default_language_available(None)\n\n self.assertEqual(len(msgs), 1)\n self.assertEqual(msgs[0].level, ERROR)\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 49, "n_words": 14, "vocab_size": 13, "complexity": 1, "nloc": 5, "token_counts": 47, "n_ast_nodes": 78, "n_identifiers": 10, "d_id": 117148, "documentation": { "docstring": "\n GIVEN:\n - An OCR language which is multi part (ie chi-sim)\n - The language is correctly NOT formatted\n WHEN:\n - Installed packages are checked\n THEN:\n - No errors are reported\n ", "n_words": 30, "vocab_size": 24, "n_whitespaces": 103, "language": "en" } }, { "id": 244108, "commit_id": "4bb184bae070f37febb10f82bee3a217dc1ad7c5", "repo": "mmdetection", "path": "mmdet/models/dense_heads/maskformer_head.py", "file_name": "maskformer_head.py", "fun_name": "simple_test", "commit_message": "[Enhance] MaskFormer refactor (#7471)\n\n* maskformer refactor\r\n\r\nupdate docstring\r\n\r\nupdate docstring\r\n\r\nupdate unit test\r\n\r\nupdate unit test\r\n\r\nupdate unit test\r\n\r\n* remove redundant code\r\n\r\n* update unit test", "code": "def simple_test(self, feats, img_metas, **kwargs):\n \n all_cls_scores, all_mask_preds = self(feats, img_metas)\n mask_cls_results = all_cls_scores[-1]\n mask_pred_results = all_mask_preds[-1]\n\n # upsample masks\n img_shape = img_metas[0]['batch_input_shape']\n mask_pred_results = F.interpolate(\n mask_pred_results,\n size=(img_shape[0], img_shape[1]),\n mode='bilinear',\n align_corners=False)\n\n return mask_cls_results, mask_pred_results\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 133, "n_words": 33, "vocab_size": 27, "complexity": 1, "nloc": 11, "token_counts": 80, "n_ast_nodes": 125, "n_identifiers": 15, "d_id": 70242, "documentation": { "docstring": "Test without augmentaton.\n\n Args:\n feats (list[Tensor]): Multi-level features from the\n upstream network, each is a 4D-tensor.\n img_metas (list[dict]): List of image information.\n\n Returns:\n tuple: A tuple contains two tensors.\n\n - mask_cls_results (Tensor): Mask classification logits,\\\n shape (batch_size, num_queries, cls_out_channels).\n Note `cls_out_channels` should includes background.\n - mask_pred_results (Tensor): Mask logits, shape \\\n (batch_size, num_queries, h, w).\n ", "n_words": 
55, "vocab_size": 49, "n_whitespaces": 191, "language": "en" } }, { "id": 257656, "commit_id": "e7627c3f8b241654b61f8523479c81f855102f0a", "repo": "haystack", "path": "test/document_stores/test_opensearch.py", "file_name": "test_opensearch.py", "fun_name": "test_query_by_embedding_excluded_meta_data_return_embedding_true", "commit_message": "Use opensearch-py in OpenSearchDocumentStore (#2691)\n\n* add Opensearch extras\r\n\r\n* let OpenSearchDocumentStore use opensearch-py\r\n\r\n* Update Documentation & Code Style\r\n\r\n* fix a bug found after adding tests\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>\r\nCo-authored-by: Sara Zan ", "code": "def test_query_by_embedding_excluded_meta_data_return_embedding_true(self, mocked_document_store):\n \n mocked_document_store.return_embedding = True\n mocked_document_store.excluded_meta_data = [\"foo\", \"embedding\"]\n mocked_document_store.query_by_embedding(self.query_emb)\n _, kwargs = mocked_document_store.client.search.call_args\n # we expect \"embedding\" was removed from the final query\n assert kwargs[\"body\"][\"_source\"] == {\"excludes\": [\"foo\"]}\n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 79, "n_words": 30, "vocab_size": 28, "complexity": 1, "nloc": 6, "token_counts": 57, "n_ast_nodes": 102, "n_identifiers": 12, "d_id": 75108, "documentation": { "docstring": "\n Test that when `return_embedding==True` the field should NOT be excluded even if it\n was added to `excluded_meta_data`\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 39, "language": "en" } }, { "id": 21509, "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/distro/distro.py", "file_name": "distro.py", "fun_name": "id", "commit_message": "Vendor in pip 22.1.2", "code": "def id() -> str:\n \n return _distro.id()\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 79, "token_counts": 13, "n_ast_nodes": 25, "n_identifiers": 3, "d_id": 3889, "documentation": { "docstring": "\n Return the distro ID of the current distribution, as a\n machine-readable string.\n\n For a number of OS distributions, the returned distro ID value is\n *reliable*, in the sense that it is documented and that it does not change\n across releases of the distribution.\n\n This package maintains the following reliable distro ID values:\n\n ============== =========================================\n Distro ID Distribution\n ============== =========================================\n \"ubuntu\" Ubuntu\n \"debian\" Debian\n \"rhel\" RedHat Enterprise Linux\n \"centos\" CentOS\n \"fedora\" Fedora\n \"sles\" SUSE Linux Enterprise Server\n \"opensuse\" openSUSE\n \"amzn\" Amazon Linux\n \"arch\" Arch Linux\n \"cloudlinux\" CloudLinux OS\n \"exherbo\" Exherbo Linux\n \"gentoo\" GenToo Linux\n \"ibm_powerkvm\" IBM PowerKVM\n \"kvmibm\" KVM for IBM z Systems\n \"linuxmint\" Linux Mint\n \"mageia\" Mageia\n \"mandriva\" Mandriva Linux\n \"parallels\" Parallels\n \"pidora\" Pidora\n \"raspbian\" Raspbian\n \"oracle\" Oracle Linux (and Oracle Enterprise Linux)\n \"scientific\" Scientific Linux\n \"slackware\" Slackware\n \"xenserver\" XenServer\n \"openbsd\" OpenBSD\n \"netbsd\" NetBSD\n \"freebsd\" FreeBSD\n \"midnightbsd\" MidnightBSD\n 
\"rocky\" Rocky Linux\n \"aix\" AIX\n ============== =========================================\n\n If you have a need to get distros for reliable IDs added into this set,\n or if you find that the :func:`distro.id` function returns a different\n distro ID for one of the listed distros, please create an issue in the\n `distro issue tracker`_.\n\n **Lookup hierarchy and transformations:**\n\n First, the ID is obtained from the following sources, in the specified\n order. The first available and non-empty value is used:\n\n * the value of the \"ID\" attribute of the os-release file,\n\n * the value of the \"Distributor ID\" attribute returned by the lsb_release\n command,\n\n * the first part of the file name of the distro release file,\n\n The so determined ID value then passes the following transformations,\n before it is returned by this method:\n\n * it is translated to lower case,\n\n * blanks (which should not be there anyway) are translated to underscores,\n\n * a normalization of the ID is performed, based upon\n `normalization tables`_. The purpose of this normalization is to ensure\n that the ID is as reliable as possible, even across incompatible changes\n in the OS distributions. A common reason for an incompatible change is\n the addition of an os-release file, or the addition of the lsb_release\n command, with ID values that differ from what was previously determined\n from the distro release file name.\n ", "n_words": 359, "vocab_size": 208, "n_whitespaces": 754, "language": "en" } }, { "id": 47528, "commit_id": "49e336ae0302b386a2f47269a6d13988382d975f", "repo": "airflow", "path": "tests/jobs/test_scheduler_job.py", "file_name": "test_scheduler_job.py", "fun_name": "test_execute_task_instances_backfill_tasks_wont_execute", "commit_message": "Replace usage of `DummyOperator` with `EmptyOperator` (#22974)\n\n* Replace usage of `DummyOperator` with `EmptyOperator`", "code": "def test_execute_task_instances_backfill_tasks_wont_execute(self, dag_maker):\n \n dag_id = 'SchedulerJobTest.test_execute_task_instances_backfill_tasks_wont_execute'\n task_id_1 = 'dummy_task'\n\n with dag_maker(dag_id=dag_id):\n task1 = EmptyOperator(task_id=task_id_1)\n\n self.scheduler_job = SchedulerJob(subdir=os.devnull)\n session = settings.Session()\n\n dr1 = dag_maker.create_dagrun(run_type=DagRunType.BACKFILL_JOB)\n\n ti1 = TaskInstance(task1, run_id=dr1.run_id)\n ti1.refresh_from_db()\n ti1.state = State.SCHEDULED\n session.merge(ti1)\n session.flush()\n\n assert dr1.is_backfill\n\n self.scheduler_job._critical_section_execute_task_instances(session)\n session.flush()\n ti1.refresh_from_db()\n assert State.SCHEDULED == ti1.state\n session.rollback()\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 180, "n_words": 43, "vocab_size": 31, "complexity": 1, "nloc": 19, "token_counts": 131, "n_ast_nodes": 222, "n_identifiers": 33, "d_id": 9148, "documentation": { "docstring": "\n Tests that backfill tasks won't get executed.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 167613, "commit_id": "f538568afc2c76c2d738d32e3544cf9fe6742960", "repo": "pandas", "path": "pandas/conftest.py", "file_name": "conftest.py", "fun_name": "rand_series_with_duplicate_datetimeindex", "commit_message": "TYP: misc return type annotations (#47558)", "code": "def rand_series_with_duplicate_datetimeindex() -> Series:\n \n dates = [\n datetime(2000, 1, 2),\n datetime(2000, 1, 2),\n datetime(2000, 
1, 2),\n datetime(2000, 1, 3),\n datetime(2000, 1, 3),\n datetime(2000, 1, 3),\n datetime(2000, 1, 4),\n datetime(2000, 1, 4),\n datetime(2000, 1, 4),\n datetime(2000, 1, 5),\n ]\n\n return Series(np.random.randn(len(dates)), index=dates)\n\n\n# ----------------------------------------------------------------\n# Scalars\n# ----------------------------------------------------------------\n@pytest.fixture(\n params=[\n (\n Interval(left=0, right=5, inclusive=\"right\"),\n IntervalDtype(\"int64\", inclusive=\"right\"),\n ),\n (\n Interval(left=0.1, right=0.5, inclusive=\"right\"),\n IntervalDtype(\"float64\", inclusive=\"right\"),\n ),\n (Period(\"2012-01\", freq=\"M\"), \"period[M]\"),\n (Period(\"2012-02-01\", freq=\"D\"), \"period[D]\"),\n (\n Timestamp(\"2011-01-01\", tz=\"US/Eastern\"),\n DatetimeTZDtype(tz=\"US/Eastern\"),\n ),\n (Timedelta(seconds=500), \"timedelta64[ns]\"),\n ]\n)", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "@pytest.fixture(\n params=[\n (\n Interval(left=0, right=5, inclusive=\"right\"),\n IntervalDtype(\"int64\", inclusive=\"right\"),\n ),\n (\n Interval(left=0.1, right=0.5, inclusive=\"right\"),\n IntervalDtype(\"float64\", inclusive=\"right\"),\n ),\n (Period(\"2012-01\", freq=\"M\"), \"period[M]\"),\n (Period(\"2012-02-01\", freq=\"D\"), \"period[D]\"),\n (\n Timestamp(\"2011-01-01\", tz=\"US/Eastern\"),\n DatetimeTZDtype(tz=\"US/Eastern\"),\n ),\n (Timedelta(seconds=500), \"timedelta64[ns]\"),\n ]\n)", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 290, "n_words": 78, "vocab_size": 43, "complexity": 1, "nloc": 17, "token_counts": 120, "n_ast_nodes": 360, "n_identifiers": 24, "d_id": 40065, "documentation": { "docstring": "\n Fixture for Series with a DatetimeIndex that has duplicates.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 16, "language": "en" } }, { "id": 66864, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/patches/v5_7/update_item_description_based_on_item_master.py", "file_name": "update_item_description_based_on_item_master.py", "fun_name": "execute", "commit_message": "style: format code with black", "code": "def execute():\n\tname = frappe.db.sql(\n\t\t\n\t)\n\tif not name:\n\t\tfrappe.db.sql(\n\t\t\t\"update `tabProduction Order` pro \\\n\t\t\tset \\\n\t\t\t\tdescription = (select description from tabItem where name=pro.production_item) \\\n\t\t\twhere \\\n\t\t\t\tifnull(description, '') = ''\"\n\t\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 22, "n_words": 33, "vocab_size": 24, "complexity": 2, "nloc": 14, "token_counts": 26, "n_ast_nodes": 54, "n_identifiers": 5, "d_id": 14363, "documentation": { "docstring": " select name from `tabPatch Log` \\\n\t\twhere \\\n\t\t\tpatch like 'execute:frappe.db.sql(\"update `tabProduction Order` pro set description%' ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 15, "language": "en" } }, { "id": 47756, "commit_id": "197cff3194e855b9207c3c0da8ae093a0d5dda55", "repo": "airflow", "path": "airflow/models/mappedoperator.py", "file_name": "mappedoperator.py", "fun_name": "iter_mapped_dependencies", "commit_message": "Ensure TaskMap only checks \"relevant\" dependencies (#23053)\n\nWhen looking for \"mapped dependants\" of a task, we only want a task if\r\nit not only is a direct downstream of the task, but also it actually\r\n\"uses\" the task's pushed XCom for task mapping. 
So we need to peek into\r\nthe mapped downstream task's expansion kwargs, and only count it as a\r\nmapped dependant if the upstream is referenced there.", "code": "def iter_mapped_dependencies(self) -> Iterator[\"Operator\"]:\n \n from airflow.models.xcom_arg import XComArg\n\n for ref in XComArg.iter_xcom_args(self._get_expansion_kwargs()):\n yield ref.operator\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 46, "n_words": 14, "vocab_size": 14, "complexity": 2, "nloc": 5, "token_counts": 37, "n_ast_nodes": 62, "n_identifiers": 11, "d_id": 9245, "documentation": { "docstring": "Upstream dependencies that provide XComs used by this task for task mapping.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 203048, "commit_id": "436862787cbdbd68b0ba20ed8c23b295e3679df3", "repo": "django", "path": "django/contrib/sessions/backends/base.py", "file_name": "base.py", "fun_name": "get_expiry_date", "commit_message": "Refs #29708 -- Made SessionBase store expiry as string.", "code": "def get_expiry_date(self, **kwargs):\n \n try:\n modification = kwargs['modification']\n except KeyError:\n modification = timezone.now()\n # Same comment as in get_expiry_age\n try:\n expiry = kwargs['expiry']\n except KeyError:\n expiry = self.get('_session_expiry')\n\n if isinstance(expiry, datetime):\n return expiry\n elif isinstance(expiry, str):\n return datetime.fromisoformat(expiry)\n expiry = expiry or self.get_session_cookie_age()\n return modification + timedelta(seconds=expiry)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 182, "n_words": 46, "vocab_size": 30, "complexity": 6, "nloc": 15, "token_counts": 89, "n_ast_nodes": 152, "n_identifiers": 16, "d_id": 50223, "documentation": { "docstring": "Get session the expiry date (as a datetime object).\n\n Optionally, this function accepts `modification` and `expiry` keyword\n arguments specifying the modification and expiry of the session.\n ", "n_words": 26, "vocab_size": 22, "n_whitespaces": 47, "language": "en" } }, { "id": 322167, "commit_id": "621357338437ee420eabbbf5ab19065bc85e73a5", "repo": "PaddleNLP", "path": "examples/text_classification/multi_label/train.py", "file_name": "train.py", "fun_name": "evaluate", "commit_message": "Update neural search readme and Add Paddle Serving Support (#1558)\n\n* add recall inference similarity\r\n\r\n* update examples\r\n\r\n* updatea readme\r\n\r\n* update dir name\r\n\r\n* update neural search readme\r\n\r\n* update milvus readme\r\n\r\n* update domain adaptive pretraining readme\r\n\r\n* fix the mistakes\r\n\r\n* update readme\r\n\r\n* add recall Paddle Serving Support\r\n\r\n* update readme\r\n\r\n* update readme and format the code\r\n\r\n* reformat the files\r\n\r\n* move the files\r\n\r\n* reformat the code\r\n\r\n* remove redundant code\r\n\r\nCo-authored-by: Zeyu Chen \r\nCo-authored-by: tianxin ", "code": "def evaluate(model, criterion, metric, data_loader):\n \n model.eval()\n metric.reset()\n losses = []\n for batch in data_loader:\n input_ids, token_type_ids, labels = batch\n logits = model(input_ids, token_type_ids)\n loss = criterion(logits, labels)\n probs = F.sigmoid(logits)\n losses.append(loss.numpy())\n metric.update(probs, labels)\n auc, f1_score = metric.accumulate()\n print(\"eval loss: %.5f, auc: %.5f, f1 score: %.5f\" %\n (np.mean(losses), auc, f1_score))\n 
model.train()\n metric.reset()\n\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 129, "n_words": 51, "vocab_size": 41, "complexity": 2, "nloc": 16, "token_counts": 116, "n_ast_nodes": 187, "n_identifiers": 27, "d_id": 118077, "documentation": { "docstring": "\n Given a dataset, it evals model and computes the metric.\n\n Args:\n model(obj:`paddle.nn.Layer`): A model to classify texts.\n criterion(obj:`paddle.nn.Layer`): It can compute the loss.\n metric(obj:`paddle.metric.Metric`): The evaluation metric.\n data_loader(obj:`paddle.io.DataLoader`): The dataset loader which generates batches.\n ", "n_words": 34, "vocab_size": 30, "n_whitespaces": 72, "language": "en" } }, { "id": 226903, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_heatmap.py", "file_name": "_heatmap.py", "fun_name": "ygap", "commit_message": "switch to black .22", "code": "def ygap(self):\n \n return self[\"ygap\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 58576, "documentation": { "docstring": "\n Sets the vertical gap (in pixels) between bricks.\n\n The 'ygap' property is a number and may be specified as:\n - An int or float in the interval [0, inf]\n\n Returns\n -------\n int|float\n ", "n_words": 32, "vocab_size": 31, "n_whitespaces": 84, "language": "en" } }, { "id": 220938, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/asyncio/unix_events.py", "file_name": "unix_events.py", "fun_name": "__enter__", "commit_message": "add python 3.10.4 for windows", "code": "def __enter__(self):\n \n raise NotImplementedError()\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 20, "n_identifiers": 3, "d_id": 56170, "documentation": { "docstring": "Enter the watcher's context and allow starting new processes\n\n This function must return self", "n_words": 14, "vocab_size": 14, "n_whitespaces": 20, "language": "en" } }, { "id": 5687, "commit_id": "d79b319819650f99fae2ab8c6c8d3ab25d474cf1", "repo": "airbyte", "path": "airbyte-integrations/connectors/source-mixpanel/source_mixpanel/testing.py", "file_name": "testing.py", "fun_name": "adapt_streams_if_testing", "commit_message": ":tada: Source Mixpanel: Beta preparation (#13372)\n\n* Add extra mode to Source, to allow run acceptance tests\r\n* move streams into distinct modules\r\n* Add property name transformation for Export stream for avoiding collisions\r\n* Update doc\r\n* Add `date_window_size`", "code": "def adapt_streams_if_testing(func):\n \n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 4, "token_counts": 15, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 808, "documentation": { "docstring": "\n Due to API limitations (60 requests per hour) there is unavailable to make acceptance tests in normal mode,\n so we're reducing amount of requests by, if `is_testing` flag is 
set in config:\n\n 1. Take time range in only 1 month\n 2. Patch Funnels, so we download data only for one Funnel entity\n 3. Removing RPS limit for faster testing\n ", "n_words": 59, "vocab_size": 51, "n_whitespaces": 78, "language": "en" } }, { "id": 218386, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/inspect.py", "file_name": "inspect.py", "fun_name": "getsource", "commit_message": "add python 3.10.4 for windows", "code": "def getsource(object):\n \n lines, lnum = getsourcelines(object)\n return ''.join(lines)\n\n# --------------------------------------------------- class tree extraction", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 21, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 3, "token_counts": 21, "n_ast_nodes": 40, "n_identifiers": 6, "d_id": 55274, "documentation": { "docstring": "Return the text of the source code for an object.\n\n The argument may be a module, class, method, function, traceback, frame,\n or code object. The source code is returned as a single string. An\n OSError is raised if the source code cannot be retrieved.", "n_words": 44, "vocab_size": 32, "n_whitespaces": 54, "language": "en" } }, { "id": 277755, "commit_id": "f3cafc77c269f7ecbf80bb4cf4b54e28c153f4e6", "repo": "keras", "path": "keras/backend_config.py", "file_name": "backend_config.py", "fun_name": "set_floatx", "commit_message": "resolve line-too-long in root directory", "code": "def set_floatx(value):\n \n global _FLOATX\n accepted_dtypes = {\"float16\", \"float32\", \"float64\"}\n if value not in accepted_dtypes:\n raise ValueError(\n f\"Unknown `floatx` value: {value}. \"\n f\"Expected one of {accepted_dtypes}\"\n )\n _FLOATX = str(value)\n\n\n@keras_export(\"keras.backend.image_data_format\")\n@tf.__internal__.dispatch.add_dispatch_support", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.backend.image_data_format\")\n@tf.__internal__.dispatch.add_dispatch_support", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 80, "n_words": 31, "vocab_size": 29, "complexity": 2, "nloc": 9, "token_counts": 37, "n_ast_nodes": 98, "n_identifiers": 11, "d_id": 82185, "documentation": { "docstring": "Sets the default float type.\n\n Note: It is not recommended to set this to float16 for training, as this\n will likely cause numeric stability issues. Instead, mixed precision, which\n is using a mix of float16 and float32, can be used by calling\n `tf.keras.mixed_precision.set_global_policy('mixed_float16')`. 
See the\n [mixed precision guide](\n https://www.tensorflow.org/guide/keras/mixed_precision) for details.\n\n Args:\n value: String; `'float16'`, `'float32'`, or `'float64'`.\n\n Example:\n >>> tf.keras.backend.floatx()\n 'float32'\n >>> tf.keras.backend.set_floatx('float64')\n >>> tf.keras.backend.floatx()\n 'float64'\n >>> tf.keras.backend.set_floatx('float32')\n\n Raises:\n ValueError: In case of invalid value.\n ", "n_words": 76, "vocab_size": 65, "n_whitespaces": 140, "language": "en" } }, { "id": 177569, "commit_id": "aaa022d8acbeb002eab2930965da276e9298cd54", "repo": "label-studio", "path": "label_studio/tests/test_next_task.py", "file_name": "test_next_task.py", "fun_name": "test_fetch_final_taken_task", "commit_message": "[ext] Add video interpolation by param (DEV-74) (#1735)\n\n* Add video interpolation by param\r\n\r\n* Change label-studio-tools commit\r\n\r\n* Fix typo and add some comments\r\n\r\n* Fix context field\r\n\r\n* Fix label-studio-tools link\r\n\r\n* fix link to ext dep\r\n\r\n* Update requirements for label_studio_tools\r\n\r\n* Change label-studio-tools commit with refactoring\r\n\r\n* Change label-studio-tools requirement\r\n\r\n* Change label-studio-tools version to dev3\r\n\r\n* Change base settings\r\n\r\n* Add interpolate_key_frames option in ExportMixin\r\n\r\n* Change serializer options to context\r\n\r\n* Add serializer for Export\r\n\r\n- Add serializer for Export\r\n- Switch to is_video_object_tracking and new extract_key_frames logic\r\n- Change label-studio-tools requirement\r\n\r\n* Fix serializer fields\r\n\r\n* Fix export type in serializer\r\n\r\n* Add exportType to support both export params\r\n\r\n* Move to parsed_config in is_video_object_tracking\r\n\r\n* Add interpolate_key_frames to SerializationOptionsSerializer\r\n\r\n* Change label-studio-tools to version with sequence\r\n\r\n* Change label-studio-tools with time fix\r\n\r\n* Add parse_label_config to Project model\r\n\r\n* Fix new project condition\r\n\r\n* Change from presave signal to save method\r\n\r\n* Fix input data for tests\r\n\r\n* Upgrade label-studio-tools version\r\n\r\n* Change label-studio-tools version with key frames order fix\r\n\r\nCo-authored-by: Sergey Zhuk \r\nCo-authored-by: Max Tkachenko \r\nCo-authored-by: Sergei Ivashchenko ", "code": "def test_fetch_final_taken_task(business_client):\n config = dict(\n title='test_label_races',\n is_published=True,\n label_config=\n )\n annotation_result = json.dumps([{\n 'from_name': 'text_class',\n 'to_name': 'text',\n 'type': 'choices',\n 'value': {'choices': ['class_A']}\n }])\n project = make_project(config, business_client.user)\n project.sampling = Project.SEQUENCE\n project.save()\n\n ann1 = make_annotator({'email': 'ann1@testfetchfinal.com'}, project, True)\n ann2 = make_annotator({'email': 'ann2@testfetchfinal.com'}, project, True)\n\n # create tasks\n tasks = []\n num_tasks = 2\n for i in range(num_tasks):\n tasks.append({'data': {'text': f'this is {str(i)}'}})\n r = business_client.post(\n f'/api/projects/{project.id}/tasks/bulk/', data=json.dumps(tasks), content_type='application/json')\n assert r.status_code == 201\n\n # set max annotations\n r = business_client.patch(\n f'/api/projects/{project.id}/',\n data=json.dumps({'maximum_annotations': 2}),\n content_type='application/json'\n )\n assert r.status_code == 200\n\n print('ann1 takes any task and complete it')\n r = ann1.get(f'/api/projects/{project.id}/next')\n task_id = json.loads(r.content)['id']\n ann1.post(f'/api/tasks/{task_id}/annotations/', data={'task': 
task_id, 'result': annotation_result})\n\n print('ann2 takes the same task (because of depth-first) but just lock it - don\\'t complete')\n r = ann2.get(f'/api/projects/{project.id}/next')\n assert json.loads(r.content)['id'] == task_id\n\n print('ann1 takes another task')\n r = ann1.get(f'/api/projects/{project.id}/next')\n another_task_id = json.loads(r.content)['id']\n assert another_task_id != task_id\n\n print('ann1 should never take task_id since he has completed it')\n for i in range(3):\n r = ann1.get(f'/api/projects/{project.id}/next')\n assert json.loads(r.content)['id'] == another_task_id\n\n\n@pytest.mark.skipif(not redis_healthcheck(), reason='Multi user locks only supported with redis enabled')\n@pytest.mark.django_db", "url": "https://github.com/heartexlabs/label-studio.git", "language": "Python", "ast_errors": "@pytest.mark.skipif(not redis_healthcheck(), reason='Multi user locks only supported with redis enabled')\n@pytest.mark.django_db", "n_ast_errors": 1, "ast_levels": 17, "n_whitespaces": 363, "n_words": 172, "vocab_size": 119, "complexity": 3, "nloc": 52, "token_counts": 330, "n_ast_nodes": 662, "n_identifiers": 45, "d_id": 42442, "documentation": { "docstring": "\n \n \n \n \n \n \n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 105, "language": "en" } }, { "id": 274999, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/mixed_precision/layer_test.py", "file_name": "layer_test.py", "fun_name": "create_central_storage_strategy", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def create_central_storage_strategy():\n \n compute_devices = (\n [\"cpu:0\", \"gpu:0\"]\n if (tf.config.list_logical_devices(\"GPU\"))\n else [\"cpu:0\"]\n )\n return tf.distribute.experimental.CentralStorageStrategy(\n compute_devices, parameter_device=\"cpu:0\"\n )\n\n\nTESTCASES = (\n {\"testcase_name\": \"base\", \"strategy_fn\": default_strategy_fn},\n {\"testcase_name\": \"distribute\", \"strategy_fn\": create_mirrored_strategy},\n)\n\n\n@test_combinations.generate(test_combinations.combine(mode=[\"graph\", \"eager\"]))", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@test_combinations.generate(test_combinations.combine(mode=[\"graph\", \"eager\"]))", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 77, "n_words": 31, "vocab_size": 25, "complexity": 2, "nloc": 9, "token_counts": 44, "n_ast_nodes": 156, "n_identifiers": 16, "d_id": 81282, "documentation": { "docstring": "Create a CentralStorageStrategy, using a GPU if it is available.", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 268893, "commit_id": "c223693db91473c9a71c330d4e38a751d149f93c", "repo": "keras", "path": "keras/applications/resnet_rs.py", "file_name": "resnet_rs.py", "fun_name": "decode_predictions", "commit_message": "KERAS application addition of Resnet-RS model", "code": "def decode_predictions(preds, top=5):\n return imagenet_utils.decode_predictions(preds, top=top)\n\npreprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(\n mode='',\n ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,\n error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)\ndecode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__\n\nDOC = \n\nsetattr(ResNetRS50, '__doc__', ResNetRS50.__doc__ + DOC)\nsetattr(ResNetRS152, '__doc__', ResNetRS152.__doc__ + DOC)\nsetattr(ResNetRS200, '__doc__', ResNetRS200.__doc__ + DOC)\nsetattr(ResNetRS270, '__doc__', 
ResNetRS270.__doc__ + DOC)\nsetattr(ResNetRS350, '__doc__', ResNetRS350.__doc__ + DOC)\nsetattr(ResNetRS420, '__doc__', ResNetRS420.__doc__ + DOC)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 48, "n_words": 47, "vocab_size": 30, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 205, "n_identifiers": 21, "d_id": 79758, "documentation": { "docstring": "\n\n Reference:\n [Revisiting ResNets: Improved Training and Scaling Strategies](\n https://arxiv.org/pdf/2103.07579.pdf)\n\n For image classification use cases, see\n [this page for detailed examples](\n https://keras.io/api/applications/#usage-examples-for-image-classification-models).\n\n For transfer learning use cases, make sure to read the\n [guide to transfer learning & fine-tuning](\n https://keras.io/guides/transfer_learning/).\n\n Note: each Keras Application expects a specific kind of input preprocessing.\n For ResNetRs, by default input preprocessing is included as a part of the\n model (as a `Rescaling` layer), and thus\n `tf.keras.applications.resnet_rs.preprocess_input` is actually a\n pass-through function. In this use case, ResNetRS models expect their inputs\n to be float tensors of pixels with values in the [0-255] range.\n At the same time, preprocessing as a part of the model (i.e. `Rescaling`\n layer) can be disabled by setting `include_preprocessing` argument to False.\n With preprocessing disabled ResNetRS models expect their inputs to be float\n tensors of pixels with values in the [-1, 1] range.\n\n Args:\n depth: Depth of ResNet network.\n dropout_rate: dropout rate before final classifier layer.\n bn_momentum: Momentum parameter for Batch Normalization layers.\n bn_epsilon: Epsilon parameter for Batch Normalization layers.\n activation: activation function.\n block_args: list of dicts, parameters to construct block modules.\n se_ratio: Squeeze and Excitation layer ratio.\n model_name: name of the model.\n drop_connect_rate: dropout rate at skip connections.\n include_top: whether to include the fully-connected layer at the top of\n the network.\n weights: one of `None` (random initialization), `'imagenet'`\n (pre-training on ImageNet), or the path to the weights file to be loaded.\n Note: one model can have multiple imagenet variants depending on\n input shape it was trained with. For input_shape 224x224 pass\n `imagenet-i224` as argument. By default, highest input shape weights are\n downloaded.\n input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to\n use as image input for the model.\n input_shape: optional shape tuple. It should have exactly 3 inputs\n channels, and width and height should be no smaller than 32.\n E.g. (200, 200, 3) would be one valid value.\n pooling: optional pooling mode for feature extraction when `include_top`\n is `False`.\n - `None` means that the output of the model will be\n the 4D tensor output of the\n last convolutional layer.\n - `avg` means that global average pooling\n will be applied to the output of the\n last convolutional layer, and thus\n the output of the model will be a 2D tensor.\n - `max` means that global max pooling will\n be applied.\n classes: optional number of classes to classify images into, only to be\n specified if `include_top` is True, and if no `weights` argument is\n specified.\n classifier_activation: A `str` or callable. The activation function to\n use on the \"top\" layer. Ignored unless `include_top=True`. 
Set\n `classifier_activation=None` to return the logits of the \"top\" layer.\n include_preprocessing: Boolean, whether to include the preprocessing layer\n (`Rescaling`) at the bottom of the network. Defaults to `True`.\n\n Returns:\n A `keras.Model` instance.\n", "n_words": 450, "vocab_size": 256, "n_whitespaces": 883, "language": "en" } }, { "id": 12505, "commit_id": "ef662b529b2a2eecea7bb99759a9f7b9d86d3062", "repo": "jina", "path": "jina/serve/runtimes/gateway/graph/topology_graph.py", "file_name": "topology_graph.py", "fun_name": "all_nodes", "commit_message": "feat: add grpc health checking (#4779)", "code": "def all_nodes(self):\n \n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 9, "n_words": 2, "vocab_size": 2, "complexity": 4, "nloc": 11, "token_counts": 69, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 2324, "documentation": { "docstring": "\n The set of all the nodes inside this Graph\n\n :return: A list of nodes\n ", "n_words": 14, "vocab_size": 12, "n_whitespaces": 36, "language": "en" } }, { "id": 82255, "commit_id": "e87e041a2a2a6d168a84d3eeea6664985f1c8ab8", "repo": "awx", "path": "awx/main/access.py", "file_name": "access.py", "fun_name": "filtered_queryset", "commit_message": "Break up and conditionally add the RBAC checks for ActivityStream (#13279)\n\nThis should vastly improve the queries executed when accessing any of\r\nthe activity stream endpoints as a normal user, in many cases.", "code": "def filtered_queryset(self):\n \n qs = self.model.objects.all()\n # FIXME: the following fields will be attached to the wrong object\n # if they are included in prefetch_related because of\n # https://github.com/django-polymorphic/django-polymorphic/issues/68\n # 'job_template', 'job', 'project', 'project_update', 'workflow_job',\n # 'inventory_source', 'workflow_job_template'\n\n q = Q(user=self.user)\n inventory_set = Inventory.accessible_pk_qs(self.user, 'read_role')\n if inventory_set:\n q |= (\n Q(ad_hoc_command__inventory__in=inventory_set)\n | Q(inventory__in=inventory_set)\n | Q(host__inventory__in=inventory_set)\n | Q(group__inventory__in=inventory_set)\n | Q(inventory_source__inventory__in=inventory_set)\n | Q(inventory_update__inventory_source__inventory__in=inventory_set)\n )\n\n credential_set = Credential.accessible_pk_qs(self.user, 'read_role')\n if credential_set:\n q |= Q(credential__in=credential_set)\n\n auditing_orgs = (\n (Organization.accessible_objects(self.user, 'admin_role') | Organization.accessible_objects(self.user, 'auditor_role'))\n .distinct()\n .values_list('id', flat=True)\n )\n if auditing_orgs:\n q |= (\n Q(user__in=auditing_orgs.values('member_role__members'))\n | Q(organization__in=auditing_orgs)\n | Q(notification_template__organization__in=auditing_orgs)\n | Q(notification__notification_template__organization__in=auditing_orgs)\n | Q(label__organization__in=auditing_orgs)\n | Q(role__in=Role.objects.filter(ancestors__in=self.user.roles.all()) if auditing_orgs else [])\n )\n\n project_set = Project.accessible_pk_qs(self.user, 'read_role')\n if project_set:\n q |= Q(project__in=project_set) | Q(project_update__project__in=project_set)\n\n jt_set = JobTemplate.accessible_pk_qs(self.user, 'read_role')\n if jt_set:\n q |= Q(job_template__in=jt_set) | Q(job__job_template__in=jt_set)\n\n wfjt_set = WorkflowJobTemplate.accessible_pk_qs(self.user, 'read_role')\n if wfjt_set:\n q |= (\n Q(workflow_job_template__in=wfjt_set)\n | 
Q(workflow_job_template_node__workflow_job_template__in=wfjt_set)\n | Q(workflow_job__workflow_job_template__in=wfjt_set)\n )\n\n team_set = Team.accessible_pk_qs(self.user, 'read_role')\n if team_set:\n q |= Q(team__in=team_set)\n\n app_set = OAuth2ApplicationAccess(self.user).filtered_queryset()\n if app_set:\n q |= Q(o_auth2_application__in=app_set)\n\n token_set = OAuth2TokenAccess(self.user).filtered_queryset()\n if token_set:\n q |= Q(o_auth2_access_token__in=token_set)\n\n return qs.filter(q).distinct()\n", "url": "https://github.com/ansible/awx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 753, "n_words": 167, "vocab_size": 99, "complexity": 11, "nloc": 53, "token_counts": 404, "n_ast_nodes": 665, "n_identifiers": 60, "d_id": 17336, "documentation": { "docstring": "\n The full set is returned if the user is:\n - System Administrator\n - System Auditor\n These users will be able to see orphaned activity stream items\n (the related resource has been deleted), as well as the other\n obscure cases listed here\n\n Complex permissions omitted from the activity stream of a normal user:\n - host access via group\n - permissions (from prior versions)\n - notifications via team admin access\n\n Activity stream events that have been omitted from list for\n normal users since 2.4:\n - unified job templates\n - unified jobs\n - schedules\n - custom inventory scripts\n ", "n_words": 95, "vocab_size": 71, "n_whitespaces": 224, "language": "en" } }, { "id": 209950, "commit_id": "ada91610ad55339bce4d84bc7d5e44ee1cab0c6f", "repo": "scapy", "path": "scapy/layers/can.py", "file_name": "can.py", "fun_name": "read_packet", "commit_message": "Add support of CANFD (#3782)\n\n* Add support of CANFD\r\n\r\nCo-authored-by: superuserx\r\n\r\n* fix tests\r\n\r\n* fix flake\r\n\r\n* fix test\r\n\r\n* fix test for python2\r\n\r\n* fix test for python2\r\n\r\n* fix test for python2\r\n\r\nCo-authored-by: superuserx \r\nCo-authored-by: Nils Weiss ", "code": "def read_packet(self, size=CAN_MTU):\n # type: (int) -> Optional[Packet]\n \n line = self.f.readline()\n line = line.lstrip()\n if len(line) < 16:\n raise EOFError\n\n is_log_file_format = orb(line[0]) == orb(b\"(\")\n fd_flags = None\n if is_log_file_format:\n t_b, intf, f = line.split()\n if b'##' in f:\n idn, data = f.split(b'##')\n fd_flags = orb(data[0])\n data = data[1:]\n else:\n idn, data = f.split(b'#')\n le = None\n t = float(t_b[1:-1]) # type: Optional[float]\n else:\n h, data = line.split(b']')\n intf, idn, le = h.split()\n t = None\n\n if self.ifilter is not None and \\\n intf.decode('ASCII') not in self.ifilter:\n return None\n\n data = data.replace(b' ', b'')\n data = data.strip()\n\n if len(data) <= 8 and fd_flags is None:\n pkt = CAN(identifier=int(idn, 16), data=hex_bytes(data))\n else:\n pkt = CANFD(identifier=int(idn, 16), fd_flags=fd_flags,\n data=hex_bytes(data))\n\n if le is not None:\n pkt.length = int(le[1:])\n else:\n pkt.length = len(pkt.data)\n\n if len(idn) > 3:\n pkt.flags = 0b100\n\n if t is not None:\n pkt.time = t\n\n return pkt\n", "url": "https://github.com/secdev/scapy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 554, "n_words": 146, "vocab_size": 77, "complexity": 11, "nloc": 40, "token_counts": 312, "n_ast_nodes": 500, "n_identifiers": 35, "d_id": 52837, "documentation": { "docstring": "Read a packet from the specified file.\n\n This function will raise EOFError when no more packets are available.\n\n :param size: 
Not used. Just here to follow the function signature for\n SuperSocket emulation.\n :return: A single packet read from the file or None if filters apply\n ", "n_words": 45, "vocab_size": 40, "n_whitespaces": 93, "language": "en" } }, { "id": 261841, "commit_id": "8fd1ee1926a956a146188179baee143ef11a003d", "repo": "TTS", "path": "TTS/utils/synthesizer.py", "file_name": "synthesizer.py", "fun_name": "_set_speaker_encoder_paths_from_tts_config", "commit_message": "Print urls when BadZipError", "code": "def _set_speaker_encoder_paths_from_tts_config(self):\n \n if hasattr(self.tts_config, \"model_args\") and hasattr(\n self.tts_config.model_args, \"speaker_encoder_config_path\"\n ):\n self.encoder_checkpoint = self.tts_config.model_args.speaker_encoder_model_path\n self.encoder_config = self.tts_config.model_args.speaker_encoder_config_path\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 70, "n_words": 16, "vocab_size": 15, "complexity": 3, "nloc": 6, "token_counts": 49, "n_ast_nodes": 82, "n_identifiers": 9, "d_id": 77026, "documentation": { "docstring": "Set the encoder paths from the tts model config for models with speaker encoders.", "n_words": 14, "vocab_size": 13, "n_whitespaces": 13, "language": "en" } }, { "id": 197778, "commit_id": "1af5040d2466d2e6455eb07454f7da8dd345a9b8", "repo": "sympy", "path": "sympy/polys/domains/domain.py", "file_name": "domain.py", "fun_name": "alg_field_from_poly", "commit_message": "Support `alias` for prim. elt. of `AlgebraicField`", "code": "def alg_field_from_poly(self, poly, alias=None, root_index=-1):\n r\n from sympy.polys.rootoftools import CRootOf\n root = CRootOf(poly, root_index)\n alpha = AlgebraicNumber(root, alias=alias)\n return self.algebraic_field(alpha, alias=alias)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 55, "n_words": 21, "vocab_size": 19, "complexity": 1, "nloc": 41, "token_counts": 55, "n_ast_nodes": 81, "n_identifiers": 13, "d_id": 48688, "documentation": { "docstring": "\n Convenience method to construct an algebraic extension on a root of a\n polynomial, chosen by root index.\n\n Parameters\n ==========\n\n poly : :py:class:`~.Poly`\n The polynomial whose root generates the extension.\n alias : str, optional (default=None)\n Symbol name for the generator of the extension.\n E.g. \"alpha\" or \"theta\".\n root_index : int, optional (default=-1)\n Specifies which root of the polynomial is desired. The ordering is\n as defined by the :py:class:`~.ComplexRootOf` class. The default of\n ``-1`` selects the most natural choice in the common cases of\n quadratic and cyclotomic fields (the square root on the positive\n real or imaginary axis, resp. 
$\\mathrm{e}^{2\\pi i/n}$).\n\n Examples\n ========\n\n >>> from sympy import QQ, Poly\n >>> from sympy.abc import x\n >>> f = Poly(x**2 - 2)\n >>> K = QQ.alg_field_from_poly(f)\n >>> K.ext.minpoly == f\n True\n >>> g = Poly(8*x**3 - 6*x - 1)\n >>> L = QQ.alg_field_from_poly(g, \"alpha\")\n >>> L.ext.minpoly == g\n True\n >>> L.to_sympy(L([1, 1, 1]))\n alpha**2 + alpha + 1\n\n ", "n_words": 154, "vocab_size": 107, "n_whitespaces": 397, "language": "en" } }, { "id": 60539, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/cli/parser.py", "file_name": "parser.py", "fun_name": "_update_defaults", "commit_message": "upd; format", "code": "def _update_defaults(self, defaults):\n # type: (Dict[str, Any]) -> Dict[str, Any]\n \n\n # Accumulate complex default state.\n self.values = optparse.Values(self.defaults)\n late_eval = set()\n # Then set the options with those values\n for key, val in self._get_ordered_configuration_items():\n # '--' because configuration supports only long names\n option = self.get_option(\"--\" + key)\n\n # Ignore options not present in this parser. E.g. non-globals put\n # in [global] by users that want them to apply to all applicable\n # commands.\n if option is None:\n continue\n\n assert option.dest is not None\n\n if option.action in (\"store_true\", \"store_false\"):\n try:\n val = strtobool(val)\n except ValueError:\n self.error(\n \"{} is not a valid value for {} option, \" # noqa\n \"please specify a boolean value like yes/no, \"\n \"true/false or 1/0 instead.\".format(val, key)\n )\n elif option.action == \"count\":\n with suppress(ValueError):\n val = strtobool(val)\n with suppress(ValueError):\n val = int(val)\n if not isinstance(val, int) or val < 0:\n self.error(\n \"{} is not a valid value for {} option, \" # noqa\n \"please instead specify either a non-negative integer \"\n \"or a boolean value like yes/no or false/true \"\n \"which is equivalent to 1/0.\".format(val, key)\n )\n elif option.action == \"append\":\n val = val.split()\n val = [self.check_default(option, key, v) for v in val]\n elif option.action == \"callback\":\n assert option.callback is not None\n late_eval.add(option.dest)\n opt_str = option.get_opt_string()\n val = option.convert_value(opt_str, val)\n # From take_action\n args = option.callback_args or ()\n kwargs = option.callback_kwargs or {}\n option.callback(option, opt_str, val, self, *args, **kwargs)\n else:\n val = self.check_default(option, key, val)\n\n defaults[option.dest] = val\n\n for key in late_eval:\n defaults[key] = getattr(self.values, key)\n self.values = None\n return defaults\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 1029, "n_words": 254, "vocab_size": 148, "complexity": 14, "nloc": 47, "token_counts": 308, "n_ast_nodes": 518, "n_identifiers": 35, "d_id": 12199, "documentation": { "docstring": "Updates the given defaults with values from the config files and\n the environ. 
Does a little special handling for certain types of\n options (lists).", "n_words": 24, "vocab_size": 22, "n_whitespaces": 37, "language": "en" } }, { "id": 46452, "commit_id": "18da1217d7ae593ff33c681353b027fac9252523", "repo": "airflow", "path": "tests/dag_processing/test_manager.py", "file_name": "test_manager.py", "fun_name": "test_start_new_processes_with_same_filepath", "commit_message": "Replace timedelta.max with year long timdelta in test_manager (#22527)\n\nTimedelta.max used in tests is not realistic and in some\r\ncircumstances, when it is added to date, it might cause\r\ndate OverflowError. Using long (but not 999999999 days long)\r\ntimedelta solves the problem.", "code": "def test_start_new_processes_with_same_filepath(self):\n \n manager = DagFileProcessorManager(\n dag_directory='directory',\n max_runs=1,\n processor_timeout=timedelta(days=365),\n signal_conn=MagicMock(),\n dag_ids=[],\n pickle_dags=False,\n async_mode=True,\n )\n\n file_1 = 'file_1.py'\n file_2 = 'file_2.py'\n file_3 = 'file_3.py'\n manager._file_path_queue = [file_1, file_2, file_3]\n\n # Mock that only one processor exists. This processor runs with 'file_1'\n manager._processors[file_1] = MagicMock()\n # Start New Processes\n manager.start_new_processes()\n\n # Because of the config: '[scheduler] parsing_processes = 2'\n # verify that only one extra process is created\n # and since a processor with 'file_1' already exists,\n # even though it is first in '_file_path_queue'\n # a new processor is created with 'file_2' and not 'file_1'.\n\n assert file_1 in manager._processors.keys()\n assert file_2 in manager._processors.keys()\n assert [file_3] == manager._file_path_queue\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 315, "n_words": 105, "vocab_size": 71, "complexity": 1, "nloc": 19, "token_counts": 110, "n_ast_nodes": 185, "n_identifiers": 21, "d_id": 8892, "documentation": { "docstring": "\n Test that when a processor already exist with a filepath, a new processor won't be created\n with that filepath. 
The filepath will just be removed from the list.\n ", "n_words": 28, "vocab_size": 22, "n_whitespaces": 50, "language": "en" } }, { "id": 265890, "commit_id": "9628dead07ccef9608b32906aa8194bc948e5a09", "repo": "netbox", "path": "netbox/netbox/search/backends.py", "file_name": "backends.py", "fun_name": "search", "commit_message": "Closes #10560: New global search (#10676)\n\n* Initial work on new search backend\r\n\r\n* Clean up search backends\r\n\r\n* Return only the most relevant result per object\r\n\r\n* Clear any pre-existing cached entries on cache()\r\n\r\n* #6003: Implement global search functionality for custom field values\r\n\r\n* Tweak field weights & document guidance\r\n\r\n* Extend search() to accept a lookup type\r\n\r\n* Move get_registry() out of SearchBackend\r\n\r\n* Enforce object permissions when returning search results\r\n\r\n* Add indexers for remaining models\r\n\r\n* Avoid calling remove() on non-cacheable objects\r\n\r\n* Use new search backend by default\r\n\r\n* Extend search backend to filter by object type\r\n\r\n* Clean up search view form\r\n\r\n* Enable specifying lookup logic\r\n\r\n* Add indexes for value field\r\n\r\n* Remove object type selector from search bar\r\n\r\n* Introduce SearchTable and enable HTMX for results\r\n\r\n* Enable pagination\r\n\r\n* Remove legacy search backend\r\n\r\n* Cleanup\r\n\r\n* Use a UUID for CachedValue primary key\r\n\r\n* Refactoring search methods\r\n\r\n* Define max search results limit\r\n\r\n* Extend reindex command to support specifying particular models\r\n\r\n* Add clear() and size to SearchBackend\r\n\r\n* Optimize bulk caching performance\r\n\r\n* Highlight matched portion of field value\r\n\r\n* Performance improvements for reindexing\r\n\r\n* Started on search tests\r\n\r\n* Cleanup & docs\r\n\r\n* Documentation updates\r\n\r\n* Clean up SearchIndex\r\n\r\n* Flatten search registry to register by app_label.model_name\r\n\r\n* Clean up search backend classes\r\n\r\n* Clean up RestrictedGenericForeignKey and RestrictedPrefetch\r\n\r\n* Resolve migrations conflict", "code": "def search(self, value, user=None, object_types=None, lookup=DEFAULT_LOOKUP_TYPE):\n \n raise NotImplementedError\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 2, "token_counts": 22, "n_ast_nodes": 33, "n_identifiers": 8, "d_id": 78230, "documentation": { "docstring": "\n Search cached object representations for the given value.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 286499, "commit_id": "46141766d7250671b7bc75872e2034afe4938374", "repo": "OpenBBTerminal", "path": "openbb_terminal/parent_classes.py", "file_name": "parent_classes.py", "fun_name": "call_load", "commit_message": "Sdk dates (#3354)\n\n* example changes in slopes\r\n\r\n* change lettering size and side bar capitalization\r\n\r\n* revert back to Fira\r\n\r\n* start automatic website generation\r\n\r\n* this was autogen\r\n\r\n* add examples to slopes model\r\n\r\n* generate slopes doc\r\n\r\n* change to _index.md\r\n\r\n* allow italic formatting\r\n\r\n* fix regex\r\n\r\n* option to regenerate paths\r\n\r\n* update alt docs\r\n\r\n* fix generate\r\n\r\n* update alt\r\n\r\n* fix generate\r\n\r\n* update common\r\n\r\n* target italic only for types\r\n\r\n* format alt\r\n\r\n* format italic common\r\n\r\n* add sig indentation\r\n\r\n* update sig indent alt\r\n\r\n* 
update common ident\r\n\r\n* add todo\r\n\r\n* generate docstrings for all menus\r\n\r\n* fix maxdd\r\n\r\n* fix returns font size\r\n\r\n* fix keys docs\r\n\r\n* fix more docstrings\r\n\r\n* escape literal symbols\r\n\r\n* escape literal symbols\r\n\r\n* reformat keys\r\n\r\n* format opt\r\n\r\n* remove literal escape\r\n\r\n* remove another literal escape\r\n\r\n* remove another literal escape\r\n\r\n* unindent returns\r\n\r\n* update docs return unindent\r\n\r\n* add comma in last arg\r\n\r\n* fix funcs without params\r\n\r\n* fix signature\r\n\r\n* compact some code\r\n\r\n* refactor some more code\r\n\r\n* refactor some code\r\n\r\n* some final cleanup\r\n\r\n* write docstrings\r\n\r\n* change main\r\n\r\n* move futures paths\r\n\r\n* generate futures docs\r\n\r\n* add external axes references\r\n\r\n* fix typo\r\n\r\n* revert to double docstring\r\n\r\n* fix small bug\r\n\r\n* remove docs folder\r\n\r\n* generate.py in website folder\r\n\r\n* add forecast to docs\r\n\r\n* clear some warnings\r\n\r\n* fix underscore\r\n\r\n* remove cite\r\n\r\n* refresh website docs\r\n\r\n* fix forecast docstrings\r\n\r\n* fix po\r\n\r\n* fix po docs and remove italic\r\n\r\n* fix more docstrings\r\n\r\n* remove last warning\r\n\r\n* codespell\r\n\r\n* flake8\r\n\r\n* exclude website contente from flake\r\n\r\n* noqa on optimizer\r\n\r\n* update website\r\n\r\n* fix mypy\r\n\r\n* remove setup from mypy\r\n\r\n* mypy to openbbterminal\r\n\r\n* update precommit\r\n\r\n* pylint\r\n\r\n* try to remove sdk loading issue\r\n\r\n* fix dates active command\r\n\r\n* fix crypto.change formats\r\n\r\n* fix eb formats\r\n\r\n* nonzero fix\r\n\r\n* format dates crypto.load\r\n\r\n* format supply transac\r\n\r\n* format hr altindex\r\n\r\n* format load crypto\r\n\r\n* regenerate docs\r\n\r\n* format ba trend dates\r\n\r\n* regenerate docs\r\n\r\n* format ba trend\r\n\r\n* candle defaults\r\n\r\n* fix sentiment test\r\n\r\n* remove unused import\r\n\r\n* shopt\r\n\r\n* shopt again\r\n\r\n* revert crypto helpers\r\n\r\n* test shopt\r\n\r\n* fix some tests\r\n\r\n* skip trending test\r\n\r\n* fix alcoin test\r\n\r\n* helpers\r\n\r\n* write docs\r\n\r\n* rewrite helper\r\n\r\nCo-authored-by: Jeroen Bouma ", "code": "def call_load(self, other_args):\n \n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"load\",\n description=,\n )\n parser.add_argument(\n \"-c\",\n \"--coin\",\n help=\"Coin to get. Must be coin symbol (e.g., btc, eth)\",\n dest=\"coin\",\n type=str,\n required=\"-h\" not in other_args,\n )\n\n parser.add_argument(\n \"-s\",\n \"--start\",\n type=valid_date,\n default=(datetime.now() - timedelta(days=1100)).strftime(\"%Y-%m-%d\"),\n dest=\"start\",\n help=\"The starting date (format YYYY-MM-DD) of the crypto\",\n )\n\n parser.add_argument(\n \"--exchange\",\n help=\"Exchange to search\",\n dest=\"exchange\",\n type=str,\n default=\"binance\",\n choices=self.exchanges,\n )\n\n parser.add_argument(\n \"-e\",\n \"--end\",\n type=valid_date,\n default=datetime.now().strftime(\"%Y-%m-%d\"),\n dest=\"end\",\n help=\"The ending date (format YYYY-MM-DD) of the crypto\",\n )\n parser.add_argument(\n \"-i\",\n \"--interval\",\n action=\"store\",\n dest=\"interval\",\n type=str,\n default=\"1440\",\n choices=[\"1\", \"5\", \"15\", \"30\", \"60\", \"240\", \"1440\", \"10080\", \"43200\"],\n help=\"The interval of the crypto\",\n )\n\n parser.add_argument(\n \"--vs\",\n help=\"Quote currency (what to view coin vs). e.g., usdc, usdt, ... 
if source is ccxt, usd, eur, ... otherwise\", # noqa\n dest=\"vs\",\n default=\"usdt\",\n type=str,\n )\n\n if other_args and \"-\" not in other_args[0][0]:\n other_args.insert(0, \"-c\")\n\n ns_parser = self.parse_known_args_and_warn(\n parser, other_args, export_allowed=EXPORT_ONLY_RAW_DATA_ALLOWED\n )\n\n if ns_parser:\n if ns_parser.source in (\"YahooFinance\", \"CoinGecko\"):\n if ns_parser.vs == \"usdt\":\n ns_parser.vs = \"usd\"\n (self.current_df) = cryptocurrency_helpers.load(\n symbol=ns_parser.coin.lower(),\n vs_currency=ns_parser.vs,\n end_date=ns_parser.end.strftime(\"%Y-%m-%d\"),\n start_date=ns_parser.start.strftime(\"%Y-%m-%d\"),\n interval=ns_parser.interval,\n source=ns_parser.source,\n exchange=ns_parser.exchange,\n )\n if not self.current_df.empty:\n self.vs = ns_parser.vs\n self.exchange = ns_parser.exchange\n self.source = ns_parser.source\n self.current_interval = ns_parser.interval\n self.current_currency = ns_parser.vs\n self.symbol = ns_parser.coin.lower()\n cryptocurrency_helpers.show_quick_performance(\n self.current_df,\n self.symbol,\n self.current_currency,\n ns_parser.source,\n ns_parser.exchange,\n self.current_interval,\n )\n export_data(\n ns_parser.export,\n os.path.dirname(os.path.abspath(__file__)),\n \"load\",\n self.current_df.copy(),\n )\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 1328, "n_words": 198, "vocab_size": 141, "complexity": 7, "nloc": 99, "token_counts": 486, "n_ast_nodes": 791, "n_identifiers": 59, "d_id": 85834, "documentation": { "docstring": "Process load command.Load crypto currency to perform analysis on.\n Yahoo Finance is used as default source.\n Other sources can be used such as 'ccxt' or 'cg' with --source.\n If you select 'ccxt', you can then select any exchange with --exchange.\n You can also select a specific interval with --interval.", "n_words": 49, "vocab_size": 40, "n_whitespaces": 92, "language": "en" } }, { "id": 45221, "commit_id": "59c450ee5425a2d23ef813dbf219cde14df7c85c", "repo": "airflow", "path": "airflow/hooks/dbapi.py", "file_name": "dbapi.py", "fun_name": "get_uri", "commit_message": "Make DbApiHook use get_uri from Connection (#21764)\n\nDBApi has its own get_uri method which does not deal\r\nwith quoting properly and neither with empty passwords.\r\nConnection also has a get_uri method that deals properly\r\nwith the above issues.\r\n\r\nThis also fixes issues with RFC compliancy.", "code": "def get_uri(self) -> str:\n \n conn = self.get_connection(getattr(self, self.conn_name_attr))\n conn.schema = self.__schema or conn.schema\n return conn.get_uri()\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 43, "n_words": 15, "vocab_size": 13, "complexity": 2, "nloc": 9, "token_counts": 40, "n_ast_nodes": 66, "n_identifiers": 9, "d_id": 8509, "documentation": { "docstring": "\n Extract the URI from the connection.\n\n :return: the extracted uri.\n ", "n_words": 10, "vocab_size": 8, "n_whitespaces": 32, "language": "en" } }, { "id": 202376, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/csrf_tests/tests.py", "file_name": "tests.py", "fun_name": "test_reading_post_data_raises_unreadable_post_error", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_reading_post_data_raises_unreadable_post_error(self):\n \n req = 
self._get_POST_request_with_token()\n mw = CsrfViewMiddleware(post_form_view)\n mw.process_request(req)\n resp = mw.process_view(req, post_form_view, (), {})\n self.assertIsNone(resp)\n\n req = self._get_POST_request_with_token(request_class=PostErrorRequest)\n req.post_error = UnreadablePostError(\"Error reading input data.\")\n mw.process_request(req)\n with self.assertLogs(\"django.security.csrf\", \"WARNING\") as cm:\n resp = mw.process_view(req, post_form_view, (), {})\n self.assertEqual(resp.status_code, 403)\n self.assertEqual(\n cm.records[0].getMessage(),\n \"Forbidden (%s): \" % REASON_CSRF_TOKEN_MISSING,\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 171, "n_words": 47, "vocab_size": 35, "complexity": 1, "nloc": 16, "token_counts": 129, "n_ast_nodes": 214, "n_identifiers": 22, "d_id": 50092, "documentation": { "docstring": "\n An UnreadablePostError raised while reading the POST data should be\n handled by the middleware.\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 36, "language": "en" } }, { "id": 122213, "commit_id": "78ed03c4c2970e5e0d11f14a8d4fc968a4efbca2", "repo": "jax", "path": "jax/_src/lax/lax.py", "file_name": "lax.py", "fun_name": "broadcast_shapes", "commit_message": "[typing] add annotations to jax.numpy.linalg", "code": "def broadcast_shapes(*shapes):\n \n # NOTE: We have both cached and uncached versions to handle Tracers in shapes.\n try:\n return _broadcast_shapes_cached(*shapes)\n except:\n return _broadcast_shapes_uncached(*shapes)\n\n@cache()", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "@cache()", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 32, "n_words": 23, "vocab_size": 22, "complexity": 2, "nloc": 5, "token_counts": 23, "n_ast_nodes": 52, "n_identifiers": 5, "d_id": 27122, "documentation": { "docstring": "Returns the shape that results from NumPy broadcasting of `shapes`.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 149761, "commit_id": "fc837c4daa27a18ff0e86128f4d52089b88fa5fb", "repo": "freqtrade", "path": "freqtrade/freqai/freqai_interface.py", "file_name": "freqai_interface.py", "fun_name": "fit", "commit_message": "add freqao backend machinery, user interface, documentation", "code": "def fit(self) -> Any:\n \n\n return None\n ", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 24, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 10, "token_counts": 10, "n_ast_nodes": 19, "n_identifiers": 3, "d_id": 34519, "documentation": { "docstring": "\n Most regressors use the same function names and arguments e.g. 
user \n can drop in LGBMRegressor in place of CatBoostRegressor and all data\n management will be properly handled by Freqai.\n :params:\n :data_dictionary: the dictionary constructed by DataHandler to hold \n all the training and test data/labels.\n ", "n_words": 44, "vocab_size": 37, "n_whitespaces": 96, "language": "en" } }, { "id": 227351, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_layout.py", "file_name": "_layout.py", "fun_name": "sliders", "commit_message": "switch to black .22", "code": "def sliders(self):\n \n return self[\"sliders\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 59024, "documentation": { "docstring": "\n The 'sliders' property is a tuple of instances of\n Slider that may be specified as:\n - A list or tuple of instances of plotly.graph_objs.layout.Slider\n - A list or tuple of dicts of string/value properties that\n will be passed to the Slider constructor\n\n Supported dict properties:\n\n active\n Determines which button (by index starting from\n 0) is considered active.\n activebgcolor\n Sets the background color of the slider grip\n while dragging.\n bgcolor\n Sets the background color of the slider.\n bordercolor\n Sets the color of the border enclosing the\n slider.\n borderwidth\n Sets the width (in px) of the border enclosing\n the slider.\n currentvalue\n :class:`plotly.graph_objects.layout.slider.Curr\n entvalue` instance or dict with compatible\n properties\n font\n Sets the font of the slider step labels.\n len\n Sets the length of the slider This measure\n excludes the padding of both ends. That is, the\n slider's length is this length minus the\n padding on both ends.\n lenmode\n Determines whether this slider length is set in\n units of plot \"fraction\" or in *pixels. Use\n `len` to set the value.\n minorticklen\n Sets the length in pixels of minor step tick\n marks\n name\n When used in a template, named items are\n created in the output figure in addition to any\n items the figure already has in this array. You\n can modify these items in the output figure by\n making your own item with `templateitemname`\n matching this `name` alongside your\n modifications (including `visible: false` or\n `enabled: false` to hide it). Has no effect\n outside of a template.\n pad\n Set the padding of the slider component along\n each side.\n steps\n A tuple of :class:`plotly.graph_objects.layout.\n slider.Step` instances or dicts with compatible\n properties\n stepdefaults\n When used in a template (as\n layout.template.layout.slider.stepdefaults),\n sets the default property values to use for\n elements of layout.slider.steps\n templateitemname\n Used to refer to a named item in this array in\n the template. Named items from the template\n will be created even without a matching item in\n the input figure, but you can modify one by\n making an item with `templateitemname` matching\n its `name`, alongside your modifications\n (including `visible: false` or `enabled: false`\n to hide it). 
If there is no template or no\n matching item, this item will be hidden unless\n you explicitly show it with `visible: true`.\n tickcolor\n Sets the color of the border enclosing the\n slider.\n ticklen\n Sets the length in pixels of step tick marks\n tickwidth\n Sets the tick width (in px).\n transition\n :class:`plotly.graph_objects.layout.slider.Tran\n sition` instance or dict with compatible\n properties\n visible\n Determines whether or not the slider is\n visible.\n x\n Sets the x position (in normalized coordinates)\n of the slider.\n xanchor\n Sets the slider's horizontal position anchor.\n This anchor binds the `x` position to the\n \"left\", \"center\" or \"right\" of the range\n selector.\n y\n Sets the y position (in normalized coordinates)\n of the slider.\n yanchor\n Sets the slider's vertical position anchor This\n anchor binds the `y` position to the \"top\",\n \"middle\" or \"bottom\" of the range selector.\n\n Returns\n -------\n tuple[plotly.graph_objs.layout.Slider]\n ", "n_words": 479, "vocab_size": 216, "n_whitespaces": 2252, "language": "en" } }, { "id": 211846, "commit_id": "630304e0b66c0528ecaa3bf2e88b44a14b7f3383", "repo": "PaddleDetection", "path": "ppdet/utils/download.py", "file_name": "download.py", "fun_name": "get_dataset_path", "commit_message": "fix auto download logger info (#7550)", "code": "def get_dataset_path(path, annotation, image_dir):\n \n if _dataset_exists(path, annotation, image_dir):\n return path\n\n data_name = os.path.split(path.strip().lower())[-1]\n if data_name not in DOWNLOAD_DATASETS_LIST:\n raise ValueError(\n \"Dataset {} is not valid for reason above, please check again.\".\n format(osp.realpath(path)))\n else:\n logger.WARNING(\n \"Dataset {} is not valid for reason above, try searching {} or \"\n \"downloading dataset...\".format(osp.realpath(path), DATASET_HOME))\n\n for name, dataset in DATASETS.items():\n if data_name == name:\n logger.debug(\"Parse dataset_dir {} as dataset \"\n \"{}\".format(path, name))\n data_dir = osp.join(DATASET_HOME, name)\n\n if name == \"spine_coco\":\n if _dataset_exists(data_dir, annotation, image_dir):\n return data_dir\n\n # For voc, only check dir VOCdevkit/VOC2012, VOCdevkit/VOC2007\n if name in ['voc', 'fruit', 'roadsign_voc']:\n exists = True\n for sub_dir in dataset[1]:\n check_dir = osp.join(data_dir, sub_dir)\n if osp.exists(check_dir):\n logger.info(\"Found {}\".format(check_dir))\n else:\n exists = False\n if exists:\n return data_dir\n\n # voc exist is checked above, voc is not exist here\n check_exist = name != 'voc' and name != 'fruit' and name != 'roadsign_voc'\n for url, md5sum in dataset[0]:\n get_path(url, data_dir, md5sum, check_exist)\n\n # voc should create list after download\n if name == 'voc':\n create_voc_list(data_dir)\n return data_dir\n\n raise ValueError(\"Dataset automaticly downloading Error.\")\n\n", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 639, "n_words": 170, "vocab_size": 102, "complexity": 15, "nloc": 37, "token_counts": 253, "n_ast_nodes": 424, "n_identifiers": 34, "d_id": 53148, "documentation": { "docstring": "\n If path exists, return path.\n Otherwise, get dataset path from DATASET_HOME, if not exists,\n download it.\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 29, "language": "en" } }, { "id": 218183, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/importlib/abc.py", "file_name": 
"abc.py", "fun_name": "is_package", "commit_message": "add python 3.10.4 for windows", "code": "def is_package(self, fullname):\n \n raise ImportError\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 18, "n_identifiers": 4, "d_id": 55185, "documentation": { "docstring": "Optional method which when implemented should return whether the\n module is a package. The fullname is a str. Returns a bool.\n\n Raises ImportError if the module cannot be found.\n ", "n_words": 29, "vocab_size": 24, "n_whitespaces": 52, "language": "en" } }, { "id": 201749, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/backends/postgresql/tests.py", "file_name": "tests.py", "fun_name": "test_connect_and_rollback", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_connect_and_rollback(self):\n \n new_connection = connection.copy()\n try:\n # Ensure the database default time zone is different than\n # the time zone in new_connection.settings_dict. We can\n # get the default time zone by reset & show.\n with new_connection.cursor() as cursor:\n cursor.execute(\"RESET TIMEZONE\")\n cursor.execute(\"SHOW TIMEZONE\")\n db_default_tz = cursor.fetchone()[0]\n new_tz = \"Europe/Paris\" if db_default_tz == \"UTC\" else \"UTC\"\n new_connection.close()\n\n # Invalidate timezone name cache, because the setting_changed\n # handler cannot know about new_connection.\n del new_connection.timezone_name\n\n # Fetch a new connection with the new_tz as default\n # time zone, run a query and rollback.\n with self.settings(TIME_ZONE=new_tz):\n new_connection.set_autocommit(False)\n new_connection.rollback()\n\n # Now let's see if the rollback rolled back the SET TIME ZONE.\n with new_connection.cursor() as cursor:\n cursor.execute(\"SHOW TIMEZONE\")\n tz = cursor.fetchone()[0]\n self.assertEqual(new_tz, tz)\n\n finally:\n new_connection.close()\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 448, "n_words": 119, "vocab_size": 79, "complexity": 3, "nloc": 19, "token_counts": 125, "n_ast_nodes": 237, "n_identifiers": 18, "d_id": 49987, "documentation": { "docstring": "\n PostgreSQL shouldn't roll back SET TIME ZONE, even if the first\n transaction is rolled back (#17062).\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 38, "language": "en" } }, { "id": 57431, "commit_id": "2bff1047c0c183ec79e606b9a1c4ac966e23c8d1", "repo": "prefect", "path": "src/prefect/infrastructure/docker.py", "file_name": "docker.py", "fun_name": "_get_container_name", "commit_message": "Add tests for docker container", "code": "def _get_container_name(self) -> Optional[str]:\n \n # Must match `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+` in the end\n if not self.name:\n return None\n\n return (\n slugify(\n self.name,\n lowercase=False,\n # Docker does not limit length but URL limits apply eventually so\n # limit the length for safety\n max_length=250,\n # Docker allows these characters for container names\n regex_pattern=r\"[^a-zA-Z0-9_.-]+\",\n ).lstrip(\n # Docker does not allow leading underscore, dash, or period\n \"_-.\"\n )\n # Docker does not allow 0 character names so cast to null if the name is\n # empty after slufification\n or None\n )\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": 
"Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 332, "n_words": 85, "vocab_size": 58, "complexity": 3, "nloc": 18, "token_counts": 49, "n_ast_nodes": 85, "n_identifiers": 10, "d_id": 11652, "documentation": { "docstring": "\n Generates a container name to match the configured name, ensuring it is Docker\n compatible.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 36, "language": "en" } }, { "id": 140551, "commit_id": "905258dbc19753c81039f993477e7ab027960729", "repo": "ray", "path": "python/ray/util/dask/scheduler.py", "file_name": "scheduler.py", "fun_name": "ray_dask_get_sync", "commit_message": "Clean up docstyle in python modules and add LINT rule (#25272)", "code": "def ray_dask_get_sync(dsk, keys, **kwargs):\n \n\n ray_callbacks = kwargs.pop(\"ray_callbacks\", None)\n persist = kwargs.pop(\"ray_persist\", False)\n\n with local_ray_callbacks(ray_callbacks) as ray_callbacks:\n # Unpack the Ray-specific callbacks.\n (\n ray_presubmit_cbs,\n ray_postsubmit_cbs,\n ray_pretask_cbs,\n ray_posttask_cbs,\n ray_postsubmit_all_cbs,\n ray_finish_cbs,\n ) = unpack_ray_callbacks(ray_callbacks)\n # NOTE: We hijack Dask's `get_async` function, injecting a different\n # task executor.\n object_refs = get_async(\n _apply_async_wrapper(\n apply_sync,\n _rayify_task_wrapper,\n ray_presubmit_cbs,\n ray_postsubmit_cbs,\n ray_pretask_cbs,\n ray_posttask_cbs,\n ),\n 1,\n dsk,\n keys,\n **kwargs,\n )\n if ray_postsubmit_all_cbs is not None:\n for cb in ray_postsubmit_all_cbs:\n cb(object_refs, dsk)\n # NOTE: We explicitly delete the Dask graph here so object references\n # are garbage-collected before this function returns, i.e. before all\n # Ray tasks are done. Otherwise, no intermediate objects will be\n # cleaned up until all Ray tasks are done.\n del dsk\n if persist:\n result = object_refs\n else:\n result = ray_get_unpack(object_refs)\n if ray_finish_cbs is not None:\n for cb in ray_finish_cbs:\n cb(result)\n\n return result\n\n\n@dataclass", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@dataclass", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 563, "n_words": 137, "vocab_size": 99, "complexity": 6, "nloc": 38, "token_counts": 138, "n_ast_nodes": 219, "n_identifiers": 24, "d_id": 32022, "documentation": { "docstring": "\n A synchronous Dask-Ray scheduler. This scheduler will send top-level\n (non-inlined) Dask tasks to a Ray cluster for execution. The scheduler will\n wait for the tasks to finish executing, fetch the results, and repackage\n them into the appropriate Dask collections. 
This particular scheduler\n submits Ray tasks synchronously, which can be useful for debugging.\n\n This can be passed directly to `dask.compute()`, as the scheduler:\n\n >>> dask.compute(obj, scheduler=ray_dask_get_sync)\n\n You can override the currently active global Dask-Ray callbacks (e.g.\n supplied via a context manager):\n\n >>> dask.compute(\n obj,\n scheduler=ray_dask_get_sync,\n ray_callbacks=some_ray_dask_callbacks,\n )\n\n Args:\n dsk: Dask graph, represented as a task DAG dictionary.\n keys (List[str]): List of Dask graph keys whose values we wish to\n compute and return.\n\n Returns:\n Computed values corresponding to the provided keys.\n ", "n_words": 119, "vocab_size": 86, "n_whitespaces": 231, "language": "en" } }, { "id": 308346, "commit_id": "81aff973ea421e848d2f3e084f123bf108bd808e", "repo": "core", "path": "homeassistant/components/camera/__init__.py", "file_name": "__init__.py", "fun_name": "async_refresh_providers", "commit_message": "Keep entity state management within entity (#63183)\n\nSimplify the entity state management for webrtc providers, incurring\r\nextra state writes on startup. Followup post-review comments for PR #62962", "code": "async def async_refresh_providers(self) -> None:\n \n old_state = self._rtsp_to_webrtc\n self._rtsp_to_webrtc = await self._async_use_rtsp_to_webrtc()\n if old_state != self._rtsp_to_webrtc:\n self.async_write_ha_state()\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 56, "n_words": 17, "vocab_size": 14, "complexity": 2, "nloc": 12, "token_counts": 35, "n_ast_nodes": 62, "n_identifiers": 6, "d_id": 107106, "documentation": { "docstring": "Determine if any of the registered providers are suitable for this entity.\n\n This affects state attributes, so it should be invoked any time the registered\n providers or inputs to the state attributes change.\n\n Returns True if any state was updated (and needs to be written)\n ", "n_words": 45, "vocab_size": 34, "n_whitespaces": 73, "language": "en" } }, { "id": 131426, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tests/test_client_proxy.py", "file_name": "test_client_proxy.py", "fun_name": "test_runtime_install_error_message", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def test_runtime_install_error_message(call_ray_start):\n \n with pytest.raises(ConnectionAbortedError) as excinfo:\n ray.client(\"localhost:25031\").env({\"pip\": [\"ray-this-doesnt-exist\"]}).connect()\n assert \"No matching distribution found for ray-this-doesnt-exist\" in str(\n excinfo.value\n ), str(excinfo.value)\n\n ray.util.disconnect()\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 50, "n_words": 21, "vocab_size": 21, "complexity": 1, "nloc": 7, "token_counts": 60, "n_ast_nodes": 110, "n_identifiers": 14, "d_id": 29523, "documentation": { "docstring": "\n Check that an error while preparing the runtime environment for the client\n server yields an actionable, clear error on the *client side*.\n ", "n_words": 22, "vocab_size": 18, "n_whitespaces": 32, "language": "en" } }, { "id": 196757, "commit_id": "ad766d1c02943e86f50559abfd0c72e582c9ca6a", "repo": "sympy", "path": "sympy/assumptions/assume.py", "file_name": "assume.py", "fun_name": "add_handler", "commit_message": 
"Update the AskHandler deprecation warnings\n\nn.b., the issue number in the original warning message was wrong. It should\nhave been #20837.", "code": "def add_handler(self, handler):\n sympy_deprecation_warning(\n ,\n deprecated_since_version=\"1.8\",\n active_deprecations_target='deprecated-askhandler',\n )\n self.handlers.append(handler)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 62, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 10, "token_counts": 28, "n_ast_nodes": 48, "n_identifiers": 8, "d_id": 48153, "documentation": { "docstring": "\n The AskHandler system is deprecated. Predicate.add_handler()\n should be replaced with the multipledispatch handler of Predicate.\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 49, "language": "en" } }, { "id": 269012, "commit_id": "8ecef127f70db723c158dbe9ed3268b3d610ab55", "repo": "keras", "path": "keras/optimizers/__init__.py", "file_name": "__init__.py", "fun_name": "deserialize", "commit_message": "Remove experimental Keras mixed precision API.\n\nThe non-experimental mixed precision API was added in TensorFlow 2.4, and since then the experimental API has been deprecated. This change removes the experimental API.\n\nDeserializing the now-removed PolicyV1 and LossScaleOptimizerV1 classes is still supported, if they were serialized with get_config() prior to this change. These classes are deserialized into the non-experimental Policy and LossScaleOptimizer classes, which has been the case since TensorFlow 2.4. Eventually, support for deserializing these classes may be removed.\n\nPiperOrigin-RevId: 429410341", "code": "def deserialize(config, custom_objects=None):\n \n # loss_scale_optimizer has a direct dependency of optimizer, import here\n # rather than top to avoid the cyclic dependency.\n from keras.mixed_precision import loss_scale_optimizer # pylint: disable=g-import-not-at-top\n all_classes = {\n 'adadelta': adadelta_v2.Adadelta,\n 'adagrad': adagrad_v2.Adagrad,\n 'adam': adam_v2.Adam,\n 'adamax': adamax_v2.Adamax,\n 'experimentaladadelta': adadelta_experimental.Adadelta,\n 'experimentaladagrad': adagrad_experimental.Adagrad,\n 'experimentaladam': adam_experimental.Adam,\n 'experimentalsgd': sgd_experimental.SGD,\n 'nadam': nadam_v2.Nadam,\n 'rmsprop': rmsprop_v2.RMSprop,\n 'sgd': gradient_descent_v2.SGD,\n 'ftrl': ftrl.Ftrl,\n 'lossscaleoptimizer': loss_scale_optimizer.LossScaleOptimizer,\n 'lossscaleoptimizerv3': loss_scale_optimizer.LossScaleOptimizerV3,\n # LossScaleOptimizerV1 was an old version of LSO that was removed.\n # Deserializing it turns it into a LossScaleOptimizer\n 'lossscaleoptimizerv1': loss_scale_optimizer.LossScaleOptimizer,\n }\n\n # Make deserialization case-insensitive for built-in optimizers.\n if config['class_name'].lower() in all_classes:\n config['class_name'] = config['class_name'].lower()\n return deserialize_keras_object(\n config,\n module_objects=all_classes,\n custom_objects=custom_objects,\n printable_module_name='optimizer')\n\n\n@keras_export('keras.optimizers.get')", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export('keras.optimizers.get')", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 220, "n_words": 103, "vocab_size": 89, "complexity": 2, "nloc": 26, "token_counts": 152, "n_ast_nodes": 271, "n_identifiers": 34, "d_id": 79830, "documentation": { "docstring": "Inverse of the `serialize` function.\n\n Args:\n config: Optimizer 
configuration dictionary.\n custom_objects: Optional dictionary mapping names (strings) to custom\n objects (classes and functions) to be considered during deserialization.\n\n Returns:\n A Keras Optimizer instance.\n ", "n_words": 32, "vocab_size": 30, "n_whitespaces": 57, "language": "en" } }, { "id": 71045, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/admin/admin_url_finder.py", "file_name": "admin_url_finder.py", "fun_name": "construct_edit_url", "commit_message": "Reformat with black", "code": "def construct_edit_url(self, instance):\n \n if self.edit_url_name is None:\n raise ImproperlyConfigured(\n \"%r must define edit_url_name or override construct_edit_url\"\n % type(self)\n )\n return reverse(self.edit_url_name, args=(quote(instance.pk),))\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 95, "n_words": 22, "vocab_size": 22, "complexity": 2, "nloc": 7, "token_counts": 44, "n_ast_nodes": 72, "n_identifiers": 10, "d_id": 15608, "documentation": { "docstring": "\n Return the edit URL for the given instance - regardless of whether the user can access it -\n or None if no edit URL is available.\n ", "n_words": 26, "vocab_size": 21, "n_whitespaces": 48, "language": "en" } }, { "id": 95747, "commit_id": "6b29955072b4fbed6d8843ae193d65509e288f8f", "repo": "sentry", "path": "tests/sentry/api/endpoints/test_organization_metrics.py", "file_name": "test_organization_metrics.py", "fun_name": "test_pagination_offset_without_orderby", "commit_message": "feat(metrics): Add pagination to OrganizationMetricsDataEndpoint [INGEST-851] (#31181)\n\n* feat(metrics): Add pagination to OrganizationMetricsDataEndpoint\r\n\r\nAdds new paginator class `MetricsDataSeriesPaginator`\r\nto add pagination to the response of api requests made\r\nto `OrganizationMetricsDataEndpoint`", "code": "def test_pagination_offset_without_orderby(self):\n \n response = self.get_response(\n self.organization.slug,\n field=\"count(sentry.transactions.measurements.lcp)\",\n datasource=\"snuba\",\n groupBy=\"transaction\",\n cursor=Cursor(0, 1),\n )\n assert response.status_code == 400\n print(response.json())\n assert response.json()[\"detail\"] == (\n \"'cursor' is only supported in combination with 'orderBy'\"\n )\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 145, "n_words": 30, "vocab_size": 27, "complexity": 1, "nloc": 13, "token_counts": 67, "n_ast_nodes": 113, "n_identifiers": 14, "d_id": 19228, "documentation": { "docstring": "\n Test that ensures an exception is raised when pagination `per_page` parameter is sent\n without order by being set\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 40, "language": "en" } }, { "id": 82407, "commit_id": "c1290c9ff89cb00caa5469129fd527e9d82cd820", "repo": "django-cms", "path": "cms/tests/test_nested_plugins.py", "file_name": "test_nested_plugins.py", "fun_name": "test_copy_page_nested_plugin", "commit_message": "ci: Added codespell (#7355)\n\nCo-authored-by: Christian Clauss \r\n\r\n* ci: codespell config taken from #7292", "code": "def test_copy_page_nested_plugin(self):\n \n with self.settings(CMS_PERMISSION=False):\n # setup page 1\n page_one = create_page(\n \"Three Placeholder\", \"col_three.html\", \"en\",\n position=\"last-child\", published=True, in_navigation=True\n )\n page_one_ph_one = 
page_one.placeholders.get(slot=\"col_sidebar\")\n page_one_ph_two = page_one.placeholders.get(slot=\"col_left\")\n page_one.placeholders.get(slot=\"col_right\")\n # add the text plugin to placeholder one\n text_plugin_en = add_plugin(page_one_ph_one, \"TextPlugin\", \"en\", body=\"Hello World\")\n self.assertEqual(text_plugin_en.id, CMSPlugin.objects.all()[0].id)\n self.assertEqual(text_plugin_en.get_children().count(), 0)\n pre_add_plugin_count = CMSPlugin.objects.count()\n self.assertEqual(pre_add_plugin_count, 1)\n ###\n # add a plugin to placeholder two\n ###\n pre_nesting_body = \"

    the nested text plugin with a link inside

    \"\n text_plugin_two = add_plugin(page_one_ph_two, \"TextPlugin\", \"en\", body=pre_nesting_body)\n text_plugin_two = self.reload(text_plugin_two)\n # prepare nesting plugin\n page_one_ph_two = self.reload(page_one_ph_two)\n text_plugin_two = self.reload(text_plugin_two)\n link_plugin = add_plugin(page_one_ph_two, \"LinkPlugin\", \"en\", target=text_plugin_two)\n link_plugin.name = \"django-cms Link\"\n link_plugin.external_link = \"https://www.django-cms.org\"\n link_plugin.parent = text_plugin_two\n link_plugin.save()\n\n link_plugin = self.reload(link_plugin)\n text_plugin_two = self.reload(text_plugin_two)\n in_txt = \n nesting_body = f\"{text_plugin_two.body}

    {(in_txt % (link_plugin.id))}

    \"\n # emulate the editor in admin that adds some txt for the nested plugin\n text_plugin_two.body = nesting_body\n text_plugin_two.save()\n text_plugin_two = self.reload(text_plugin_two)\n # the link is attached as a child?\n self.assertEqual(text_plugin_two.get_children().count(), 1)\n post_add_plugin_count = CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=True).count()\n self.assertEqual(post_add_plugin_count, 3)\n page_one.save()\n # get the plugins from the original page\n page_one = self.reload(page_one)\n page_one_ph_one = page_one.placeholders.get(slot=\"col_sidebar\")\n page_one_ph_two = page_one.placeholders.get(slot=\"col_left\")\n page_one_ph_three = page_one.placeholders.get(slot=\"col_right\")\n # verify that the plugins got created\n org_placeholder_one_plugins = page_one_ph_one.get_plugins()\n self.assertEqual(len(org_placeholder_one_plugins), 1)\n org_placeholder_two_plugins = page_one_ph_two.get_plugins()\n self.assertEqual(len(org_placeholder_two_plugins), 2)\n org_placeholder_three_plugins = page_one_ph_three.get_plugins()\n self.assertEqual(len(org_placeholder_three_plugins), 0)\n self.assertEqual(page_one.placeholders.count(), 3)\n placeholder_count = Placeholder.objects.filter(page__publisher_is_draft=True).count()\n self.assertEqual(placeholder_count, 3)\n self.assertEqual(CMSPlugin.objects.filter(placeholder__page__publisher_is_draft=True).count(), 3)\n ##\n # setup page_copy_target page\n ##\n page_copy_target = create_page(\n \"Three Placeholder - page copy target\", \"col_three.html\", \"en\",\n position=\"last-child\", published=True, in_navigation=True\n )\n all_page_count = Page.objects.drafts().count()\n pre_copy_placeholder_count = Placeholder.objects.filter(page__publisher_is_draft=True).count()\n self.assertEqual(pre_copy_placeholder_count, 6)\n # copy the page\n superuser = self.get_superuser()\n with self.login_user_context(superuser):\n page_two = self.copy_page(page_one, page_copy_target)\n # validate the expected pages,placeholders,plugins,pluginbodies\n after_copy_page_plugin_count = CMSPlugin.objects.filter(\n placeholder__page__publisher_is_draft=True\n ).count()\n\n self.assertEqual(after_copy_page_plugin_count, 6)\n # check the amount of copied stuff\n after_copy_page_count = Page.objects.drafts().count()\n after_copy_placeholder_count = Placeholder.objects.filter(\n page__publisher_is_draft=True\n ).count()\n self.assertGreater(after_copy_page_count, all_page_count, \"no new page after copy\")\n self.assertGreater(after_copy_page_plugin_count, post_add_plugin_count, \"plugin count is not grown\")\n self.assertGreater(\n after_copy_placeholder_count, pre_copy_placeholder_count,\n \"placeholder count is not grown\"\n )\n self.assertEqual(after_copy_page_count, 3, \"no new page after copy\")\n # original placeholder\n page_one = self.reload(page_one)\n page_one_ph_one = page_one.placeholders.get(slot=\"col_sidebar\")\n page_one_ph_two = page_one.placeholders.get(slot=\"col_left\")\n page_one_ph_three = page_one.placeholders.get(slot=\"col_right\")\n # check if there are multiple pages assigned to this placeholders\n found_page = page_one_ph_one.page if page_one_ph_one else None\n self.assertEqual(found_page, page_one)\n found_page = page_one_ph_two.page if page_one_ph_two else None\n self.assertEqual(found_page, page_one)\n found_page = page_one_ph_three.page if page_one_ph_three else None\n self.assertEqual(found_page, page_one)\n\n page_two = self.reload(page_two)\n page_two_ph_one = 
page_two.placeholders.get(slot=\"col_sidebar\")\n page_two_ph_two = page_two.placeholders.get(slot=\"col_left\")\n page_two_ph_three = page_two.placeholders.get(slot=\"col_right\")\n # check if there are multiple pages assigned to this placeholders\n found_page = page_two_ph_one.page if page_two_ph_one else None\n self.assertEqual(found_page, page_two)\n found_page = page_two_ph_two.page if page_two_ph_two else None\n self.assertEqual(found_page, page_two)\n found_page = page_two_ph_three.page if page_two_ph_three else None\n self.assertEqual(found_page, page_two)\n # check the stored placeholders org vs copy\n msg = 'placehoder ids copy:{} org:{} copied page {} are identical - tree broken'.format(\n page_two_ph_one.pk, page_one_ph_one.pk, page_two.pk\n )\n self.assertNotEqual(page_two_ph_one.pk, page_one_ph_one.pk, msg)\n msg = 'placehoder ids copy:{} org:{} copied page {} are identical - tree broken'.format(\n page_two_ph_two.pk, page_one_ph_two.pk, page_two.pk\n )\n self.assertNotEqual(page_two_ph_two.pk, page_one_ph_two.pk, msg)\n msg = 'placehoder ids copy:{} org:{} copied page {} are identical - tree broken'.format(\n page_two_ph_three.pk, page_one_ph_three.pk, page_two.pk\n )\n self.assertNotEqual(page_two_ph_three.pk, page_one_ph_three.pk, msg)\n # get the plugins from the original page\n org_placeholder_one_plugins = page_one_ph_one.get_plugins()\n self.assertEqual(len(org_placeholder_one_plugins), 1)\n org_placeholder_two_plugins = page_one_ph_two.get_plugins()\n self.assertEqual(len(org_placeholder_two_plugins), 2)\n org_placeholder_three_plugins = page_one_ph_three.get_plugins()\n self.assertEqual(len(org_placeholder_three_plugins), 0)\n # get the plugins from the copied page\n copied_placeholder_one_plugins = page_two_ph_one.get_plugins()\n self.assertEqual(len(copied_placeholder_one_plugins), 1)\n copied_placeholder_two_plugins = page_two_ph_two.get_plugins()\n self.assertEqual(len(copied_placeholder_two_plugins), 2)\n copied_placeholder_three_plugins = page_two_ph_three.get_plugins()\n self.assertEqual(len(copied_placeholder_three_plugins), 0)\n # verify the plugins got copied\n # placeholder 1\n count_plugins_copied = len(copied_placeholder_one_plugins)\n count_plugins_org = len(org_placeholder_one_plugins)\n msg = f\"plugin count {count_plugins_copied} {count_plugins_org} for placeholder one not equal\"\n self.assertEqual(count_plugins_copied, count_plugins_org, msg)\n # placeholder 2\n count_plugins_copied = len(copied_placeholder_two_plugins)\n count_plugins_org = len(org_placeholder_two_plugins)\n msg = f\"plugin count {count_plugins_copied} {count_plugins_org} for placeholder two not equal\"\n self.assertEqual(count_plugins_copied, count_plugins_org, msg)\n # placeholder 3\n count_plugins_copied = len(copied_placeholder_three_plugins)\n count_plugins_org = len(org_placeholder_three_plugins)\n msg = f\"plugin count {count_plugins_copied} {count_plugins_org} for placeholder three not equal\"\n self.assertEqual(count_plugins_copied, count_plugins_org, msg)\n # verify the body of text plugin with nested link plugin\n # org to copied\n org_nested_text_plugin = None\n # do this iteration to find the real text plugin with the attached link\n # the inheritance mechanism for the cmsplugins works through\n # (tuple)get_plugin_instance()\n for x in org_placeholder_two_plugins:\n if x.plugin_type == \"TextPlugin\":\n instance = x.get_plugin_instance()[0]\n if instance.body.startswith(pre_nesting_body):\n org_nested_text_plugin = instance\n break\n copied_nested_text_plugin = 
None\n for x in copied_placeholder_two_plugins:\n if x.plugin_type == \"TextPlugin\":\n instance = x.get_plugin_instance()[0]\n if instance.body.startswith(pre_nesting_body):\n copied_nested_text_plugin = instance\n break\n msg = \"original nested text plugin not found\"\n self.assertNotEqual(org_nested_text_plugin, None, msg=msg)\n msg = \"copied nested text plugin not found\"\n self.assertNotEqual(copied_nested_text_plugin, None, msg=msg)\n # get the children ids of the texplugin with a nested link\n # to check if the body of the text is generated correctly\n org_link_child_plugin = org_nested_text_plugin.get_children()[0]\n copied_link_child_plugin = copied_nested_text_plugin.get_children()[0]\n # validate the textplugin body texts\n msg = \"org plugin and copied plugin are the same\"\n self.assertTrue(org_link_child_plugin.id != copied_link_child_plugin.id, msg)\n needle = \"%s\"\n msg = \"child plugin id differs to parent in body\"\n # linked child is in body\n self.assertTrue(org_nested_text_plugin.body.find(needle % (org_link_child_plugin.id)) != -1, msg)\n msg = \"copy: child plugin id differs to parent in body\"\n self.assertTrue(copied_nested_text_plugin.body.find(needle % (copied_link_child_plugin.id)) != -1, msg)\n # really nothing else\n msg = \"child link plugin id differs to parent body\"\n self.assertTrue(org_nested_text_plugin.body.find(needle % (copied_link_child_plugin.id)) == -1, msg)\n msg = \"copy: child link plugin id differs to parent body\"\n self.assertTrue(copied_nested_text_plugin.body.find(needle % (org_link_child_plugin.id)) == -1, msg)\n # now reverse lookup the placeholders from the plugins\n org_placeholder = org_link_child_plugin.placeholder\n copied_placeholder = copied_link_child_plugin.placeholder\n msg = \"placeholder of the original plugin and copied plugin are the same\"\n ok = (org_placeholder.id != copied_placeholder.id)\n self.assertTrue(ok, msg)\n", "url": "https://github.com/django-cms/django-cms.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 3232, "n_words": 866, "vocab_size": 323, "complexity": 13, "nloc": 166, "token_counts": 1395, "n_ast_nodes": 2353, "n_identifiers": 92, "d_id": 17382, "documentation": { "docstring": "\n Test to verify that page copy with a nested plugin works\n page one - 3 placeholder\n col_sidebar: 1 text plugin\n col_left: 1 text plugin with nested link plugin\n col_right: no plugin\n page two (copy target)\n Verify copied page, placeholders, plugins and body text\n ", "n_words": 47, "vocab_size": 36, "n_whitespaces": 139, "language": "en" } }, { "id": 107482, "commit_id": "f156db08eee54d285ab0fb4e031e48d078ba6aa3", "repo": "matplotlib", "path": "lib/matplotlib/axis.py", "file_name": "axis.py", "fun_name": "tick_bottom", "commit_message": "DOC: More cleanup axes -> Axes", "code": "def tick_bottom(self):\n \n label = True\n if 'label1On' in self._major_tick_kw:\n label = (self._major_tick_kw['label1On']\n or self._major_tick_kw['label2On'])\n self.set_ticks_position('bottom')\n # If labels were turned off before this was called, leave them off.\n self.set_tick_params(which='both', labelbottom=label)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 103, "n_words": 30, "vocab_size": 28, "complexity": 3, "nloc": 7, "token_counts": 51, "n_ast_nodes": 93, "n_identifiers": 8, "d_id": 22771, "documentation": { "docstring": "\n Move ticks and ticklabels (if present) to the 
bottom of the Axes.\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 27, "language": "en" } }, { "id": 259456, "commit_id": "75a94f518f7bd7d0bf581ffb67d9f961e3c4efbc", "repo": "scikit-learn", "path": "sklearn/linear_model/_glm/tests/test_glm.py", "file_name": "test_glm.py", "fun_name": "test_sample_weights_validation", "commit_message": "ENH migrate GLMs / TweedieRegressor to linear loss (#22548)\n\nCo-authored-by: Olivier Grisel \r\nCo-authored-by: Thomas J. Fan ", "code": "def test_sample_weights_validation():\n \n # scalar value but not positive\n X = [[1]]\n y = [1]\n weights = 0\n glm = _GeneralizedLinearRegressor()\n\n # Positive weights are accepted\n glm.fit(X, y, sample_weight=1)\n\n # 2d array\n weights = [[0]]\n with pytest.raises(ValueError, match=\"must be 1D array or scalar\"):\n glm.fit(X, y, weights)\n\n # 1d but wrong length\n weights = [1, 0]\n msg = r\"sample_weight.shape == \\(2,\\), expected \\(1,\\)!\"\n with pytest.raises(ValueError, match=msg):\n glm.fit(X, y, weights)\n\n\n@pytest.mark.parametrize(\"fit_intercept\", [\"not bool\", 1, 0, [True]])", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"fit_intercept\", [\"not bool\", 1, 0, [True]])", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 131, "n_words": 73, "vocab_size": 52, "complexity": 1, "nloc": 13, "token_counts": 99, "n_ast_nodes": 197, "n_identifiers": 15, "d_id": 75786, "documentation": { "docstring": "Test the raised errors in the validation of sample_weight.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 243047, "commit_id": "406fe59242ad288bcd9f9fe663b227620eacd344", "repo": "Pillow", "path": "src/PIL/ImageFont.py", "file_name": "ImageFont.py", "fun_name": "getoffset", "commit_message": "deprecate font.getsize and related functions", "code": "def getoffset(self, text):\n \n deprecate(\"getoffset\", 10, \"getbbox\")\n return self.font.getsize(text)[1]\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 28, "n_ast_nodes": 48, "n_identifiers": 6, "d_id": 69961, "documentation": { "docstring": "\n Returns the offset of given text. This is the gap between the\n starting coordinate and the first marking. Note that this gap is\n included in the result of :py:func:`~PIL.ImageFont.FreeTypeFont.getsize`.\n\n :param text: Text to measure.\n\n :return: A tuple of the x and y offset\n ", "n_words": 43, "vocab_size": 32, "n_whitespaces": 86, "language": "en" } }, { "id": 154144, "commit_id": "4548012a6372b8ce79d7e07c9ae13fd7444a91c8", "repo": "modin", "path": "modin/core/io/column_stores/parquet_dispatcher.py", "file_name": "parquet_dispatcher.py", "fun_name": "build_index", "commit_message": "FIX-#4756: Correctly propagate `storage_options` in `read_parquet` (#4764)\n\nCo-authored-by: Yaroslav Igoshev \r\nCo-authored-by: Alexey Prutskov \r\nSigned-off-by: Karthik Velayutham ", "code": "def build_index(cls, path, partition_ids, index_columns, storage_options):\n \n from pyarrow.parquet import read_table\n\n range_index = True\n column_names_to_read = []\n for column in index_columns:\n # According to https://arrow.apache.org/docs/python/generated/pyarrow.Schema.html,\n # only RangeIndex will be stored as metadata. 
Otherwise, the default behavior is\n # to store the index as a column.\n if isinstance(column, str):\n column_names_to_read.append(column)\n range_index = False\n elif column[\"name\"] is not None:\n column_names_to_read.append(column[\"name\"])\n\n # For the second check, let us consider the case where we have an empty dataframe,\n # that has a valid index.\n if range_index or (len(partition_ids) == 0 and len(column_names_to_read) != 0):\n fs, fs_path = cls._get_fs_and_fs_path(path, storage_options)\n complete_index = (\n read_table(fs_path, columns=column_names_to_read, filesystem=fs)\n .to_pandas()\n .index\n )\n # Empty DataFrame case\n elif len(partition_ids) == 0:\n return [], False\n else:\n index_ids = [part_id[0][1] for part_id in partition_ids if len(part_id) > 0]\n index_objs = cls.materialize(index_ids)\n complete_index = index_objs[0].append(index_objs[1:])\n return complete_index, range_index or (len(index_columns) == 0)\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 446, "n_words": 140, "vocab_size": 106, "complexity": 11, "nloc": 24, "token_counts": 193, "n_ast_nodes": 307, "n_identifiers": 28, "d_id": 35809, "documentation": { "docstring": "\n Compute index and its split sizes of resulting Modin DataFrame.\n\n Parameters\n ----------\n path : Pathlike\n Path to dataset.\n partition_ids : list\n Array with references to the partitions data.\n index_columns : list\n List of index columns specified by pandas metadata.\n storage_options : dict\n Parameters for specific storage engine.\n\n Returns\n -------\n index : pandas.Index\n Index of resulting Modin DataFrame.\n needs_index_sync : bool\n Whether the partition indices need to be synced with frame\n index because there's no index column, or at least one\n index column is a RangeIndex.\n\n Notes\n -----\n See `build_partition` for more detail on the contents of partitions_ids.\n ", "n_words": 97, "vocab_size": 73, "n_whitespaces": 291, "language": "en" } }, { "id": 243602, "commit_id": "ccac8540771120bdeb570ec5b7bbfc4e3e9a38dd", "repo": "Pillow", "path": "Tests/test_imagegrab.py", "file_name": "test_imagegrab.py", "fun_name": "test_grabclipboard", "commit_message": "If available, use wl-paste for grabclipboard() on Linux", "code": "def test_grabclipboard(self):\n if sys.platform == \"darwin\":\n subprocess.call([\"screencapture\", \"-cx\"])\n elif sys.platform == \"win32\":\n p = subprocess.Popen([\"powershell\", \"-command\", \"-\"], stdin=subprocess.PIPE)\n p.stdin.write(\n b\n )\n p.communicate()\n else:\n if not shutil.which(\"wl-paste\"):\n with pytest.raises(NotImplementedError) as e:\n ImageGrab.grabclipboard()\n assert (\n str(e.value)\n == \"wl-paste is required for ImageGrab.grabclipboard() on Linux\"\n )\n return\n\n ImageGrab.grabclipboard()\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 266, "n_words": 45, "vocab_size": 38, "complexity": 4, "nloc": 22, "token_counts": 106, "n_ast_nodes": 191, "n_identifiers": 22, "d_id": 70057, "documentation": { "docstring": "[Reflection.Assembly]::LoadWithPartialName(\"System.Drawing\")\n[Reflection.Assembly]::LoadWithPartialName(\"System.Windows.Forms\")\n$bmp = New-Object Drawing.Bitmap 200, 200\n[Windows.Forms.Clipboard]::SetImage($bmp)", "n_words": 9, "vocab_size": 9, "n_whitespaces": 5, "language": "en" } }, { "id": 215810, "commit_id": 
"a35b29b2651bf33c5d5b45e64bc7765ffde4aff4", "repo": "salt", "path": "tests/pytests/functional/modules/file/test_symlink.py", "file_name": "test_symlink.py", "fun_name": "test_symlink_exists_different_force", "commit_message": "Add some funtional tests\n\nAdd functional tests for the following:\n- file.readlink\n- file.replace\n- file.symlink\n\nRemove unit tests for file.replace as they are duplicated in the added\nfunctional test", "code": "def test_symlink_exists_different_force(file, source):\n \n dif_source = source.parent / \"dif_source.txt\"\n target = source.parent / \"symlink.lnk\"\n target.symlink_to(dif_source)\n try:\n file.symlink(source, target, force=True)\n assert salt.utils.path.readlink(target) == str(source)\n finally:\n target.unlink()\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 63, "n_words": 24, "vocab_size": 21, "complexity": 2, "nloc": 9, "token_counts": 65, "n_ast_nodes": 110, "n_identifiers": 15, "d_id": 54183, "documentation": { "docstring": "\n Test symlink with an existing symlink to a different file with force=True\n Should destroy the existing symlink and generate a new one to the correct\n location\n ", "n_words": 26, "vocab_size": 19, "n_whitespaces": 39, "language": "en" } }, { "id": 149904, "commit_id": "682daa4e941abf2235e60d9ecd1ad029eec5d3c4", "repo": "freqtrade", "path": "freqtrade/exchange/common.py", "file_name": "common.py", "fun_name": "_reset_logging_mixin", "commit_message": "Reset logging mixin to avoid random test failure", "code": "def _reset_logging_mixin():\n \n global __logging_mixin\n __logging_mixin = LoggingMixin(logger)\n\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 16, "n_words": 7, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 13, "n_ast_nodes": 25, "n_identifiers": 4, "d_id": 34583, "documentation": { "docstring": "\n Reset global logging mixin - used in tests only.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 16, "language": "en" } }, { "id": 260443, "commit_id": "0206d3e08c0f0917ba2f1c65cb55569b97d9a9ba", "repo": "scikit-learn", "path": "sklearn/neural_network/_multilayer_perceptron.py", "file_name": "_multilayer_perceptron.py", "fun_name": "partial_fit", "commit_message": "MAINT validate parameters for MLPRregressor and MLPClassifier (#23789)\n\nCo-authored-by: jeremie du boisberranger ", "code": "def partial_fit(self, X, y):\n \n if not hasattr(self, \"coefs_\"):\n self._validate_params()\n\n return self._fit(X, y, incremental=True)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 45, "n_words": 13, "vocab_size": 13, "complexity": 2, "nloc": 4, "token_counts": 37, "n_ast_nodes": 60, "n_identifiers": 8, "d_id": 76253, "documentation": { "docstring": "Update the model with a single iteration over the given data.\n\n Parameters\n ----------\n X : {array-like, sparse matrix} of shape (n_samples, n_features)\n The input data.\n\n y : ndarray of shape (n_samples,)\n The target values.\n\n Returns\n -------\n self : object\n Trained MLP model.\n ", "n_words": 42, "vocab_size": 35, "n_whitespaces": 131, "language": "en" } }, { "id": 31489, "commit_id": "3eed5530ec74bb60ad9f8f612717d0f6ccf820f2", "repo": "transformers", "path": "src/transformers/tokenization_utils_base.py", "file_name": 
"tokenization_utils_base.py", "fun_name": "unk_token", "commit_message": "Fix properties of unset special tokens in non verbose mode (#17797)\n\nCo-authored-by: SaulLu <55560583+SaulLu@users.noreply.github.com>", "code": "def unk_token(self) -> str:\n \n if self._unk_token is None:\n if self.verbose:\n logger.error(\"Using unk_token, but it is not set yet.\")\n return None\n return str(self._unk_token)\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 80, "n_words": 22, "vocab_size": 19, "complexity": 3, "nloc": 9, "token_counts": 35, "n_ast_nodes": 61, "n_identifiers": 7, "d_id": 5764, "documentation": { "docstring": "\n `str`: Unknown token. Log an error if used while not having been set.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 28, "language": "en" } }, { "id": 233259, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/pointcloud/_marker.py", "file_name": "_marker.py", "fun_name": "sizemin", "commit_message": "switch to black .22", "code": "def sizemin(self):\n \n return self[\"sizemin\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 64703, "documentation": { "docstring": "\n Sets the minimum size (in px) of the rendered marker points,\n effective when the `pointcloud` shows a million or more points.\n\n The 'sizemin' property is a number and may be specified as:\n - An int or float in the interval [0.1, 2]\n\n Returns\n -------\n int|float\n ", "n_words": 45, "vocab_size": 40, "n_whitespaces": 104, "language": "en" } }, { "id": 242309, "commit_id": "8da80130dbc747f3954b4904247d26289fe722f9", "repo": "Pillow", "path": "src/PIL/ImageShow.py", "file_name": "ImageShow.py", "fun_name": "show_file", "commit_message": "In show_file, use os.remove to remove temporary images", "code": "def show_file(self, path=None, **options):\n \n if path is None:\n if \"file\" in options:\n warnings.warn(\n \"The 'file' argument is deprecated and will be removed in Pillow \"\n \"10 (2023-07-01). Use 'path' instead.\",\n DeprecationWarning,\n )\n path = options.pop(\"file\")\n else:\n raise TypeError(\"Missing required argument: 'path'\")\n subprocess.call([\"open\", \"-a\", \"Preview.app\", path])\n subprocess.Popen(\n [\n sys.executable,\n \"-c\",\n \"import os, sys, time;time.sleep(20);os.remove(sys.argv[1])\",\n path,\n ]\n )\n return 1\n\n\nif sys.platform == \"darwin\":\n register(MacViewer)\n\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 328, "n_words": 63, "vocab_size": 57, "complexity": 3, "nloc": 21, "token_counts": 81, "n_ast_nodes": 163, "n_identifiers": 17, "d_id": 69823, "documentation": { "docstring": "\n Display given file.\n\n Before Pillow 9.1.0, the first argument was ``file``. This is now deprecated,\n and will be removed in Pillow 10.0.0 (2023-07-01). 
``path`` should be used\n instead.\n ", "n_words": 28, "vocab_size": 26, "n_whitespaces": 64, "language": "en" } }, { "id": 43407, "commit_id": "e2f19505bf3622935480e80bee55bf5b6d80097b", "repo": "airflow", "path": "airflow/www/views.py", "file_name": "views.py", "fun_name": "confirm", "commit_message": "Upgrade FAB to 4.1.1 (#24399)\n\n* Upgrade FAB to 4.1.1\r\n\r\nThe Flask Application Builder have been updated recently to\r\nsupport a number of newer dependencies. This PR is the\r\nattempt to migrate FAB to newer version.\r\n\r\nThis includes:\r\n\r\n* update setup.py and setup.cfg upper and lower bounds to\r\n account for proper version of dependencies that\r\n FAB < 4.0.0 was blocking from upgrade\r\n* added typed Flask application retrieval with a custom\r\n application fields available for MyPy typing checks.\r\n* fix typing to account for typing hints added in multiple\r\n upgraded libraries optional values and content of request\r\n returned as Mapping\r\n* switch to PyJWT 2.* by using non-deprecated \"required\" claim as\r\n list rather than separate fields\r\n* add possibiliyt to install providers without constraints\r\n so that we could avoid errors on conflicting constraints when\r\n upgrade-to-newer-dependencies is used\r\n* add pre-commit to check that 2.4+ only get_airflow_app is not\r\n used in providers\r\n* avoid Bad Request in case the request sent to Flask 2.0 is not\r\n JSon content type\r\n* switch imports of internal classes to direct packages\r\n where classes are available rather than from \"airflow.models\" to\r\n satisfy MyPY\r\n* synchronize changes of FAB Security Manager 4.1.1 with our copy\r\n of the Security Manager.\r\n* add error handling for a few \"None\" cases detected by MyPY\r\n* corrected test cases that were broken by immutability of\r\n Flask 2 objects and better escaping done by Flask 2\r\n* updated test cases to account for redirection to \"path\" rather\r\n than full URL by Flask2\r\n\r\nFixes: #22397\r\n\r\n* fixup! 
Upgrade FAB to 4.1.1", "code": "def confirm(self):\n \n args = request.args\n dag_id = args.get('dag_id')\n task_id = args.get('task_id')\n dag_run_id = args.get('dag_run_id')\n state = args.get('state')\n origin = args.get('origin')\n\n if 'map_index' not in args:\n map_indexes: Optional[List[int]] = None\n else:\n map_indexes = args.getlist('map_index', type=int)\n\n upstream = to_boolean(args.get('upstream'))\n downstream = to_boolean(args.get('downstream'))\n future = to_boolean(args.get('future'))\n past = to_boolean(args.get('past'))\n origin = origin or url_for('Airflow.index')\n\n dag = get_airflow_app().dag_bag.get_dag(dag_id)\n if not dag:\n msg = f'DAG {dag_id} not found'\n return redirect_or_json(origin, msg, status='error', status_code=404)\n\n try:\n task = dag.get_task(task_id)\n except airflow.exceptions.TaskNotFound:\n msg = f\"Task {task_id} not found\"\n return redirect_or_json(origin, msg, status='error', status_code=404)\n\n task.dag = dag\n\n if state not in (\n 'success',\n 'failed',\n ):\n msg = f\"Invalid state {state}, must be either 'success' or 'failed'\"\n return redirect_or_json(origin, msg, status='error', status_code=400)\n\n latest_execution_date = dag.get_latest_execution_date()\n if not latest_execution_date:\n msg = f\"Cannot mark tasks as {state}, seem that dag {dag_id} has never run\"\n return redirect_or_json(origin, msg, status='error', status_code=400)\n\n if map_indexes is None:\n tasks: Union[List[Operator], List[Tuple[Operator, int]]] = [task]\n else:\n tasks = [(task, map_index) for map_index in map_indexes]\n\n to_be_altered = set_state(\n tasks=tasks,\n run_id=dag_run_id,\n upstream=upstream,\n downstream=downstream,\n future=future,\n past=past,\n state=state,\n commit=False,\n )\n\n if request.headers.get('Accept') == 'application/json':\n details = [str(t) for t in to_be_altered]\n return htmlsafe_json_dumps(details, separators=(',', ':'))\n\n details = \"\\n\".join(str(t) for t in to_be_altered)\n\n response = self.render_template(\n \"airflow/confirm.html\",\n endpoint=url_for(f'Airflow.{state}'),\n message=f\"Task instances you are about to mark as {state}:\",\n details=details,\n )\n\n return response\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 751, "n_words": 208, "vocab_size": 129, "complexity": 12, "nloc": 61, "token_counts": 430, "n_ast_nodes": 729, "n_identifiers": 57, "d_id": 7960, "documentation": { "docstring": "Show confirmation page for marking tasks as success or failed.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 268190, "commit_id": "89862fda3b4a427894061d90e2a96ad6efaf251c", "repo": "ansible", "path": "test/lib/ansible_test/_util/controller/sanity/pylint/plugins/deprecated.py", "file_name": "deprecated.py", "fun_name": "collection_name", "commit_message": "ansible-test - Sanity test code cleanup. 
(#78497)", "code": "def collection_name(self) -> t.Optional[str]:\n \n return self.config.collection_name\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 19, "n_ast_nodes": 32, "n_identifiers": 6, "d_id": 79442, "documentation": { "docstring": "Return the collection name, or None if ansible-core is being tested.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 150371, "commit_id": "8961b8d56042545b566d2ef5fea1cb34e2ebdb35", "repo": "freqtrade", "path": "freqtrade/freqai/freqai_interface.py", "file_name": "freqai_interface.py", "fun_name": "inference_timer", "commit_message": "merge in inference timer and historic predictions handling improvements.", "code": "def inference_timer(self, do='start'):\n \n if do == 'start':\n self.pair_it += 1\n self.begin_time = time.time()\n elif do == 'stop':\n end = time.time()\n self.inference_time += (end - self.begin_time)\n if self.pair_it == self.total_pairs:\n logger.info(\n f'Total time spent inferencing pairlist {self.inference_time:.2f} seconds')\n if self.inference_time > 0.25 * self.base_tf_seconds:\n logger.warning('Inference took over 25/% of the candle time. Reduce pairlist to'\n ' avoid blinding open trades and degrading performance.')\n self.pair_it = 0\n self.inference_time = 0\n return\n\n # Following methods which are overridden by user made prediction models.\n # See freqai/prediction_models/CatboostPredictionModel.py for an example.\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 307, "n_words": 86, "vocab_size": 69, "complexity": 5, "nloc": 16, "token_counts": 99, "n_ast_nodes": 180, "n_identifiers": 13, "d_id": 34720, "documentation": { "docstring": "\n Timer designed to track the cumulative time spent in FreqAI for one pass through\n the whitelist. 
This will check if the time spent is more than 1/4 the time\n of a single candle, and if so, it will warn the user of degraded performance\n ", "n_words": 44, "vocab_size": 34, "n_whitespaces": 73, "language": "en" } }, { "id": 206091, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/http/request.py", "file_name": "request.py", "fun_name": "fromkeys", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def fromkeys(cls, iterable, value=\"\", mutable=False, encoding=None):\n \n q = cls(\"\", mutable=True, encoding=encoding)\n for key in iterable:\n q.appendlist(key, value)\n if not mutable:\n q._mutable = False\n return q\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 82, "n_words": 25, "vocab_size": 23, "complexity": 3, "nloc": 7, "token_counts": 58, "n_ast_nodes": 91, "n_identifiers": 10, "d_id": 51360, "documentation": { "docstring": "\n Return a new QueryDict with keys (may be repeated) from an iterable and\n values from value.\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 38, "language": "en" } }, { "id": 65378, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/trial_balance/trial_balance.py", "file_name": "trial_balance.py", "fun_name": "get_rootwise_opening_balances", "commit_message": "style: format code with black", "code": "def get_rootwise_opening_balances(filters, report_type):\n\tadditional_conditions = \"\"\n\tif not filters.show_unclosed_fy_pl_balances:\n\t\tadditional_conditions = (\n\t\t\t\" and posting_date >= %(year_start_date)s\" if report_type == \"Profit and Loss\" else \"\"\n\t\t)\n\n\tif not flt(filters.with_period_closing_entry):\n\t\tadditional_conditions += \" and ifnull(voucher_type, '')!='Period Closing Voucher'\"\n\n\tif filters.cost_center:\n\t\tlft, rgt = frappe.db.get_value(\"Cost Center\", filters.cost_center, [\"lft\", \"rgt\"])\n\t\tadditional_conditions += % (\n\t\t\tlft,\n\t\t\trgt,\n\t\t)\n\n\tif filters.project:\n\t\tadditional_conditions += \" and project = %(project)s\"\n\n\tif filters.finance_book:\n\t\tfb_conditions = \" AND finance_book = %(finance_book)s\"\n\t\tif filters.include_default_book_entries:\n\t\t\tfb_conditions = (\n\t\t\t\t\" AND (finance_book in (%(finance_book)s, %(company_fb)s, '') OR finance_book IS NULL)\"\n\t\t\t)\n\n\t\tadditional_conditions += fb_conditions\n\n\taccounting_dimensions = get_accounting_dimensions(as_list=False)\n\n\tquery_filters = {\n\t\t\"company\": filters.company,\n\t\t\"from_date\": filters.from_date,\n\t\t\"report_type\": report_type,\n\t\t\"year_start_date\": filters.year_start_date,\n\t\t\"project\": filters.project,\n\t\t\"finance_book\": filters.finance_book,\n\t\t\"company_fb\": frappe.db.get_value(\"Company\", filters.company, \"default_finance_book\"),\n\t}\n\n\tif accounting_dimensions:\n\t\tfor dimension in accounting_dimensions:\n\t\t\tif filters.get(dimension.fieldname):\n\t\t\t\tif frappe.get_cached_value(\"DocType\", dimension.document_type, \"is_tree\"):\n\t\t\t\t\tfilters[dimension.fieldname] = get_dimension_with_children(\n\t\t\t\t\t\tdimension.document_type, filters.get(dimension.fieldname)\n\t\t\t\t\t)\n\t\t\t\t\tadditional_conditions += \"and {0} in %({0})s\".format(dimension.fieldname)\n\t\t\t\telse:\n\t\t\t\t\tadditional_conditions += \"and {0} in (%({0})s)\".format(dimension.fieldname)\n\n\t\t\t\tquery_filters.update({dimension.fieldname: 
filters.get(dimension.fieldname)})\n\n\tgle = frappe.db.sql(\n\t\t.format(\n\t\t\tadditional_conditions=additional_conditions\n\t\t),\n\t\tquery_filters,\n\t\tas_dict=True,\n\t)\n\n\topening = frappe._dict()\n\tfor d in gle:\n\t\topening.setdefault(d.account, d)\n\n\treturn opening\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 114, "n_words": 168, "vocab_size": 105, "complexity": 13, "nloc": 66, "token_counts": 311, "n_ast_nodes": 521, "n_identifiers": 40, "d_id": 13873, "documentation": { "docstring": " and cost_center in (select name from `tabCost Center`\n\t\t\twhere lft >= %s and rgt <= %s)\n\t\tselect\n\t\t\taccount, sum(debit) as opening_debit, sum(credit) as opening_credit\n\t\tfrom `tabGL Entry`\n\t\twhere\n\t\t\tcompany=%(company)s\n\t\t\t{additional_conditions}\n\t\t\tand (posting_date < %(from_date)s or ifnull(is_opening, 'No') = 'Yes')\n\t\t\tand account in (select name from `tabAccount` where report_type=%(report_type)s)\n\t\t\tand is_cancelled = 0\n\t\tgroup by account", "n_words": 55, "vocab_size": 41, "n_whitespaces": 44, "language": "en" } }, { "id": 178909, "commit_id": "abfb99b0a05dd76d2ecc6ebc20732a271857c6c8", "repo": "Nuitka", "path": "nuitka/freezer/IncludedDataFiles.py", "file_name": "IncludedDataFiles.py", "fun_name": "addIncludedDataFilesFromFileOptions", "commit_message": "Plugins: Massive cleanup of data file handling\n\n* Move data file handling out of standalone only, allowing support\n for other modes as well.\n\n* Attach logger and tags to data file objects.", "code": "def addIncludedDataFilesFromFileOptions():\n \n\n for included_datafile in _addIncludedDataFilesFromFileOptions():\n addIncludedDataFile(included_datafile)\n\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 20, "n_words": 7, "vocab_size": 7, "complexity": 2, "nloc": 3, "token_counts": 16, "n_ast_nodes": 30, "n_identifiers": 4, "d_id": 42857, "documentation": { "docstring": "Early data files, from user options that work with file system.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 101459, "commit_id": "13cfb3f39e72e9ca181f173b7b3db2a048db0d08", "repo": "faceswap", "path": "plugins/extract/pipeline.py", "file_name": "pipeline.py", "fun_name": "_check_and_raise_error", "commit_message": "extract: Add batch processing mode", "code": "def _check_and_raise_error(self) -> bool:\n \n for plugin in self._active_plugins:\n if plugin.check_and_raise_error():\n return True\n return False\n\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 61, "n_words": 14, "vocab_size": 13, "complexity": 3, "nloc": 6, "token_counts": 26, "n_ast_nodes": 44, "n_identifiers": 6, "d_id": 20872, "documentation": { "docstring": " Check all threads for errors and raise if one occurs ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 11, "language": "en" } }, { "id": 305039, "commit_id": "2224d0f43a048052cfc4572df95c7afcccdf3a57", "repo": "core", "path": "homeassistant/data_entry_flow.py", "file_name": "data_entry_flow.py", "fun_name": "async_remove", "commit_message": "Add a callback for data flow handler removal (#77394)\n\n* Add a callback for when data flows are removed\r\n\r\n* Call `async_remove` at the very end\r\n\r\n* Handle and log exceptions caught during flow removal\r\n\r\n* Log the 
error as an exception, with a traceback\r\n\r\n* Adjust test's expected logging output to match updated format specifier", "code": "def async_remove(self) -> None:\n \n\n\n@callback", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@callback", "n_ast_errors": 1, "ast_levels": 6, "n_whitespaces": 11, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 8, "n_ast_nodes": 20, "n_identifiers": 3, "d_id": 103832, "documentation": { "docstring": "Notification that the config flow has been removed.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 166762, "commit_id": "7e23a37e1c5bda81234801a6584563e2880769eb", "repo": "pandas", "path": "pandas/tests/indexes/interval/test_interval.py", "file_name": "test_interval.py", "fun_name": "test_is_unique_interval", "commit_message": "ENH: consistency of input args for boundaries - Interval (#46522)", "code": "def test_is_unique_interval(self, closed):\n \n # unique overlapping - distinct endpoints\n idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], inclusive=closed)\n assert idx.is_unique is True\n\n # unique overlapping - shared endpoints\n idx = IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)], inclusive=closed)\n assert idx.is_unique is True\n\n # unique nested\n idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], inclusive=closed)\n assert idx.is_unique is True\n\n # unique NaN\n idx = IntervalIndex.from_tuples([(np.NaN, np.NaN)], inclusive=closed)\n assert idx.is_unique is True\n\n # non-unique NaN\n idx = IntervalIndex.from_tuples(\n [(np.NaN, np.NaN), (np.NaN, np.NaN)], inclusive=closed\n )\n assert idx.is_unique is False\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 211, "n_words": 81, "vocab_size": 42, "complexity": 1, "nloc": 13, "token_counts": 176, "n_ast_nodes": 252, "n_identifiers": 10, "d_id": 39858, "documentation": { "docstring": "\n Interval specific tests for is_unique in addition to base class tests\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 26, "language": "en" } }, { "id": 155177, "commit_id": "193505fdf0c984743397ba3df56262f30aee13a8", "repo": "modin", "path": "modin/core/execution/unidist/implementations/pandas_on_unidist/partitioning/partition.py", "file_name": "partition.py", "fun_name": "preprocess_func", "commit_message": "FEAT-#5053: Add pandas on unidist execution with MPI backend (#5059)\n\nSigned-off-by: Igoshev, Iaroslav ", "code": "def preprocess_func(cls, func):\n \n return unidist.put(func)\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 15, "n_ast_nodes": 26, "n_identifiers": 5, "d_id": 36269, "documentation": { "docstring": "\n Put a function into the object store to use in ``apply``.\n\n Parameters\n ----------\n func : callable\n A function to preprocess.\n\n Returns\n -------\n unidist.ObjectRef\n A reference to `func`.\n ", "n_words": 27, "vocab_size": 23, "n_whitespaces": 106, "language": "en" } }, { "id": 224450, "commit_id": "f79b34d174e41084391868e7b503f5c61b8b1bdf", "repo": "mkdocs", "path": "mkdocs/plugins.py", "file_name": "plugins.py", "fun_name": "on_page_read_source", "commit_message": "Move plugin events docs into source code + refactor\n\n* Create real (no-op) methods for each event in the base class.\n* 
Refactor event dispatcher to not check for methods' existence, instead just call them.\n* Move documentation from Markdown into docstrings of these methods.\n* Activate the 'mkdocstrings' plugin.\n* Use 'mkdocstrings' to insert documentation from those docstrings into the site.", "code": "def on_page_read_source(self, page, config):\n \n return None\n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 20, "n_identifiers": 4, "d_id": 57295, "documentation": { "docstring": "\n The `on_page_read_source` event can replace the default mechanism to read\n the contents of a page's source from the filesystem.\n\n Parameters:\n page: `mkdocs.nav.Page` instance\n config: global configuration object\n\n Returns:\n The raw source for a page as unicode string. If `None` is returned, the\n default loading from a file will be performed.\n ", "n_words": 50, "vocab_size": 41, "n_whitespaces": 134, "language": "en" } }, { "id": 167605, "commit_id": "f538568afc2c76c2d738d32e3544cf9fe6742960", "repo": "pandas", "path": "pandas/conftest.py", "file_name": "conftest.py", "fun_name": "mixed_type_frame", "commit_message": "TYP: misc return type annotations (#47558)", "code": "def mixed_type_frame() -> DataFrame:\n \n return DataFrame(\n {\n \"a\": 1.0,\n \"b\": 2,\n \"c\": \"foo\",\n \"float32\": np.array([1.0] * 10, dtype=\"float32\"),\n \"int32\": np.array([1] * 10, dtype=\"int32\"),\n },\n index=np.arange(10),\n )\n\n\n@pytest.fixture", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "@pytest.fixture", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 111, "n_words": 27, "vocab_size": 25, "complexity": 1, "nloc": 15, "token_counts": 73, "n_ast_nodes": 125, "n_identifiers": 9, "d_id": 40057, "documentation": { "docstring": "\n Fixture for DataFrame of float/int/string columns with RangeIndex\n Columns are ['a', 'b', 'c', 'float32', 'int32'].\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 25, "language": "en" } }, { "id": 272185, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/integration_test/forwardprop_test.py", "file_name": "forwardprop_test.py", "fun_name": "_grad", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _grad(f, argnums=0):\n \n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 6, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 3, "token_counts": 14, "n_ast_nodes": 18, "n_identifiers": 3, "d_id": 80972, "documentation": { "docstring": "Return a function which computes the gradient of `f`.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 110316, "commit_id": "383de519505964ed879c40b23ef36e90c17ebe0d", "repo": "matplotlib", "path": "lib/matplotlib/axes/_axes.py", "file_name": "_axes.py", "fun_name": "axvline", "commit_message": "[Doc] fix more spelling and grammar", "code": "def axvline(self, x=0, ymin=0, ymax=1, **kwargs):\n \n self._check_no_units([ymin, ymax], ['ymin', 'ymax'])\n if \"transform\" in kwargs:\n raise ValueError(\"'transform' is not allowed as a keyword \"\n \"argument; axvline generates its own transform.\")\n xmin, xmax = self.get_xbound()\n\n # Strip away the units for comparison with non-unitized bounds.\n xx, = 
self._process_unit_info([(\"x\", x)], kwargs)\n scalex = (xx < xmin) or (xx > xmax)\n\n trans = self.get_xaxis_transform(which='grid')\n l = mlines.Line2D([x, x], [ymin, ymax], transform=trans, **kwargs)\n self.add_line(l)\n if scalex:\n self._request_autoscale_view(\"x\")\n return l\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 208, "n_words": 74, "vocab_size": 66, "complexity": 4, "nloc": 14, "token_counts": 139, "n_ast_nodes": 227, "n_identifiers": 23, "d_id": 24055, "documentation": { "docstring": "\n Add a vertical line across the Axes.\n\n Parameters\n ----------\n x : float, default: 0\n x position in data coordinates of the vertical line.\n\n ymin : float, default: 0\n Should be between 0 and 1, 0 being the bottom of the plot, 1 the\n top of the plot.\n\n ymax : float, default: 1\n Should be between 0 and 1, 0 being the bottom of the plot, 1 the\n top of the plot.\n\n Returns\n -------\n `~matplotlib.lines.Line2D`\n\n Other Parameters\n ----------------\n **kwargs\n Valid keyword arguments are `.Line2D` properties, except for\n 'transform':\n\n %(Line2D:kwdoc)s\n\n See Also\n --------\n vlines : Add vertical lines in data coordinates.\n axvspan : Add a vertical span (rectangle) across the axis.\n axline : Add a line with an arbitrary slope.\n\n Examples\n --------\n * draw a thick red vline at *x* = 0 that spans the yrange::\n\n >>> axvline(linewidth=4, color='r')\n\n * draw a default vline at *x* = 1 that spans the yrange::\n\n >>> axvline(x=1)\n\n * draw a default vline at *x* = .5 that spans the middle half of\n the yrange::\n\n >>> axvline(x=.5, ymin=0.25, ymax=0.75)\n ", "n_words": 173, "vocab_size": 87, "n_whitespaces": 465, "language": "en" } }, { "id": 256180, "commit_id": "a59bca366174d9c692fa19750c24d65f47660ef7", "repo": "haystack", "path": "haystack/__init__.py", "file_name": "__init__.py", "fun_name": "DeprecatedModule", "commit_message": "Apply black formatting (#2115)\n\n* Testing black on ui/\r\n\r\n* Applying black on docstores\r\n\r\n* Add latest docstring and tutorial changes\r\n\r\n* Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too\r\n\r\n* Remove comments\r\n\r\n* Relax constraints on pydoc-markdown\r\n\r\n* Split temporary black from the docs. 
Pydoc-markdown was obsolete and needs a separate PR to upgrade\r\n\r\n* Fix a couple of bugs\r\n\r\n* Add a type: ignore that was missing somehow\r\n\r\n* Give path to black\r\n\r\n* Apply Black\r\n\r\n* Apply Black\r\n\r\n* Relocate a couple of type: ignore\r\n\r\n* Update documentation\r\n\r\n* Make Linux CI run after applying Black\r\n\r\n* Triggering Black\r\n\r\n* Apply Black\r\n\r\n* Remove dependency, does not work well\r\n\r\n* Remove manually double trailing commas\r\n\r\n* Update documentation\r\n\r\nCo-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>", "code": "def DeprecatedModule(mod, deprecated_attributes=None, is_module_deprecated=True):\n \n", "url": "https://github.com/deepset-ai/haystack.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 7, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 5, "token_counts": 30, "n_ast_nodes": 23, "n_identifiers": 4, "d_id": 74787, "documentation": { "docstring": "\n Return a wrapped object that warns about deprecated accesses at import\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 18, "language": "en" } }, { "id": 159095, "commit_id": "f00148b089d326c952880a0e5e6bd4b2dcb98ce5", "repo": "rasa", "path": "tests/utils/test_common.py", "file_name": "test_common.py", "fun_name": "test_cli_log_level_debug_used", "commit_message": "Configurable logging for libraries (#10614)\n\n* Make library level logging to be configurable\r\n\r\nFixes https://github.com/RasaHQ/rasa/issues/10203\r\n\r\n* Create log level documentation under cheatsheet in Rasa docs\r\n\r\n* Add log docs to `rasa shell --debug` (and others)", "code": "def test_cli_log_level_debug_used():\n \n configure_logging_and_warnings(logging.DEBUG)\n rasa_logger = logging.getLogger(\"rasa\")\n rasa_logger.level == logging.DEBUG\n matplotlib_logger = logging.getLogger(\"matplotlib\")\n # Default log level for libraries is currently ERROR\n matplotlib_logger.level == logging.ERROR\n\n\n@mock.patch.dict(os.environ, {\"LOG_LEVEL\": \"WARNING\"})", "url": "https://github.com/RasaHQ/rasa.git", "language": "Python", "ast_errors": "@mock.patch.dict(os.environ, {\"LOG_LEVEL\": \"WARNING\"})", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 47, "n_words": 27, "vocab_size": 25, "complexity": 1, "nloc": 6, "token_counts": 41, "n_ast_nodes": 105, "n_identifiers": 14, "d_id": 38121, "documentation": { "docstring": "Test CLI with log level uses for rasa logger whereas libraries stay default.", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 206410, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/test/runner.py", "file_name": "runner.py", "fun_name": "partition_suite_by_case", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def partition_suite_by_case(suite):\n \n suite_class = type(suite)\n all_tests = iter_test_cases(suite)\n return [suite_class(tests) for _, tests in itertools.groupby(all_tests, type)]\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 28, "n_words": 16, "vocab_size": 15, "complexity": 2, "nloc": 4, "token_counts": 38, "n_ast_nodes": 62, "n_identifiers": 10, "d_id": 51517, "documentation": { "docstring": "Partition a test suite by test case, preserving the order of tests.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 75149, "commit_id": 
"d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/images/tests/test_admin_views.py", "file_name": "test_admin_views.py", "fun_name": "test_get_invalid_filter_spec", "commit_message": "Reformat with black", "code": "def test_get_invalid_filter_spec(self):\n \n # Get the image\n response = self.client.get(\n reverse(\"wagtailimages:preview\", args=(self.image.id, \"bad-filter-spec\"))\n )\n\n # Check response\n self.assertEqual(response.status_code, 400)\n\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 71, "n_words": 18, "vocab_size": 16, "complexity": 1, "nloc": 5, "token_counts": 41, "n_ast_nodes": 71, "n_identifiers": 11, "d_id": 16368, "documentation": { "docstring": "\n Test that an invalid filter spec returns a 400 response\n\n This is very unlikely to happen in reality. A user would have\n to create signature for the invalid filter spec which can't be\n done with Wagtails built in URL generator. We should test it\n anyway though.\n ", "n_words": 46, "vocab_size": 41, "n_whitespaces": 89, "language": "en" } }, { "id": 261584, "commit_id": "d8fa96c29828e3ca79ddd5d7466521ac4d95213c", "repo": "scikit-learn", "path": "sklearn/impute/tests/test_impute.py", "file_name": "test_impute.py", "fun_name": "test_knn_imputer_keep_empty_features", "commit_message": "ENH keep features with all missing values during imputation (#24770)\n\nCo-authored-by: Chiara Marmo \r\nCo-authored-by: Julien Jerphanion \r\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>\r\nCo-authored-by: Vitor SRG \r\nFixes https://github.com/scikit-learn/scikit-learn/pull/16695\r\nFixes https://github.com/scikit-learn/scikit-learn/issues/16426\r\nFixes https://github.com/scikit-learn/scikit-learn/issues/16977", "code": "def test_knn_imputer_keep_empty_features(keep_empty_features):\n \n X = np.array([[1, np.nan, 2], [3, np.nan, np.nan]])\n\n imputer = KNNImputer(keep_empty_features=keep_empty_features)\n\n for method in [\"fit_transform\", \"transform\"]:\n X_imputed = getattr(imputer, method)(X)\n if keep_empty_features:\n assert X_imputed.shape == X.shape\n assert_array_equal(X_imputed[:, 1], 0)\n else:\n assert X_imputed.shape == (X.shape[0], X.shape[1] - 1)\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 105, "n_words": 39, "vocab_size": 33, "complexity": 3, "nloc": 10, "token_counts": 110, "n_ast_nodes": 168, "n_identifiers": 13, "d_id": 76878, "documentation": { "docstring": "Check the behaviour of `keep_empty_features` for `KNNImputer`.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 63049, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py", "file_name": "__init__.py", "fun_name": "requires", "commit_message": "upd; format", "code": "def requires(self, extras=()):\n \n dm = self._dep_map\n deps = []\n deps.extend(dm.get(None, ()))\n for ext in extras:\n try:\n deps.extend(dm[safe_extra(ext)])\n except KeyError:\n raise UnknownExtra(\n \"%s has no such extra feature %r\" % (self, ext)\n )\n return deps\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 162, "n_words": 34, "vocab_size": 32, 
"complexity": 3, "nloc": 13, "token_counts": 69, "n_ast_nodes": 113, "n_identifiers": 12, "d_id": 13115, "documentation": { "docstring": "List of Requirements needed for this distro if `extras` are used", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 241885, "commit_id": "7438fe5edfb565ff341fa6ab054461fcdd504aa2", "repo": "scipy", "path": "scipy/stats/_stats_py.py", "file_name": "_stats_py.py", "fun_name": "mode", "commit_message": "MAINT: stats: mode: fix negative axis issue with np.moveaxis instead of custom code (#15421)", "code": "def mode(a, axis=0, nan_policy='propagate'):\n \n a, axis = _chk_asarray(a, axis)\n if a.size == 0:\n return ModeResult(np.array([]), np.array([]))\n\n contains_nan, nan_policy = _contains_nan(a, nan_policy)\n\n if contains_nan and nan_policy == 'omit':\n a = ma.masked_invalid(a)\n return mstats_basic.mode(a, axis)\n\n if a.dtype == object and np.nan in set(a.ravel()):\n # Fall back to a slower method since np.unique does not work with NaN\n scores = set(np.ravel(a)) # get ALL unique values\n testshape = list(a.shape)\n testshape[axis] = 1\n oldmostfreq = np.zeros(testshape, dtype=a.dtype)\n oldcounts = np.zeros(testshape, dtype=int)\n\n for score in scores:\n template = (a == score)\n counts = np.sum(template, axis, keepdims=True)\n mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)\n oldcounts = np.maximum(counts, oldcounts)\n oldmostfreq = mostfrequent\n\n return ModeResult(mostfrequent, oldcounts)\n", "url": "https://github.com/scipy/scipy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 259, "n_words": 108, "vocab_size": 78, "complexity": 8, "nloc": 31, "token_counts": 340, "n_ast_nodes": 336, "n_identifiers": 35, "d_id": 69724, "documentation": { "docstring": "Return an array of the modal (most common) value in the passed array.\n\n If there is more than one such value, only the smallest is returned.\n The bin-count for the modal bins is also returned.\n\n Parameters\n ----------\n a : array_like\n n-dimensional array of which to find mode(s).\n axis : int or None, optional\n Axis along which to operate. Default is 0. If None, compute over\n the whole array `a`.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n\n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n\n Returns\n -------\n mode : ndarray\n Array of modal values.\n count : ndarray\n Array of counts for each mode.\n\n Examples\n --------\n >>> a = np.array([[6, 8, 3, 0],\n ... [3, 2, 1, 7],\n ... [8, 1, 8, 4],\n ... [5, 3, 0, 5],\n ... 
[4, 7, 5, 9]])\n >>> from scipy import stats\n >>> stats.mode(a)\n ModeResult(mode=array([[3, 1, 0, 0]]), count=array([[1, 1, 1, 1]]))\n\n To get mode of whole array, specify ``axis=None``:\n\n >>> stats.mode(a, axis=None)\n ModeResult(mode=array([3]), count=array([3]))\n\n ", "n_words": 183, "vocab_size": 131, "n_whitespaces": 390, "language": "en" } }, { "id": 260641, "commit_id": "6e5ef2e9b8c64e6788428610ae884b9bf3d298a2", "repo": "scikit-learn", "path": "sklearn/feature_selection/_rfe.py", "file_name": "_rfe.py", "fun_name": "score", "commit_message": "MAINT solve long line reported by flake8 (#24065)", "code": "def score(self, X, y, **fit_params):\n \n check_is_fitted(self)\n return self.estimator_.score(self.transform(X), y, **fit_params)\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 31, "n_words": 10, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 36, "n_ast_nodes": 56, "n_identifiers": 8, "d_id": 76391, "documentation": { "docstring": "Reduce X to the selected features and return the score of the estimator.\n\n Parameters\n ----------\n X : array of shape [n_samples, n_features]\n The input samples.\n\n y : array of shape [n_samples]\n The target values.\n\n **fit_params : dict\n Parameters to pass to the `score` method of the underlying\n estimator.\n\n .. versionadded:: 1.0\n\n Returns\n -------\n score : float\n Score of the underlying base estimator computed with the selected\n features returned by `rfe.transform(X)` and `y`.\n ", "n_words": 72, "vocab_size": 46, "n_whitespaces": 212, "language": "en" } }, { "id": 321041, "commit_id": "d9e20f6b3071b86b479f281fe27d621e0b3ae7e5", "repo": "qutebrowser", "path": "qutebrowser/config/configfiles.py", "file_name": "configfiles.py", "fun_name": "finalize", "commit_message": "config: Handle config.py errors while updating mutables\n\nFixes #3580", "code": "def finalize(self) -> None:\n \n if self._warn_autoconfig:\n desc = configexc.ConfigErrorDesc(\n \"autoconfig loading not specified\",\n (\"Your config.py should call either `config.load_autoconfig()`\"\n \" (to load settings configured via the GUI) or \"\n \"`config.load_autoconfig(False)` (to not do so)\"))\n self.errors.append(desc)\n\n with self._handle_error(\"updating mutated values\"):\n self._config.update_mutables()\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 156, "n_words": 40, "vocab_size": 37, "complexity": 2, "nloc": 11, "token_counts": 50, "n_ast_nodes": 95, "n_identifiers": 11, "d_id": 117491, "documentation": { "docstring": "Do work which needs to be done after reading config.py.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 271940, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training_v1.py", "file_name": "training_v1.py", "fun_name": "predict_on_batch", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def predict_on_batch(self, x):\n \n self._check_call_args(\"predict_on_batch\")\n\n if (\n self._distribution_strategy\n and tf.distribute.in_cross_replica_context()\n ):\n raise NotImplementedError(\n \"`predict_on_batch` is not supported for models distributed with\"\n \" tf.distribute.Strategy.\"\n )\n # Validate and standardize user data.\n inputs, _, _ = self._standardize_user_data(\n x, 
extract_tensors_from_dataset=True\n )\n # If `self._distribution_strategy` is True, then we are in a replica context\n # at this point.\n if self.run_eagerly or self._distribution_strategy:\n inputs = training_utils_v1.cast_if_floating_dtype(inputs)\n if isinstance(inputs, collections.abc.Sequence):\n # Unwrap lists with only one input, as we do when training on batch\n if len(inputs) == 1:\n inputs = inputs[0]\n\n return self(inputs) # pylint: disable=not-callable\n\n self._make_predict_function()\n outputs = self.predict_function(inputs)\n\n if len(outputs) == 1:\n return outputs[0]\n return outputs\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 378, "n_words": 101, "vocab_size": 80, "complexity": 8, "nloc": 24, "token_counts": 127, "n_ast_nodes": 213, "n_identifiers": 24, "d_id": 80906, "documentation": { "docstring": "Returns predictions for a single batch of samples.\n\n Args:\n x: Input data. It could be:\n - A Numpy array (or array-like), or a list of arrays\n (in case the model has multiple inputs).\n - A TensorFlow tensor, or a list of tensors\n (in case the model has multiple inputs).\n - A `tf.data` dataset.\n\n Returns:\n Numpy array(s) of predictions.\n\n Raises:\n ValueError: In case of mismatch between given number of inputs and\n expectations of the model.\n ", "n_words": 74, "vocab_size": 50, "n_whitespaces": 217, "language": "en" } }, { "id": 104575, "commit_id": "3804442bb7cfcb9d52044d92688115cfdc69c2da", "repo": "datasets", "path": "src/datasets/features/audio.py", "file_name": "audio.py", "fun_name": "flatten", "commit_message": "Fix flatten of complex feature types (#3723)\n\n* Flatten Translation and TranslationVariableLanguages\r\n\r\n* Add tests\r\n\r\n* Style\r\n\r\n* Flatten for decodable features\r\n\r\n* Fix flatten for non-dict types\r\n\r\n* Add test\r\n\r\n* Descriptive message in flatten for Audio feature\r\n\r\n* Small refactor\r\n\r\n* Add flatten to features\r\n\r\n* Update table_flatten\r\n\r\n* Revert changes in Dataset.flatten_/flatten\r\n\r\n* Apply Quentin's suggestions from code review\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>\r\n\r\n* Improve table_flatten docstring\r\n\r\n* Fix tests\r\n\r\n* Add nested test\r\n\r\n* Minor fix\r\n\r\n* Remove comment\r\n\r\nCo-authored-by: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com>", "code": "def flatten(self) -> Union[\"FeatureType\", Dict[str, \"FeatureType\"]]:\n \n from .features import Value\n\n if self.decode:\n raise ValueError(\"Cannot flatten a decoded Audio feature.\")\n return {\n \"bytes\": Value(\"binary\"),\n \"path\": Value(\"string\"),\n }\n", "url": "https://github.com/huggingface/datasets.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 94, "n_words": 26, "vocab_size": 26, "complexity": 2, "nloc": 9, "token_counts": 50, "n_ast_nodes": 93, "n_identifiers": 9, "d_id": 21901, "documentation": { "docstring": "If in the decodable state, raise an error, otherwise flatten the feature into a dictionary.", "n_words": 15, "vocab_size": 14, "n_whitespaces": 14, "language": "en" } }, { "id": 242047, "commit_id": "56869131c8e0a0d6e1af86cc1a000c61e83efcf6", "repo": "scipy", "path": "scipy/stats/_stats_py.py", "file_name": "_stats_py.py", "fun_name": "gmean", "commit_message": "DOC: stats: correct doc display", "code": "def gmean(a, axis=0, dtype=None, weights=None):\n r\n if not isinstance(a, 
np.ndarray):\n # if not an ndarray object attempt to convert it\n log_a = np.log(np.array(a, dtype=dtype))\n elif dtype:\n # Must change the default dtype allowing array type\n if isinstance(a, np.ma.MaskedArray):\n log_a = np.log(np.ma.asarray(a, dtype=dtype))\n else:\n log_a = np.log(np.asarray(a, dtype=dtype))\n else:\n log_a = np.log(a)\n\n if weights is not None:\n weights = np.asanyarray(weights, dtype=dtype)\n\n return np.exp(np.average(log_a, axis=axis, weights=weights))\n\n\n@_axis_nan_policy_factory(\n lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,\n result_unpacker=lambda x: (x,), kwd_samples=['weights'])", "url": "https://github.com/scipy/scipy.git", "language": "Python", "ast_errors": "@_axis_nan_policy_factory(\n lambda x: x, n_samples=1, n_outputs=1, too_small=0, paired=True,\n result_unpacker=lambda x: (x,), kwd_samples=['weights'])", "n_ast_errors": 1, "ast_levels": 17, "n_whitespaces": 177, "n_words": 76, "vocab_size": 57, "complexity": 5, "nloc": 79, "token_counts": 148, "n_ast_nodes": 286, "n_identifiers": 25, "d_id": 69761, "documentation": { "docstring": "Compute the weighted geometric mean along the specified axis.\n\n The weighted geometric mean of the array :math:`a_i` associated to weights\n :math:`w_i` is:\n\n .. math::\n\n \\exp \\left( \\frac{ \\sum_{i=1}^n w_i \\log a_i }{ \\sum_{i=1}^n w_i }\n \\right) \\, ,\n\n and, with equal weights, it falls backs to:\n\n .. math::\n\n \\sqrt[n]{ \\prod_{i=1}^n a_i } \\, .\n\n Parameters\n ----------\n a : array_like\n Input array or object that can be converted to an array.\n axis : int or None, optional\n Axis along which the geometric mean is computed. Default is 0.\n If None, compute over the whole array `a`.\n dtype : dtype, optional\n Type of the returned array and of the accumulator in which the\n elements are summed. If dtype is not specified, it defaults to the\n dtype of a, unless a has an integer dtype with a precision less than\n that of the default platform integer. In that case, the default\n platform integer is used.\n weights : array_like, optional\n The `weights` array must be broadcastable to the same shape as `a`.\n Default is None, which gives each value a weight of 1.0.\n\n Returns\n -------\n gmean : ndarray\n See `dtype` parameter above.\n\n See Also\n --------\n numpy.mean : Arithmetic average\n numpy.average : Weighted average\n hmean : Harmonic mean\n\n Notes\n -----\n The geometric average is computed over a single dimension of the input\n array, axis=0 by default, or all values in the array if axis=None.\n float64 intermediate and return values are used for integer inputs.\n\n References\n ----------\n .. 
[1] \"Weighted Geometric Mean\", *Wikipedia*,\n https://en.wikipedia.org/wiki/Weighted_geometric_mean.\n\n Examples\n --------\n >>> from scipy.stats import gmean\n >>> gmean([1, 4])\n 2.0\n >>> gmean([1, 2, 3, 4, 5, 6, 7])\n 3.3800151591412964\n >>> gmean([1, 4, 7], weights=[3, 1, 3])\n 2.80668351922014\n\n ", "n_words": 276, "vocab_size": 173, "n_whitespaces": 506, "language": "en" } }, { "id": 62169, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/util.py", "file_name": "util.py", "fun_name": "split_filename", "commit_message": "upd; format", "code": "def split_filename(filename, project_name=None):\n \n result = None\n pyver = None\n filename = unquote(filename).replace(' ', '-')\n m = PYTHON_VERSION.search(filename)\n if m:\n pyver = m.group(1)\n filename = filename[:m.start()]\n if project_name and len(filename) > len(project_name) + 1:\n m = re.match(re.escape(project_name) + r'\\b', filename)\n if m:\n n = m.end()\n result = filename[:n], filename[n + 1:], pyver\n if result is None:\n m = PROJECT_NAME_AND_VERSION.match(filename)\n if m:\n result = m.group(1), m.group(3), pyver\n return result\n\n# Allow spaces in name because of legacy dists like \"Twisted Core\"\nNAME_VERSION_RE = re.compile(r'(?P[\\w .-]+)\\s*'\n r'\\(\\s*(?P[^\\s)]+)\\)$')\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 212, "n_words": 84, "vocab_size": 54, "complexity": 7, "nloc": 18, "token_counts": 154, "n_ast_nodes": 270, "n_identifiers": 21, "d_id": 12890, "documentation": { "docstring": "\n Extract name, version, python version from a filename (no extension)\n\n Return name, version, pyver or None\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 26, "language": "en" } }, { "id": 46887, "commit_id": "91832a42d8124b040073481fd93c54e9e64c2609", "repo": "airflow", "path": "tests/models/test_dagrun.py", "file_name": "test_dagrun.py", "fun_name": "test_mapped_literal_to_xcom_arg_verify_integrity", "commit_message": "Expand mapped tasks at DagRun.Veriy_integrity (#22679)\n\nCreate the necessary task instances for a mapped task at dagrun.verify_integrity\r\n\r\nCo-authored-by: Ash Berlin-Taylor ", "code": "def test_mapped_literal_to_xcom_arg_verify_integrity(dag_maker, session):\n \n\n with dag_maker(session=session) as dag:\n t1 = BaseOperator(task_id='task_1')\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 23, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 25, "token_counts": 185, "n_ast_nodes": 49, "n_identifiers": 7, "d_id": 9034, "documentation": { "docstring": "Test that when we change from literal to a XComArg the TIs are removed", "n_words": 14, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 5320, "commit_id": "150ab593f8ca1f1aa960a0811aece26c46ba6c75", "repo": "airbyte", "path": "airbyte-cdk/python/airbyte_cdk/sources/declarative/requesters/requester.py", "file_name": "requester.py", "fun_name": "raise_on_http_errors", "commit_message": "Low code connectors: core structure (#12850)\n\n* checkout from alex/cac\r\n\r\n* doc\r\n\r\n* doc\r\n\r\n* remove broken test\r\n\r\n* rename\r\n\r\n* rename file\r\n\r\n* delete unused file\r\n\r\n* rename\r\n\r\n* abstract property\r\n\r\n* isort\r\n\r\n* update state\r\n\r\n* Update comment\r\n\r\n* remove incremental 
mixin\r\n\r\n* delete comment\r\n\r\n* update comments\r\n\r\n* update comments\r\n\r\n* remove no_state\r\n\r\n* rename package\r\n\r\n* pass parameters through kwargs\r\n\r\n* update interface to pass source in interface\r\n\r\n* update interface to pass source in interface\r\n\r\n* rename to stream_slicer\r\n\r\n* Low code connectors: string interpolation with jinja (#12852)\r\n\r\n* checkout from alex/cac\r\n\r\n* Add missing tests\r\n\r\n* Add missing files\r\n\r\n* missing file\r\n\r\n* rename\r\n\r\n* jinja dependency\r\n\r\n* Add comment\r\n\r\n* comment\r\n\r\n* comment\r\n\r\n* Revert \"delete unused file\"\r\n\r\nThis reverts commit 758e939367775ddbefcd52c6e1e832976d3ba9fe.\r\n\r\n* delete unused field\r\n\r\n* delete unused field\r\n\r\n* rename\r\n\r\n* pass kwargs directly\r\n\r\n* isort\r\n\r\n* Revert \"isort\"\r\n\r\nThis reverts commit 4a792239440bc9950813ccc6ed368641ce2a96e4.\r\n\r\n* format\r\n\r\n* decoder\r\n\r\n* better error handling\r\n\r\n* remove nostate\r\n\r\n* isort\r\n\r\n* delete dead code\r\n\r\n* Update mapping type to [str, Any]\r\n\r\n* add comment\r\n\r\n* Add comment\r\n\r\n* pass parameters through kwargs\r\n\r\n* move test to right module\r\n\r\n* Add missing test\r\n\r\n* Use authbase instead of deprecated class\r\n\r\n* leverage generator\r\n\r\n* rename to declarative\r\n\r\n* rename the classes too", "code": "def raise_on_http_errors(self) -> bool:\n \n", "url": "https://github.com/airbytehq/airbyte.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 11, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 4, "token_counts": 8, "n_ast_nodes": 16, "n_identifiers": 3, "d_id": 753, "documentation": { "docstring": "\n If set to False, allows opting-out of raising HTTP code exception.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 26, "language": "en" } }, { "id": 262688, "commit_id": "3b8b105b0d6539ac12972de94e0b2a5077fa1ce2", "repo": "TTS", "path": "TTS/tts/layers/overflow/neural_hmm.py", "file_name": "neural_hmm.py", "fun_name": "_mask_lengths", "commit_message": "Adding OverFlow (#2183)\n\n* Adding encoder\r\n\r\n* currently modifying hmm\r\n\r\n* Adding hmm\r\n\r\n* Adding overflow\r\n\r\n* Adding overflow setting up flat start\r\n\r\n* Removing runs\r\n\r\n* adding normalization parameters\r\n\r\n* Fixing models on same device\r\n\r\n* Training overflow and plotting evaluations\r\n\r\n* Adding inference\r\n\r\n* At the end of epoch the test sentences are coming on cpu instead of gpu\r\n\r\n* Adding figures from model during training to monitor\r\n\r\n* reverting tacotron2 training recipe\r\n\r\n* fixing inference on gpu for test sentences on config\r\n\r\n* moving helpers and texts within overflows source code\r\n\r\n* renaming to overflow\r\n\r\n* moving loss to the model file\r\n\r\n* Fixing the rename\r\n\r\n* Model training but not plotting the test config sentences's audios\r\n\r\n* Formatting logs\r\n\r\n* Changing model name to camelcase\r\n\r\n* Fixing test log\r\n\r\n* Fixing plotting bug\r\n\r\n* Adding some tests\r\n\r\n* Adding more tests to overflow\r\n\r\n* Adding all tests for overflow\r\n\r\n* making changes to camel case in config\r\n\r\n* Adding information about parameters and docstring\r\n\r\n* removing compute_mel_statistics moved statistic computation to the model instead\r\n\r\n* Added overflow in readme\r\n\r\n* Adding more test cases, now it doesn't saves transition_p like tensor and can be dumped as json", "code": "def _mask_lengths(mel_lens, log_c, 
log_alpha_scaled):\n \n mask_log_c = sequence_mask(mel_lens)\n log_c = log_c * mask_log_c\n mask_log_alpha_scaled = mask_log_c.unsqueeze(2)\n log_alpha_scaled = log_alpha_scaled * mask_log_alpha_scaled\n return log_c, log_alpha_scaled\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 65, "n_words": 23, "vocab_size": 13, "complexity": 1, "nloc": 6, "token_counts": 38, "n_ast_nodes": 62, "n_identifiers": 8, "d_id": 77320, "documentation": { "docstring": "\n Mask the lengths of the forward variables so that the variable lenghts\n do not contribute in the loss calculation\n Args:\n mel_inputs (torch.FloatTensor): (batch, T, frame_channels)\n mel_inputs_lengths (torch.IntTensor): (batch)\n log_c (torch.FloatTensor): (batch, T)\n Returns:\n log_c (torch.FloatTensor) : scaled probabilities (batch, T)\n log_alpha_scaled (torch.FloatTensor): forward probabilities (batch, T, N)\n ", "n_words": 47, "vocab_size": 34, "n_whitespaces": 138, "language": "en" } }, { "id": 36274, "commit_id": "0a5ef036e6c2d5093ed348c5fd706634f6ed5e38", "repo": "transformers", "path": "src/transformers/commands/add_new_model_like.py", "file_name": "add_new_model_like.py", "fun_name": "get_default_frameworks", "commit_message": "Make `add-new-model-like` work in an env without all frameworks (#16239)\n\n* Make add-new-model-like work without all frameworks installed\r\n\r\n* A few fixes\r\n\r\n* Last default frameworks", "code": "def get_default_frameworks():\n \n frameworks = []\n if is_torch_available():\n frameworks.append(\"pt\")\n if is_tf_available():\n frameworks.append(\"tf\")\n if is_flax_available():\n frameworks.append(\"flax\")\n return frameworks\n\n\n_re_model_mapping = re.compile(\"MODEL_([A-Z_]*)MAPPING_NAMES\")\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 57, "n_words": 19, "vocab_size": 15, "complexity": 4, "nloc": 9, "token_counts": 44, "n_ast_nodes": 100, "n_identifiers": 9, "d_id": 6594, "documentation": { "docstring": "\n Returns the list of frameworks (PyTorch, TensorFlow, Flax) that are installed in the environment.\n ", "n_words": 14, "vocab_size": 13, "n_whitespaces": 21, "language": "en" } }, { "id": 159732, "commit_id": "1edd6407d008fcaa282a6058ae355025c26517fd", "repo": "numpy", "path": "numpy/core/tests/test_limited_api.py", "file_name": "test_limited_api.py", "fun_name": "test_limited_api", "commit_message": "TST: Split example package, skip limited API test for debug", "code": "def test_limited_api(tmp_path):\n \n # Based in part on test_cython from random.tests.test_extending\n\n here = os.path.dirname(__file__)\n ext_dir = os.path.join(here, \"examples\", \"limited_api\")\n\n cytest = str(tmp_path / \"limited_api\")\n\n shutil.copytree(ext_dir, cytest)\n # build the examples and \"install\" them into a temporary directory\n\n install_log = str(tmp_path / \"tmp_install_log.txt\")\n subprocess.check_call(\n [\n sys.executable,\n \"setup.py\",\n \"build\",\n \"install\",\n \"--prefix\", str(tmp_path / \"installdir\"),\n \"--single-version-externally-managed\",\n \"--record\",\n install_log,\n ],\n cwd=cytest,\n )\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 196, "n_words": 57, "vocab_size": 48, "complexity": 1, "nloc": 19, "token_counts": 91, "n_ast_nodes": 158, "n_identifiers": 19, "d_id": 
38413, "documentation": { "docstring": "Test building a third-party C extension with the limited API.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 108602, "commit_id": "9a03cb3b8c7253271054f146724c230eca96274b", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_dates.py", "file_name": "test_dates.py", "fun_name": "test_drange", "commit_message": "Add tests for date module", "code": "def test_drange():\n \n start = datetime.datetime(2011, 1, 1, tzinfo=mdates.UTC)\n end = datetime.datetime(2011, 1, 2, tzinfo=mdates.UTC)\n delta = datetime.timedelta(hours=1)\n # We expect 24 values in drange(start, end, delta), because drange returns\n # dates from an half open interval [start, end)\n assert len(mdates.drange(start, end, delta)) == 24\n\n # Same if interval ends slightly earlier\n end = end - datetime.timedelta(microseconds=1)\n assert len(mdates.drange(start, end, delta)) == 24\n\n # if end is a little bit later, we expect the range to contain one element\n # more\n end = end + datetime.timedelta(microseconds=2)\n assert len(mdates.drange(start, end, delta)) == 25\n\n # reset end\n end = datetime.datetime(2011, 1, 2, tzinfo=mdates.UTC)\n\n # and tst drange with \"complicated\" floats:\n # 4 hours = 1/6 day, this is an \"dangerous\" float\n delta = datetime.timedelta(hours=4)\n daterange = mdates.drange(start, end, delta)\n assert len(daterange) == 6\n assert mdates.num2date(daterange[-1]) == (end - delta)\n\n\n@_new_epoch_decorator", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "@_new_epoch_decorator", "n_ast_errors": 1, "ast_levels": 10, "n_whitespaces": 202, "n_words": 137, "vocab_size": 80, "complexity": 1, "nloc": 14, "token_counts": 187, "n_ast_nodes": 292, "n_identifiers": 16, "d_id": 23271, "documentation": { "docstring": "\n This test should check if drange works as expected, and if all the\n rounding errors are fixed\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 27, "language": "en" } }, { "id": 44553, "commit_id": "1a8a897120762692ca98ac5ce4da881678c073aa", "repo": "airflow", "path": "airflow/executors/executor_loader.py", "file_name": "executor_loader.py", "fun_name": "get_default_executor", "commit_message": "Improve speed to run `airflow` by 6x (#21438)\n\nBy delaying expensive/slow imports to where they are needed, this gets\n`airflow` printing it's usage information in under 0.8s, down from almost\n3s which makes it feel much much snappier.\n\nBy not loading BaseExecutor we can get down to <0.5s", "code": "def get_default_executor(cls) -> \"BaseExecutor\":\n \n if cls._default_executor is not None:\n return cls._default_executor\n\n from airflow.configuration import conf\n\n executor_name = conf.get('core', 'EXECUTOR')\n\n cls._default_executor = cls.load_executor(executor_name)\n\n return cls._default_executor\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 77, "n_words": 24, "vocab_size": 19, "complexity": 2, "nloc": 8, "token_counts": 50, "n_ast_nodes": 86, "n_identifiers": 9, "d_id": 8295, "documentation": { "docstring": "Creates a new instance of the configured executor if none exists and returns it", "n_words": 14, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 55084, "commit_id": "808660dd04465fc796a34e835467e8ae1f2449b3", "repo": "prefect", "path": "tests/cli/test_profile.py", "file_name": "test_profile.py", "fun_name": 
"test_create_profile_from_existing", "commit_message": "Add tests for profile CLI", "code": "def test_create_profile_from_existing():\n save_profiles(\n ProfilesCollection(\n profiles=[\n Profile(name=\"foo\", settings={PREFECT_API_KEY: \"foo\"}),\n ],\n active=None,\n )\n )\n\n invoke_and_assert(\n [\"profile\", \"create\", \"bar\", \"--from\", \"foo\"],\n expected_output=(\n f\n ),\n )\n\n profiles = load_profiles()\n assert profiles[\"foo\"].settings == {PREFECT_API_KEY: \"foo\"}, \"Foo is unchanged\"\n assert profiles[\"bar\"] == Profile(\n name=\"bar\",\n settings={PREFECT_API_KEY: \"foo\"},\n source=PREFECT_PROFILES_PATH.value(),\n )\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 180, "n_words": 42, "vocab_size": 35, "complexity": 1, "nloc": 32, "token_counts": 105, "n_ast_nodes": 179, "n_identifiers": 15, "d_id": 11204, "documentation": { "docstring": "\n Created profile 'bar' matching 'foo'.\n\n Switch to your new profile with:\n\n prefect profile use 'bar'\n\n Or, to use it for a single command, include the `-p` option:\n\n prefect -p 'bar' config view\n ", "n_words": 32, "vocab_size": 25, "n_whitespaces": 107, "language": "en" } }, { "id": 241714, "commit_id": "b56d8677ad0ff8513e566334f4a78a24b88480c3", "repo": "lightning", "path": "tests/callbacks/test_device_stats_monitor.py", "file_name": "test_device_stats_monitor.py", "fun_name": "test_device_stats_gpu_from_nvidia", "commit_message": "Update test_pruning.py to use `devices` instead of `gpus` or `ipus` (#11339)", "code": "def test_device_stats_gpu_from_nvidia(tmpdir):\n \n model = BoringModel()\n device_stats = DeviceStatsMonitor()\n", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 17, "n_words": 8, "vocab_size": 7, "complexity": 1, "nloc": 19, "token_counts": 82, "n_ast_nodes": 31, "n_identifiers": 6, "d_id": 69667, "documentation": { "docstring": "Test GPU stats are logged using a logger with Pytorch < 1.8.0.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 265787, "commit_id": "0d7851ed9de2792ea6d9ed223c315c235290ddd7", "repo": "netbox", "path": "netbox/utilities/views.py", "file_name": "views.py", "fun_name": "register_model_view", "commit_message": "#9072: Implement a mechanism for dynamically registering model detail views", "code": "def register_model_view(model, name, view_path, tab_label=None, tab_badge=None, tab_permission=None, kwargs=None):\n \n app_label = model._meta.app_label\n model_name = model._meta.model_name\n\n if model_name not in registry['views'][app_label]:\n registry['views'][app_label][model_name] = []\n\n registry['views'][app_label][model_name].append({\n 'name': name,\n 'path': view_path,\n 'tab_label': tab_label,\n 'tab_badge': tab_badge,\n 'tab_permission': tab_permission,\n 'kwargs': kwargs or {},\n })\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 105, "n_words": 38, "vocab_size": 33, "complexity": 3, "nloc": 13, "token_counts": 108, "n_ast_nodes": 172, "n_identifiers": 13, "d_id": 78196, "documentation": { "docstring": "\n Register a subview for a core model.\n\n Args:\n model: The Django model class with which this view will be associated\n name: The name to register when creating a URL path\n view_path: A dotted path to the view class or 
function (e.g. 'myplugin.views.FooView')\n tab_label: The label to display for the view's tab under the model view (optional)\n tab_badge: A static value or callable to display a badge within the view's tab (optional). If a callable is\n specified, it must accept the current object as its single positional argument.\n tab_permission: The name of the permission required to display the tab (optional)\n kwargs: A dictionary of keyword arguments to send to the view (optional)\n ", "n_words": 111, "vocab_size": 71, "n_whitespaces": 181, "language": "en" } }, { "id": 125025, "commit_id": "864af14f410ab12c7553332dd3a62e716f24a667", "repo": "ray", "path": "python/ray/data/_internal/batcher.py", "file_name": "batcher.py", "fun_name": "done_adding", "commit_message": "[Datasets] [Local Shuffle - 1/N] Add local shuffling option. (#26094)\n\nCo-authored-by: Eric Liang \r\nCo-authored-by: matthewdeng \r\nCo-authored-by: Matthew Deng \r\nCo-authored-by: Richard Liaw ", "code": "def done_adding(self) -> bool:\n \n raise NotImplementedError()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 12, "n_ast_nodes": 23, "n_identifiers": 4, "d_id": 27753, "documentation": { "docstring": "Indicate to the batcher that no more blocks will be added to the buffer.", "n_words": 14, "vocab_size": 12, "n_whitespaces": 13, "language": "en" } }, { "id": 292660, "commit_id": "845bf80e725af8c921915906b0f796c7a8164d11", "repo": "core", "path": "tests/components/mqtt/test_init.py", "file_name": "test_init.py", "fun_name": "test_subscribe_deprecated_async", "commit_message": "Mqtt improve test coverage (#66279)\n\nCo-authored-by: Martin Hjelmare ", "code": "async def test_subscribe_deprecated_async(hass, mqtt_mock):\n \n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 7, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 27, "token_counts": 184, "n_ast_nodes": 16, "n_identifiers": 3, "d_id": 91734, "documentation": { "docstring": "Test the subscription of a topic using deprecated coroutine signature.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 267055, "commit_id": "1b947eaf92b6833d2a4fd019a30d7b85406f1778", "repo": "ansible", "path": "lib/ansible/module_utils/common/arg_spec.py", "file_name": "arg_spec.py", "fun_name": "validate", "commit_message": "arg_spec - Return aliases in validation result and update aliases (#77576)\n\n\r\nWhen looking up the `no_log` setting for a parameter that is an alias in\r\n`AnsibleModule._log_invocation()`, the alias value will always be an\r\nempty dictionary since `self.aliases` on the `AnsibleModule` instance is\r\nnever updated after initialization. 
Since the `no_log` setting is on the\r\ncanonical parameter not the alias, an incorrect warning is issued if the\r\nparameter matches `PASSWORD_MATCH`.\r\n\r\nThis PR returns the aliases dictionary as an attribute of the\r\n`ValidationResult` and updates the `aliases` attribute on the\r\n`AnsibleModule` instance.", "code": "def validate(self, parameters, *args, **kwargs):\n \n\n result = ValidationResult(parameters)\n\n result._no_log_values.update(set_fallbacks(self.argument_spec, result._validated_parameters))\n\n alias_warnings = []\n alias_deprecations = []\n try:\n result._aliases.update(_handle_aliases(self.argument_spec, result._validated_parameters, alias_warnings, alias_deprecations))\n except (TypeError, ValueError) as e:\n result.errors.append(AliasError(to_native(e)))\n\n legal_inputs = _get_legal_inputs(self.argument_spec, result._validated_parameters, result._aliases)\n\n for option, alias in alias_warnings:\n result._warnings.append({'option': option, 'alias': alias})\n\n for deprecation in alias_deprecations:\n result._deprecations.append({\n 'name': deprecation['name'],\n 'version': deprecation.get('version'),\n 'date': deprecation.get('date'),\n 'collection_name': deprecation.get('collection_name'),\n })\n\n try:\n result._no_log_values.update(_list_no_log_values(self.argument_spec, result._validated_parameters))\n except TypeError as te:\n result.errors.append(NoLogError(to_native(te)))\n\n try:\n result._unsupported_parameters.update(_get_unsupported_parameters(self.argument_spec, result._validated_parameters, legal_inputs))\n except TypeError as te:\n result.errors.append(RequiredDefaultError(to_native(te)))\n except ValueError as ve:\n result.errors.append(AliasError(to_native(ve)))\n\n try:\n check_mutually_exclusive(self._mutually_exclusive, result._validated_parameters)\n except TypeError as te:\n result.errors.append(MutuallyExclusiveError(to_native(te)))\n\n result._no_log_values.update(_set_defaults(self.argument_spec, result._validated_parameters, False))\n\n try:\n check_required_arguments(self.argument_spec, result._validated_parameters)\n except TypeError as e:\n result.errors.append(RequiredError(to_native(e)))\n\n _validate_argument_types(self.argument_spec, result._validated_parameters, errors=result.errors)\n _validate_argument_values(self.argument_spec, result._validated_parameters, errors=result.errors)\n\n for check in _ADDITIONAL_CHECKS:\n try:\n check['func'](getattr(self, \"_{attr}\".format(attr=check['attr'])), result._validated_parameters)\n except TypeError as te:\n result.errors.append(check['err'](to_native(te)))\n\n result._no_log_values.update(_set_defaults(self.argument_spec, result._validated_parameters))\n\n _validate_sub_spec(self.argument_spec, result._validated_parameters,\n errors=result.errors,\n no_log_values=result._no_log_values,\n unsupported_parameters=result._unsupported_parameters)\n\n if result._unsupported_parameters:\n flattened_names = []\n for item in result._unsupported_parameters:\n if isinstance(item, tuple):\n flattened_names.append(\".\".join(item))\n else:\n flattened_names.append(item)\n\n unsupported_string = \", \".join(sorted(list(flattened_names)))\n supported_string = \", \".join(self._valid_parameter_names)\n result.errors.append(\n UnsupportedError(\"{0}. 
Supported parameters include: {1}.\".format(unsupported_string, supported_string)))\n\n return result\n\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 825, "n_words": 154, "vocab_size": 98, "complexity": 14, "nloc": 62, "token_counts": 575, "n_ast_nodes": 930, "n_identifiers": 65, "d_id": 78708, "documentation": { "docstring": "Validate ``parameters`` against argument spec.\n\n Error messages in the :class:`ValidationResult` may contain no_log values and should be\n sanitized with :func:`~ansible.module_utils.common.parameters.sanitize_keys` before logging or displaying.\n\n :arg parameters: Parameters to validate against the argument spec\n :type parameters: dict[str, dict]\n\n :return: :class:`ValidationResult` containing validated parameters.\n\n :Simple Example:\n\n .. code-block:: text\n\n argument_spec = {\n 'name': {'type': 'str'},\n 'age': {'type': 'int'},\n }\n\n parameters = {\n 'name': 'bo',\n 'age': '42',\n }\n\n validator = ArgumentSpecValidator(argument_spec)\n result = validator.validate(parameters)\n\n if result.error_messages:\n sys.exit(\"Validation failed: {0}\".format(\", \".join(result.error_messages))\n\n valid_params = result.validated_parameters\n ", "n_words": 80, "vocab_size": 66, "n_whitespaces": 355, "language": "en" } }, { "id": 246128, "commit_id": "bf60da1a60096fac5fb778b732ff2214862ac808", "repo": "synapse", "path": "tests/handlers/test_profile.py", "file_name": "test_profile.py", "fun_name": "test_avatar_constraints_file_size", "commit_message": "Configurable limits on avatars (#11846)\n\nOnly allow files which file size and content types match configured\r\nlimits to be set as avatar.\r\n\r\nMost of the inspiration from the non-test code comes from matrix-org/synapse-dinsic#19", "code": "def test_avatar_constraints_file_size(self):\n \n self._setup_local_files(\n {\n \"small\": {\"size\": 40},\n \"big\": {\"size\": 60},\n }\n )\n\n res = self.get_success(\n self.handler.check_avatar_size_and_mime_type(\"mxc://test/small\")\n )\n self.assertTrue(res)\n\n res = self.get_success(\n self.handler.check_avatar_size_and_mime_type(\"mxc://test/big\")\n )\n self.assertFalse(res)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 161, "n_words": 24, "vocab_size": 18, "complexity": 1, "nloc": 15, "token_counts": 71, "n_ast_nodes": 127, "n_identifiers": 9, "d_id": 71029, "documentation": { "docstring": "Tests that a file that's above the allowed file size is forbidden but one\n that's below it is allowed.\n ", "n_words": 19, "vocab_size": 16, "n_whitespaces": 33, "language": "en" } }, { "id": 220161, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/ast.py", "file_name": "ast.py", "fun_name": "get_source_segment", "commit_message": "add python 3.10.4 for windows", "code": "def get_source_segment(source, node, *, padded=False):\n \n try:\n if node.end_lineno is None or node.end_col_offset is None:\n return None\n lineno = node.lineno - 1\n end_lineno = node.end_lineno - 1\n col_offset = node.col_offset\n end_col_offset = node.end_col_offset\n except AttributeError:\n return None\n\n lines = _splitlines_no_ff(source)\n if end_lineno == lineno:\n return lines[lineno].encode()[col_offset:end_col_offset].decode()\n\n if padded:\n padding = _pad_whitespace(lines[lineno].encode()[:col_offset].decode())\n else:\n padding = ''\n\n 
first = padding + lines[lineno].encode()[col_offset:].decode()\n last = lines[end_lineno].encode()[:end_col_offset].decode()\n lines = lines[lineno+1:end_lineno]\n\n lines.insert(0, first)\n lines.append(last)\n return ''.join(lines)\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 183, "n_words": 70, "vocab_size": 45, "complexity": 6, "nloc": 23, "token_counts": 187, "n_ast_nodes": 303, "n_identifiers": 20, "d_id": 55928, "documentation": { "docstring": "Get source code segment of the *source* that generated *node*.\n\n If some location information (`lineno`, `end_lineno`, `col_offset`,\n or `end_col_offset`) is missing, return None.\n\n If *padded* is `True`, the first line of a multi-line statement will\n be padded with spaces to match its original position.\n ", "n_words": 44, "vocab_size": 40, "n_whitespaces": 59, "language": "en" } }, { "id": 129126, "commit_id": "71fae21e8e86c75bc58b53dccae563d15691610f", "repo": "ray", "path": "python/ray/autoscaler/_private/aws/cloudwatch/cloudwatch_helper.py", "file_name": "cloudwatch_helper.py", "fun_name": "_replace_cwa_config_variables", "commit_message": "[autoscaler] AWS Autoscaler CloudWatch Dashboard support (#20266)\n\nThese changes add a set of improvements to enable automatic creation and update of CloudWatch dashboards when provisioning AWS Autoscaling clusters. Successful implementation of these improvements will allow AWS Autoscaler users to:\r\n\r\n1. Get rapid insights into their cluster state via CloudWatch dashboards.\r\n2. Allow users to update their CloudWatch dashboard JSON configuration files during Ray up execution time.\r\n\r\nNotes:\r\n1. This PR is a follow-up PR for #18619, adds dashboard support.", "code": "def _replace_cwa_config_variables(self) -> Dict[str, Any]:\n \n cwa_config = self._load_config_file(\"agent\")\n self._replace_all_config_variables(\n cwa_config,\n self.node_id,\n self.cluster_name,\n self.provider_config[\"region\"],\n )\n return cwa_config\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 95, "n_words": 16, "vocab_size": 15, "complexity": 1, "nloc": 13, "token_counts": 45, "n_ast_nodes": 72, "n_identifiers": 11, "d_id": 28902, "documentation": { "docstring": "\n replace known variable occurrences in\n Unified Cloudwatch Agent config file\n ", "n_words": 10, "vocab_size": 10, "n_whitespaces": 32, "language": "en" } }, { "id": 101297, "commit_id": "2beceffad9b15c1fd78f06b9b272563321c5a41e", "repo": "faceswap", "path": "plugins/train/trainer/_base.py", "file_name": "_base.py", "fun_name": "get_batch", "commit_message": "Data Augmentation update (#1263)\n\n- lib.detected_face\r\n - Subclass Masks for Landmark based masks\r\n - Add training mask propery + methods to DetectedFace\r\n - lib.training_training\r\n - subclass TrainingDataGenerator for training and preview data\r\n - Split cache into own module\r\n - Reduce thread count to 1 to prevent image corruption + data re-use\r\n - Process on largest model input/output size rather than stored image size\r\n - Size and crop masks during caching stage\r\n - Implement ring buffer for data flow\r\n - Fix preview reload bug\r\n - augmentation\r\n - typing\r\n - switch color aug order\r\n - better initialization\r\n - Fix warp + landmark warp to correctly apply at different image scales\r\n - Slightly improved warp caching\r\n - Don't store whether image is_preview. 
Handle all data as training images implicitly\r\n - plugins.trainer: Typing and fixes to work with trainingdata refactor", "code": "def get_batch(self) -> Tuple[List[List[np.ndarray]], ...]:\n \n model_inputs: List[List[np.ndarray]] = []\n model_targets: List[List[np.ndarray]] = []\n for side in (\"a\", \"b\"):\n side_feed, side_targets = next(self._feeds[side])\n if self._model.config[\"learn_mask\"]: # Add the face mask as it's own target\n side_targets += [side_targets[-1][..., 3][..., None]]\n logger.trace(\"side: %s, input_shapes: %s, target_shapes: %s\", # type: ignore\n side, side_feed.shape, [i.shape for i in side_targets])\n model_inputs.append([side_feed])\n model_targets.append(side_targets)\n\n return model_inputs, model_targets\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 190, "n_words": 59, "vocab_size": 50, "complexity": 4, "nloc": 22, "token_counts": 140, "n_ast_nodes": 217, "n_identifiers": 20, "d_id": 20716, "documentation": { "docstring": " Get the feed data and the targets for each training side for feeding into the model's\n train function.\n\n Returns\n -------\n model_inputs: list\n The inputs to the model for each side A and B\n model_targets: list\n The targets for the model for each side A and B\n ", "n_words": 46, "vocab_size": 26, "n_whitespaces": 111, "language": "en" } }, { "id": 203557, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admin/widgets.py", "file_name": "widgets.py", "fun_name": "optgroups", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def optgroups(self, name, value, attr=None):\n \n default = (None, [], 0)\n groups = [default]\n has_selected = False\n selected_choices = {\n str(v) for v in value if str(v) not in self.choices.field.empty_values\n }\n if not self.is_required and not self.allow_multiple_selected:\n default[1].append(self.create_option(name, \"\", \"\", False, 0))\n remote_model_opts = self.field.remote_field.model._meta\n to_field_name = getattr(\n self.field.remote_field, \"field_name\", remote_model_opts.pk.attname\n )\n to_field_name = remote_model_opts.get_field(to_field_name).attname\n choices = (\n (getattr(obj, to_field_name), self.choices.field.label_from_instance(obj))\n for obj in self.choices.queryset.using(self.db).filter(\n **{\"%s__in\" % to_field_name: selected_choices}\n )\n )\n for option_value, option_label in choices:\n selected = str(option_value) in value and (\n has_selected is False or self.allow_multiple_selected\n )\n has_selected |= selected\n index = len(default[1])\n subgroup = default[1]\n subgroup.append(\n self.create_option(\n name, option_value, option_label, selected_choices, index\n )\n )\n return groups\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 435, "n_words": 108, "vocab_size": 70, "complexity": 9, "nloc": 33, "token_counts": 244, "n_ast_nodes": 373, "n_identifiers": 39, "d_id": 50447, "documentation": { "docstring": "Return selected options based on the ModelChoiceIterator.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 186093, "commit_id": "e75f784b2c788f95e398821266fcaab0f79aa12f", "repo": "textual", "path": "tests/test_binding_inheritance.py", "file_name": "test_binding_inheritance.py", "fun_name": "test_focused_child_widget_with_movement_bindings_on_screen", 
"commit_message": "Add a test for a screen binding movement, wrapping a focusable widget\n\nThis is the heart of the issue introduced by\nhttps://github.com/Textualize/textual/pull/1170/commits/b48a1402b8103ca16d5e338538620e9e08fb2c0e\nand which is being investigated in\nhttps://github.com/Textualize/textual/issues/1343 -- the child widget can be\nfocused, but (as far as the author of the code is concerned) it has no\nbindings. Bindings for movement-oriented keys exist on the screen which\ncomposes up the widget into it. Up until 0.5.0 this worked just fine. As of\n0.6.0, because binding inheritance was introduced, the bindings for movement\nthat live at the `Widget` level cause the widget that has no bindings to\nappear to have bindings.\n\nWhile this can potentially be worked around with the use of\ninherit_bindings, this isn't a very satisfying solution and also breaks the\nrule of least astonishment.\n\nThis test is going to be key to all of this. This is the test that should be\nmade to work without breaking any of the other currently-passing tests.", "code": "async def test_focused_child_widget_with_movement_bindings_on_screen() -> None:\n ", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 8, "n_words": 5, "vocab_size": 5, "complexity": 2, "nloc": 5, "token_counts": 53, "n_ast_nodes": 16, "n_identifiers": 1, "d_id": 45329, "documentation": { "docstring": "A focused child widget, with movement bindings in the screen, should trigger screen actions.", "n_words": 14, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 293739, "commit_id": "9215702388eef03c7c3ed9f756ea0db533d5beec", "repo": "core", "path": "homeassistant/components/logbook/__init__.py", "file_name": "__init__.py", "fun_name": "attributes_icon", "commit_message": "Separate attrs into another table (reduces database size) (#68224)", "code": "def attributes_icon(self):\n \n if self._attributes:\n return self._attributes.get(ATTR_ICON)\n result = ICON_JSON_EXTRACT.search(\n self._row.shared_attrs or self._row.attributes\n )\n return result and result.group(1)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 74, "n_words": 17, "vocab_size": 15, "complexity": 4, "nloc": 7, "token_counts": 47, "n_ast_nodes": 77, "n_identifiers": 12, "d_id": 92792, "documentation": { "docstring": "Extract the icon from the decoded attributes or json.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 64875, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/doctype/journal_entry/journal_entry.py", "file_name": "journal_entry.py", "fun_name": "get_default_bank_cash_account", "commit_message": "style: format code with black", "code": "def get_default_bank_cash_account(company, account_type=None, mode_of_payment=None, account=None):\n\tfrom erpnext.accounts.doctype.sales_invoice.sales_invoice import get_bank_cash_account\n\n\tif mode_of_payment:\n\t\taccount = get_bank_cash_account(mode_of_payment, company).get(\"account\")\n\n\tif not account:\n\t\t\n\t\tif account_type == \"Bank\":\n\t\t\taccount = frappe.get_cached_value(\"Company\", company, \"default_bank_account\")\n\t\t\tif not account:\n\t\t\t\taccount_list = frappe.get_all(\n\t\t\t\t\t\"Account\", filters={\"company\": company, \"account_type\": \"Bank\", \"is_group\": 0}\n\t\t\t\t)\n\t\t\t\tif 
len(account_list) == 1:\n\t\t\t\t\taccount = account_list[0].name\n\n\t\telif account_type == \"Cash\":\n\t\t\taccount = frappe.get_cached_value(\"Company\", company, \"default_cash_account\")\n\t\t\tif not account:\n\t\t\t\taccount_list = frappe.get_all(\n\t\t\t\t\t\"Account\", filters={\"company\": company, \"account_type\": \"Cash\", \"is_group\": 0}\n\t\t\t\t)\n\t\t\t\tif len(account_list) == 1:\n\t\t\t\t\taccount = account_list[0].name\n\n\tif account:\n\t\taccount_details = frappe.db.get_value(\n\t\t\t\"Account\", account, [\"account_currency\", \"account_type\"], as_dict=1\n\t\t)\n\n\t\treturn frappe._dict(\n\t\t\t{\n\t\t\t\t\"account\": account,\n\t\t\t\t\"balance\": get_balance_on(account),\n\t\t\t\t\"account_currency\": account_details.account_currency,\n\t\t\t\t\"account_type\": account_details.account_type,\n\t\t\t}\n\t\t)\n\telse:\n\t\treturn frappe._dict()\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 19, "n_whitespaces": 70, "n_words": 106, "vocab_size": 58, "complexity": 10, "nloc": 40, "token_counts": 236, "n_ast_nodes": 409, "n_identifiers": 26, "d_id": 13744, "documentation": { "docstring": "\n\t\tSet the default account first. If the user hasn't set any default account then, he doesn't\n\t\twant us to set any random account. In this case set the account only if there is single\n\t\taccount (of that type), otherwise return empty dict.\n\t\t", "n_words": 42, "vocab_size": 33, "n_whitespaces": 39, "language": "en" } }, { "id": 138197, "commit_id": "2cd4637521a0c75e0375266ff52d3dfca2084c2d", "repo": "ray", "path": "python/ray/data/_internal/execution/interfaces.py", "file_name": "interfaces.py", "fun_name": "destroy_if_owned", "commit_message": "[data] New executor backend [1/n]--- Add basic interfaces (#31216)\n\nThis PR adds the basic interfaces and feature flags; split out from https://github.com/ray-project/ray/pull/30903/files\r\n\r\nSee REP ray-project/enhancements#18 for more details.", "code": "def destroy_if_owned(self) -> int:\n \n raise NotImplementedError\n\n\n@dataclass", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@dataclass", "n_ast_errors": 1, "ast_levels": 6, "n_whitespaces": 20, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 7, "token_counts": 10, "n_ast_nodes": 23, "n_identifiers": 5, "d_id": 31352, "documentation": { "docstring": "Clears the object store memory for these blocks if owned.\n\n Returns:\n The number of bytes freed.\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 41, "language": "en" } }, { "id": 272697, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/layers/merging/subtract.py", "file_name": "subtract.py", "fun_name": "subtract", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def subtract(inputs, **kwargs):\n \n return Subtract(**kwargs)(inputs)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 11, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 18, "n_ast_nodes": 32, "n_identifiers": 4, "d_id": 81039, "documentation": { "docstring": "Functional interface to the `Subtract` layer.\n\n Args:\n inputs: A list of input tensors (exactly 2).\n **kwargs: Standard layer keyword arguments.\n\n Returns:\n A tensor, the difference of the inputs.\n\n 
Examples:\n\n ```python\n import keras\n\n input1 = keras.layers.Input(shape=(16,))\n x1 = keras.layers.Dense(8, activation='relu')(input1)\n input2 = keras.layers.Input(shape=(32,))\n x2 = keras.layers.Dense(8, activation='relu')(input2)\n subtracted = keras.layers.subtract([x1, x2])\n\n out = keras.layers.Dense(4)(subtracted)\n model = keras.models.Model(inputs=[input1, input2], outputs=out)\n ```\n ", "n_words": 59, "vocab_size": 48, "n_whitespaces": 154, "language": "en" } }, { "id": 283195, "commit_id": "ab4de1dd70fba866930150e440a03e461a6ca6a8", "repo": "OpenBBTerminal", "path": "build/pyinstaller/user_agent/base.py", "file_name": "base.py", "fun_name": "pick_config_ids", "commit_message": "Create a packaged app bundle with Pyinstaller (#1525)\n\n* Add dashboard widget assets\r\n\r\n* Add ipywidgets and ipyflex to project\r\n\r\n* Add currencies dashboard notebook\r\n\r\n* Update docs and docstrings\r\n\r\n* Add pyinstaller to project deps\r\n\r\n* Add pyinstaller artifacts to gitignore\r\n\r\n* Fix linter errors in terminal.py\r\n\r\n* Update cspell hook and action with a pyinstaller specific word\r\n\r\n* Add pyinstaller specfile and artifacts\r\n\r\n* Add splashscreen image\r\n\r\n* Add app icon\r\n\r\n* adding splash screen support to terminal.spec and terminal.py\r\n\r\n* Restore the conda env build files\r\n\r\n* Sync deps\r\n\r\n* Add border to the splashscreen image\r\n\r\n* Clean up terminal launcher\r\n\r\n* Add support for default feature flags in packages apps\r\n\r\n* Fix types and linting\r\n\r\n* Add splashscreen management to app bootup\r\n\r\n* Check prediction feature flag when entering crypto/pred\r\n\r\n* Update pyinstaller spec file\r\n\r\n* fix .spec file to work for splash and icon - removed the \"..\"\r\n\r\n* Allows to export when using installer (#1568)\r\n\r\n* fix export for packaged apps\r\n\r\n* fix filename\r\n\r\n* Git : replace commit_hash when it is set in config_terminal\r\n\r\n* Add update of the git commit hash in gtff default during build\r\n\r\n* Add packaged app name and feature flag to logs\r\n\r\n* Add platform specific icon assignment\r\n\r\n* Add macOS build assets\r\n\r\n* Add tensorflow to hidden imports\r\n\r\n* Move LOGGING_COMMIT_HASH to gtff\r\n\r\n* Adding files/folders needed to .spec and pyinstaller folder. 
This will make certain commands work again.\r\n\r\n* Linting\r\n\r\n* Workflow : ignore ./build/pyinstaller from codespell\r\n\r\n* Workflow : exclude ./build/pyinstaller from flake8\r\n\r\n* Poetry + Workflow : add types-six\r\n\r\n* Pyinstaller : remove property_cached, user_agent and vaderSentiment\r\n\r\n* Revert \"Pyinstaller : remove property_cached, user_agent and vaderSentiment\"\r\n\r\nThis reverts commit dbb3e2b81086f97819ebd21457148c7160a4d703.\r\n\r\n* Clean up local paths in specfile\r\n\r\n* Validate deps have correct Jinja version (they do)\r\n\r\n* Fix logging commit hash to be set correctly for the logger to see it\r\n\r\nCo-authored-by: Andrew \r\nCo-authored-by: didierlopes.eth \r\nCo-authored-by: Chavithra PARANA ", "code": "def pick_config_ids(device_type, os, navigator):\n \n\n if os is None:\n default_dev_types = [\"desktop\"]\n else:\n default_dev_types = list(DEVICE_TYPE_OS.keys())\n dev_type_choices = get_option_choices(\n \"device_type\", device_type, default_dev_types, list(DEVICE_TYPE_OS.keys())\n )\n os_choices = get_option_choices(\n \"os\", os, list(OS_NAVIGATOR.keys()), list(OS_NAVIGATOR.keys())\n )\n nav_choices = get_option_choices(\n \"navigator\", navigator, list(NAVIGATOR_OS.keys()), list(NAVIGATOR_OS.keys())\n )\n\n variants = []\n for dev, os, nav in product(dev_type_choices, os_choices, nav_choices):\n\n if (\n os in DEVICE_TYPE_OS[dev]\n and nav in DEVICE_TYPE_NAVIGATOR[dev]\n and nav in OS_NAVIGATOR[os]\n ):\n variants.append((dev, os, nav))\n if not variants:\n raise InvalidOption(\n \"Options device_type, os and navigator\" \" conflicts with each other\"\n )\n device_type, os_id, navigator_id = randomizer.choice(variants)\n\n assert os_id in OS_PLATFORM\n assert navigator_id in NAVIGATOR_OS\n assert device_type in DEVICE_TYPE_OS\n\n return device_type, os_id, navigator_id\n\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 273, "n_words": 104, "vocab_size": 66, "complexity": 7, "nloc": 31, "token_counts": 199, "n_ast_nodes": 315, "n_identifiers": 26, "d_id": 84461, "documentation": { "docstring": "\n Select one random pair (device_type, os_id, navigator_id) from\n all possible combinations matching the given os and\n navigator filters.\n\n :param os: allowed os(es)\n :type os: string or list/tuple or None\n :param navigator: allowed browser engine(s)\n :type navigator: string or list/tuple or None\n :param device_type: limit possible oses by device type\n :type device_type: list/tuple or None, possible values:\n \"desktop\", \"smartphone\", \"tablet\", \"all\"\n ", "n_words": 60, "vocab_size": 42, "n_whitespaces": 98, "language": "en" } }, { "id": 269444, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "name_scope", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def name_scope(name):\n ", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "\"\"\"A context manager for use when defining a Python op.\n\n This context manager pushes a name scope, which will make the name of all\n operations added within it have a prefix.\n\n For example, to define a new Python op called `my_op`:use when defining a Python op.\n\n This context manager pushes a namewhich will make the name of all\n operations added within it have a prefix.\n\n Forto define a new Python op called", 
"n_ast_errors": 4, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 64, "n_identifiers": 32, "d_id": 80083, "documentation": { "docstring": "A context manager for use when defining a Python op.\n\n This context manager pushes a name scope, which will make the name of all\n operations added within it have a prefix.\n\n For example, to define a new Python op called `my_op`:\n\n", "n_words": 41, "vocab_size": 34, "n_whitespaces": 49, "language": "en" } }, { "id": 163113, "commit_id": "46ddb8ef882940fa3da58813e0b7a2df1061031e", "repo": "pandas", "path": "pandas/core/indexes/multi.py", "file_name": "multi.py", "fun_name": "get_loc", "commit_message": "BUG: Index.get_loc always raise InvalidIndexError on listlike (#45181)", "code": "def get_loc(self, key, method=None):\n \n if method is not None:\n raise NotImplementedError(\n \"only the default get_loc method is \"\n \"currently supported for MultiIndex\"\n )\n\n self._check_indexing_error(key)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 97, "n_words": 24, "vocab_size": 22, "complexity": 15, "nloc": 51, "token_counts": 324, "n_ast_nodes": 53, "n_identifiers": 6, "d_id": 39371, "documentation": { "docstring": "\n Get location for a label or a tuple of labels.\n\n The location is returned as an integer/slice or boolean\n mask.\n\n Parameters\n ----------\n key : label or tuple of labels (one for each level)\n method : None\n\n Returns\n -------\n loc : int, slice object or boolean mask\n If the key is past the lexsort depth, the return may be a\n boolean mask array, otherwise it is always a slice or int.\n\n See Also\n --------\n Index.get_loc : The get_loc method for (single-level) index.\n MultiIndex.slice_locs : Get slice location given start label(s) and\n end label(s).\n MultiIndex.get_locs : Get location for a label/slice/list/mask or a\n sequence of such.\n\n Notes\n -----\n The key cannot be a slice, list of same-level labels, a boolean mask,\n or a sequence of such. If you want to use those, use\n :meth:`MultiIndex.get_locs` instead.\n\n Examples\n --------\n >>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])\n\n >>> mi.get_loc('b')\n slice(1, 3, None)\n\n >>> mi.get_loc(('b', 'e'))\n 1\n ", "n_words": 149, "vocab_size": 93, "n_whitespaces": 428, "language": "en" } }, { "id": 251421, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "mitmproxy/optmanager.py", "file_name": "optmanager.py", "fun_name": "setter", "commit_message": "make it black!", "code": "def setter(self, attr):\n \n if attr not in self._options:\n raise KeyError(\"No such option: %s\" % attr)\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 40, "n_words": 15, "vocab_size": 15, "complexity": 2, "nloc": 5, "token_counts": 27, "n_ast_nodes": 40, "n_identifiers": 5, "d_id": 73730, "documentation": { "docstring": "\n Generate a setter for a given attribute. 
This returns a callable\n taking a single argument.\n ", "n_words": 15, "vocab_size": 12, "n_whitespaces": 37, "language": "en" } }, { "id": 198306, "commit_id": "2a1afca9477eb781f16d5d6b63fa37abed7740a3", "repo": "sympy", "path": "sympy/simplify/sqrtdenest.py", "file_name": "sqrtdenest.py", "fun_name": "sqrtdenest", "commit_message": "Use sympify less", "code": "def sqrtdenest(expr, max_iter=3):\n \n expr = expand_mul(expr)\n for i in range(max_iter):\n z = _sqrtdenest0(expr)\n if expr == z:\n return expr\n expr = z\n return expr\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 68, "n_words": 24, "vocab_size": 16, "complexity": 3, "nloc": 8, "token_counts": 42, "n_ast_nodes": 69, "n_identifiers": 8, "d_id": 48866, "documentation": { "docstring": "Denests sqrts in an expression that contain other square roots\n if possible, otherwise returns the expr unchanged. This is based on the\n algorithms of [1].\n\n Examples\n ========\n\n >>> from sympy.simplify.sqrtdenest import sqrtdenest\n >>> from sympy import sqrt\n >>> sqrtdenest(sqrt(5 + 2 * sqrt(6)))\n sqrt(2) + sqrt(3)\n\n See Also\n ========\n\n sympy.solvers.solvers.unrad\n\n References\n ==========\n\n .. [1] http://researcher.watson.ibm.com/researcher/files/us-fagin/symb85.pdf\n\n .. [2] D. J. Jeffrey and A. D. Rich, 'Symplifying Square Roots of Square Roots\n by Denesting' (available at http://www.cybertester.com/data/denest.pdf)\n\n ", "n_words": 75, "vocab_size": 63, "n_whitespaces": 133, "language": "en" } }, { "id": 174923, "commit_id": "bad03ef931d9b3ff4f9e75f35f9c41f45839e2a1", "repo": "pip", "path": "src/pip/_internal/models/link.py", "file_name": "link.py", "fun_name": "as_hashes", "commit_message": "Use data-dist-info-metadata (PEP 658) to decouple resolution from downloading (#11111)\n\nCo-authored-by: Tzu-ping Chung ", "code": "def as_hashes(self) -> Hashes:\n \n return Hashes({self.name: [self.value]})\n", "url": "https://github.com/pypa/pip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 3, "token_counts": 23, "n_ast_nodes": 39, "n_identifiers": 5, "d_id": 41518, "documentation": { "docstring": "Return a Hashes instance which checks only for the current hash.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 308798, "commit_id": "10195dc700770cdfdeaff79c53cf5d1d763f20c6", "repo": "core", "path": "tests/components/plex/conftest.py", "file_name": "conftest.py", "fun_name": "plextv_resources_two_servers_fixture", "commit_message": "Improve server selection for Plex config flows (#63408)", "code": "def plextv_resources_two_servers_fixture():\n \n return load_fixture(\"plex/plextv_resources_two_servers.xml\")\n\n\n@pytest.fixture(name=\"plextv_shared_users\", scope=\"session\")", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "@pytest.fixture(name=\"plextv_shared_users\", scope=\"session\")", "n_ast_errors": 1, "ast_levels": 8, "n_whitespaces": 11, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 46, "n_identifiers": 6, "d_id": 107536, "documentation": { "docstring": "Load two-server payload for plex.tv resources and return it.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 262773, "commit_id": "1cd3b73e2939052271a0bc26cf204eebee4dcd15", "repo": "pyinstaller", 
"path": "PyInstaller/utils/osx.py", "file_name": "osx.py", "fun_name": "remove_signature_from_binary", "commit_message": "macOS: Remove the timeouts for codesigning/signature stripping/lipo. (#6644)", "code": "def remove_signature_from_binary(filename):\n \n logger.debug(\"Removing signature from file %r\", filename)\n cmd_args = ['codesign', '--remove', '--all-architectures', filename]\n p = subprocess.run(cmd_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)\n if p.returncode:\n raise SystemError(f\"codesign command ({cmd_args}) failed with error code {p.returncode}!\\noutput: {p.stdout}\")\n\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 54, "n_words": 32, "vocab_size": 31, "complexity": 2, "nloc": 6, "token_counts": 60, "n_ast_nodes": 118, "n_identifiers": 15, "d_id": 77359, "documentation": { "docstring": "\n Remove the signature from all architecture slices of the given binary file using the codesign utility.\n ", "n_words": 16, "vocab_size": 14, "n_whitespaces": 23, "language": "en" } }, { "id": 276014, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/saved_model/load.py", "file_name": "load.py", "fun_name": "_get_child_layer_node_ids", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def _get_child_layer_node_ids(self, node_id):\n \n # Sequential and Functional track layers with names following the format\n # \"layer-N\". Use this to generate the list of layers.\n num_layers = 0\n child_layers = {}\n pattern = re.compile(\"layer-(\\\\d+)\")\n\n for child in self._proto.nodes[node_id].children:\n m = pattern.match(child.local_name)\n if m is None:\n continue\n layer_n = int(m.group(1))\n num_layers = max(layer_n + 1, num_layers)\n child_layers[layer_n] = child.node_id\n\n ordered = []\n for n in range(num_layers):\n child = child_layers.get(n)\n if child is None:\n break\n ordered.append(child)\n return ordered\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 263, "n_words": 75, "vocab_size": 55, "complexity": 5, "nloc": 18, "token_counts": 116, "n_ast_nodes": 191, "n_identifiers": 24, "d_id": 81524, "documentation": { "docstring": "Returns the node ids of each layer in a Sequential/Functional model.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 202366, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/csrf_tests/tests.py", "file_name": "tests.py", "fun_name": "test_token_node_empty_csrf_cookie", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_token_node_empty_csrf_cookie(self):\n \n req = self._get_request(cookie=\"\")\n mw = CsrfViewMiddleware(token_view)\n mw.process_view(req, token_view, (), {})\n resp = token_view(req)\n\n token = get_token(req)\n self.assertIsNotNone(token)\n csrf_secret = _unmask_cipher_token(token)\n self._check_token_present(resp, csrf_secret)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 87, "n_words": 24, "vocab_size": 20, "complexity": 1, "nloc": 9, "token_counts": 68, "n_ast_nodes": 113, "n_identifiers": 16, "d_id": 50084, "documentation": { "docstring": "\n A new token is sent if the csrf_cookie is the empty 
string.\n ", "n_words": 12, "vocab_size": 10, "n_whitespaces": 27, "language": "en" } }, { "id": 42591, "commit_id": "f019fbedb3d2b6a2e6b58ec1b38db612b106568b", "repo": "nltk", "path": "nltk/corpus/reader/bcp47.py", "file_name": "bcp47.py", "fun_name": "parse_tag", "commit_message": "Support both iso639-3 codes and BCP-47 language tags (#3060)\n\n* Add support for iso639-3 language codes\r\n\r\n* Add support for retired language codes\r\n\r\n* Move langnames.py to the top-level\r\n\r\n* Add langcode() function\r\n\r\n* Add iso639retired dictionary\r\n\r\n* Improve wrapper functions\r\n\r\n* Add module docstring with doctest\r\n\r\n* Add 2-letter language codes\r\n\r\n* Add regular expression check\r\n\r\n* Improve inverse lookup of retired codes\r\n\r\n* Support BCP-47\r\n\r\n* Avoid deprecated langcodes\r\n\r\n* Set stack level for warnings to warn on the langname call\r\n\r\nNow it throws e.g.\r\n```\r\n...\\nltk_3060.py:9: UserWarning: Shortening 'smo' to 'sm'\r\n print(f\"{lang}: {langname(code)}\")\r\n```\r\n\r\nRather than\r\n```\r\n...\\nltk\\langnames.py:64: UserWarning: Shortening zha to za\r\n warn(f\"Shortening {code} to {code2}\")\r\n```\r\n\r\n* Dict key membership is equivalent to dict membership\r\n\r\n* Resolve bug: subtag -> tag\r\n\r\n* Capitalize BCP47 in CorpusReader name\r\n\r\n* Reimplement removed type hint changes from #3081\r\n\r\nCo-authored-by: Tom Aarsen ", "code": "def parse_tag(self, tag):\n \n subtags = tag.split(\"-\")\n lang = {}\n labels = [\"language\", \"extlang\", \"script\", \"region\", \"variant\", \"variant\"]\n while subtags and labels:\n subtag = subtags.pop(0)\n found = False\n while labels:\n label = labels.pop(0)\n subtag = self.casing[label](subtag)\n if self.format[label].fullmatch(subtag):\n if subtag in self.db[label]:\n found = True\n valstr = self.val2str(self.db[label][subtag][\"Description\"])\n if label == \"variant\" and label in lang:\n lang[label] += \": \" + valstr\n else:\n lang[label] = valstr\n break\n elif subtag in self.db[\"deprecated\"][label]:\n found = True\n note = f\"The {subtag!r} {label} code is deprecated\"\n if \"Preferred-Value\" in self.db[\"deprecated\"][label][subtag]:\n prefer = self.db[\"deprecated\"][label][subtag][\n \"Preferred-Value\"\n ]\n note += f\"', prefer '{self.val2str(prefer)}'\"\n lang[label] = self.val2str(\n self.db[\"deprecated\"][label][subtag][\"Description\"]\n )\n warn(note)\n break\n if not found:\n if subtag == \"u\" and subtags[0] == \"sd\": # CLDR regional subdivisions\n sd = subtags[1]\n if sd in self.subdiv:\n ext = self.subdiv[sd]\n else:\n ext = f\"\"\n else: # other extension subtags are not supported yet\n ext = f\"{subtag}{''.join(['-'+ext for ext in subtags])}\".lower()\n if not self.format[\"singleton\"].fullmatch(subtag):\n ext = f\"\"\n warn(ext)\n lang[\"extension\"] = ext\n subtags = []\n return lang\n", "url": "https://github.com/nltk/nltk.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 1053, "n_words": 166, "vocab_size": 94, "complexity": 15, "nloc": 47, "token_counts": 318, "n_ast_nodes": 598, "n_identifiers": 25, "d_id": 7646, "documentation": { "docstring": "Convert a BCP-47 tag to a dictionary of labelled subtags", "n_words": 10, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 128542, "commit_id": "b0945548e874642287b81514b71432a2330de1d3", "repo": "ray", "path": "rllib/models/tests/test_torch_model.py", "file_name": "test_torch_model.py", "fun_name": "test_recurrent_unroll_and_filter", 
"commit_message": "[RLlib] Add torch models (#29043)\n\n1. converted class attributes to setters\r\n2. use override decorator\r\n3. SimpleModel should not have any T dimension, it can confuse folks. So I removed it.\r\n4. I merged all the unittests under one class and separated them by methods names. It will be easier to use -k filter to run pytests later if we don't allow repetative method names.\r\n\r\nSigned-off-by: Kourosh Hakhamaneshi \r\nSigned-off-by: Steven Morad ", "code": "def test_recurrent_unroll_and_filter(self):\n \n inputs = TensorDict(\n {\n \"in\": torch.arange(B * T * 2).reshape(B, T, 2),\n \"bork\": torch.arange(5 * 4).reshape(5, 4),\n }\n )\n states = TensorDict(\n {\n \"in\": torch.arange(B * 4).reshape(B, 4),\n \"bork\": torch.arange(5 * 4).reshape(5, 4),\n }\n )\n outputs, out_states = SimpleRecurrentModel(ModelConfig()).unroll(inputs, states)\n desired = TensorDict({\"out\": torch.arange(B * T * 3).reshape(B, T, 3)})\n desired_states = TensorDict({\"out\": torch.arange(B * 5).reshape(B, 5)})\n\n for k in outputs.flatten().keys() | desired.flatten().keys():\n check(outputs[k], desired[k])\n\n for k in out_states.flatten().keys() | desired_states.flatten().keys():\n check(out_states[k], desired_states[k])\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 271, "n_words": 75, "vocab_size": 44, "complexity": 3, "nloc": 20, "token_counts": 235, "n_ast_nodes": 374, "n_identifiers": 21, "d_id": 28737, "documentation": { "docstring": "Ensures unused inputs are filtered out before _unroll and that\n outputs are correct.", "n_words": 13, "vocab_size": 12, "n_whitespaces": 19, "language": "en" } }, { "id": 322238, "commit_id": "1c10abadb7c960e58ce44813f6197dfca9cbd28d", "repo": "PaddleNLP", "path": "paddlenlp/datasets/dataset.py", "file_name": "dataset.py", "fun_name": "_shard", "commit_message": "Integrate HF Datasets and add DatasetTuple (#1612)\n\n* fix bart perf\r\n\r\n* update fastergeneration doc\r\n\r\n* add img\r\n\r\n* add img\r\n\r\n* change img\r\n\r\n* update img\r\n\r\n* fix img\r\n\r\n* update docs\r\n\r\n* fix readme\r\n\r\n* update readme\r\n\r\n* fix perf\r\n\r\n* fix perf\r\n\r\n* fix modelling\r\n\r\n* fix perf and sample code\r\n\r\n* fix perf\r\n\r\n* fix perf\r\n\r\n* fix seq_len for gpt_sample\r\n\r\n* add forced eos token id for faster\r\n\r\n* upgrade perf and add forced eos token id\r\n\r\n* chenge stack to gather\r\n\r\n* add auto perf\r\n\r\n* minor fix\r\n\r\n* remove encoder change\r\n\r\n* Update bart_perf.py\r\n\r\n* Update bart_perf.py\r\n\r\n* 1. Integrate HF Datasets\r\n2. return all splits by default\r\n3. 
load_dataset returns DatasetTuple now\r\n\r\n* add HF Dataset example\r\n\r\n* add kwargs for HF load_dataset\r\n\r\n* change datasets to alternative\r\n\r\n* remove experimental", "code": "def _shard(self, num_shards=None, index=None, contiguous=False):\n \n if num_shards is None:\n num_shards = dist.get_world_size()\n if index is None:\n index = dist.get_rank()\n\n if contiguous:\n div = len(self) // num_shards\n mod = len(self) % num_shards\n start = div * index + min(index, mod)\n end = start + div + (1 if index < mod else 0)\n new_data = [self.new_data[idx] for idx in range(start, end)]\n else:\n new_data = [\n self.new_data[idx] for idx in range(len(self.new_data))\n if idx % num_shards == index\n ]\n\n return MapDataset(new_data)\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 249, "n_words": 78, "vocab_size": 44, "complexity": 8, "nloc": 17, "token_counts": 141, "n_ast_nodes": 220, "n_identifiers": 18, "d_id": 118103, "documentation": { "docstring": "\n Split the dataset into `num_shards` pieces. Note that the size of each\n shard might be different because the original dataset may not be evenly\n divisible.\n\n Args:\n num_shards (int, optional): An integer representing the number of\n data shards. If None, `num_shards` would be number of trainers.\n Defaults to `None`.\n index (int, optional): An integer representing the index of the\n current shard. If None, `index` would be the current trainer rank\n id. Defaults to `None`.\n contiguous: (bool, optional): If true, contiguous chunks of data \n will be select for sharding. And total number of examples will \n be the same. Otherwise each shard will contain all examples of \n dataset whose index mod `num_shards` = `index`. Defaults to `False`.\n ", "n_words": 114, "vocab_size": 66, "n_whitespaces": 291, "language": "en" } }, { "id": 128560, "commit_id": "c1d62d46495f0157faf3168aa87eed350802e10f", "repo": "ray", "path": "python/ray/data/_internal/arrow_serialization.py", "file_name": "arrow_serialization.py", "fun_name": "_register_arrow_data_serializer", "commit_message": "[Datasets] Arrow 7.0.0+ Support: Use Arrow IPC format for pickling Arrow data to circumvent slice view buffer truncation bug. (#29055)\n\nThis PR registers a custom serializer for Array arrays, chunked arrays, record batches, and tables that works around an Arrow serialization bug that serializes the entire underlying data buffer when serializing zero-copy slice views. 
The custom serializer uses the Arrow IPC format as the underlying pickled representation.", "code": "def _register_arrow_data_serializer(serialization_context):\n \n import pyarrow as pa\n\n if os.environ.get(RAY_DISABLE_CUSTOM_ARROW_DATA_SERIALIZATION, \"0\") == \"1\":\n return\n\n # Register custom reducer for Arrow Arrays.\n array_types = _get_arrow_array_types()\n for array_type in array_types:\n serialization_context._register_cloudpickle_reducer(\n array_type, _arrow_array_reduce\n )\n # Register custom reducer for Arrow ChunkedArrays.\n serialization_context._register_cloudpickle_reducer(\n pa.ChunkedArray, _arrow_chunkedarray_reduce\n )\n # Register custom reducer for Arrow RecordBatches.\n serialization_context._register_cloudpickle_reducer(\n pa.RecordBatch, _arrow_recordbatch_reduce\n )\n # Register custom reducer for Arrow Tables.\n serialization_context._register_cloudpickle_reducer(pa.Table, _arrow_table_reduce)\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 149, "n_words": 61, "vocab_size": 38, "complexity": 3, "nloc": 16, "token_counts": 73, "n_ast_nodes": 124, "n_identifiers": 19, "d_id": 28745, "documentation": { "docstring": "Custom reducer for Arrow data that works around a zero-copy slicing pickling\n bug by using the Arrow IPC format for the underlying serialization.\n\n Background:\n Arrow has both array-level slicing and buffer-level slicing; both are zero-copy,\n but the former has a serialization bug where the entire buffer is serialized\n instead of just the slice, while the latter's serialization works as expected\n and only serializes the slice of the buffer. I.e., array-level slicing doesn't\n propagate the slice down to the buffer when serializing the array.\n\n All that these copy methods do is, at serialization time, take the array-level\n slicing and translate them to buffer-level slicing, so only the buffer slice is\n sent over the wire instead of the entire buffer.\n\n See https://issues.apache.org/jira/browse/ARROW-10739.\n ", "n_words": 120, "vocab_size": 75, "n_whitespaces": 188, "language": "en" } }, { "id": 228160, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_scatterternary.py", "file_name": "_scatterternary.py", "fun_name": "csrc", "commit_message": "switch to black .22", "code": "def csrc(self):\n \n return self[\"csrc\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 59833, "documentation": { "docstring": "\n Sets the source reference on Chart Studio Cloud for `c`.\n\n The 'csrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n ", "n_words": 27, "vocab_size": 25, "n_whitespaces": 77, "language": "en" } }, { "id": 320927, "commit_id": "5616a99eff34f7074641d1391ed77d6b4b743529", "repo": "qutebrowser", "path": "tests/unit/mainwindow/test_messageview.py", "file_name": "test_messageview.py", "fun_name": "test_size_hint", "commit_message": "Add a MessageInfo data class\n\nPreparation for #7246", "code": "def test_size_hint(view):\n \n view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test1'))\n height1 = view.sizeHint().height()\n assert height1 > 0\n 
view.show_message(message.MessageInfo(usertypes.MessageLevel.info, 'test2'))\n height2 = view.sizeHint().height()\n assert height2 == height1 * 2\n\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 43, "n_words": 22, "vocab_size": 15, "complexity": 1, "nloc": 7, "token_counts": 72, "n_ast_nodes": 122, "n_identifiers": 12, "d_id": 117440, "documentation": { "docstring": "The message height should increase with more messages.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 321172, "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", "repo": "qutebrowser", "path": "qutebrowser/browser/webkit/network/webkitqutescheme.py", "file_name": "webkitqutescheme.py", "fun_name": "handler", "commit_message": "Run scripts/dev/rewrite_enums.py", "code": "def handler(request, operation, current_url):\n \n if operation != QNetworkAccessManager.Operation.GetOperation:\n return networkreply.ErrorNetworkReply(\n request, \"Unsupported request type\",\n QNetworkReply.NetworkError.ContentOperationNotPermittedError)\n\n url = request.url()\n\n if ((url.scheme(), url.host(), url.path()) ==\n ('qute', 'settings', '/set')):\n if current_url != QUrl('qute://settings/'):\n log.network.warning(\"Blocking malicious request from {} to {}\"\n .format(current_url.toDisplayString(),\n url.toDisplayString()))\n return networkreply.ErrorNetworkReply(\n request, \"Invalid qute://settings request\",\n QNetworkReply.NetworkError.ContentAccessDenied)\n\n try:\n mimetype, data = qutescheme.data_for_url(url)\n except qutescheme.Error as e:\n errors = {\n qutescheme.NotFoundError:\n QNetworkReply.NetworkError.ContentNotFoundError,\n qutescheme.UrlInvalidError:\n QNetworkReply.NetworkError.ContentOperationNotPermittedError,\n qutescheme.RequestDeniedError:\n QNetworkReply.NetworkError.ContentAccessDenied,\n qutescheme.SchemeOSError:\n QNetworkReply.NetworkError.ContentNotFoundError,\n qutescheme.Error:\n QNetworkReply.NetworkError.InternalServerError,\n }\n exctype = type(e)\n log.misc.error(\"{} while handling qute://* URL\".format(\n exctype.__name__))\n return networkreply.ErrorNetworkReply(request, str(e), errors[exctype])\n except qutescheme.Redirect as e:\n qtutils.ensure_valid(e.url)\n return networkreply.RedirectNetworkReply(e.url)\n\n return networkreply.FixedDataNetworkReply(request, data, mimetype)\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 483, "n_words": 93, "vocab_size": 76, "complexity": 6, "nloc": 38, "token_counts": 264, "n_ast_nodes": 418, "n_identifiers": 47, "d_id": 117576, "documentation": { "docstring": "Scheme handler for qute:// URLs.\n\n Args:\n request: QNetworkRequest to answer to.\n operation: The HTTP operation being done.\n current_url: The page we're on currently.\n\n Return:\n A QNetworkReply.\n ", "n_words": 26, "vocab_size": 25, "n_whitespaces": 63, "language": "en" } }, { "id": 20619, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pyparsing/results.py", "file_name": "results.py", "fun_name": "get", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of 
requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def get(self, key, default_value=None):\n \n if key in self:\n return self[key]\n else:\n return default_value\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 56, "n_words": 13, "vocab_size": 12, "complexity": 2, "nloc": 5, "token_counts": 26, "n_ast_nodes": 42, "n_identifiers": 4, "d_id": 3457, "documentation": { "docstring": "\n Returns named result matching the given key, or if there is no\n such name, then returns the given ``default_value`` or ``None`` if no\n ``default_value`` is specified.\n\n Similar to ``dict.get()``.\n\n Example::\n\n integer = Word(nums)\n date_str = integer(\"year\") + '/' + integer(\"month\") + '/' + integer(\"day\")\n\n result = date_str.parse_string(\"1999/12/31\")\n print(result.get(\"year\")) # -> '1999'\n print(result.get(\"hour\", \"not specified\")) # -> 'not specified'\n print(result.get(\"hour\")) # -> None\n ", "n_words": 62, "vocab_size": 44, "n_whitespaces": 171, "language": "en" } }, { "id": 274548, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/losses.py", "file_name": "losses.py", "fun_name": "huber", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def huber(y_true, y_pred, delta=1.0):\n \n y_pred = tf.cast(y_pred, dtype=backend.floatx())\n y_true = tf.cast(y_true, dtype=backend.floatx())\n delta = tf.cast(delta, dtype=backend.floatx())\n error = tf.subtract(y_pred, y_true)\n abs_error = tf.abs(error)\n half = tf.convert_to_tensor(0.5, dtype=abs_error.dtype)\n return backend.mean(\n tf.where(\n abs_error <= delta,\n half * tf.square(error),\n delta * abs_error - half * tf.square(delta),\n ),\n axis=-1,\n )\n\n\n@keras_export(\n \"keras.losses.log_cosh\",\n \"keras.losses.logcosh\",\n \"keras.metrics.log_cosh\",\n \"keras.metrics.logcosh\",\n)\n@tf.__internal__.dispatch.add_dispatch_support", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\n \"keras.losses.log_cosh\",\n \"keras.losses.logcosh\",\n \"keras.metrics.log_cosh\",\n \"keras.metrics.logcosh\",\n)\n@tf.__internal__.dispatch.add_dispatch_support", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 143, "n_words": 53, "vocab_size": 38, "complexity": 1, "nloc": 15, "token_counts": 139, "n_ast_nodes": 243, "n_identifiers": 23, "d_id": 81226, "documentation": { "docstring": "Computes Huber loss value.\n\n For each value x in `error = y_true - y_pred`:\n\n ```\n loss = 0.5 * x^2 if |x| <= d\n loss = d * |x| - 0.5 * d^2 if |x| > d\n ```\n where d is `delta`. 
See: https://en.wikipedia.org/wiki/Huber_loss\n\n Args:\n y_true: tensor of true targets.\n y_pred: tensor of predicted targets.\n delta: A float, the point where the Huber loss function changes from a\n quadratic to linear.\n\n Returns:\n Tensor with one scalar loss entry per sample.\n ", "n_words": 80, "vocab_size": 57, "n_whitespaces": 158, "language": "en" } }, { "id": 281032, "commit_id": "f5b0dc8e7b5ae7ed3a4b175ba48aba0d5ea9d2db", "repo": "OpenBBTerminal", "path": "gamestonk_terminal/economy/fred/prediction/pred_controller.py", "file_name": "pred_controller.py", "fun_name": "print_help", "commit_message": "Add prediction to economy/fred (#1133)", "code": "def print_help(self):\n \n id_string = \"\"\n for s_id, sub_dict in self.current_series.items():\n id_string += f\" [cyan]{s_id.upper()}[/cyan] : {sub_dict['title']}\"\n help_string = f\n t_console.print(help_string)\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 69, "n_words": 20, "vocab_size": 18, "complexity": 2, "nloc": 24, "token_counts": 36, "n_ast_nodes": 96, "n_identifiers": 12, "d_id": 83477, "documentation": { "docstring": "Print help\nPrediction Techniques Menu:\n\n load load new series\n\nSelected Series (starting from [green]{self.start_date}[/green]):\n{id_string}\n\nModels:\n ets exponential smoothing (e.g. Holt-Winters)\n knn k-Nearest Neighbors\n regression polynomial regression\n arima autoregressive integrated moving average\n mlp MultiLayer Perceptron\n rnn Recurrent Neural Network\n lstm Long-Short Term Memory\n conv1d 1D Convolutional Neural Network\n mc Monte-Carlo simulations\n ", "n_words": 51, "vocab_size": 47, "n_whitespaces": 151, "language": "en" } }, { "id": 132209, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/tune/function_runner.py", "file_name": "function_runner.py", "fun_name": "__call__", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def __call__(self, _metric=None, **kwargs):\n \n\n assert self._last_report_time is not None, (\n \"StatusReporter._start() must be called before the first \"\n \"report __call__ is made to ensure correct runtime metrics.\"\n )\n\n if _metric:\n kwargs[DEFAULT_METRIC] = _metric\n\n # time per iteration is recorded directly in the reporter to ensure\n # any delays in logging results aren't counted\n report_time = time.time()\n if TIME_THIS_ITER_S not in kwargs:\n kwargs[TIME_THIS_ITER_S] = report_time - self._last_report_time\n self._last_report_time = report_time\n\n # add results to a thread-safe queue\n self._queue.put(kwargs.copy(), block=True)\n\n # This blocks until notification from the FunctionRunner that the last\n # result has been returned to Tune and that the function is safe to\n # resume training.\n self._continue_semaphore.acquire()\n\n # If the trial should be terminated, exit gracefully.\n if self._end_event.is_set():\n self._end_event.clear()\n sys.exit(0)\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 305, "n_words": 120, "vocab_size": 86, "complexity": 4, "nloc": 16, "token_counts": 107, "n_ast_nodes": 182, "n_identifiers": 20, "d_id": 29699, "documentation": { "docstring": "Report updated training status.\n\n Pass in `done=True` when the training job is completed.\n\n Args:\n kwargs: Latest training 
result status.\n\n Example:\n >>> reporter(mean_accuracy=1, training_iteration=4)\n >>> reporter(mean_accuracy=1, training_iteration=4, done=True)\n\n Raises:\n StopIteration: A StopIteration exception is raised if the trial has\n been signaled to stop.\n ", "n_words": 42, "vocab_size": 35, "n_whitespaces": 136, "language": "en" } }, { "id": 167696, "commit_id": "9612375ca28ade056f15d4338f1bfde5d045c9fc", "repo": "pandas", "path": "pandas/core/config_init.py", "file_name": "config_init.py", "fun_name": "register_plotting_backend_cb", "commit_message": "TYP: return values in core/*.py (#47587)\n\n* TYP: return values in core/*.py\r\n\r\n* fix test\r\n\r\n* to_html\r\n\r\n* to_html part 2\r\n\r\n* DataFrame.query\r\n\r\n* more overloads\r\n\r\n* fix query?\r\n\r\n* increase stacklevel by one\r\n\r\n* fix rename_axis\r\n\r\n* and an overload for DataFrame.eval\r\n\r\n* address comments\r\n\r\n* fix typevar", "code": "def register_plotting_backend_cb(key) -> None:\n if key == \"matplotlib\":\n # We defer matplotlib validation, since it's the default\n return\n from pandas.plotting._core import _get_plot_backend\n\n _get_plot_backend(key)\n\n\nwith cf.config_prefix(\"plotting\"):\n cf.register_option(\n \"backend\",\n defval=\"matplotlib\",\n doc=plotting_backend_doc,\n validator=register_plotting_backend_cb,\n )\n\n\nregister_converter_doc = \n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 88, "n_words": 33, "vocab_size": 33, "complexity": 2, "nloc": 5, "token_counts": 25, "n_ast_nodes": 97, "n_identifiers": 14, "d_id": 40080, "documentation": { "docstring": "\n: bool or 'auto'.\n Whether to register converters with matplotlib's units registry for\n dates, times, datetimes, and Periods. 
Toggling to False will remove\n the converters, restoring any converters that pandas overwrote.\n", "n_words": 31, "vocab_size": 29, "n_whitespaces": 39, "language": "en" } }, { "id": 60904, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/network/lazy_wheel.py", "file_name": "lazy_wheel.py", "fun_name": "dist_from_wheel_url", "commit_message": "upd; format", "code": "def dist_from_wheel_url(name, url, session):\n # type: (str, str, PipSession) -> Distribution\n \n with LazyZipOverHTTP(url, session) as wheel:\n # For read-only ZIP files, ZipFile only needs methods read,\n # seek, seekable and tell, not the whole IO protocol.\n zip_file = ZipFile(wheel) # type: ignore\n # After context manager exit, wheel.name\n # is an invalid file by intention.\n return pkg_resources_distribution_for_wheel(zip_file, name, wheel.name)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 111, "n_words": 59, "vocab_size": 53, "complexity": 1, "nloc": 4, "token_counts": 37, "n_ast_nodes": 66, "n_identifiers": 9, "d_id": 12335, "documentation": { "docstring": "Return a pkg_resources.Distribution from the given wheel URL.\n\n This uses HTTP range requests to only fetch the potion of the wheel\n containing metadata, just enough for the object to be constructed.\n If such requests are not supported, HTTPRangeRequestUnsupported\n is raised.\n ", "n_words": 40, "vocab_size": 34, "n_whitespaces": 55, "language": "en" } }, { "id": 129598, "commit_id": "3d79815cd08c1be8e56c245e662f34366523847e", "repo": "ray", "path": "python/ray/tune/integration/comet.py", "file_name": "comet.py", "fun_name": "_configure_experiment_defaults", "commit_message": "Comet Integration (#20766)\n\nThis PR adds a `CometLoggerCallback` to the Tune Integrations, allowing users to log runs from Ray to [Comet](https://www.comet.ml/site/).\r\n\r\nCo-authored-by: Michael Cullan \r\nCo-authored-by: Antoni Baum ", "code": "def _configure_experiment_defaults(self):\n \n for option in self._exclude_autolog:\n if not self.experiment_kwargs.get(option):\n self.experiment_kwargs[option] = False\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 52, "n_words": 12, "vocab_size": 12, "complexity": 3, "nloc": 4, "token_counts": 32, "n_ast_nodes": 53, "n_identifiers": 6, "d_id": 28986, "documentation": { "docstring": "Disable the specific autologging features that cause throttling.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 3286, "commit_id": "10310ceb2da05837a198db6714d658a1e0a32478", "repo": "prophet", "path": "python/prophet/forecaster.py", "file_name": "forecaster.py", "fun_name": "add_group_component", "commit_message": "Change deprecated `append` to `concat` (#2114)", "code": "def add_group_component(self, components, name, group):\n \n new_comp = components[components['component'].isin(set(group))].copy()\n group_cols = new_comp['col'].unique()\n if len(group_cols) > 0:\n new_comp = pd.DataFrame({'col': group_cols, 'component': name})\n components = pd.concat([components, new_comp])\n return components\n", "url": "https://github.com/facebook/prophet.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 84, "n_words": 27, "vocab_size": 22, "complexity": 2, "nloc": 7, 
"token_counts": 81, "n_ast_nodes": 134, "n_identifiers": 15, "d_id": 429, "documentation": { "docstring": "Adds a component with given name that contains all of the components\n in group.\n\n Parameters\n ----------\n components: Dataframe with components.\n name: Name of new group component.\n group: List of components that form the group.\n\n Returns\n -------\n Dataframe with components.\n ", "n_words": 39, "vocab_size": 29, "n_whitespaces": 109, "language": "en" } }, { "id": 206834, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/views/defaults.py", "file_name": "defaults.py", "fun_name": "permission_denied", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def permission_denied(request, exception, template_name=ERROR_403_TEMPLATE_NAME):\n \n try:\n template = loader.get_template(template_name)\n except TemplateDoesNotExist:\n if template_name != ERROR_403_TEMPLATE_NAME:\n # Reraise if it's a missing custom template.\n raise\n return HttpResponseForbidden(\n ERROR_PAGE_TEMPLATE % {\"title\": \"403 Forbidden\", \"details\": \"\"},\n content_type=\"text/html\",\n )\n return HttpResponseForbidden(\n template.render(request=request, context={\"exception\": str(exception)})\n )\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 134, "n_words": 40, "vocab_size": 36, "complexity": 3, "nloc": 13, "token_counts": 74, "n_ast_nodes": 128, "n_identifiers": 15, "d_id": 51740, "documentation": { "docstring": "\n Permission denied (403) handler.\n\n Templates: :template:`403.html`\n Context:\n exception\n The message from the exception which triggered the 403 (if one was\n supplied).\n\n If the template does not exist, an Http403 response containing the text\n \"403 Forbidden\" (as per RFC 7231) will be returned.\n ", "n_words": 42, "vocab_size": 38, "n_whitespaces": 90, "language": "en" } }, { "id": 158149, "commit_id": "b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2", "repo": "d2l-zh", "path": "d2l/mxnet.py", "file_name": "mxnet.py", "fun_name": "transpose_qkv", "commit_message": "[PaddlePaddle] Merge master into Paddle branch (#1186)\n\n* change 15.2 title in chinese version (#1109)\r\n\r\nchange title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 
情感分析:使用循环神经网络‘\r\n\r\n* 修改部分语义表述 (#1105)\r\n\r\n* Update r0.17.5 (#1120)\r\n\r\n* Bump versions in installation\r\n\r\n* 94行typo: (“bert.mall”)->(“bert.small”) (#1129)\r\n\r\n* line 313: \"bert.mall\" -> \"bert.small\" (#1130)\r\n\r\n* fix: update language as native reader (#1114)\r\n\r\n* Fix the translation of \"stride\" (#1115)\r\n\r\n* Update index.md (#1118)\r\n\r\n修改部分语义表述\r\n\r\n* Update self-attention-and-positional-encoding.md (#1133)\r\n\r\n依照本书的翻译习惯,将pooling翻译成汇聚\r\n\r\n* maybe a comment false (#1149)\r\n\r\n* maybe a little false\r\n\r\n* maybe a little false\r\n\r\n* A minor bug in the rcnn section (Chinese edition) (#1148)\r\n\r\n* Update bert.md (#1137)\r\n\r\n一个笔误\r\n# 假设batch_size=2,num_pred_positions=3\r\n# 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1]\r\n\r\n* Update calculus.md (#1135)\r\n\r\n* fix typo in git documentation (#1106)\r\n\r\n* fix: Update the Chinese translation in lr-scheduler.md (#1136)\r\n\r\n* Update lr-scheduler.md\r\n\r\n* Update chapter_optimization/lr-scheduler.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* fix translation for kaggle-house-price.md (#1107)\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\n* fix translation for kaggle-house-price.md\r\n\r\nSigned-off-by: sunhaizhou \r\n\r\n* Update weight-decay.md (#1150)\r\n\r\n* Update weight-decay.md\r\n\r\n关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解\r\n关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。\r\n并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释\r\n解释为何会增加复杂性以及为何需要细粒度工具。\r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/weight-decay.md\r\n\r\nyep\r\n\r\nCo-authored-by: goldmermaid \r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Fix a spelling error (#1161)\r\n\r\n* Update gru.md (#1152)\r\n\r\nThe key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state.\r\n翻译错误\r\n\r\n* Unify the function naming (#1113)\r\n\r\nUnify naming of the function 'init_xavier()'.\r\n\r\n* Update mlp-concise.md (#1166)\r\n\r\n* Update mlp-concise.md\r\n\r\n语句不通顺\r\n\r\n* Update environment.md\r\n\r\n语序异常\r\n\r\n* Update config.ini\r\n\r\n* fix the imprecise description (#1168)\r\n\r\nCo-authored-by: yuande \r\n\r\n* fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175)\r\n\r\n* Fix some typos. (#1163)\r\n\r\n* Update batch-norm.md (#1170)\r\n\r\nfixing typos u->x in article\r\n\r\n* Update linear-regression.md (#1090)\r\n\r\nWe invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that\r\n\r\n原译文把who也直接翻译出来了。\r\n\r\n* Update mlp.md (#1117)\r\n\r\n* Update mlp.md\r\n\r\n修改部分语义表述\r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: goldmermaid \r\n\r\n* Update chapter_multilayer-perceptrons/mlp.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: goldmermaid \r\n\r\n* Correct a translation error. 
(#1091)\r\n\r\n* Correct a translation error.\r\n\r\n* Update chapter_computer-vision/image-augmentation.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update aws.md (#1121)\r\n\r\n* Update aws.md\r\n\r\n* Update chapter_appendix-tools-for-deep-learning/aws.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update image-augmentation.md (#1093)\r\n\r\n* Update anchor.md (#1088)\r\n\r\nfix a minor issue in code\r\n\r\n* Update anchor.md\r\n\r\n* Update image-augmentation.md\r\n\r\n* fix typo and improve translation in chapter_linear-networks\\softmax-regression.md (#1087)\r\n\r\n* Avoid `torch.meshgrid` user warning (#1174)\r\n\r\nAvoids the following user warning:\r\n```python\r\n~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.)\r\n return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]\r\n```\r\n\r\n* bump to 2.0.0-beta1\r\n\r\n* Update sequence.md\r\n\r\n* bump beta1 on readme\r\n\r\n* Add latex code block background to config\r\n\r\n* BLD: Bump python support version 3.9 (#1183)\r\n\r\n* BLD: Bump python support version 3.9\r\n\r\n* Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4\r\n\r\n* BLD: Bump torch and tensorflow\r\n\r\n* Update Jenkinsfile\r\n\r\n* Update chapter_installation/index.md\r\n\r\n* Update chapter_installation/index.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Update config.ini\r\n\r\n* Update INFO.md\r\n\r\n* Update INFO.md\r\n\r\n* Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187)\r\n\r\n* resolve the conflicts\r\n\r\n* revise from publisher (#1089)\r\n\r\n* revise from publisher\r\n\r\n* d2l api\r\n\r\n* post_latex\r\n\r\n* revise from publisher\r\n\r\n* revise ch11\r\n\r\n* Delete d2l-Copy1.bib\r\n\r\n* clear cache\r\n\r\n* rm d2lbook clear\r\n\r\n* debug anchor\r\n\r\n* keep original d2l doc\r\n\r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\n\r\n* 重复语句 (#1188)\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improve expression for chapter_preliminaries/pandas.md (#1184)\r\n\r\n* Update pandas.md\r\n\r\n* Improve expression\r\n\r\n* Improve expression\r\n\r\n* Update chapter_preliminaries/pandas.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Improce expression for chapter_preliminaries/linear-algebra.md (#1185)\r\n\r\n* Improce expression\r\n\r\n* Improve code comments\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\n* Update chapter_preliminaries/linear-algebra.md\r\n\r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\n\r\n* Fix multibox_detection bugs\r\n\r\n* Update d2l to 0.17.5 version\r\n\r\n* restore older version\r\n\r\n* Upgrade pandas\r\n\r\n* change to python3.8\r\n\r\n* Test warning log\r\n\r\n* relocate warning log\r\n\r\n* test logs filtering\r\n\r\n* Update gru.md\r\n\r\n* Add DeprecationWarning filter\r\n\r\n* Test warning log\r\n\r\n* Update attention mechanisms & computational performance\r\n\r\n* Update multilayer 
perceptron& linear & convolution networks & computer vision\r\n\r\n* Update recurrent&optimition&nlp pretraining & nlp applications\r\n\r\n* ignore warnings\r\n\r\n* Update index.md\r\n\r\n* Update linear networks\r\n\r\n* Update multilayer perceptrons&deep learning computation\r\n\r\n* Update preliminaries\r\n\r\n* Check and Add warning filter\r\n\r\n* Update kaggle-cifar10.md\r\n\r\n* Update object-detection-dataset.md\r\n\r\n* Update ssd.md fcn.md\r\n\r\n* Update hybridize.md\r\n\r\n* Update hybridize.md\r\n\r\nSigned-off-by: sunhaizhou \r\nCo-authored-by: zhou201505013 <39976863+zhou201505013@users.noreply.github.com>\r\nCo-authored-by: Xinwei Liu \r\nCo-authored-by: Anirudh Dagar \r\nCo-authored-by: Aston Zhang <22279212+astonzhang@users.noreply.github.com>\r\nCo-authored-by: hugo_han <57249629+HugoHann@users.noreply.github.com>\r\nCo-authored-by: gyro永不抽风 <1247006353@qq.com>\r\nCo-authored-by: CanChengZheng \r\nCo-authored-by: linlin \r\nCo-authored-by: iuk \r\nCo-authored-by: yoos <49556860+liyunlongaaa@users.noreply.github.com>\r\nCo-authored-by: Mr. Justice Lawrence John Wargrave <65226618+RUCWargrave@users.noreply.github.com>\r\nCo-authored-by: Chiyuan Fu \r\nCo-authored-by: Sunhuashan <48636870+Sunhuashan@users.noreply.github.com>\r\nCo-authored-by: Haiker Sun \r\nCo-authored-by: Ming Liu \r\nCo-authored-by: goldmermaid \r\nCo-authored-by: silenceZheng66 <13754430639@163.com>\r\nCo-authored-by: Wenchao Yan <56541797+YWonchall@users.noreply.github.com>\r\nCo-authored-by: Kiki2049 <55939997+Kiki2049@users.noreply.github.com>\r\nCo-authored-by: Krahets \r\nCo-authored-by: friedmainfunction <73703265+friedmainfunction@users.noreply.github.com>\r\nCo-authored-by: Jameson \r\nCo-authored-by: P. Yao <12227516+YaoPengCN@users.noreply.github.com>\r\nCo-authored-by: Yulv-git <34329208+Yulv-git@users.noreply.github.com>\r\nCo-authored-by: Liu,Xiao <45966993+liuxiao916@users.noreply.github.com>\r\nCo-authored-by: YIN, Gang <1246410+yingang@users.noreply.github.com>\r\nCo-authored-by: Joe-HZ <58297431+Joe-HZ@users.noreply.github.com>\r\nCo-authored-by: lybloveyou <102609904+lybloveyou@users.noreply.github.com>\r\nCo-authored-by: VigourJiang \r\nCo-authored-by: zxhd863943427 <74853597+zxhd863943427@users.noreply.github.com>\r\nCo-authored-by: LYF <27893441+liyufan@users.noreply.github.com>\r\nCo-authored-by: Aston Zhang \r\nCo-authored-by: xiaotinghe \r\nCo-authored-by: Ubuntu \r\nCo-authored-by: Holly-Max <60691735+Holly-Max@users.noreply.github.com>\r\nCo-authored-by: HinGwenWoong \r\nCo-authored-by: Shuai Zhang ", "code": "def transpose_qkv(X, num_heads):\n \n # Shape of input `X`:\n # (`batch_size`, no. of queries or key-value pairs, `num_hiddens`).\n # Shape of output `X`:\n # (`batch_size`, no. of queries or key-value pairs, `num_heads`,\n # `num_hiddens` / `num_heads`)\n X = X.reshape(X.shape[0], X.shape[1], num_heads, -1)\n\n # Shape of output `X`:\n # (`batch_size`, `num_heads`, no. of queries or key-value pairs,\n # `num_hiddens` / `num_heads`)\n X = X.transpose(0, 2, 1, 3)\n\n # Shape of `output`:\n # (`batch_size` * `num_heads`, no. 
of queries or key-value pairs,\n # `num_hiddens` / `num_heads`)\n return X.reshape(-1, X.shape[2], X.shape[3])\n\n", "url": "https://github.com/d2l-ai/d2l-zh.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 132, "n_words": 87, "vocab_size": 37, "complexity": 1, "nloc": 4, "token_counts": 69, "n_ast_nodes": 111, "n_identifiers": 6, "d_id": 37330, "documentation": { "docstring": "Transposition for parallel computation of multiple attention heads.\n\n Defined in :numref:`sec_multihead-attention`", "n_words": 11, "vocab_size": 11, "n_whitespaces": 13, "language": "en" } }, { "id": 310240, "commit_id": "11d0dcf7ac4ddc2638f403ef0ee6b796ac5bbceb", "repo": "core", "path": "tests/components/zwave_js/test_diagnostics.py", "file_name": "test_diagnostics.py", "fun_name": "test_device_diagnostics_error", "commit_message": "Add zwave_js device diagnostics (#64504)\n\n* Add zwave_js device diagnostics\r\n\r\n* Add diagnostics as a dependency in manifest\r\n\r\n* Add failure scenario test\r\n\r\n* fix device diagnostics helper and remove dependency\r\n\r\n* tweak", "code": "async def test_device_diagnostics_error(hass, integration):\n \n dev_reg = async_get(hass)\n device = dev_reg.async_get_or_create(\n config_entry_id=integration.entry_id, identifiers={(\"test\", \"test\")}\n )\n with pytest.raises(ValueError):\n await async_get_device_diagnostics(hass, integration, device)\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 49, "n_words": 20, "vocab_size": 19, "complexity": 1, "nloc": 7, "token_counts": 53, "n_ast_nodes": 90, "n_identifiers": 14, "d_id": 108926, "documentation": { "docstring": "Test the device diagnostics raises exception when an invalid device is used.", "n_words": 12, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 222570, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/ccompiler.py", "file_name": "ccompiler.py", "fun_name": "set_runtime_library_dirs", "commit_message": "add python 3.10.4 for windows", "code": "def set_runtime_library_dirs(self, dirs):\n \n self.runtime_library_dirs = dirs[:]\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 28, "n_identifiers": 4, "d_id": 56644, "documentation": { "docstring": "Set the list of directories to search for shared libraries at\n runtime to 'dirs' (a list of strings). 
This does not affect any\n standard search path that the runtime linker may search by\n default.\n ", "n_words": 34, "vocab_size": 27, "n_whitespaces": 63, "language": "en" } }, { "id": 161034, "commit_id": "b617a87ee40ab384767a27335313c2c65ee094ec", "repo": "MockingBird", "path": "ppg2mel/train/solver.py", "file_name": "solver.py", "fun_name": "exec", "commit_message": "Init ppg extractor and ppg2mel (#375)\n\n* Init ppg extractor and ppg2mel\r\n\r\n* add preprocess and training\r\n\r\n* FIx known issues\r\n\r\n* Update __init__.py\r\n\r\nAllow to gen audio\r\n\r\n* Fix length issue\r\n\r\n* Fix bug of preparing fid\r\n\r\n* Fix sample issues\r\n\r\n* Add UI usage of PPG-vc", "code": "def exec(self):\n \n raise NotImplementedError\n", "url": "https://github.com/babysor/MockingBird.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 8, "n_ast_nodes": 16, "n_identifiers": 3, "d_id": 38858, "documentation": { "docstring": "\n Called by main to execute training/inference\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 21, "language": "en" } }, { "id": 19026, "commit_id": "964f5ab75098c55f028f8acfeeae05df35ea68d5", "repo": "mlflow", "path": "mlflow/models/evaluation/base.py", "file_name": "base.py", "fun_name": "_gen_md5_for_arraylike_obj", "commit_message": "Evaluation Default evaluator (#5092)\n\n* init\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* rename module\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* revert black change\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* change module path\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* lazy load pyspark\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* revert export\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix curcit import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix conftest.py\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* Revert \"fix conftest.py\"\r\n\r\nThis reverts commit 2ea29c62bfffc5461bf77f3da15b5c00f51de19b.\r\n\r\n* fix tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* default evaluator\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: 
Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update hash algo\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comment\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix lint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add more tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix lint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update shap explainer\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* remove scikitplot dep\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add pr curve\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add shap.summary_plot\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* log explainer\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* improve explainer code\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update shap init\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update explainer creating\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update predict_proba\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add multi-class metrics artifacts\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add log_loss metric\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* lazy load pyspark\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* address ben comments\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* prevent show shap logo, add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* support spark model\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add shap version check\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update docs, loose classifier label limit\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* multiclass classifier merge metrics/plots\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* zfill feature name\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add config max_num_classes_threshold_logging_roc_pr_curve_for_multiclass_classifier\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* improve label handling\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* 
refactor\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* add tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* black\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* increase plot dpi\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix test fixture\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix pylint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update doc\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* use matplot rc_context\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix shap import\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* refactor EvaluationDataset\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* limit user specify shap algos\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* clean\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* update evaluation dataset\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* use svg fig\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* revert svg\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* curve dashline, legend display ap/roc, legend move out\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* linewidth 1\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* keyword arguments for evaluate, fix tests\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* mark abc.abstractmethod, kw args for ModelEvaluator methods\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix pylint\r\n\r\nSigned-off-by: Weichen Xu \r\n\r\n* fix pylint\r\n\r\nSigned-off-by: Weichen Xu ", "code": "def _gen_md5_for_arraylike_obj(md5_gen, data):\n \n import numpy as np\n\n len_bytes = _hash_uint64_ndarray_as_bytes(np.array([len(data)], dtype=\"uint64\"))\n md5_gen.update(len_bytes)\n if len(data) < EvaluationDataset.NUM_SAMPLE_ROWS_FOR_HASH * 2:\n md5_gen.update(_hash_array_like_obj_as_bytes(data))\n else:\n head_rows = data[: EvaluationDataset.NUM_SAMPLE_ROWS_FOR_HASH]\n tail_rows = data[-EvaluationDataset.NUM_SAMPLE_ROWS_FOR_HASH :]\n md5_gen.update(_hash_array_like_obj_as_bytes(head_rows))\n md5_gen.update(_hash_array_like_obj_as_bytes(tail_rows))\n\n", "url": "https://github.com/mlflow/mlflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 83, "n_words": 30, "vocab_size": 28, "complexity": 2, "nloc": 11, "token_counts": 98, "n_ast_nodes": 163, "n_identifiers": 16, "d_id": 2874, "documentation": { "docstring": "\n Helper method to generate MD5 hash array-like object, the MD5 will calculate over:\n - array length\n - first NUM_SAMPLE_ROWS_FOR_HASH rows content\n - last NUM_SAMPLE_ROWS_FOR_HASH rows content\n ", "n_words": 26, "vocab_size": 20, "n_whitespaces": 45, "language": "en" } }, { "id": 258973, "commit_id": "b28c5bba66529217ceedd497201a684e5d35b73c", "repo": "scikit-learn", "path": "sklearn/dummy.py", "file_name": "dummy.py", "fun_name": "fit", "commit_message": "FIX DummyRegressor overriding constant (#22486)", "code": "def fit(self, X, y, sample_weight=None):\n \n allowed_strategies = (\"mean\", \"median\", \"quantile\", \"constant\")\n if self.strategy not in allowed_strategies:\n raise ValueError(\n \"Unknown strategy type: %s, expected one of %s.\"\n % (self.strategy, allowed_strategies)\n )\n\n y = check_array(y, ensure_2d=False, input_name=\"y\")\n if len(y) == 0:\n raise ValueError(\"y must not be empty.\")\n\n if y.ndim == 1:\n y = np.reshape(y, (-1, 1))\n self.n_outputs_ = y.shape[1]\n\n check_consistent_length(X, y, sample_weight)\n\n if sample_weight is not None:\n sample_weight = _check_sample_weight(sample_weight, X)\n\n if self.strategy == \"mean\":\n self.constant_ = np.average(y, axis=0, weights=sample_weight)\n\n elif self.strategy == \"median\":\n if 
sample_weight is None:\n self.constant_ = np.median(y, axis=0)\n else:\n self.constant_ = [\n _weighted_percentile(y[:, k], sample_weight, percentile=50.0)\n for k in range(self.n_outputs_)\n ]\n\n elif self.strategy == \"quantile\":\n if self.quantile is None or not np.isscalar(self.quantile):\n raise ValueError(\n \"Quantile must be a scalar in the range [0.0, 1.0], but got %s.\"\n % self.quantile\n )\n\n percentile = self.quantile * 100.0\n if sample_weight is None:\n self.constant_ = np.percentile(y, axis=0, q=percentile)\n else:\n self.constant_ = [\n _weighted_percentile(y[:, k], sample_weight, percentile=percentile)\n for k in range(self.n_outputs_)\n ]\n\n elif self.strategy == \"constant\":\n if self.constant is None:\n raise TypeError(\n \"Constant target value has to be specified \"\n \"when the constant strategy is used.\"\n )\n\n self.constant_ = check_array(\n self.constant,\n accept_sparse=[\"csr\", \"csc\", \"coo\"],\n ensure_2d=False,\n ensure_min_samples=0,\n )\n\n if self.n_outputs_ != 1 and self.constant_.shape[0] != y.shape[1]:\n raise ValueError(\n \"Constant target value should have shape (%d, 1).\" % y.shape[1]\n )\n\n self.constant_ = np.reshape(self.constant_, (1, -1))\n return self\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 944, "n_words": 222, "vocab_size": 128, "complexity": 18, "nloc": 58, "token_counts": 414, "n_ast_nodes": 646, "n_identifiers": 35, "d_id": 75502, "documentation": { "docstring": "Fit the random regressor.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training data.\n\n y : array-like of shape (n_samples,) or (n_samples, n_outputs)\n Target values.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n Returns\n -------\n self : object\n Fitted estimator.\n ", "n_words": 42, "vocab_size": 32, "n_whitespaces": 149, "language": "en" } }, { "id": 242620, "commit_id": "76871795f787756ab1978772b53237948bec377a", "repo": "Pillow", "path": "src/PIL/GifImagePlugin.py", "file_name": "GifImagePlugin.py", "fun_name": "_normalize_mode", "commit_message": "Resolved UNDONE by removing code", "code": "def _normalize_mode(im):\n \n if im.mode in RAWMODE:\n im.load()\n return im\n if Image.getmodebase(im.mode) == \"RGB\":\n im = im.convert(\"P\", palette=Image.Palette.ADAPTIVE)\n if im.palette.mode == \"RGBA\":\n for rgba in im.palette.colors.keys():\n if rgba[3] == 0:\n im.info[\"transparency\"] = im.palette.colors[rgba]\n break\n return im\n return im.convert(\"L\")\n\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 148, "n_words": 37, "vocab_size": 26, "complexity": 6, "nloc": 13, "token_counts": 103, "n_ast_nodes": 173, "n_identifiers": 15, "d_id": 69883, "documentation": { "docstring": "\n Takes an image (or frame), returns an image in a mode that is appropriate\n for saving in a Gif.\n\n It may return the original image, or it may return an image converted to\n palette or 'L' mode.\n\n :param im: Image object\n :returns: Image object\n ", "n_words": 44, "vocab_size": 33, "n_whitespaces": 66, "language": "en" } }, { "id": 130275, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/_private/thirdparty/pathspec/util.py", "file_name": "util.py", "fun_name": "iter_tree_files", "commit_message": "[CI] Format Python code with Black 
(#21975)\n\nSee #21316 and #21311 for the motivation behind these changes.", "code": "def iter_tree_files(root, on_error=None, follow_links=None):\n \n if on_error is not None and not callable(on_error):\n raise TypeError(\"on_error:{!r} is not callable.\".format(on_error))\n\n if follow_links is None:\n follow_links = True\n\n for entry in _iter_tree_entries_next(\n os.path.abspath(root), \"\", {}, on_error, follow_links\n ):\n if not entry.is_dir(follow_links):\n yield entry.path\n\n\n# Alias `iter_tree_files()` as `iter_tree()`.\niter_tree = iter_tree_files\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 99, "n_words": 47, "vocab_size": 37, "complexity": 6, "nloc": 10, "token_counts": 81, "n_ast_nodes": 136, "n_identifiers": 14, "d_id": 29200, "documentation": { "docstring": "\n Walks the specified directory for all files.\n\n *root* (:class:`str`) is the root directory to search for files.\n\n *on_error* (:class:`~collections.abc.Callable` or :data:`None`)\n optionally is the error handler for file-system exceptions. It will be\n called with the exception (:exc:`OSError`). Reraise the exception to\n abort the walk. Default is :data:`None` to ignore file-system\n exceptions.\n\n *follow_links* (:class:`bool` or :data:`None`) optionally is whether\n to walk symbolic links that resolve to directories. Default is\n :data:`None` for :data:`True`.\n\n Raises :exc:`RecursionError` if recursion is detected.\n\n Returns an :class:`~collections.abc.Iterable` yielding the path to\n each file (:class:`str`) relative to *root*.\n ", "n_words": 90, "vocab_size": 59, "n_whitespaces": 133, "language": "en" } }, { "id": 227576, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_pie.py", "file_name": "_pie.py", "fun_name": "insidetextorientation", "commit_message": "switch to black .22", "code": "def insidetextorientation(self):\n \n return self[\"insidetextorientation\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 59249, "documentation": { "docstring": "\n Controls the orientation of the text inside chart sectors. When\n set to \"auto\", text may be oriented in any direction in order\n to be as big as possible in the middle of a sector. The\n \"horizontal\" option orients text to be parallel with the bottom\n of the chart, and may make text smaller in order to achieve\n that goal. The \"radial\" option orients text along the radius of\n the sector. 
The \"tangential\" option orients text perpendicular\n to the radius of the sector.\n\n The 'insidetextorientation' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['horizontal', 'radial', 'tangential', 'auto']\n\n Returns\n -------\n Any\n ", "n_words": 107, "vocab_size": 62, "n_whitespaces": 223, "language": "en" } }, { "id": 123858, "commit_id": "216565fb05166d4bcf80b35a4f8f381e9f6b3d08", "repo": "sqlmap", "path": "lib/request/basic.py", "file_name": "basic.py", "fun_name": "forgeHeaders", "commit_message": "Fixes #5275", "code": "def forgeHeaders(items=None, base=None):\n \n\n items = items or {}\n\n for _ in list(items.keys()):\n if items[_] is None:\n del items[_]\n\n headers = OrderedDict(conf.httpHeaders if base is None else base)\n headers.update(items.items())\n", "url": "https://github.com/sqlmapproject/sqlmap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 61, "n_words": 28, "vocab_size": 23, "complexity": 34, "nloc": 53, "token_counts": 506, "n_ast_nodes": 107, "n_identifiers": 11, "d_id": 27460, "documentation": { "docstring": "\n Prepare HTTP Cookie, HTTP User-Agent and HTTP Referer headers to use when performing\n the HTTP requests\n ", "n_words": 16, "vocab_size": 13, "n_whitespaces": 26, "language": "en" } }, { "id": 212851, "commit_id": "ed2bc288ff17344f6406c49623036620f18e65bb", "repo": "PySimpleGUI", "path": "PySimpleGUI.py", "file_name": "PySimpleGUI.py", "fun_name": "update", "commit_message": "Completed switching all elements over to the new way of handling visiblity", "code": "def update(self, value=None, values=None, disabled=None, visible=None, size=(None, None)):\n \n if not self._widget_was_created(): # if widget hasn't been created yet, then don't allow\n return\n\n if values is not None:\n self.Values = values\n self.TKOptionMenu['menu'].delete(0, 'end')\n\n # Insert list of new options (tk._setit hooks them up to var)\n # self.TKStringVar.set(self.Values[0])\n for new_value in self.Values:\n self.TKOptionMenu['menu'].add_command(label=new_value, command=tk._setit(self.TKStringVar, new_value))\n if value is None:\n self.TKStringVar.set('')\n\n if size == (None, None):\n max_line_len = max([len(str(l)) for l in self.Values]) if len(self.Values) else 0\n if self.AutoSizeText is False:\n width = self.Size[0]\n else:\n width = max_line_len + 1\n self.TKOptionMenu.configure(width=width)\n else:\n self.TKOptionMenu.configure(width=size[0])\n\n if value is not None:\n self.DefaultValue = value\n self.TKStringVar.set(value)\n\n if disabled == True:\n self.TKOptionMenu['state'] = 'disabled'\n elif disabled == False:\n self.TKOptionMenu['state'] = 'normal'\n if visible is False:\n self._pack_forget_save_settings()\n # self.TKOptionMenu.pack_forget()\n elif visible is True:\n self._pack_restore_settings()\n # self.TKOptionMenu.pack(padx=self.pad_used[0], pady=self.pad_used[1])\n if visible is not None:\n self._visible = visible\n\n Update = update\n\n\n# ------------------------- OPTION MENU Element lazy functions ------------------------- #\nInputOptionMenu = OptionMenu\n\n\n# ---------------------------------------------------------------------- #\n# Listbox #\n# ---------------------------------------------------------------------- #", "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 628, "n_words": 160, "vocab_size": 96, "complexity": 15, "nloc": 52, 
"token_counts": 270, "n_ast_nodes": 452, "n_identifiers": 35, "d_id": 53459, "documentation": { "docstring": "\n Changes some of the settings for the OptionMenu Element. Must call `Window.Read` or `Window.Finalize` prior\n\n Changes will not be visible in your window until you call window.read or window.refresh.\n\n If you change visibility, your element may MOVE. If you want it to remain stationary, use the \"layout helper\"\n function \"pin\" to ensure your element is \"pinned\" to that location in your layout so that it returns there\n when made visible.\n\n :param value: the value to choose by default\n :type value: (Any)\n :param values: Values to be displayed\n :type values: List[Any]\n :param disabled: disable or enable state of the element\n :type disabled: (bool)\n :param visible: control visibility of element\n :type visible: (bool)\n :param size: (width, height) size in characters (wide), height is ignored and present to be consistent with other elements\n :type size: (int, int) (width, UNUSED)\n ", "n_words": 136, "vocab_size": 90, "n_whitespaces": 274, "language": "en" } }, { "id": 199733, "commit_id": "5c9a4787c032d39abb80aae106030b177263a7cc", "repo": "sympy", "path": "sympy/polys/orthopolys.py", "file_name": "orthopolys.py", "fun_name": "hermite_prob_poly", "commit_message": "Probabilist's Hermite polynomials\n\nThe plain or physicist's Hermite polynomials have leading coefficient\n2^n, which leads to orthogonality with respect to the simplest possible\nform of the weight function exp(-x^2) and is the specific normalisation\nappearing in the solution to the Schrödinger equation for the quantum\nharmonic oscillator, but leads to unnecessary complications everywhere\nelse.\n\nRemoving the twos in the 3-term recurrence relation leads to the monic\nprobabilist's version; its weight function of exp(-x^2/2) becomes the\nstandard normal distribution after normalising. 
This version also forms\nthe sign-alternated matching polynomial for the complete graph, a highly\ninteresting connection to combinatorics.", "code": "def hermite_prob_poly(n, x=None, polys=False):\n r\n return named_poly(n, dup_hermite_prob, ZZ,\n \"probabilist's Hermite polynomial\", (x,), polys)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 30, "n_words": 14, "vocab_size": 14, "complexity": 1, "nloc": 14, "token_counts": 33, "n_ast_nodes": 47, "n_identifiers": 7, "d_id": 49380, "documentation": { "docstring": "Generates the probabilist's Hermite polynomial `He_n(x)`.\n\n Parameters\n ==========\n\n n : int\n Degree of the polynomial.\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n ", "n_words": 32, "vocab_size": 27, "n_whitespaces": 64, "language": "en" } }, { "id": 176660, "commit_id": "b7d65ffc7183e9a01cdc07b79f8f06403cc4dda4", "repo": "networkx", "path": "networkx/algorithms/shortest_paths/weighted.py", "file_name": "weighted.py", "fun_name": "dijkstra_predecessor_and_distance", "commit_message": "DOC: remove note re: non-existant param (#5648)", "code": "def dijkstra_predecessor_and_distance(G, source, cutoff=None, weight=\"weight\"):\n \n if source not in G:\n raise nx.NodeNotFound(f\"Node {source} is not found in the graph\")\n weight = _weight_function(G, weight)\n pred = {source: []} # dictionary of predecessors\n return (pred, _dijkstra(G, source, weight, pred=pred, cutoff=cutoff))\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 61, "n_words": 38, "vocab_size": 34, "complexity": 2, "nloc": 6, "token_counts": 67, "n_ast_nodes": 106, "n_identifiers": 10, "d_id": 42028, "documentation": { "docstring": "Compute weighted shortest path length and predecessors.\n\n Uses Dijkstra's Method to obtain the shortest weighted paths\n and return dictionaries of predecessors for each node and\n distance for each node from the `source`.\n\n Parameters\n ----------\n G : NetworkX graph\n\n source : node label\n Starting node for path\n\n cutoff : integer or float, optional\n Length (sum of edge weights) at which the search is stopped.\n If cutoff is provided, only return paths with summed weight <= cutoff.\n\n weight : string or function\n If this is a string, then edge weights will be accessed via the\n edge attribute with this key (that is, the weight of the edge\n joining `u` to `v` will be ``G.edges[u, v][weight]``). If no\n such edge attribute exists, the weight of the edge is assumed to\n be one.\n\n If this is a function, the weight of an edge is the value\n returned by the function. The function must accept exactly three\n positional arguments: the two endpoints of an edge and the\n dictionary of edge attributes for that edge. 
The function must\n return a number.\n\n Returns\n -------\n pred, distance : dictionaries\n Returns two dictionaries representing a list of predecessors\n of a node and the distance to each node.\n\n Raises\n ------\n NodeNotFound\n If `source` is not in `G`.\n\n Notes\n -----\n Edge weight attributes must be numerical.\n Distances are calculated as sums of weighted edges traversed.\n\n The list of predecessors contains more than one element only when\n there are more than one shortest paths to the key node.\n\n Examples\n --------\n >>> G = nx.path_graph(5, create_using=nx.DiGraph())\n >>> pred, dist = nx.dijkstra_predecessor_and_distance(G, 0)\n >>> sorted(pred.items())\n [(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [3])]\n >>> sorted(dist.items())\n [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]\n\n >>> pred, dist = nx.dijkstra_predecessor_and_distance(G, 0, 1)\n >>> sorted(pred.items())\n [(0, []), (1, [0])]\n >>> sorted(dist.items())\n [(0, 0), (1, 1)]\n ", "n_words": 302, "vocab_size": 161, "n_whitespaces": 519, "language": "en" } }, { "id": 165733, "commit_id": "2d6a2c3e981208bf67bdd36cca726e8a399e487c", "repo": "pandas", "path": "pandas/core/arrays/_mixins.py", "file_name": "_mixins.py", "fun_name": "_chunk_positional_ranges", "commit_message": "REF: move ArrowStringArray.__setitem__ and related methods to ArrowExtensionArray (#46439)", "code": "def _chunk_positional_ranges(self) -> tuple[tuple[int, int], ...]:\n \n ranges = []\n stop = 0\n for c in self._data.iterchunks():\n start, stop = stop, stop + len(c)\n ranges.append((start, stop))\n return tuple(ranges)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 84, "n_words": 27, "vocab_size": 23, "complexity": 2, "nloc": 16, "token_counts": 63, "n_ast_nodes": 99, "n_identifiers": 12, "d_id": 39697, "documentation": { "docstring": "\n Return a tuple of tuples each containing the left (inclusive)\n and right (exclusive) positional bounds of each chunk's values\n within the underlying ChunkedArray.\n\n Returns\n -------\n tuple[tuple]\n ", "n_words": 26, "vocab_size": 23, "n_whitespaces": 76, "language": "en" } }, { "id": 107725, "commit_id": "fdb4ad372ab66177d99e478385c807e5843d6a0f", "repo": "matplotlib", "path": "lib/matplotlib/colorbar.py", "file_name": "colorbar.py", "fun_name": "_mesh", "commit_message": "MNT: Use a context manager to change the norm in colorbar code\n\nThis removes a deepcopy of the norm in Colorbar, instead updating\nthe vmin/vmax via the context manager and ignoring any\ncallback updates in the process.", "code": "def _mesh(self):\n \n y, extendlen = self._proportional_y()\n # Use the vmin and vmax of the colorbar, which may not be the same\n # as the norm. 
There are situations where the colormap has a\n # narrower range than the colorbar and we want to accommodate the\n # extra contours.\n if (isinstance(self.norm, (colors.BoundaryNorm, colors.NoNorm))\n or self.boundaries is not None):\n # not using a norm.\n y = y * (self.vmax - self.vmin) + self.vmin\n else:\n # Update the norm values in a context manager as it is only\n # a temporary change and we don't want to propagate any signals\n # attached to the norm (callbacks.blocked).\n with self.norm.callbacks.blocked(), \\\n cbook._setattr_cm(self.norm,\n vmin=self.vmin,\n vmax=self.vmax):\n y = self.norm.inverse(y)\n self._y = y\n X, Y = np.meshgrid([0., 1.], y)\n if self.orientation == 'vertical':\n return (X, Y, extendlen)\n else:\n return (Y, X, extendlen)\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 429, "n_words": 134, "vocab_size": 92, "complexity": 4, "nloc": 17, "token_counts": 152, "n_ast_nodes": 239, "n_identifiers": 24, "d_id": 22892, "documentation": { "docstring": "\n Return the coordinate arrays for the colorbar pcolormesh/patches.\n\n These are scaled between vmin and vmax, and already handle colorbar\n orientation.\n ", "n_words": 20, "vocab_size": 17, "n_whitespaces": 49, "language": "en" } }, { "id": 62006, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/locators.py", "file_name": "locators.py", "fun_name": "try_to_replace", "commit_message": "upd; format", "code": "def try_to_replace(self, provider, other, problems):\n \n rlist = self.reqts[other]\n unmatched = set()\n for s in rlist:\n matcher = self.get_matcher(s)\n if not matcher.match(provider.version):\n unmatched.add(s)\n if unmatched:\n # can't replace other with provider\n problems.add(('cantreplace', provider, other,\n frozenset(unmatched)))\n result = False\n else:\n # can replace other with provider\n self.remove_distribution(other)\n del self.reqts[other]\n for s in rlist:\n self.reqts.setdefault(provider, set()).add(s)\n self.add_distribution(provider)\n result = True\n return result\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 284, "n_words": 59, "vocab_size": 40, "complexity": 5, "nloc": 19, "token_counts": 126, "n_ast_nodes": 202, "n_identifiers": 20, "d_id": 12817, "documentation": { "docstring": "\n Attempt to replace one provider with another. This is typically used\n when resolving dependencies from multiple sources, e.g. A requires\n (B >= 1.0) while C requires (B >= 1.1).\n\n For successful replacement, ``provider`` must meet all the requirements\n which ``other`` fulfills.\n\n :param provider: The provider we are trying to replace with.\n :param other: The provider we're trying to replace.\n :param problems: If False is returned, this will contain what\n problems prevented replacement. 
This is currently\n a tuple of the literal string 'cantreplace',\n ``provider``, ``other`` and the set of requirements\n that ``provider`` couldn't fulfill.\n :return: True if we can replace ``other`` with ``provider``, else\n False.\n ", "n_words": 104, "vocab_size": 78, "n_whitespaces": 288, "language": "en" } }, { "id": 179085, "commit_id": "ae8a1e0ff4b13e6e6a0155e346864805b2ca81dd", "repo": "DeepFaceLive", "path": "xlib/mp/csw/DynamicSingleSwitch.py", "file_name": "DynamicSingleSwitch.py", "fun_name": "set_choices", "commit_message": "added Face Animator module", "code": "def set_choices(self, choices, choices_names=None, none_choice_name=None):\n \n self.unselect()\n\n # Validate choices\n if choices is None:\n raise ValueError('Choices cannot be None.')\n if not isinstance(choices, Iterable):\n raise ValueError('Choices must be Iterable')\n\n if choices_names is None:\n choices_names = tuple(str(c) for c in choices)\n elif isinstance(choices_names, (list,tuple)):\n if len(choices_names) != len(choices):\n raise ValueError('mismatch len of choices and choices names')\n elif isinstance(choices_names, dict):\n choices_names = [ choices_names[x] for x in choices ]\n else:\n raise ValueError('unsupported type of choices_names')\n\n if not all( isinstance(x, str) for x in choices_names ):\n raise ValueError('all values in choices_names must be a str')\n\n choices = tuple(choices)\n\n self._set_choices(choices, choices_names, none_choice_name)\n self._send_choices()\n", "url": "https://github.com/iperov/DeepFaceLive.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 364, "n_words": 97, "vocab_size": 62, "complexity": 11, "nloc": 20, "token_counts": 160, "n_ast_nodes": 260, "n_identifiers": 19, "d_id": 42898, "documentation": { "docstring": "\n set choices, and optional choices_names.\n\n choices_names list/dict/None if list, should match the len of choices\n if dict, should return a str by key of choice\n if None, choices will be stringfied\n\n none_choice_name('') str/None if not None, shows None choice with name,\n by default empty string\n ", "n_words": 45, "vocab_size": 36, "n_whitespaces": 230, "language": "en" } }, { "id": 67582, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/doctype/batch/batch.py", "file_name": "batch.py", "fun_name": "get_batches_by_oldest", "commit_message": "style: format code with black", "code": "def get_batches_by_oldest(item_code, warehouse):\n\t\n\tbatches = get_batch_qty(item_code=item_code, warehouse=warehouse)\n\tbatches_dates = [\n\t\t[batch, frappe.get_value(\"Batch\", batch.batch_no, \"expiry_date\")] for batch in batches\n\t]\n\tbatches_dates.sort(key=lambda tup: tup[1])\n\treturn batches_dates\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 17, "n_words": 25, "vocab_size": 22, "complexity": 2, "nloc": 7, "token_counts": 60, "n_ast_nodes": 106, "n_identifiers": 14, "d_id": 14565, "documentation": { "docstring": "Returns the oldest batch and qty for the given item_code and warehouse", "n_words": 12, "vocab_size": 10, "n_whitespaces": 11, "language": "en" } }, { "id": 130222, "commit_id": "7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065", "repo": "ray", "path": "python/ray/_private/signature.py", "file_name": "signature.py", "fun_name": "recover_args", "commit_message": "[CI] Format Python code with Black (#21975)\n\nSee 
#21316 and #21311 for the motivation behind these changes.", "code": "def recover_args(flattened_args):\n \n assert (\n len(flattened_args) % 2 == 0\n ), \"Flattened arguments need to be even-numbered. See `flatten_args`.\"\n args = []\n kwargs = {}\n for name_index in range(0, len(flattened_args), 2):\n name, arg = flattened_args[name_index], flattened_args[name_index + 1]\n if name == DUMMY_TYPE:\n args.append(arg)\n else:\n kwargs[name] = arg\n\n return args, kwargs\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 120, "n_words": 49, "vocab_size": 43, "complexity": 3, "nloc": 13, "token_counts": 80, "n_ast_nodes": 128, "n_identifiers": 11, "d_id": 29164, "documentation": { "docstring": "Recreates `args` and `kwargs` from the flattened arg list.\n\n Args:\n flattened_args: List of args and kwargs. This should be the output of\n `flatten_args`.\n\n Returns:\n args: The non-keyword arguments passed into the function.\n kwargs: The keyword arguments passed into the function.\n ", "n_words": 40, "vocab_size": 30, "n_whitespaces": 81, "language": "en" } }, { "id": 61271, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/utils/misc.py", "file_name": "misc.py", "fun_name": "captured_output", "commit_message": "upd; format", "code": "def captured_output(stream_name):\n # type: (str) -> Iterator[StreamWrapper]\n \n orig_stdout = getattr(sys, stream_name)\n setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))\n try:\n yield getattr(sys, stream_name)\n finally:\n setattr(sys, stream_name, orig_stdout)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 54, "n_words": 22, "vocab_size": 18, "complexity": 2, "nloc": 7, "token_counts": 46, "n_ast_nodes": 75, "n_identifiers": 8, "d_id": 12482, "documentation": { "docstring": "Return a context manager used by captured_stdout/stdin/stderr\n that temporarily replaces the sys stream *stream_name* with a StringIO.\n\n Taken from Lib/support/__init__.py in the CPython repo.\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 33, "language": "en" } }, { "id": 221732, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/contextlib.py", "file_name": "contextlib.py", "fun_name": "_recreate_cm", "commit_message": "add python 3.10.4 for windows", "code": "def _recreate_cm(self):\n \n return self\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 8, "n_ast_nodes": 16, "n_identifiers": 2, "d_id": 56495, "documentation": { "docstring": "Return a recreated instance of self.\n\n Allows an otherwise one-shot context manager like\n _GeneratorContextManager to support use as\n a decorator via implicit recreation.\n\n This is a private interface just for _GeneratorContextManager.\n See issue #11647 for details.\n ", "n_words": 36, "vocab_size": 33, "n_whitespaces": 78, "language": "en" } }, { "id": 215962, "commit_id": "f2a783643de61cac1ff3288b40241e5ce6e1ddc8", "repo": "salt", "path": "salt/modules/virt.py", "file_name": "virt.py", "fun_name": "_fill_disk_filename", "commit_message": "Update to latest ``pyupgrade`` hook. 
Stop skipping it on CI.\n\nSigned-off-by: Pedro Algarvio ", "code": "def _fill_disk_filename(conn, vm_name, disk, hypervisor, pool_caps):\n \n # Compute the filename without extension since it may not make sense for some pool types\n disk[\"filename\"] = \"{}_{}\".format(vm_name, disk[\"name\"])\n\n # Compute the source file path\n base_dir = disk.get(\"pool\", None)\n if hypervisor in [\"qemu\", \"kvm\", \"xen\"]:\n # Compute the base directory from the pool property. We may have either a path\n # or a libvirt pool name there.\n if not base_dir:\n base_dir = _get_images_dir()\n\n # If the pool is a known libvirt one, skip the filename since a libvirt volume will be created later\n if base_dir not in conn.listStoragePools():\n # For path-based disks, keep the qcow2 default format\n if not disk.get(\"format\"):\n disk[\"format\"] = \"qcow2\"\n disk[\"filename\"] = \"{}.{}\".format(disk[\"filename\"], disk[\"format\"])\n disk[\"source_file\"] = os.path.join(base_dir, disk[\"filename\"])\n else:\n if \"pool\" not in disk:\n disk[\"pool\"] = base_dir\n pool_obj = conn.storagePoolLookupByName(base_dir)\n pool_xml = ElementTree.fromstring(pool_obj.XMLDesc())\n pool_type = pool_xml.get(\"type\")\n\n # Disk pools volume names are partition names, they need to be named based on the device name\n if pool_type == \"disk\":\n device = pool_xml.find(\"./source/device\").get(\"path\")\n all_volumes = pool_obj.listVolumes()\n if disk.get(\"source_file\") not in all_volumes:\n indexes = [\n int(re.sub(\"[a-z]+\", \"\", vol_name)) for vol_name in all_volumes\n ] or [0]\n index = min(\n idx for idx in range(1, max(indexes) + 2) if idx not in indexes\n )\n disk[\"filename\"] = \"{}{}\".format(os.path.basename(device), index)\n\n # Is the user wanting to reuse an existing volume?\n if disk.get(\"source_file\"):\n if not disk.get(\"source_file\") in pool_obj.listVolumes():\n raise SaltInvocationError(\n \"{} volume doesn't exist in pool {}\".format(\n disk.get(\"source_file\"), base_dir\n )\n )\n disk[\"filename\"] = disk[\"source_file\"]\n del disk[\"source_file\"]\n\n # Get the default format from the pool capabilities\n if not disk.get(\"format\"):\n volume_options = (\n [\n type_caps.get(\"options\", {}).get(\"volume\", {})\n for type_caps in pool_caps.get(\"pool_types\")\n if type_caps[\"name\"] == pool_type\n ]\n or [{}]\n )[0]\n # Still prefer qcow2 if possible\n if \"qcow2\" in volume_options.get(\"targetFormatType\", []):\n disk[\"format\"] = \"qcow2\"\n else:\n disk[\"format\"] = volume_options.get(\"default_format\", None)\n\n elif hypervisor == \"bhyve\" and vm_name:\n disk[\"filename\"] = \"{}.{}\".format(vm_name, disk[\"name\"])\n disk[\"source_file\"] = os.path.join(\n \"/dev/zvol\", base_dir or \"\", disk[\"filename\"]\n )\n\n elif hypervisor in [\"esxi\", \"vmware\"]:\n if not base_dir:\n base_dir = __salt__[\"config.get\"](\"virt:storagepool\", \"[0] \")\n disk[\"filename\"] = \"{}.{}\".format(disk[\"filename\"], disk[\"format\"])\n disk[\"source_file\"] = \"{}{}\".format(base_dir, disk[\"filename\"])\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 23, "n_whitespaces": 1234, "n_words": 328, "vocab_size": 174, "complexity": 24, "nloc": 60, "token_counts": 518, "n_ast_nodes": 924, "n_identifiers": 40, "d_id": 54284, "documentation": { "docstring": "\n Compute the disk file name and update it in the disk value.\n ", "n_words": 12, "vocab_size": 10, "n_whitespaces": 19, "language": "en" } }, { "id": 250388, 
"commit_id": "652d1669c5a103b1c20478770c4aaf18849c09a3", "repo": "synapse", "path": "tests/handlers/test_register.py", "file_name": "test_register.py", "fun_name": "test_auto_create_auto_join_remote_room", "commit_message": "Add missing type hints to tests.handlers. (#14680)\n\nAnd do not allow untyped defs in tests.handlers.", "code": "def test_auto_create_auto_join_remote_room(self) -> None:\n \n\n # Register a first user; this should call _create_and_join_rooms\n self.get_success(self.handler.register_user(localpart=\"jeff\"))\n\n _, kwargs = self.room_member_handler.update_membership.call_args\n\n self.assertEqual(kwargs[\"room_id\"], self.room_id)\n self.assertEqual(kwargs[\"action\"], \"join\")\n self.assertEqual(kwargs[\"remote_room_hosts\"], [\"remotetest\"])\n\n # Register a second user; this should call _join_rooms\n self.get_success(self.handler.register_user(localpart=\"jeff2\"))\n\n _, kwargs = self.room_member_handler.update_membership.call_args\n\n self.assertEqual(kwargs[\"room_id\"], self.room_id)\n self.assertEqual(kwargs[\"action\"], \"join\")\n self.assertEqual(kwargs[\"remote_room_hosts\"], [\"remotetest\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 135, "n_words": 44, "vocab_size": 27, "complexity": 1, "nloc": 13, "token_counts": 134, "n_ast_nodes": 232, "n_identifiers": 13, "d_id": 73411, "documentation": { "docstring": "Tests that we don't attempt to create remote rooms, and that we don't attempt\n to invite ourselves to rooms we're not in.", "n_words": 22, "vocab_size": 16, "n_whitespaces": 28, "language": "en" } }, { "id": 269469, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "logsumexp", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def logsumexp(x, axis=None, keepdims=False):\n \n return tf.reduce_logsumexp(x, axis, keepdims)\n\n\n@keras_export(\"keras.backend.round\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.backend.round\")\n@tf.__internal__.dispatch.add_dispatch_support\n@doc_controls.do_not_generate_docs", "n_ast_errors": 1, "ast_levels": 7, "n_whitespaces": 14, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 2, "token_counts": 25, "n_ast_nodes": 67, "n_identifiers": 12, "d_id": 80104, "documentation": { "docstring": "Computes log(sum(exp(elements across dimensions of a tensor))).\n\n This function is more numerically stable than log(sum(exp(x))).\n It avoids overflows caused by taking the exp of large inputs and\n underflows caused by taking the log of small inputs.\n\n Args:\n x: A tensor or variable.\n axis: An integer, the axis to reduce over.\n keepdims: A boolean, whether to keep the dimensions or not.\n If `keepdims` is `False`, the rank of the tensor is reduced\n by 1. 
If `keepdims` is `True`, the reduced dimension is\n retained with length 1.\n\n Returns:\n The reduced tensor.\n ", "n_words": 89, "vocab_size": 62, "n_whitespaces": 168, "language": "en" } }, { "id": 83535, "commit_id": "af5d0d6f5e5444332f9f8e565d97f4acdceaa72f", "repo": "zulip", "path": "zerver/tests/test_bots.py", "file_name": "test_bots.py", "fun_name": "test_patch_bot_owner_of_bot_with_can_create_users", "commit_message": "bots: Don't allow admins to change owner of bot with can_create_users.\n\nOrdinary organization administrators shouldn't be allowed to change\nownership of a bot with the can_create_users permission.\n\nThis is a special permission that is granted manually by server\nadministrators to an organization (to a UserProfile of the org owners'\nchoice) after approval by a server administator. The code comments\nprovide more detail about why this is sensitive.", "code": "def test_patch_bot_owner_of_bot_with_can_create_users(self) -> None:\n \n cordelia = self.example_user(\"cordelia\")\n\n self.login(\"hamlet\")\n self.create_bot()\n\n bot_realm = get_realm(\"zulip\")\n bot_email = \"hambot-bot@zulip.testserver\"\n bot_user = get_user(bot_email, bot_realm)\n\n do_change_can_create_users(bot_user, True)\n\n self.logout()\n # iago is an ordinary organization administrator, and thus doesn't have\n # sufficient permissions to change ownership of this bot.\n self.login(\"iago\")\n bot_info = {\n \"bot_owner_id\": cordelia.id,\n }\n result = self.client_patch(f\"/json/bots/{bot_user.id}\", bot_info)\n self.assert_json_error(\n result,\n \"Must be an organization owner\",\n )\n\n self.logout()\n # desdemona is the organization owner and should be allowed to change the bot's ownership.\n self.login(\"desdemona\")\n result = self.client_patch(f\"/json/bots/{bot_user.id}\", bot_info)\n self.assert_json_success(result)\n\n bot_user.refresh_from_db()\n self.assertEqual(bot_user.bot_owner, cordelia)\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 287, "n_words": 86, "vocab_size": 65, "complexity": 1, "nloc": 29, "token_counts": 134, "n_ast_nodes": 251, "n_identifiers": 22, "d_id": 17679, "documentation": { "docstring": "\n can_create_users is granted to organizations upon approval, and thus\n should be thought of as something that only organization owners should\n have control over.\n ", "n_words": 23, "vocab_size": 22, "n_whitespaces": 52, "language": "en" } }, { "id": 92239, "commit_id": "923658b395545abc1b7f7a39cf64d198c9feea74", "repo": "sentry", "path": "src/sentry/api/endpoints/project_dynamic_sampling.py", "file_name": "project_dynamic_sampling.py", "fun_name": "percentile_fn", "commit_message": "feat(dynamic-sampling): Adds endpoint that returns onboarding flow trace info [TET-176] (#36113)\n\nThis PR adds an endpoint for the dynamic sampling onboarding flow that:\r\n\r\n- Does a query to the transactions table to fetch a random sampleSize over the last passed statsPeriod date range.\r\n- If distrubutedTracing mode is enabled, then it runs a subsequent query to fetch the project breakdown in the traces from the first query\r\n- Calculates distribution function values like p50, p90, p95, p99, avg, max, min on the client side sample rates returned from the first query\r\n- Returns the percentage of transactions that did not have a sample rate", "code": "def percentile_fn(data, percentile):\n \n return data[int((len(data) - 1) * percentile)] if len(data) > 0 else None\n\n", "url": 
"https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 21, "n_words": 15, "vocab_size": 15, "complexity": 2, "nloc": 2, "token_counts": 34, "n_ast_nodes": 54, "n_identifiers": 5, "d_id": 18892, "documentation": { "docstring": "\n Returns the nth percentile from a sorted list\n\n :param percentile: A value between 0 and 1\n :param data: Sorted list of values\n ", "n_words": 22, "vocab_size": 20, "n_whitespaces": 35, "language": "en" } }, { "id": 196343, "commit_id": "3e167a67bde4b4817666de48bf98d247bed86e2d", "repo": "sympy", "path": "sympy/combinatorics/perm_groups.py", "file_name": "perm_groups.py", "fun_name": "equals", "commit_message": "Update sympy/combinatorics/perm_groups.py", "code": "def equals(self, other):\n \n if not isinstance(other, PermutationGroup):\n return False\n\n set_self_gens = set(self.generators)\n set_other_gens = set(other.generators)\n\n # before reaching the general case there are also certain\n # optimisation and obvious cases requiring less or no actual\n # computation.\n if set_self_gens == set_other_gens:\n return True\n\n # in the most general case it will check that each generator of\n # one group belongs to the other PermutationGroup and vice-versa\n for gen1 in set_self_gens:\n if not other.contains(gen1):\n return False\n for gen2 in set_other_gens:\n if not self.contains(gen2):\n return False\n return True\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 251, "n_words": 86, "vocab_size": 59, "complexity": 7, "nloc": 14, "token_counts": 76, "n_ast_nodes": 127, "n_identifiers": 12, "d_id": 47843, "documentation": { "docstring": "Return ``True`` if PermutationGroup generated by elements in the\n group are same i.e they represent the same PermutationGroup.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation, PermutationGroup\n >>> p = Permutation(0, 1, 2, 3, 4, 5)\n >>> G = PermutationGroup([p, p**2])\n >>> H = PermutationGroup([p**2, p])\n >>> G.generators == H.generators\n False\n >>> G.equals(H)\n True\n\n ", "n_words": 53, "vocab_size": 43, "n_whitespaces": 137, "language": "en" } }, { "id": 100334, "commit_id": "c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "lib/gui/display_page.py", "file_name": "display_page.py", "fun_name": "set_vars", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. 
Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def set_vars():\n \n return {}\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 8, "n_ast_nodes": 17, "n_identifiers": 1, "d_id": 19830, "documentation": { "docstring": " Override to return a dict of page specific variables ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 10, "language": "en" } }, { "id": 138400, "commit_id": "30ab5458a7e4ba2351d5e1beef8c8797b5946493", "repo": "ray", "path": "dashboard/state_aggregator.py", "file_name": "state_aggregator.py", "fun_name": "get_tasks", "commit_message": "[State Observability] Tasks and Objects API (#23912)\n\nThis PR implements ray list tasks and ray list objects APIs.\r\n\r\nNOTE: You can ignore the merge conflict for now. It is because the first PR was reverted. There's a fix PR open now.", "code": "async def get_tasks(self) -> dict:\n \n replies = await asyncio.gather(\n *[\n self._client.get_task_info(node_id, timeout=DEFAULT_RPC_TIMEOUT)\n for node_id in self._client.get_all_registered_raylet_ids()\n ]\n )\n\n result = defaultdict(dict)\n for reply in replies:\n tasks = reply.task_info_entries\n for task in tasks:\n data = self._message_to_dict(\n message=task,\n fields_to_decode=[\"task_id\"],\n )\n data = filter_fields(data, TaskState)\n result[data[\"task_id\"]] = data\n return result\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 261, "n_words": 47, "vocab_size": 34, "complexity": 4, "nloc": 24, "token_counts": 98, "n_ast_nodes": 157, "n_identifiers": 24, "d_id": 31408, "documentation": { "docstring": "List all task information from the cluster.\n\n Returns:\n {task_id -> task_data_in_dict}\n task_data_in_dict's schema is in TaskState\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 52, "language": "en" } }, { "id": 228605, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_waterfall.py", "file_name": "_waterfall.py", "fun_name": "measure", "commit_message": "switch to black .22", "code": "def measure(self):\n \n return self[\"measure\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 60278, "documentation": { "docstring": "\n An array containing types of values. By default the values are\n considered as 'relative'. However; it is possible to use\n 'total' to compute the sums. 
Also 'absolute' could be applied\n to reset the computed total or to declare an initial value\n where needed.\n\n The 'measure' property is an array that may be specified as a tuple,\n list, numpy array, or pandas Series\n\n Returns\n -------\n numpy.ndarray\n ", "n_words": 65, "vocab_size": 54, "n_whitespaces": 143, "language": "en" } }, { "id": 157333, "commit_id": "ca86da3a30c4e080d4db8c25fca73de843663cb4", "repo": "stablediffusion", "path": "ldm/models/diffusion/ddpm.py", "file_name": "ddpm.py", "fun_name": "disabled_train", "commit_message": "release more models", "code": "def disabled_train(self, mode=True):\n \n return self\n\n", "url": "https://github.com/Stability-AI/stablediffusion.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 11, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 21, "n_identifiers": 3, "d_id": 36899, "documentation": { "docstring": "Overwrite model.train with this function to make sure train/eval mode\n does not change anymore.", "n_words": 14, "vocab_size": 14, "n_whitespaces": 16, "language": "en" } }, { "id": 249234, "commit_id": "1595052b2681fb86c1c1b9a6028c1bc0d38a2e4b", "repo": "synapse", "path": "tests/rest/admin/test_device.py", "file_name": "test_device.py", "fun_name": "test_no_auth", "commit_message": "Use literals in place of `HTTPStatus` constants in tests (#13479)\n\nReplace\r\n- `HTTPStatus.NOT_FOUND`\r\n- `HTTPStatus.FORBIDDEN`\r\n- `HTTPStatus.UNAUTHORIZED`\r\n- `HTTPStatus.CONFLICT`\r\n- `HTTPStatus.CREATED`\r\n\r\nSigned-off-by: Dirk Klimpel ", "code": "def test_no_auth(self) -> None:\n \n channel = self.make_request(\"GET\", self.url, b\"{}\")\n\n self.assertEqual(\n 401,\n channel.code,\n msg=channel.json_body,\n )\n self.assertEqual(Codes.MISSING_TOKEN, channel.json_body[\"errcode\"])\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 84, "n_words": 16, "vocab_size": 16, "complexity": 1, "nloc": 11, "token_counts": 55, "n_ast_nodes": 88, "n_identifiers": 11, "d_id": 72738, "documentation": { "docstring": "\n Try to list devices of an user without authentication.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 11747, "commit_id": "217a11bb8dc613ed1136b8b541a68e6d53ca4fc1", "repo": "jina", "path": "tests/unit/helloworld/multimodal/test_executors.py", "file_name": "test_executors.py", "fun_name": "test_image_crafter_index", "commit_message": "test: fix tests failing after new docarray patch (#4449)\n\n* test: fix tests failing after new docarray patch\r\n\r\n* test: fix failing tests", "code": "def test_image_crafter_index(encoder_doc_array, tmpdir):\n \n create_test_img(path=str(tmpdir), file_name='1.jpg')\n with Flow().add(uses=ImageCrafter) as f:\n res = f.index(inputs=encoder_doc_array)\n assert len(res) == 1\n doc = res[0]\n assert doc.mime_type == 'image/jpeg'\n assert doc.tensor is not None\n\n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 56, "n_words": 28, "vocab_size": 24, "complexity": 1, "nloc": 8, "token_counts": 71, "n_ast_nodes": 120, "n_identifiers": 19, "d_id": 2104, "documentation": { "docstring": "In this test, we input one ``DocumentArray`` with one ``Document``,\n and the `craft` method in the ``ImageCrafter`` returns chunks.\n In the ``ImageCrafter``, we filtered out all the modalities and only 
kept `image/jpeg`.\n So the 2 chunks should left only 1 chunk.\n And the tensor value of the ``Document`` is not empty once we finished crafting since\n we converted image uri/datauri to tensor.\n ", "n_words": 62, "vocab_size": 49, "n_whitespaces": 80, "language": "en" } }, { "id": 211415, "commit_id": "d4e34fe165c09db65fd00113708be1b711ac957c", "repo": "PaddleDetection", "path": "ppdet/modeling/architectures/pose3d_metro.py", "file_name": "pose3d_metro.py", "fun_name": "orthographic_projection", "commit_message": "pose3d metro modeling (#6612)\n\n* pose3d metro modeling\r\n\r\n* delete extra comments", "code": "def orthographic_projection(X, camera):\n \n camera = camera.reshape((-1, 1, 3))\n X_trans = X[:, :, :2] + camera[:, :, 1:]\n shape = paddle.shape(X_trans)\n X_2d = (camera[:, :, 0] * X_trans.reshape((shape[0], -1))).reshape(shape)\n return X_2d\n\n\n@register", "url": "https://github.com/PaddlePaddle/PaddleDetection.git", "language": "Python", "ast_errors": "@register", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 48, "n_words": 31, "vocab_size": 25, "complexity": 1, "nloc": 6, "token_counts": 86, "n_ast_nodes": 137, "n_identifiers": 9, "d_id": 53089, "documentation": { "docstring": "Perform orthographic projection of 3D points X using the camera parameters\n Args:\n X: size = [B, N, 3]\n camera: size = [B, 3]\n Returns:\n Projected 2D points -- size = [B, N, 2]\n ", "n_words": 33, "vocab_size": 24, "n_whitespaces": 63, "language": "en" } }, { "id": 200596, "commit_id": "1d8576449e7ab757f13f49a1d33faed602aa88fb", "repo": "sympy", "path": "sympy/algebras/quaternion.py", "file_name": "quaternion.py", "fun_name": "to_euler", "commit_message": "implemented to_euler and from_euler", "code": "def to_euler(self, seq):\n \n extrinsic = _check_sequence(seq)\n i, j, k = seq.lower()\n i = _elementary_axis_index(i)\n j = _elementary_axis_index(j)\n k = _elementary_axis_index(k)\n\n if not extrinsic:\n i, k = k, i\n\n # check if sequence is symmetric\n symmetric = i == k\n if symmetric:\n k = 6 - i - j\n\n # parity of the permutation\n sign = (i - j) * (j - k) * (k - i) // 2\n\n # permutate elements\n elements = [self.a, self.b, self.c, self.d]\n a = elements[0]\n b = elements[i]\n c = elements[j]\n d = elements[k] * sign\n\n if not symmetric:\n a, b, c, d = a - c, b + d, c + a, d - b\n\n # calculate angles\n half_sum = atan2(b, a)\n half_diff = atan2(d, c)\n\n angle_2 = 2*atan2(sqrt(c*c + d*d), sqrt(a*a + b*b))\n # alternatively, we can use this to avoid the square root:\n # angle_2 = acos(2*(a*a + b*b)/(a*a + b*b + c*c + d*d) - 1)\n\n angle_1 = half_sum + half_diff\n angle_3 = half_sum - half_diff\n\n if extrinsic:\n angle_1, angle_3 = angle_3, angle_1\n\n # for Tait-Bryan angles\n if not symmetric:\n angle_2 -= pi / 2\n if extrinsic:\n angle_3 *= sign\n else:\n angle_1 *= sign\n\n return Matrix([angle_1, angle_2, angle_3])\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 521, "n_words": 197, "vocab_size": 104, "complexity": 7, "nloc": 33, "token_counts": 258, "n_ast_nodes": 404, "n_identifiers": 26, "d_id": 49725, "documentation": { "docstring": "Returns Euler angles representing same in the sequence given by\n `seq`.\n\n Parameters\n ==========\n\n seq : string of length 3\n Represents the sequence of rotations.\n For intrinsic rotations, seq but be all lowercase and its elements\n must be from the set `['x', 'y', 'z']`\n For extrinsic rotations, seq but be all uppercase and its 
elements\n must be from the set `['X', 'Y', 'Z']`\n\n Returns\n =======\n\n Matrix\n The Euler angles calculated from the quaternion\n\n Examples\n ========\n\n >>> from sympy import Quaternion\n >>> from sympy.abc import a, b, c, d\n >>> euler = Quaternion(a, b, c, d).to_euler('zyz')\n >>> euler\n Matrix([[-atan2(-b, c) + atan2(d, a)],\n [2*atan2(sqrt(b**2 + c**2), sqrt(a**2 + d**2))],\n [atan2(-b, c) + atan2(d, a)]])\n\n ", "n_words": 112, "vocab_size": 73, "n_whitespaces": 313, "language": "en" } }, { "id": 190005, "commit_id": "206db54af53a87985c0d243d75304ea620dad520", "repo": "manim", "path": "tests/opengl/test_ipython_magic_opengl.py", "file_name": "test_ipython_magic_opengl.py", "fun_name": "test_jupyter_file_output", "commit_message": "Migrate more `os.path` to `pathlib` in tests (#2991)\n\n* Migrate more `os.path` to `pathlib` in tests\r\n\r\n* Convert test fixtures to pathlib\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* fix mypy errors in tests\r\n\r\n* migrate another pathlib instance\r\n\r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>\r\nCo-authored-by: Benjamin Hackl ", "code": "def test_jupyter_file_output(tmp_path):\n \n scene_name = \"SimpleScene\"\n current_renderer = config.renderer\n with tempconfig({\"scene_names\": [scene_name], \"renderer\": \"opengl\"}):\n file_name = _generate_file_name()\n actual_path = tmp_path.with_name(file_name)\n with actual_path.open(\"w\") as outfile:\n outfile.write(\"\")\n assert actual_path.exists()\n assert actual_path.is_file()\n # needs manually set back to avoid issues across tests\n config.renderer = current_renderer\n", "url": "https://github.com/ManimCommunity/manim.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 113, "n_words": 41, "vocab_size": 33, "complexity": 1, "nloc": 11, "token_counts": 76, "n_ast_nodes": 144, "n_identifiers": 16, "d_id": 46287, "documentation": { "docstring": "Check the jupyter file naming is valid and can be created", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 290687, "commit_id": "9b8f94363c0b4ecd1434ac1ac3bb82febd3889d0", "repo": "core", "path": "tests/components/recorder/test_websocket_api.py", "file_name": "test_websocket_api.py", "fun_name": "test_statistic_during_period_hole", "commit_message": "Fix statistic_during_period for data with holes (#81847)", "code": "async def test_statistic_during_period_hole(recorder_mock, hass, hass_ws_client):\n \n id = 1\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 14, "n_words": 8, "vocab_size": 8, "complexity": 17, "nloc": 137, "token_counts": 900, "n_ast_nodes": 23, "n_identifiers": 5, "d_id": 89801, "documentation": { "docstring": "Test statistic_during_period when there are holes in the data.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 289146, "commit_id": "3b33e0d832b238b40360383099391e2093ea05cb", "repo": "core", "path": "tests/components/homekit/test_type_switches.py", "file_name": "test_type_switches.py", "fun_name": "test_valve_set_state", "commit_message": "Add support for restoring HomeKit IIDs (#79913)", "code": "async def test_valve_set_state(hass, hk_driver, events):\n \n entity_id = \"switch.valve_test\"\n\n hass.states.async_set(entity_id, None)\n await hass.async_block_till_done()\n\n acc = 
Valve(hass, hk_driver, \"Valve\", entity_id, 2, {CONF_TYPE: TYPE_FAUCET})\n await acc.run()\n await hass.async_block_till_done()\n assert acc.category == 29 # Faucet\n assert acc.char_valve_type.value == 3 # Water faucet\n\n acc = Valve(hass, hk_driver, \"Valve\", entity_id, 3, {CONF_TYPE: TYPE_SHOWER})\n await acc.run()\n await hass.async_block_till_done()\n assert acc.category == 30 # Shower\n assert acc.char_valve_type.value == 2 # Shower head\n\n acc = Valve(hass, hk_driver, \"Valve\", entity_id, 4, {CONF_TYPE: TYPE_SPRINKLER})\n await acc.run()\n await hass.async_block_till_done()\n assert acc.category == 28 # Sprinkler\n assert acc.char_valve_type.value == 1 # Irrigation\n\n acc = Valve(hass, hk_driver, \"Valve\", entity_id, 5, {CONF_TYPE: TYPE_VALVE})\n await acc.run()\n await hass.async_block_till_done()\n\n assert acc.aid == 5\n assert acc.category == 29 # Faucet\n\n assert acc.char_active.value == 0\n assert acc.char_in_use.value == 0\n assert acc.char_valve_type.value == 0 # Generic Valve\n\n hass.states.async_set(entity_id, STATE_ON)\n await hass.async_block_till_done()\n assert acc.char_active.value == 1\n assert acc.char_in_use.value == 1\n\n hass.states.async_set(entity_id, STATE_OFF)\n await hass.async_block_till_done()\n assert acc.char_active.value == 0\n assert acc.char_in_use.value == 0\n\n # Set from HomeKit\n call_turn_on = async_mock_service(hass, \"switch\", \"turn_on\")\n call_turn_off = async_mock_service(hass, \"switch\", \"turn_off\")\n\n acc.char_active.client_update_value(1)\n await hass.async_block_till_done()\n assert acc.char_in_use.value == 1\n assert call_turn_on\n assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id\n assert len(events) == 1\n assert events[-1].data[ATTR_VALUE] is None\n\n acc.char_active.client_update_value(0)\n await hass.async_block_till_done()\n assert acc.char_in_use.value == 0\n assert call_turn_off\n assert call_turn_off[0].data[ATTR_ENTITY_ID] == entity_id\n assert len(events) == 2\n assert events[-1].data[ATTR_VALUE] is None\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 371, "n_words": 207, "vocab_size": 70, "complexity": 1, "nloc": 51, "token_counts": 431, "n_ast_nodes": 685, "n_identifiers": 32, "d_id": 88293, "documentation": { "docstring": "Test if Valve accessory and HA are updated accordingly.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 24488, "commit_id": "ddaa2c2552e19635cd6cdf38619f1f176c358f89", "repo": "PaddleOCR", "path": "ppstructure/table/table_master_match.py", "file_name": "table_master_match.py", "fun_name": "deal_bb", "commit_message": "add SLANet", "code": "def deal_bb(result_token):\n \n # find out parts.\n thead_pattern = '(.*?)'\n if re.search(thead_pattern, result_token) is None:\n return result_token\n thead_part = re.search(thead_pattern, result_token).group()\n origin_thead_part = copy.deepcopy(thead_part)\n\n # check \"rowspan\" or \"colspan\" occur in parts or not .\n span_pattern = \"|||\"\n span_iter = re.finditer(span_pattern, thead_part)\n span_list = [s.group() for s in span_iter]\n has_span_in_head = True if len(span_list) > 0 else False\n\n if not has_span_in_head:\n # not include \"rowspan\" or \"colspan\" branch 1.\n # 1. replace to , and to \n # 2. 
it is possible to predict text include or by Text-line recognition,\n # so we replace to , and to \n thead_part = thead_part.replace('', '')\\\n .replace('', '')\\\n .replace('', '')\\\n .replace('', '')\n else:\n # include \"rowspan\" or \"colspan\" branch 2.\n # Firstly, we deal rowspan or colspan cases.\n # 1. replace > to >\n # 2. replace to \n # 3. it is possible to predict text include or by Text-line recognition,\n # so we replace to , and to \n\n # Secondly, deal ordinary cases like branch 1\n\n # replace \">\" to \"\"\n replaced_span_list = []\n for sp in span_list:\n replaced_span_list.append(sp.replace('>', '>'))\n for sp, rsp in zip(span_list, replaced_span_list):\n thead_part = thead_part.replace(sp, rsp)\n\n # replace \"\" to \"\"\n thead_part = thead_part.replace('', '')\n\n # remove duplicated by re.sub\n mb_pattern = \"()+\"\n single_b_string = \"\"\n thead_part = re.sub(mb_pattern, single_b_string, thead_part)\n\n mgb_pattern = \"()+\"\n single_gb_string = \"\"\n thead_part = re.sub(mgb_pattern, single_gb_string, thead_part)\n\n # ordinary cases like branch 1\n thead_part = thead_part.replace('', '').replace('',\n '')\n\n # convert back to , empty cell has no .\n # but space cell( ) is suitable for \n thead_part = thead_part.replace('', '')\n # deal with duplicated \n thead_part = deal_duplicate_bb(thead_part)\n # deal with isolate span tokens, which causes by wrong predict by structure prediction.\n # eg.PMC5994107_011_00.png\n thead_part = deal_isolate_span(thead_part)\n # replace original result with new thead part.\n result_token = result_token.replace(origin_thead_part, thead_part)\n return result_token\n\n", "url": "https://github.com/PaddlePaddle/PaddleOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 720, "n_words": 324, "vocab_size": 170, "complexity": 7, "nloc": 35, "token_counts": 264, "n_ast_nodes": 484, "n_identifiers": 30, "d_id": 4740, "documentation": { "docstring": "\n In our opinion, always occurs in text's context.\n This function will find out all tokens in and insert by manual.\n :param result_token:\n :return:\n ", "n_words": 27, "vocab_size": 24, "n_whitespaces": 43, "language": "en" } }, { "id": 219627, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pydecimal.py", "file_name": "_pydecimal.py", "fun_name": "_convert_other", "commit_message": "add python 3.10.4 for windows", "code": "def _convert_other(other, raiseit=False, allow_float=False):\n \n if isinstance(other, Decimal):\n return other\n if isinstance(other, int):\n return Decimal(other)\n if allow_float and isinstance(other, float):\n return Decimal.from_float(other)\n\n if raiseit:\n raise TypeError(\"Unable to convert %s to Decimal\" % other)\n return NotImplemented\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 80, "n_words": 34, "vocab_size": 25, "complexity": 6, "nloc": 10, "token_counts": 66, "n_ast_nodes": 105, "n_identifiers": 11, "d_id": 55662, "documentation": { "docstring": "Convert other to Decimal.\n\n Verifies that it's ok to use in an implicit construction.\n If allow_float is true, allow conversion from float; this\n is used in the comparison methods (__eq__ and friends).\n\n ", "n_words": 32, "vocab_size": 29, "n_whitespaces": 45, "language": "en" } }, { "id": 263832, "commit_id": "35451d0df77dd4e2c3ad613ee35cb28d99a9421e", "repo": "pyinstaller", "path": 
"PyInstaller/depend/imphook.py", "file_name": "imphook.py", "fun_name": "_process_hidden_imports", "commit_message": "depend: allow hooks to opt out of missing hidden import warnings\n\nImplement new standard hook variable, called\n`warn_on_missing_hiddenimports`. This optional boolean flag\nallows a hook to opt out from warnings generated by missing\nhidden imports originating from that hook.", "code": "def _process_hidden_imports(self):\n \n\n # For each hidden import required by the module being hooked...\n for import_module_name in self.hiddenimports:\n try:\n # Graph node for this module. Do not implicitly create namespace packages for non-existent packages.\n caller = self.module_graph.find_node(self.module_name, create_nspkg=False)\n\n # Manually import this hidden import from this module.\n self.module_graph.import_hook(import_module_name, caller)\n # If this hidden import is unimportable, print a non-fatal warning. Hidden imports often become\n # desynchronized from upstream packages and hence are only \"soft\" recommendations.\n except ImportError:\n if self.warn_on_missing_hiddenimports:\n logger.warning('Hidden import \"%s\" not found!', import_module_name)\n\n # FIXME: This is pretty... intense. Attempting to cleanly \"undo\" prior module graph operations is a recipe for\n # subtle edge cases and difficult-to-debug issues. It would be both safer and simpler to prevent these\n # imports from being added to the graph in the first place. To do so:\n #\n # * Remove the _process_excluded_imports() method below.\n # * Remove the PostGraphAPI.del_imports() method, which cannot reasonably be supported by the following solution,\n # appears to be currently broken, and (in any case) is not called anywhere in the PyInstaller codebase.\n # * Override the ModuleGraph._safe_import_hook() superclass method with a new PyiModuleGraph._safe_import_hook()\n # subclass method resembling:\n #\n # def _safe_import_hook(\n # self, target_module_name, source_module, fromlist,\n # level=DEFAULT_IMPORT_LEVEL, attr=None):\n #\n # if source_module.identifier in self._module_hook_cache:\n # for module_hook in self._module_hook_cache[\n # source_module.identifier]:\n # if target_module_name in module_hook.excludedimports:\n # return []\n #\n # return super()._safe_import_hook(\n # target_module_name, source_module, fromlist,\n # level=level, attr=attr)", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 615, "n_words": 234, "vocab_size": 146, "complexity": 4, "nloc": 8, "token_counts": 57, "n_ast_nodes": 121, "n_identifiers": 14, "d_id": 77456, "documentation": { "docstring": "\n Add all imports listed in this hook script's `hiddenimports` attribute to the module graph as if directly\n imported by this hooked module.\n\n These imports are typically _not_ implicitly detectable by PyInstaller and hence must be explicitly defined\n by hook scripts.\n ", "n_words": 40, "vocab_size": 35, "n_whitespaces": 76, "language": "en" } }, { "id": 60504, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/cli/cmdoptions.py", "file_name": "cmdoptions.py", "fun_name": "check_install_build_global", "commit_message": "upd; format", "code": "def check_install_build_global(options, check_options=None):\n # type: (Values, Optional[Values]) -> None\n \n if check_options is None:\n check_options = options\n", "url": 
"https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 32, "n_words": 16, "vocab_size": 15, "complexity": 3, "nloc": 13, "token_counts": 62, "n_ast_nodes": 32, "n_identifiers": 3, "d_id": 12188, "documentation": { "docstring": "Disable wheels if per-setup.py call options are set.\n\n :param options: The OptionParser options to update.\n :param check_options: The options to check, if not supplied defaults to\n options.\n ", "n_words": 27, "vocab_size": 20, "n_whitespaces": 43, "language": "en" } }, { "id": 20185, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/platformdirs/__init__.py", "file_name": "__init__.py", "fun_name": "user_documents_path", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def user_documents_path() -> Path:\n \n return PlatformDirs().user_documents_path\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 5, "token_counts": 13, "n_ast_nodes": 25, "n_identifiers": 3, "d_id": 3239, "documentation": { "docstring": "\n :returns: documents path tied to the user\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 14, "language": "en" } }, { "id": 100823, "commit_id": "ff6b0209dd5ad57b81b0aca570df7f39a7119bfb", "repo": "faceswap", "path": "plugins/train/model/_base/model.py", "file_name": "model.py", "fun_name": "_config_changeable_items", "commit_message": "Refactoring and TravisCI to Github Actions (#1239)\n\n* refactor training\r\n\r\n* travis to actions", "code": "def _config_changeable_items(self) -> dict:\n \n return Config(self._config_section, configfile=self._configfile).changeable_items\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 4, "token_counts": 23, "n_ast_nodes": 38, "n_identifiers": 8, "d_id": 20274, "documentation": { "docstring": " dict: The configuration options that can be updated after the model has already been\n created. 
", "n_words": 15, "vocab_size": 15, "n_whitespaces": 27, "language": "en" } }, { "id": 63730, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/six.py", "file_name": "six.py", "fun_name": "_import_module", "commit_message": "upd; format", "code": "def _import_module(name):\n \n __import__(name)\n return sys.modules[name]\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 14, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 3, "token_counts": 17, "n_ast_nodes": 30, "n_identifiers": 5, "d_id": 13488, "documentation": { "docstring": "Import module, returning the module after the last dot.", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 269927, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/callbacks.py", "file_name": "callbacks.py", "fun_name": "keras_model_summary", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def keras_model_summary(name, data, step=None):\n \n summary_metadata = tf.compat.v1.SummaryMetadata()\n # Hard coding a plugin name. Please refer to go/tb-plugin-name-hardcode for\n # the rationale.\n summary_metadata.plugin_data.plugin_name = \"graph_keras_model\"\n # version number = 1\n summary_metadata.plugin_data.content = b\"1\"\n\n try:\n json_string = data.to_json()\n except Exception as exc: # pylint: disable=broad-except\n # An exception should not break a model code.\n logging.warning(\n \"Model failed to serialize as JSON. Ignoring... %s\", exc\n )\n return False\n\n with tf.summary.experimental.summary_scope(\n name, \"graph_keras_model\", [data, step]\n ) as (tag, _):\n with tf.device(\"cpu:0\"):\n tensor = tf.constant(json_string, dtype=tf.string)\n return tf.summary.write(\n tag=tag, tensor=tensor, step=step, metadata=summary_metadata\n )\n\n\n@keras_export(\"keras.callbacks.TensorBoard\", v1=[])", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "@keras_export(\"keras.callbacks.TensorBoard\", v1=[])", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 219, "n_words": 90, "vocab_size": 73, "complexity": 2, "nloc": 19, "token_counts": 133, "n_ast_nodes": 240, "n_identifiers": 31, "d_id": 80336, "documentation": { "docstring": "Writes a Keras model as JSON to as a Summary.\n\n Writing the Keras model configuration allows the TensorBoard graph plugin to\n render a conceptual graph, as opposed to graph of ops. In case the model fails\n to serialize as JSON, it ignores and returns False.\n\n Args:\n name: A name for this summary. The summary tag used for TensorBoard will be\n this name prefixed by any active name scopes.\n data: A Keras Model to write.\n step: Explicit `int64`-castable monotonic step value for this summary. 
If\n omitted, this defaults to `tf.summary.experimental.get_step()`, which must\n not be None.\n\n Returns:\n True on success, or False if no summary was written because no default\n summary writer was available.\n\n Raises:\n ValueError: if a default writer exists, but no step was provided and\n `tf.summary.experimental.get_step()` is None.\n ", "n_words": 128, "vocab_size": 87, "n_whitespaces": 207, "language": "en" } }, { "id": 45549, "commit_id": "08575ddd8a72f96a3439f73e973ee9958188eb83", "repo": "airflow", "path": "tests/www/views/test_views_extra_links.py", "file_name": "test_views_extra_links.py", "fun_name": "test_operator_extra_link_multiple_operators", "commit_message": "Change BaseOperatorLink interface to take a ti_key, not a datetime (#21798)", "code": "def test_operator_extra_link_multiple_operators(dag_run, task_2, task_3, viewer_client):\n \n response = viewer_client.get(\n f\"{ENDPOINT}?dag_id={task_2.dag_id}&task_id={task_2.task_id}\"\n f\"&execution_date={DEFAULT_DATE}&link_name=airflow\",\n follow_redirects=True,\n )\n\n assert response.status_code == 200\n response_str = response.data\n if isinstance(response.data, bytes):\n response_str = response_str.decode()\n assert json.loads(response_str) == {'url': 'https://airflow.apache.org/1.10.5/', 'error': None}\n\n response = viewer_client.get(\n f\"{ENDPOINT}?dag_id={task_3.dag_id}&task_id={task_3.task_id}\"\n f\"&execution_date={DEFAULT_DATE}&link_name=airflow\",\n follow_redirects=True,\n )\n\n assert response.status_code == 200\n response_str = response.data\n if isinstance(response.data, bytes):\n response_str = response_str.decode()\n assert json.loads(response_str) == {'url': 'https://airflow.apache.org/1.10.5/', 'error': None}\n\n # Also check that the other Operator Link defined for this operator exists\n response = viewer_client.get(\n f\"{ENDPOINT}?dag_id={task_3.dag_id}&task_id={task_3.task_id}\"\n f\"&execution_date={DEFAULT_DATE}&link_name=google\",\n follow_redirects=True,\n )\n\n assert response.status_code == 200\n response_str = response.data\n if isinstance(response.data, bytes):\n response_str = response_str.decode()\n assert json.loads(response_str) == {'url': 'https://www.google.com', 'error': None}\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 243, "n_words": 99, "vocab_size": 43, "complexity": 4, "nloc": 31, "token_counts": 195, "n_ast_nodes": 386, "n_identifiers": 20, "d_id": 8641, "documentation": { "docstring": "\n This tests checks if Operator Link (AirflowLink2) defined in\n Airflow Plugin (AirflowLink2) is attached to all the list of\n operators defined in the AirflowLink2().operators property\n\n AirflowLink2 returns 'https://airflow.apache.org/1.10.5/' link\n GoogleLink returns 'https://www.google.com'\n ", "n_words": 32, "vocab_size": 27, "n_whitespaces": 51, "language": "en" } }, { "id": 250896, "commit_id": "f2f918a17e8d06c638b1bb5b06b3150a786d77a0", "repo": "mitmproxy", "path": "mitmproxy/dns.py", "file_name": "dns.py", "fun_name": "to_json", "commit_message": "[dns] build and improve web UI", "code": "def to_json(self) -> dict:\n \n return {\n \"id\": self.id,\n \"query\": self.query,\n \"opCode\": self.op_code.name,\n \"authoritativeAnswer\": self.authoritative_answer,\n \"truncation\": self.truncation,\n \"recursionDesired\": self.recursion_desired,\n \"recursionAvailable\": self.recursion_available,\n \"responseCode\": self.response_code.name,\n \"responseCodeHttpEquiv\": 
self.response_code.http_equiv_status_code,\n \"questions\": [{\n \"name\": question.name,\n \"type\": question.type.name,\n \"class\": question.class_.name,\n } for question in self.questions],\n \"answers\": [rr.to_json() for rr in self.answers],\n \"authorities\": [rr.to_json() for rr in self.authorities],\n \"additionals\": [rr.to_json() for rr in self.additionals],\n \"size\": self.size,\n \"timestamp\": self.timestamp,\n }\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 302, "n_words": 60, "vocab_size": 49, "complexity": 5, "nloc": 26, "token_counts": 166, "n_ast_nodes": 271, "n_identifiers": 23, "d_id": 73565, "documentation": { "docstring": "\n Converts the message into json for the mitmweb.\n Sync with web/src/flow.ts.\n ", "n_words": 11, "vocab_size": 10, "n_whitespaces": 33, "language": "en" } }, { "id": 186795, "commit_id": "ae7967c8aed28a8416a329e5eeac117c1672c878", "repo": "certbot", "path": "certbot/certbot/configuration.py", "file_name": "configuration.py", "fun_name": "no_verify_ssl", "commit_message": "docs: how to override the trusted CA certificates (#9357)\n\n* docs: how to override the trusted CA certificates\r\n\r\n* Update certbot/docs/using.rst\r\n\r\nCo-authored-by: ohemorange \r\n\r\nCo-authored-by: ohemorange ", "code": "def no_verify_ssl(self) -> bool:\n \n return self.namespace.no_verify_ssl\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 7, "token_counts": 14, "n_ast_nodes": 25, "n_identifiers": 4, "d_id": 45626, "documentation": { "docstring": "Disable verification of the ACME server's certificate.\n\n The root certificates trusted by Certbot can be overriden by setting the\n REQUESTS_CA_BUNDLE environment variable.\n ", "n_words": 22, "vocab_size": 20, "n_whitespaces": 43, "language": "en" } }, { "id": 60671, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/configuration.py", "file_name": "configuration.py", "fun_name": "_dictionary", "commit_message": "upd; format", "code": "def _dictionary(self):\n # type: () -> Dict[str, Any]\n \n # NOTE: Dictionaries are not populated if not loaded. 
So, conditionals\n # are not needed here.\n retval = {}\n\n for variant in OVERRIDE_ORDER:\n retval.update(self._config[variant])\n\n return retval\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 100, "n_words": 34, "vocab_size": 28, "complexity": 2, "nloc": 5, "token_counts": 28, "n_ast_nodes": 50, "n_identifiers": 7, "d_id": 12237, "documentation": { "docstring": "A dictionary representing the loaded configuration.\n ", "n_words": 6, "vocab_size": 6, "n_whitespaces": 13, "language": "en" } }, { "id": 319780, "commit_id": "53baed03895f28f24113d376b089e3ef281b34ed", "repo": "paperless-ngx", "path": "src/documents/tests/test_api.py", "file_name": "test_api.py", "fun_name": "test_api_set_storage_path_not_provided", "commit_message": "Increases test coverage of storage paths", "code": "def test_api_set_storage_path_not_provided(self):\n \n response = self.client.post(\n \"/api/documents/bulk_edit/\",\n json.dumps(\n {\n \"documents\": [self.doc1.id],\n \"method\": \"set_storage_path\",\n \"parameters\": {},\n },\n ),\n content_type=\"application/json\",\n )\n\n self.assertEqual(response.status_code, 400)\n self.async_task.assert_not_called()\n", "url": "https://github.com/paperless-ngx/paperless-ngx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 187, "n_words": 21, "vocab_size": 21, "complexity": 1, "nloc": 14, "token_counts": 66, "n_ast_nodes": 113, "n_identifiers": 14, "d_id": 116993, "documentation": { "docstring": "\n GIVEN:\n - API data to set the storage path of a document\n - API data is missing storage path ID\n WHEN:\n - API is called\n THEN:\n - set_storage_path is called with correct document IDs and storage_path ID\n ", "n_words": 37, "vocab_size": 24, "n_whitespaces": 110, "language": "en" } }, { "id": 22543, "commit_id": "f0af0c43340763724f139fa68aa1e5a9ffe458b4", "repo": "Python", "path": "XORcipher/XOR_cipher.py", "file_name": "XOR_cipher.py", "fun_name": "encrypt_file", "commit_message": "refactor: clean code\n\nSigned-off-by: slowy07 ", "code": "def encrypt_file(self, file, key=0):\n \n\n # precondition\n assert isinstance(file, str) and isinstance(key, int)\n\n try:\n with open(file, \"r\") as fin:\n with open(\"encrypt.out\", \"w+\") as fout:\n # actual encrypt-process\n for line in fin:\n fout.write(self.encrypt_string(line, key))\n\n except:\n return False\n\n return True\n", "url": "https://github.com/geekcomputers/Python.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 177, "n_words": 37, "vocab_size": 32, "complexity": 4, "nloc": 10, "token_counts": 72, "n_ast_nodes": 125, "n_identifiers": 13, "d_id": 4359, "documentation": { "docstring": "\n input: filename (str) and a key (int)\n output: returns true if encrypt process was\n successful otherwise false\n if key not passed the method uses the key by the constructor.\n otherwise key = 1\n ", "n_words": 33, "vocab_size": 26, "n_whitespaces": 76, "language": "en" } }, { "id": 321418, "commit_id": "0877fb0d78635692e481c8bde224fac5ad0dd430", "repo": "qutebrowser", "path": "tests/unit/misc/test_msgbox.py", "file_name": "test_msgbox.py", "fun_name": "test_finished_signal", "commit_message": "Run scripts/dev/rewrite_enums.py", "code": "def test_finished_signal(qtbot):\n \n signal_triggered = False\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 
0, "ast_levels": 6, "n_whitespaces": 11, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 9, "token_counts": 64, "n_ast_nodes": 18, "n_identifiers": 3, "d_id": 117705, "documentation": { "docstring": "Make sure we can pass a slot to be called when the dialog finished.", "n_words": 14, "vocab_size": 14, "n_whitespaces": 13, "language": "en" } }, { "id": 101106, "commit_id": "03f6cb4e7e106bc227ad781a515338097fba26f9", "repo": "faceswap", "path": "setup.py", "file_name": "setup.py", "fun_name": "_install_setup_packages", "commit_message": "setup.py: implement logging", "code": "def _install_setup_packages(self) -> None:\n \n setup_packages = [(pkg.unsafe_name, pkg.specs)\n for pkg in parse_requirements(_INSTALLER_REQUIREMENTS)]\n\n for pkg in setup_packages:\n if pkg not in self._env.missing_packages:\n continue\n self._env.missing_packages.pop(self._env.missing_packages.index(pkg))\n pkg_str = self._format_package(*pkg)\n if self._env.is_conda:\n cmd = [\"conda\", \"install\", \"-y\"]\n else:\n cmd = [sys.executable, \"-m\", \"pip\", \"install\", \"--no-cache-dir\"]\n if self._env.is_admin:\n cmd.append(\"--user\")\n cmd.append(pkg_str)\n\n clean_pkg = pkg_str.replace(\"\\\"\", \"\")\n if self._subproc_installer(cmd, clean_pkg) != 0:\n logger.error(\"Unable to install package: %s. Process aborted\", clean_pkg)\n sys.exit(1)\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 306, "n_words": 63, "vocab_size": 48, "complexity": 7, "nloc": 24, "token_counts": 160, "n_ast_nodes": 269, "n_identifiers": 26, "d_id": 20537, "documentation": { "docstring": " Install any packages that are required for the setup.py installer to work. This\n includes the pexpect package if it is not already installed.\n\n Subprocess is used as we do not currently have pexpect\n ", "n_words": 33, "vocab_size": 29, "n_whitespaces": 55, "language": "en" } }, { "id": 75098, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/images/models.py", "file_name": "models.py", "fun_name": "attrs_dict", "commit_message": "Reformat with black", "code": "def attrs_dict(self):\n \n return OrderedDict(\n [\n (\"src\", self.url),\n (\"width\", self.width),\n (\"height\", self.height),\n (\"alt\", self.alt),\n ]\n )\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 118, "n_words": 15, "vocab_size": 15, "complexity": 1, "nloc": 9, "token_counts": 44, "n_ast_nodes": 71, "n_identifiers": 7, "d_id": 16354, "documentation": { "docstring": "\n A dict of the src, width, height, and alt attributes for an tag.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 29, "language": "en" } }, { "id": 189048, "commit_id": "04e7aa604155736cce0abcc15c9b7b63d941b0e9", "repo": "psutil", "path": "psutil/tests/__init__.py", "file_name": "__init__.py", "fun_name": "_call_ntimes", "commit_message": "docs: fix simple typo, repeadetly -> repeatedly (#2123)", "code": "def _call_ntimes(self, fun, times):\n \n gc.collect(generation=1)\n mem1 = self._get_mem()\n for x in range(times):\n ret = self.call(fun)\n del x, ret\n gc.collect(generation=1)\n mem2 = self._get_mem()\n self.assertEqual(gc.garbage, [])\n diff = mem2 - mem1 # can also be negative\n return diff\n", "url": "https://github.com/giampaolo/psutil.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 122, "n_words": 36, 
"vocab_size": 27, "complexity": 2, "nloc": 11, "token_counts": 78, "n_ast_nodes": 127, "n_identifiers": 17, "d_id": 45981, "documentation": { "docstring": "Get 2 distinct memory samples, before and after having\n called fun repeatedly, and return the memory difference.\n ", "n_words": 17, "vocab_size": 15, "n_whitespaces": 31, "language": "en" } }, { "id": 46791, "commit_id": "4ffd4f09532fceb67675fce4c1f5cd383eff992e", "repo": "airflow", "path": "dev/breeze/src/airflow_breeze/utils/run_utils.py", "file_name": "run_utils.py", "fun_name": "get_filesystem_type", "commit_message": "Prepare Breeze2 for prime time :) (#22713)\n\nThis is a review and clean-up for all the parameters and\r\ncommands for Breeze2 in order to prepare it for being\r\nused by the contribugors.\r\n\r\nThere are various small fixes here and there, removal\r\nof duplicated code, refactoring and moving code around\r\nas well as cleanup and review all the parameters used\r\nfor all implemented commands.\r\n\r\nThe parameters, default values and their behaviours were\r\nupdated to match \"new\" life of Breeze rather than old\r\none.\r\n\r\nSome improvements are made to the autocomplete and\r\nclick help messages printed. Full list of choices is\r\nalways displayed, parameters are groups according to\r\ntheir target audience, and they were sorted according\r\nto importance and frequency of use.\r\n\r\nVarious messages have been colourised according to their\r\nmeaning - warnings as yellow, errors as red and\r\ninformational messages as bright_blue.\r\n\r\nThe `dry-run` option has been added to just show what\r\nwould have been run without actually running some\r\npotentially \"write\" commands (read commands are still\r\nexecuted) so that you can easily verify and manually\r\ncopy and execute the commands with option to modify\r\nthem before. 
The `dry_run` and `verbose` options are\r\nnow used for all commands.\r\n\r\nThe \"main\" command now runs \"shell\" by default similarly\r\nas the original Breeze.\r\n\r\nAll \"shortcut\" parameters have been standardized - i.e\r\ncommon options (verbose/dry run/help) have one and all\r\ncommon flags that are likely to be used often have an\r\nassigned shortcute.\r\n\r\nThe \"stop\" and \"cleanup\" command have been added\r\nas they are necessary for average user to complete the\r\nregular usage cycle.\r\n\r\nDocumentation for all the important methods have been\r\nupdated.", "code": "def get_filesystem_type(filepath):\n \n # We import it locally so that click autocomplete works\n import psutil\n\n root_type = \"unknown\"\n for part in psutil.disk_partitions():\n if part.mountpoint == '/':\n root_type = part.fstype\n continue\n if filepath.startswith(part.mountpoint):\n return part.fstype\n\n return root_type\n\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 100, "n_words": 35, "vocab_size": 28, "complexity": 4, "nloc": 10, "token_counts": 49, "n_ast_nodes": 87, "n_identifiers": 9, "d_id": 8995, "documentation": { "docstring": "\n Determine the type of filesystem used - we might want to use different parameters if tmpfs is used.\n :param filepath: path to check\n :return: type of filesystem\n ", "n_words": 27, "vocab_size": 23, "n_whitespaces": 40, "language": "en" } }, { "id": 270737, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/base_layer.py", "file_name": "base_layer.py", "fun_name": "input_mask", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def input_mask(self):\n \n inputs = self.input\n if isinstance(inputs, list):\n return [getattr(x, \"_keras_mask\", None) for x in inputs]\n else:\n return getattr(inputs, \"_keras_mask\", None)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 71, "n_words": 21, "vocab_size": 18, "complexity": 3, "nloc": 6, "token_counts": 45, "n_ast_nodes": 73, "n_identifiers": 8, "d_id": 80552, "documentation": { "docstring": "Retrieves the input mask tensor(s) of a layer.\n\n Only applicable if the layer has exactly one inbound node,\n i.e. 
if it is connected to one incoming layer.\n\n Returns:\n Input mask tensor (potentially None) or list of input\n mask tensors.\n\n Raises:\n AttributeError: if the layer is connected to\n more than one incoming layers.\n ", "n_words": 52, "vocab_size": 36, "n_whitespaces": 131, "language": "en" } }, { "id": 67737, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/stock/doctype/serial_no/serial_no.py", "file_name": "serial_no.py", "fun_name": "get_item_details", "commit_message": "style: format code with black", "code": "def get_item_details(item_code):\n\treturn frappe.db.sql(\n\t\t,\n\t\titem_code,\n\t\tas_dict=True,\n\t)[0]\n\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 2, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 8, "token_counts": 24, "n_ast_nodes": 37, "n_identifiers": 6, "d_id": 14611, "documentation": { "docstring": "select name, has_batch_no, docstatus,\n\t\tis_stock_item, has_serial_no, serial_no_series\n\t\tfrom tabItem where name=%s", "n_words": 11, "vocab_size": 11, "n_whitespaces": 8, "language": "en" } }, { "id": 264031, "commit_id": "d789a7daa7712716c89259b987349917a89aece7", "repo": "pyinstaller", "path": "PyInstaller/utils/hooks/qt/__init__.py", "file_name": "__init__.py", "fun_name": "collect_qtqml_files", "commit_message": "hookutils: reorganize the Qt hook utilities\n\nReorganize the Qt module information to provide information necessary\nto deal with variations between different python Qt bindings (PySide2,\nPyQt5, PySide6, and PyQt6). Replace the existing table-like dictionary\nwith list of entries, which is easier to format and document. From this\nlist, we now generate two dictionaries; one that maps Qt module (shared\nlibrary) names to the module info entries (the same role as the old\ndictionary), and one that maps python module names to the module info\nentries. The latter is necessary to accommodate python modules that do\nnot have corresponding Qt shared libraries (header-only Qt modules,\nsuch as QtAxContainer; or statically-linked module, such as QSci), but\nwe still need to provide information about plugins or translation\nfiles.\n\nThe new information list is based on manual inspection of source code\nfor Qt 5.15 and 6.3, and should provide comprehensive information about\nall plugin names and translation file basenames.\n\nIn addition, most of the helper functions, which take a reference to\nthe `QtLibraryInfo` class as their first argument, have been turned\ninto methods of the `QtLibraryInfo` class. The corresponding hooks\nhave also been adjusted.", "code": "def collect_qtqml_files(self):\n \n\n # No-op if requested Qt-based package is not available.\n if self.version is None:\n return [], []\n\n # Not all PyQt5/PySide2 installs have QML files. In this case, location['Qml2ImportsPath'] is empty.\n # Furthermore, even if location path is provided, the directory itself may not exist.\n #\n # https://github.com/pyinstaller/pyinstaller/pull/3229#issuecomment-359735031\n # https://github.com/pyinstaller/pyinstaller/issues/3864\n #\n # In Qt 6, Qml2ImportsPath was deprecated in favor of QmlImportsPath. The former is not available in PySide6\n # 6.4.0 anymore (but is in PyQt6 6.4.0). 
Use the new QmlImportsPath if available.\n if 'QmlImportsPath' in self.location:\n qml_src_dir = self.location['QmlImportsPath']\n else:\n qml_src_dir = self.location['Qml2ImportsPath']\n if not qml_src_dir or not os.path.isdir(qml_src_dir):\n logger.warning('%s: QML directory %r does not exist. QML files not packaged.', self, qml_src_dir)\n return [], []\n\n qml_dst_dir = os.path.join(self.qt_rel_dir, 'qml')\n datas = [(qml_src_dir, qml_dst_dir)]\n binaries = [\n # Produce ``/path/to/Qt/Qml/path_to_qml_binary/qml_binary, PyQt5/Qt/Qml/path_to_qml_binary``.\n (\n qml_plugin_file,\n os.path.join(qml_dst_dir, os.path.dirname(os.path.relpath(qml_plugin_file, qml_src_dir)))\n ) for qml_plugin_file in misc.dlls_in_subdirs(qml_src_dir)\n ]\n\n return binaries, datas\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 397, "n_words": 146, "vocab_size": 99, "complexity": 6, "nloc": 19, "token_counts": 144, "n_ast_nodes": 243, "n_identifiers": 20, "d_id": 77570, "documentation": { "docstring": "\n Collect additional binaries and data for QtQml module.\n ", "n_words": 8, "vocab_size": 8, "n_whitespaces": 23, "language": "en" } }, { "id": 208416, "commit_id": "82d1a374575d9785708f144976cf139c76c7acb7", "repo": "ipython", "path": "IPython/conftest.py", "file_name": "conftest.py", "fun_name": "pytest_collection_modifyitems", "commit_message": "make sure to run async tests\n\nthere are some `async def` tests, but they are skipped without `mark(\"asyncio\")`", "code": "def pytest_collection_modifyitems(items):\n \n for item in items:\n if inspect.iscoroutinefunction(item.obj):\n item.add_marker(\"asyncio\")\n assert not inspect.isasyncgenfunction(item.obj)\n\n", "url": "https://github.com/ipython/ipython.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 43, "n_words": 12, "vocab_size": 12, "complexity": 3, "nloc": 5, "token_counts": 37, "n_ast_nodes": 64, "n_identifiers": 8, "d_id": 52319, "documentation": { "docstring": "This function is automatically run by pytest passing all collected test\n functions.\n\n We use it to add asyncio marker to all async tests and assert we don't use\n test functions that are async generators which wouldn't make sense.\n ", "n_words": 38, "vocab_size": 33, "n_whitespaces": 50, "language": "en" } }, { "id": 205772, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/db/models/query.py", "file_name": "query.py", "fun_name": "filter", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def filter(self, *args, **kwargs):\n \n self._not_support_combined_queries(\"filter\")\n return self._filter_or_exclude(False, args, kwargs)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 30, "n_words": 9, "vocab_size": 9, "complexity": 1, "nloc": 3, "token_counts": 29, "n_ast_nodes": 48, "n_identifiers": 6, "d_id": 51206, "documentation": { "docstring": "\n Return a new QuerySet instance with the args ANDed to the existing\n set.\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 35, "language": "en" } }, { "id": 101607, "commit_id": "98d01760e469fd2108eed8d0b0a1ba6297c3177c", "repo": "faceswap", "path": "tools/sort/sort.py", "file_name": "sort.py", "fun_name": "_output_non_grouped", "commit_message": "Overhaul sort:\n - Standardize image data reading and writing\n - Optimize loading 
(just one pass required)\n - Make all sort groups binnable (to greater or lesser results)\n - Add sort by pitch\n - Deprecate multiple options\n - linting, docs + locales", "code": "def _output_non_grouped(self) -> None:\n \n output_dir = self._args.output_dir\n os.makedirs(output_dir, exist_ok=True)\n\n description = f\"{'Copying' if self._args.keep_original else 'Moving'} and renaming\"\n for idx, source in enumerate(tqdm(self._sorter.sorted_filelist,\n desc=description,\n file=sys.stdout,\n leave=False)):\n dest = os.path.join(output_dir, f\"{idx:06d}_{os.path.basename(source)}\")\n\n self._sort_file(source, dest)\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 212, "n_words": 32, "vocab_size": 30, "complexity": 2, "nloc": 14, "token_counts": 81, "n_ast_nodes": 165, "n_identifiers": 25, "d_id": 21015, "documentation": { "docstring": " Output non-grouped files.\n\n These are files which are sorted but not binned, so just the filename gets updated\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 33, "language": "en" } }, { "id": 20623, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pyparsing/results.py", "file_name": "results.py", "fun_name": "insert", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def insert(self, index, ins_string):\n ", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "\"\"\"\n Inserts new element at location index in the list of parsed tokens.\n\n Similar to ``list.insert()``.\n\n Example::\n\n numlist = Word(nums)[...]\n print(numlist.parse_string(\"0 123 321\"))", "n_ast_errors": 1, "ast_levels": 4, "n_whitespaces": 11, "n_words": 4, "vocab_size": 4, "complexity": 3, "nloc": 7, "token_counts": 64, "n_ast_nodes": 19, "n_identifiers": 4, "d_id": 3460, "documentation": { "docstring": "\n Inserts new element at location index in the list of parsed tokens.\n\n Similar to ``list.insert()``.\n\n Example::\n\n numlist = Word(nums)[...]\n print(numlist.parse_string(\"0 123 321\")) # -> ['0', '123', '321']\n\n # use a parse action to insert the parse location in the front of the parsed results", "n_words": 44, "vocab_size": 34, "n_whitespaces": 98, "language": "en" } }, { "id": 92157, "commit_id": "9288539aeeac52990705aa6dd0abaebe0d12da21", "repo": "sentry", "path": "src/sentry/utils/sdk.py", "file_name": "sdk.py", "fun_name": "mark_scope_as_experimental", "commit_message": "poc(sdk): Add experimental dsn for upcoming perf work (#36000)\n\nThis adds an experimental dsn to the MultiplexingTransport to intentionally send specific flagged events solely to a separate dsn, which will help us avoid troubles with ingesting random errors into our main Sentry project.", "code": "def mark_scope_as_experimental():\n \n with configure_scope() as scope:\n scope.set_tag(EXPERIMENT_TAG, True)\n\n", "url": 
"https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 21, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 20, "n_ast_nodes": 39, "n_identifiers": 5, "d_id": 18879, "documentation": { "docstring": "\n Set the experimental tag on the SDK scope for outgoing crashes and transactions.\n\n Marking the scope will cause these crashes and transaction to be sent to a separate experimental dsn.\n ", "n_words": 30, "vocab_size": 23, "n_whitespaces": 40, "language": "en" } }, { "id": 292654, "commit_id": "845bf80e725af8c921915906b0f796c7a8164d11", "repo": "core", "path": "tests/components/mqtt/test_init.py", "file_name": "test_init.py", "fun_name": "test_handle_mqtt_on_callback", "commit_message": "Mqtt improve test coverage (#66279)\n\nCo-authored-by: Martin Hjelmare ", "code": "async def test_handle_mqtt_on_callback(hass, caplog, mqtt_mock, mqtt_client_mock):\n \n # Simulate an ACK for mid == 1, this will call mqtt_mock._mqtt_handle_mid(mid)\n mqtt_client_mock.on_publish(mqtt_client_mock, None, 1)\n await hass.async_block_till_done()\n # Make sure the ACK has been received\n await hass.async_block_till_done()\n # Now call publish without call back, this will call _wait_for_mid(msg_info.mid)\n await mqtt.async_publish(hass, \"no_callback/test-topic\", \"test-payload\")\n # Since the mid event was already set, we should not see any timeout\n await hass.async_block_till_done()\n assert (\n \"Transmitting message on no_callback/test-topic: 'test-payload', mid: 1\"\n in caplog.text\n )\n assert \"No ACK from MQTT server\" not in caplog.text\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 138, "n_words": 85, "vocab_size": 64, "complexity": 1, "nloc": 11, "token_counts": 66, "n_ast_nodes": 117, "n_identifiers": 10, "d_id": 91728, "documentation": { "docstring": "Test receiving an ACK callback before waiting for it.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 303468, "commit_id": "df67a8cd4f8df91a153778009a74be1e3876ca53", "repo": "core", "path": "homeassistant/components/zha/core/channels/lighting.py", "file_name": "lighting.py", "fun_name": "enhanced_hue_supported", "commit_message": "Fix ZHA light color temp support (#76305)", "code": "def enhanced_hue_supported(self) -> bool:\n \n return (\n self.color_capabilities is not None\n and lighting.Color.ColorCapabilities.Enhanced_hue in self.color_capabilities\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 58, "n_words": 15, "vocab_size": 14, "complexity": 2, "nloc": 6, "token_counts": 29, "n_ast_nodes": 47, "n_identifiers": 8, "d_id": 102288, "documentation": { "docstring": "Return True if the channel supports enhanced hue and saturation.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 97870, "commit_id": "d246d2b6d3e014270941209e54f2f12e09ad9a81", "repo": "sentry", "path": "src/sentry/pipeline/provider.py", "file_name": "provider.py", "fun_name": "set_pipeline", "commit_message": "ref(py): Split up large file (#32862)\n\nCo-authored-by: getsantry[bot] <66042841+getsantry[bot]@users.noreply.github.com>", "code": "def set_pipeline(self, pipeline):\n \n self.pipeline = pipeline\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", 
"n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 23, "n_identifiers": 3, "d_id": 19496, "documentation": { "docstring": "\n Used by the pipeline to give the provider access to the executing pipeline.\n ", "n_words": 13, "vocab_size": 10, "n_whitespaces": 28, "language": "en" } }, { "id": 267843, "commit_id": "3eb0485dd92c88cc92152d3656d94492db44b183", "repo": "ansible", "path": "test/lib/ansible_test/_internal/commands/integration/cloud/aws.py", "file_name": "aws.py", "fun_name": "_setup_dynamic", "commit_message": "ansible-test - Use more native type hints. (#78435)\n\n* ansible-test - Use more native type hints.\r\n\r\nSimple search and replace to switch from comments to native type hints for return types of functions with no arguments.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of simple single-line function annotation type comments to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nConversion of single-line function annotation type comments with default values to native type hints.\r\n\r\n* ansible-test - Use more native type hints.\r\n\r\nManual conversion of type annotation comments for functions which have pylint directives.", "code": "def _setup_dynamic(self) -> None:\n \n display.info('Provisioning %s cloud environment.' % self.platform, verbosity=1)\n\n config = self._read_config_template()\n\n aci = self._create_ansible_core_ci()\n\n response = aci.start()\n\n if not self.args.explain:\n credentials = response['aws']['credentials']\n\n values = dict(\n ACCESS_KEY=credentials['access_key'],\n SECRET_KEY=credentials['secret_key'],\n SECURITY_TOKEN=credentials['session_token'],\n REGION='us-east-1',\n )\n\n display.sensitive.add(values['SECRET_KEY'])\n display.sensitive.add(values['SECURITY_TOKEN'])\n\n config = self._populate_config_template(config, values)\n\n self._write_config(config)\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 216, "n_words": 41, "vocab_size": 35, "complexity": 2, "nloc": 18, "token_counts": 128, "n_ast_nodes": 217, "n_identifiers": 25, "d_id": 79123, "documentation": { "docstring": "Request AWS credentials through the Ansible Core CI service.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 266742, "commit_id": "a06fa496d3f837cca3c437ab6e9858525633d147", "repo": "ansible", "path": "test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py", "file_name": "__init__.py", "fun_name": "managed", "commit_message": "ansible-test - Code cleanup and refactoring. (#77169)\n\n* Remove unnecessary PyCharm ignores.\r\n* Ignore intentional undefined attribute usage.\r\n* Add missing type hints. 
Fix existing type hints.\r\n* Fix docstrings and comments.\r\n* Use function to register completion handler.\r\n* Pass strings to display functions.\r\n* Fix CompositeAction handling of dest argument.\r\n* Use consistent types in expressions/assignments.\r\n* Use custom function to keep linters happy.\r\n* Add missing raise for custom exception.\r\n* Clean up key/value type handling in cloud plugins.\r\n* Use dataclass instead of dict for results.\r\n* Add custom type_guard function to check lists.\r\n* Ignore return type that can't be checked (yet).\r\n* Avoid changing types on local variables.", "code": "def managed(self): # type: () -> bool\n \n return t.cast(bool, self._get_cloud_config(self._MANAGED))\n", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 25, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 2, "token_counts": 22, "n_ast_nodes": 38, "n_identifiers": 7, "d_id": 78553, "documentation": { "docstring": "True if resources are managed by ansible-test, otherwise False.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 261585, "commit_id": "d8fa96c29828e3ca79ddd5d7466521ac4d95213c", "repo": "scikit-learn", "path": "sklearn/impute/tests/test_impute.py", "file_name": "test_impute.py", "fun_name": "test_simple_imputer_constant_keep_empty_features", "commit_message": "ENH keep features with all missing values during imputation (#24770)\n\nCo-authored-by: Chiara Marmo \r\nCo-authored-by: Julien Jerphanion \r\nCo-authored-by: Jérémie du Boisberranger <34657725+jeremiedbb@users.noreply.github.com>\r\nCo-authored-by: Vitor SRG \r\nFixes https://github.com/scikit-learn/scikit-learn/pull/16695\r\nFixes https://github.com/scikit-learn/scikit-learn/issues/16426\r\nFixes https://github.com/scikit-learn/scikit-learn/issues/16977", "code": "def test_simple_imputer_constant_keep_empty_features(array_type, keep_empty_features):\n \n X = np.array([[np.nan, 2], [np.nan, 3], [np.nan, 6]])\n X = _convert_container(X, array_type)\n fill_value = 10\n imputer = SimpleImputer(\n strategy=\"constant\",\n fill_value=fill_value,\n keep_empty_features=keep_empty_features,\n )\n\n for method in [\"fit_transform\", \"transform\"]:\n X_imputed = getattr(imputer, method)(X)\n assert X_imputed.shape == X.shape\n constant_feature = (\n X_imputed[:, 0].A if array_type == \"sparse\" else X_imputed[:, 0]\n )\n assert_array_equal(constant_feature, fill_value)\n\n\n@pytest.mark.parametrize(\"array_type\", [\"array\", \"sparse\"])\n@pytest.mark.parametrize(\"strategy\", [\"mean\", \"median\", \"most_frequent\"])\n@pytest.mark.parametrize(\"keep_empty_features\", [True, False])", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "@pytest.mark.parametrize(\"array_type\", [\"array\", \"sparse\"])\n@pytest.mark.parametrize(\"strategy\", [\"mean\", \"median\", \"most_frequent\"])\n@pytest.mark.parametrize(\"keep_empty_features\", [True, False])", "n_ast_errors": 1, "ast_levels": 13, "n_whitespaces": 148, "n_words": 63, "vocab_size": 53, "complexity": 3, "nloc": 16, "token_counts": 125, "n_ast_nodes": 273, "n_identifiers": 22, "d_id": 76879, "documentation": { "docstring": "Check the behaviour of `keep_empty_features` with `strategy='constant'.\n For backward compatibility, a column full of missing values will always be\n fill and never dropped.\n ", "n_words": 23, "vocab_size": 22, "n_whitespaces": 32, "language": "en" } }, { "id": 45916, 
"commit_id": "401419432082d222b823e4f2a66f21e5cc3ab28d", "repo": "airflow", "path": "tests/providers/databricks/operators/test_databricks_sql.py", "file_name": "test_databricks_sql.py", "fun_name": "test_copy_with_credential", "commit_message": "Add new options to DatabricksCopyIntoOperator (#22076)\n\nThis includes:\r\n* `encryption` - to specify encryption options for a given location\r\n* `credential` - to specify authentication options for a given location\r\n* `validate` - to control validation of schema & data", "code": "def test_copy_with_credential(self):\n expression = \"col1, col2\"\n op = DatabricksCopyIntoOperator(\n file_location=COPY_FILE_LOCATION,\n file_format='CSV',\n table_name='test',\n task_id=TASK_ID,\n expression_list=expression,\n credential={'AZURE_SAS_TOKEN': 'abc'},\n )\n assert (\n op._create_sql_query()\n == f.strip()\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 145, "n_words": 23, "vocab_size": 21, "complexity": 1, "nloc": 17, "token_counts": 56, "n_ast_nodes": 102, "n_identifiers": 15, "d_id": 8743, "documentation": { "docstring": "COPY INTO test\nFROM (SELECT {expression} FROM '{COPY_FILE_LOCATION}' WITH (CREDENTIAL (AZURE_SAS_TOKEN = 'abc') ))\nFILEFORMAT = CSV\n", "n_words": 17, "vocab_size": 15, "n_whitespaces": 14, "language": "en" } }, { "id": 112868, "commit_id": "98c1a77f61900d486f46d284c49fb65675dbee6a", "repo": "nni", "path": "nni/runtime/msg_dispatcher.py", "file_name": "msg_dispatcher.py", "fun_name": "_earlystop_notify_tuner", "commit_message": "Support multiple HPO experiments in one process (#4855)", "code": "def _earlystop_notify_tuner(self, data):\n \n _logger.debug('Early stop notify tuner data: [%s]', data)\n data['type'] = MetricType.FINAL\n data['value'] = dump(data['value'])\n self.enqueue_command(CommandType.ReportMetricData, data)\n", "url": "https://github.com/microsoft/nni.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 53, "n_words": 18, "vocab_size": 16, "complexity": 1, "nloc": 5, "token_counts": 46, "n_ast_nodes": 80, "n_identifiers": 11, "d_id": 24779, "documentation": { "docstring": "Send last intermediate result as final result to tuner in case the\n trial is early stopped.\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 30, "language": "en" } }, { "id": 196453, "commit_id": "975df9b627556d176039ba3a0f3a2e3a3df9686c", "repo": "sympy", "path": "sympy/vector/operators.py", "file_name": "operators.py", "fun_name": "gradient", "commit_message": "Fixed removals not fully performed earlier", "code": "def gradient(scalar_field, doit=True):\n \n coord_sys = _get_coord_systems(scalar_field)\n\n if len(coord_sys) == 0:\n return Vector.zero\n elif len(coord_sys) == 1:\n coord_sys = next(iter(coord_sys))\n h1, h2, h3 = coord_sys.lame_coefficients()\n i, j, k = coord_sys.base_vectors()\n x, y, z = coord_sys.base_scalars()\n vx = Derivative(scalar_field, x) / h1\n vy = Derivative(scalar_field, y) / h2\n vz = Derivative(scalar_field, z) / h3\n\n if doit:\n return (vx * i + vy * j + vz * k).doit()\n return vx * i + vy * j + vz * k\n else:\n if isinstance(scalar_field, (Add, VectorAdd)):\n return VectorAdd.fromiter(gradient(i) for i in scalar_field.args)\n if isinstance(scalar_field, (Mul, VectorMul)):\n s = _split_mul_args_wrt_coordsys(scalar_field)\n return VectorAdd.fromiter(scalar_field / i * gradient(i) for i in s)\n return Gradient(scalar_field)\n\n", "url": 
"https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 258, "n_words": 108, "vocab_size": 60, "complexity": 8, "nloc": 22, "token_counts": 212, "n_ast_nodes": 329, "n_identifiers": 36, "d_id": 47935, "documentation": { "docstring": "\n Returns the vector gradient of a scalar field computed wrt the\n base scalars of the given coordinate system.\n\n Parameters\n ==========\n\n scalar_field : SymPy Expr\n The scalar field to compute the gradient of\n\n doit : bool\n If True, the result is returned after calling .doit() on\n each component. Else, the returned expression contains\n Derivative instances\n\n Examples\n ========\n\n >>> from sympy.vector import CoordSys3D, gradient\n >>> R = CoordSys3D('R')\n >>> s1 = R.x*R.y*R.z\n >>> gradient(s1)\n R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k\n >>> s2 = 5*R.x**2*R.z\n >>> gradient(s2)\n 10*R.x*R.z*R.i + 5*R.x**2*R.k\n\n ", "n_words": 86, "vocab_size": 64, "n_whitespaces": 166, "language": "en" } }, { "id": 215877, "commit_id": "53b3ebc92648c2081c58865713b50a2859ae8310", "repo": "salt", "path": "salt/modules/win_certutil.py", "file_name": "win_certutil.py", "fun_name": "add_store", "commit_message": "Fix win_certutil module to handle paths with spaces", "code": "def add_store(source, store, retcode=False, saltenv=\"base\"):\n \n source = __salt__[\"cp.cache_file\"](source, saltenv)\n\n # Since we're allowing a path, let's make sure it exists\n if not os.path.exists(source):\n msg = \"cert_file not found: \".format(source)\n raise CommandExecutionError(msg)\n\n cmd = 'certutil.exe -addstore {} \"{}\"'.format(store, source)\n if retcode:\n return __salt__[\"cmd.retcode\"](cmd)\n else:\n return __salt__[\"cmd.run\"](cmd)\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 94, "n_words": 45, "vocab_size": 40, "complexity": 3, "nloc": 10, "token_counts": 82, "n_ast_nodes": 144, "n_identifiers": 13, "d_id": 54222, "documentation": { "docstring": "\n Add the given cert into the given Certificate Store\n\n source (str):\n The source certificate file. This is either the path to a local file or\n a file from the file server in the form of ``salt://path/to/file``\n\n store (str):\n The certificate store to add the certificate to\n\n retcode (bool):\n If ``True``, return the retcode instead of stdout. Default is ``False``\n\n saltenv (str):\n The salt environment to use. This is ignored if the path is local\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' certutil.add_store salt://cert.cer TrustedPublisher\n salt '*' certutil.add_store C:\\\\path\\\\to\\\\local.cer TrustedPublisher\n ", "n_words": 89, "vocab_size": 54, "n_whitespaces": 163, "language": "en" } }, { "id": 138432, "commit_id": "9f4cb9b3c9c27ae21bf7807595973231b6814648", "repo": "ray", "path": "python/ray/data/datasource/partitioning.py", "file_name": "partitioning.py", "fun_name": "base_dir", "commit_message": "[Datasets] Add Path Partitioning Support for All Content Types (#23624)\n\nAdds a content-type-agnostic partition parser with support for filtering files. 
Also adds some corner-case bug fixes and usability improvements for supporting more robust input path types.", "code": "def base_dir(self) -> str:\n \n return self._base_dir\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 12, "n_ast_nodes": 22, "n_identifiers": 4, "d_id": 31421, "documentation": { "docstring": "Gets the original base directory supplied during object construction.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 183837, "commit_id": "4dd0d9fae43583638f34257f97d5749ca4f2c00c", "repo": "textual", "path": "tests/css/test_stylesheet.py", "file_name": "test_stylesheet.py", "fun_name": "test_stylesheet_apply_takes_final_rule_in_specificity_clash", "commit_message": "Add various additional tests around CSS specificity", "code": "def test_stylesheet_apply_takes_final_rule_in_specificity_clash():\n \n css = \".a {background: red; color: lime;} .b {background: blue;}\"\n stylesheet = _make_stylesheet(css)\n node = DOMNode(classes=\"a b\", id=\"c\")\n stylesheet.apply(node)\n\n assert node.styles.color == Color(0, 255, 0) # color: lime\n assert node.styles.background == Color(0, 0, 255) # background: blue\n\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 62, "n_words": 39, "vocab_size": 31, "complexity": 1, "nloc": 7, "token_counts": 62, "n_ast_nodes": 105, "n_identifiers": 13, "d_id": 44359, "documentation": { "docstring": ".a and .b both contain background and have same specificity, so .b wins\n since it was declared last - the background should be blue.", "n_words": 24, "vocab_size": 21, "n_whitespaces": 26, "language": "en" } }, { "id": 309258, "commit_id": "dc58bc375ae203e3d394225f9c3a5a14d43cb2f3", "repo": "core", "path": "tests/util/test_async.py", "file_name": "test_async.py", "fun_name": "test_check_loop_async_integration", "commit_message": "Warn on`time.sleep` in event loop (#63766)\n\nCo-authored-by: Martin Hjelmare ", "code": "async def test_check_loop_async_integration(caplog):\n \n with pytest.raises(RuntimeError), patch(\n \"homeassistant.util.async_.extract_stack\",\n return_value=[\n Mock(\n filename=\"/home/paulus/homeassistant/core.py\",\n lineno=\"23\",\n line=\"do_something()\",\n ),\n Mock(\n filename=\"/home/paulus/homeassistant/components/hue/light.py\",\n lineno=\"23\",\n line=\"self.light.is_on\",\n ),\n Mock(\n filename=\"/home/paulus/aiohue/lights.py\",\n lineno=\"2\",\n line=\"something()\",\n ),\n ],\n ):\n hasync.check_loop()\n assert (\n \"Detected blocking call inside the event loop. This is causing stability issues. 
\"\n \"Please report issue for hue doing blocking calls at \"\n \"homeassistant/components/hue/light.py, line 23: self.light.is_on\"\n in caplog.text\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 330, "n_words": 58, "vocab_size": 51, "complexity": 1, "nloc": 28, "token_counts": 88, "n_ast_nodes": 158, "n_identifiers": 14, "d_id": 107965, "documentation": { "docstring": "Test check_loop detects and raises when called from event loop from integration context.", "n_words": 13, "vocab_size": 12, "n_whitespaces": 12, "language": "en" } }, { "id": 246326, "commit_id": "a121507cfec0ffce45a89f5a1019034eda5b0c70", "repo": "synapse", "path": "tests/handlers/test_oidc.py", "file_name": "test_oidc.py", "fun_name": "test_callback", "commit_message": "Adds misc missing type hints (#11953)", "code": "def test_callback(self):\n \n\n # ensure that we are correctly testing the fallback when \"get_extra_attributes\"\n # is not implemented.\n mapping_provider = self.provider._user_mapping_provider\n with self.assertRaises(AttributeError):\n _ = mapping_provider.get_extra_attributes\n\n token = {\n \"type\": \"bearer\",\n \"id_token\": \"id_token\",\n \"access_token\": \"access_token\",\n }\n username = \"bar\"\n userinfo = {\n \"sub\": \"foo\",\n \"username\": username,\n }\n expected_user_id = \"@%s:%s\" % (username, self.hs.hostname)\n self.provider._exchange_code = simple_async_mock(return_value=token)\n self.provider._parse_id_token = simple_async_mock(return_value=userinfo)\n self.provider._fetch_userinfo = simple_async_mock(return_value=userinfo)\n auth_handler = self.hs.get_auth_handler()\n auth_handler.complete_sso_login = simple_async_mock()\n\n code = \"code\"\n state = \"state\"\n nonce = \"nonce\"\n client_redirect_url = \"http://client/redirect\"\n ip_address = \"10.0.0.1\"\n session = self._generate_oidc_session_token(state, nonce, client_redirect_url)\n request = _build_callback_request(code, state, session, ip_address=ip_address)\n\n self.get_success(self.handler.handle_oidc_callback(request))\n\n auth_handler.complete_sso_login.assert_called_once_with(\n expected_user_id,\n \"oidc\",\n request,\n client_redirect_url,\n None,\n new_user=True,\n auth_provider_session_id=None,\n )\n self.provider._exchange_code.assert_called_once_with(code)\n self.provider._parse_id_token.assert_called_once_with(token, nonce=nonce)\n self.provider._fetch_userinfo.assert_not_called()\n self.render_error.assert_not_called()\n\n # Handle mapping errors\n with patch.object(\n self.provider,\n \"_remote_id_from_userinfo\",\n new=Mock(side_effect=MappingException()),\n ):\n self.get_success(self.handler.handle_oidc_callback(request))\n self.assertRenderedError(\"mapping_error\")\n\n # Handle ID token errors\n self.provider._parse_id_token = simple_async_mock(raises=Exception())\n self.get_success(self.handler.handle_oidc_callback(request))\n self.assertRenderedError(\"invalid_token\")\n\n auth_handler.complete_sso_login.reset_mock()\n self.provider._exchange_code.reset_mock()\n self.provider._parse_id_token.reset_mock()\n self.provider._fetch_userinfo.reset_mock()\n\n # With userinfo fetching\n self.provider._user_profile_method = \"userinfo_endpoint\"\n token = {\n \"type\": \"bearer\",\n \"access_token\": \"access_token\",\n }\n self.provider._exchange_code = simple_async_mock(return_value=token)\n self.get_success(self.handler.handle_oidc_callback(request))\n\n 
auth_handler.complete_sso_login.assert_called_once_with(\n expected_user_id,\n \"oidc\",\n request,\n client_redirect_url,\n None,\n new_user=False,\n auth_provider_session_id=None,\n )\n self.provider._exchange_code.assert_called_once_with(code)\n self.provider._parse_id_token.assert_not_called()\n self.provider._fetch_userinfo.assert_called_once_with(token)\n self.render_error.assert_not_called()\n\n # With an ID token, userinfo fetching and sid in the ID token\n self.provider._user_profile_method = \"userinfo_endpoint\"\n token = {\n \"type\": \"bearer\",\n \"access_token\": \"access_token\",\n \"id_token\": \"id_token\",\n }\n id_token = {\n \"sid\": \"abcdefgh\",\n }\n self.provider._parse_id_token = simple_async_mock(return_value=id_token)\n self.provider._exchange_code = simple_async_mock(return_value=token)\n auth_handler.complete_sso_login.reset_mock()\n self.provider._fetch_userinfo.reset_mock()\n self.get_success(self.handler.handle_oidc_callback(request))\n\n auth_handler.complete_sso_login.assert_called_once_with(\n expected_user_id,\n \"oidc\",\n request,\n client_redirect_url,\n None,\n new_user=False,\n auth_provider_session_id=id_token[\"sid\"],\n )\n self.provider._exchange_code.assert_called_once_with(code)\n self.provider._parse_id_token.assert_called_once_with(token, nonce=nonce)\n self.provider._fetch_userinfo.assert_called_once_with(token)\n self.render_error.assert_not_called()\n\n # Handle userinfo fetching error\n self.provider._fetch_userinfo = simple_async_mock(raises=Exception())\n self.get_success(self.handler.handle_oidc_callback(request))\n self.assertRenderedError(\"fetch_error\")\n\n # Handle code exchange failure\n from synapse.handlers.oidc import OidcError\n\n self.provider._exchange_code = simple_async_mock(\n raises=OidcError(\"invalid_request\")\n )\n self.get_success(self.handler.handle_oidc_callback(request))\n self.assertRenderedError(\"invalid_request\")\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 1236, "n_words": 247, "vocab_size": 131, "complexity": 1, "nloc": 111, "token_counts": 684, "n_ast_nodes": 1151, "n_identifiers": 56, "d_id": 71155, "documentation": { "docstring": "Code callback works and display errors if something went wrong.\n\n A lot of scenarios are tested here:\n - when the callback works, with userinfo from ID token\n - when the user mapping fails\n - when ID token verification fails\n - when the callback works, with userinfo fetched from the userinfo endpoint\n - when the userinfo fetching fails\n - when the code exchange fails\n ", "n_words": 63, "vocab_size": 35, "n_whitespaces": 125, "language": "en" } }, { "id": 157491, "commit_id": "ca86da3a30c4e080d4db8c25fca73de843663cb4", "repo": "stablediffusion", "path": "ldm/modules/image_degradation/bsrgan.py", "file_name": "bsrgan.py", "fun_name": "bicubic_degradation", "commit_message": "release more models", "code": "def bicubic_degradation(x, sf=3):\n \n x = util.imresize_np(x, scale=1 / sf)\n return x\n\n", "url": "https://github.com/Stability-AI/stablediffusion.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 20, "n_words": 11, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 26, "n_ast_nodes": 42, "n_identifiers": 6, "d_id": 36943, "documentation": { "docstring": "\n Args:\n x: HxWxC image, [0, 1]\n sf: down-scale factor\n Return:\n bicubicly downsampled LR image\n ", "n_words": 14, "vocab_size": 14, 
"n_whitespaces": 45, "language": "en" } }, { "id": 42167, "commit_id": "b1db0f72627e9fae8fda261514392d53906384cf", "repo": "seaborn", "path": "seaborn/categorical.py", "file_name": "categorical.py", "fun_name": "plot", "commit_message": "Cleanup and merge #2909 (#2955)\n\n* Sorting boxenplot\r\n\r\n* Boxenplot separate kws\r\n\r\nRemoved `kwargs` which were used to draw the median lines and scatter plot of outliers previously.\r\n\r\nAdded separate kwargs - `box_kws`, `line_kws` (drawing the median lines) and `flier_kws` (for the scatter of outliers).\r\n\r\nUpdated the matching docstring.\r\n\r\n* In the previous commit most code on the categorical.py file was auto-reformatted. Here it is reverted and only the changes to `seaborn.categorical.boxenplot` and `seaborn.categorical._LVPlotter` are kept.\r\n\r\n* Reinserted blank lines in docstring.\r\n\r\n* - Removed redundant indention in `boxenplot` function\r\n- Removed commented out code in the `plot` function\r\n\r\n* Removed default kwargs from `plot`\r\n\r\n* Removing commented out code\r\n\r\n* Reverted to ternary expressions\r\n\r\n* Replaced default kwargs assignment to box_kws\r\nDisentangled the nested for loop for default kwargs assignment\r\n\r\n* Removed remaining `kwargs` item in docstring\r\n\r\n* Resolved incorrect reference in the box_kws item on the docstring.\r\n\r\n* Resolved incorrect descriptions for box_kws, line_kws and flier_kws.\r\n\r\n* Changed line_kws update to source arguments frmo box_kws if there is only a single data point.\r\n\r\n* Added line_kws test\r\n\r\n* Added flier_kws test, renamed line_kws test\r\n\r\n* Tests - further work is required in expanding the tests.\r\nTwo current issues\r\n(a) most are not testing when multiple categories are used on the x-axis, but only a single one.\r\n(b) the tests for the box_kws functionality are very slim.\r\n\r\n* Fix lint issues\r\n\r\n* Fix pinned tests\r\n\r\n* Update release notes\r\n\r\n* Cleanup boxenplot colors test\r\n\r\nCo-authored-by: EitanHemed <37670372+EitanHemed@users.noreply.github.com>", "code": "def plot(self, ax, box_kws, flier_kws, line_kws):\n \n self.draw_letter_value_plot(ax, box_kws, flier_kws, line_kws)\n self.annotate_axes(ax)\n if self.orient == \"h\":\n ax.invert_yaxis()\n\n\n_categorical_docs = dict(\n\n # Shared narrative docs\n categorical_narrative=dedent(),\n\n new_categorical_narrative=dedent(),\n\n # Shared function parameters\n input_params=dedent(),\n string_input_params=dedent(),\n categorical_data=dedent(),\n long_form_data=dedent(),\n order_vars=dedent(),\n stat_api_params=dedent(),\n orient=dedent(),\n color=dedent(),\n palette=dedent(),\n hue_norm=dedent(),\n saturation=dedent(),\n capsize=dedent(),\n errwidth=dedent(),\n width=dedent(),\n dodge=dedent(),\n linewidth=dedent(),\n native_scale=dedent(),\n formatter=dedent(),\n legend=dedent(),\n ax_in=dedent(),\n ax_out=dedent(),\n\n # Shared see also\n boxplot=dedent(),\n violinplot=dedent(),\n stripplot=dedent(),\n swarmplot=dedent(),\n barplot=dedent(),\n countplot=dedent(),\n pointplot=dedent(),\n catplot=dedent(),\n boxenplot=dedent(),\n\n)\n\n_categorical_docs.update(_facet_docs)\n\n", "url": "https://github.com/mwaskom/seaborn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 206, "n_words": 65, "vocab_size": 59, "complexity": 2, "nloc": 5, "token_counts": 44, "n_ast_nodes": 537, "n_identifiers": 46, "d_id": 7489, "documentation": { "docstring": "Make the plot.\\\n .. 
note::\n This function always treats one of the variables as categorical and\n draws data at ordinal positions (0, 1, ... n) on the relevant axis,\n even when the data has a numeric or date type.\n\n See the :ref:`tutorial ` for more information.\\\n \\\n .. note::\n By default, this function treats one of the variables as categorical\n and draws data at ordinal positions (0, 1, ... n) on the relevant axis.\n This can be disabled with the `native_scale` parameter.\n\n See the :ref:`tutorial ` for more information.\\\n \\\n x, y, hue : names of variables in ``data`` or vector data, optional\n Inputs for plotting long-form data. See examples for interpretation.\\\n \\\n x, y, hue : names of variables in ``data``\n Inputs for plotting long-form data. See examples for interpretation.\\\n \\\n data : DataFrame, array, or list of arrays, optional\n Dataset for plotting. If ``x`` and ``y`` are absent, this is\n interpreted as wide-form. Otherwise it is expected to be long-form.\\\n \\\n data : DataFrame\n Long-form (tidy) dataset for plotting. Each column should correspond\n to a variable, and each row should correspond to an observation.\\\n \\\n order, hue_order : lists of strings, optional\n Order to plot the categorical levels in; otherwise the levels are\n inferred from the data objects.\\\n \\\n estimator : string or callable that maps vector -> scalar, optional\n Statistical function to estimate within each categorical bin.\n errorbar : string, (string, number) tuple, or callable\n Name of errorbar method (either \"ci\", \"pi\", \"se\", or \"sd\"), or a tuple\n with a method name and a level parameter, or a function that maps from a\n vector to a (min, max) interval.\n n_boot : int, optional\n Number of bootstrap samples used to compute confidence intervals.\n units : name of variable in ``data`` or vector data, optional\n Identifier of sampling units, which will be used to perform a\n multilevel bootstrap and account for repeated measures design.\n seed : int, numpy.random.Generator, or numpy.random.RandomState, optional\n Seed or random number generator for reproducible bootstrapping.\\\n \\\n orient : \"v\" | \"h\", optional\n Orientation of the plot (vertical or horizontal). This is usually\n inferred based on the type of the input variables, but it can be used\n to resolve ambiguity when both `x` and `y` are numeric or when\n plotting wide-form data.\\\n \\\n color : matplotlib color, optional\n Single color for the elements in the plot.\\\n \\\n palette : palette name, list, or dict, optional\n Color palette that maps the hue variable. If the palette is a dictionary,\n keys should be names of levels and values should be matplotlib colors.\\\n \\\n hue_norm : tuple or :class:`matplotlib.colors.Normalize` object\n Normalization in data units for colormap applied to the `hue`\n variable when it is numeric. Not relevant if `hue` is categorical.\\\n \\\n saturation : float, optional\n Proportion of the original saturation to draw colors at. 
Large patches\n often look better with slightly desaturated colors, but set this to\n `1` if you want the plot colors to perfectly match the input color.\\\n \\\n capsize : float, optional\n Width of the \"caps\" on error bars./\n \\\n errwidth : float, optional\n Thickness of error bar lines (and caps).\\\n \\\n width : float, optional\n Width of a full element when not using hue nesting, or width of all the\n elements for one level of the major grouping variable.\\\n \\\n dodge : bool, optional\n When hue nesting is used, whether elements should be shifted along the\n categorical axis.\\\n \\\n linewidth : float, optional\n Width of the gray lines that frame the plot elements.\\\n \\\n native_scale : bool, optional\n When True, numeric or datetime values on the categorical axis will maintain\n their original scaling rather than being converted to fixed indices.\\\n \\\n formatter : callable, optional\n Function for converting categorical data into strings. Affects both grouping\n and tick labels.\\\n \\\nlegend : \"auto\", \"brief\", \"full\", or False\n How to draw the legend. If \"brief\", numeric `hue` and `size`\n variables will be represented with a sample of evenly spaced values.\n If \"full\", every group will get an entry in the legend. If \"auto\",\n choose between brief or full representation based on number of levels.\n If `False`, no legend data is added and no legend is drawn.\n \\\n ax : matplotlib Axes, optional\n Axes object to draw the plot onto, otherwise uses the current Axes.\\\n \\\n ax : matplotlib Axes\n Returns the Axes object with the plot drawn onto it.\\\n \\\n boxplot : A traditional box-and-whisker plot with a similar API.\\\n \\\n violinplot : A combination of boxplot and kernel density estimation.\\\n \\\n stripplot : A scatterplot where one variable is categorical. Can be used\n in conjunction with other plots to show each observation.\\\n \\\n swarmplot : A categorical scatterplot where the points do not overlap. 
Can\n be used with other plots to show each observation.\\\n \\\n barplot : Show point estimates and confidence intervals using bars.\\\n \\\n countplot : Show the counts of observations in each categorical bin.\\\n \\\n pointplot : Show point estimates and confidence intervals using scatterplot\n glyphs.\\\n \\\n catplot : Combine a categorical plot with a :class:`FacetGrid`.\\\n \\\n boxenplot : An enhanced boxplot for larger datasets.\\\n ", "n_words": 843, "vocab_size": 378, "n_whitespaces": 1442, "language": "en" } }, { "id": 65350, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/accounts/report/share_ledger/share_ledger.py", "file_name": "share_ledger.py", "fun_name": "get_all_transfers", "commit_message": "style: format code with black", "code": "def get_all_transfers(date, shareholder):\n\tcondition = \" \"\n\t# if company:\n\t# \tcondition = 'AND company = %(company)s '\n\treturn frappe.db.sql(\n\t\t.format(\n\t\t\tcondition=condition\n\t\t),\n\t\t{\"date\": date, \"shareholder\": shareholder},\n\t\tas_dict=1,\n\t)\n", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 18, "n_words": 29, "vocab_size": 24, "complexity": 1, "nloc": 12, "token_counts": 41, "n_ast_nodes": 70, "n_identifiers": 9, "d_id": 13868, "documentation": { "docstring": "SELECT * FROM `tabShare Transfer`\n\t\tWHERE (DATE(date) <= %(date)s AND from_shareholder = %(shareholder)s {condition})\n\t\tOR (DATE(date) <= %(date)s AND to_shareholder = %(shareholder)s {condition})\n\t\tORDER BY date", "n_words": 26, "vocab_size": 19, "n_whitespaces": 22, "language": "en" } }, { "id": 261541, "commit_id": "239e16319116ab7445c0557bb08783ab2d60673d", "repo": "scikit-learn", "path": "sklearn/metrics/_pairwise_distances_reduction/_dispatcher.py", "file_name": "_dispatcher.py", "fun_name": "sqeuclidean_row_norms", "commit_message": "MAINT Introduce `MiddleTermComputer`, an abstraction generalizing `GEMMTermComputer` (#24807)\n\nCo-authored-by: Julien Jerphanion \r\nCo-authored-by: Olivier Grisel ", "code": "def sqeuclidean_row_norms(X, num_threads):\n \n if X.dtype == np.float64:\n return np.asarray(_sqeuclidean_row_norms64(X, num_threads))\n if X.dtype == np.float32:\n return np.asarray(_sqeuclidean_row_norms32(X, num_threads))\n\n raise ValueError(\n \"Only float64 or float32 datasets are supported at this time, \"\n f\"got: X.dtype={X.dtype}.\"\n )\n\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 76, "n_words": 33, "vocab_size": 28, "complexity": 3, "nloc": 9, "token_counts": 57, "n_ast_nodes": 100, "n_identifiers": 11, "d_id": 76852, "documentation": { "docstring": "Compute the squared euclidean norm of the rows of X in parallel.\n\n Parameters\n ----------\n X : ndarray or CSR matrix of shape (n_samples, n_features)\n Input data. 
Must be c-contiguous.\n\n num_threads : int\n The number of OpenMP threads to use.\n\n Returns\n -------\n sqeuclidean_row_norms : ndarray of shape (n_samples,)\n Arrays containing the squared euclidean norm of each row of X.\n ", "n_words": 58, "vocab_size": 42, "n_whitespaces": 103, "language": "en" } }, { "id": 165739, "commit_id": "24652cf178c12562585639cba39c46d62b95f107", "repo": "pandas", "path": "pandas/tests/extension/json/test_json.py", "file_name": "test_json.py", "fun_name": "test_groupby_extension_agg", "commit_message": "TST: Convert skip -> xfail (#46427)", "code": "def test_groupby_extension_agg(self):\n \n super().test_groupby_extension_agg()\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 17, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 26, "n_identifiers": 3, "d_id": 39701, "documentation": { "docstring": "\n This fails when we get to tm.assert_series_equal when left.index\n contains dictionaries, which are not hashable.\n ", "n_words": 15, "vocab_size": 14, "n_whitespaces": 37, "language": "en" } }, { "id": 84965, "commit_id": "61de767967bb6af077165075260eda3293f95970", "repo": "zulip", "path": "zerver/tests/test_signup.py", "file_name": "test_signup.py", "fun_name": "test_login_deactivate_user_error", "commit_message": "login page: Show form-independent errors even if email auth disabled.\n\nThese used to only be shown conditional on the\n{% if password_auth_enabled %} in the template. Meaning that if you had\nan org with email auth disabled and a deactivated user tried to log in,\nthey wouldn't see the error shown and get confused.\n\nThis switches the position of where these error will be shown (above the\nlogin+password form instead of below it), but it looks fine.", "code": "def test_login_deactivate_user_error(self) -> None:\n \n user_profile = self.example_user(\"hamlet\")\n realm = user_profile.realm\n self.assertTrue(email_auth_enabled(realm))\n\n url = f\"{realm.uri}/login/?\" + urlencode({\"is_deactivated\": user_profile.delivery_email})\n result = self.client_get(url)\n self.assertEqual(result.status_code, 200)\n self.assert_in_response(\n f\"Your account {user_profile.delivery_email} has been deactivated.\", result\n )\n\n auth_dict = realm.authentication_methods_dict()\n auth_dict[\"Email\"] = False\n do_set_realm_authentication_methods(realm, auth_dict, acting_user=None)\n result = self.client_get(url)\n self.assertEqual(result.status_code, 200)\n self.assert_in_response(\n f\"Your account {user_profile.delivery_email} has been deactivated.\", result\n )\n", "url": "https://github.com/zulip/zulip.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 188, "n_words": 54, "vocab_size": 34, "complexity": 1, "nloc": 23, "token_counts": 122, "n_ast_nodes": 222, "n_identifiers": 20, "d_id": 17898, "documentation": { "docstring": "\n This is meant to test whether the error message signaled by the\n is_deactivated is shown independently of whether the Email\n backend is enabled.\n ", "n_words": 23, "vocab_size": 18, "n_whitespaces": 52, "language": "en" } }, { "id": 289964, "commit_id": "bcae6d604e2967c7475f0caa4b1b5e4e76ab88bf", "repo": "core", "path": "homeassistant/components/mqtt/device_tracker/schema_discovery.py", "file_name": "schema_discovery.py", "fun_name": "location_name", "commit_message": "Improve MQTT type hints part 8 (#81034)\n\n* Improve typing 
device_tracker discovery\r\n\r\n* Improve typing device_tracker yaml\r\n\r\n* Add test source_type attribute\r\n\r\n* Follow up comment\r\n\r\n* Initialize at `__init__` not at class level.\r\n\r\n* Use full name for return variable\r\n\r\n* Correct import, remove assert\r\n\r\n* Use AsyncSeeCallback", "code": "def location_name(self) -> str | None:\n \n return self._location_name\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 22, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 14, "n_ast_nodes": 25, "n_identifiers": 4, "d_id": 89090, "documentation": { "docstring": "Return a location name for the current location of the device.", "n_words": 11, "vocab_size": 9, "n_whitespaces": 10, "language": "en" } }, { "id": 180422, "commit_id": "51c8c34486bfddca5948e46e498de44e21ab6496", "repo": "gradio", "path": "gradio/utils.py", "file_name": "utils.py", "fun_name": "__await__", "commit_message": "Async Request Class (#1595)\n\n* Implement Request class and its tests.\r\n\r\n* Add new requirements\r\n\r\n* Reformat codebase.\r\n\r\n* Fix formatting.\r\n\r\n* Add library level requirements.\r\n\r\n* Convert validated_data property to get_validated_data function.\r\n\r\n* Fix the client fixture.\r\n\r\n* Update test/test_utils.py\r\n\r\n* Update test/test_utils.py\r\n\r\n* Fix the client fixture.\r\n\r\n* Add missing initialization for Request._validated_data\r\n\r\n* Fix async test problem with test_tunneling.py\r\n\r\n* Update gradio/utils.py\r\n\r\n* Update gradio/utils.py\r\n\r\n* Fix formatting.\r\n\r\nCo-authored-by: Ömer Faruk Özdemir ", "code": "def __await__(self):\n \n return self.__run().__await__()\n", "url": "https://github.com/gradio-app/gradio.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 16, "n_ast_nodes": 30, "n_identifiers": 3, "d_id": 43164, "documentation": { "docstring": "\n Wrap Request's __await__ magic function to create request calls which are executed in one line.\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 30, "language": "en" } }, { "id": 221717, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/contextlib.py", "file_name": "contextlib.py", "fun_name": "enter_async_context", "commit_message": "add python 3.10.4 for windows", "code": "async def enter_async_context(self, cm):\n \n _cm_type = type(cm)\n _exit = _cm_type.__aexit__\n result = await _cm_type.__aenter__(cm)\n self._push_async_cm_exit(cm, _exit)\n return result\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 60, "n_words": 18, "vocab_size": 15, "complexity": 1, "nloc": 6, "token_counts": 38, "n_ast_nodes": 64, "n_identifiers": 10, "d_id": 56485, "documentation": { "docstring": "Enters the supplied async context manager.\n\n If successful, also pushes its __aexit__ method as a callback and\n returns the result of the __aenter__ method.\n ", "n_words": 24, "vocab_size": 22, "n_whitespaces": 45, "language": "en" } }, { "id": 261975, "commit_id": "8d85af84cd5f1748f979fddcbc4aab1449f61ecb", "repo": "TTS", "path": "TTS/tts/utils/text/punctuation.py", "file_name": "punctuation.py", "fun_name": "strip_to_restore", "commit_message": "Implement Punctuation class", "code": "def strip_to_restore(self, text):\n \n 
text, puncs = self._strip_to_restore(text)\n return text, puncs\n", "url": "https://github.com/coqui-ai/TTS.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 31, "n_words": 10, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 22, "n_ast_nodes": 37, "n_identifiers": 5, "d_id": 77090, "documentation": { "docstring": "Remove punctuations from text to restore them later.\n\n Args:\n text (str): The text to be processed.\n\n Examples ::\n\n \"This is. example !\" -> [[\"This is\", \"example\"], [\".\", \"!\"]]\n\n ", "n_words": 28, "vocab_size": 25, "n_whitespaces": 71, "language": "en" } }, { "id": 34115, "commit_id": "22454ae492eca4bb749fa6d770dffc91d17dab87", "repo": "transformers", "path": "src/transformers/models/realm/tokenization_realm.py", "file_name": "tokenization_realm.py", "fun_name": "batch_encode_candidates", "commit_message": "Add REALM (#13292)\n\n* REALM initial commit\r\n\r\n* Retriever OK (Update new_gelu).\r\n\r\n* Encoder prediction score OK\r\n\r\n* Encoder pretrained model OK\r\n\r\n* Update retriever comments\r\n\r\n* Update docs, tests, and imports\r\n\r\n* Prune unused models\r\n\r\n* Make embedder as a module `RealmEmbedder`\r\n\r\n* Add RealmRetrieverOutput\r\n\r\n* Update tokenization\r\n\r\n* Pass all tests in test_modeling_realm.py\r\n\r\n* Prune RealmModel\r\n\r\n* Update docs\r\n\r\n* Add training test.\r\n\r\n* Remove completed TODO\r\n\r\n* Style & Quality\r\n\r\n* Prune `RealmModel`\r\n\r\n* Fixup\r\n\r\n* Changes:\r\n1. Remove RealmTokenizerFast\r\n2. Update docstrings\r\n3. Add a method to RealmTokenizer to handle candidates tokenization.\r\n\r\n* Fix up\r\n\r\n* Style\r\n\r\n* Add tokenization tests\r\n\r\n* Update `from_pretrained` tests\r\n\r\n* Apply suggestions\r\n\r\n* Style & Quality\r\n\r\n* Copy BERT model\r\n\r\n* Fix comment to avoid docstring copying\r\n\r\n* Make RealmBertModel private\r\n\r\n* Fix bug\r\n\r\n* Style\r\n\r\n* Basic QA\r\n\r\n* Save\r\n\r\n* Complete reader logits\r\n\r\n* Add searcher\r\n\r\n* Complete searcher & reader\r\n\r\n* Move block records init to constructor\r\n\r\n* Fix training bug\r\n\r\n* Add some outputs to RealmReader\r\n\r\n* Add finetuned checkpoint variable names parsing\r\n\r\n* Fix bug\r\n\r\n* Update REALM config\r\n\r\n* Add RealmForOpenQA\r\n\r\n* Update convert_tfrecord logits\r\n\r\n* Fix bugs\r\n\r\n* Complete imports\r\n\r\n* Update docs\r\n\r\n* Update naming\r\n\r\n* Add brute-force searcher\r\n\r\n* Pass realm model tests\r\n\r\n* Style\r\n\r\n* Exclude RealmReader from common tests\r\n\r\n* Fix\r\n\r\n* Fix\r\n\r\n* convert docs\r\n\r\n* up\r\n\r\n* up\r\n\r\n* more make style\r\n\r\n* up\r\n\r\n* upload\r\n\r\n* up\r\n\r\n* Fix\r\n\r\n* Update src/transformers/__init__.py\r\n\r\n* adapt testing\r\n\r\n* change modeling code\r\n\r\n* fix test\r\n\r\n* up\r\n\r\n* up\r\n\r\n* up\r\n\r\n* correct more\r\n\r\n* make retriever work\r\n\r\n* update\r\n\r\n* make style\r\n\r\n* finish main structure\r\n\r\n* Resolve merge conflict\r\n\r\n* Make everything work\r\n\r\n* Style\r\n\r\n* Fixup\r\n\r\n* Fixup\r\n\r\n* Update training test\r\n\r\n* fix retriever\r\n\r\n* remove hardcoded path\r\n\r\n* Fix\r\n\r\n* Fix modeling test\r\n\r\n* Update model links\r\n\r\n* Initial retrieval test\r\n\r\n* Fix modeling test\r\n\r\n* Complete retrieval tests\r\n\r\n* Fix\r\n\r\n* style\r\n\r\n* Fix tests\r\n\r\n* Fix docstring example\r\n\r\n* Minor fix of retrieval test\r\n\r\n* Update license headers and docs\r\n\r\n* Apply suggestions from code 
review\r\n\r\n* Style\r\n\r\n* Apply suggestions from code review\r\n\r\n* Add an example to RealmEmbedder\r\n\r\n* Fix\r\n\r\nCo-authored-by: Patrick von Platen \r\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>", "code": "def batch_encode_candidates(self, text, **kwargs):\n r\n\n # Always using a fixed sequence length to encode in order to stack candidates into a batch.\n kwargs[\"padding\"] = PaddingStrategy.MAX_LENGTH\n\n batch_text = text\n batch_text_pair = kwargs.pop(\"text_pair\", None)\n return_tensors = kwargs.pop(\"return_tensors\", None)\n\n output_data = {\n \"input_ids\": [],\n \"attention_mask\": [],\n \"token_type_ids\": [],\n }\n\n for idx, candidate_text in enumerate(batch_text):\n if batch_text_pair is not None:\n candidate_text_pair = batch_text_pair[idx]\n else:\n candidate_text_pair = None\n\n encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)\n\n encoded_input_ids = encoded_candidates.get(\"input_ids\")\n encoded_attention_mask = encoded_candidates.get(\"attention_mask\")\n encoded_token_type_ids = encoded_candidates.get(\"token_type_ids\")\n\n if encoded_input_ids is not None:\n output_data[\"input_ids\"].append(encoded_input_ids)\n if encoded_attention_mask is not None:\n output_data[\"attention_mask\"].append(encoded_attention_mask)\n if encoded_token_type_ids is not None:\n output_data[\"token_type_ids\"].append(encoded_token_type_ids)\n\n output_data = dict((key, item) for key, item in output_data.items() if len(item) != 0)\n\n return BatchEncoding(output_data, tensor_type=return_tensors)\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 395, "n_words": 112, "vocab_size": 74, "complexity": 8, "nloc": 62, "token_counts": 213, "n_ast_nodes": 351, "n_identifiers": 30, "d_id": 6202, "documentation": { "docstring": "\n Encode a batch of text or text pair. This method is similar to regular __call__ method but has the following\n differences:\n\n 1. Handle additional num_candidate axis. (batch_size, num_candidates, text)\n 2. Always pad the sequences to *max_length*.\n 3. Must specify *max_length* in order to stack packs of candidates into a batch.\n\n - single sequence: `[CLS] X [SEP]`\n - pair of sequences: `[CLS] A [SEP] B [SEP]`\n\n Args:\n text (`List[List[str]]`):\n The batch of sequences to be encoded. Each sequence must be in this format: (batch_size,\n num_candidates, text).\n text_pair (`List[List[str]]`, *optional*):\n The batch of sequences to be encoded. 
Each sequence must be in this format: (batch_size,\n num_candidates, text).\n **kwargs:\n Keyword arguments of the __call__ method.\n\n Returns:\n [`BatchEncoding`]: Encoded text or text pair.\n\n Example:\n\n ```python\n >>> from transformers import RealmTokenizer\n\n >>> # batch_size = 2, num_candidates = 2\n >>> text = [[\"Hello world!\", \"Nice to meet you!\"], [\"The cute cat.\", \"The adorable dog.\"]]\n\n >>> tokenizer = RealmTokenizer.from_pretrained(\"qqaatw/realm-cc-news-pretrained-encoder\")\n >>> tokenized_text = tokenizer.batch_encode_candidates(text, max_length=10, return_tensors=\"pt\")\n ```", "n_words": 160, "vocab_size": 106, "n_whitespaces": 418, "language": "en" } }, { "id": 261401, "commit_id": "ff9344f3d8d11d38fa3a2497199113e5bac9537c", "repo": "scikit-learn", "path": "sklearn/linear_model/_linear_loss.py", "file_name": "_linear_loss.py", "fun_name": "weight_intercept", "commit_message": "FEA add (single) Cholesky Newton solver to GLMs (#24637)\n\n* FEA add NewtonSolver, CholeskyNewtonSolver and QRCholeskyNewtonSolver\n\n* ENH better singular hessian special solve\n\n* CLN fix some typos found by reviewer\n\n* TST assert ConvergenceWarning is raised\n\n* MNT add BaseCholeskyNewtonSolver\n\n* WIP colinear design in GLMs\n\n* FIX _solve_singular\n\n* FIX false unpacking in\n\n* TST add tests for unpenalized GLMs\n\n* TST fix solutions of glm_dataset\n\n* ENH add SVDFallbackSolver\n\n* CLN remove SVDFallbackSolver\n\n* ENH use gradient step for singular hessians\n\n* ENH print iteration number in warnings\n\n* TST improve test_linalg_warning_with_newton_solver\n\n* CLN LinAlgWarning fron scipy.linalg\n\n* ENH more robust hessian\n\n* ENH increase maxls for lbfgs to make it more robust\n\n* ENH add hessian_warning for too many negative hessian values\n\n* CLN some warning messages\n\n* ENH add lbfgs_step\n\n* ENH use lbfgs_step for hessian_warning\n\n* TST make them pass\n\n* TST tweek rtol for lbfgs\n\n* TST add rigoros test for GLMs\n\n* TST improve test_warm_start\n\n* ENH improve lbfgs options for better convergence\n\n* CLN fix test_warm_start\n\n* TST fix assert singular values in datasets\n\n* CLN address most review comments\n\n* ENH enable more vebosity levels for lbfgs\n\n* DOC add whatsnew\n\n* CLN remove xfail and clean a bit\n\n* CLN docstring about minimum norm\n\n* More informative repr for the glm_dataset fixture cases\n\n* Forgot to run black\n\n* CLN remove unnecessary filterwarnings\n\n* CLN address review comments\n\n* Trigger [all random seeds] on the following tests:\ntest_glm_regression\ntest_glm_regression_hstacked_X\ntest_glm_regression_vstacked_X\ntest_glm_regression_unpenalized\ntest_glm_regression_unpenalized_hstacked_X\ntest_glm_regression_unpenalized_vstacked_X\ntest_warm_start\n\n* CLN add comment for lbfgs ftol=64 * machine precision\n\n* CLN XXX code comment\n\n* Trigger [all random seeds] on the following tests:\n\ntest_glm_regression\ntest_glm_regression_hstacked_X\ntest_glm_regression_vstacked_X\ntest_glm_regression_unpenalized\ntest_glm_regression_unpenalized_hstacked_X\ntest_glm_regression_unpenalized_vstacked_X\ntest_warm_start\n\n* CLN link issue and remove code snippet in comment\n\n* Trigger [all random seeds] on the following tests:\n\ntest_glm_regression\ntest_glm_regression_hstacked_X\ntest_glm_regression_vstacked_X\ntest_glm_regression_unpenalized\ntest_glm_regression_unpenalized_hstacked_X\ntest_glm_regression_unpenalized_vstacked_X\ntest_warm_start\n\n* CLN add catch_warnings\n\n* Trigger [all random seeds] on the following 
tests:\n\ntest_glm_regression\ntest_glm_regression_hstacked_X\ntest_glm_regression_vstacked_X\ntest_glm_regression_unpenalized\ntest_glm_regression_unpenalized_hstacked_X\ntest_glm_regression_unpenalized_vstacked_X\ntest_warm_start\n\n* Trigger [all random seeds] on the following tests:\n\ntest_glm_regression\ntest_glm_regression_hstacked_X\ntest_glm_regression_vstacked_X\ntest_glm_regression_unpenalized\ntest_glm_regression_unpenalized_hstacked_X\ntest_glm_regression_unpenalized_vstacked_X\ntest_warm_start\n\n* [all random seeds]\n\ntest_glm_regression\ntest_glm_regression_hstacked_X\ntest_glm_regression_vstacked_X\ntest_glm_regression_unpenalized\ntest_glm_regression_unpenalized_hstacked_X\ntest_glm_regression_unpenalized_vstacked_X\ntest_warm_start\n\n* Trigger with -Werror [all random seeds]\n\ntest_glm_regression\ntest_glm_regression_hstacked_X\ntest_glm_regression_vstacked_X\ntest_glm_regression_unpenalized\ntest_glm_regression_unpenalized_hstacked_X\ntest_glm_regression_unpenalized_vstacked_X\ntest_warm_start\n\n* ENH increase maxls to 50\n\n* [all random seeds]\n\ntest_glm_regression\ntest_glm_regression_hstacked_X\ntest_glm_regression_vstacked_X\ntest_glm_regression_unpenalized\ntest_glm_regression_unpenalized_hstacked_X\ntest_glm_regression_unpenalized_vstacked_X\ntest_warm_start\n\n* Revert \"Trigger with -Werror [all random seeds]\"\n\nThis reverts commit 99f4cf99ca41b4ad2bdad537ad60f936970e3a88.\n\n* TST add catch_warnings to filterwarnings\n\n* TST adapt tests for newton solvers\n\n* CLN cleaner gradient step with gradient_times_newton\n\n* DOC add whatsnew\n\n* ENH always use lbfgs as fallback\n\n* TST adapt rtol\n\n* TST fix test_linalg_warning_with_newton_solver\n\n* CLN address some review comments\n\n* Improve tests related to convergence warning on collinear data\n\n* overfit -> fit\n\n* Typo in comment\n\n* Apply suggestions from code review\n\n* ENH fallback_lbfgs_solve\n- Do not use lbfgs steps, fall back complete to lbfgs\n\n* ENH adapt rtol\n\n* Improve test_linalg_warning_with_newton_solver\n\n* Better comments\n\n* Fixed Hessian casing and improved warning messages\n\n* [all random seeds]\n\ntest_linalg_warning_with_newton_solver\n\n* Ignore ConvergenceWarnings for now if convergence is good\n\n* CLN remove counting of warnings\n\n* ENH fall back to lbfgs if line search did not converge\n\n* DOC better comment on performance bottleneck\n\n* Update GLM related examples to use the new solver\n\n* CLN address reviewer comments\n\n* EXA improve some wordings\n\n* CLN do not pop \"solver in parameter constraints\n\n* CLN fix typos\n\n* DOC fix docstring\n\n* CLN remove solver newton-qr-cholesky\n\n* DOC update PR number in whatsnew\n\n* CLN address review comments\n\n* CLN remove unnecessary catch_warnings\n\n* CLN address some review comments\n\n* DOC more precise whatsnew\n\n* CLN use init_zero_coef\n\n* CLN use and test init_zero_coef\n\n* CLN address some review comments\n\n* CLN mark NewtonSolver as private by leading underscore\n\n* CLN exact comments for inner_solve\n\n* TST add test_newton_solver_verbosity\n\n* TST extend test_newton_solver_verbosity\n\n* TST logic in test_glm_regression_unpenalized\n\n* TST use count_nonzero\n\n* CLN remove super rare line search checks\n\n* MNT move Newton solver to new file _newton_solver.py\n\nCo-authored-by: Olivier Grisel \nCo-authored-by: Julien Jerphanion ", "code": "def weight_intercept(self, coef):\n \n if not self.base_loss.is_multiclass:\n if self.fit_intercept:\n intercept = coef[-1]\n weights = coef[:-1]\n 
else:\n intercept = 0.0\n weights = coef\n else:\n # reshape to (n_classes, n_dof)\n if coef.ndim == 1:\n weights = coef.reshape((self.base_loss.n_classes, -1), order=\"F\")\n else:\n weights = coef\n if self.fit_intercept:\n intercept = weights[:, -1]\n weights = weights[:, :-1]\n else:\n intercept = 0.0\n\n return weights, intercept\n", "url": "https://github.com/scikit-learn/scikit-learn.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 297, "n_words": 57, "vocab_size": 31, "complexity": 5, "nloc": 19, "token_counts": 116, "n_ast_nodes": 185, "n_identifiers": 12, "d_id": 76808, "documentation": { "docstring": "Helper function to get coefficients and intercept.\n\n Parameters\n ----------\n coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,)\n Coefficients of a linear model.\n If shape (n_classes * n_dof,), the classes of one feature are contiguous,\n i.e. one reconstructs the 2d-array via\n coef.reshape((n_classes, -1), order=\"F\").\n\n Returns\n -------\n weights : ndarray of shape (n_features,) or (n_classes, n_features)\n Coefficients without intercept term.\n intercept : float or ndarray of shape (n_classes,)\n Intercept terms.\n ", "n_words": 72, "vocab_size": 52, "n_whitespaces": 194, "language": "en" } }, { "id": 263985, "commit_id": "be94db4587c16f3f6498d64d563c7ce740e11863", "repo": "pyinstaller", "path": "PyInstaller/utils/conftest.py", "file_name": "conftest.py", "fun_name": "_run_executable", "commit_message": "tests: do not attempt to re-run executable after a failed run\n\nRemove the re-runs of a failed executable. This functionality\nwas originally intended for executables whose execution timed\nout (when running in \"regular\" mode, where we expect the\nprogram to exit). However, it ended up subtly broken ever\nsince cf9dfec8018c96511f8c8caffc2b2e965350f2f9 changed the\nprogram exit code for unhandled exceptions from -1 to 1 to\nmatch the behavior of python interpreter.\n\nConsequently, every time that a test executable fails due to\nunhandled exception, it is re-run again. This unnecessarily\nprolongs the test time (especially for onefile executables)\nand bloats/obfuscates the captured test output (since we end\nup with bootloader log of two runs instead of just one).\n\nRemove this built-in re-run functionality altogether, because\nre-runs usually mask some other issue that should be fixed.", "code": "def _run_executable(self, prog, args, run_from_path, runtime):\n \n # Run the test in a clean environment to make sure they're really self-contained.\n prog_env = copy.deepcopy(os.environ)\n prog_env['PATH'] = ''\n del prog_env['PATH']\n # For Windows we need to keep minimal PATH for successful running of some tests.\n if is_win:\n # Minimum Windows PATH is in most cases: C:\\Windows\\system32;C:\\Windows\n prog_env['PATH'] = os.pathsep.join(winutils.get_system_path())\n\n exe_path = prog\n if run_from_path:\n # Run executable in the temp directory. Add the directory containing the executable to $PATH. 
Basically,\n # pretend we are a shell executing the program from $PATH.\n prog_cwd = str(self._tmpdir)\n prog_name = os.path.basename(prog)\n prog_env['PATH'] = os.pathsep.join([prog_env.get('PATH', ''), os.path.dirname(prog)])\n\n else:\n # Run executable in the directory where it is.\n prog_cwd = os.path.dirname(prog)\n # The executable will be called with argv[0] as relative not absolute path.\n prog_name = os.path.join(os.curdir, os.path.basename(prog))\n\n args = [prog_name] + args\n # Using sys.stdout/sys.stderr for subprocess fixes printing messages in Windows command prompt. Py.test is then\n # able to collect stdout/sterr messages and display them if a test fails.\n return self._run_executable_(args, exe_path, prog_env, prog_cwd, runtime)\n", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 390, "n_words": 169, "vocab_size": 115, "complexity": 3, "nloc": 16, "token_counts": 165, "n_ast_nodes": 275, "n_identifiers": 27, "d_id": 77541, "documentation": { "docstring": "\n Run executable created by PyInstaller.\n\n :param args: CLI options to pass to the created executable.\n ", "n_words": 15, "vocab_size": 13, "n_whitespaces": 37, "language": "en" } }, { "id": 337281, "commit_id": "fb5ed62c102c0323486b89805e1888495de3db15", "repo": "accelerate", "path": "src/accelerate/accelerator.py", "file_name": "accelerator.py", "fun_name": "unscale_gradients", "commit_message": "Convert documentation to the new front (#271)\n\n* Main conversion\r\n\r\n* Doc styling\r\n\r\n* Style\r\n\r\n* New front deploy\r\n\r\n* Fixes\r\n\r\n* Fixes\r\n\r\n* Fix new docstrings\r\n\r\n* Style", "code": "def unscale_gradients(self, optimizer=None):\n \n if self.state.use_fp16 and self.native_amp:\n if optimizer is None:\n # TODO: this unscales all optimizers where we should only unscale the one where parameters are.\n optimizer = self._optimizers\n elif not isinstance(optimizer, (tuple, list)):\n optimizer = [optimizer]\n for opt in optimizer:\n while isinstance(opt, AcceleratedOptimizer):\n opt = opt.optimizer\n self.scaler.unscale_(opt)\n", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 190, "n_words": 49, "vocab_size": 42, "complexity": 7, "nloc": 10, "token_counts": 75, "n_ast_nodes": 119, "n_identifiers": 14, "d_id": 120995, "documentation": { "docstring": "\n Unscale the gradients in mixed precision training with AMP. This is a noop in all other settings.\n\n Args:\n optimizer (`torch.optim.Optimizer` or `List[torch.optim.Optimizer]`, *optional*):\n The optimizer(s) for which to unscale gradients. 
If not set, will unscale gradients on all optimizers\n that were passed to [`~Accelerator.prepare`].\n ", "n_words": 44, "vocab_size": 39, "n_whitespaces": 107, "language": "en" } }, { "id": 284452, "commit_id": "9068ad01249c1e1adaca3ef9a704d70da7e3a17b", "repo": "OpenBBTerminal", "path": "tests/openbb_terminal/cryptocurrency/test_cryptocurrency_helpers.py", "file_name": "test_cryptocurrency_helpers.py", "fun_name": "test_coin_api_load_df_for_ta", "commit_message": "Refactored Crypto Tests (#1743)\n\n* Refactored tests\r\n\r\n* Removed unused command\r\n\r\n* Added tests\r\n\r\n* Tests : remove cassettes files + add fixture\r\n\r\n* Black\r\n\r\n* Tests : skip tests\r\n\r\nCo-authored-by: didierlopes.eth \r\nCo-authored-by: minhhoang1023 <40023817+minhhoang1023@users.noreply.github.com>\r\nCo-authored-by: Chavithra PARANA ", "code": "def test_coin_api_load_df_for_ta(get_bitcoin, mocker):\n \n mock_load = mocker.patch(\n base\n + \"due_diligence.pycoingecko_model.CoinGeckoAPI.get_coin_market_chart_by_id\"\n )\n _, symbol = get_bitcoin\n coin_map_df = prepare_all_coins_df().set_index(\"Symbol\").loc[symbol.upper()].iloc[0]\n\n with open(\n \"tests/openbb_terminal/cryptocurrency/json/test_cryptocurrency_helpers/btc_usd_test_data.json\",\n encoding=\"utf8\",\n ) as f:\n sample_return = json.load(f)\n\n mock_load.return_value = sample_return\n mock_return, vs = load_ta_data(\n coin_map_df=coin_map_df,\n source=\"cg\",\n currency=\"usd\",\n days=30,\n )\n assert mock_return.shape == (31, 4)\n assert vs == \"usd\"\n\n\n@pytest.mark.record_stdout\n@pytest.mark.vcr", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "@pytest.mark.record_stdout\n@pytest.mark.vcr", "n_ast_errors": 1, "ast_levels": 14, "n_whitespaces": 147, "n_words": 50, "vocab_size": 39, "complexity": 1, "nloc": 21, "token_counts": 110, "n_ast_nodes": 205, "n_identifiers": 32, "d_id": 84723, "documentation": { "docstring": "\n Mock load function through get_coin_market_chart_by_id.\n Mock returns a dict saved as .json\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 22, "language": "en" } }, { "id": 73176, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/modeladmin/options.py", "file_name": "options.py", "fun_name": "choose_parent_view", "commit_message": "Reformat with black", "code": "def choose_parent_view(self, request):\n \n kwargs = {\"model_admin\": self}\n view_class = self.choose_parent_view_class\n return view_class.as_view(**kwargs)(request)\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 40, "n_words": 12, "vocab_size": 11, "complexity": 1, "nloc": 4, "token_counts": 31, "n_ast_nodes": 54, "n_identifiers": 7, "d_id": 15972, "documentation": { "docstring": "\n Instantiates a class-based view to allows a parent page to be chosen\n for a new object, where the assigned model extends Wagtail's Page\n model, and there is more than one potential parent for new instances.\n The view class used can be overridden by changing the\n 'choose_parent_view_class' attribute.\n ", "n_words": 47, "vocab_size": 38, "n_whitespaces": 90, "language": "en" } }, { "id": 61321, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/utils/unpacking.py", "file_name": "unpacking.py", "fun_name": "is_within_directory", "commit_message": "upd; format", "code": "def 
is_within_directory(directory, target):\n # type: (str, str) -> bool\n \n abs_directory = os.path.abspath(directory)\n abs_target = os.path.abspath(target)\n\n prefix = os.path.commonprefix([abs_directory, abs_target])\n return prefix == abs_directory\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 41, "n_words": 23, "vocab_size": 19, "complexity": 1, "nloc": 5, "token_counts": 46, "n_ast_nodes": 75, "n_identifiers": 10, "d_id": 12507, "documentation": { "docstring": "\n Return true if the absolute path of target is within the directory\n ", "n_words": 12, "vocab_size": 11, "n_whitespaces": 19, "language": "en" } }, { "id": 231584, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/_legend.py", "file_name": "_legend.py", "fun_name": "itemdoubleclick", "commit_message": "switch to black .22", "code": "def itemdoubleclick(self):\n \n return self[\"itemdoubleclick\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 63028, "documentation": { "docstring": "\n Determines the behavior on legend item double-click. \"toggle\"\n toggles the visibility of the item clicked on the graph.\n \"toggleothers\" makes the clicked item the sole visible item on\n the graph. False disables legend item double-click\n interactions.\n\n The 'itemdoubleclick' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['toggle', 'toggleothers', False]\n\n Returns\n -------\n Any\n ", "n_words": 60, "vocab_size": 42, "n_whitespaces": 155, "language": "en" } }, { "id": 300319, "commit_id": "26177bd080b4eb6d11cfd9fbdd158be36f4983d4", "repo": "core", "path": "homeassistant/components/logbook/__init__.py", "file_name": "__init__.py", "fun_name": "_generate_legacy_events_context_id_query", "commit_message": "Convert logbook to use lambda_stmt (#71624)", "code": "def _generate_legacy_events_context_id_query() -> Select:\n \n # This can be removed once we no longer have event_ids in the states table\n return (\n select(\n *EVENT_COLUMNS,\n literal(value=None, type_=sqlalchemy.String).label(\"shared_data\"),\n States.state,\n States.entity_id,\n States.attributes,\n StateAttributes.shared_attrs,\n )\n .outerjoin(States, (Events.event_id == States.event_id))\n .where(States.last_updated == States.last_changed)\n .where(_not_continuous_entity_matcher())\n .outerjoin(\n StateAttributes, (States.attributes_id == StateAttributes.attributes_id)\n )\n )\n\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 22, "n_whitespaces": 183, "n_words": 45, "vocab_size": 41, "complexity": 1, "nloc": 18, "token_counts": 98, "n_ast_nodes": 151, "n_identifiers": 24, "d_id": 99183, "documentation": { "docstring": "Generate a legacy events context id query that also joins states.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 223548, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/_header_value_parser.py", "file_name": "_header_value_parser.py", "fun_name": "get_mailbox_list", "commit_message": "add python 3.10.4 for 
windows", "code": "def get_mailbox_list(value):\n \n mailbox_list = MailboxList()\n while value and value[0] != ';':\n try:\n token, value = get_mailbox(value)\n mailbox_list.append(token)\n except errors.HeaderParseError:\n leader = None\n if value[0] in CFWS_LEADER:\n leader, value = get_cfws(value)\n if not value or value[0] in ',;':\n mailbox_list.append(leader)\n mailbox_list.defects.append(errors.ObsoleteHeaderDefect(\n \"empty element in mailbox-list\"))\n else:\n token, value = get_invalid_mailbox(value, ',;')\n if leader is not None:\n token[:0] = [leader]\n mailbox_list.append(token)\n mailbox_list.defects.append(errors.InvalidHeaderDefect(\n \"invalid mailbox in mailbox-list\"))\n elif value[0] == ',':\n mailbox_list.defects.append(errors.ObsoleteHeaderDefect(\n \"empty element in mailbox-list\"))\n else:\n token, value = get_invalid_mailbox(value, ',;')\n if leader is not None:\n token[:0] = [leader]\n mailbox_list.append(token)\n mailbox_list.defects.append(errors.InvalidHeaderDefect(\n \"invalid mailbox in mailbox-list\"))\n if value and value[0] not in ',;':\n # Crap after mailbox; treat it as an invalid mailbox.\n # The mailbox info will still be available.\n mailbox = mailbox_list[-1]\n mailbox.token_type = 'invalid-mailbox'\n token, value = get_invalid_mailbox(value, ',;')\n mailbox.extend(token)\n mailbox_list.defects.append(errors.InvalidHeaderDefect(\n \"invalid mailbox in mailbox-list\"))\n if value and value[0] == ',':\n mailbox_list.append(ListSeparator)\n value = value[1:]\n return mailbox_list, value\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 727, "n_words": 147, "vocab_size": 69, "complexity": 14, "nloc": 42, "token_counts": 283, "n_ast_nodes": 482, "n_identifiers": 20, "d_id": 56965, "documentation": { "docstring": " mailbox-list = (mailbox *(\",\" mailbox)) / obs-mbox-list\n obs-mbox-list = *([CFWS] \",\") mailbox *(\",\" [mailbox / CFWS])\n\n For this routine we go outside the formal grammar in order to improve error\n handling. We recognize the end of the mailbox list only at the end of the\n value or at a ';' (the group terminator). This is so that we can turn\n invalid mailboxes into InvalidMailbox tokens and continue parsing any\n remaining valid mailboxes. We also allow all mailbox entries to be null,\n and this condition is handled appropriately at a higher level.\n\n ", "n_words": 91, "vocab_size": 70, "n_whitespaces": 123, "language": "en" } }, { "id": 189164, "commit_id": "1a6b498657ec5dd29ddf4f6b240c6fc0c5d88f7a", "repo": "aws-cli", "path": "tests/functional/eks/test_kubeconfig.py", "file_name": "test_kubeconfig.py", "fun_name": "_clone_config", "commit_message": "Deprecate Kubernetes client API version v1alpha1\n\nKubernetes has deprecated v1alpha1, v1beta1 has been available since Kubernetes\nv1.11 (kubernetes/kubernetes#64482), and EKS currently supports Kubernetes\nversions v1.16 through v1.21. This is a breaking change for clients running\nversions v1.10 and older, which haven't been supported by EKS since September\n2019.\n\n\"aws eks get-token\" now respects the KUBERNETES_EXEC_INFO environment\nvariable and conservatively falls back to v1alpha1, which is supported\nby Kubernetes versions 1.10 through 1.22 (released upstream August 2021, to be\nreleased by EKS in Q4 2021). 
It also now supports \"v1beta1\" and \"v1\".\n\n\"aws eks update-kubeconfig\" now writes \"v1beta1\" in the kubeconfig which\nwill be supported by Kubernetes until 1.29 (aproximately December 2023).\nAt or around that date, we can change the default version written to\nkubeconfigs to \"v1\"\n\nSigned-off-by: Micah Hausler ", "code": "def _clone_config(self, config):\n \n old_path = os.path.abspath(get_testdata(config))\n new_path = os.path.join(self._temp_directory, config)\n shutil.copy2(old_path,\n new_path)\n return new_path\n", "url": "https://github.com/aws/aws-cli.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 69, "n_words": 14, "vocab_size": 12, "complexity": 1, "nloc": 6, "token_counts": 45, "n_ast_nodes": 72, "n_identifiers": 13, "d_id": 46004, "documentation": { "docstring": "\n Copies the testdata named config into the temp directory,\n Returns the new path\n\n :param config: The name of the testdata to copy\n :type config: str\n ", "n_words": 25, "vocab_size": 20, "n_whitespaces": 61, "language": "en" } }, { "id": 47548, "commit_id": "49e336ae0302b386a2f47269a6d13988382d975f", "repo": "airflow", "path": "tests/models/test_cleartasks.py", "file_name": "test_cleartasks.py", "fun_name": "test_clear_task_instances_dr_state", "commit_message": "Replace usage of `DummyOperator` with `EmptyOperator` (#22974)\n\n* Replace usage of `DummyOperator` with `EmptyOperator`", "code": "def test_clear_task_instances_dr_state(self, state, last_scheduling, dag_maker):\n \n with dag_maker(\n 'test_clear_task_instances',\n start_date=DEFAULT_DATE,\n end_date=DEFAULT_DATE + datetime.timedelta(days=10),\n ) as dag:\n EmptyOperator(task_id='0')\n EmptyOperator(task_id='1', retries=2)\n dr = dag_maker.create_dagrun(\n state=State.RUNNING,\n run_type=DagRunType.SCHEDULED,\n )\n ti0, ti1 = sorted(dr.task_instances, key=lambda ti: ti.task_id)\n dr.last_scheduling_decision = DEFAULT_DATE\n ti0.state = TaskInstanceState.SUCCESS\n ti1.state = TaskInstanceState.SUCCESS\n session = dag_maker.session\n session.flush()\n\n # we use order_by(task_id) here because for the test DAG structure of ours\n # this is equivalent to topological sort. 
It would not work in general case\n # but it works for our case because we specifically constructed test DAGS\n # in the way that those two sort methods are equivalent\n qry = session.query(TI).filter(TI.dag_id == dag.dag_id).order_by(TI.task_id).all()\n clear_task_instances(qry, session, dag_run_state=state, dag=dag)\n session.flush()\n\n session.refresh(dr)\n\n assert dr.state == state\n assert dr.start_date is None if state == State.QUEUED else dr.start_date\n assert dr.last_scheduling_decision == last_scheduling\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 355, "n_words": 124, "vocab_size": 95, "complexity": 2, "nloc": 25, "token_counts": 204, "n_ast_nodes": 326, "n_identifiers": 44, "d_id": 9154, "documentation": { "docstring": "Test that DR state is set to None after clear.\n And that DR.last_scheduling_decision is handled OK.\n start_date is also set to None\n ", "n_words": 22, "vocab_size": 16, "n_whitespaces": 43, "language": "en" } }, { "id": 45466, "commit_id": "69f6f9e01b6df76c3c8fa266d460324163957887", "repo": "airflow", "path": "airflow/migrations/versions/64a7d6477aae_fix_description_field_in_connection_to_.py", "file_name": "64a7d6477aae_fix_description_field_in_connection_to_.py", "fun_name": "downgrade", "commit_message": "Autogenerate migration reference doc (#21601)\n\n* document airflow version in each alembic migration module and use this to autogen the doc\r\n* update each migration module to have the same description used in migration ref (so it can be used in autogen)", "code": "def downgrade():\n \n conn = op.get_bind()\n if conn.dialect.name == \"sqlite\":\n # in sqlite TEXT and STRING column types are the same\n return\n if conn.dialect.name == \"mysql\":\n op.alter_column(\n 'connection',\n 'description',\n existing_type=sa.Text(5000),\n type_=sa.String(length=5000),\n existing_nullable=True,\n )\n else:\n # postgres does not allow size modifier for text type\n op.alter_column(\n 'connection',\n 'description',\n existing_type=sa.Text(),\n type_=sa.String(length=5000),\n existing_nullable=True,\n )\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 224, "n_words": 50, "vocab_size": 40, "complexity": 3, "nloc": 20, "token_counts": 98, "n_ast_nodes": 165, "n_identifiers": 14, "d_id": 8593, "documentation": { "docstring": "Unapply Fix description field in ``connection`` to be ``text``", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 119122, "commit_id": "329de7c9cc1b77f9caacf2163a77a9d8496c379b", "repo": "jax", "path": "jax/_src/dtypes.py", "file_name": "dtypes.py", "fun_name": "_canonicalize_dtype", "commit_message": "Only use config.x64_enabled as the memo cache key for canonicalize_dtype, not any other fields.\n\nThis saves the time to repeatedly build a tuple as a cache key. 
Reduces the time for CustomLinearSolveTest.test_custom_linear_solve_pytree on my workstation from 110s to 85s.\n\nPiperOrigin-RevId: 422632700", "code": "def _canonicalize_dtype(x64_enabled, dtype):\n \n try:\n dtype = np.dtype(dtype)\n except TypeError as e:\n raise TypeError(f'dtype {dtype!r} not understood') from e\n\n if x64_enabled:\n return dtype\n else:\n return _dtype_to_32bit_dtype.get(dtype, dtype)\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 43, "n_words": 26, "vocab_size": 24, "complexity": 3, "nloc": 9, "token_counts": 47, "n_ast_nodes": 83, "n_identifiers": 8, "d_id": 26545, "documentation": { "docstring": "Convert from a dtype to a canonical dtype based on config.x64_enabled.", "n_words": 11, "vocab_size": 9, "n_whitespaces": 10, "language": "en" } }, { "id": 178914, "commit_id": "abfb99b0a05dd76d2ecc6ebc20732a271857c6c8", "repo": "Nuitka", "path": "nuitka/freezer/IncludedDataFiles.py", "file_name": "IncludedDataFiles.py", "fun_name": "copyDataFiles", "commit_message": "Plugins: Massive cleanup of data file handling\n\n* Move data file handling out of standalone only, allowing support\n for other modes as well.\n\n* Attach logger and tags to data file objects.", "code": "def copyDataFiles():\n \n\n for included_datafile in getIncludedDataFiles():\n # TODO: directories should be resolved to files.\n if (\n not isinstance(included_datafile, (IncludedDataFile))\n or included_datafile.needsCopy()\n ):\n _handleDataFile(\n included_datafile,\n )\n", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 111, "n_words": 25, "vocab_size": 25, "complexity": 4, "nloc": 9, "token_counts": 36, "n_ast_nodes": 62, "n_identifiers": 7, "d_id": 42859, "documentation": { "docstring": "Copy the data files needed for standalone distribution.\n\n Notes:\n This is for data files only, not DLLs or even extension modules,\n those must be registered as entry points, and would not go through\n necessary handling if provided like this.\n ", "n_words": 39, "vocab_size": 35, "n_whitespaces": 66, "language": "en" } }, { "id": 134777, "commit_id": "ee0fbf9d43dfa05fdf90ad0515b2671cac16a92b", "repo": "ray", "path": "python/ray/data/_internal/util.py", "file_name": "util.py", "fun_name": "_check_pyarrow_version", "commit_message": "[Datasets] Add upper bound to pyarrow version check. (#29674)\n\nWe previously weren't checking that the 7.0.0 pyarrow upper bound was being respected. This PR adds this upper bound check.", "code": "def _check_pyarrow_version():\n \n global _VERSION_VALIDATED\n\n if not _VERSION_VALIDATED:\n if os.environ.get(RAY_DISABLE_PYARROW_VERSION_CHECK, \"0\") == \"1\":\n _VERSION_VALIDATED = True\n return\n\n try:\n import pyarrow\n except ModuleNotFoundError:\n # pyarrow not installed, short-circuit.\n return\n\n import pkg_resources\n\n if not hasattr(pyarrow, \"__version__\"):\n logger.warning(\n \"You are using the 'pyarrow' module, but the exact version is unknown \"\n \"(possibly carried as an internal component by another module). Please \"\n f\"make sure you are using pyarrow >= {MIN_PYARROW_VERSION}, < \"\n f\"{MAX_PYARROW_VERSION} to ensure compatibility with Ray Datasets. 
\"\n \"If you want to disable this pyarrow version check, set the \"\n f\"environment variable {RAY_DISABLE_PYARROW_VERSION_CHECK}=1.\"\n )\n else:\n version = pyarrow.__version__\n if (\n pkg_resources.packaging.version.parse(version)\n < pkg_resources.packaging.version.parse(MIN_PYARROW_VERSION)\n ) or (\n pkg_resources.packaging.version.parse(version)\n >= pkg_resources.packaging.version.parse(MAX_PYARROW_VERSION)\n ):\n raise ImportError(\n f\"Datasets requires pyarrow >= {MIN_PYARROW_VERSION}, < \"\n f\"{MAX_PYARROW_VERSION}, but {version} is installed. Reinstall \"\n f'with `pip install -U \"pyarrow<{MAX_PYARROW_VERSION}\"`. '\n \"If you want to disable this pyarrow version check, set the \"\n f\"environment variable {RAY_DISABLE_PYARROW_VERSION_CHECK}=1.\"\n )\n _VERSION_VALIDATED = True\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 603, "n_words": 149, "vocab_size": 93, "complexity": 7, "nloc": 37, "token_counts": 134, "n_ast_nodes": 266, "n_identifiers": 19, "d_id": 30413, "documentation": { "docstring": "Check that pyarrow's version is within the supported bounds.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 38773, "commit_id": "31ee80d55673f32c0f5d50936f371e661b74b21a", "repo": "transformers", "path": "src/transformers/models/layoutlmv3/modeling_layoutlmv3.py", "file_name": "modeling_layoutlmv3.py", "fun_name": "cogview_attention", "commit_message": "Add LayoutLMv3 (#17060)\n\n* Make forward pass work\r\n\r\n* More improvements\r\n\r\n* Remove unused imports\r\n\r\n* Remove timm dependency\r\n\r\n* Improve loss calculation of token classifier\r\n\r\n* Fix most tests\r\n\r\n* Add docs\r\n\r\n* Add model integration test\r\n\r\n* Make all tests pass\r\n\r\n* Add LayoutLMv3FeatureExtractor\r\n\r\n* Improve integration test + make fixup\r\n\r\n* Add example script\r\n\r\n* Fix style\r\n\r\n* Add LayoutLMv3Processor\r\n\r\n* Fix style\r\n\r\n* Add option to add visual labels\r\n\r\n* Make more tokenizer tests pass\r\n\r\n* Fix more tests\r\n\r\n* Make more tests pass\r\n\r\n* Fix bug and improve docs\r\n\r\n* Fix import of processors\r\n\r\n* Improve docstrings\r\n\r\n* Fix toctree and improve docs\r\n\r\n* Fix auto tokenizer\r\n\r\n* Move tests to model folder\r\n\r\n* Move tests to model folder\r\n\r\n* change default behavior add_prefix_space\r\n\r\n* add prefix space for fast\r\n\r\n* add_prefix_spcae set to True for Fast\r\n\r\n* no space before `unique_no_split` token\r\n\r\n* add test to hightligh special treatment of added tokens\r\n\r\n* fix `test_batch_encode_dynamic_overflowing` by building a long enough example\r\n\r\n* fix `test_full_tokenizer` with add_prefix_token\r\n\r\n* Fix tokenizer integration test\r\n\r\n* Make the code more readable\r\n\r\n* Add tests for LayoutLMv3Processor\r\n\r\n* Fix style\r\n\r\n* Add model to README and update init\r\n\r\n* Apply suggestions from code review\r\n\r\n* Replace asserts by value errors\r\n\r\n* Add suggestion by @ducviet00\r\n\r\n* Add model to doc tests\r\n\r\n* Simplify script\r\n\r\n* Improve README\r\n\r\n* a step ahead to fix\r\n\r\n* Update pair_input_test\r\n\r\n* Make all tokenizer tests pass - phew\r\n\r\n* Make style\r\n\r\n* Add LayoutLMv3 to CI job\r\n\r\n* Fix auto mapping\r\n\r\n* Fix CI job name\r\n\r\n* Make all processor tests pass\r\n\r\n* Make tests of LayoutLMv2 and LayoutXLM consistent\r\n\r\n* Add copied from statements to fast tokenizer\r\n\r\n* Add copied from 
statements to slow tokenizer\r\n\r\n* Remove add_visual_labels attribute\r\n\r\n* Fix tests\r\n\r\n* Add link to notebooks\r\n\r\n* Improve docs of LayoutLMv3Processor\r\n\r\n* Fix reference to section\r\n\r\nCo-authored-by: SaulLu \r\nCo-authored-by: Niels Rogge ", "code": "def cogview_attention(self, attention_scores, alpha=32):\n \n scaled_attention_scores = attention_scores / alpha\n max_value = scaled_attention_scores.amax(dim=(-1)).unsqueeze(-1)\n new_attention_scores = (scaled_attention_scores - max_value) * alpha\n return nn.Softmax(dim=-1)(new_attention_scores)\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 56, "n_words": 21, "vocab_size": 18, "complexity": 1, "nloc": 5, "token_counts": 58, "n_ast_nodes": 94, "n_identifiers": 12, "d_id": 7030, "documentation": { "docstring": "\n https://arxiv.org/abs/2105.13290 Section 2.4 Stabilization of training: Precision Bottleneck Relaxation\n (PB-Relax). A replacement of the original nn.Softmax(dim=-1)(attention_scores). Seems the new attention_probs\n will result in a slower speed and a little bias. Can use torch.allclose(standard_attention_probs,\n cogview_attention_probs, atol=1e-08) for comparison. The smaller atol (e.g., 1e-08), the better.\n ", "n_words": 44, "vocab_size": 40, "n_whitespaces": 80, "language": "en" } }, { "id": 176544, "commit_id": "1af7d49d70869081e5cb64d17165652f1b26c57b", "repo": "networkx", "path": "networkx/algorithms/planarity.py", "file_name": "planarity.py", "fun_name": "check_planarity", "commit_message": "Improve documentation of PlanarEmbedding class (#5523)\n\n* Improve documentation of PlanarEmbedding\r\n\r\n* Fix type\r\n\r\n* Make suggested changes\r\n\r\n* rst formatting nits.\r\n\r\n* Update networkx/algorithms/planarity.py\r\n\r\nCo-authored-by: Dan Schult \r\n\r\n* Run black for formatting\r\n\r\nCo-authored-by: Ross Barnowski \r\nCo-authored-by: Dan Schult ", "code": "def check_planarity(G, counterexample=False):\n \n\n planarity_state = LRPlanarity(G)\n embedding = planarity_state.lr_planarity()\n if embedding is None:\n # graph is not planar\n if counterexample:\n return False, get_counterexample(G)\n else:\n return False, None\n else:\n # graph is planar\n return True, embedding\n\n", "url": "https://github.com/networkx/networkx.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 107, "n_words": 35, "vocab_size": 22, "complexity": 3, "nloc": 10, "token_counts": 50, "n_ast_nodes": 86, "n_identifiers": 8, "d_id": 41953, "documentation": { "docstring": "Check if a graph is planar and return a counterexample or an embedding.\n\n A graph is planar iff it can be drawn in a plane without\n any edge intersections.\n\n Parameters\n ----------\n G : NetworkX graph\n counterexample : bool\n A Kuratowski subgraph (to proof non planarity) is only returned if set\n to true.\n\n Returns\n -------\n (is_planar, certificate) : (bool, NetworkX graph) tuple\n is_planar is true if the graph is planar.\n If the graph is planar `certificate` is a PlanarEmbedding\n otherwise it is a Kuratowski subgraph.\n\n Examples\n --------\n >>> G = nx.Graph([(0, 1), (0, 2)])\n >>> is_planar, P = nx.check_planarity(G)\n >>> print(is_planar)\n True\n\n When `G` is planar, a `PlanarEmbedding` instance is returned:\n\n >>> P.get_data()\n {0: [1, 2], 1: [0], 2: [0]}\n\n Notes\n -----\n A (combinatorial) embedding consists of cyclic orderings of the incident\n edges at each vertex. 
Given such an embedding there are multiple approaches\n discussed in literature to drawing the graph (subject to various\n constraints, e.g. integer coordinates), see e.g. [2].\n\n The planarity check algorithm and extraction of the combinatorial embedding\n is based on the Left-Right Planarity Test [1].\n\n A counterexample is only generated if the corresponding parameter is set,\n because the complexity of the counterexample generation is higher.\n\n References\n ----------\n .. [1] Ulrik Brandes:\n The Left-Right Planarity Test\n 2009\n http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.217.9208\n .. [2] Takao Nishizeki, Md Saidur Rahman:\n Planar graph drawing\n Lecture Notes Series on Computing: Volume 12\n 2004\n ", "n_words": 228, "vocab_size": 154, "n_whitespaces": 404, "language": "en" } }, { "id": 108776, "commit_id": "cf995d1304bfa7f660e7158b5121a46e54f869f2", "repo": "matplotlib", "path": "lib/matplotlib/patches.py", "file_name": "patches.py", "fun_name": "draw", "commit_message": "Remove ineffective exclusion of Arcs without parent Axes.\n\nThe `if not hasattr(self, 'axes'): raise RuntimeError(...)` check was\nineffectual, as artists now always have an Axes attribute, which can\njust be None for some artists. In fact, small Arcs are drawn just fine\nwithout a parent Axes; e.g.\n```\nfrom pylab import *\nfrom matplotlib.patches import *\nfig = figure()\nfig.add_artist(Ellipse((.2, .2), .1, .3, angle=45)) # for comparison\nfig.add_artist(Arc((.2, .2), .1, .3, angle=45, theta1=0, theta2=45))\n```\nworks just fine. Remove the check, and adjust the docs accordingly.\n\nOn the other hand, large arcs *did* previously fail,\nbut that occurred a bit further down, when computing\n`transforms.BboxTransformTo(self.axes.bbox)` (`self.axes` is None -->\nAttributeError). Fix that by using the figure bbox in that case (as the\npoint is to limit the drawing to the unclipped area, which is the whole\nfigure for Arcs without a parent Axes).", "code": "def draw(self, renderer):\n \n if not self.get_visible():\n return\n\n self._recompute_transform()\n\n width = self.convert_xunits(self.width)\n height = self.convert_yunits(self.height)\n\n # If the width and height of ellipse are not equal, take into account\n # stretching when calculating angles to draw between", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 96, "n_words": 36, "vocab_size": 31, "complexity": 11, "nloc": 50, "token_counts": 404, "n_ast_nodes": 74, "n_identifiers": 9, "d_id": 23337, "documentation": { "docstring": "\n Draw the arc to the given *renderer*.\n\n Notes\n -----\n Ellipses are normally drawn using an approximation that uses\n eight cubic Bezier splines. The error of this approximation\n is 1.89818e-6, according to this unverified source:\n\n Lancaster, Don. *Approximating a Circle or an Ellipse Using\n Four Bezier Cubic Splines.*\n\n https://www.tinaja.com/glib/ellipse4.pdf\n\n There is a use case where very large ellipses must be drawn\n with very high accuracy, and it is too expensive to render the\n entire ellipse with enough segments (either splines or line\n segments). 
Therefore, in the case where either radius of the\n ellipse is large enough that the error of the spline\n approximation will be visible (greater than one pixel offset\n from the ideal), a different technique is used.\n\n In that case, only the visible parts of the ellipse are drawn,\n with each visible arc using a fixed number of spline segments\n (8). The algorithm proceeds as follows:\n\n 1. The points where the ellipse intersects the axes (or figure)\n bounding box are located. (This is done by performing an inverse\n transformation on the bbox such that it is relative to the unit\n circle -- this makes the intersection calculation much easier than\n doing rotated ellipse intersection directly.)\n\n This uses the \"line intersecting a circle\" algorithm from:\n\n Vince, John. *Geometry for Computer Graphics: Formulae,\n Examples & Proofs.* London: Springer-Verlag, 2005.\n\n 2. The angles of each of the intersection points are calculated.\n\n 3. Proceeding counterclockwise starting in the positive\n x-direction, each of the visible arc-segments between the\n pairs of vertices are drawn using the Bezier arc\n approximation technique implemented in `.Path.arc`.\n ", "n_words": 258, "vocab_size": 160, "n_whitespaces": 541, "language": "en" } }, { "id": 297460, "commit_id": "0e2ebfe5c45716250280186234123f170e3bd08c", "repo": "core", "path": "homeassistant/components/bluetooth/base_scanner.py", "file_name": "base_scanner.py", "fun_name": "_async_setup_scanner_watchdog", "commit_message": "Move bluetooth watchdog into the scanner base class (#83888)", "code": "def _async_setup_scanner_watchdog(self) -> None:\n \n self._start_time = self._last_detection = MONOTONIC_TIME()\n if not self._cancel_watchdog:\n self._cancel_watchdog = async_track_time_interval(\n self.hass, self._async_scanner_watchdog, SCANNER_WATCHDOG_INTERVAL\n )\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 77, "n_words": 19, "vocab_size": 17, "complexity": 2, "nloc": 7, "token_counts": 41, "n_ast_nodes": 67, "n_identifiers": 10, "d_id": 96429, "documentation": { "docstring": "If something has restarted or updated, we need to restart the scanner.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 10208, "commit_id": "933415bfa1f9eb89f935037014dfed816eb9815d", "repo": "jina", "path": "tests/unit/helloworld/multimodal/test_executors.py", "file_name": "test_executors.py", "fun_name": "test_image_crafter_index", "commit_message": "feat: star routing (#3900)\n\n* feat(proto): adjust proto for star routing (#3844)\r\n\r\n* feat(proto): adjust proto for star routing\r\n\r\n* feat(proto): generate proto files\r\n\r\n* feat(grpc): refactor grpclet interface (#3846)\r\n\r\n* feat: refactor connection pool for star routing (#3872)\r\n\r\n* feat(k8s): add more labels to k8s deployments\r\n\r\n* feat(network): refactor connection pool\r\n\r\n* feat(network): refactor k8s pool\r\n\r\n* feat: star routing graph gateway (#3877)\r\n\r\n* feat: star routing - refactor grpc data runtime (#3887)\r\n\r\n* feat(runtimes): refactor grpc dataruntime\r\n\r\n* fix(tests): adapt worker runtime tests\r\n\r\n* fix(import): fix import\r\n\r\n* feat(proto): enable sending multiple lists (#3891)\r\n\r\n* feat: star routing gateway (#3893)\r\n\r\n* feat: star routing gateway all protocols (#3897)\r\n\r\n* test: add streaming and prefetch tests (#3901)\r\n\r\n* feat(head): new head runtime for star routing (#3899)\r\n\r\n* 
feat(head): new head runtime\r\n\r\n* feat(head): new head runtime\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(network): improve proto comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(worker): merge docs in worker runtime (#3905)\r\n\r\n* feat(worker): merge docs in worker runtime\r\n\r\n* feat(tests): assert after clean up\r\n\r\n* feat(tests): star routing runtime integration tests (#3908)\r\n\r\n* fix(tests): fix integration tests\r\n\r\n* test: test runtimes fast slow request (#3910)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table (#3915)\r\n\r\n* feat(zmq): purge zmq, zed, routing_table\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(zmq): adapt comment in dependency list\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix type tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: add test gateway to worker connection (#3921)\r\n\r\n* feat(pea): adapt peas for star routing (#3918)\r\n\r\n* feat(pea): adapt peas for star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(pea): add tests\r\n\r\n* feat(tests): add failing head pea test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(tests): integration tests for peas (#3923)\r\n\r\n* feat(tests): integration tests for peas\r\n\r\n* feat(pea): remove _inner_pea function\r\n\r\n* feat: star routing container pea (#3922)\r\n\r\n* test: rescue tests (#3942)\r\n\r\n* fix: fix streaming tests (#3945)\r\n\r\n* refactor: move docker run to run (#3948)\r\n\r\n* feat: star routing pods (#3940)\r\n\r\n* feat(pod): adapt pods for star routing\r\n\r\n* feat(pods): adapt basepod to star routing\r\n\r\n* feat(pod): merge pod and compound pod\r\n\r\n* feat(tests): fix tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(test): add container pea int test\r\n\r\n* feat(ci): remove more unnecessary tests\r\n\r\n* fix(tests): remove jinad runtime\r\n\r\n* feat(ci): remove latency tracking\r\n\r\n* fix(ci): fix ci def\r\n\r\n* fix(runtime): enable runtime to be exited\r\n\r\n* fix(tests): wrap runtime test in process\r\n\r\n* fix(runtimes): remove unused runtimes\r\n\r\n* feat(runtimes): improve cancel wait\r\n\r\n* fix(ci): build test pip again in ci\r\n\r\n* fix(tests): fix a test\r\n\r\n* fix(test): run async in its own process\r\n\r\n* feat(pod): include shard in activate msg\r\n\r\n* fix(pea): dont join\r\n\r\n* feat(pod): more debug out\r\n\r\n* feat(grpc): manage channels properly\r\n\r\n* feat(pods): remove exitfifo\r\n\r\n* feat(network): add simple send retry mechanism\r\n\r\n* fix(network): await pool close\r\n\r\n* fix(test): always close grpc server in worker\r\n\r\n* fix(tests): remove container pea from tests\r\n\r\n* fix(tests): reorder tests\r\n\r\n* fix(ci): split tests\r\n\r\n* fix(ci): allow alias setting\r\n\r\n* fix(test): skip a test\r\n\r\n* feat(pods): address comments\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* test: unblock skipped test (#3957)\r\n\r\n* feat: jinad pea (#3949)\r\n\r\n* feat: jinad pea\r\n\r\n* feat: jinad pea\r\n\r\n* test: remote peas\r\n\r\n* test: toplogy tests with jinad\r\n\r\n* ci: parallel jobs\r\n\r\n* feat(tests): add pod integration tests (#3958)\r\n\r\n* feat(tests): add pod integration tests\r\n\r\n* fix(tests): make tests less flaky\r\n\r\n* fix(test): fix test\r\n\r\n* test(pea): remote pea topologies (#3961)\r\n\r\n* test(pea): remote pea simple topology\r\n\r\n* test: remote pea topologies\r\n\r\n* refactor: refactor streamer result handling (#3960)\r\n\r\n* feat(k8s): adapt K8s Pod 
for StarRouting (#3964)\r\n\r\n* test: optimize k8s test\r\n\r\n* test: increase timeout and use different namespace\r\n\r\n* test: optimize k8s test\r\n\r\n* test: build and load image when needed\r\n\r\n* test: refactor k8s test\r\n\r\n* test: fix image name error\r\n\r\n* test: fix k8s image load\r\n\r\n* test: fix typoe port expose\r\n\r\n* test: update tests in connection pool and handling\r\n\r\n* test: remove unused fixture\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* test: parameterize docker images\r\n\r\n* feat(k8s): adapt k8s pod for star routing\r\n\r\n* fix(k8s): dont overwrite add/remove function in pool\r\n\r\n* fix(k8s): some fixes\r\n\r\n* fix(k8s): some more fixes\r\n\r\n* fix(k8s): linting\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix k8s unit tests\r\n\r\n* feat(k8s): complete k8s integration test\r\n\r\n* feat(k8s): finish k8s tests\r\n\r\n* feat(k8s): fix test\r\n\r\n* fix(tests): fix test with no name\r\n\r\n* feat(k8s): unify create/replace interface\r\n\r\n* feat(k8s): extract k8s port constants\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): wait for runtime being ready in tests\r\n\r\n* feat(k8s): address comments\r\n\r\nCo-authored-by: bwanglzu \r\n\r\n* feat(flow): adapt Flow for StarRouting (#3986)\r\n\r\n* feat(flow): add routes\r\n\r\n* feat(flow): adapt flow to star routing\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(flow): handle empty topologies\r\n\r\n* feat(k8s): allow k8s pool disabling\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(test): fix test with mock\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(flow): clean up tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat: add plot function (#3994)\r\n\r\n* fix(tests): avoid hanging tests\r\n\r\n* feat(flow): add type hinting\r\n\r\n* fix(test): fix duplicate exec name in test\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): enable jinad test again\r\n\r\n* fix(tests): random port fixture\r\n\r\n* fix(style): replace quotes\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(ci): bring back ci (#3997)\r\n\r\n* feat(ci): enable ci again\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat(ci): add latency tracking\r\n\r\n* feat(ci): bring back some tests\r\n\r\n* fix(tests): remove invalid port test\r\n\r\n* feat(ci): disable daemon and distributed tests\r\n\r\n* fix(tests): fix entrypoint in hub test\r\n\r\n* fix(tests): wait for gateway to be ready\r\n\r\n* fix(test): fix more tests\r\n\r\n* feat(flow): do rolling update and scale sequentially\r\n\r\n* fix(tests): fix more tests\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* feat: star routing hanging pods (#4011)\r\n\r\n* fix: try to handle hanging pods better\r\n\r\n* test: hanging pods test work\r\n\r\n* fix: fix topology graph problem\r\n\r\n* test: add unit test to graph\r\n\r\n* fix(tests): fix k8s tests\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s pool test\r\n\r\n* fix(test): fix k8s test\r\n\r\n* fix(test): fix k8s connection pool setting\r\n\r\n* fix(tests): make runtime test more reliable\r\n\r\n* fix(test): fix routes test\r\n\r\n* fix(tests): make rolling update test less flaky\r\n\r\n* feat(network): gurantee unique ports\r\n\r\n* feat(network): do round robin for shards\r\n\r\n* fix(ci): increase pytest timeout to 10 min\r\n\r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Joan Fontanals \r\n\r\n* 
fix(ci): fix ci file\r\n\r\n* feat(daemon): jinad pod for star routing\r\n\r\n* Revert \"feat(daemon): jinad pod for star routing\"\r\n\r\nThis reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92.\r\n\r\n* feat(daemon): remote jinad pod support (#4042)\r\n\r\n* feat(daemon): add pod tests for star routing\r\n\r\n* feat(daemon): add remote pod test\r\n\r\n* test(daemon): add remote pod arguments test\r\n\r\n* test(daemon): add async scale test\r\n\r\n* test(daemon): add rolling update test\r\n\r\n* test(daemon): fix host\r\n\r\n* feat(proto): remove message proto (#4051)\r\n\r\n* feat(proto): remove message proto\r\n\r\n* fix(tests): fix tests\r\n\r\n* fix(tests): fix some more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* fix(tests): fix more tests\r\n\r\n* feat(proto): put docs back in data\r\n\r\n* fix(proto): clean up\r\n\r\n* feat(proto): clean up\r\n\r\n* fix(tests): skip latency tracking\r\n\r\n* fix(test): fix hub test\r\n\r\n* fix(tests): fix k8s test\r\n\r\n* fix(test): some test clean up\r\n\r\n* fix(style): clean up style issues\r\n\r\n* feat(proto): adjust for rebase\r\n\r\n* fix(tests): bring back latency tracking\r\n\r\n* fix(tests): fix merge accident\r\n\r\n* feat(proto): skip request serialization (#4074)\r\n\r\n* feat: add reduce to star routing (#4070)\r\n\r\n* feat: add reduce on shards to head runtime\r\n\r\n* test: add reduce integration tests with fixed order\r\n\r\n* feat: add reduce on needs\r\n\r\n* chore: get_docs_matrix_from_request becomes public\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* docs: remove undeterministic results warning\r\n\r\n* fix: fix uses_after\r\n\r\n* test: assert correct num docs after reducing in test_external_pod\r\n\r\n* test: correct asserts after reduce in test_rolling_update\r\n\r\n* fix: no reduce if uses_after_address is set\r\n\r\n* fix: get_docs_from_request only if needed\r\n\r\n* fix: fix tests after merge\r\n\r\n* refactor: move reduce from data_request_handler to head\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* chore: apply suggestions\r\n\r\n* fix: fix asserts\r\n\r\n* chore: minor test fix\r\n\r\n* chore: apply suggestions\r\n\r\n* test: remove flow tests with external executor (pea)\r\n\r\n* fix: fix test_expected_messages_routing\r\n\r\n* fix: fix test_func_joiner\r\n\r\n* test: adapt k8s test\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix(k8s): fix static pool config\r\n\r\n* fix: use custom protoc doc generator image (#4088)\r\n\r\n* fix: use custom protoc doc generator image\r\n\r\n* fix(docs): minor doc improvement\r\n\r\n* fix(docs): use custom image\r\n\r\n* fix(docs): copy docarray\r\n\r\n* fix: doc building local only\r\n\r\n* fix: timeout doc building\r\n\r\n* fix: use updated args when building ContainerPea\r\n\r\n* test: add container PeaFactory test\r\n\r\n* fix: force pea close on windows (#4098)\r\n\r\n* fix: dont reduce if uses exist (#4099)\r\n\r\n* fix: dont use reduce if uses exist\r\n\r\n* fix: adjust reduce tests\r\n\r\n* fix: adjust more reduce tests\r\n\r\n* fix: fix more tests\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: ignore non jina resources (#4101)\r\n\r\n* feat(executor): enable async executors (#4102)\r\n\r\n* feat(daemon): daemon flow on star routing (#4096)\r\n\r\n* test(daemon): add remote flow test\r\n\r\n* feat(daemon): call scale in daemon\r\n\r\n* feat(daemon): remove tail args and identity\r\n\r\n* test(daemon): rename scalable executor\r\n\r\n* test(daemon): add a small 
delay in async test\r\n\r\n* feat(daemon): scale partial flow only\r\n\r\n* feat(daemon): call scale directly in partial flow store\r\n\r\n* test(daemon): use asyncio sleep\r\n\r\n* feat(daemon): enable flow level distributed tests\r\n\r\n* test(daemon): fix jinad env workspace config\r\n\r\n* test(daemon): fix pod test use new port rolling update\r\n\r\n* feat(daemon): enable distribuetd tests\r\n\r\n* test(daemon): remove duplicate tests and zed runtime test\r\n\r\n* test(daemon): fix stores unit test\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* feat(daemon): enable part of distributed tests\r\n\r\n* test: correct test paths\r\n\r\n* test(daemon): add client test for remote flows\r\n\r\n* test(daemon): send a request with jina client\r\n\r\n* test(daemon): assert async generator\r\n\r\n* test(daemon): small interval between tests\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): add flow test for container runtime\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): fix executor name\r\n\r\n* test(daemon): use async client fetch result\r\n\r\n* test(daemon): finish container flow test\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): enable distributed in ci\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): debug ci if else\r\n\r\n* test(daemon): decare flows and pods\r\n\r\n* test(daemon): correct test paths\r\n\r\n* test(daemon): add small delay for async tests\r\n\r\n* fix: star routing fixes (#4100)\r\n\r\n* docs: update docs\r\n\r\n* fix: fix Request.__repr__\r\n\r\n* docs: update flow remarks\r\n\r\n* docs: fix typo\r\n\r\n* test: add non_empty_fields test\r\n\r\n* chore: remove non_empty_fields test\r\n\r\n* feat: polling per endpoint (#4111)\r\n\r\n* feat(polling): polling per endpoint configurable\r\n\r\n* fix: adjust tests\r\n\r\n* feat(polling): extend documentation\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: clean up\r\n\r\n* fix: adjust more tests\r\n\r\n* fix: remove repeat from flaky test\r\n\r\n* fix: k8s test\r\n\r\n* feat(polling): address pr feedback\r\n\r\n* feat: improve docs\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* feat(grpc): support connect grpc server via ssl tunnel (#4092)\r\n\r\n* feat(grpc): support ssl grpc connect if port is 443\r\n\r\n* fix(grpc): use https option instead of detect port automatically\r\n\r\n* chore: fix typo\r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* fix: update jina/peapods/networking.py\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* test(networking): add test for peapods networking\r\n\r\n* fix: address comments\r\n\r\nCo-authored-by: Joan Fontanals \r\n\r\n* feat(polling): unify polling args (#4113)\r\n\r\n* fix: several issues for jinad pods (#4119)\r\n\r\n* fix: activate for jinad pods\r\n\r\n* fix: dont expose worker pod in partial daemon\r\n\r\n* fix: workspace setting\r\n\r\n* fix: containerized flows\r\n\r\n* fix: hub test\r\n\r\n* feat(daemon): remote peas on star routing (#4112)\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix request in peas\r\n\r\n* test(daemon): fix sync async client test\r\n\r\n* test(daemon): enable remote peas test\r\n\r\n* test(daemon): replace send message to send request\r\n\r\n* test(daemon): declare pea tests in ci\r\n\r\n* test(daemon): use pea args fixture\r\n\r\n* 
test(daemon): head pea use default host\r\n\r\n* test(daemon): fix peas topologies\r\n\r\n* test(daemon): fix pseudo naming\r\n\r\n* test(daemon): use default host as host\r\n\r\n* test(daemon): fix executor path\r\n\r\n* test(daemon): add remote worker back\r\n\r\n* test(daemon): skip local remote remote topology\r\n\r\n* fix: jinad pea test setup\r\n\r\n* fix: jinad pea tests\r\n\r\n* fix: remove invalid assertion\r\n\r\nCo-authored-by: jacobowitz \r\n\r\n* feat: enable daemon tests again (#4132)\r\n\r\n* feat: enable daemon tests again\r\n\r\n* fix: remove bogy empty script file\r\n\r\n* fix: more jinad test fixes\r\n\r\n* style: fix overload and cli autocomplete\r\n\r\n* fix: scale and ru in jinad\r\n\r\n* fix: fix more jinad tests\r\n\r\nCo-authored-by: Jina Dev Bot \r\n\r\n* fix: fix flow test\r\n\r\n* fix: improve pea tests reliability (#4136)\r\n\r\nCo-authored-by: Joan Fontanals \r\nCo-authored-by: Jina Dev Bot \r\nCo-authored-by: Deepankar Mahapatro \r\nCo-authored-by: bwanglzu \r\nCo-authored-by: AlaeddineAbdessalem \r\nCo-authored-by: Zhaofeng Miao <522856232@qq.com>", "code": "def test_image_crafter_index(encoder_doc_array, tmpdir):\n \n", "url": "https://github.com/jina-ai/jina.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 6, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 5, "token_counts": 49, "n_ast_nodes": 15, "n_identifiers": 3, "d_id": 1818, "documentation": { "docstring": "In this test, we input one ``DocumentArray`` with one ``Document``,\n and the `craft` method in the ``ImageCrafter`` returns chunks.\n In the ``ImageCrafter``, we filtered out all the modalities and only kept `image/jpeg`.\n So the 2 chunks should left only 1 chunk.\n And the blob value of the ``Document`` is not empty once we finished crafting since\n we converted image uri/datauri to blob.\n ", "n_words": 62, "vocab_size": 49, "n_whitespaces": 80, "language": "en" } }, { "id": 167165, "commit_id": "830130a543619fe945365fdea5e6e5877fe81c6f", "repo": "pandas", "path": "pandas/core/common.py", "file_name": "common.py", "fun_name": "get_rename_function", "commit_message": "TYP: Series.quantile (#47304)\n\n* TYP: Series.quantile\r\n\r\n* common.py", "code": "def get_rename_function(mapper):\n \n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 2, "nloc": 3, "token_counts": 25, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 39944, "documentation": { "docstring": "\n Returns a function that will map names/labels, dependent if mapper\n is a dict, Series or just a function.\n ", "n_words": 18, "vocab_size": 16, "n_whitespaces": 28, "language": "en" } }, { "id": 42474, "commit_id": "c6d9e0529eecce2c0742ca47135b28e5316611e0", "repo": "nltk", "path": "nltk/sem/boxer.py", "file_name": "boxer.py", "fun_name": "_call_boxer", "commit_message": "Update boxer.py\n\nUsed to have this py2 to py3 error\r\nTypeError: write() argument must be str, not bytes", "code": "def _call_boxer(self, candc_out, verbose=False):\n \n f = None\n try:\n fd, temp_filename = tempfile.mkstemp(\n prefix=\"boxer-\", suffix=\".in\", text=True\n )\n f = os.fdopen(fd, \"w\")\n f.write(candc_out.decode(\"utf-8\"))\n finally:\n if f:\n f.close()\n\n args = [\n \"--box\",\n \"false\",\n \"--semantics\",\n \"drs\",\n #'--flat', 'false', # removed from boxer\n \"--resolve\",\n [\"false\", \"true\"][self._resolve],\n \"--elimeq\",\n [\"false\", 
\"true\"][self._elimeq],\n \"--format\",\n \"prolog\",\n \"--instantiate\",\n \"true\",\n \"--input\",\n temp_filename,\n ]\n stdout = self._call(None, self._boxer_bin, args, verbose)\n os.remove(temp_filename)\n return stdout\n", "url": "https://github.com/nltk/nltk.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 373, "n_words": 60, "vocab_size": 53, "complexity": 3, "nloc": 30, "token_counts": 142, "n_ast_nodes": 242, "n_identifiers": 24, "d_id": 7559, "documentation": { "docstring": "\n Call the ``boxer`` binary with the given input.\n\n :param candc_out: str output from C&C parser\n :return: stdout\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 46, "language": "en" } }, { "id": 186613, "commit_id": "7d9e9a49005de7961e84d2a7c608db57dbab3046", "repo": "certbot", "path": "certbot-apache/certbot_apache/_internal/apache_util.py", "file_name": "apache_util.py", "fun_name": "unique_id", "commit_message": "Add typing to certbot.apache (#9071)\n\n* Add typing to certbot.apache\r\n\r\nCo-authored-by: Adrien Ferrand ", "code": "def unique_id() -> str:\n \n return binascii.hexlify(os.urandom(16)).decode(\"utf-8\")\n\n", "url": "https://github.com/certbot/certbot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 44, "n_identifiers": 7, "d_id": 45525, "documentation": { "docstring": " Returns an unique id to be used as a VirtualHost identifier", "n_words": 11, "vocab_size": 11, "n_whitespaces": 11, "language": "en" } }, { "id": 107683, "commit_id": "eb12b029ffe2f110540a4338684d1a729d1ddfc5", "repo": "matplotlib", "path": "lib/mpl_toolkits/axes_grid1/axes_divider.py", "file_name": "axes_divider.py", "fun_name": "add_auto_adjustable_area", "commit_message": "Document, test, and simplify impl. 
of auto_adjustable_area.\n\nDocument behavior of auto_adjustable_area, and slightly modernize the\nexample.\n\nSimplify its implementation: `Padded` is just size addition and\n`GetExtentHelper` and `SizeFromFunc` can reasonably be fused into a\nsingle class; none of them are used anywhere else, so just deprecate\nthem as public APIs.\n\nAdd a test.", "code": "def add_auto_adjustable_area(self, use_axes, pad=0.1, adjust_dirs=None):\n \n if adjust_dirs is None:\n adjust_dirs = [\"left\", \"right\", \"bottom\", \"top\"]\n for d in adjust_dirs:\n self.append_size(d, Size._AxesDecorationsSize(use_axes, d) + pad)\n\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 67, "n_words": 24, "vocab_size": 23, "complexity": 3, "nloc": 5, "token_counts": 56, "n_ast_nodes": 87, "n_identifiers": 9, "d_id": 22866, "documentation": { "docstring": "\n Add auto-adjustable padding around *use_axes* to take their decorations\n (title, labels, ticks, ticklabels) into account during layout.\n\n Parameters\n ----------\n use_axes : `~.axes.Axes` or list of `~.axes.Axes`\n The Axes whose decorations are taken into account.\n pad : float, optional\n Additional padding in inches.\n adjust_dirs : list of {\"left\", \"right\", \"bottom\", \"top\"}, optional\n The sides where padding is added; defaults to all four sides.\n ", "n_words": 62, "vocab_size": 50, "n_whitespaces": 152, "language": "en" } }, { "id": 187182, "commit_id": "d09112ab1f6db6aa605650fe1ff6a3028344f90d", "repo": "streamlink", "path": "tests/test_api_validate.py", "file_name": "test_api_validate.py", "fun_name": "test_failure_subschema", "commit_message": "plugin.api.validate: rewrite tests\n\nCompletely rewrite tests using pytest, with full coverage", "code": "def test_failure_subschema(self, obj):\n with pytest.raises(validate.ValidationError) as cm:\n validate.validate(validate.attr({\"foo\": str}), obj)\n assert_validationerror(cm.value, )\n\n", "url": "https://github.com/streamlink/streamlink.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 36, "n_words": 12, "vocab_size": 12, "complexity": 1, "nloc": 9, "token_counts": 44, "n_ast_nodes": 75, "n_identifiers": 12, "d_id": 45728, "documentation": { "docstring": "\n ValidationError(AttrSchema):\n Could not validate attribute 'foo'\n Context(type):\n Type of 1 should be str, but is int\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 76, "language": "en" } }, { "id": 206026, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/forms/widgets.py", "file_name": "widgets.py", "fun_name": "clear_checkbox_id", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def clear_checkbox_id(self, name):\n \n return name + \"_id\"\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 23, "n_identifiers": 3, "d_id": 51329, "documentation": { "docstring": "\n Given the name of the clear checkbox input, return the HTML id for it.\n ", "n_words": 14, "vocab_size": 12, "n_whitespaces": 29, "language": "en" } }, { "id": 221588, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/concurrent/futures/_base.py", "file_name": "_base.py", "fun_name": "done", "commit_message": "add 
python 3.10.4 for windows", "code": "def done(self):\n \n with self._condition:\n return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 35, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 3, "token_counts": 23, "n_ast_nodes": 39, "n_identifiers": 7, "d_id": 56443, "documentation": { "docstring": "Return True of the future was cancelled or finished executing.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 67253, "commit_id": "494bd9ef78313436f0424b918f200dab8fc7c20b", "repo": "erpnext", "path": "erpnext/regional/report/provident_fund_deductions/provident_fund_deductions.py", "file_name": "provident_fund_deductions.py", "fun_name": "get_data", "commit_message": "style: format code with black", "code": "def get_data(filters):\n\tdata = []\n\n\tconditions = get_conditions(filters)\n\n\tsalary_slips = frappe.db.sql(\n\t\t\n\t\t% (conditions),\n\t\tas_dict=1,\n\t)\n\n\tcomponent_type_dict = frappe._dict(\n\t\tfrappe.db.sql(\n\t\t\t\n\t\t)\n\t)\n\n\tif not len(component_type_dict):\n\t\treturn []\n\n\tentry = frappe.db.sql(\n\t\t\n\t\t% (conditions, \", \".join([\"%s\"] * len(component_type_dict))),\n\t\ttuple(component_type_dict.keys()),\n\t\tas_dict=1,\n\t)\n\n\tdata_list = prepare_data(entry, component_type_dict)\n\n\tfor d in salary_slips:\n\t\ttotal = 0\n\t\tif data_list.get(d.name):\n\t\t\temployee = {\n\t\t\t\t\"employee\": data_list.get(d.name).get(\"employee\"),\n\t\t\t\t\"employee_name\": data_list.get(d.name).get(\"employee_name\"),\n\t\t\t\t\"pf_account\": data_list.get(d.name).get(\"pf_account\"),\n\t\t\t}\n\n\t\t\tif data_list.get(d.name).get(\"Provident Fund\"):\n\t\t\t\temployee[\"pf_amount\"] = data_list.get(d.name).get(\"Provident Fund\")\n\t\t\t\ttotal += data_list.get(d.name).get(\"Provident Fund\")\n\n\t\t\tif data_list.get(d.name).get(\"Additional Provident Fund\"):\n\t\t\t\temployee[\"additional_pf\"] = data_list.get(d.name).get(\"Additional Provident Fund\")\n\t\t\t\ttotal += data_list.get(d.name).get(\"Additional Provident Fund\")\n\n\t\t\tif data_list.get(d.name).get(\"Provident Fund Loan\"):\n\t\t\t\temployee[\"pf_loan\"] = data_list.get(d.name).get(\"Provident Fund Loan\")\n\t\t\t\ttotal += data_list.get(d.name).get(\"Provident Fund Loan\")\n\n\t\t\temployee[\"total\"] = total\n\n\t\t\tdata.append(employee)\n\n\treturn data\n\n\n@frappe.whitelist()", "url": "https://github.com/frappe/erpnext.git", "language": "Python", "ast_errors": "@frappe.whitelist()", "n_ast_errors": 1, "ast_levels": 17, "n_whitespaces": 67, "n_words": 107, "vocab_size": 60, "complexity": 7, "nloc": 52, "token_counts": 337, "n_ast_nodes": 586, "n_identifiers": 26, "d_id": 14456, "documentation": { "docstring": " select sal.name from `tabSalary Slip` sal\n\t\twhere docstatus = 1 %s\n\t\t select name, component_type from `tabSalary Component`\n\t\twhere component_type in ('Provident Fund', 'Additional Provident Fund', 'Provident Fund Loan') select sal.name, sal.employee, sal.employee_name, ded.salary_component, ded.amount\n\t\tfrom `tabSalary Slip` sal, `tabSalary Detail` ded\n\t\twhere sal.name = ded.parent\n\t\tand ded.parentfield = 'deductions'\n\t\tand ded.parenttype = 'Salary Slip'\n\t\tand sal.docstatus = 1 %s\n\t\tand ded.salary_component in (%s)\n\t", "n_words": 63, "vocab_size": 40, "n_whitespaces": 55, "language": "en" } }, { "id": 45966, "commit_id": 
"e1134590973355549272b1f3a213dbfa29698df7", "repo": "airflow", "path": "airflow/cli/commands/dag_command.py", "file_name": "dag_command.py", "fun_name": "dag_list_import_errors", "commit_message": "Add `list-import-errors` to `airflow dags` command (#22084)\n\nThis will help users to see the dags with import error and enable scripts\r\nprocess the output", "code": "def dag_list_import_errors(args):\n \n dagbag = DagBag(process_subdir(args.subdir))\n data = []\n for filename, errors in dagbag.import_errors.items():\n data.append({\"filepath\": filename, \"error\": errors})\n AirflowConsole().print_as(\n data=data,\n output=args.output,\n )\n\n\n@cli_utils.action_cli\n@suppress_logs_and_warning", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "@cli_utils.action_cli\n@suppress_logs_and_warning", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 60, "n_words": 23, "vocab_size": 21, "complexity": 2, "nloc": 9, "token_counts": 65, "n_ast_nodes": 119, "n_identifiers": 18, "d_id": 8751, "documentation": { "docstring": "Displays dags with import errors on the command line", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 223623, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/email/_parseaddr.py", "file_name": "_parseaddr.py", "fun_name": "mktime_tz", "commit_message": "add python 3.10.4 for windows", "code": "def mktime_tz(data):\n \n if data[9] is None:\n # No zone info, so localtime is better assumption than GMT\n return time.mktime(data[:8] + (-1,))\n else:\n t = calendar.timegm(data)\n return t - data[9]\n\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 66, "n_words": 29, "vocab_size": 25, "complexity": 2, "nloc": 6, "token_counts": 48, "n_ast_nodes": 79, "n_identifiers": 7, "d_id": 57013, "documentation": { "docstring": "Turn a 10-tuple as returned by parsedate_tz() into a POSIX timestamp.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 20660, "commit_id": "f3166e673fe8d40277b804d35d77dcdb760fc3b3", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/pyparsing/unicode.py", "file_name": "unicode.py", "fun_name": "identbodychars", "commit_message": "check point progress on only bringing in pip==22.0.4 (#4966)\n\n* vendor in pip==22.0.4\r\n\r\n* updating vendor packaging version\r\n\r\n* update pipdeptree to fix pipenv graph with new version of pip.\r\n\r\n* Vendoring of pip-shims 0.7.0\r\n\r\n* Vendoring of requirementslib 1.6.3\r\n\r\n* Update pip index safety restrictions patch for pip==22.0.4\r\n\r\n* Update patches\r\n\r\n* exclude pyptoject.toml from black to see if that helps.\r\n\r\n* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4", "code": "def identbodychars(cls):\n \n return \"\".join(\n sorted(\n set(\n cls.identchars\n + \"0123456789\"\n + \"\".join(\n [c for c in cls._chars_for_ranges if (\"_\" + c).isidentifier()]\n )\n )\n )\n )\n\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 21, "n_whitespaces": 196, "n_words": 24, "vocab_size": 18, "complexity": 3, "nloc": 12, "token_counts": 48, "n_ast_nodes": 86, "n_identifiers": 9, "d_id": 3469, "documentation": { "docstring": "\n all characters in this range that are valid identifier body characters,\n plus 
the digits 0-9\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 37, "language": "en" } }, { "id": 37498, "commit_id": "57e6464ac9a31156f1c93e59107323e6ec01309e", "repo": "transformers", "path": "src/transformers/testing_utils.py", "file_name": "testing_utils.py", "fun_name": "require_tf", "commit_message": "Update all require decorators to use skipUnless when possible (#16999)", "code": "def require_tf(test_case):\n \n return unittest.skipUnless(is_tf_available(), \"test requires TensorFlow\")(test_case)\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 13, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 20, "n_ast_nodes": 37, "n_identifiers": 5, "d_id": 6803, "documentation": { "docstring": "\n Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.\n ", "n_words": 15, "vocab_size": 15, "n_whitespaces": 22, "language": "en" } }, { "id": 107180, "commit_id": "ec4dfbc3c83866f487ff0bc9c87b0d43a1c02b22", "repo": "matplotlib", "path": "lib/matplotlib/tests/test_constrainedlayout.py", "file_name": "test_constrainedlayout.py", "fun_name": "test_constrained_layout23", "commit_message": "ENH: implement and use base layout_engine for more flexible layout.", "code": "def test_constrained_layout23():\n \n\n for i in range(2):\n fig = plt.figure(layout=\"constrained\", clear=True, num=\"123\")\n gs = fig.add_gridspec(1, 2)\n sub = gs[0].subgridspec(2, 2)\n fig.suptitle(\"Suptitle{}\".format(i))\n\n\n@image_comparison(['test_colorbar_location.png'],\n remove_text=True, style='mpl20')", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "@image_comparison(['test_colorbar_location.png'],\n remove_text=True, style='mpl20')", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 73, "n_words": 23, "vocab_size": 20, "complexity": 2, "nloc": 6, "token_counts": 65, "n_ast_nodes": 134, "n_identifiers": 18, "d_id": 22632, "documentation": { "docstring": "\n Comment in #11035: suptitle used to cause an exception when\n reusing a figure w/ CL with ``clear=True``.\n ", "n_words": 17, "vocab_size": 17, "n_whitespaces": 27, "language": "en" } }, { "id": 181603, "commit_id": "388616b6247ca4ea8de4e2f340d6206aee523541", "repo": "tpot", "path": "tests/driver_tests.py", "file_name": "driver_tests.py", "fun_name": "test_positive_integer_or_none_3", "commit_message": "Revert \"Deployed 7ccda9a with MkDocs version: 1.3.0\"\n\nThis reverts commit bd9629c40e01241766197119b581a99409b07068.", "code": "def test_positive_integer_or_none_3():\n \n assert_raises(Exception, positive_integer_or_none, 'foobar')\n\n", "url": "https://github.com/EpistasisLab/tpot.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 11, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 25, "n_identifiers": 4, "d_id": 43392, "documentation": { "docstring": "Assert that the TPOT CLI interface's positive_integer_or_none parsing throws an exception when n is not an integer and not None.", "n_words": 20, "vocab_size": 18, "n_whitespaces": 19, "language": "en" } }, { "id": 73182, "commit_id": "d10f15e55806c6944827d801cd9c2d53f5da4186", "repo": "wagtail", "path": "wagtail/contrib/modeladmin/options.py", "file_name": "options.py", "fun_name": "get_list_display_add_buttons", "commit_message": "Reformat with black", "code": "def 
get_list_display_add_buttons(self, request):\n \n return self.list_display_add_buttons or self.get_list_display(request)[0]\n", "url": "https://github.com/wagtail/wagtail.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 2, "nloc": 2, "token_counts": 22, "n_ast_nodes": 36, "n_identifiers": 5, "d_id": 15977, "documentation": { "docstring": "\n Return the name of the field/method from list_display where action\n buttons should be added. Defaults to the first item from\n get_list_display()\n ", "n_words": 21, "vocab_size": 18, "n_whitespaces": 50, "language": "en" } }, { "id": 185867, "commit_id": "67947d5806bb3181eba349f0da3fd35e0542d1be", "repo": "textual", "path": "src/textual/widgets/_placeholder.py", "file_name": "_placeholder.py", "fun_name": "_update_size_variant", "commit_message": "Fix documentation about the variant 'size'.", "code": "def _update_size_variant(self) -> None:\n \n width, height = self.size\n position_data = {\n \"width\": width,\n \"height\": height,\n }\n self.update(Panel(Align.center(Pretty(position_data)), title=\"Placeholder\"))\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 75, "n_words": 18, "vocab_size": 16, "complexity": 1, "nloc": 11, "token_counts": 48, "n_ast_nodes": 83, "n_identifiers": 12, "d_id": 45216, "documentation": { "docstring": "Update the placeholder with the \"size\" variant.\n\n This variant shows the the size of the widget.\n ", "n_words": 16, "vocab_size": 12, "n_whitespaces": 30, "language": "en" } }, { "id": 200100, "commit_id": "1e522ee112f19216f367b457b6804fd58b94f28b", "repo": "sympy", "path": "sympy/physics/mechanics/kane.py", "file_name": "kane.py", "fun_name": "mass_matrix_full_implicit", "commit_message": "redo of #22626 based on feedback", "code": "def mass_matrix_full_implicit(self):\n \n return self._mass_matrix_full(False)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 24, "n_identifiers": 3, "d_id": 49526, "documentation": { "docstring": "The mass matrix of the system, augmented by the kinematic\n differential equations in implicit form.", "n_words": 15, "vocab_size": 14, "n_whitespaces": 21, "language": "en" } }, { "id": 207402, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/admin_scripts/tests.py", "file_name": "tests.py", "fun_name": "test_import_error", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_import_error(self):\n \n self.write_settings_with_import_error(\"settings.py\")\n args = [\"check\", \"admin_scripts\"]\n out, err = self.run_manage(args)\n self.assertNoOutput(out)\n self.assertOutput(err, \"No module named\")\n self.assertOutput(err, \"foo42bar\")\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 67, "n_words": 18, "vocab_size": 16, "complexity": 1, "nloc": 7, "token_counts": 51, "n_ast_nodes": 93, "n_identifiers": 9, "d_id": 51952, "documentation": { "docstring": "\n import error: manage.py builtin commands shows useful diagnostic info\n when settings with import errors is provided (#14130).\n ", "n_words": 17, "vocab_size": 16, "n_whitespaces": 39, 
"language": "en" } }, { "id": 111253, "commit_id": "a79cd3542b3dd667d8a97293462e22ed26a04ee5", "repo": "spaCy", "path": "spacy/tests/test_displacy.py", "file_name": "test_displacy.py", "fun_name": "test_displacy_parse_spans_with_kb_id_options", "commit_message": "Add displacy support for overlapping Spans (#10332)\n\n* Fix docstring for EntityRenderer\r\n\r\n* Add warning in displacy if doc.spans are empty\r\n\r\n* Implement parse_spans converter\r\n\r\nOne notable change here is that the default spans_key is sc, and\r\nit's set by the user through the options.\r\n\r\n* Implement SpanRenderer\r\n\r\nHere, I implemented a SpanRenderer that looks similar to the\r\nEntityRenderer except for some templates. The spans_key, by default, is\r\nset to sc, but can be configured in the options (see parse_spans). The\r\nway I rendered these spans is per-token, i.e., I first check if each\r\ntoken (1) belongs to a given span type and (2) a starting token of a\r\ngiven span type. Once I have this information, I render them into the\r\nmarkup.\r\n\r\n* Fix mypy issues on typing\r\n\r\n* Add tests for displacy spans support\r\n\r\n* Update colors from RGB to hex\r\n\r\nCo-authored-by: Ines Montani \r\n\r\n* Remove unnecessary CSS properties\r\n\r\n* Add documentation for website\r\n\r\n* Remove unnecesasry scripts\r\n\r\n* Update wording on the documentation\r\n\r\nCo-authored-by: Sofie Van Landeghem \r\n\r\n* Put typing dependency on top of file\r\n\r\n* Put back z-index so that spans overlap properly\r\n\r\n* Make warning more explicit for spans_key\r\n\r\nCo-authored-by: Ines Montani \r\nCo-authored-by: Sofie Van Landeghem ", "code": "def test_displacy_parse_spans_with_kb_id_options(en_vocab):\n \n doc = Doc(en_vocab, words=[\"Welcome\", \"to\", \"the\", \"Bank\", \"of\", \"China\"])\n doc.spans[\"sc\"] = [\n Span(doc, 3, 6, \"ORG\", kb_id=\"Q790068\"),\n Span(doc, 5, 6, \"GPE\", kb_id=\"Q148\"),\n ]\n\n spans = displacy.parse_spans(\n doc, {\"kb_url_template\": \"https://wikidata.org/wiki/{}\"}\n )\n assert isinstance(spans, dict)\n assert spans[\"text\"] == \"Welcome to the Bank of China \"\n assert spans[\"spans\"] == [\n {\n \"start\": 15,\n \"end\": 28,\n \"start_token\": 3,\n \"end_token\": 6,\n \"label\": \"ORG\",\n \"kb_id\": \"Q790068\",\n \"kb_url\": \"https://wikidata.org/wiki/Q790068\",\n },\n {\n \"start\": 23,\n \"end\": 28,\n \"start_token\": 5,\n \"end_token\": 6,\n \"label\": \"GPE\",\n \"kb_id\": \"Q148\",\n \"kb_url\": \"https://wikidata.org/wiki/Q148\",\n },\n ]\n\n", "url": "https://github.com/explosion/spaCy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 315, "n_words": 82, "vocab_size": 57, "complexity": 1, "nloc": 31, "token_counts": 165, "n_ast_nodes": 294, "n_identifiers": 12, "d_id": 24366, "documentation": { "docstring": "Test that spans with kb_id on a Doc are converted into displaCy's format", "n_words": 13, "vocab_size": 13, "n_whitespaces": 12, "language": "en" } }, { "id": 278622, "commit_id": "3613c3defc39c236fb1592c4f7ba1a9cc887343a", "repo": "keras", "path": "keras/backend.py", "file_name": "backend.py", "fun_name": "__getitem__", "commit_message": "Remove pylint comments.\n\nPiperOrigin-RevId: 452353044", "code": "def __getitem__(self, key):\n \n if key is None:\n key = self._key()\n\n value = self._get_recursive(key)\n if value is None:\n value = self[key] = self.default_factory()\n return value\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, 
"ast_levels": 11, "n_whitespaces": 81, "n_words": 24, "vocab_size": 14, "complexity": 3, "nloc": 7, "token_counts": 47, "n_ast_nodes": 78, "n_identifiers": 7, "d_id": 82635, "documentation": { "docstring": "Gets the value at key (or current context), or sets default value.\n\n Args:\n key: May be `None` or `Graph`object. When `None`, the key is set to\n the current context.\n\n Returns:\n Either the cached or default value.\n ", "n_words": 36, "vocab_size": 27, "n_whitespaces": 86, "language": "en" } }, { "id": 241668, "commit_id": "5693a94c320297cf007f3bfd13ce4d7deeb1954a", "repo": "lightning", "path": "pytorch_lightning/trainer/connectors/checkpoint_connector.py", "file_name": "checkpoint_connector.py", "fun_name": "resume_end", "commit_message": "Extend the deprecation of `Trainer(resume_from_checkpoint)` (#11334)", "code": "def resume_end(self) -> None:\n \n assert self.trainer.state.fn is not None\n if self.resume_checkpoint_path:\n if self.trainer.state.fn == TrainerFn.FITTING:\n rank_zero_info(f\"Restored all states from the checkpoint file at {self.resume_checkpoint_path}\")\n elif self.trainer.state.fn in (TrainerFn.VALIDATING, TrainerFn.TESTING, TrainerFn.PREDICTING):\n rank_zero_info(f\"Loaded model weights from checkpoint at {self.resume_checkpoint_path}\")\n # TODO: remove resume_from_checkpoint_fit_path in v2.0\n if (\n self.trainer.state.fn == TrainerFn.FITTING\n and self.resume_checkpoint_path == self.resume_from_checkpoint_fit_path\n ):\n self.resume_from_checkpoint_fit_path = None\n self.resume_checkpoint_path = None\n self._loaded_checkpoint = {}\n\n # clear cache after restore\n torch.cuda.empty_cache()\n\n # wait for all to catch up\n self.trainer.strategy.barrier(\"CheckpointConnector.resume_end\")\n", "url": "https://github.com/Lightning-AI/lightning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 245, "n_words": 76, "vocab_size": 55, "complexity": 6, "nloc": 18, "token_counts": 126, "n_ast_nodes": 219, "n_identifiers": 19, "d_id": 69643, "documentation": { "docstring": "Signal the connector that all states have resumed and memory for the checkpoint object can be\n released.", "n_words": 17, "vocab_size": 16, "n_whitespaces": 23, "language": "en" } }, { "id": 268472, "commit_id": "b1ff0f4ebc7e964f8f67ffc344815a0d23577f45", "repo": "ansible", "path": "lib/ansible/cli/arguments/option_helpers.py", "file_name": "option_helpers.py", "fun_name": "unfrack_path", "commit_message": "vault secrets file, keep context when symlink (#78734)\n\n* vault secrets file, keep context when symlink\r\n\r\n\tfixes #18319\r\n\r\nCo-authored-by: Sloane Hertel <19572925+s-hertel@users.noreply.github.com>", "code": "def unfrack_path(pathsep=False, follow=True):\n ", "url": "https://github.com/ansible/ansible.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 6, "n_words": 3, "vocab_size": 3, "complexity": 1, "nloc": 3, "token_counts": 16, "n_ast_nodes": 21, "n_identifiers": 3, "d_id": 79515, "documentation": { "docstring": "Turn an Option's data into a single path in Ansible locations", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 166591, "commit_id": "e9350a4affbb424aaecad279f638a0dd1584df68", "repo": "pandas", "path": "pandas/tseries/frequencies.py", "file_name": "frequencies.py", "fun_name": "get_freq", "commit_message": "infer_freq handle non-nano (#47126)\n\n* infer_freq handle non-nano\r\n\r\n* remove unused import", "code": "def get_freq(self) -> 
str | None:\n \n if not self.is_monotonic or not self.index._is_unique:\n return None\n\n delta = self.deltas[0]\n ppd = periods_per_day(self._reso)\n if delta and _is_multiple(delta, ppd):\n return self._infer_daily_rule()\n\n # Business hourly, maybe. 17: one day / 65: one weekend\n if self.hour_deltas in ([1, 17], [1, 65], [1, 17, 65]):\n return \"BH\"\n\n # Possibly intraday frequency. Here we use the\n # original .asi8 values as the modified values\n # will not work around DST transitions. See #8772\n if not self.is_unique_asi8:\n return None\n\n delta = self.deltas_asi8[0]\n pph = ppd // 24\n ppm = pph // 60\n pps = ppm // 60\n if _is_multiple(delta, pph):\n # Hours\n return _maybe_add_count(\"H\", delta / pph)\n elif _is_multiple(delta, ppm):\n # Minutes\n return _maybe_add_count(\"T\", delta / ppm)\n elif _is_multiple(delta, pps):\n # Seconds\n return _maybe_add_count(\"S\", delta / pps)\n elif _is_multiple(delta, (pps // 1000)):\n # Milliseconds\n return _maybe_add_count(\"L\", delta / (pps // 1000))\n elif _is_multiple(delta, (pps // 1_000_000)):\n # Microseconds\n return _maybe_add_count(\"U\", delta / (pps // 1_000_000))\n else:\n # Nanoseconds\n return _maybe_add_count(\"N\", delta)\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 487, "n_words": 162, "vocab_size": 94, "complexity": 12, "nloc": 35, "token_counts": 210, "n_ast_nodes": 367, "n_identifiers": 20, "d_id": 39834, "documentation": { "docstring": "\n Find the appropriate frequency string to describe the inferred\n frequency of self.i8values\n\n Returns\n -------\n str or None\n ", "n_words": 17, "vocab_size": 15, "n_whitespaces": 60, "language": "en" } }, { "id": 337338, "commit_id": "5668270de74a09e5bff15891054f73ddbb1176ac", "repo": "accelerate", "path": "src/accelerate/test_utils/testing.py", "file_name": "testing.py", "fun_name": "require_tensorflow", "commit_message": "Add logging capabilities (#293)\n\nCo-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>\r\n\r\n- Added experiment tracking API, and support for Weights and Biases, TensorBoard, and CometML + Tests\r\n- Added `tensorflow` to a new dependency list to be used during tests\r\n- Added three new functions in `Accelerator` to interact with the API", "code": "def require_tensorflow(test_case):\n \n if not is_tensorflow_available():\n return unittest.skip(\"test requires TensorFlow\")(test_case)\n else:\n return test_case\n\n", "url": "https://github.com/huggingface/accelerate.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 35, "n_words": 12, "vocab_size": 11, "complexity": 2, "nloc": 5, "token_counts": 26, "n_ast_nodes": 49, "n_identifiers": 5, "d_id": 121034, "documentation": { "docstring": "\n Decorator marking a test that requires TensorFlow installed. These tests are skipped when TensorFlow isn't\n installed\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 26, "language": "en" } }, { "id": 42464, "commit_id": "0fac0c0f8e4618c2bdd3d2137d5fb8a80f581246", "repo": "nltk", "path": "nltk/metrics/paice.py", "file_name": "paice.py", "fun_name": "_errt", "commit_message": "Update black to 22.3.0\n\nThe most recent release of Click (8.1.0) was breaking Black. 
See psf/black#2964", "code": "def _errt(self):\n \n # Count (UI, OI) pairs for truncation points until we find the segment where (ui, oi) crosses the truncation line\n self.coords = self._get_truncation_coordinates()\n if (0.0, 0.0) in self.coords:\n # Truncation line goes through origo, so ERRT cannot be counted\n if (self.ui, self.oi) != (0.0, 0.0):\n return float(\"inf\")\n else:\n return float(\"nan\")\n if (self.ui, self.oi) == (0.0, 0.0):\n # (ui, oi) is origo; define errt as 0.0\n return 0.0\n # Count the intersection point\n # Note that (self.ui, self.oi) cannot be (0.0, 0.0) and self.coords has different coordinates\n # so we have actual line segments instead of a line segment and a point\n intersection = _count_intersection(\n ((0, 0), (self.ui, self.oi)), self.coords[-2:]\n )\n # Count OP (length of the line from origo to (ui, oi))\n op = sqrt(self.ui**2 + self.oi**2)\n # Count OT (length of the line from origo to truncation line that goes through (ui, oi))\n ot = sqrt(intersection[0] ** 2 + intersection[1] ** 2)\n # OP / OT tells how well the stemming algorithm works compared to just truncating words\n return op / ot\n", "url": "https://github.com/nltk/nltk.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 383, "n_words": 175, "vocab_size": 100, "complexity": 4, "nloc": 15, "token_counts": 157, "n_ast_nodes": 230, "n_identifiers": 12, "d_id": 7553, "documentation": { "docstring": "Count Error-Rate Relative to Truncation (ERRT).\n\n :return: ERRT, length of the line from origo to (UI, OI) divided by\n the length of the line from origo to the point defined by the same\n line when extended until the truncation line.\n :rtype: float\n ", "n_words": 42, "vocab_size": 28, "n_whitespaces": 77, "language": "en" } }, { "id": 100370, "commit_id": "c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf", "repo": "faceswap", "path": "lib/utils.py", "file_name": "utils.py", "fun_name": "get_tf_version", "commit_message": "Update code to support Tensorflow versions up to 2.8 (#1213)\n\n* Update maximum tf version in setup + requirements\r\n\r\n* - bump max version of tf version in launcher\r\n- standardise tf version check\r\n\r\n* update keras get_custom_objects for tf>2.6\r\n\r\n* bugfix: force black text in GUI file dialogs (linux)\r\n\r\n* dssim loss - Move to stock tf.ssim function\r\n\r\n* Update optimizer imports for compatibility\r\n\r\n* fix logging for tf2.8\r\n\r\n* Fix GUI graphing for TF2.8\r\n\r\n* update tests\r\n\r\n* bump requirements.txt versions\r\n\r\n* Remove limit on nvidia-ml-py\r\n\r\n* Graphing bugfixes\r\n - Prevent live graph from displaying if data not yet available\r\n\r\n* bugfix: Live graph. 
Collect loss labels correctly\r\n\r\n* fix: live graph - swallow inconsistent loss errors\r\n\r\n* Bugfix: Prevent live graph from clearing during training\r\n\r\n* Fix graphing for AMD", "code": "def get_tf_version():\n \n global _TF_VERS # pylint:disable=global-statement\n if _TF_VERS is None:\n import tensorflow as tf # pylint:disable=import-outside-toplevel\n _TF_VERS = float(\".\".join(tf.__version__.split(\".\")[:2])) # pylint:disable=no-member\n return _TF_VERS\n\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 52, "n_words": 23, "vocab_size": 18, "complexity": 2, "nloc": 6, "token_counts": 40, "n_ast_nodes": 75, "n_identifiers": 8, "d_id": 19859, "documentation": { "docstring": " Obtain the major.minor version of currently installed Tensorflow.\n\n Returns\n -------\n float\n The currently installed tensorflow version\n ", "n_words": 16, "vocab_size": 13, "n_whitespaces": 36, "language": "en" } }, { "id": 101714, "commit_id": "e2a77e7c6e84e81f642cb22f528e25e3f2d2dbc1", "repo": "faceswap", "path": "tools/alignments/jobs.py", "file_name": "jobs.py", "fun_name": "_compile_output", "commit_message": "Alignments Tool - Typing, Documentation + Re-org", "code": "def _compile_output(self) -> Union[List[str], List[Tuple[str, int]]]:\n \n action = self._job.replace(\"-\", \"_\")\n processor = getattr(self, f\"_get_{action}\")\n logger.debug(\"Processor: %s\", processor)\n return [item for item in processor()] # pylint:disable=unnecessary-comprehension\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 61, "n_words": 25, "vocab_size": 24, "complexity": 2, "nloc": 12, "token_counts": 63, "n_ast_nodes": 106, "n_identifiers": 15, "d_id": 21118, "documentation": { "docstring": " Compile list of frames that meet criteria\n\n Returns\n -------\n list\n List of filenames or filenames and face indices for the selected criteria\n ", "n_words": 22, "vocab_size": 18, "n_whitespaces": 62, "language": "en" } }, { "id": 37157, "commit_id": "5da33f872913255d64717efe745a053975bbc28e", "repo": "transformers", "path": "src/transformers/modeling_utils.py", "file_name": "modeling_utils.py", "fun_name": "find_submodule_and_param_name", "commit_message": "[modeling utils] revamp `from_pretrained(..., low_cpu_mem_usage=True)` + tests (#16657)\n\n* add low_cpu_mem_usage tests\r\n\r\n* wip: revamping\r\n\r\n* wip\r\n\r\n* install /usr/bin/time\r\n\r\n* wip\r\n\r\n* cleanup\r\n\r\n* cleanup\r\n\r\n* cleanup\r\n\r\n* cleanup\r\n\r\n* cleanup\r\n\r\n* fix assert\r\n\r\n* put the wrapper back\r\n\r\n* cleanup; switch to bert-base-cased\r\n\r\n* Trigger CI\r\n\r\n* Trigger CI", "code": "def find_submodule_and_param_name(model, long_key, start_prefix):\n \n\n if len(start_prefix) > 0 and long_key.startswith(start_prefix):\n long_key = \".\".join(long_key.split(\".\")[1:])\n\n split_key = long_key.split(\".\")\n submodule = model\n while len(split_key) > 1:\n if hasattr(submodule, split_key[0]):\n submodule = getattr(submodule, split_key[0])\n del split_key[0]\n else:\n submodule = None\n break\n if submodule == model:\n submodule = None\n return submodule, split_key[0]\n\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 140, "n_words": 47, "vocab_size": 33, "complexity": 6, "nloc": 15, "token_counts": 109, "n_ast_nodes": 
178, "n_identifiers": 12, "d_id": 6748, "documentation": { "docstring": "\n A helper util to find the last sub-module and the param/buffer name. If `start_prefix` is supplied it'll be removed\n from the start of the key\n ", "n_words": 25, "vocab_size": 22, "n_whitespaces": 35, "language": "en" } }, { "id": 47694, "commit_id": "49e336ae0302b386a2f47269a6d13988382d975f", "repo": "airflow", "path": "tests/utils/test_task_group.py", "file_name": "test_task_group.py", "fun_name": "test_task_group_context_mix", "commit_message": "Replace usage of `DummyOperator` with `EmptyOperator` (#22974)\n\n* Replace usage of `DummyOperator` with `EmptyOperator`", "code": "def test_task_group_context_mix():\n \n\n from airflow.decorators import task\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 50, "token_counts": 269, "n_ast_nodes": 21, "n_identifiers": 4, "d_id": 9216, "documentation": { "docstring": "Test cases to check nested TaskGroup context manager with taskgroup decorator", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 336293, "commit_id": "dd10da76a78e9566d12ddf1eb5aac90021b7e51d", "repo": "diffusers", "path": "src/diffusers/schedulers/scheduling_karras_ve.py", "file_name": "scheduling_karras_ve.py", "fun_name": "add_noise_to_input", "commit_message": "Add an alternative Karras et al. stochastic scheduler for VE models (#160)\n\n* karras + VE, not flexible yet\r\n\r\n* Fix inputs incompatibility with the original unet\r\n\r\n* Roll back sigma scaling\r\n\r\n* Apply suggestions from code review\r\n\r\n* Old comment\r\n\r\n* Fix doc", "code": "def add_noise_to_input(self, sample, sigma, generator=None):\n \n if self.s_min <= sigma <= self.s_max:\n gamma = min(self.s_churn / self.num_inference_steps, 2**0.5 - 1)\n else:\n gamma = 0\n\n # sample eps ~ N(0, S_noise^2 * I)\n eps = self.s_noise * torch.randn(sample.shape, generator=generator).to(sample.device)\n sigma_hat = sigma + gamma * sigma\n sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)\n\n return sample_hat, sigma_hat\n", "url": "https://github.com/huggingface/diffusers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 136, "n_words": 58, "vocab_size": 41, "complexity": 2, "nloc": 9, "token_counts": 107, "n_ast_nodes": 159, "n_identifiers": 20, "d_id": 120864, "documentation": { "docstring": "\n Explicit Langevin-like \"churn\" step of adding noise to the sample according to\n a factor gamma_i ≥ 0 to reach a higher noise level sigma_hat = sigma_i + gamma_i*sigma_i.\n ", "n_words": 28, "vocab_size": 24, "n_whitespaces": 50, "language": "en" } }, { "id": 178460, "commit_id": "c4ce69f97f7fefbcf637e9e59b6df056ad03eb16", "repo": "Nuitka", "path": "nuitka/build/SconsInterface.py", "file_name": "SconsInterface.py", "fun_name": "_getPythonForSconsExePath", "commit_message": "Scons: Refactor Python scan for major cleanup\n\n* This is in preparation of making it reusable for onefile\n compression which also has a simular need.", "code": "def _getPythonForSconsExePath():\n \n python_exe = Options.getPythonPathForScons()\n\n if python_exe is not None:\n return python_exe\n\n scons_supported_pythons = (\"3.5\", \"3.6\", \"3.7\", \"3.8\", \"3.9\", \"3.10\")\n if not Utils.isWin32Windows():\n scons_supported_pythons += (\"2.7\", \"2.6\")\n\n # Our inline copy needs no other module, just the right version 
of Python is needed.\n python_for_scons = findInstalledPython(\n python_versions=scons_supported_pythons, module_name=None, module_version=None\n )\n\n if python_for_scons is None:\n if Utils.isWin32Windows():\n scons_python_requirement = \"Python 3.5 or higher\"\n else:\n scons_python_requirement = \"Python 2.6, 2.7 or Python >= 3.5\"\n\n Tracing.scons_logger.sysexit(\n \n % scons_python_requirement\n )\n\n return python_for_scons.getPythonExe()\n\n\n@contextlib.contextmanager", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "@contextlib.contextmanager", "n_ast_errors": 1, "ast_levels": 12, "n_whitespaces": 202, "n_words": 79, "vocab_size": 56, "complexity": 5, "nloc": 28, "token_counts": 102, "n_ast_nodes": 192, "n_identifiers": 19, "d_id": 42705, "documentation": { "docstring": "Find a way to call any Python that works for Scons.\n\n Scons needs it as it doesn't support all Python versions.\n \\\nError, while Nuitka works with older Python, Scons does not, and therefore\nNuitka needs to find a %s executable, so please install\nit.\n\nYou may provide it using option \"--python-for-scons=path_to_python.exe\"\nin case it is not visible in registry, e.g. due to using uninstalled\nAnaconda Python.\n", "n_words": 67, "vocab_size": 54, "n_whitespaces": 66, "language": "en" } }, { "id": 321555, "commit_id": "d387b1a1084b9649009e5cffb9d71facc80bb41f", "repo": "qutebrowser", "path": "tests/unit/utils/test_version.py", "file_name": "test_version.py", "fun_name": "test_func", "commit_message": "tests: Adjust most imports", "code": "def test_func(self, qapp):\n \n pytest.importorskip(\"qutebrowser.qt.opengl\")\n version.opengl_info()\n", "url": "https://github.com/qutebrowser/qutebrowser.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 26, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 3, "token_counts": 19, "n_ast_nodes": 36, "n_identifiers": 7, "d_id": 117800, "documentation": { "docstring": "Simply call version.opengl_info() and see if it doesn't crash.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 222796, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/distutils/command/register.py", "file_name": "register.py", "fun_name": "classifiers", "commit_message": "add python 3.10.4 for windows", "code": "def classifiers(self):\n \n url = self.repository+'?:action=list_classifiers'\n response = urllib.request.urlopen(url)\n log.info(self._read_pypi_response(response))\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 37, "n_words": 9, "vocab_size": 8, "complexity": 1, "nloc": 4, "token_counts": 34, "n_ast_nodes": 60, "n_identifiers": 11, "d_id": 56745, "documentation": { "docstring": " Fetch the list of classifiers from the server.\n ", "n_words": 8, "vocab_size": 7, "n_whitespaces": 16, "language": "en" } }, { "id": 228764, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/bar/marker/_colorbar.py", "file_name": "_colorbar.py", "fun_name": "yanchor", "commit_message": "switch to black .22", "code": "def yanchor(self):\n \n return self[\"yanchor\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, 
"token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 60437, "documentation": { "docstring": "\n Sets this color bar's vertical position anchor This anchor\n binds the `y` position to the \"top\", \"middle\" or \"bottom\" of\n the color bar. Defaults to \"middle\" when `orientation` is \"v\"\n and \"bottom\" when `orientation` is \"h\".\n\n The 'yanchor' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['top', 'middle', 'bottom']\n\n Returns\n -------\n Any\n ", "n_words": 60, "vocab_size": 45, "n_whitespaces": 148, "language": "en" } }, { "id": 212794, "commit_id": "37c3afc8ca0dc0057a23ab512ee8b879074dd119", "repo": "PySimpleGUI", "path": "PySimpleGUI.py", "file_name": "PySimpleGUI.py", "fun_name": "_debugger_window_is_open", "commit_message": "ButtonMenu.Click aliased added. Debugger - automatically adds a timeout to read calls if a debug window is open. Still need to handle user-level multi-window support.", "code": "def _debugger_window_is_open():\n \n\n if _Debugger.debugger is None:\n return False\n debugger = _Debugger.debugger\n if debugger.popout_window or debugger.watcher_window:\n return True\n return False\n\n", "url": "https://github.com/PySimpleGUI/PySimpleGUI.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 48, "n_words": 19, "vocab_size": 14, "complexity": 4, "nloc": 7, "token_counts": 32, "n_ast_nodes": 54, "n_identifiers": 5, "d_id": 53407, "documentation": { "docstring": "\n Determines if one of the debugger window is currently open\n :return: returns True if the popout window or the main debug window is open\n :rtype: (bool)\n ", "n_words": 26, "vocab_size": 19, "n_whitespaces": 39, "language": "en" } }, { "id": 101353, "commit_id": "1022651eb8a7741014f5d2ec7cbfe882120dfa5f", "repo": "faceswap", "path": "plugins/extract/pipeline.py", "file_name": "pipeline.py", "fun_name": "detected_faces", "commit_message": "Bugfix: convert - Gif Writer\n - Fix non-launch error on Gif Writer\n - convert plugins - linting\n - convert/fs_media/preview/queue_manager - typing\n - Change convert items from dict to Dataclass", "code": "def detected_faces(self) -> List[\"DetectedFace\"]:\n \n return self._detected_faces\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 15, "n_ast_nodes": 28, "n_identifiers": 4, "d_id": 20768, "documentation": { "docstring": "list: A list of :class:`~lib.align.DetectedFace` objects in the :attr:`image`. 
", "n_words": 9, "vocab_size": 9, "n_whitespaces": 9, "language": "en" } }, { "id": 101393, "commit_id": "1022651eb8a7741014f5d2ec7cbfe882120dfa5f", "repo": "faceswap", "path": "scripts/fsmedia.py", "file_name": "fsmedia.py", "fun_name": "_check_input_folder", "commit_message": "Bugfix: convert - Gif Writer\n - Fix non-launch error on Gif Writer\n - convert plugins - linting\n - convert/fs_media/preview/queue_manager - typing\n - Change convert items from dict to Dataclass", "code": "def _check_input_folder(self) -> bool:\n \n if not os.path.exists(self._args.input_dir):\n logger.error(\"Input location %s not found.\", self._args.input_dir)\n sys.exit(1)\n if (os.path.isfile(self._args.input_dir) and\n os.path.splitext(self._args.input_dir)[1].lower() in _video_extensions):\n logger.info(\"Input Video: %s\", self._args.input_dir)\n retval = True\n else:\n logger.info(\"Input Directory: %s\", self._args.input_dir)\n retval = False\n return retval\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 153, "n_words": 37, "vocab_size": 28, "complexity": 4, "nloc": 19, "token_counts": 113, "n_ast_nodes": 186, "n_identifiers": 18, "d_id": 20808, "documentation": { "docstring": " Check whether the input is a folder or video.\n\n Returns\n -------\n bool\n ``True`` if the input is a video otherwise ``False``\n ", "n_words": 21, "vocab_size": 17, "n_whitespaces": 61, "language": "en" } }, { "id": 101259, "commit_id": "5e73437be47f2410439a3c6716de96354e6a0c94", "repo": "faceswap", "path": "tools/manual/detected_faces.py", "file_name": "detected_faces.py", "fun_name": "_background_extract", "commit_message": "lib.align updates:\n - alignments.py\n - Add typed dicts for imported alignments\n - Explicitly check for presence of thumb value in alignments dict\n - linting\n - detected_face.py\n - Typing\n - Linting\n - Legacy support for pre-aligned face\n - Update dependencies to new property names", "code": "def _background_extract(self, output_folder, progress_queue):\n \n _io = dict(saver=ImagesSaver(get_folder(output_folder), as_bytes=True),\n loader=ImagesLoader(self._input_location, count=self._alignments.frames_count))\n\n for frame_idx, (filename, image) in enumerate(_io[\"loader\"].load()):\n logger.trace(\"Outputting frame: %s: %s\", frame_idx, filename)\n src_filename = os.path.basename(filename)\n frame_name = os.path.splitext(src_filename)[0]\n progress_queue.put(1)\n\n for face_idx, face in enumerate(self._frame_faces[frame_idx]):\n output = f\"{frame_name}_{face_idx}.png\"\n aligned = AlignedFace(face.landmarks_xy,\n image=image,\n centering=\"head\",\n size=512) # TODO user selectable size\n meta = dict(alignments=face.to_png_meta(),\n source=dict(alignments_version=self._alignments.version,\n original_filename=output,\n face_index=face_idx,\n source_filename=src_filename,\n source_is_video=self._globals.is_video,\n source_frame_dims=image.shape[:2]))\n\n b_image = encode_image(aligned.face, \".png\", metadata=meta)\n _io[\"saver\"].save(output, b_image)\n _io[\"saver\"].close()\n\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 575, "n_words": 65, "vocab_size": 56, "complexity": 3, "nloc": 24, "token_counts": 232, "n_ast_nodes": 366, "n_identifiers": 58, "d_id": 20679, "documentation": { "docstring": " Perform the background extraction in a thread so GUI doesn't become unresponsive.\n\n 
Parameters\n ----------\n output_folder: str\n The location to save the output faces to\n progress_queue: :class:`queue.Queue`\n The queue to place incremental counts to for updating the GUI's progress bar\n ", "n_words": 39, "vocab_size": 33, "n_whitespaces": 97, "language": "en" } }, { "id": 262759, "commit_id": "3ba0aaf983f5223000a713c9275ea66e21f78b11", "repo": "pyinstaller", "path": "tests/unit/test_isolation.py", "file_name": "test_isolation.py", "fun_name": "test_default_kwargs", "commit_message": "tests: add a test for calling isolated function with default (kw)args\n\nAdd tests that show that current implementation does not transfer\ndefault arguments (function.__defaults__) nor default keyword-only\narguments (function.__kwdefaults__) to the child process, resulting\nin a missing-positional-argument error unless all optional arguments\nare explicitly provided.", "code": "def test_default_kwargs():\n ", "url": "https://github.com/pyinstaller/pyinstaller.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 8, "token_counts": 67, "n_ast_nodes": 12, "n_identifiers": 1, "d_id": 77348, "documentation": { "docstring": "\n Verify that default keyword-only arguments are properly passed to the isolated function call.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 20, "language": "en" } }, { "id": 123916, "commit_id": "dc7ed086a5038775e378b32cb31fb4a79f418dd9", "repo": "ray", "path": "python/ray/util/ml_utils/checkpoint_manager.py", "file_name": "checkpoint_manager.py", "fun_name": "_tune_legacy_checkpoint_score_attr", "commit_message": "[AIR] More checkpoint configurability, `Result` extension (#25943)\n\nThis PR:\r\n* Allows the user to set `keep_checkpoints_num` and `checkpoint_score_attr` in `RunConfig` using the `CheckpointStrategy` dataclass\r\n* Adds two new fields to the `Result` object - `best_checkpoints` - a list of saved best checkpoints as determined by `CheckpointingConfig`.", "code": "def _tune_legacy_checkpoint_score_attr(self) -> Optional[str]:\n \n if self.checkpoint_score_attribute is None:\n return self.checkpoint_score_attribute\n prefix = \"\"\n if self.checkpoint_score_order == MIN:\n prefix = \"min-\"\n return f\"{prefix}{self.checkpoint_score_attribute}\"\n\n\n# Alias for backwards compatibility\n\ndeprecation_message = (\n \"`CheckpointStrategy` is deprecated and will be removed in \"\n \"the future. 
Please use `ray.air.config.CheckpointStrategy` \"\n \"instead.\"\n)\n\n\n@Deprecated(message=deprecation_message)\n@dataclass", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "@Deprecated(message=deprecation_message)\n@dataclass", "n_ast_errors": 1, "ast_levels": 9, "n_whitespaces": 110, "n_words": 49, "vocab_size": 41, "complexity": 3, "nloc": 11, "token_counts": 38, "n_ast_nodes": 111, "n_identifiers": 12, "d_id": 27474, "documentation": { "docstring": "Same as ``checkpoint_score_attr`` in ``tune.run``.\n\n Only used for Legacy API compatibility.\n ", "n_words": 11, "vocab_size": 11, "n_whitespaces": 25, "language": "en" } }, { "id": 251916, "commit_id": "b3587b52b25077f68116b9852b041d33e7fc6601", "repo": "mitmproxy", "path": "test/mitmproxy/proxy/layers/test_tcp.py", "file_name": "test_tcp.py", "fun_name": "test_receive_data_before_server_connected", "commit_message": "make it black!", "code": "def test_receive_data_before_server_connected(tctx):\n \n assert (\n Playbook(tcp.TCPLayer(tctx), hooks=False)\n << OpenConnection(tctx.server)\n >> DataReceived(tctx.client, b\"hello!\")\n >> reply(None, to=-2)\n << SendData(tctx.server, b\"hello!\")\n )\n\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 62, "n_words": 18, "vocab_size": 15, "complexity": 1, "nloc": 8, "token_counts": 63, "n_ast_nodes": 93, "n_identifiers": 13, "d_id": 73888, "documentation": { "docstring": "\n assert that data received before a server connection is established\n will still be forwarded.\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 24, "language": "en" } }, { "id": 82841, "commit_id": "9ba53df5a19131e6926027b2e73aaa77cec17272", "repo": "examples", "path": "distributed/sharded_tensor/tensor_parallel.py", "file_name": "tensor_parallel.py", "fun_name": "demo_tp", "commit_message": "Gh/fduwjj/2/base (#1007)\n\n* test ghstack\r\n\r\n[ghstack-poisoned]\r\n\r\n* Update base for Update on \"[PT-D] Add an example for Megatron-LM style example\"\r\n\r\n\r\n\r\n\r\n[ghstack-poisoned]\r\n\r\n* Update base for Update on \"[PT-D] Add an example for Megatron-LM style example\"\r\n\r\n\r\n\r\n\r\n[ghstack-poisoned]\r\n\r\n* Update base for Update on \"[PT-D] Add an example for Megatron-LM style example\"\r\n\r\n\r\n\r\n\r\n[ghstack-poisoned]\r\n\r\n* Update base for Update on \"[PT-D] Add an example for Megatron-LM style example\"\r\n\r\n\r\n\r\n\r\n[ghstack-poisoned]\r\n\r\n* [PT-D] Add an example for Megatron-LM style example (#1006)\r\n\r\n* [PT-D] Add an example for Megatron-LM style example\r\n\r\n[ghstack-poisoned]\r\n\r\n* Update on \"[PT-D] Add an example for Megatron-LM style example\"\r\n\r\n\r\n\r\n\r\n[ghstack-poisoned]", "code": "def demo_tp(rank, args):\n \n print(f\"Running basic Megatron style TP example on rank {rank}.\")\n setup(rank, args.world_size)\n # create a sharding plan based on the given world_size.\n module_sharding_plan = _get_toy_module_sharding_plan(\n args.world_size\n )\n\n # create model and move it to GPU with id rank\n model = ToyModel().cuda(rank)\n # Shard the module based on created plan.\n shard_module(model, module_sharding_plan)\n # Create a optimizer for the sharded module.\n optimizer = _get_toy_module_optim(model, 0.002)\n\n # Perform a num of iterations of forward/backward\n # and optimizations for the sharded module.\n for _ in range(args.iter_nums):\n inp = torch.rand(20, 10).cuda(rank)\n output = model(inp)\n 
output.sum().backward()\n optimizer.step()\n\n cleanup()\n\n", "url": "https://github.com/pytorch/examples.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 177, "n_words": 94, "vocab_size": 67, "complexity": 2, "nloc": 15, "token_counts": 103, "n_ast_nodes": 176, "n_identifiers": 25, "d_id": 17550, "documentation": { "docstring": "\n Main body of the demo of a basic version of tensor parallel by using\n PyTorch native sharded tensor APIs.\n ", "n_words": 19, "vocab_size": 16, "n_whitespaces": 29, "language": "en" } }, { "id": 21391, "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/distlib/_backport/tarfile.py", "file_name": "tarfile.py", "fun_name": "makelink", "commit_message": "Vendor in pip 22.1.2", "code": "def makelink(self, tarinfo, targetpath):\n \n try:\n # For systems that support symbolic and hard links.\n if tarinfo.issym():\n os.symlink(tarinfo.linkname, targetpath)\n else:\n # See extract().\n if os.path.exists(tarinfo._link_target):\n os.link(tarinfo._link_target, targetpath)\n else:\n self._extract_member(self._find_link_target(tarinfo),\n targetpath)\n except symlink_exception:\n if tarinfo.issym():\n linkpath = os.path.join(os.path.dirname(tarinfo.name),\n tarinfo.linkname)\n else:\n linkpath = tarinfo.linkname\n else:\n try:\n self._extract_member(self._find_link_target(tarinfo),\n targetpath)\n except KeyError:\n raise ExtractError(\"unable to resolve link inside archive\")\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 432, "n_words": 54, "vocab_size": 39, "complexity": 6, "nloc": 22, "token_counts": 133, "n_ast_nodes": 219, "n_identifiers": 21, "d_id": 3804, "documentation": { "docstring": "Make a (symbolic) link called targetpath. 
If it cannot be created\n (platform limitation), we try to make a copy of the referenced file\n instead of a link.\n ", "n_words": 27, "vocab_size": 24, "n_whitespaces": 52, "language": "en" } }, { "id": 182279, "commit_id": "116f3735b68e8dd293dba4b3a183f98afbd0b167", "repo": "textual", "path": "src/textual/css/styles.py", "file_name": "styles.py", "fun_name": "get_rules", "commit_message": "docstrings", "code": "def get_rules(self) -> RulesMap:\n \n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 11, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 6, "token_counts": 8, "n_ast_nodes": 16, "n_identifiers": 3, "d_id": 43780, "documentation": { "docstring": "Get the rules in a mapping.\n\n Returns:\n RulesMap: A TypedDict of the rules.\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 38, "language": "en" } }, { "id": 165750, "commit_id": "24652cf178c12562585639cba39c46d62b95f107", "repo": "pandas", "path": "pandas/tests/extension/json/test_json.py", "file_name": "test_json.py", "fun_name": "test_fillna_frame", "commit_message": "TST: Convert skip -> xfail (#46427)", "code": "def test_fillna_frame(self):\n \n super().test_fillna_frame()\n\n\nunhashable = pytest.mark.xfail(reason=\"Unhashable\")\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 19, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 13, "n_ast_nodes": 47, "n_identifiers": 8, "d_id": 39706, "documentation": { "docstring": "We treat dictionaries as a mapping in fillna, not a scalar.", "n_words": 11, "vocab_size": 10, "n_whitespaces": 10, "language": "en" } }, { "id": 221712, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/contextlib.py", "file_name": "contextlib.py", "fun_name": "push", "commit_message": "add python 3.10.4 for windows", "code": "def push(self, exit):\n \n # We use an unbound method rather than a bound method to follow\n # the standard lookup behaviour for special methods.\n _cb_type = type(exit)\n\n try:\n exit_method = _cb_type.__exit__\n except AttributeError:\n # Not a context manager, so assume it's a callable.\n self._push_exit_callback(exit)\n else:\n self._push_cm_exit(exit, exit_method)\n return exit # Allow use as a decorator.\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 156, "n_words": 55, "vocab_size": 46, "complexity": 2, "nloc": 9, "token_counts": 42, "n_ast_nodes": 75, "n_identifiers": 10, "d_id": 56482, "documentation": { "docstring": "Registers a callback with the standard __exit__ method signature.\n\n Can suppress exceptions the same way __exit__ method can.\n Also accepts any object with an __exit__ method (registering a call\n to the method instead of the object itself).\n ", "n_words": 37, "vocab_size": 26, "n_whitespaces": 65, "language": "en" } }, { "id": 125623, "commit_id": "37f4692aa805eba230e2879c098320111788a64c", "repo": "ray", "path": "python/ray/tests/test_state_api.py", "file_name": "test_state_api.py", "fun_name": "test_filter_non_existent_column", "commit_message": "[State Observability] Fix \"No result for get crashing the formatting\" and \"Filtering not handled properly when key missing in the datum\" #26881\n\nFix two issues\r\n\r\nNo result for get crashing the formatting\r\nFiltering not 
handled properly when key missing in the datum", "code": "async def test_filter_non_existent_column(state_api_manager):\n \n data_source_client = state_api_manager.data_source_client\n id = b\"1234\"\n data_source_client.get_all_worker_info.return_value = GetAllWorkerInfoReply(\n worker_table_data=[\n generate_worker_data(id, pid=1),\n generate_worker_data(b\"12345\", pid=2),\n ],\n total=2,\n )\n result = await state_api_manager.list_workers(\n option=create_api_options(filters=[(\"exit_type\", \"=\", \"INTENDED_SYSTEM_EXIT\")])\n )\n assert len(result.result) == 0\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 106, "n_words": 32, "vocab_size": 28, "complexity": 1, "nloc": 14, "token_counts": 85, "n_ast_nodes": 138, "n_identifiers": 17, "d_id": 27927, "documentation": { "docstring": "Test when the non existent column is given, it handles that properly.\n\n Related: https://github.com/ray-project/ray/issues/26811\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 20, "language": "en" } }, { "id": 204364, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/staticfiles/storage.py", "file_name": "storage.py", "fun_name": "_url", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def _url(self, hashed_name_func, name, force=False, hashed_files=None):\n \n if settings.DEBUG and not force:\n hashed_name, fragment = name, \"\"\n else:\n clean_name, fragment = urldefrag(name)\n if urlsplit(clean_name).path.endswith(\"/\"): # don't hash paths\n hashed_name = name\n else:\n args = (clean_name,)\n if hashed_files is not None:\n args += (hashed_files,)\n hashed_name = hashed_name_func(*args)\n\n final_url = super().url(hashed_name)\n\n # Special casing for a @font-face hack, like url(myfont.eot?#iefix\")\n # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax\n query_fragment = \"?#\" in name # [sic!]\n if fragment or query_fragment:\n urlparts = list(urlsplit(final_url))\n if fragment and not urlparts[4]:\n urlparts[4] = fragment\n if query_fragment and not urlparts[3]:\n urlparts[2] += \"?\"\n final_url = urlunsplit(urlparts)\n\n return unquote(final_url)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 356, "n_words": 94, "vocab_size": 60, "complexity": 11, "nloc": 22, "token_counts": 156, "n_ast_nodes": 261, "n_identifiers": 24, "d_id": 50711, "documentation": { "docstring": "\n Return the non-hashed URL in DEBUG mode.\n ", "n_words": 7, "vocab_size": 7, "n_whitespaces": 22, "language": "en" } }, { "id": 60255, "commit_id": "cc4d0564756ca067516f71718a3d135996525909", "repo": "transferlearning", "path": "code/deep/BJMMD/caffe/python/caffe/io.py", "file_name": "io.py", "fun_name": "set_raw_scale", "commit_message": "Balanced joint maximum mean discrepancy for deep transfer learning", "code": "def set_raw_scale(self, in_, scale):\n \n self.__check_input(in_)\n self.raw_scale[in_] = scale\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 29, "n_words": 8, "vocab_size": 8, "complexity": 1, "nloc": 3, "token_counts": 24, "n_ast_nodes": 39, "n_identifiers": 6, "d_id": 12047, "documentation": { "docstring": "\n Set the scale of raw features s.t. 
the input blob = input * scale.\n While Python represents images in [0, 1], certain Caffe models\n like CaffeNet and AlexNet represent images in [0, 255] so the raw_scale\n of these models must be 255.\n\n Parameters\n ----------\n in_ : which input to assign this scale factor\n scale : scale coefficient\n ", "n_words": 57, "vocab_size": 44, "n_whitespaces": 121, "language": "en" } }, { "id": 61929, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py", "file_name": "database.py", "fun_name": "list_distinfo_files", "commit_message": "upd; format", "code": "def list_distinfo_files(self):\n \n base = os.path.dirname(self.path)\n for path, checksum, size in self._get_records():\n # XXX add separator or use real relpath algo\n if not os.path.isabs(path):\n path = os.path.join(base, path)\n if path.startswith(self.path):\n yield path\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 115, "n_words": 31, "vocab_size": 28, "complexity": 4, "nloc": 7, "token_counts": 66, "n_ast_nodes": 108, "n_identifiers": 12, "d_id": 12754, "documentation": { "docstring": "\n Iterates over the ``RECORD`` entries and returns paths for each line if\n the path is pointing to a file located in the ``.dist-info`` directory\n or one of its subdirectories.\n\n :returns: iterator of paths\n ", "n_words": 33, "vocab_size": 29, "n_whitespaces": 69, "language": "en" } }, { "id": 43013, "commit_id": "a1a9a8f9a3adc63e783cf3fd699066f35e488d4f", "repo": "airflow", "path": "airflow/models/dag.py", "file_name": "dag.py", "fun_name": "validate", "commit_message": "Check bag DAG schedule_interval match tiemtable (#23113)\n\nThis guards against the DAG's timetable or schedule_interval from being\r\nchanged after it's created. Validation is done by creating a timetable\r\nand check its summary matches schedule_interval. The logic is not\r\nbullet-proof, especially if a custom timetable does not provide a useful\r\nsummary. 
But this is the best we can do.", "code": "def validate(self):\n \n if not self._check_schedule_interval_matches_timetable():\n raise AirflowDagInconsistent(\n f\"inconsistent schedule: timetable {self.timetable.summary!r} \"\n f\"does not match schedule_interval {self.schedule_interval!r}\",\n )\n self.params.validate()\n self.timetable.validate()\n", "url": "https://github.com/apache/airflow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 100, "n_words": 20, "vocab_size": 19, "complexity": 2, "nloc": 8, "token_counts": 37, "n_ast_nodes": 85, "n_identifiers": 8, "d_id": 7791, "documentation": { "docstring": "Validate the DAG has a coherent setup.\n\n This is called by the DAG bag before bagging the DAG.\n ", "n_words": 18, "vocab_size": 15, "n_whitespaces": 32, "language": "en" } }, { "id": 48752, "commit_id": "26e56d098d7cebdc910e84ce1d0d1a909c1988c3", "repo": "PaddleHub", "path": "modules/text/language_model/albert-base-v1/module.py", "file_name": "module.py", "fun_name": "get_tokenizer", "commit_message": "add albert-base-v1", "code": "def get_tokenizer(*args, **kwargs):\n \n return AlbertTokenizer.from_pretrained(pretrained_model_name_or_path='albert-base-v1', *args, **kwargs)\n", "url": "https://github.com/PaddlePaddle/PaddleHub.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 21, "n_words": 7, "vocab_size": 7, "complexity": 1, "nloc": 2, "token_counts": 25, "n_ast_nodes": 43, "n_identifiers": 6, "d_id": 9591, "documentation": { "docstring": "\n Gets the tokenizer that is customized for this module.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 55137, "commit_id": "c0cb1fee460c1bded9e3eb741ad7979402844bf8", "repo": "prefect", "path": "src/prefect/cli/base.py", "file_name": "base.py", "fun_name": "exit_with_error", "commit_message": "Update `set` command; allow CLI `console` object to be patched", "code": "def exit_with_error(message, code=1, **kwargs):\n \n kwargs.setdefault(\"style\", \"red\")\n app.console.print(message, **kwargs)\n raise typer.Exit(code)\n\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 22, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 4, "token_counts": 39, "n_ast_nodes": 66, "n_identifiers": 10, "d_id": 11226, "documentation": { "docstring": "\n Utility to print a stylized error message and exit with a non-zero code\n ", "n_words": 13, "vocab_size": 12, "n_whitespaces": 20, "language": "en" } }, { "id": 281178, "commit_id": "f40ba0d256a78ab2b8461f0df3a9a52ca7dc5704", "repo": "OpenBBTerminal", "path": "discordbot/stocks/government/contracts.py", "file_name": "contracts.py", "fun_name": "contracts_command", "commit_message": "Bot logging fix (#1105)\n\n* Write bot logs to stdout instead of a file\r\nHeroku's logging uses the stdout and has problems with files\r\n\r\n* Send \"you snooze you lose\" only if debug flag is enabled\r\n\r\n* Replace print statements with logger entries in the economy menu\r\n\r\n* Add logging to bot menu command calls\r\n\r\n* Silence bandit warnings about the REPLACE_ME token\r\n\r\n* Organize imports and update logging in economy menu\r\n\r\n* Organize imports and update logging in dps menu\r\n\r\n* Organize imports and update logging in dd menu\r\n\r\n* Organize imports and update logging in gov menu\r\n\r\n* Organize imports and update logging in options menu\r\n\r\n* Organize 
imports and update logging in screener menu\r\n\r\n* Organize imports and update logging in ta menu\r\n\r\n* Revert automatic import sorting\r\n\r\n* Add logging to the options reaction helper", "code": "async def contracts_command(ctx, ticker=\"\", past_transaction_days=\"\", raw=\"\"):\n \n try:\n # Debug user input\n if cfg.DEBUG:\n logger.debug(\n \"!stocks.gov.contracts %s %s %s\", ticker, past_transaction_days, raw\n )\n\n if past_transaction_days == \"\":\n past_transaction_days = 10\n else:\n if not past_transaction_days.lstrip(\"-\").isnumeric():\n raise Exception(\"Number has to be an integer\")\n past_transaction_days = int(past_transaction_days)\n\n if raw in [\"false\", \"False\", \"FALSE\", \"\"]:\n raw = False\n\n if raw in [\"true\", \"True\", \"TRUE\"]:\n raw = True\n\n if raw not in [True, False]:\n raise Exception(\"raw argument has to be true or false\")\n\n if ticker == \"\":\n raise Exception(\"A ticker is required\")\n\n # Retrieve Data\n df_contracts = quiverquant_model.get_government_trading(\"contracts\", ticker)\n\n if df_contracts.empty:\n raise Exception(\"No government contracts found\")\n\n # Output Data\n df_contracts[\"Date\"] = pd.to_datetime(df_contracts[\"Date\"]).dt.date\n\n df_contracts = df_contracts[\n df_contracts[\"Date\"].isin(\n df_contracts[\"Date\"].unique()[:past_transaction_days]\n )\n ]\n\n df_contracts.drop_duplicates(inplace=True)\n\n fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)\n\n df_contracts.groupby(\"Date\").sum().div(1000).plot(kind=\"bar\", rot=0, ax=ax)\n ax.set_ylabel(\"Amount ($1k)\")\n ax.set_title(f\"Sum of latest government contracts to {ticker}\")\n fig.tight_layout()\n\n plt.savefig(\"gov_contracts.png\")\n uploaded_image = gst_imgur.upload_image(\"gov_contracts.png\", title=\"something\")\n image_link = uploaded_image.link\n if cfg.DEBUG:\n logger.debug(\"Image URL: %s\", image_link)\n title = f\"Stocks: [quiverquant.com] Contracts by {ticker}\"\n if raw:\n description = df_contracts.to_string()\n embed = discord.Embed(\n title=title, description=description, colour=cfg.COLOR\n )\n else:\n embed = discord.Embed(title=title, colour=cfg.COLOR)\n embed.set_author(\n name=cfg.AUTHOR_NAME,\n icon_url=cfg.AUTHOR_ICON_URL,\n )\n embed.set_image(url=image_link)\n os.remove(\"gov_contracts.png\")\n\n await ctx.send(embed=embed)\n\n except Exception as e:\n embed = discord.Embed(\n title=f\"ERROR Stocks: [quiverquant.com] Contracts by {ticker}\",\n colour=cfg.COLOR,\n description=e,\n )\n embed.set_author(\n name=cfg.AUTHOR_NAME,\n icon_url=cfg.AUTHOR_ICON_URL,\n )\n\n await ctx.send(embed=embed)\n", "url": "https://github.com/OpenBB-finance/OpenBBTerminal.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 799, "n_words": 200, "vocab_size": 131, "complexity": 12, "nloc": 66, "token_counts": 444, "n_ast_nodes": 766, "n_identifiers": 67, "d_id": 83584, "documentation": { "docstring": "Displays contracts associated with tickers [quiverquant.com]", "n_words": 6, "vocab_size": 6, "n_whitespaces": 5, "language": "en" } }, { "id": 317256, "commit_id": "b9c8d65940ec47a82332b8b1a67301da018ccadf", "repo": "core", "path": "tests/components/homekit_controller/test_storage.py", "file_name": "test_storage.py", "fun_name": "test_storage_is_updated_on_add", "commit_message": "Restore accessory state into pairing using new HKC methods (#75276)", "code": "async def test_storage_is_updated_on_add(hass, hass_storage, utcnow):\n \n await setup_test_component(hass, 
create_lightbulb_service)\n\n entity_map: EntityMapStorage = hass.data[ENTITY_MAP]\n hkid = \"00:00:00:00:00:00\"\n\n # Is in memory store updated?\n assert hkid in entity_map.storage_data\n\n # Is saved out to store?\n await flush_store(entity_map.store)\n assert hkid in hass_storage[ENTITY_MAP][\"data\"][\"pairings\"]\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 64, "n_words": 37, "vocab_size": 28, "complexity": 1, "nloc": 7, "token_counts": 56, "n_ast_nodes": 96, "n_identifiers": 14, "d_id": 115831, "documentation": { "docstring": "Test entity map storage is cleaned up on adding an accessory.", "n_words": 11, "vocab_size": 11, "n_whitespaces": 10, "language": "en" } }, { "id": 59074, "commit_id": "895a5203623c205ede2ee0c31f99be72822d5351", "repo": "prefect", "path": "src/prefect/logging/loggers.py", "file_name": "loggers.py", "fun_name": "disable_run_logger", "commit_message": "Add docstring", "code": "def disable_run_logger():\n \n with disable_logger(\"prefect.flow_run\"), disable_logger(\"prefect.task_run\"):\n yield\n", "url": "https://github.com/PrefectHQ/prefect.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 19, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 3, "token_counts": 17, "n_ast_nodes": 39, "n_identifiers": 2, "d_id": 11863, "documentation": { "docstring": "\n Gets both `prefect.flow_run` and `prefect.task_run` and disables them\n within the context manager. Upon exiting the context manager, both loggers\n are returned to its original state.\n ", "n_words": 25, "vocab_size": 21, "n_whitespaces": 38, "language": "en" } }, { "id": 154339, "commit_id": "39b36eb2a2e3bf3d612933e1c78545a8bb28cde4", "repo": "modin", "path": "modin/core/execution/dask/implementations/pandas_on_dask/partitioning/partition.py", "file_name": "partition.py", "fun_name": "add_to_apply_calls", "commit_message": "PERF-#4794: Compute caches in `_propagate_index_objs` (#4888)\n\nCo-authored-by: Mahesh Vashishtha \r\nSigned-off-by: Myachev ", "code": "def add_to_apply_calls(self, func, *args, length=None, width=None, **kwargs):\n \n return PandasOnDaskDataframePartition(\n self._data,\n call_queue=self.call_queue + [[func, args, kwargs]],\n length=length,\n width=width,\n )\n", "url": "https://github.com/modin-project/modin.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 83, "n_words": 18, "vocab_size": 18, "complexity": 1, "nloc": 7, "token_counts": 54, "n_ast_nodes": 76, "n_identifiers": 10, "d_id": 35932, "documentation": { "docstring": "\n Add a function to the call queue.\n\n Parameters\n ----------\n func : callable\n Function to be added to the call queue.\n *args : iterable\n Additional positional arguments to be passed in `func`.\n length : distributed.Future or int, optional\n Length, or reference to length, of wrapped ``pandas.DataFrame``.\n width : distributed.Future or int, optional\n Width, or reference to width, of wrapped ``pandas.DataFrame``.\n **kwargs : dict\n Additional keyword arguments to be passed in `func`.\n\n Returns\n -------\n PandasOnDaskDataframePartition\n A new ``PandasOnDaskDataframePartition`` object.\n\n Notes\n -----\n The keyword arguments are sent as a dictionary.\n ", "n_words": 87, "vocab_size": 54, "n_whitespaces": 259, "language": "en" } }, { "id": 167699, "commit_id": "9612375ca28ade056f15d4338f1bfde5d045c9fc", "repo": "pandas", "path": 
"pandas/core/config_init.py", "file_name": "config_init.py", "fun_name": "use_numexpr_cb", "commit_message": "TYP: return values in core/*.py (#47587)\n\n* TYP: return values in core/*.py\r\n\r\n* fix test\r\n\r\n* to_html\r\n\r\n* to_html part 2\r\n\r\n* DataFrame.query\r\n\r\n* more overloads\r\n\r\n* fix query?\r\n\r\n* increase stacklevel by one\r\n\r\n* fix rename_axis\r\n\r\n* and an overload for DataFrame.eval\r\n\r\n* address comments\r\n\r\n* fix typevar", "code": "def use_numexpr_cb(key) -> None:\n from pandas.core.computation import expressions\n\n expressions.set_use_numexpr(cf.get_option(key))\n\n\nuse_numba_doc = \n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 16, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 3, "token_counts": 26, "n_ast_nodes": 48, "n_identifiers": 10, "d_id": 40082, "documentation": { "docstring": "\n: bool\n Use the numba engine option for select operations if it is installed,\n the default is False\n Valid values: False,True\n", "n_words": 21, "vocab_size": 19, "n_whitespaces": 29, "language": "en" } }, { "id": 201336, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "tests/auth_tests/test_management.py", "file_name": "test_management.py", "fun_name": "test_unavailable_models", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def test_unavailable_models(self):\n \n state = migrations.state.ProjectState()\n # Unavailable contenttypes.ContentType\n with self.assertNumQueries(0):\n create_permissions(self.app_config, verbosity=0, apps=state.apps)\n # Unavailable auth.Permission\n state = migrations.state.ProjectState(real_apps={\"contenttypes\"})\n with self.assertNumQueries(0):\n create_permissions(self.app_config, verbosity=0, apps=state.apps)\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 95, "n_words": 24, "vocab_size": 15, "complexity": 1, "nloc": 7, "token_counts": 77, "n_ast_nodes": 130, "n_identifiers": 11, "d_id": 49920, "documentation": { "docstring": "\n #24075 - Permissions shouldn't be created or deleted if the ContentType\n or Permission models aren't available.\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 38, "language": "en" } }, { "id": 217717, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/http/client.py", "file_name": "client.py", "fun_name": "info", "commit_message": "add python 3.10.4 for windows", "code": "def info(self):\n \n return self.headers\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 19, "n_identifiers": 3, "d_id": 54898, "documentation": { "docstring": "Returns an instance of the class mimetools.Message containing\n meta-information associated with the URL.\n\n When the method is HTTP, these headers are those returned by\n the server at the head of the retrieved HTML page (including\n Content-Length and Content-Type).\n\n When the method is FTP, a Content-Length header will be\n present if (as is now usual) the server passed back a file\n length in response to the FTP retrieval request. 
A\n Content-Type header will be present if the MIME type can be\n guessed.\n\n When the method is local-file, returned headers will include\n a Date representing the file's last-modified time, a\n Content-Length giving file size, and a Content-Type\n containing a guess at the file's type. See also the\n description of the mimetools module.\n\n ", "n_words": 120, "vocab_size": 74, "n_whitespaces": 225, "language": "en" } }, { "id": 276300, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/saving/utils_v1/export_utils.py", "file_name": "export_utils.py", "fun_name": "get_timestamped_export_dir", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def get_timestamped_export_dir(export_dir_base):\n \n attempts = 0\n while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS:\n timestamp = int(time.time())\n\n result_dir = tf.io.gfile.join(\n tf.compat.as_bytes(export_dir_base),\n tf.compat.as_bytes(str(timestamp)),\n )\n if not tf.compat.v1.gfile.Exists(result_dir):\n # Collisions are still possible (though extremely unlikely): this\n # directory is not actually created yet, but it will be almost\n # instantly on return from this function.\n return result_dir\n time.sleep(1)\n attempts += 1\n logging.warning(\n \"Directory {} already exists; retrying (attempt {}/{})\".format(\n tf.compat.as_str(result_dir),\n attempts,\n MAX_DIRECTORY_CREATION_ATTEMPTS,\n )\n )\n raise RuntimeError(\n \"Failed to obtain a unique export directory name after \"\n f\"{MAX_DIRECTORY_CREATION_ATTEMPTS} attempts.\"\n )\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 301, "n_words": 83, "vocab_size": 69, "complexity": 3, "nloc": 23, "token_counts": 112, "n_ast_nodes": 191, "n_identifiers": 23, "d_id": 81622, "documentation": { "docstring": "Builds a path to a new subdirectory within the base directory.\n\n Each export is written into a new subdirectory named using the\n current time. 
This guarantees monotonically increasing version\n numbers even across multiple runs of the pipeline.\n The timestamp used is the number of seconds since epoch UTC.\n\n Args:\n export_dir_base: A string containing a directory to write the exported\n graph and checkpoints.\n Returns:\n The full path of the new subdirectory (which is not actually created yet).\n\n Raises:\n RuntimeError: if repeated attempts fail to obtain a unique timestamped\n directory name.\n ", "n_words": 89, "vocab_size": 67, "n_whitespaces": 145, "language": "en" } }, { "id": 219736, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/_pydecimal.py", "file_name": "_pydecimal.py", "fun_name": "compare", "commit_message": "add python 3.10.4 for windows", "code": "def compare(self, a, b):\n \n a = _convert_other(a, raiseit=True)\n return a.compare(b, context=self)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 32, "n_words": 11, "vocab_size": 11, "complexity": 1, "nloc": 3, "token_counts": 31, "n_ast_nodes": 48, "n_identifiers": 7, "d_id": 55755, "documentation": { "docstring": "Compares values numerically.\n\n If the signs of the operands differ, a value representing each operand\n ('-1' if the operand is less than zero, '0' if the operand is zero or\n negative zero, or '1' if the operand is greater than zero) is used in\n place of that operand for the comparison instead of the actual\n operand.\n\n The comparison is then effected by subtracting the second operand from\n the first and then returning a value according to the result of the\n subtraction: '-1' if the result is less than zero, '0' if the result is\n zero or negative zero, or '1' if the result is greater than zero.\n\n >>> ExtendedContext.compare(Decimal('2.1'), Decimal('3'))\n Decimal('-1')\n >>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1'))\n Decimal('0')\n >>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10'))\n Decimal('0')\n >>> ExtendedContext.compare(Decimal('3'), Decimal('2.1'))\n Decimal('1')\n >>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3'))\n Decimal('1')\n >>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1'))\n Decimal('-1')\n >>> ExtendedContext.compare(1, 2)\n Decimal('-1')\n >>> ExtendedContext.compare(Decimal(1), 2)\n Decimal('-1')\n >>> ExtendedContext.compare(1, Decimal(2))\n Decimal('-1')\n ", "n_words": 143, "vocab_size": 67, "n_whitespaces": 339, "language": "en" } }, { "id": 248015, "commit_id": "1783156dbcf4164692e66275d1c29857c434995b", "repo": "synapse", "path": "synapse/storage/databases/main/registration.py", "file_name": "registration.py", "fun_name": "count_daily_user_type", "commit_message": "Add some type hints to datastore (#12423)\n\n* Add some type hints to datastore\r\n\r\n* newsfile\r\n\r\n* change `Collection` to `List`\r\n\r\n* refactor return type of `select_users_txn`\r\n\r\n* correct type hint in `stream.py`\r\n\r\n* Remove `Optional` in `select_users_txn`\r\n\r\n* remove not needed return type in `__init__`\r\n\r\n* Revert change in `get_stream_id_for_event_txn`\r\n\r\n* Remove import from `Literal`", "code": "async def count_daily_user_type(self) -> Dict[str, int]:\n \n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 13, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 11, "token_counts": 27, "n_ast_nodes": 23, "n_identifiers": 5, "d_id": 
72046, "documentation": { "docstring": "\n Counts 1) native non guest users\n 2) native guests users\n 3) bridged users\n who registered on the homeserver in the past 24 hours\n ", "n_words": 23, "vocab_size": 19, "n_whitespaces": 73, "language": "en" } }, { "id": 168124, "commit_id": "14de3fd9ca4178bfce5dd681fa5d0925e057c04d", "repo": "pandas", "path": "pandas/core/groupby/groupby.py", "file_name": "groupby.py", "fun_name": "__iter__", "commit_message": "DEPR: returning tuple when grouping by a list containing single element (#47761)\n\n* DOC #45443 edited the documentation of where/mask functions\r\n\r\n* DOC #45443 edited the documentation of where/mask functions\r\n\r\n* Update generic.py\r\n\r\n* ENH: add suffixes argument to DataFrame.compare #44354\r\n\r\n* Edited the tests\r\n\r\n* space fixing\r\n\r\n* Update shared_docs.py\r\n\r\n* Update series.py\r\n\r\n* Update series.py\r\n\r\n* invalid argument tests\r\n\r\n* issue reference\r\n\r\n* syntax editing\r\n\r\n* grammar fixing\r\n\r\n* edit doc\r\n\r\n* editting doc\r\n\r\n* Update 02_read_write.rst\r\n\r\n* Update 02_read_write.rst\r\n\r\n* Update v1.5.0.rst\r\n\r\n* Update v1.5.0.rst\r\n\r\n* np\r\n\r\n* 1.5.0 rst\r\n\r\n* created tests for invalid input\r\n\r\n* space\r\n\r\n* space\r\n\r\n* space\r\n\r\n* editing test\r\n\r\n* deprecated\r\n\r\n* syntax\r\n\r\n* editting existed examples\r\n\r\n* syntax\r\n\r\n* edit past tests\r\n\r\n* editting pivot\r\n\r\n* ex\r\n\r\n* editing internal use\r\n\r\n* pivot\r\n\r\n* warning expected\r\n\r\n* warning\r\n\r\n* ignore doc warning\r\n\r\n* doc\r\n\r\n* tests\r\n\r\n* ignore warning\r\n\r\n* test\r\n\r\n* plotting\r\n\r\n* test\r\n\r\n* doc\r\n\r\n* doc\r\n\r\n* white space\r\n\r\n* doc\r\n\r\n* doc\r\n\r\n* doc\r\n\r\n* doc\r\n\r\n* stacklevel\r\n\r\n* pivot\r\n\r\n* pivot\r\n\r\n* cookbook\r\n\r\n* flake8\r\n\r\n* flake8\r\n\r\n* what's new\r\n\r\n* syntax\r\n\r\n* itr\r\n\r\n* car names\r\n\r\n* test edit\r\n\r\n* fixing tests\r\n\r\n* fixing tests\r\n\r\n* flake8\r\n\r\n* rst edit\r\n\r\n* __iter__ edit\r\n\r\n* flake8\r\n\r\n* flake8\r\n\r\n* space\r\n\r\n* test\r\n\r\n* merge\r\n\r\n* ignore the type\r\n\r\n* mypy\r\n\r\n* type\r\n\r\n* self.keys\r\n\r\n* tests\r\n\r\n* .\r\n\r\n* .\r\n\r\n* adding keys\r\n\r\n* order\r\n\r\n* attribute\r\n\r\n* ignores\r\n\r\n* Update hist.py\r\n\r\n* ignore\r\n\r\n* .\r\n\r\n* .\r\n\r\n* .\r\n\r\n* .\r\n\r\n* .\r\n\r\n* Update doc/source/whatsnew/v1.5.0.rst\r\n\r\nCo-authored-by: Richard Shadrach <45562402+rhshadrach@users.noreply.github.com>\r\n\r\nCo-authored-by: Richard Shadrach <45562402+rhshadrach@users.noreply.github.com>", "code": "def __iter__(self) -> Iterator[tuple[Hashable, NDFrameT]]:\n \n keys = self.keys\n if isinstance(keys, list) and len(keys) == 1:\n warnings.warn(\n (\n \"In a future version of pandas, a length 1 \"\n \"tuple will be returned when iterating over a \"\n \"a groupby with a grouper equal to a list of \"\n \"length 1. 
Don't supply a list with a single grouper \"\n \"to avoid this warning.\"\n ),\n FutureWarning,\n stacklevel=find_stack_level(),\n )\n return self.grouper.get_iterator(self._selected_obj, axis=self.axis)\n\n\n# To track operations that expand dimensions, like ohlc\nOutputFrameOrSeries = TypeVar(\"OutputFrameOrSeries\", bound=NDFrame)\n\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 285, "n_words": 82, "vocab_size": 68, "complexity": 3, "nloc": 23, "token_counts": 74, "n_ast_nodes": 140, "n_identifiers": 23, "d_id": 40213, "documentation": { "docstring": "\n Groupby iterator.\n\n Returns\n -------\n Generator yielding sequence of (name, subsetted object)\n for each group\n ", "n_words": 14, "vocab_size": 14, "n_whitespaces": 57, "language": "en" } }, { "id": 61234, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/utils/misc.py", "file_name": "misc.py", "fun_name": "_transform_url", "commit_message": "upd; format", "code": "def _transform_url(url, transform_netloc):\n # type: (str, Callable[[str], Tuple[Any, ...]]) -> Tuple[str, NetlocTuple]\n \n purl = urllib.parse.urlsplit(url)\n netloc_tuple = transform_netloc(purl.netloc)\n # stripped url\n url_pieces = (purl.scheme, netloc_tuple[0], purl.path, purl.query, purl.fragment)\n surl = urllib.parse.urlunsplit(url_pieces)\n return surl, cast(\"NetlocTuple\", netloc_tuple)\n\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 59, "n_words": 35, "vocab_size": 31, "complexity": 1, "nloc": 6, "token_counts": 69, "n_ast_nodes": 109, "n_identifiers": 17, "d_id": 12455, "documentation": { "docstring": "Transform and replace netloc in a url.\n\n transform_netloc is a function taking the netloc and returning a\n tuple. The first element of this tuple is the new netloc. The\n entire tuple is returned.\n\n Returns a tuple containing the transformed url as item 0 and the\n original tuple returned by transform_netloc as item 1.\n ", "n_words": 53, "vocab_size": 35, "n_whitespaces": 71, "language": "en" } }, { "id": 247957, "commit_id": "2e2d8cc2f9b9af5f8b48d75e22c474e08feca236", "repo": "synapse", "path": "tests/rest/admin/test_server_notice.py", "file_name": "test_server_notice.py", "fun_name": "test_update_notice_user_name_when_changed", "commit_message": "Update the server notices user profile in room if changed. 
(#12115)", "code": "def test_update_notice_user_name_when_changed(self) -> None:\n \n server_notice_request_content = {\n \"user_id\": self.other_user,\n \"content\": {\"msgtype\": \"m.text\", \"body\": \"test msg one\"},\n }\n\n self.make_request(\n \"POST\",\n self.url,\n access_token=self.admin_user_tok,\n content=server_notice_request_content,\n )\n\n # simulate a change in server config after a server restart.\n new_display_name = \"new display name\"\n self.server_notices_manager._config.servernotices.server_notices_mxid_display_name = (\n new_display_name\n )\n self.server_notices_manager.get_or_create_notice_room_for_user.cache.invalidate_all()\n\n self.make_request(\n \"POST\",\n self.url,\n access_token=self.admin_user_tok,\n content=server_notice_request_content,\n )\n\n invited_rooms = self._check_invite_and_join_status(self.other_user, 1, 0)\n notice_room_id = invited_rooms[0].room_id\n self.helper.join(\n room=notice_room_id, user=self.other_user, tok=self.other_user_token\n )\n\n notice_user_state_in_room = self.helper.get_state(\n notice_room_id,\n \"m.room.member\",\n self.other_user_token,\n state_key=\"@notices:test\",\n )\n self.assertEqual(notice_user_state_in_room[\"displayname\"], new_display_name)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 383, "n_words": 74, "vocab_size": 57, "complexity": 1, "nloc": 38, "token_counts": 175, "n_ast_nodes": 282, "n_identifiers": 31, "d_id": 72024, "documentation": { "docstring": "\n Tests that existing server notices user name in room is updated after\n server notice config changes.\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 38, "language": "en" } }, { "id": 243941, "commit_id": "9bf37f509ddf6aea1be3a4ad19036f96b9fc3902", "repo": "mmdetection", "path": "mmdet/core/bbox/assigners/max_iou_assigner.py", "file_name": "max_iou_assigner.py", "fun_name": "assign_wrt_overlaps", "commit_message": "fix typos in comment (#7124)\n\nbbox A's assigned_gt_inds will be overwritten to be bbox 2 instead of bbox B (In the previous content, bbox B was not mentioned).", "code": "def assign_wrt_overlaps(self, overlaps, gt_labels=None):\n \n num_gts, num_bboxes = overlaps.size(0), overlaps.size(1)\n\n # 1. assign -1 by default\n assigned_gt_inds = overlaps.new_full((num_bboxes, ),\n -1,\n dtype=torch.long)\n\n if num_gts == 0 or num_bboxes == 0:\n # No ground truth or boxes, return empty assignment\n max_overlaps = overlaps.new_zeros((num_bboxes, ))\n if num_gts == 0:\n # No truth, assign everything to background\n assigned_gt_inds[:] = 0\n if gt_labels is None:\n assigned_labels = None\n else:\n assigned_labels = overlaps.new_full((num_bboxes, ),\n -1,\n dtype=torch.long)\n return AssignResult(\n num_gts,\n assigned_gt_inds,\n max_overlaps,\n labels=assigned_labels)\n\n # for each anchor, which gt best overlaps with it\n # for each anchor, the max iou of all gts\n max_overlaps, argmax_overlaps = overlaps.max(dim=0)\n # for each gt, which anchor best overlaps with it\n # for each gt, the max iou of all proposals\n gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)\n\n # 2. 
assign negative: below\n # the negative inds are set to be 0\n if isinstance(self.neg_iou_thr, float):\n assigned_gt_inds[(max_overlaps >= 0)\n & (max_overlaps < self.neg_iou_thr)] = 0\n elif isinstance(self.neg_iou_thr, tuple):\n assert len(self.neg_iou_thr) == 2\n assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0])\n & (max_overlaps < self.neg_iou_thr[1])] = 0\n\n # 3. assign positive: above positive IoU threshold\n pos_inds = max_overlaps >= self.pos_iou_thr\n assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1\n\n if self.match_low_quality:\n # Low-quality matching will overwrite the assigned_gt_inds assigned\n # in Step 3. Thus, the assigned gt might not be the best one for\n # prediction.\n # For example, if bbox A has 0.9 and 0.8 iou with GT bbox 1 & 2,\n # bbox 1 will be assigned as the best target for bbox A in step 3.\n # However, if GT bbox 2's gt_argmax_overlaps = A, bbox A's\n # assigned_gt_inds will be overwritten to be bbox 2.\n # This might be the reason that it is not used in ROI Heads.\n for i in range(num_gts):\n if gt_max_overlaps[i] >= self.min_pos_iou:\n if self.gt_max_assign_all:\n max_iou_inds = overlaps[i, :] == gt_max_overlaps[i]\n assigned_gt_inds[max_iou_inds] = i + 1\n else:\n assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1\n\n if gt_labels is not None:\n assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)\n pos_inds = torch.nonzero(\n assigned_gt_inds > 0, as_tuple=False).squeeze()\n if pos_inds.numel() > 0:\n assigned_labels[pos_inds] = gt_labels[\n assigned_gt_inds[pos_inds] - 1]\n else:\n assigned_labels = None\n\n return AssignResult(\n num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 1286, "n_words": 342, "vocab_size": 168, "complexity": 13, "nloc": 50, "token_counts": 379, "n_ast_nodes": 593, "n_identifiers": 39, "d_id": 70154, "documentation": { "docstring": "Assign w.r.t. the overlaps of bboxes with gts.\n\n Args:\n overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes,\n shape(k, n).\n gt_labels (Tensor, optional): Labels of k gt_bboxes, shape (k, ).\n\n Returns:\n :obj:`AssignResult`: The assign result.\n ", "n_words": 35, "vocab_size": 32, "n_whitespaces": 104, "language": "en" } }, { "id": 100733, "commit_id": "afec52309326304f4323029039e49bfcf928ef43", "repo": "faceswap", "path": "lib/logger.py", "file_name": "logger.py", "fun_name": "_rewrite_warnings", "commit_message": "Bugfixes:\n - Stats graph - Handle NaNs in data\n - logger - de-elevate matplotlib font messages", "code": "def _rewrite_warnings(cls, record):\n \n if record.levelno == 30 and record.funcName == \"warn\" and record.module == \"ag_logging\":\n # TF 2.3 in Conda is imported with the wrong gast(0.4 when 0.3.3 should be used). This\n # causes warnings in autograph. 
They don't appear to impact performance so de-elevate\n # warning to debug\n record.levelno = 10\n record.levelname = \"DEBUG\"\n\n if record.levelno == 30 and (record.funcName == \"_tfmw_add_deprecation_warning\" or\n record.module in (\"deprecation\", \"deprecation_wrapper\")):\n # Keras Deprecations.\n record.levelno = 10\n record.levelname = \"DEBUG\"\n\n return record\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 231, "n_words": 79, "vocab_size": 55, "complexity": 7, "nloc": 9, "token_counts": 74, "n_ast_nodes": 134, "n_identifiers": 7, "d_id": 20188, "documentation": { "docstring": " Change certain warning messages from WARNING to DEBUG to avoid passing non-important\n information to output.\n\n Parameters\n ----------\n record: :class:`logging.LogRecord`\n The log record to check for rewriting\n\n Returns\n -------\n :class:`logging.LogRecord`\n The log rewritten or untouched record\n\n ", "n_words": 35, "vocab_size": 28, "n_whitespaces": 114, "language": "en" } }, { "id": 101635, "commit_id": "2d312a9db228c025d0bd2ea7a4f747a2c644b5d8", "repo": "faceswap", "path": "tools/alignments/alignments.py", "file_name": "alignments.py", "fun_name": "_find_alignments", "commit_message": "Minor updates and fixups\n - Mask Tool - Typing + BiSeNet mask update fix\n - Alignments Tool - Auto search for alignments file", "code": "def _find_alignments(self) -> str:\n \n fname = self._args.alignments_file\n frames = self._args.frames_dir\n if fname and os.path.isfile(fname) and os.path.splitext(fname)[-1].lower() == \".fsa\":\n return fname\n if fname:\n logger.error(\"Not a valid alignments file: '%s'\", fname)\n sys.exit(1)\n\n if not frames or not os.path.exists(frames):\n logger.error(\"Not a valid frames folder: '%s'. 
Can't scan for alignments.\", frames)\n sys.exit(1)\n\n fname = \"alignments.fsa\"\n if os.path.isdir(frames) and os.path.exists(os.path.join(frames, fname)):\n return fname\n\n if os.path.isdir(frames) or os.path.splitext(frames)[-1] not in _video_extensions:\n logger.error(\"Can't find a valid alignments file in location: %s\", frames)\n sys.exit(1)\n\n fname = f\"{os.path.splitext(frames)[0]}_{fname}\"\n if not os.path.exists(fname):\n logger.error(\"Can't find a valid alignments file for video: %s\", frames)\n sys.exit(1)\n\n return fname\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 289, "n_words": 95, "vocab_size": 50, "complexity": 12, "nloc": 32, "token_counts": 204, "n_ast_nodes": 360, "n_identifiers": 21, "d_id": 21043, "documentation": { "docstring": " If an alignments folder is required and hasn't been provided, scan for a file based on\n the video folder.\n\n Exits if an alignments file cannot be located\n\n Returns\n -------\n str\n The full path to an alignments file\n ", "n_words": 37, "vocab_size": 31, "n_whitespaces": 91, "language": "en" } }, { "id": 271605, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/engine/training.py", "file_name": "training.py", "fun_name": "build", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def build(self, input_shape):\n \n if self._is_graph_network:\n super().build(input_shape)\n return\n\n if input_shape is None:\n raise ValueError(\n \"Input shape must be defined when calling `build()` on \"\n \"a `Model` subclass.\"\n )\n valid_types = (tuple, list, tf.TensorShape, dict)\n if not isinstance(input_shape, valid_types):\n raise ValueError(\n \"Specified input shape is not one of the valid types. \"\n \"Please specify a batch input shape of type tuple or \"\n \"list of input shapes. User provided \"\n \"input type: {}.\".format(type(input_shape))\n )\n\n if input_shape and not self.inputs:\n # We create placeholders for the `None`s in the shape and build the model\n # in a Graph. 
Since tf.Variable is compatible with both eager execution\n # and graph building, the variables created after building the model in\n # a Graph are still valid when executing eagerly.\n if tf.executing_eagerly():\n graph = tf.__internal__.FuncGraph(\"build_graph\")\n else:\n graph = backend.get_graph()\n with graph.as_default():\n if isinstance(input_shape, list) and all(\n d is None or isinstance(d, int) for d in input_shape\n ):\n input_shape = tuple(input_shape)\n if isinstance(input_shape, list):\n x = [\n base_layer_utils.generate_placeholders_from_shape(shape)\n for shape in input_shape\n ]\n elif isinstance(input_shape, dict):\n x = {\n k: base_layer_utils.generate_placeholders_from_shape(\n shape\n )\n for k, shape in input_shape.items()\n }\n else:\n x = base_layer_utils.generate_placeholders_from_shape(\n input_shape\n )\n\n kwargs = {}\n call_signature = self._call_spec.full_argspec\n call_args = call_signature.args\n # Exclude `self`, `inputs`, and any argument with a default value.\n if len(call_args) > 2:\n if call_signature.defaults:\n call_args = call_args[2 : -len(call_signature.defaults)]\n else:\n call_args = call_args[2:]\n for arg in call_args:\n if arg == \"training\":\n # Case where `training` is a positional arg with no default.\n kwargs[\"training\"] = False\n else:\n # Has invalid call signature with unknown positional arguments.\n raise ValueError(\n \"Currently, you cannot build your model if it has \"\n \"positional or keyword arguments that are not \"\n \"inputs to the model, but are required for its \"\n \"`call()` method. Instead, in order to instantiate \"\n \"and build your model, `call()` your model on real \"\n \"tensor data with all expected call arguments. The argument \"\n \"for `call()` can be a single list/tuple that contains \"\n \"multiple inputs.\"\n )\n elif len(call_args) < 2:\n # Signature without `inputs`.\n raise ValueError(\n \"You can only call `build()` on a model if its `call()` \"\n \"method accepts an `inputs` argument.\"\n )\n try:\n self.call(x, **kwargs)\n except (tf.errors.InvalidArgumentError, TypeError) as e:\n raise ValueError(\n \"You cannot build your model by calling `build` \"\n \"if your layers do not support float type inputs. \"\n \"Instead, in order to instantiate and build your \"\n \"model, call your model on real tensor data (of \"\n \"the correct dtype).\\n\\nThe actual error from \"\n f\"`call` is: {e}.\"\n )\n super().build(input_shape)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 2068, "n_words": 414, "vocab_size": 227, "complexity": 21, "nloc": 82, "token_counts": 345, "n_ast_nodes": 609, "n_identifiers": 46, "d_id": 80825, "documentation": { "docstring": "Builds the model based on input shapes received.\n\n This is to be used for subclassed models, which do not know at instantiation\n time what their inputs look like.\n\n This method only exists for users who want to call `model.build()` in a\n standalone way (as a substitute for calling the model on real data to\n build it). It will never be called by the framework (and thus it will\n never throw unexpected errors in an unrelated workflow).\n\n Args:\n input_shape: Single tuple, `TensorShape` instance, or list/dict of shapes,\n where shapes are tuples, integers, or `TensorShape` instances.\n\n Raises:\n ValueError:\n 1. In case of invalid user-provided data (not of type tuple,\n list, `TensorShape`, or dict).\n 2. 
If the model requires call arguments that are agnostic\n to the input shapes (positional or keyword arg in call signature).\n 3. If not all layers were properly built.\n 4. If float type inputs are not supported within the layers.\n\n In each of these cases, the user should build their model by calling it\n on real tensor data.\n ", "n_words": 169, "vocab_size": 117, "n_whitespaces": 349, "language": "en" } }, { "id": 184610, "commit_id": "7df1c123e9fbc8641052a30ba74282f9d9ec1870", "repo": "textual", "path": "src/textual/file_monitor.py", "file_name": "file_monitor.py", "fun_name": "check", "commit_message": "docstrings", "code": "def check(self) -> bool:\n \n modified = self._get_modified()\n changed = modified != self._modified\n self._modified = modified\n return changed\n", "url": "https://github.com/Textualize/textual.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 52, "n_words": 17, "vocab_size": 11, "complexity": 1, "nloc": 6, "token_counts": 29, "n_ast_nodes": 50, "n_identifiers": 7, "d_id": 44712, "documentation": { "docstring": "Check the monitored file. Return True if it was changed.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 323160, "commit_id": "44a290e94d1becd1f09fddc3d873f9e19c9d6919", "repo": "PaddleNLP", "path": "paddlenlp/trainer/trainer_callback.py", "file_name": "trainer_callback.py", "fun_name": "_new_training", "commit_message": "[Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. (#1761)\n\n* add some datasets for finetune.\r\n\r\n* support fine tune for all tastks.\r\n\r\n* add trainer prototype.\r\n\r\n* init verison for paddlenlp trainer.\r\n\r\n* refine trainer.\r\n\r\n* update for some details.\r\n\r\n* support multi-cards training evaluation.\r\n\r\n* support load from ckpt.\r\n\r\n* support for export inference model.\r\n\r\n* first version of trainer.\r\n\r\n* seq cls support clue.\r\n\r\n* trainer support for token classification and question answersing tasks.\r\n\r\n* fix as reviews.\r\n\r\nCo-authored-by: Zeyu Chen ", "code": "def _new_training(self):\n \n self.should_training_stop = False\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 21, "n_identifiers": 3, "d_id": 118391, "documentation": { "docstring": "Internal method that resets the variable for a new training.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 253285, "commit_id": "07a40208a32cb2d48a1f2a24d2569894b5a378a0", "repo": "mitmproxy", "path": "mitmproxy/coretypes/serializable.py", "file_name": "serializable.py", "fun_name": "set_state", "commit_message": "`rm -rf stateobject`", "code": "def set_state(self, state):\n \n raise NotImplementedError()\n", "url": "https://github.com/mitmproxy/mitmproxy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 22, "n_identifiers": 4, "d_id": 74053, "documentation": { "docstring": "\n Set object state to the given state. 
Consumes the passed state.\n May return a `dataclasses.FrozenInstanceError` if the object is immutable.\n ", "n_words": 20, "vocab_size": 16, "n_whitespaces": 42, "language": "en" } }, { "id": 98544, "commit_id": "94c896a4a3663abbd31775957f1aa5448fde5491", "repo": "sentry", "path": "src/sentry/eventstore/base.py", "file_name": "base.py", "fun_name": "get_next_event_id", "commit_message": "ref: clean up sentry flake8 plugin (#33847)\n\n* fix: Remove unused `# noqa` lint disable comments\r\n\r\n* ref: clean up sentry flake8 plugin\r\n\r\n- remove S005: pyupgrade handles this for us\r\n- remove `pycodestyle` handling: flake8 does this natively\r\n- clean up the ignore list and use extend-ignore", "code": "def get_next_event_id(self, event, snuba_filter):\n \n raise NotImplementedError\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 20, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 12, "n_ast_nodes": 20, "n_identifiers": 5, "d_id": 19582, "documentation": { "docstring": "\n Gets the next event given a current event and some conditions/filters.\n Returns a tuple of (project_id, event_id)\n\n Arguments:\n event (Event): Event object\n snuba_filter (Filter): Filter\n ", "n_words": 25, "vocab_size": 22, "n_whitespaces": 68, "language": "en" } }, { "id": 162744, "commit_id": "9120cdffe618c6c2ff16fe6a311b6a1367efdbc8", "repo": "AutoEq", "path": "research/neo_peq/legacy_frequency_response.py", "file_name": "legacy_frequency_response.py", "fun_name": "center", "commit_message": "Added PEQ configs to CLI and function interfaces. Improved default value handling for PEQ parameters and added more predefined configs. Removed legacy PEQ optimization. Fixed readme write. Improved shelf filter initialization. Added plot method to PEQ. Notebook for comparing old and new optimizers. 
Bug fixes.", "code": "def center(self, frequency=1000):\n \n equal_energy_fr = self.__class__(name='equal_energy', frequency=self.frequency.copy(), raw=self.raw.copy())\n equal_energy_fr.interpolate()\n interpolator = InterpolatedUnivariateSpline(np.log10(equal_energy_fr.frequency), equal_energy_fr.raw, k=1)\n if type(frequency) in [list, np.ndarray] and len(frequency) > 1:\n # Use the average of the gain values between the given frequencies as the difference to be subtracted\n diff = np.mean(equal_energy_fr.raw[np.logical_and(\n equal_energy_fr.frequency >= frequency[0],\n equal_energy_fr.frequency <= frequency[1]\n )])\n else:\n if type(frequency) in [list, np.ndarray]:\n # List or array with only one element\n frequency = frequency[0]\n # Use the gain value at the given frequency as the difference to be subtracted\n diff = interpolator(np.log10(frequency))\n\n self.raw -= diff\n if len(self.smoothed):\n self.smoothed -= diff\n if len(self.error):\n self.error += diff\n if len(self.error_smoothed):\n self.error_smoothed += diff\n\n # Everything but raw, smoothed, errors and target is affected by centering, reset them\n self.reset(raw=False, smoothed=False, error=False, error_smoothed=False, target=False)\n\n return -diff\n", "url": "https://github.com/jaakkopasanen/AutoEq.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 375, "n_words": 125, "vocab_size": 87, "complexity": 7, "nloc": 22, "token_counts": 225, "n_ast_nodes": 353, "n_identifiers": 26, "d_id": 39282, "documentation": { "docstring": "Removed bias from frequency response.\n\n Args:\n frequency: Frequency which is set to 0 dB. If this is a list with two values then an average between the two\n frequencies is set to 0 dB.\n\n Returns:\n Gain shifted\n ", "n_words": 37, "vocab_size": 30, "n_whitespaces": 102, "language": "en" } }, { "id": 124636, "commit_id": "b3878e26d765e28dd7c69abadbd856181037db97", "repo": "ray", "path": "python/ray/air/config.py", "file_name": "config.py", "fun_name": "additional_resources_per_worker", "commit_message": "[AIR] Fix `ResourceChangingScheduler` not working with AIR (#26307)\n\nThis PR ensures that the new trial resources set by `ResourceChangingScheduler` are respected by the train loop logic by modifying the scaling config to match. Previously, even though trials had their resources updated, the scaling config was not modified which lead to eg. new workers not being spawned in the `DataParallelTrainer` even though resources were available.\r\n\r\nIn order to accomplish this, `ScalingConfigDataClass` is updated to allow equality comparisons with other `ScalingConfigDataClass`es (using the underlying PGF) and to create a `ScalingConfigDataClass` from a PGF.\r\n\r\nPlease note that this is an internal only change intended to actually make `ResourceChangingScheduler` work. In the future, `ResourceChangingScheduler` should be updated to operate on `ScalingConfigDataClass`es instead of PGFs as it is now. 
That will require a deprecation cycle.", "code": "def additional_resources_per_worker(self):\n \n return {\n k: v\n for k, v in self._resources_per_worker_not_none.items()\n if k not in [\"CPU\", \"GPU\"]\n }\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 72, "n_words": 18, "vocab_size": 16, "complexity": 3, "nloc": 6, "token_counts": 33, "n_ast_nodes": 56, "n_identifiers": 6, "d_id": 27641, "documentation": { "docstring": "Resources per worker, not including CPU or GPU resources.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 243769, "commit_id": "2ae55ccbdad9c842929fb238ea1eb81d1f999024", "repo": "Pillow", "path": "src/PIL/ImageMorph.py", "file_name": "ImageMorph.py", "fun_name": "match", "commit_message": "Improve exception traceback readability", "code": "def match(self, image):\n \n if self.lut is None:\n msg = \"No operator loaded\"\n raise Exception(msg)\n\n if image.mode != \"L\":\n msg = \"Image mode must be L\"\n raise ValueError(msg)\n return _imagingmorph.match(bytes(self.lut), image.im.id)\n", "url": "https://github.com/python-pillow/Pillow.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 102, "n_words": 30, "vocab_size": 26, "complexity": 3, "nloc": 8, "token_counts": 56, "n_ast_nodes": 96, "n_identifiers": 12, "d_id": 70119, "documentation": { "docstring": "Get a list of coordinates matching the morphological operation on\n an image.\n\n Returns a list of tuples of (x,y) coordinates\n of all matching pixels. See :ref:`coordinate-system`.", "n_words": 26, "vocab_size": 19, "n_whitespaces": 46, "language": "en" } }, { "id": 6465, "commit_id": "cbff12a584ac253b6953551fecd8a66afc320de7", "repo": "ludwig", "path": "ludwig/utils/checkpoint_utils.py", "file_name": "checkpoint_utils.py", "fun_name": "get_files", "commit_message": "Fixes FileExistsError thrown after training on windows completes (#1845)\n\n* Catch exception when os.rename throws when renaming checkpoint.\r\n\r\n* Filter out -tmp prefix (or any other) when sorting files in get_files.\r\n\r\n* Use os.replace instead of os.rename, this works on windows\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\n* Add comment to sort.\r\n\r\n* [pre-commit.ci] auto fixes from pre-commit.com hooks\r\n\r\nfor more information, see https://pre-commit.ci\r\n\r\nCo-authored-by: Daniel Treiman \r\nCo-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>", "code": "def get_files(d, pattern, sort=True):\n \n files = glob(osp.join(d, pattern))\n files = [f for f in files if osp.isfile(f)]\n if sort:\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 10, "n_whitespaces": 31, "n_words": 19, "vocab_size": 15, "complexity": 4, "nloc": 7, "token_counts": 80, "n_ast_nodes": 69, "n_identifiers": 10, "d_id": 1000, "documentation": { "docstring": "Return a list of files in a given directory.\n\n Args:\n d (str): The path to the directory.\n pattern (str): The wildcard to filter files with.\n sort (bool): Whether to sort the returned list. 
Assumes filenames contain a number value to sort by (tmp-001).\n ", "n_words": 43, "vocab_size": 31, "n_whitespaces": 64, "language": "en" } }, { "id": 197247, "commit_id": "c03c0eb2136e693b8431c19dd3294d832b4a394c", "repo": "sympy", "path": "sympy/physics/vector/vector.py", "file_name": "vector.py", "fun_name": "free_symbols", "commit_message": "Add .free_dynamicsymbols to physics vectors.", "code": "def free_symbols(self, reference_frame):\n \n\n return self.to_matrix(reference_frame).free_symbols\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 19, "n_words": 5, "vocab_size": 5, "complexity": 1, "nloc": 2, "token_counts": 17, "n_ast_nodes": 29, "n_identifiers": 4, "d_id": 48408, "documentation": { "docstring": "Returns the free symbols in the measure numbers of the vector\n expressed in the given reference frame.\n\n Parameters\n ==========\n reference_frame : ReferenceFrame\n The frame with respect to which the free symbols of the given\n vector is to be determined.\n\n Returns\n =======\n set of Symbol\n set of symbols present in the measure numbers of\n ``reference_frame``.\n\n See Also\n ========\n\n - :meth:`~sympy.core.basic.Basic.free_symbols`\n\n ", "n_words": 59, "vocab_size": 37, "n_whitespaces": 180, "language": "en" } }, { "id": 22123, "commit_id": "cd5a9683be69c86c8f3adcd13385a9bc5db198ec", "repo": "pipenv", "path": "pipenv/patched/pip/_vendor/requests/sessions.py", "file_name": "sessions.py", "fun_name": "should_strip_auth", "commit_message": "Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.", "code": "def should_strip_auth(self, old_url, new_url):\n \n old_parsed = urlparse(old_url)\n new_parsed = urlparse(new_url)\n if old_parsed.hostname != new_parsed.hostname:\n return True\n # Special case: allow http -> https redirect when using the standard\n # ports. 
This isn't specified by RFC 7235, but is kept to avoid\n # breaking backwards compatibility with older versions of requests\n # that allowed any redirects on the same host.\n if (\n old_parsed.scheme == \"http\"\n and old_parsed.port in (80, None)\n and new_parsed.scheme == \"https\"\n and new_parsed.port in (443, None)\n ):\n return False\n\n # Handle default port usage corresponding to scheme.\n changed_port = old_parsed.port != new_parsed.port\n changed_scheme = old_parsed.scheme != new_parsed.scheme\n default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None)\n if (\n not changed_scheme\n and old_parsed.port in default_port\n and new_parsed.port in default_port\n ):\n return False\n\n # Standard case: root URI must match\n return changed_port or changed_scheme\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 366, "n_words": 130, "vocab_size": 87, "complexity": 10, "nloc": 22, "token_counts": 128, "n_ast_nodes": 206, "n_identifiers": 15, "d_id": 4199, "documentation": { "docstring": "Decide whether Authorization header should be removed when redirecting", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 265088, "commit_id": "84f056171286d18c1c14a2fc9d28155a7dcf169a", "repo": "netbox", "path": "netbox/dcim/models/racks.py", "file_name": "racks.py", "fun_name": "units", "commit_message": "Initial work on half-height RUs", "code": "def units(self):\n \n max_position = self.u_height + decimal.Decimal(0.5)\n if self.desc_units:\n drange(0.5, max_position, 0.5)\n return drange(max_position, 0.5, -0.5)\n", "url": "https://github.com/netbox-community/netbox.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 55, "n_words": 16, "vocab_size": 16, "complexity": 2, "nloc": 5, "token_counts": 51, "n_ast_nodes": 65, "n_identifiers": 8, "d_id": 77978, "documentation": { "docstring": "\n Return a list of unit numbers, top to bottom.\n ", "n_words": 9, "vocab_size": 9, "n_whitespaces": 24, "language": "en" } }, { "id": 5869, "commit_id": "4fb8f63181f5153b4f6778c6ef8dad61022c4f3f", "repo": "ludwig", "path": "tests/integration_tests/test_visualization.py", "file_name": "test_visualization.py", "fun_name": "test_visualization_compare_classifiers_from_pred_csv_output_saved", "commit_message": "Use tempfile to automatically garbage collect data and modeling artifacts in ludwig integration tests. 
(#1642)\n\n* Use tmpdir to automatically garbage collect data and modeling artifacts in ludwig integration tests.", "code": "def test_visualization_compare_classifiers_from_pred_csv_output_saved(csv_filename):\n \n input_features = [category_feature(vocab_size=10)]\n output_features = [category_feature(vocab_size=2, reduce_input=\"sum\")]\n\n # Generate test data\n rel_path = generate_data(input_features, output_features, csv_filename)\n exp_dir_name = run_experiment_with_visualization(input_features, output_features, dataset=rel_path)\n vis_output_pattern_pdf = os.path.join(exp_dir_name, \"*.pdf\")\n vis_output_pattern_png = os.path.join(exp_dir_name, \"*.png\")\n output_feature_name = get_output_feature_name(exp_dir_name)\n prediction = os.path.join(exp_dir_name, PREDICTIONS_PARQUET_FILE_NAME)\n experiment_source_data_name = csv_filename.split(\".\")[0]\n ground_truth = experiment_source_data_name + \".csv\"\n split_file = experiment_source_data_name + \".split.csv\"\n ground_truth_metadata = experiment_source_data_name + \".meta.json\"\n test_cmd_pdf = [\n \"python\",\n \"-m\",\n \"ludwig.visualize\",\n \"--visualization\",\n \"compare_classifiers_performance_from_pred\",\n \"--ground_truth_metadata\",\n ground_truth_metadata,\n \"--ground_truth\",\n ground_truth,\n \"--output_feature_name\",\n output_feature_name,\n \"--split_file\",\n split_file,\n \"--predictions\",\n prediction,\n prediction,\n \"--model_names\",\n \"Model1\",\n \"Model2\",\n \"-od\",\n exp_dir_name,\n ]\n test_cmd_png = test_cmd_pdf.copy() + [\"-ff\", \"png\"]\n\n commands = [test_cmd_pdf, test_cmd_png]\n vis_patterns = [vis_output_pattern_pdf, vis_output_pattern_png]\n\n for command, viz_pattern in zip(commands, vis_patterns):\n result = subprocess.run(command)\n figure_cnt = glob.glob(viz_pattern)\n\n assert 0 == result.returncode\n assert 1 == len(figure_cnt)\n\n", "url": "https://github.com/ludwig-ai/ludwig.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 350, "n_words": 115, "vocab_size": 86, "complexity": 2, "nloc": 44, "token_counts": 234, "n_ast_nodes": 385, "n_identifiers": 41, "d_id": 864, "documentation": { "docstring": "Ensure pdf and png figures from the experiments can be saved.\n\n Predictions are loaded from csv file.\n :param csv_filename: csv fixture from tests.fixtures.filenames.csv_filename\n :return: None\n ", "n_words": 25, "vocab_size": 22, "n_whitespaces": 37, "language": "en" } }, { "id": 195861, "commit_id": "cda8dfe6f45dc5ed394c2f5cda706cd6c729f713", "repo": "sympy", "path": "sympy/functions/elementary/miscellaneous.py", "file_name": "miscellaneous.py", "fun_name": "real_root", "commit_message": "Improved documentation formatting", "code": "def real_root(arg, n=None, evaluate=None):\n r\n from sympy.functions.elementary.complexes import Abs, im, sign\n from sympy.functions.elementary.piecewise import Piecewise\n if n is not None:\n return Piecewise(\n (root(arg, n, evaluate=evaluate), Or(Eq(n, S.One), Eq(n, S.NegativeOne))),\n (Mul(sign(arg), root(Abs(arg), n, evaluate=evaluate), evaluate=evaluate),\n And(Eq(im(arg), S.Zero), Eq(Mod(n, 2), S.One))),\n (root(arg, n, evaluate=evaluate), True))\n rv = sympify(arg)\n n1pow = Transform(lambda x: -(-x.base)**x.exp,\n lambda x:\n x.is_Pow and\n x.base.is_negative and\n x.exp.is_Rational and\n x.exp.p == 1 and x.exp.q % 2)\n return rv.xreplace(n1pow)\n\n###############################################################################\n############################# MINIMUM and MAXIMUM 
#############################\n###############################################################################\n\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 248, "n_words": 75, "vocab_size": 58, "complexity": 6, "nloc": 61, "token_counts": 221, "n_ast_nodes": 322, "n_identifiers": 36, "d_id": 47448, "documentation": { "docstring": "Return the real *n*'th-root of *arg* if possible.\n\n Parameters\n ==========\n\n n : int or None, optional\n If *n* is ``None``, then all instances of\n $(-n)^{1/\\text{odd}}$ will be changed to $-n^{1/\\text{odd}}$.\n This will only create a real root of a principal root.\n The presence of other factors may cause the result to not be\n real.\n\n evaluate : bool, optional\n The parameter determines if the expression should be evaluated.\n If ``None``, its value is taken from\n ``global_parameters.evaluate``.\n\n Examples\n ========\n\n >>> from sympy import root, real_root\n\n >>> real_root(-8, 3)\n -2\n >>> root(-8, 3)\n 2*(-1)**(1/3)\n >>> real_root(_)\n -2\n\n If one creates a non-principal root and applies real_root, the\n result will not be real (so use with caution):\n\n >>> root(-8, 3, 2)\n -2*(-1)**(2/3)\n >>> real_root(_)\n -2*(-1)**(2/3)\n\n See Also\n ========\n\n sympy.polys.rootoftools.rootof\n sympy.core.power.integer_nthroot\n root, sqrt\n ", "n_words": 128, "vocab_size": 88, "n_whitespaces": 259, "language": "en" } }, { "id": 127153, "commit_id": "4692e8d8023e789120d3f22b41ffb136b50f70ea", "repo": "ray", "path": "python/ray/_private/worker.py", "file_name": "worker.py", "fun_name": "get_dashboard_url", "commit_message": "[core] Don't override external dashboard URL in internal KV store (#27901)\n\nFix 2.0.0 release blocker bug where Ray State API and Jobs not accessible if the override URL doesn't support adding additional subpaths. 
This PR keeps the localhost dashboard URL in the internal KV store and only overrides in values printed or returned to the user.\r\nimages.githubusercontent.com/6900234/184809934-8d150874-90fe-4b45-a13d-bce1807047de.png\">", "code": "def get_dashboard_url():\n \n if ray_constants.RAY_OVERRIDE_DASHBOARD_URL in os.environ:\n return _remove_protocol_from_url(\n os.environ.get(ray_constants.RAY_OVERRIDE_DASHBOARD_URL)\n )\n else:\n worker = global_worker\n worker.check_connected()\n return _global_node.webui_url\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 72, "n_words": 17, "vocab_size": 16, "complexity": 2, "nloc": 9, "token_counts": 42, "n_ast_nodes": 72, "n_identifiers": 12, "d_id": 28373, "documentation": { "docstring": "Get the URL to access the Ray dashboard.\n\n Note that the URL does not specify which node the dashboard is on.\n\n Returns:\n The URL of the dashboard as a string.\n ", "n_words": 30, "vocab_size": 23, "n_whitespaces": 46, "language": "en" } }, { "id": 215096, "commit_id": "f1c37893caf90738288e789c3233ab934630254f", "repo": "salt", "path": "tests/pytests/unit/modules/test_aixpkg.py", "file_name": "test_aixpkg.py", "fun_name": "test_install_non_rpm_using_dnf_gen_error", "commit_message": "Working tests for install", "code": "def test_install_non_rpm_using_dnf_gen_error():\n \n info_fake_error = \n dnf_call = MagicMock(\n return_value={\"retcode\": 1, \"stdout\": \"\", \"stderr\": info_fake_error}\n )\n list_pkgs_mock = MagicMock(side_effect=[{\"info\": \"6.6-2\"}, {\"info\": \"6.6-2\"}])\n with patch(\"pathlib.Path.is_file\", return_value=True):\n with patch.dict(\n aixpkg.__salt__,\n {\"cmd.run_all\": dnf_call, \"config.get\": MagicMock(return_value=False)},\n ), patch.object(aixpkg, \"list_pkgs\", list_pkgs_mock):\n expected = {\n \"changes\": {},\n \"errors\": [info_fake_error],\n }\n with pytest.raises(CommandExecutionError) as exc_info:\n aixpkg.install(\"info_fake.rpm\")\n assert exc_info.value.info == expected, exc_info.value.info\n assert dnf_call.call_count == 1\n libpath_env = {\"LIBPATH\": \"/opt/freeware/lib:/usr/lib\"}\n dnf_call.assert_any_call(\n \"/opt/freeware/bin/dnf install --allowerasing --assumeyes info_fake.rpm\",\n env=libpath_env,\n ignore_retcode=True,\n python_shell=False,\n )\n\n", "url": "https://github.com/saltstack/salt.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 326, "n_words": 70, "vocab_size": 60, "complexity": 1, "nloc": 29, "token_counts": 172, "n_ast_nodes": 303, "n_identifiers": 26, "d_id": 53813, "documentation": { "docstring": "\n Test install of non rpm using dnf which should generate an error\n Last metadata expiration check: 1 day, 23:40:22 ago on Mon Dec 6 19:26:36 EST 2021.\nNo match for argument: info_fake\nError: Unable to find a match: info_fake\n", "n_words": 39, "vocab_size": 38, "n_whitespaces": 44, "language": "en" } }, { "id": 60741, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/index/package_finder.py", "file_name": "package_finder.py", "fun_name": "get_install_candidate", "commit_message": "upd; format", "code": "def get_install_candidate(self, link_evaluator, link):\n # type: (LinkEvaluator, Link) -> Optional[InstallationCandidate]\n \n is_candidate, result = link_evaluator.evaluate_link(link)\n if not is_candidate:\n if result:\n self._log_skipped_link(link, 
reason=result)\n return None\n\n return InstallationCandidate(\n name=link_evaluator.project_name,\n link=link,\n version=result,\n )\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 141, "n_words": 29, "vocab_size": 27, "complexity": 3, "nloc": 11, "token_counts": 57, "n_ast_nodes": 89, "n_identifiers": 13, "d_id": 12270, "documentation": { "docstring": "\n If the link is a candidate for install, convert it to an\n InstallationCandidate and return it. Otherwise, return None.\n ", "n_words": 19, "vocab_size": 18, "n_whitespaces": 41, "language": "en" } }, { "id": 139252, "commit_id": "5d9bf4234a038e16eaa73fed7d4ec8cef3f0038f", "repo": "ray", "path": "python/ray/ml/examples/upload_to_wandb.py", "file_name": "upload_to_wandb.py", "fun_name": "get_train_dataset", "commit_message": "[air] Example to track runs with Weights & Biases (#24459)\n\nThis PR \r\n- adds an example on how to run Ray Train and log results to weights & biases\r\n- adds functionality to the W&B plugin to store checkpoints\r\n- fixes a bug introduced in #24017\r\n- Adds a CI utility script to setup credentials\r\n- Adds a CI utility script to remove test state from external services cc @simon-mo", "code": "def get_train_dataset() -> ray.data.Dataset:\n \n data_raw = load_breast_cancer(as_frame=True)\n df = data_raw[\"data\"]\n df[\"target\"] = data_raw[\"target\"]\n return ray.data.from_pandas(df)\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 30, "n_words": 15, "vocab_size": 13, "complexity": 1, "nloc": 6, "token_counts": 43, "n_ast_nodes": 76, "n_identifiers": 9, "d_id": 31643, "documentation": { "docstring": "Return the \"Breast cancer\" dataset as a Ray dataset.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 164842, "commit_id": "769fc54897953d366d573d4e45cf13177a5f582b", "repo": "pandas", "path": "pandas/core/internals/blocks.py", "file_name": "blocks.py", "fun_name": "where", "commit_message": "REF: dispatch Block.fillna to putmask/where (#45911)", "code": "def where(self, other, cond, _downcast=\"infer\") -> list[Block]:\n \n assert cond.ndim == self.ndim\n assert not isinstance(other, (ABCIndex, ABCSeries, ABCDataFrame))\n\n transpose = self.ndim == 2\n\n # EABlocks override where\n values = cast(np.ndarray, self.values)\n orig_other = other\n if transpose:\n values = values.T\n\n icond, noop = validate_putmask(values, ~cond)\n if noop:\n # GH-39595: Always return a copy; short-circuit up/downcasting\n return [self.copy()]\n\n if other is lib.no_default:\n other = self.fill_value\n\n other = self._standardize_fill_value(other)\n\n try:\n # try/except here is equivalent to a self._can_hold_element check,\n # but this gets us back 'casted' which we will re-use below;\n # without using 'casted', expressions.where may do unwanted upcasts.\n casted = np_can_hold_element(values.dtype, other)\n except (ValueError, TypeError, LossySetitemError):\n # we cannot coerce, return a compat dtype\n\n if self.ndim == 1 or self.shape[0] == 1:\n # no need to split columns\n\n block = self.coerce_to_target_dtype(other)\n blocks = block.where(orig_other, cond)\n return self._maybe_downcast(blocks, downcast=_downcast)\n\n else:\n # since _maybe_downcast would split blocks anyway, we\n # can avoid some potential upcast/downcast by splitting\n # on the front end.\n is_array = isinstance(other, 
(np.ndarray, ExtensionArray))\n\n res_blocks = []\n nbs = self._split()\n for i, nb in enumerate(nbs):\n oth = other\n if is_array:\n # we have a different value per-column\n oth = other[:, i : i + 1]\n\n submask = cond[:, i : i + 1]\n rbs = nb.where(oth, submask, _downcast=_downcast)\n res_blocks.extend(rbs)\n return res_blocks\n\n else:\n other = casted\n alt = setitem_datetimelike_compat(values, icond.sum(), other)\n if alt is not other:\n if is_list_like(other) and len(other) < len(values):\n # call np.where with other to get the appropriate ValueError\n np.where(~icond, values, other)\n raise NotImplementedError(\n \"This should not be reached; call to np.where above is \"\n \"expected to raise ValueError. Please report a bug at \"\n \"github.com/pandas-dev/pandas\"\n )\n result = values.copy()\n np.putmask(result, icond, alt)\n else:\n # By the time we get here, we should have all Series/Index\n # args extracted to ndarray\n if (\n is_list_like(other)\n and not isinstance(other, np.ndarray)\n and len(other) == self.shape[-1]\n ):\n # If we don't do this broadcasting here, then expressions.where\n # will broadcast a 1D other to be row-like instead of\n # column-like.\n other = np.array(other).reshape(values.shape)\n # If lengths don't match (or len(other)==1), we will raise\n # inside expressions.where, see test_series_where\n\n # Note: expressions.where may upcast.\n result = expressions.where(~icond, values, other)\n # The np_can_hold_element check _should_ ensure that we always\n # have result.dtype == self.dtype here.\n\n if transpose:\n result = result.T\n\n return [self.make_block(result)]\n", "url": "https://github.com/pandas-dev/pandas.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 19, "n_whitespaces": 1483, "n_words": 377, "vocab_size": 229, "complexity": 16, "nloc": 71, "token_counts": 422, "n_ast_nodes": 680, "n_identifiers": 63, "d_id": 39610, "documentation": { "docstring": "\n evaluate the block; return result block(s) from the result\n\n Parameters\n ----------\n other : a ndarray/object\n cond : np.ndarray[bool], SparseArray[bool], or BooleanArray\n _downcast : str or None, default \"infer\"\n Private because we only specify it when calling from fillna.\n\n Returns\n -------\n List[Block]\n ", "n_words": 41, "vocab_size": 35, "n_whitespaces": 123, "language": "en" } }, { "id": 30469, "commit_id": "06a84e0400b7f7f847a7a7d06eedba766cdbced3", "repo": "spotify-downloader", "path": "spotdl/utils/ffmpeg.py", "file_name": "ffmpeg.py", "fun_name": "download_ffmpeg", "commit_message": "added option to preserve original audio", "code": "def download_ffmpeg() -> Path:\n \n\n os_name = platform.system().lower()\n os_arch = platform.machine().lower()\n\n ffmpeg_url = FFMPEG_URLS.get(os_name, {}).get(os_arch)\n ffmpeg_path = Path(\n os.path.join(\n get_spotdl_path(), \"ffmpeg\" + (\".exe\" if os_name == \"windows\" else \"\")\n )\n )\n\n if ffmpeg_url is None:\n raise FFmpegError(\"FFmpeg binary is not available for your system.\")\n\n # Download binary and save it to a file in spotdl directory\n ffmpeg_binary = requests.get(ffmpeg_url, allow_redirects=True, timeout=10).content\n with open(ffmpeg_path, \"wb\") as ffmpeg_file:\n ffmpeg_file.write(ffmpeg_binary)\n\n # Set executable permission on linux and mac\n if os_name in [\"linux\", \"darwin\"]:\n ffmpeg_path.chmod(ffmpeg_path.stat().st_mode | stat.S_IEXEC)\n\n return ffmpeg_path\n\n", "url": "https://github.com/spotDL/spotify-downloader.git", "language": "Python", "ast_errors": "", "n_ast_errors": 
0, "ast_levels": 15, "n_whitespaces": 169, "n_words": 84, "vocab_size": 68, "complexity": 4, "nloc": 28, "token_counts": 143, "n_ast_nodes": 249, "n_identifiers": 29, "d_id": 5607, "documentation": { "docstring": "\n Download ffmpeg binary to spotdl directory.\n\n ### Returns\n - Path to ffmpeg binary.\n\n ### Notes\n - ffmpeg is downloaded from github releases\n for current platform and architecture.\n - executable permission is set for ffmpeg binary.\n ", "n_words": 35, "vocab_size": 25, "n_whitespaces": 64, "language": "en" } }, { "id": 222529, "commit_id": "8198943edd73a363c266633e1aa5b2a9e9c9f526", "repo": "XX-Net", "path": "python3.10.4/Lib/dis.py", "file_name": "dis.py", "fun_name": "dis", "commit_message": "add python 3.10.4 for windows", "code": "def dis(x=None, *, file=None, depth=None):\n \n if x is None:\n distb(file=file)\n return\n # Extract functions from methods.\n if hasattr(x, '__func__'):\n x = x.__func__\n # Extract compiled code objects from...\n if hasattr(x, '__code__'): # ...a function, or\n x = x.__code__\n elif hasattr(x, 'gi_code'): #...a generator object, or\n x = x.gi_code\n elif hasattr(x, 'ag_code'): #...an asynchronous generator object, or\n x = x.ag_code\n elif hasattr(x, 'cr_code'): #...a coroutine.\n x = x.cr_code\n # Perform the disassembly.\n if hasattr(x, '__dict__'): # Class or module\n items = sorted(x.__dict__.items())\n for name, x1 in items:\n if isinstance(x1, _have_code):\n print(\"Disassembly of %s:\" % name, file=file)\n try:\n dis(x1, file=file, depth=depth)\n except TypeError as msg:\n print(\"Sorry:\", msg, file=file)\n print(file=file)\n elif hasattr(x, 'co_code'): # Code object\n _disassemble_recursive(x, file=file, depth=depth)\n elif isinstance(x, (bytes, bytearray)): # Raw bytecode\n _disassemble_bytes(x, file=file)\n elif isinstance(x, str): # Source code\n _disassemble_str(x, file=file, depth=depth)\n else:\n raise TypeError(\"don't know how to disassemble %s objects\" %\n type(x).__name__)\n", "url": "https://github.com/XX-net/XX-Net.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 421, "n_words": 145, "vocab_size": 96, "complexity": 14, "nloc": 33, "token_counts": 249, "n_ast_nodes": 413, "n_identifiers": 29, "d_id": 56619, "documentation": { "docstring": "Disassemble classes, methods, functions, and other compiled objects.\n\n With no argument, disassemble the last traceback.\n\n Compiled objects currently include generator objects, async generator\n objects, and coroutine objects, all of which store their code object\n in a special attribute.\n ", "n_words": 38, "vocab_size": 34, "n_whitespaces": 53, "language": "en" } }, { "id": 159776, "commit_id": "66a61b03658f3c9f312505dcf7eab07e4cf91ac6", "repo": "numpy", "path": "numpy/lib/tests/test_io.py", "file_name": "test_io.py", "fun_name": "test_loadtxt_converter_with_unicode_dtype", "commit_message": "Port over tests from npreadtext test suite\n\n- Add test for parsing scientific notation.\n- Add multiple-char comment test.\n- Port over tests for structured dtypes.\n- Add tests for exceptions on skiprows/max_rows.\n- port over ndmin tests.\n- Make structured data reusable, add unpack tests.\n- Port over delimiter tests.\n- Port over maxrows test w/ various dtypes.\n- Port over test of exception msg on parse failure.\n- Port over test for converters w/neg indices.\n- Port over usecols tests\n- Port over unicode tests.\n- Port over more converter tests.\n- Port over test for large rows.\n- Port over test for string-len discovery.\n- 
Port over float conversion accuracy test.\n- Port over bool test.\n- Add test for implicit float->int conversion.\n- Port over complex parsing tests.\n- Port over tests for reading from generator.\n- Port over object cleanup test.\n- Port over bytes incompat test.\n- Port over converters tests.\n\nCo-authored-by: Warren Weckesser \nCo-authored-by: Sebastian Berg ", "code": "def test_loadtxt_converter_with_unicode_dtype():\n \n txt = StringIO('abc,def\\nrst,xyz')\n conv = bytes.upper\n res = np.loadtxt(txt, dtype=np.dtype(\"U3\"), converters=conv, delimiter=\",\")\n expected = np.array([['ABC', 'DEF'], ['RST', 'XYZ']])\n assert_equal(res, expected)\n\n", "url": "https://github.com/numpy/numpy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 40, "n_words": 22, "vocab_size": 19, "complexity": 1, "nloc": 6, "token_counts": 67, "n_ast_nodes": 118, "n_identifiers": 15, "d_id": 38421, "documentation": { "docstring": "\n With the default 'bytes' encoding, tokens are encoded prior to being passed\n to the converter. This means that the output of the converter may be bytes\n instead of unicode as expected by `read_rows`.\n\n This test checks that outputs from the above scenario are properly decoded\n prior to parsing by `read_rows`.\n ", "n_words": 50, "vocab_size": 37, "n_whitespaces": 69, "language": "en" } }, { "id": 101242, "commit_id": "5e73437be47f2410439a3c6716de96354e6a0c94", "repo": "faceswap", "path": "plugins/extract/align/_base.py", "file_name": "_base.py", "fun_name": "get_batch", "commit_message": "lib.align updates:\n - alignments.py\n - Add typed dicts for imported alignments\n - Explicitly check for presence of thumb value in alignments dict\n - linting\n - detected_face.py\n - Typing\n - Linting\n - Legacy support for pre-aligned face\n - Update dependencies to new property names", "code": "def get_batch(self, queue):\n \n exhausted = False\n batch = {}\n idx = 0\n while idx < self.batchsize:\n item = self._collect_item(queue)\n if item == \"EOF\":\n logger.trace(\"EOF received\")\n exhausted = True\n break\n # Put frames with no faces into the out queue to keep TQDM consistent\n if not item.detected_faces:\n self._queues[\"out\"].put(item)\n continue\n\n converted_image = item.get_image_copy(self.color_format)\n for f_idx, face in enumerate(item.detected_faces):\n batch.setdefault(\"image\", []).append(converted_image)\n batch.setdefault(\"detected_faces\", []).append(face)\n batch.setdefault(\"filename\", []).append(item.filename)\n idx += 1\n if idx == self.batchsize:\n frame_faces = len(item.detected_faces)\n if f_idx + 1 != frame_faces:\n self._rollover = ExtractMedia(\n item.filename,\n item.image,\n detected_faces=item.detected_faces[f_idx + 1:])\n logger.trace(\"Rolled over %s faces of %s to next batch for '%s'\",\n len(self._rollover.detected_faces), frame_faces,\n item.filename)\n break\n if batch:\n logger.trace(\"Returning batch: %s\", {k: v.shape if isinstance(v, np.ndarray) else v\n for k, v in batch.items()})\n else:\n logger.trace(item)\n return exhausted, batch\n", "url": "https://github.com/deepfakes/faceswap.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 20, "n_whitespaces": 721, "n_words": 123, "vocab_size": 91, "complexity": 10, "nloc": 36, "token_counts": 255, "n_ast_nodes": 416, "n_identifiers": 35, "d_id": 20662, "documentation": { "docstring": " Get items for inputting into the aligner from the queue in batches\n\n Items are returned from the ``queue`` in batches of\n 
:attr:`~plugins.extract._base.Extractor.batchsize`\n\n Items are received as :class:`~plugins.extract.pipeline.ExtractMedia` objects and converted\n to ``dict`` for internal processing.\n\n To ensure consistent batch sizes for aligner the items are split into separate items for\n each :class:`~lib.align.DetectedFace` object.\n\n Remember to put ``'EOF'`` to the out queue after processing\n the final batch\n\n Outputs items in the following format. All lists are of length\n :attr:`~plugins.extract._base.Extractor.batchsize`:\n\n >>> {'filename': [],\n >>> 'image': [],\n >>> 'detected_faces': [[ None:\n \n event_type = \"org.matrix.test_state\"\n\n # This content will be updated later on, and since we actually use a reference on\n # the dict it does the right thing. It's a bit hacky but a handy way of making\n # sure the state actually gets updated.\n event_content = {\"i\": -1}\n\n api = self.hs.get_module_api()\n\n # Define a callback that sends a custom event on power levels update.", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 124, "n_words": 68, "vocab_size": 55, "complexity": 2, "nloc": 20, "token_counts": 118, "n_ast_nodes": 57, "n_identifiers": 7, "d_id": 71598, "documentation": { "docstring": "Tests that a state event sent by a module while processing another state event\n doesn't get dropped from the state of the room. This is to guard against a bug\n where Synapse has been observed doing so, see https://github.com/matrix-org/synapse/issues/10830\n ", "n_words": 39, "vocab_size": 33, "n_whitespaces": 60, "language": "en" } }, { "id": 123112, "commit_id": "803b90729d25fda253011c505d0189e8e63cc039", "repo": "EasyOCR", "path": "easyocr/DBNet/DBNet.py", "file_name": "DBNet.py", "fun_name": "image2hmap", "commit_message": "add dbnet", "code": "def image2hmap(self, image_tensor):\n \n return self.model.forward(image_tensor, training=False)\n ", "url": "https://github.com/JaidedAI/EasyOCR.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 28, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 2, "token_counts": 21, "n_ast_nodes": 34, "n_identifiers": 6, "d_id": 27289, "documentation": { "docstring": "\n Run the model to obtain a heatmap tensor from a image tensor. The heatmap\n tensor indicates the probability of each pixel being a part of text area.\n\n Parameters\n ----------\n image_tensor : torch.tensor\n Image tensor.\n\n Returns\n -------\n torch.tensor\n Probability heatmap tensor.\n ", "n_words": 40, "vocab_size": 30, "n_whitespaces": 126, "language": "en" } }, { "id": 203671, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/auth/mixins.py", "file_name": "mixins.py", "fun_name": "get_permission_required", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_permission_required(self):\n \n if self.permission_required is None:\n raise ImproperlyConfigured(\n \"{0} is missing the permission_required attribute. 
Define {0}.permission_required, or override \"\n \"{0}.get_permission_required().\".format(self.__class__.__name__)\n )\n if isinstance(self.permission_required, str):\n perms = (self.permission_required,)\n else:\n perms = self.permission_required\n return perms\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 142, "n_words": 33, "vocab_size": 27, "complexity": 3, "nloc": 11, "token_counts": 55, "n_ast_nodes": 94, "n_identifiers": 10, "d_id": 50504, "documentation": { "docstring": "\n Override this method to override the permission_required attribute.\n Must return an iterable.\n ", "n_words": 12, "vocab_size": 12, "n_whitespaces": 34, "language": "en" } }, { "id": 224546, "commit_id": "9c0a8e50b11b70f803500cd73e7256b63f64b5e3", "repo": "mkdocs", "path": "mkdocs/config/base.py", "file_name": "base.py", "fun_name": "pre_validation", "commit_message": "Move some documentation into code, add misc API docs page (#2934)", "code": "def pre_validation(self, config, key_name):\n \n", "url": "https://github.com/mkdocs/mkdocs.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 11, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 1, "token_counts": 10, "n_ast_nodes": 17, "n_identifiers": 4, "d_id": 57334, "documentation": { "docstring": "\n Before all options are validated, perform a pre-validation process.\n\n The pre-validation process method should be implemented by subclasses.\n ", "n_words": 18, "vocab_size": 17, "n_whitespaces": 40, "language": "en" } }, { "id": 178518, "commit_id": "bf42800f1ddfae3563a20d9e4fbc79265e8d5209", "repo": "Nuitka", "path": "nuitka/utils/Execution.py", "file_name": "Execution.py", "fun_name": "withEnvironmentVarOverridden", "commit_message": "Minor cleanups\n\n* Typo cleanups", "code": "def withEnvironmentVarOverridden(env_var_name, value):\n \n\n if env_var_name in os.environ:\n old_value = os.environ[env_var_name]\n else:\n old_value = None\n\n if value is not None:\n os.environ[env_var_name] = value\n elif old_value is not None:\n del os.environ[env_var_name]\n\n yield\n\n if old_value is None:\n if value is not None:\n del os.environ[env_var_name]\n else:\n os.environ[env_var_name] = old_value\n\n\n@contextmanager", "url": "https://github.com/Nuitka/Nuitka.git", "language": "Python", "ast_errors": "@contextmanager", "n_ast_errors": 1, "ast_levels": 11, "n_whitespaces": 122, "n_words": 46, "vocab_size": 20, "complexity": 6, "nloc": 15, "token_counts": 84, "n_ast_nodes": 137, "n_identifiers": 7, "d_id": 42726, "documentation": { "docstring": "Change an environment and restore it after context.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 244473, "commit_id": "924c381a78eb70cede198e042ef34e038e05c15a", "repo": "mmdetection", "path": "mmdet/models/detectors/base.py", "file_name": "base.py", "fun_name": "forward", "commit_message": "Modify RetinaNet model interface", "code": "def forward(self, data, optimizer=None, return_loss=False, **kwargs):\n \n batch_inputs, batch_data_samples = self.preprocss_data(data)\n\n if torch.onnx.is_in_onnx_export():\n # TODO: Delete\n assert len(batch_inputs) == 1\n return self.onnx_export(batch_inputs, batch_data_samples)\n\n if return_loss:\n losses = self.forward_train(batch_inputs, batch_data_samples,\n **kwargs)\n loss, log_vars = self._parse_losses(losses)\n\n outputs = dict(\n loss=loss,\n log_vars=log_vars,\n 
num_samples=len(batch_data_samples))\n return outputs\n else:\n # TODO: refactor and support aug test later\n assert isinstance(data[0]['inputs'], torch.Tensor), \\\n 'Only support simple test currently. Aug-test is ' \\\n 'not supported yet'\n return self.forward_simple_test(batch_inputs, batch_data_samples,\n **kwargs)\n", "url": "https://github.com/open-mmlab/mmdetection.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 372, "n_words": 70, "vocab_size": 55, "complexity": 3, "nloc": 20, "token_counts": 135, "n_ast_nodes": 209, "n_identifiers": 25, "d_id": 70395, "documentation": { "docstring": "The iteration step during training and testing. This method defines\n an iteration step during training and testing, except for the back\n propagation and optimizer updating during training, which are done in\n an optimizer hook.\n\n Args:\n data (list[dict]): The output of dataloader.\n optimizer (:obj:`torch.optim.Optimizer`, dict, Optional): The\n optimizer of runner. This argument is unused and reserved.\n Default to None.\n return_loss (bool): Whether to return loss. In general,\n it will be set to True during training and False\n during testing. Default to False.\n\n Returns:\n during training\n dict: It should contain at least 3 keys: ``loss``,\n ``log_vars``, ``num_samples``.\n - ``loss`` is a tensor for back propagation, which can be a\n weighted sum of multiple losses.\n - ``log_vars`` contains all the variables to be sent to the\n logger.\n - ``num_samples`` indicates the batch size (when the model\n is DDP, it means the batch size on each GPU), which is\n used for averaging the logs.\n\n during testing\n list(obj:`DetDataSample`): Detection results of the\n input images. Each DetDataSample usually contains\n ``pred_instances`` or ``pred_panoptic_seg`` or\n ``pred_sem_seg``.\n ", "n_words": 168, "vocab_size": 111, "n_whitespaces": 562, "language": "en" } }, { "id": 198467, "commit_id": "9d58006fc0a23afcba38f641c9472917c436428a", "repo": "sympy", "path": "sympy/core/basic.py", "file_name": "basic.py", "fun_name": "matches", "commit_message": "Code cleanup", "code": "def matches(self, expr, repl_dict=None, old=False):\n \n expr = sympify(expr)\n if not isinstance(expr, self.__class__):\n return None\n\n if repl_dict is None:\n repl_dict = {}\n else:\n repl_dict = repl_dict.copy()\n\n if self == expr:\n return repl_dict\n\n if len(self.args) != len(expr.args):\n return None\n\n d = repl_dict # already a copy\n for arg, other_arg in zip(self.args, expr.args):\n if arg == other_arg:\n continue\n if arg.is_Relational:\n try:\n d = arg.xreplace(d).matches(other_arg, d, old=old)\n except TypeError: # Should be InvalidComparisonError when introduced\n d = None\n else:\n d = arg.xreplace(d).matches(other_arg, d, old=old)\n if d is None:\n return None\n return d\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 375, "n_words": 88, "vocab_size": 52, "complexity": 10, "nloc": 26, "token_counts": 164, "n_ast_nodes": 260, "n_identifiers": 18, "d_id": 48951, "documentation": { "docstring": "\n Helper method for match() that looks for a match between Wild symbols\n in self and expressions in expr.\n\n Examples\n ========\n\n >>> from sympy import symbols, Wild, Basic\n >>> a, b, c = symbols('a b c')\n >>> x = Wild('x')\n >>> Basic(a + x, x).matches(Basic(a + b, c)) is None\n True\n >>> Basic(a + x, x).matches(Basic(a + b + c, b + c))\n {x_: b + c}\n ", "n_words": 
66, "vocab_size": 45, "n_whitespaces": 151, "language": "en" } }, { "id": 298521, "commit_id": "424731863423f7b7cd2e29f418ddbce01418828b", "repo": "core", "path": "homeassistant/components/xiaomi_miio/vacuum.py", "file_name": "vacuum.py", "fun_name": "supported_features", "commit_message": "Use VacuumEntityFeature in xiaomi_miio (#70564)", "code": "def supported_features(self):\n \n return self._attr_supported_features\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 10, "n_ast_nodes": 19, "n_identifiers": 3, "d_id": 97465, "documentation": { "docstring": "Flag vacuum cleaner robot features that are supported.", "n_words": 8, "vocab_size": 8, "n_whitespaces": 7, "language": "en" } }, { "id": 231821, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/layout/_xaxis.py", "file_name": "_xaxis.py", "fun_name": "overlaying", "commit_message": "switch to black .22", "code": "def overlaying(self):\n \n return self[\"overlaying\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 63265, "documentation": { "docstring": "\n If set a same-letter axis id, this axis is overlaid on top of\n the corresponding same-letter axis, with traces and axes\n visible for both axes. If False, this axis does not overlay any\n same-letter axes. In this case, for axes with overlapping\n domains only the highest-numbered axis will be visible.\n\n The 'overlaying' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['free']\n - A string that matches one of the following regular expressions:\n ['^x([2-9]|[1-9][0-9]+)?( domain)?$',\n '^y([2-9]|[1-9][0-9]+)?( domain)?$']\n\n Returns\n -------\n Any\n ", "n_words": 87, "vocab_size": 64, "n_whitespaces": 221, "language": "en" } }, { "id": 21627, "commit_id": "c69d55f7c82d5ae2cce542bcfb98d043ca4836a0", "repo": "pipenv", "path": "pipenv/patched/notpip/_vendor/urllib3/util/ssl_match_hostname.py", "file_name": "ssl_match_hostname.py", "fun_name": "match_hostname", "commit_message": "Vendor in pip 22.1.2", "code": "def match_hostname(cert, hostname):\n \n if not cert:\n raise ValueError(\n \"empty or no certificate, match_hostname needs a \"\n \"SSL socket or SSL context with either \"\n \"CERT_OPTIONAL or CERT_REQUIRED\"\n )\n try:\n # Divergence from upstream: ipaddress can't handle byte str\n host_ip = ipaddress.ip_address(_to_unicode(hostname))\n except (UnicodeError, ValueError):\n # ValueError: Not an IP address (common case)\n # UnicodeError: Divergence from upstream: Have to deal with ipaddress not taking\n # byte strings. 
addresses should be all ascii, so we consider it not\n # an ipaddress in this case\n host_ip = None\n except AttributeError:\n # Divergence from upstream: Make ipaddress library optional\n if ipaddress is None:\n host_ip = None\n else: # Defensive\n raise\n dnsnames = []\n san = cert.get(\"subjectAltName\", ())\n for key, value in san:\n if key == \"DNS\":\n if host_ip is None and _dnsname_match(value, hostname):\n return\n dnsnames.append(value)\n elif key == \"IP Address\":\n if host_ip is not None and _ipaddress_match(value, host_ip):\n return\n dnsnames.append(value)\n if not dnsnames:\n # The subject is only checked when there is no dNSName entry\n # in subjectAltName\n for sub in cert.get(\"subject\", ()):\n for key, value in sub:\n # XXX according to RFC 2818, the most specific Common Name\n # must be used.\n if key == \"commonName\":\n if _dnsname_match(value, hostname):\n return\n dnsnames.append(value)\n if len(dnsnames) > 1:\n raise CertificateError(\n \"hostname %r \"\n \"doesn't match either of %s\" % (hostname, \", \".join(map(repr, dnsnames)))\n )\n elif len(dnsnames) == 1:\n raise CertificateError(\"hostname %r doesn't match %r\" % (hostname, dnsnames[0]))\n else:\n raise CertificateError(\n \"no appropriate commonName or subjectAltName fields were found\"\n )\n", "url": "https://github.com/pypa/pipenv.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 715, "n_words": 244, "vocab_size": 147, "complexity": 19, "nloc": 45, "token_counts": 230, "n_ast_nodes": 401, "n_identifiers": 24, "d_id": 3963, "documentation": { "docstring": "Verify that *cert* (in decoded format as returned by\n SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125\n rules are followed, but IP addresses are not accepted for *hostname*.\n\n CertificateError is raised on failure. On success, the function\n returns nothing.\n ", "n_words": 40, "vocab_size": 36, "n_whitespaces": 56, "language": "en" } }, { "id": 249645, "commit_id": "3bbe532abb7bfc41467597731ac1a18c0331f539", "repo": "synapse", "path": "tests/rest/client/test_relations.py", "file_name": "test_relations.py", "fun_name": "test_include", "commit_message": "Add an API for listing threads in a room. 
(#13394)\n\nImplement the /threads endpoint from MSC3856.\r\n\r\nThis is currently unstable and behind an experimental configuration\r\nflag.\r\n\r\nIt includes a background update to backfill data, results from\r\nthe /threads endpoint will be partial until that finishes.", "code": "def test_include(self) -> None:\n \n # Thread 1 has the user as the root event.\n thread_1 = self.parent_id\n self._send_relation(\n RelationTypes.THREAD, \"m.room.test\", access_token=self.user2_token\n )\n\n # Thread 2 has the user replying.\n res = self.helper.send(self.room, body=\"Thread Root!\", tok=self.user2_token)\n thread_2 = res[\"event_id\"]\n self._send_relation(RelationTypes.THREAD, \"m.room.test\", parent_id=thread_2)\n\n # Thread 3 has the user not participating in.\n res = self.helper.send(self.room, body=\"Another thread!\", tok=self.user2_token)\n thread_3 = res[\"event_id\"]\n self._send_relation(\n RelationTypes.THREAD,\n \"m.room.test\",\n access_token=self.user2_token,\n parent_id=thread_3,\n )\n\n # All threads in the room.\n channel = self.make_request(\n \"GET\",\n f\"/_matrix/client/unstable/org.matrix.msc3856/rooms/{self.room}/threads\",\n access_token=self.user_token,\n )\n self.assertEquals(200, channel.code, channel.json_body)\n thread_roots = [ev[\"event_id\"] for ev in channel.json_body[\"chunk\"]]\n self.assertEqual(\n thread_roots, [thread_3, thread_2, thread_1], channel.json_body\n )\n\n # Only participated threads.\n channel = self.make_request(\n \"GET\",\n f\"/_matrix/client/unstable/org.matrix.msc3856/rooms/{self.room}/threads?include=participated\",\n access_token=self.user_token,\n )\n self.assertEquals(200, channel.code, channel.json_body)\n thread_roots = [ev[\"event_id\"] for ev in channel.json_body[\"chunk\"]]\n self.assertEqual(thread_roots, [thread_2, thread_1], channel.json_body)\n", "url": "https://github.com/matrix-org/synapse.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 441, "n_words": 120, "vocab_size": 70, "complexity": 3, "nloc": 35, "token_counts": 252, "n_ast_nodes": 409, "n_identifiers": 26, "d_id": 73041, "documentation": { "docstring": "Filtering threads to all or participated in should work.", "n_words": 9, "vocab_size": 9, "n_whitespaces": 8, "language": "en" } }, { "id": 151037, "commit_id": "dae3b3d86adde0f6c7065ce1d083d9ceac62e5ef", "repo": "freqtrade", "path": "freqtrade/freqai/freqai_interface.py", "file_name": "freqai_interface.py", "fun_name": "shutdown", "commit_message": "support shutting down freqai", "code": "def shutdown(self):\n \n logger.info(\"Stopping FreqAI\")\n self._stop_event.set()\n\n logger.info(\"Waiting on Training iteration\")\n for _thread in self._threads:\n _thread.join()\n", "url": "https://github.com/freqtrade/freqtrade.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 60, "n_words": 14, "vocab_size": 14, "complexity": 2, "nloc": 6, "token_counts": 37, "n_ast_nodes": 68, "n_identifiers": 9, "d_id": 34928, "documentation": { "docstring": "\n Cleans up threads on Shutdown, set stop event. 
Join threads to wait\n for current training iteration.\n ", "n_words": 16, "vocab_size": 15, "n_whitespaces": 38, "language": "en" } }, { "id": 206196, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/template/base.py", "file_name": "base.py", "fun_name": "render_annotated", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def render_annotated(self, context):\n \n try:\n return self.render(context)\n except Exception as e:\n if context.template.engine.debug:\n # Store the actual node that caused the exception.\n if not hasattr(e, \"_culprit_node\"):\n e._culprit_node = self\n if (\n not hasattr(e, \"template_debug\")\n and context.render_context.template.origin == e._culprit_node.origin\n ):\n e.template_debug = (\n context.render_context.template.get_exception_info(\n e,\n e._culprit_node.token,\n )\n )\n raise\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 356, "n_words": 47, "vocab_size": 39, "complexity": 6, "nloc": 18, "token_counts": 94, "n_ast_nodes": 154, "n_identifiers": 16, "d_id": 51403, "documentation": { "docstring": "\n Render the node. If debug is True and an exception occurs during\n rendering, the exception is annotated with contextual line information\n where it occurred in the template. For internal usage this method is\n preferred over using the render method directly.\n ", "n_words": 40, "vocab_size": 33, "n_whitespaces": 76, "language": "en" } }, { "id": 275623, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/optimizers/optimizer_v2/utils.py", "file_name": "utils.py", "fun_name": "filter_empty_gradients", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def filter_empty_gradients(grads_and_vars):\n \n grads_and_vars = tuple(grads_and_vars)\n if not grads_and_vars:\n return grads_and_vars\n\n filtered = []\n vars_with_empty_grads = []\n for grad, var in grads_and_vars:\n if grad is None:\n vars_with_empty_grads.append(var)\n else:\n filtered.append((grad, var))\n filtered = tuple(filtered)\n\n if not filtered:\n variable = ([v.name for _, v in grads_and_vars],)\n raise ValueError(\n f\"No gradients provided for any variable: {variable}. \"\n f\"Provided `grads_and_vars` is {grads_and_vars}.\"\n )\n if vars_with_empty_grads:\n logging.warning(\n (\n \"Gradients do not exist for variables %s when minimizing the loss. 
\"\n \"If you're using `model.compile()`, did you forget to provide a `loss`\"\n \"argument?\"\n ),\n ([v.name for v in vars_with_empty_grads]),\n )\n return filtered\n\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 13, "n_whitespaces": 303, "n_words": 95, "vocab_size": 69, "complexity": 8, "nloc": 28, "token_counts": 118, "n_ast_nodes": 203, "n_identifiers": 15, "d_id": 81435, "documentation": { "docstring": "Filter out `(grad, var)` pairs that have a gradient equal to `None`.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 203385, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admin/filters.py", "file_name": "filters.py", "fun_name": "expected_parameters", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def expected_parameters(self):\n \n raise NotImplementedError(\n \"subclasses of ListFilter must provide an expected_parameters() method\"\n )\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 45, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 4, "token_counts": 11, "n_ast_nodes": 23, "n_identifiers": 3, "d_id": 50351, "documentation": { "docstring": "\n Return the list of parameter names that are expected from the\n request's query string and that will be used by this filter.\n ", "n_words": 22, "vocab_size": 20, "n_whitespaces": 44, "language": "en" } }, { "id": 199851, "commit_id": "75e3143a934c3427f39b82613d77f6d6f55e00b4", "repo": "sympy", "path": "sympy/polys/appellseqs.py", "file_name": "appellseqs.py", "fun_name": "euler_poly", "commit_message": "Varying Dirichlet L-series for evalfing the André function", "code": "def euler_poly(n, x=None, polys=False):\n r\n return named_poly(n, dup_euler, QQ, \"Euler polynomial\", (x,), polys)\n", "url": "https://github.com/sympy/sympy.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 18, "n_words": 13, "vocab_size": 13, "complexity": 1, "nloc": 22, "token_counts": 33, "n_ast_nodes": 47, "n_identifiers": 7, "d_id": 49404, "documentation": { "docstring": "Generates the Euler polynomial `\\operatorname{E}_n(x)`.\n\n These are scaled and reindexed versions of the Genocchi polynomials:\n\n .. 
math :: \\operatorname{E}_n(x) = -\\frac{\\operatorname{G}_{n+1}(x)}{n+1}\n\n Parameters\n ==========\n\n n : int\n Degree of the polynomial.\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n\n See Also\n ========\n\n sympy.functions.combinatorial.numbers.euler\n ", "n_words": 51, "vocab_size": 44, "n_whitespaces": 98, "language": "en" } }, { "id": 192920, "commit_id": "a5536de95d8e703645e391f4bd885ef489ab35bd", "repo": "vision", "path": "torchvision/transforms/transforms.py", "file_name": "transforms.py", "fun_name": "forward", "commit_message": "Added antialias arg to resized crop transform and op (#6193)", "code": "def forward(self, img):\n \n i, j, h, w = self.get_params(img, self.scale, self.ratio)\n return F.resized_crop(img, i, j, h, w, self.size, self.interpolation, antialias=self.antialias)\n", "url": "https://github.com/pytorch/vision.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 41, "n_words": 20, "vocab_size": 17, "complexity": 1, "nloc": 3, "token_counts": 59, "n_ast_nodes": 82, "n_identifiers": 15, "d_id": 46921, "documentation": { "docstring": "\n Args:\n img (PIL Image or Tensor): Image to be cropped and resized.\n\n Returns:\n PIL Image or Tensor: Randomly cropped and resized image.\n ", "n_words": 22, "vocab_size": 17, "n_whitespaces": 66, "language": "en" } }, { "id": 227572, "commit_id": "43e3a4011080911901176aab919c0ecf5046ddd3", "repo": "plotly.py", "path": "packages/python/plotly/plotly/graph_objs/_pie.py", "file_name": "_pie.py", "fun_name": "pullsrc", "commit_message": "switch to black .22", "code": "def pullsrc(self):\n \n return self[\"pullsrc\"]\n", "url": "https://github.com/plotly/plotly.py.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 18, "n_words": 4, "vocab_size": 4, "complexity": 1, "nloc": 2, "token_counts": 11, "n_ast_nodes": 22, "n_identifiers": 2, "d_id": 59245, "documentation": { "docstring": "\n Sets the source reference on Chart Studio Cloud for `pull`.\n\n The 'pullsrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n ", "n_words": 27, "vocab_size": 25, "n_whitespaces": 77, "language": "en" } }, { "id": 127144, "commit_id": "61880591e97c2760d929c16263e039d503b1b9a9", "repo": "ray", "path": "rllib/algorithms/dt/tests/test_dt_model.py", "file_name": "test_dt_model.py", "fun_name": "_generate_input_dict", "commit_message": "[RLlib] Add DTTorchModel (#27872)", "code": "def _generate_input_dict(B, T, obs_space, action_space):\n \n # generate deterministic inputs\n # obs\n obs = np.arange(B * T * obs_space.shape[0], dtype=np.float32).reshape(\n (B, T, obs_space.shape[0])\n )\n # actions\n if isinstance(action_space, gym.spaces.Box):\n act = np.arange(B * T * action_space.shape[0], dtype=np.float32).reshape(\n (B, T, action_space.shape[0])\n )\n else:\n act = np.mod(np.arange(B * T, dtype=np.int32).reshape((B, T)), action_space.n)\n # returns to go\n rtg = np.arange(B * (T + 1), dtype=np.float32).reshape((B, T + 1, 1))\n # timesteps\n timesteps = np.stack([np.arange(T, dtype=np.int32) for _ in range(B)], axis=0)\n # attention mask\n mask = np.ones((B, T), dtype=np.float32)\n\n input_dict = SampleBatch(\n {\n SampleBatch.OBS: obs,\n SampleBatch.ACTIONS: act,\n SampleBatch.RETURNS_TO_GO: rtg,\n SampleBatch.T: timesteps,\n SampleBatch.ATTENTION_MASKS: mask,\n }\n )\n input_dict = convert_to_torch_tensor(input_dict)\n return 
input_dict\n\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 17, "n_whitespaces": 267, "n_words": 105, "vocab_size": 70, "complexity": 3, "nloc": 24, "token_counts": 266, "n_ast_nodes": 395, "n_identifiers": 35, "d_id": 28371, "documentation": { "docstring": "Generate input_dict that has completely fake values.", "n_words": 7, "vocab_size": 7, "n_whitespaces": 6, "language": "en" } }, { "id": 288740, "commit_id": "58d531841bc2197616e7285d0561a9935d9bda73", "repo": "core", "path": "tests/test_bootstrap.py", "file_name": "test_bootstrap.py", "fun_name": "test_empty_integrations_list_is_only_sent_at_the_end_of_bootstrap", "commit_message": "Fix typo SIGNAL_BOOTSTRAP_INTEGRATONS -> SIGNAL_BOOTSTRAP_INTEGRATIONS (#79970)", "code": "async def test_empty_integrations_list_is_only_sent_at_the_end_of_bootstrap(hass):\n \n order = []\n", "url": "https://github.com/home-assistant/core.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 7, "n_whitespaces": 12, "n_words": 6, "vocab_size": 6, "complexity": 1, "nloc": 35, "token_counts": 168, "n_ast_nodes": 21, "n_identifiers": 3, "d_id": 87893, "documentation": { "docstring": "Test empty integrations list is only sent at the end of bootstrap.", "n_words": 12, "vocab_size": 12, "n_whitespaces": 11, "language": "en" } }, { "id": 32328, "commit_id": "99eb9b523f9b9ea6096323ce5610ce6633acc88a", "repo": "transformers", "path": "examples/pytorch/test_accelerate_examples.py", "file_name": "test_accelerate_examples.py", "fun_name": "test_run_glue_no_trainer", "commit_message": "Fix `no_trainer` CI (#18242)\n\n* Fix all tests", "code": "def test_run_glue_no_trainer(self):\n tmp_dir = self.get_auto_remove_tmp_dir()\n testargs = f.split()\n\n if is_cuda_and_apex_available():\n testargs.append(\"--fp16\")\n\n run_command(self._launch_args + testargs)\n result = get_results(tmp_dir)\n self.assertGreaterEqual(result[\"eval_accuracy\"], 0.75)\n self.assertTrue(os.path.exists(os.path.join(tmp_dir, \"epoch_0\")))\n self.assertTrue(os.path.exists(os.path.join(tmp_dir, \"glue_no_trainer\")))\n", "url": "https://github.com/huggingface/transformers.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 89, "n_words": 23, "vocab_size": 20, "complexity": 2, "nloc": 22, "token_counts": 102, "n_ast_nodes": 180, "n_identifiers": 19, "d_id": 5902, "documentation": { "docstring": "\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n ", "n_words": 16, "vocab_size": 16, "n_whitespaces": 145, "language": "en" } }, { "id": 203843, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/gis/db/backends/postgis/operations.py", "file_name": "operations.py", "fun_name": "get_geom_placeholder", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_geom_placeholder(self, f, value, compiler):\n \n transform_func = self.spatial_function_name(\"Transform\")\n if hasattr(value, \"as_sql\"):\n if value.field.srid == f.srid:\n placeholder = \"%s\"\n else:\n placeholder = \"%s(%%s, %s)\" % (transform_func, f.srid)\n 
return placeholder\n\n # Get the srid for this object\n if value is None:\n value_srid = None\n else:\n value_srid = value.srid\n\n # Adding Transform() to the SQL placeholder if the value srid\n # is not equal to the field srid.\n if value_srid is None or value_srid == f.srid:\n placeholder = \"%s\"\n else:\n placeholder = \"%s(%%s, %s)\" % (transform_func, f.srid)\n\n return placeholder\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 14, "n_whitespaces": 270, "n_words": 86, "vocab_size": 45, "complexity": 6, "nloc": 17, "token_counts": 101, "n_ast_nodes": 174, "n_identifiers": 12, "d_id": 50554, "documentation": { "docstring": "\n Provide a proper substitution value for Geometries or rasters that are\n not in the SRID of the field. Specifically, this routine will\n substitute in the ST_Transform() function call.\n ", "n_words": 28, "vocab_size": 25, "n_whitespaces": 57, "language": "en" } }, { "id": 60887, "commit_id": "f638f5d0e6c8ebed0e69a6584bc7f003ec646580", "repo": "transferlearning", "path": ".venv/lib/python3.8/site-packages/pip/_internal/network/lazy_wheel.py", "file_name": "lazy_wheel.py", "fun_name": "truncate", "commit_message": "upd; format", "code": "def truncate(self, size=None):\n # type: (Optional[int]) -> int\n \n return self._file.truncate(size)\n", "url": "https://github.com/jindongwang/transferlearning.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 8, "n_whitespaces": 31, "n_words": 10, "vocab_size": 10, "complexity": 1, "nloc": 2, "token_counts": 19, "n_ast_nodes": 33, "n_identifiers": 4, "d_id": 12321, "documentation": { "docstring": "Resize the stream to the given size in bytes.\n\n If size is unspecified resize to the current position.\n The current stream position isn't changed.\n\n Return the new file size.\n ", "n_words": 29, "vocab_size": 22, "n_whitespaces": 57, "language": "en" } }, { "id": 139548, "commit_id": "5c96e7223b468fed6b6db763c837728c721f78cd", "repo": "ray", "path": "rllib/agents/dqn/tests/test_dqn.py", "file_name": "test_dqn.py", "fun_name": "test_dqn_compilation", "commit_message": "[RLlib] SimpleQ (minor cleanups) and DQN TrainerConfig objects. 
(#24584)", "code": "def test_dqn_compilation(self):\n \n num_iterations = 1\n config = dqn.dqn.DQNConfig().rollouts(num_rollout_workers=2)\n\n for _ in framework_iterator(config, with_eager_tracing=True):\n # Double-dueling DQN.\n print(\"Double-dueling\")\n plain_config = deepcopy(config)\n trainer = dqn.DQNTrainer(config=plain_config, env=\"CartPole-v0\")\n for i in range(num_iterations):\n results = trainer.train()\n check_train_results(results)\n print(results)\n\n check_compute_single_action(trainer)\n trainer.stop()\n\n # Rainbow.\n print(\"Rainbow\")\n rainbow_config = deepcopy(config).training(\n num_atoms=10, noisy=True, double_q=True, dueling=True, n_step=5\n )\n trainer = dqn.DQNTrainer(config=rainbow_config, env=\"CartPole-v0\")\n for i in range(num_iterations):\n results = trainer.train()\n check_train_results(results)\n print(results)\n\n check_compute_single_action(trainer)\n\n trainer.stop()\n", "url": "https://github.com/ray-project/ray.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 12, "n_whitespaces": 360, "n_words": 62, "vocab_size": 40, "complexity": 4, "nloc": 24, "token_counts": 172, "n_ast_nodes": 286, "n_identifiers": 31, "d_id": 31730, "documentation": { "docstring": "Test whether a DQNTrainer can be built on all frameworks.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 269921, "commit_id": "84afc5193d38057e2e2badf9c889ea87d80d8fbf", "repo": "keras", "path": "keras/callbacks.py", "file_name": "callbacks.py", "fun_name": "on_test_end", "commit_message": "Reformatting the codebase with black.\n\nPiperOrigin-RevId: 450093126", "code": "def on_test_end(self, logs=None):\n \n logs = self._process_logs(logs)\n for callback in self.callbacks:\n callback.on_test_end(logs)\n", "url": "https://github.com/keras-team/keras.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 9, "n_whitespaces": 43, "n_words": 11, "vocab_size": 11, "complexity": 2, "nloc": 4, "token_counts": 31, "n_ast_nodes": 51, "n_identifiers": 6, "d_id": 80332, "documentation": { "docstring": "Calls the `on_test_end` methods of its callbacks.\n\n Args:\n logs: Dict. 
Currently, no data is passed via this argument\n for this method, but that may change in the future.\n ", "n_words": 28, "vocab_size": 26, "n_whitespaces": 66, "language": "en" } }, { "id": 109939, "commit_id": "df6f95703b60348e01603f98a439b133da2938a0", "repo": "matplotlib", "path": "lib/mpl_toolkits/mplot3d/axes3d.py", "file_name": "axes3d.py", "fun_name": "_add_contourf_set", "commit_message": "Improve mpl_toolkit documentation", "code": "def _add_contourf_set(self, cset, zdir='z', offset=None):\n \n zdir = '-' + zdir\n\n midpoints = cset.levels[:-1] + np.diff(cset.levels) / 2\n # Linearly interpolate to get levels for any extensions\n if cset._extend_min:\n min_level = cset.levels[0] - np.diff(cset.levels[:2]) / 2\n midpoints = np.insert(midpoints, 0, min_level)\n if cset._extend_max:\n max_level = cset.levels[-1] + np.diff(cset.levels[-2:]) / 2\n midpoints = np.append(midpoints, max_level)\n\n for z, linec in zip(midpoints, cset.collections):\n if offset is not None:\n z = offset\n art3d.poly_collection_2d_to_3d(linec, z, zdir=zdir)\n linec.set_sort_zpos(z)\n return midpoints\n", "url": "https://github.com/matplotlib/matplotlib.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 15, "n_whitespaces": 221, "n_words": 73, "vocab_size": 52, "complexity": 5, "nloc": 15, "token_counts": 165, "n_ast_nodes": 257, "n_identifiers": 22, "d_id": 23846, "documentation": { "docstring": "\n Returns\n -------\n levels : `numpy.ndarray`\n Levels at which the filled contours are added.\n ", "n_words": 13, "vocab_size": 13, "n_whitespaces": 53, "language": "en" } }, { "id": 322093, "commit_id": "b0c35d5e1ff02a634fa26392b60d3885c2c78677", "repo": "PaddleNLP", "path": "paddlenlp/transformers/blenderbot_small/modeling.py", "file_name": "modeling.py", "fun_name": "forward", "commit_message": "Fix the attention mask for fp16 (#1585)", "code": "def forward(self, input_ids=None, attention_mask=None):\n \n if input_ids is None:\n raise ValueError(\"Input_ids cannot be None.\")\n inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale\n inputs_embed_pos = self.encoder_embed_positions(input_ids.shape)\n hidden_states = inputs_embeds + inputs_embed_pos\n hidden_states = self.encoder_layernorm_embedding(hidden_states)\n encoder_input = self.encoder_dropout(hidden_states)\n\n if attention_mask is None:\n attention_mask = paddle.cast(\n input_ids == self.pad_token_id,\n dtype=paddle.get_default_dtype()).unsqueeze([1, 2]) * -1e4\n attention_mask.stop_gradient = True\n\n encoder_output = self.encoder(encoder_input, src_mask=attention_mask)\n return encoder_output\n\n", "url": "https://github.com/PaddlePaddle/PaddleNLP.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 16, "n_whitespaces": 188, "n_words": 55, "vocab_size": 38, "complexity": 3, "nloc": 15, "token_counts": 123, "n_ast_nodes": 196, "n_identifiers": 25, "d_id": 118053, "documentation": { "docstring": "\n Returns:\n Tensor: The last hidden-states at the last layer of the encoder.\n It's data type should be `float` and has a shape of `(batch_size, seq_lens, hidden_size)`.\n ``seq_lens`` corresponds to the length of input sequence.\n ", "n_words": 34, "vocab_size": 29, "n_whitespaces": 82, "language": "en" } }, { "id": 204594, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/core/management/base.py", "file_name": "base.py", "fun_name": "no_translations", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def 
no_translations(handle_func):\n \n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 6, "n_whitespaces": 5, "n_words": 2, "vocab_size": 2, "complexity": 1, "nloc": 3, "token_counts": 10, "n_ast_nodes": 13, "n_identifiers": 2, "d_id": 50805, "documentation": { "docstring": "Decorator that forces a command to run with translations deactivated.", "n_words": 10, "vocab_size": 10, "n_whitespaces": 9, "language": "en" } }, { "id": 203580, "commit_id": "9c19aff7c7561e3a82978a272ecdaad40dda5c00", "repo": "django", "path": "django/contrib/admindocs/views.py", "file_name": "views.py", "fun_name": "get_return_data_type", "commit_message": "Refs #33476 -- Reformatted code with Black.", "code": "def get_return_data_type(func_name):\n \n if func_name.startswith(\"get_\"):\n if func_name.endswith(\"_list\"):\n return \"List\"\n elif func_name.endswith(\"_count\"):\n return \"Integer\"\n return \"\"\n\n", "url": "https://github.com/django/django.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 59, "n_words": 14, "vocab_size": 11, "complexity": 4, "nloc": 7, "token_counts": 36, "n_ast_nodes": 73, "n_identifiers": 4, "d_id": 50457, "documentation": { "docstring": "Return a somewhat-helpful data type given a function name", "n_words": 9, "vocab_size": 8, "n_whitespaces": 8, "language": "en" } }, { "id": 122864, "commit_id": "b8ae8e3fa10f9abe998459fac1513915acee776d", "repo": "jax", "path": "tests/filecheck/jax_filecheck_helpers.py", "file_name": "jax_filecheck_helpers.py", "fun_name": "print_ir", "commit_message": "(NFC) Prepare for migration from producing MHLO to producing StableHLO\n\nThis CL renames occurrences of \"mhlo\" in: 1) names, 2) tests, 3) prose in order\nto prepare for the upcoming migration.\n\nUnchanged occurrences:\n 1) Public API that contains \"mhlo\", e.g. XlaLowering.mhlo and the \"mhlo\"\n argument value in Lowering.as_text and Lowering.compiler_ir.\n 2) Documentation (changelog, JEPs, IR examples, etc).\n 3) One rare situation where prose says \"StableHLO\" and \"MHLO\" in one sentence,\n so both are necessary to disambiguate.\n\nPiperOrigin-RevId: 495771153", "code": "def print_ir(*prototypes):\n def lower(f):\n \n inputs = tree_util.tree_map(np.array, prototypes)\n flat_inputs, _ = tree_util.tree_flatten(inputs)\n shape_strs = \" \".join([f\"{x.dtype.name}[{','.join(map(str, x.shape))}]\"\n for x in flat_inputs])\n name = f.func.__name__ if hasattr(f, \"func\") else f.__name__\n print(f\"\\nTEST: {name} {shape_strs}\")\n print(jax.jit(f).lower(*inputs).compiler_ir())\n return lower\n", "url": "https://github.com/google/jax.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 18, "n_whitespaces": 84, "n_words": 35, "vocab_size": 31, "complexity": 1, "nloc": 3, "token_counts": 10, "n_ast_nodes": 198, "n_identifiers": 27, "d_id": 27259, "documentation": { "docstring": "Prints the MLIR IR that results from lowering `f`.\n\n The arguments to `f` are taken to be arrays shaped like `prototypes`.", "n_words": 21, "vocab_size": 20, "n_whitespaces": 23, "language": "en" } }, { "id": 85988, "commit_id": "245b174c30ba3d814043221faa327b1a14d64859", "repo": "sentry", "path": "tests/sentry/sentry_metrics/test_batch.py", "file_name": "test_batch.py", "fun_name": "test_extract_strings_with_rollout", "commit_message": "feat(indexer): Allow mechanism to not index tag values (#38837)\n\nThis PR adds mechanism to skip indexing tag values. 
The code is based on\r\nan option introduced in https://github.com/getsentry/sentry/pull/38758/\r\n\r\nAfter the change, when the option is configured, the indexer on\r\nperformance would send strings for tag values", "code": "def test_extract_strings_with_rollout(rollout_option, option_value, expected, set_sentry_option):\n \n if rollout_option:\n set_sentry_option(rollout_option, option_value)\n outer_message = _construct_outer_message(\n [\n (counter_payload, []),\n (counter_payload_org_70, []),\n (distribution_payload, []),\n (set_payload, []),\n ]\n )\n batch = IndexerBatch(UseCaseKey.PERFORMANCE, outer_message, rollout_option)\n\n assert batch.extract_strings() == expected\n\n", "url": "https://github.com/getsentry/sentry.git", "language": "Python", "ast_errors": "", "n_ast_errors": 0, "ast_levels": 11, "n_whitespaces": 115, "n_words": 32, "vocab_size": 28, "complexity": 2, "nloc": 13, "token_counts": 76, "n_ast_nodes": 111, "n_identifiers": 16, "d_id": 18068, "documentation": { "docstring": "\n Test that the indexer batch extracts the correct strings from the messages\n based on the rollout option name and the option value.\n ", "n_words": 22, "vocab_size": 17, "n_whitespaces": 32, "language": "en" } } ]