Kaizouku committed on
Commit 2260825 · verified · 1 Parent(s): 76e8c81

Upload 564 files

This view is limited to 50 files because it contains too many changes.
Files changed (50):
  1. public/gpt-2/gpt2/config.json +31 -0
  2. public/gpt-2/gpt2/merges.txt +0 -0
  3. public/gpt-2/gpt2/pytorch_model.bin +3 -0
  4. public/gpt-2/gpt2/vocab.json +0 -0
  5. public/gpt-2/packaging-21.0.dist-info/LICENSE +3 -0
  6. public/gpt-2/packaging-21.0.dist-info/LICENSE.APACHE +177 -0
  7. public/gpt-2/packaging-21.0.dist-info/LICENSE.BSD +23 -0
  8. public/gpt-2/packaging-21.0.dist-info/METADATA +425 -0
  9. public/gpt-2/packaging-21.0.dist-info/RECORD +19 -0
  10. public/gpt-2/packaging-21.0.dist-info/WHEEL +5 -0
  11. public/gpt-2/packaging-21.0.dist-info/top_level.txt +1 -0
  12. public/gpt-2/packaging/__about__.py +26 -0
  13. public/gpt-2/packaging/__init__.py +25 -0
  14. public/gpt-2/packaging/_manylinux.py +301 -0
  15. public/gpt-2/packaging/_musllinux.py +136 -0
  16. public/gpt-2/packaging/_structures.py +67 -0
  17. public/gpt-2/packaging/markers.py +304 -0
  18. public/gpt-2/packaging/py.typed +0 -0
  19. public/gpt-2/packaging/requirements.py +146 -0
  20. public/gpt-2/packaging/specifiers.py +828 -0
  21. public/gpt-2/packaging/tags.py +484 -0
  22. public/gpt-2/packaging/utils.py +136 -0
  23. public/gpt-2/packaging/version.py +504 -0
  24. public/gpt-2/transformers-4.9.1.dist-info/LICENSE +203 -0
  25. public/gpt-2/transformers-4.9.1.dist-info/METADATA +547 -0
  26. public/gpt-2/transformers-4.9.1.dist-info/RECORD +532 -0
  27. public/gpt-2/transformers-4.9.1.dist-info/WHEEL +5 -0
  28. public/gpt-2/transformers-4.9.1.dist-info/entry_points.txt +3 -0
  29. public/gpt-2/transformers-4.9.1.dist-info/top_level.txt +1 -0
  30. public/gpt-2/transformers/__init__.py +0 -0
  31. public/gpt-2/transformers/__init__.py.orig +0 -0
  32. public/gpt-2/transformers/activations.py +113 -0
  33. public/gpt-2/transformers/activations_tf.py +94 -0
  34. public/gpt-2/transformers/benchmark/__init__.py +0 -0
  35. public/gpt-2/transformers/benchmark/benchmark.py +267 -0
  36. public/gpt-2/transformers/benchmark/benchmark_args.py +115 -0
  37. public/gpt-2/transformers/benchmark/benchmark_args_tf.py +136 -0
  38. public/gpt-2/transformers/benchmark/benchmark_args_utils.py +145 -0
  39. public/gpt-2/transformers/benchmark/benchmark_tf.py +294 -0
  40. public/gpt-2/transformers/benchmark/benchmark_utils.py +909 -0
  41. public/gpt-2/transformers/commands/__init__.py +27 -0
  42. public/gpt-2/transformers/commands/add_new_model.py +228 -0
  43. public/gpt-2/transformers/commands/convert.py +179 -0
  44. public/gpt-2/transformers/commands/download.py +46 -0
  45. public/gpt-2/transformers/commands/env.py +89 -0
  46. public/gpt-2/transformers/commands/lfs.py +227 -0
  47. public/gpt-2/transformers/commands/run.py +112 -0
  48. public/gpt-2/transformers/commands/serving.py +231 -0
  49. public/gpt-2/transformers/commands/train.py +160 -0
  50. public/gpt-2/transformers/commands/transformers_cli.py +55 -0
public/gpt-2/gpt2/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_layer": 12,
+   "n_positions": 1024,
+   "resid_pdrop": 0.1,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "vocab_size": 50257
+ }
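
Note: this is the stock configuration of the 124M-parameter GPT-2 checkpoint. A minimal sketch (not part of the commit) of reading it directly, assuming the repository layout above:

```python
import json

# Path mirrors this repository's layout.
with open("public/gpt-2/gpt2/config.json") as f:
    cfg = json.load(f)

# 12 layers x 12 heads x 768-dim embeddings over a 1024-token context:
# the hyperparameters of GPT-2 "small".
assert (cfg["n_layer"], cfg["n_head"], cfg["n_embd"], cfg["n_ctx"]) == (12, 12, 768, 1024)
```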
public/gpt-2/gpt2/merges.txt ADDED
The diff for this file is too large to render.
public/gpt-2/gpt2/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c5d3f4b8b76583b422fcb9189ad6c89d5d97a094541ce8932dce3ecabde1421
+ size 548118077
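
Note: this file is not the model weights themselves but a Git LFS pointer; the ~548 MB binary is stored out of band and materialized by `git lfs` on checkout. A hedged sketch of validating a fetched blob against the pointer (the downloaded filename is hypothetical):

```python
import hashlib

def parse_lfs_pointer(text: str) -> dict:
    # A pointer file is a few "key value" lines: version, oid, size.
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

pointer = parse_lfs_pointer(open("public/gpt-2/gpt2/pytorch_model.bin").read())
expected_hex = pointer["oid"].split(":", 1)[1]  # "sha256:<hex>" -> "<hex>"

h = hashlib.sha256()
with open("pytorch_model.fetched.bin", "rb") as f:  # hypothetical fetched blob
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == expected_hex
assert int(pointer["size"]) == 548118077
```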
public/gpt-2/gpt2/vocab.json ADDED
The diff for this file is too large to render.
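
Note: config.json, pytorch_model.bin, vocab.json, and merges.txt together form a complete GPT-2 checkpoint. A minimal usage sketch with the transformers version bundled in this upload (4.9.1), assuming the blob behind the LFS pointer has been materialized:

```python
from transformers import GPT2LMHeadModel, GPT2Tokenizer

path = "public/gpt-2/gpt2"                       # this repository's layout
model = GPT2LMHeadModel.from_pretrained(path)    # reads config.json + pytorch_model.bin
tokenizer = GPT2Tokenizer.from_pretrained(path)  # reads vocab.json + merges.txt

ids = tokenizer("Hello, my dog is", return_tensors="pt").input_ids
out = model.generate(ids, do_sample=True, max_length=50)  # mirrors task_specific_params
print(tokenizer.decode(out[0]))
```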
public/gpt-2/packaging-21.0.dist-info/LICENSE ADDED
@@ -0,0 +1,3 @@
+ This software is made available under the terms of *either* of the licenses
+ found in LICENSE.APACHE or LICENSE.BSD. Contributions to this software is made
+ under the terms of *both* these licenses.
public/gpt-2/packaging-21.0.dist-info/LICENSE.APACHE ADDED
@@ -0,0 +1,177 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
public/gpt-2/packaging-21.0.dist-info/LICENSE.BSD ADDED
@@ -0,0 +1,23 @@
+ Copyright (c) Donald Stufft and individual contributors.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
public/gpt-2/packaging-21.0.dist-info/METADATA ADDED
@@ -0,0 +1,425 @@
+ Metadata-Version: 2.1
+ Name: packaging
+ Version: 21.0
+ Summary: Core utilities for Python packages
+ Home-page: https://github.com/pypa/packaging
+ Author: Donald Stufft and individual contributors
+ Author-email: donald@stufft.io
+ License: BSD-2-Clause or Apache-2.0
+ Platform: UNKNOWN
+ Classifier: Development Status :: 5 - Production/Stable
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: Apache Software License
+ Classifier: License :: OSI Approved :: BSD License
+ Classifier: Programming Language :: Python
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3 :: Only
+ Classifier: Programming Language :: Python :: 3.6
+ Classifier: Programming Language :: Python :: 3.7
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: Implementation :: CPython
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
+ Requires-Python: >=3.6
+ Description-Content-Type: text/x-rst
+ License-File: LICENSE
+ License-File: LICENSE.APACHE
+ License-File: LICENSE.BSD
+ Requires-Dist: pyparsing (>=2.0.2)
+
+ packaging
+ =========
+
+ .. start-intro
+
+ Reusable core utilities for various Python Packaging
+ `interoperability specifications <https://packaging.python.org/specifications/>`_.
+
+ This library provides utilities that implement the interoperability
+ specifications which have clearly one correct behaviour (eg: :pep:`440`)
+ or benefit greatly from having a single shared implementation (eg: :pep:`425`).
+
+ .. end-intro
+
+ The ``packaging`` project includes the following: version handling, specifiers,
+ markers, requirements, tags, utilities.
+
+ Documentation
+ -------------
+
+ The `documentation`_ provides information and the API for the following:
+
+ - Version Handling
+ - Specifiers
+ - Markers
+ - Requirements
+ - Tags
+ - Utilities
+
+ Installation
+ ------------
+
+ Use ``pip`` to install these utilities::
+
+     pip install packaging
+
+ Discussion
+ ----------
+
+ If you run into bugs, you can file them in our `issue tracker`_.
+
+ You can also join ``#pypa`` on Freenode to ask questions or get involved.
+
+
+ .. _`documentation`: https://packaging.pypa.io/
+ .. _`issue tracker`: https://github.com/pypa/packaging/issues
+
+
+ Code of Conduct
+ ---------------
+
+ Everyone interacting in the packaging project's codebases, issue trackers, chat
+ rooms, and mailing lists is expected to follow the `PSF Code of Conduct`_.
+
+ .. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md
+
+ Contributing
+ ------------
+
+ The ``CONTRIBUTING.rst`` file outlines how to contribute to this project as
+ well as how to report a potential security issue. The documentation for this
+ project also covers information about `project development`_ and `security`_.
+
+ .. _`project development`: https://packaging.pypa.io/en/latest/development/
+ .. _`security`: https://packaging.pypa.io/en/latest/security/
+
+ Project History
+ ---------------
+
+ Please review the ``CHANGELOG.rst`` file or the `Changelog documentation`_ for
+ recent changes and project history.
+
+ .. _`Changelog documentation`: https://packaging.pypa.io/en/latest/changelog/
+
+ Changelog
+ ---------
+
+ 21.0 - 2021-07-03
+ ~~~~~~~~~~~~~~~~~
+
+ * `packaging` is now only compatible with Python 3.6 and above.
+ * Add support for zip files in ``parse_sdist_filename`` (`#429 <https://github.com/pypa/packaging/issues/429>`__)
+
+ 20.9 - 2021-01-29
+ ~~~~~~~~~~~~~~~~~
+
+ * Run `isort <https://pypi.org/project/isort/>`_ over the code base (`#377 <https://github.com/pypa/packaging/issues/377>`__)
+ * Add support for the ``macosx_10_*_universal2`` platform tags (`#379 <https://github.com/pypa/packaging/issues/379>`__)
+ * Introduce ``packaging.utils.parse_wheel_filename()`` and ``parse_sdist_filename()``
+   (`#387 <https://github.com/pypa/packaging/issues/387>`__ and `#389 <https://github.com/pypa/packaging/issues/389>`__)
+
+ 20.8 - 2020-12-11
+ ~~~~~~~~~~~~~~~~~
+
+ * Revert back to setuptools for compatibility purposes for some Linux distros (`#363 <https://github.com/pypa/packaging/issues/363>`__)
+ * Do not insert an underscore in wheel tags when the interpreter version number
+   is more than 2 digits (`#372 <https://github.com/pypa/packaging/issues/372>`__)
+
+ 20.7 - 2020-11-28
+ ~~~~~~~~~~~~~~~~~
+
+ No unreleased changes.
+
+ 20.6 - 2020-11-28
+ ~~~~~~~~~~~~~~~~~
+
+ .. note:: This release was subsequently yanked, and these changes were included in 20.7.
+
+ * Fix flit configuration, to include LICENSE files (`#357 <https://github.com/pypa/packaging/issues/357>`__)
+ * Make `intel` a recognized CPU architecture for the `universal` macOS platform tag (`#361 <https://github.com/pypa/packaging/issues/361>`__)
+ * Add some missing type hints to `packaging.requirements` (issue:`350`)
+
+ 20.5 - 2020-11-27
+ ~~~~~~~~~~~~~~~~~
+
+ * Officially support Python 3.9 (`#343 <https://github.com/pypa/packaging/issues/343>`__)
+ * Deprecate the ``LegacyVersion`` and ``LegacySpecifier`` classes (`#321 <https://github.com/pypa/packaging/issues/321>`__)
+ * Handle ``OSError`` on non-dynamic executables when attempting to resolve
+   the glibc version string.
+
+ 20.4 - 2020-05-19
+ ~~~~~~~~~~~~~~~~~
+
+ * Canonicalize version before comparing specifiers. (`#282 <https://github.com/pypa/packaging/issues/282>`__)
+ * Change type hint for ``canonicalize_name`` to return
+   ``packaging.utils.NormalizedName``.
+   This enables the use of static typing tools (like mypy) to detect mixing of
+   normalized and un-normalized names.
+
+ 20.3 - 2020-03-05
+ ~~~~~~~~~~~~~~~~~
+
+ * Fix changelog for 20.2.
+
+ 20.2 - 2020-03-05
+ ~~~~~~~~~~~~~~~~~
+
+ * Fix a bug that caused a 32-bit OS that runs on a 64-bit ARM CPU (e.g. ARM-v8,
+   aarch64), to report the wrong bitness.
+
+ 20.1 - 2020-01-24
+ ~~~~~~~~~~~~~~~~~~~
+
+ * Fix a bug caused by reuse of an exhausted iterator. (`#257 <https://github.com/pypa/packaging/issues/257>`__)
+
+ 20.0 - 2020-01-06
+ ~~~~~~~~~~~~~~~~~
+
+ * Add type hints (`#191 <https://github.com/pypa/packaging/issues/191>`__)
+
+ * Add proper trove classifiers for PyPy support (`#198 <https://github.com/pypa/packaging/issues/198>`__)
+
+ * Scale back depending on ``ctypes`` for manylinux support detection (`#171 <https://github.com/pypa/packaging/issues/171>`__)
+
+ * Use ``sys.implementation.name`` where appropriate for ``packaging.tags`` (`#193 <https://github.com/pypa/packaging/issues/193>`__)
+
+ * Expand upon the API provided by ``packaging.tags``: ``interpreter_name()``, ``mac_platforms()``, ``compatible_tags()``, ``cpython_tags()``, ``generic_tags()`` (`#187 <https://github.com/pypa/packaging/issues/187>`__)
+
+ * Officially support Python 3.8 (`#232 <https://github.com/pypa/packaging/issues/232>`__)
+
+ * Add ``major``, ``minor``, and ``micro`` aliases to ``packaging.version.Version`` (`#226 <https://github.com/pypa/packaging/issues/226>`__)
+
+ * Properly mark ``packaging`` has being fully typed by adding a `py.typed` file (`#226 <https://github.com/pypa/packaging/issues/226>`__)
+
+ 19.2 - 2019-09-18
+ ~~~~~~~~~~~~~~~~~
+
+ * Remove dependency on ``attrs`` (`#178 <https://github.com/pypa/packaging/issues/178>`__, `#179 <https://github.com/pypa/packaging/issues/179>`__)
+
+ * Use appropriate fallbacks for CPython ABI tag (`#181 <https://github.com/pypa/packaging/issues/181>`__, `#185 <https://github.com/pypa/packaging/issues/185>`__)
+
+ * Add manylinux2014 support (`#186 <https://github.com/pypa/packaging/issues/186>`__)
+
+ * Improve ABI detection (`#181 <https://github.com/pypa/packaging/issues/181>`__)
+
+ * Properly handle debug wheels for Python 3.8 (`#172 <https://github.com/pypa/packaging/issues/172>`__)
+
+ * Improve detection of debug builds on Windows (`#194 <https://github.com/pypa/packaging/issues/194>`__)
+
+ 19.1 - 2019-07-30
+ ~~~~~~~~~~~~~~~~~
+
+ * Add the ``packaging.tags`` module. (`#156 <https://github.com/pypa/packaging/issues/156>`__)
+
+ * Correctly handle two-digit versions in ``python_version`` (`#119 <https://github.com/pypa/packaging/issues/119>`__)
+
+
+ 19.0 - 2019-01-20
+ ~~~~~~~~~~~~~~~~~
+
+ * Fix string representation of PEP 508 direct URL requirements with markers.
+
+ * Better handling of file URLs
+
+   This allows for using ``file:///absolute/path``, which was previously
+   prevented due to the missing ``netloc``.
+
+   This allows for all file URLs that ``urlunparse`` turns back into the
+   original URL to be valid.
+
+
+ 18.0 - 2018-09-26
+ ~~~~~~~~~~~~~~~~~
+
+ * Improve error messages when invalid requirements are given. (`#129 <https://github.com/pypa/packaging/issues/129>`__)
+
+
+ 17.1 - 2017-02-28
+ ~~~~~~~~~~~~~~~~~
+
+ * Fix ``utils.canonicalize_version`` when supplying non PEP 440 versions.
+
+
+ 17.0 - 2017-02-28
+ ~~~~~~~~~~~~~~~~~
+
+ * Drop support for python 2.6, 3.2, and 3.3.
+
+ * Define minimal pyparsing version to 2.0.2 (`#91 <https://github.com/pypa/packaging/issues/91>`__).
+
+ * Add ``epoch``, ``release``, ``pre``, ``dev``, and ``post`` attributes to
+   ``Version`` and ``LegacyVersion`` (`#34 <https://github.com/pypa/packaging/issues/34>`__).
+
+ * Add ``Version().is_devrelease`` and ``LegacyVersion().is_devrelease`` to
+   make it easy to determine if a release is a development release.
+
+ * Add ``utils.canonicalize_version`` to canonicalize version strings or
+   ``Version`` instances (`#121 <https://github.com/pypa/packaging/issues/121>`__).
+
+
+ 16.8 - 2016-10-29
+ ~~~~~~~~~~~~~~~~~
+
+ * Fix markers that utilize ``in`` so that they render correctly.
+
+ * Fix an erroneous test on Python RC releases.
+
+
+ 16.7 - 2016-04-23
+ ~~~~~~~~~~~~~~~~~
+
+ * Add support for the deprecated ``python_implementation`` marker which was
+   an undocumented setuptools marker in addition to the newer markers.
+
+
+ 16.6 - 2016-03-29
+ ~~~~~~~~~~~~~~~~~
+
+ * Add support for the deprecated, PEP 345 environment markers in addition to
+   the newer markers.
+
+
+ 16.5 - 2016-02-26
+ ~~~~~~~~~~~~~~~~~
+
+ * Fix a regression in parsing requirements with whitespaces between the comma
+   separators.
+
+
+ 16.4 - 2016-02-22
+ ~~~~~~~~~~~~~~~~~
+
+ * Fix a regression in parsing requirements like ``foo (==4)``.
+
+
+ 16.3 - 2016-02-21
+ ~~~~~~~~~~~~~~~~~
+
+ * Fix a bug where ``packaging.requirements:Requirement`` was overly strict when
+   matching legacy requirements.
+
+
+ 16.2 - 2016-02-09
+ ~~~~~~~~~~~~~~~~~
+
+ * Add a function that implements the name canonicalization from PEP 503.
+
+
+ 16.1 - 2016-02-07
+ ~~~~~~~~~~~~~~~~~
+
+ * Implement requirement specifiers from PEP 508.
+
+
+ 16.0 - 2016-01-19
+ ~~~~~~~~~~~~~~~~~
+
+ * Relicense so that packaging is available under *either* the Apache License,
+   Version 2.0 or a 2 Clause BSD license.
+
+ * Support installation of packaging when only distutils is available.
+
+ * Fix ``==`` comparison when there is a prefix and a local version in play.
+   (`#41 <https://github.com/pypa/packaging/issues/41>`__).
+
+ * Implement environment markers from PEP 508.
+
+
+ 15.3 - 2015-08-01
+ ~~~~~~~~~~~~~~~~~
+
+ * Normalize post-release spellings for rev/r prefixes. `#35 <https://github.com/pypa/packaging/issues/35>`__
+
+
+ 15.2 - 2015-05-13
+ ~~~~~~~~~~~~~~~~~
+
+ * Fix an error where the arbitrary specifier (``===``) was not correctly
+   allowing pre-releases when it was being used.
+
+ * Expose the specifier and version parts through properties on the
+   ``Specifier`` classes.
+
+ * Allow iterating over the ``SpecifierSet`` to get access to all of the
+   ``Specifier`` instances.
+
+ * Allow testing if a version is contained within a specifier via the ``in``
+   operator.
+
+
+ 15.1 - 2015-04-13
+ ~~~~~~~~~~~~~~~~~
+
+ * Fix a logic error that was causing inconsistent answers about whether or not
+   a pre-release was contained within a ``SpecifierSet`` or not.
+
+
+ 15.0 - 2015-01-02
+ ~~~~~~~~~~~~~~~~~
+
+ * Add ``Version().is_postrelease`` and ``LegacyVersion().is_postrelease`` to
+   make it easy to determine if a release is a post release.
+
+ * Add ``Version().base_version`` and ``LegacyVersion().base_version`` to make
+   it easy to get the public version without any pre or post release markers.
+
+ * Support the update to PEP 440 which removed the implied ``!=V.*`` when using
+   either ``>V`` or ``<V`` and which instead special cased the handling of
+   pre-releases, post-releases, and local versions when using ``>V`` or ``<V``.
+
+
+ 14.5 - 2014-12-17
+ ~~~~~~~~~~~~~~~~~
+
+ * Normalize release candidates as ``rc`` instead of ``c``.
+
+ * Expose the ``VERSION_PATTERN`` constant, a regular expression matching
+   a valid version.
+
+
+ 14.4 - 2014-12-15
+ ~~~~~~~~~~~~~~~~~
+
+ * Ensure that versions are normalized before comparison when used in a
+   specifier with a less than (``<``) or greater than (``>``) operator.
+
+
+ 14.3 - 2014-11-19
+ ~~~~~~~~~~~~~~~~~
+
+ * **BACKWARDS INCOMPATIBLE** Refactor specifier support so that it can sanely
+   handle legacy specifiers as well as PEP 440 specifiers.
+
+ * **BACKWARDS INCOMPATIBLE** Move the specifier support out of
+   ``packaging.version`` into ``packaging.specifiers``.
+
+
+ 14.2 - 2014-09-10
+ ~~~~~~~~~~~~~~~~~
+
+ * Add prerelease support to ``Specifier``.
+ * Remove the ability to do ``item in Specifier()`` and replace it with
+   ``Specifier().contains(item)`` in order to allow flags that signal if a
+   prerelease should be accepted or not.
+ * Add a method ``Specifier().filter()`` which will take an iterable and returns
+   an iterable with items that do not match the specifier filtered out.
+
+
+ 14.1 - 2014-09-08
+ ~~~~~~~~~~~~~~~~~
+
+ * Allow ``LegacyVersion`` and ``Version`` to be sorted together.
+ * Add ``packaging.version.parse()`` to enable easily parsing a version string
+   as either a ``Version`` or a ``LegacyVersion`` depending on it's PEP 440
+   validity.
+
+
+ 14.0 - 2014-09-05
+ ~~~~~~~~~~~~~~~~~
+
+ * Initial release.
+
+
+ .. _`master`: https://github.com/pypa/packaging/
+
+
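
Note: the METADATA above names the library's core surface (version handling, specifiers, markers). A small hedged sketch of that API as it exists in packaging 21.0:

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

spec = SpecifierSet(">=2.0.2")            # same form as the Requires-Dist pin above
assert Version("2.4.7") in spec
assert Version("21.0") > Version("20.9")  # calendar-style versions compare numerically
```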
public/gpt-2/packaging-21.0.dist-info/RECORD ADDED
@@ -0,0 +1,19 @@
+ packaging/__about__.py,sha256=p_OQloqH2saadcbUQmWEsWK857dI6_ff5E3aSiCqGFA,661
+ packaging/__init__.py,sha256=b9Kk5MF7KxhhLgcDmiUWukN-LatWFxPdNug0joPhHSk,497
+ packaging/_manylinux.py,sha256=XcbiXB-qcjv3bcohp6N98TMpOP4_j3m-iOA8ptK2GWY,11488
+ packaging/_musllinux.py,sha256=z5yeG1ygOPx4uUyLdqj-p8Dk5UBb5H_b0NIjW9yo8oA,4378
+ packaging/_structures.py,sha256=TMiAgFbdUOPmIfDIfiHc3KFhSJ8kMjof2QS5I-2NyQ8,1629
+ packaging/markers.py,sha256=Fygi3_eZnjQ-3VJizW5AhI5wvo0Hb6RMk4DidsKpOC0,8475
+ packaging/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ packaging/requirements.py,sha256=rjaGRCMepZS1mlYMjJ5Qh6rfq3gtsCRQUQmftGZ_bu8,4664
+ packaging/specifiers.py,sha256=MZ-fYcNL3u7pNrt-6g2EQO7AbRXkjc-SPEYwXMQbLmc,30964
+ packaging/tags.py,sha256=akIerYw8W0sz4OW9HHozgawWnbt2GGOPm3sviW0jowY,15714
+ packaging/utils.py,sha256=dJjeat3BS-TYn1RrUFVwufUMasbtzLfYRoy_HXENeFQ,4200
+ packaging/version.py,sha256=_fLRNrFrxYcHVfyo8vk9j8s6JM8N_xsSxVFr6RJyco8,14665
+ packaging-21.0.dist-info/LICENSE,sha256=ytHvW9NA1z4HS6YU0m996spceUDD2MNIUuZcSQlobEg,197
+ packaging-21.0.dist-info/LICENSE.APACHE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174
+ packaging-21.0.dist-info/LICENSE.BSD,sha256=tw5-m3QvHMb5SLNMFqo5_-zpQZY2S8iP8NIYDwAo-sU,1344
+ packaging-21.0.dist-info/METADATA,sha256=ZV4MesCjT-YxFEJvLzsJ31kKmmj4ltiMUl3JvqxJfqI,13418
+ packaging-21.0.dist-info/WHEEL,sha256=OqRkF0eY5GHssMorFjlbTIq072vpHpF60fIQA6lS9xA,92
+ packaging-21.0.dist-info/top_level.txt,sha256=zFdHrhWnPslzsiP455HutQsqPB6v0KCtNUMtUtrefDw,10
+ packaging-21.0.dist-info/RECORD,,
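
Note: each RECORD row is `path,sha256=<urlsafe-base64 digest, unpadded>,size`; the RECORD file's own row carries no hash or size. A hedged verification sketch (the install root is hypothetical):

```python
import base64
import csv
import hashlib

def row_ok(root: str, row: list) -> bool:
    path, hash_field, size = row
    if not hash_field:  # RECORD's own row has empty hash and size fields
        return True
    algo, _, expected = hash_field.partition("=")
    data = open(f"{root}/{path}", "rb").read()
    digest = base64.urlsafe_b64encode(hashlib.new(algo, data).digest())
    return digest.rstrip(b"=").decode() == expected and len(data) == int(size)

root = "public/gpt-2"  # hypothetical root containing the packaging/ tree above
with open(f"{root}/packaging-21.0.dist-info/RECORD", newline="") as f:
    assert all(row_ok(root, row) for row in csv.reader(f))
```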
public/gpt-2/packaging-21.0.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: bdist_wheel (0.36.2)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
public/gpt-2/packaging-21.0.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ packaging
public/gpt-2/packaging/__about__.py ADDED
@@ -0,0 +1,26 @@
+ # This file is dual licensed under the terms of the Apache License, Version
+ # 2.0, and the BSD License. See the LICENSE file in the root of this repository
+ # for complete details.
+
+ __all__ = [
+     "__title__",
+     "__summary__",
+     "__uri__",
+     "__version__",
+     "__author__",
+     "__email__",
+     "__license__",
+     "__copyright__",
+ ]
+
+ __title__ = "packaging"
+ __summary__ = "Core utilities for Python packages"
+ __uri__ = "https://github.com/pypa/packaging"
+
+ __version__ = "21.0"
+
+ __author__ = "Donald Stufft and individual contributors"
+ __email__ = "donald@stufft.io"
+
+ __license__ = "BSD-2-Clause or Apache-2.0"
+ __copyright__ = "2014-2019 %s" % __author__
public/gpt-2/packaging/__init__.py ADDED
@@ -0,0 +1,25 @@
+ # This file is dual licensed under the terms of the Apache License, Version
+ # 2.0, and the BSD License. See the LICENSE file in the root of this repository
+ # for complete details.
+
+ from .__about__ import (
+     __author__,
+     __copyright__,
+     __email__,
+     __license__,
+     __summary__,
+     __title__,
+     __uri__,
+     __version__,
+ )
+
+ __all__ = [
+     "__title__",
+     "__summary__",
+     "__uri__",
+     "__version__",
+     "__author__",
+     "__email__",
+     "__license__",
+     "__copyright__",
+ ]
public/gpt-2/packaging/_manylinux.py ADDED
@@ -0,0 +1,301 @@
+ import collections
+ import functools
+ import os
+ import re
+ import struct
+ import sys
+ import warnings
+ from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple
+
+
+ # Python does not provide platform information at sufficient granularity to
+ # identify the architecture of the running executable in some cases, so we
+ # determine it dynamically by reading the information from the running
+ # process. This only applies on Linux, which uses the ELF format.
+ class _ELFFileHeader:
+     # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
+     class _InvalidELFFileHeader(ValueError):
+         """
+         An invalid ELF file header was found.
+         """
+
+     ELF_MAGIC_NUMBER = 0x7F454C46
+     ELFCLASS32 = 1
+     ELFCLASS64 = 2
+     ELFDATA2LSB = 1
+     ELFDATA2MSB = 2
+     EM_386 = 3
+     EM_S390 = 22
+     EM_ARM = 40
+     EM_X86_64 = 62
+     EF_ARM_ABIMASK = 0xFF000000
+     EF_ARM_ABI_VER5 = 0x05000000
+     EF_ARM_ABI_FLOAT_HARD = 0x00000400
+
+     def __init__(self, file: IO[bytes]) -> None:
+         def unpack(fmt: str) -> int:
+             try:
+                 data = file.read(struct.calcsize(fmt))
+                 result: Tuple[int, ...] = struct.unpack(fmt, data)
+             except struct.error:
+                 raise _ELFFileHeader._InvalidELFFileHeader()
+             return result[0]
+
+         self.e_ident_magic = unpack(">I")
+         if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
+             raise _ELFFileHeader._InvalidELFFileHeader()
+         self.e_ident_class = unpack("B")
+         if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
+             raise _ELFFileHeader._InvalidELFFileHeader()
+         self.e_ident_data = unpack("B")
+         if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
+             raise _ELFFileHeader._InvalidELFFileHeader()
+         self.e_ident_version = unpack("B")
+         self.e_ident_osabi = unpack("B")
+         self.e_ident_abiversion = unpack("B")
+         self.e_ident_pad = file.read(7)
+         format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H"
+         format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I"
+         format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q"
+         format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
+         self.e_type = unpack(format_h)
+         self.e_machine = unpack(format_h)
+         self.e_version = unpack(format_i)
+         self.e_entry = unpack(format_p)
+         self.e_phoff = unpack(format_p)
+         self.e_shoff = unpack(format_p)
+         self.e_flags = unpack(format_i)
+         self.e_ehsize = unpack(format_h)
+         self.e_phentsize = unpack(format_h)
+         self.e_phnum = unpack(format_h)
+         self.e_shentsize = unpack(format_h)
+         self.e_shnum = unpack(format_h)
+         self.e_shstrndx = unpack(format_h)
+
+
+ def _get_elf_header() -> Optional[_ELFFileHeader]:
+     try:
+         with open(sys.executable, "rb") as f:
+             elf_header = _ELFFileHeader(f)
+     except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
+         return None
+     return elf_header
+
+
+ def _is_linux_armhf() -> bool:
+     # hard-float ABI can be detected from the ELF header of the running
+     # process
+     # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
+     elf_header = _get_elf_header()
+     if elf_header is None:
+         return False
+     result = elf_header.e_ident_class == elf_header.ELFCLASS32
+     result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
+     result &= elf_header.e_machine == elf_header.EM_ARM
+     result &= (
+         elf_header.e_flags & elf_header.EF_ARM_ABIMASK
+     ) == elf_header.EF_ARM_ABI_VER5
+     result &= (
+         elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD
+     ) == elf_header.EF_ARM_ABI_FLOAT_HARD
+     return result
+
+
+ def _is_linux_i686() -> bool:
+     elf_header = _get_elf_header()
+     if elf_header is None:
+         return False
+     result = elf_header.e_ident_class == elf_header.ELFCLASS32
+     result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
+     result &= elf_header.e_machine == elf_header.EM_386
+     return result
+
+
+ def _have_compatible_abi(arch: str) -> bool:
+     if arch == "armv7l":
+         return _is_linux_armhf()
+     if arch == "i686":
+         return _is_linux_i686()
+     return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}
+
+
+ # If glibc ever changes its major version, we need to know what the last
+ # minor version was, so we can build the complete list of all versions.
+ # For now, guess what the highest minor version might be, assume it will
+ # be 50 for testing. Once this actually happens, update the dictionary
+ # with the actual value.
+ _LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50)
+
+
+ class _GLibCVersion(NamedTuple):
+     major: int
+     minor: int
+
+
+ def _glibc_version_string_confstr() -> Optional[str]:
+     """
+     Primary implementation of glibc_version_string using os.confstr.
+     """
+     # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
+     # to be broken or missing. This strategy is used in the standard library
+     # platform module.
+     # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
+     try:
+         # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
+         version_string = os.confstr("CS_GNU_LIBC_VERSION")
+         assert version_string is not None
+         _, version = version_string.split()
+     except (AssertionError, AttributeError, OSError, ValueError):
+         # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
+         return None
+     return version
+
+
+ def _glibc_version_string_ctypes() -> Optional[str]:
+     """
+     Fallback implementation of glibc_version_string using ctypes.
+     """
+     try:
+         import ctypes
+     except ImportError:
+         return None
+
+     # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
+     # manpage says, "If filename is NULL, then the returned handle is for the
+     # main program". This way we can let the linker do the work to figure out
+     # which libc our process is actually using.
+     #
+     # We must also handle the special case where the executable is not a
+     # dynamically linked executable. This can occur when using musl libc,
+     # for example. In this situation, dlopen() will error, leading to an
+     # OSError. Interestingly, at least in the case of musl, there is no
+     # errno set on the OSError. The single string argument used to construct
+     # OSError comes from libc itself and is therefore not portable to
+     # hard code here. In any case, failure to call dlopen() means we
+     # can proceed, so we bail on our attempt.
+     try:
+         process_namespace = ctypes.CDLL(None)
+     except OSError:
+         return None
+
+     try:
+         gnu_get_libc_version = process_namespace.gnu_get_libc_version
+     except AttributeError:
+         # Symbol doesn't exist -> therefore, we are not linked to
+         # glibc.
+         return None
+
+     # Call gnu_get_libc_version, which returns a string like "2.5"
+     gnu_get_libc_version.restype = ctypes.c_char_p
+     version_str: str = gnu_get_libc_version()
+     # py2 / py3 compatibility:
+     if not isinstance(version_str, str):
+         version_str = version_str.decode("ascii")
+
+     return version_str
+
+
+ def _glibc_version_string() -> Optional[str]:
+     """Returns glibc version string, or None if not using glibc."""
+     return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
+
+
+ def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
+     """Parse glibc version.
+
+     We use a regexp instead of str.split because we want to discard any
+     random junk that might come after the minor version -- this might happen
+     in patched/forked versions of glibc (e.g. Linaro's version of glibc
+     uses version strings like "2.20-2014.11"). See gh-3588.
+     """
+     m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
+     if not m:
+         warnings.warn(
+             "Expected glibc version with 2 components major.minor,"
+             " got: %s" % version_str,
+             RuntimeWarning,
+         )
+         return -1, -1
+     return int(m.group("major")), int(m.group("minor"))
+
+
+ @functools.lru_cache()
+ def _get_glibc_version() -> Tuple[int, int]:
+     version_str = _glibc_version_string()
+     if version_str is None:
+         return (-1, -1)
+     return _parse_glibc_version(version_str)
+
+
+ # From PEP 513, PEP 600
+ def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
+     sys_glibc = _get_glibc_version()
+     if sys_glibc < version:
+         return False
+     # Check for presence of _manylinux module.
+     try:
+         import _manylinux  # noqa
+     except ImportError:
+         return True
+     if hasattr(_manylinux, "manylinux_compatible"):
+         result = _manylinux.manylinux_compatible(version[0], version[1], arch)
+         if result is not None:
+             return bool(result)
+         return True
+     if version == _GLibCVersion(2, 5):
+         if hasattr(_manylinux, "manylinux1_compatible"):
+             return bool(_manylinux.manylinux1_compatible)
+     if version == _GLibCVersion(2, 12):
+         if hasattr(_manylinux, "manylinux2010_compatible"):
+             return bool(_manylinux.manylinux2010_compatible)
+     if version == _GLibCVersion(2, 17):
+         if hasattr(_manylinux, "manylinux2014_compatible"):
+             return bool(_manylinux.manylinux2014_compatible)
+     return True
+
+
+ _LEGACY_MANYLINUX_MAP = {
+     # CentOS 7 w/ glibc 2.17 (PEP 599)
+     (2, 17): "manylinux2014",
+     # CentOS 6 w/ glibc 2.12 (PEP 571)
+     (2, 12): "manylinux2010",
+     # CentOS 5 w/ glibc 2.5 (PEP 513)
+     (2, 5): "manylinux1",
+ }
+
+
+ def platform_tags(linux: str, arch: str) -> Iterator[str]:
+     if not _have_compatible_abi(arch):
+         return
+     # Oldest glibc to be supported regardless of architecture is (2, 17).
+     too_old_glibc2 = _GLibCVersion(2, 16)
+     if arch in {"x86_64", "i686"}:
+         # On x86/i686 also oldest glibc to be supported is (2, 5).
+         too_old_glibc2 = _GLibCVersion(2, 4)
+     current_glibc = _GLibCVersion(*_get_glibc_version())
+     glibc_max_list = [current_glibc]
+     # We can assume compatibility across glibc major versions.
+     # https://sourceware.org/bugzilla/show_bug.cgi?id=24636
+     #
+     # Build a list of maximum glibc versions so that we can
+     # output the canonical list of all glibc from current_glibc
+     # down to too_old_glibc2, including all intermediary versions.
+     for glibc_major in range(current_glibc.major - 1, 1, -1):
+         glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
+         glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
+     for glibc_max in glibc_max_list:
+         if glibc_max.major == too_old_glibc2.major:
+             min_minor = too_old_glibc2.minor
+         else:
+             # For other glibc major versions oldest supported is (x, 0).
+             min_minor = -1
+         for glibc_minor in range(glibc_max.minor, min_minor, -1):
+             glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
+             tag = "manylinux_{}_{}".format(*glibc_version)
+             if _is_compatible(tag, arch, glibc_version):
+                 yield linux.replace("linux", tag)
+             # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
+             if glibc_version in _LEGACY_MANYLINUX_MAP:
+                 legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
+                 if _is_compatible(legacy_tag, arch, glibc_version):
+                     yield linux.replace("linux", legacy_tag)
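
Note: `platform_tags` is this module's entry point (consumed by `packaging.tags`); it walks glibc versions downward and emits both PEP 600 and legacy tag spellings. A hedged usage sketch against this private module:

```python
from packaging._manylinux import platform_tags  # private API; subject to change

# On a glibc-2.31 x86-64 interpreter this yields manylinux_2_31_x86_64 down to
# manylinux_2_5_x86_64, with the legacy manylinux2014/2010/1 names interleaved
# at glibc 2.17 / 2.12 / 2.5.
for tag in platform_tags("linux_x86_64", "x86_64"):
    print(tag)
```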
public/gpt-2/packaging/_musllinux.py ADDED
@@ -0,0 +1,136 @@
+ """PEP 656 support.
+
+ This module implements logic to detect if the currently running Python is
+ linked against musl, and what musl version is used.
+ """
+
+ import contextlib
+ import functools
+ import operator
+ import os
+ import re
+ import struct
+ import subprocess
+ import sys
+ from typing import IO, Iterator, NamedTuple, Optional, Tuple
+
+
+ def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]:
+     return struct.unpack(fmt, f.read(struct.calcsize(fmt)))
+
+
+ def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]:
+     """Detect musl libc location by parsing the Python executable.
+
+     Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
+     ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
+     """
+     f.seek(0)
+     try:
+         ident = _read_unpacked(f, "16B")
+     except struct.error:
+         return None
+     if ident[:4] != tuple(b"\x7fELF"):  # Invalid magic, not ELF.
+         return None
+     f.seek(struct.calcsize("HHI"), 1)  # Skip file type, machine, and version.
+
+     try:
+         # e_fmt: Format for program header.
+         # p_fmt: Format for section header.
+         # p_idx: Indexes to find p_type, p_offset, and p_filesz.
+         e_fmt, p_fmt, p_idx = {
+             1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)),  # 32-bit.
+             2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)),  # 64-bit.
+         }[ident[4]]
+     except KeyError:
+         return None
+     else:
+         p_get = operator.itemgetter(*p_idx)
+
+     # Find the interpreter section and return its content.
+     try:
+         _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
+     except struct.error:
+         return None
+     for i in range(e_phnum + 1):
+         f.seek(e_phoff + e_phentsize * i)
+         try:
+             p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
+         except struct.error:
+             return None
+         if p_type != 3:  # Not PT_INTERP.
+             continue
+         f.seek(p_offset)
+         interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
+         if "musl" not in interpreter:
+             return None
+         return interpreter
+     return None
+
+
+ class _MuslVersion(NamedTuple):
+     major: int
+     minor: int
+
+
+ def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
+     lines = [n for n in (n.strip() for n in output.splitlines()) if n]
+     if len(lines) < 2 or lines[0][:4] != "musl":
+         return None
+     m = re.match(r"Version (\d+)\.(\d+)", lines[1])
+     if not m:
+         return None
+     return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
+
+
+ @functools.lru_cache()
+ def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
+     """Detect currently-running musl runtime version.
+
+     This is done by checking the specified executable's dynamic linking
+     information, and invoking the loader to parse its output for a version
+     string. If the loader is musl, the output would be something like::
+
+         musl libc (x86_64)
+         Version 1.2.2
+         Dynamic Program Loader
+     """
+     with contextlib.ExitStack() as stack:
+         try:
+             f = stack.enter_context(open(executable, "rb"))
+         except IOError:
+             return None
+         ld = _parse_ld_musl_from_elf(f)
+     if not ld:
+         return None
+     proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
+     return _parse_musl_version(proc.stderr)
+
+
+ def platform_tags(arch: str) -> Iterator[str]:
+     """Generate musllinux tags compatible to the current platform.
+
+     :param arch: Should be the part of platform tag after the ``linux_``
+         prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
+         prerequisite for the current platform to be musllinux-compatible.
+
+     :returns: An iterator of compatible musllinux tags.
+     """
+     sys_musl = _get_musl_version(sys.executable)
+     if sys_musl is None:  # Python not dynamically linked against musl.
+         return
+     for minor in range(sys_musl.minor, -1, -1):
+         yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
+
+
+ if __name__ == "__main__":  # pragma: no cover
+     import sysconfig
+
+     plat = sysconfig.get_platform()
+     assert plat.startswith("linux-"), "not linux"
+
+     print("plat:", plat)
+     print("musl:", _get_musl_version(sys.executable))
+     print("tags:", end=" ")
+     for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
+         print(t, end="\n ")
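
Note: the `__main__` block above doubles as a demo. A hedged sketch of calling the module directly (private underscore API; meaningful output only on a musl-linked interpreter such as Alpine's):

```python
import sys
from packaging._musllinux import _get_musl_version, platform_tags

print(_get_musl_version(sys.executable))  # e.g. _MuslVersion(major=1, minor=2); None under glibc
print(list(platform_tags("x86_64")))      # e.g. ['musllinux_1_2_x86_64', ..., 'musllinux_1_0_x86_64']
```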
public/gpt-2/packaging/_structures.py ADDED
@@ -0,0 +1,67 @@
+ # This file is dual licensed under the terms of the Apache License, Version
+ # 2.0, and the BSD License. See the LICENSE file in the root of this repository
+ # for complete details.
+
+
+ class InfinityType:
+     def __repr__(self) -> str:
+         return "Infinity"
+
+     def __hash__(self) -> int:
+         return hash(repr(self))
+
+     def __lt__(self, other: object) -> bool:
+         return False
+
+     def __le__(self, other: object) -> bool:
+         return False
+
+     def __eq__(self, other: object) -> bool:
+         return isinstance(other, self.__class__)
+
+     def __ne__(self, other: object) -> bool:
+         return not isinstance(other, self.__class__)
+
+     def __gt__(self, other: object) -> bool:
+         return True
+
+     def __ge__(self, other: object) -> bool:
+         return True
+
+     def __neg__(self: object) -> "NegativeInfinityType":
+         return NegativeInfinity
+
+
+ Infinity = InfinityType()
+
+
+ class NegativeInfinityType:
+     def __repr__(self) -> str:
+         return "-Infinity"
+
+     def __hash__(self) -> int:
+         return hash(repr(self))
+
+     def __lt__(self, other: object) -> bool:
+         return True
+
+     def __le__(self, other: object) -> bool:
+         return True
+
+     def __eq__(self, other: object) -> bool:
+         return isinstance(other, self.__class__)
+
+     def __ne__(self, other: object) -> bool:
+         return not isinstance(other, self.__class__)
+
+     def __gt__(self, other: object) -> bool:
+         return False
+
+     def __ge__(self, other: object) -> bool:
+         return False
+
+     def __neg__(self: object) -> InfinityType:
+         return Infinity
+
+
+ NegativeInfinity = NegativeInfinityType()
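
Note: these sentinels let `packaging.version` pad comparison keys of unequal length; `Infinity` orders above every other value and `NegativeInfinity` below. A one-line sketch of the intended ordering:

```python
from packaging._structures import Infinity, NegativeInfinity

# Tuple comparison falls through to the sentinel, which always wins/loses.
assert (1, Infinity) > (1, 99999) and (1, NegativeInfinity) < (1, 0)
```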
public/gpt-2/packaging/markers.py ADDED
@@ -0,0 +1,304 @@
+ # This file is dual licensed under the terms of the Apache License, Version
+ # 2.0, and the BSD License. See the LICENSE file in the root of this repository
+ # for complete details.
+
+ import operator
+ import os
+ import platform
+ import sys
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+ from pyparsing import (  # noqa: N817
+     Forward,
+     Group,
+     Literal as L,
+     ParseException,
+     ParseResults,
+     QuotedString,
+     ZeroOrMore,
+     stringEnd,
+     stringStart,
+ )
+
+ from .specifiers import InvalidSpecifier, Specifier
+
+ __all__ = [
+     "InvalidMarker",
+     "UndefinedComparison",
+     "UndefinedEnvironmentName",
+     "Marker",
+     "default_environment",
+ ]
+
+ Operator = Callable[[str, str], bool]
+
+
+ class InvalidMarker(ValueError):
+     """
+     An invalid marker was found, users should refer to PEP 508.
+     """
+
+
+ class UndefinedComparison(ValueError):
+     """
+     An invalid operation was attempted on a value that doesn't support it.
+     """
+
+
+ class UndefinedEnvironmentName(ValueError):
+     """
+     A name was attempted to be used that does not exist inside of the
+     environment.
+     """
+
+
+ class Node:
+     def __init__(self, value: Any) -> None:
+         self.value = value
+
+     def __str__(self) -> str:
+         return str(self.value)
+
+     def __repr__(self) -> str:
+         return f"<{self.__class__.__name__}('{self}')>"
+
+     def serialize(self) -> str:
+         raise NotImplementedError
+
+
+ class Variable(Node):
+     def serialize(self) -> str:
+         return str(self)
+
+
+ class Value(Node):
+     def serialize(self) -> str:
+         return f'"{self}"'
+
+
+ class Op(Node):
+     def serialize(self) -> str:
+         return str(self)
+
+
+ VARIABLE = (
+     L("implementation_version")
+     | L("platform_python_implementation")
+     | L("implementation_name")
+     | L("python_full_version")
+     | L("platform_release")
+     | L("platform_version")
+     | L("platform_machine")
+     | L("platform_system")
+     | L("python_version")
+     | L("sys_platform")
+     | L("os_name")
+     | L("os.name")  # PEP-345
+     | L("sys.platform")  # PEP-345
+     | L("platform.version")  # PEP-345
+     | L("platform.machine")  # PEP-345
+     | L("platform.python_implementation")  # PEP-345
+     | L("python_implementation")  # undocumented setuptools legacy
+     | L("extra")  # PEP-508
+ )
+ ALIASES = {
+     "os.name": "os_name",
+     "sys.platform": "sys_platform",
+     "platform.version": "platform_version",
+     "platform.machine": "platform_machine",
+     "platform.python_implementation": "platform_python_implementation",
+     "python_implementation": "platform_python_implementation",
+ }
+ VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
+
+ VERSION_CMP = (
+     L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
+ )
+
+ MARKER_OP = VERSION_CMP | L("not in") | L("in")
+ MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
+
+ MARKER_VALUE = QuotedString("'") | QuotedString('"')
+ MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
+
+ BOOLOP = L("and") | L("or")
+
+ MARKER_VAR = VARIABLE | MARKER_VALUE
+
+ MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
+ MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
+
+ LPAREN = L("(").suppress()
+ RPAREN = L(")").suppress()
+
+ MARKER_EXPR = Forward()
+ MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
+ MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
+
+ MARKER = stringStart + MARKER_EXPR + stringEnd
+
+
+ def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]:
+     if isinstance(results, ParseResults):
+         return [_coerce_parse_result(i) for i in results]
+ # This file is dual licensed under the terms of the Apache License, Version
+ # 2.0, and the BSD License. See the LICENSE file in the root of this repository
+ # for complete details.
+
+ import operator
+ import os
+ import platform
+ import sys
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+ from pyparsing import (  # noqa: N817
+     Forward,
+     Group,
+     Literal as L,
+     ParseException,
+     ParseResults,
+     QuotedString,
+     ZeroOrMore,
+     stringEnd,
+     stringStart,
+ )
+
+ from .specifiers import InvalidSpecifier, Specifier
+
+ __all__ = [
+     "InvalidMarker",
+     "UndefinedComparison",
+     "UndefinedEnvironmentName",
+     "Marker",
+     "default_environment",
+ ]
+
+ Operator = Callable[[str, str], bool]
+
+
+ class InvalidMarker(ValueError):
+     """
+     An invalid marker was found, users should refer to PEP 508.
+     """
+
+
+ class UndefinedComparison(ValueError):
+     """
+     An invalid operation was attempted on a value that doesn't support it.
+     """
+
+
+ class UndefinedEnvironmentName(ValueError):
+     """
+     A name was attempted to be used that does not exist inside of the
+     environment.
+     """
+
+
+ class Node:
+     def __init__(self, value: Any) -> None:
+         self.value = value
+
+     def __str__(self) -> str:
+         return str(self.value)
+
+     def __repr__(self) -> str:
+         return f"<{self.__class__.__name__}('{self}')>"
+
+     def serialize(self) -> str:
+         raise NotImplementedError
+
+
+ class Variable(Node):
+     def serialize(self) -> str:
+         return str(self)
+
+
+ class Value(Node):
+     def serialize(self) -> str:
+         return f'"{self}"'
+
+
+ class Op(Node):
+     def serialize(self) -> str:
+         return str(self)
+
+
+ VARIABLE = (
+     L("implementation_version")
+     | L("platform_python_implementation")
+     | L("implementation_name")
+     | L("python_full_version")
+     | L("platform_release")
+     | L("platform_version")
+     | L("platform_machine")
+     | L("platform_system")
+     | L("python_version")
+     | L("sys_platform")
+     | L("os_name")
+     | L("os.name")  # PEP-345
+     | L("sys.platform")  # PEP-345
+     | L("platform.version")  # PEP-345
+     | L("platform.machine")  # PEP-345
+     | L("platform.python_implementation")  # PEP-345
+     | L("python_implementation")  # undocumented setuptools legacy
+     | L("extra")  # PEP-508
+ )
+ ALIASES = {
+     "os.name": "os_name",
+     "sys.platform": "sys_platform",
+     "platform.version": "platform_version",
+     "platform.machine": "platform_machine",
+     "platform.python_implementation": "platform_python_implementation",
+     "python_implementation": "platform_python_implementation",
+ }
+ VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
+
+ VERSION_CMP = (
+     L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
+ )
+
+ MARKER_OP = VERSION_CMP | L("not in") | L("in")
+ MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
+
+ MARKER_VALUE = QuotedString("'") | QuotedString('"')
+ MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
+
+ BOOLOP = L("and") | L("or")
+
+ MARKER_VAR = VARIABLE | MARKER_VALUE
+
+ MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
+ MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
+
+ LPAREN = L("(").suppress()
+ RPAREN = L(")").suppress()
+
+ MARKER_EXPR = Forward()
+ MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
+ MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
+
+ MARKER = stringStart + MARKER_EXPR + stringEnd
+
+
+ def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]:
+     if isinstance(results, ParseResults):
+         return [_coerce_parse_result(i) for i in results]
+     else:
+         return results
+
+
+ def _format_marker(
+     marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True
+ ) -> str:
+
+     assert isinstance(marker, (list, tuple, str))
+
+     # Sometimes we have a structure like [[...]] which is a single item list
+     # where the single item is itself its own list. In that case we want to
+     # skip the rest of this function so that we don't get extraneous () on
+     # the outside.
+     if (
+         isinstance(marker, list)
+         and len(marker) == 1
+         and isinstance(marker[0], (list, tuple))
+     ):
+         return _format_marker(marker[0])
+
+     if isinstance(marker, list):
+         inner = (_format_marker(m, first=False) for m in marker)
+         if first:
+             return " ".join(inner)
+         else:
+             return "(" + " ".join(inner) + ")"
+     elif isinstance(marker, tuple):
+         return " ".join([m.serialize() for m in marker])
+     else:
+         return marker
+
+
+ _operators: Dict[str, Operator] = {
+     "in": lambda lhs, rhs: lhs in rhs,
+     "not in": lambda lhs, rhs: lhs not in rhs,
+     "<": operator.lt,
+     "<=": operator.le,
+     "==": operator.eq,
+     "!=": operator.ne,
+     ">=": operator.ge,
+     ">": operator.gt,
+ }
+
+
+ def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
+     try:
+         spec = Specifier("".join([op.serialize(), rhs]))
+     except InvalidSpecifier:
+         pass
+     else:
+         return spec.contains(lhs)
+
+     oper: Optional[Operator] = _operators.get(op.serialize())
+     if oper is None:
+         raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
+
+     return oper(lhs, rhs)
+
+
+ class Undefined:
+     pass
+
+
+ _undefined = Undefined()
+
+
+ def _get_env(environment: Dict[str, str], name: str) -> str:
+     value: Union[str, Undefined] = environment.get(name, _undefined)
+
+     if isinstance(value, Undefined):
+         raise UndefinedEnvironmentName(
+             f"{name!r} does not exist in evaluation environment."
+         )
+
+     return value
+
+
+ def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
+     groups: List[List[bool]] = [[]]
+
+     for marker in markers:
+         assert isinstance(marker, (list, tuple, str))
+
+         if isinstance(marker, list):
+             groups[-1].append(_evaluate_markers(marker, environment))
+         elif isinstance(marker, tuple):
+             lhs, op, rhs = marker
+
+             if isinstance(lhs, Variable):
+                 lhs_value = _get_env(environment, lhs.value)
+                 rhs_value = rhs.value
+             else:
+                 lhs_value = lhs.value
+                 rhs_value = _get_env(environment, rhs.value)
+
+             groups[-1].append(_eval_op(lhs_value, op, rhs_value))
+         else:
+             assert marker in ["and", "or"]
+             if marker == "or":
+                 groups.append([])
+
+     return any(all(item) for item in groups)
+
+
+ def format_full_version(info: "sys._version_info") -> str:
+     version = "{0.major}.{0.minor}.{0.micro}".format(info)
+     kind = info.releaselevel
+     if kind != "final":
+         version += kind[0] + str(info.serial)
+     return version
+
+
+ def default_environment() -> Dict[str, str]:
+     iver = format_full_version(sys.implementation.version)
+     implementation_name = sys.implementation.name
+     return {
+         "implementation_name": implementation_name,
+         "implementation_version": iver,
+         "os_name": os.name,
+         "platform_machine": platform.machine(),
+         "platform_release": platform.release(),
+         "platform_system": platform.system(),
+         "platform_version": platform.version(),
+         "python_full_version": platform.python_version(),
+         "platform_python_implementation": platform.python_implementation(),
+         "python_version": ".".join(platform.python_version_tuple()[:2]),
+         "sys_platform": sys.platform,
+     }
+
+
+ class Marker:
+     def __init__(self, marker: str) -> None:
+         try:
+             self._markers = _coerce_parse_result(MARKER.parseString(marker))
+         except ParseException as e:
+             raise InvalidMarker(
+                 f"Invalid marker: {marker!r}, parse error at "
+                 f"{marker[e.loc : e.loc + 8]!r}"
+             )
+
+     def __str__(self) -> str:
+         return _format_marker(self._markers)
+
+     def __repr__(self) -> str:
+         return f"<Marker('{self}')>"
+
+     def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
+         """Evaluate a marker.
+
+         Return the boolean from evaluating the given marker against the
+         environment. environment is an optional argument to override all or
+         part of the determined environment.
+
+         The environment is determined from the current Python process.
+         """
+         current_environment = default_environment()
+         if environment is not None:
+             current_environment.update(environment)
+
+         return _evaluate_markers(self._markers, current_environment)
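A doctest-style sketch of the `Marker` API defined above (illustrative, not part of the diff; passing an explicit environment override makes the result independent of the host interpreter):

    >>> from packaging.markers import Marker
    >>> m = Marker("python_version >= '3.6' and os_name == 'posix'")
    >>> m.evaluate({"python_version": "3.8", "os_name": "posix"})
    True
    >>> str(m)
    'python_version >= "3.6" and os_name == "posix"'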
public/gpt-2/packaging/py.typed ADDED
File without changes
public/gpt-2/packaging/requirements.py ADDED
@@ -0,0 +1,146 @@
+ # This file is dual licensed under the terms of the Apache License, Version
+ # 2.0, and the BSD License. See the LICENSE file in the root of this repository
+ # for complete details.
+
+ import re
+ import string
+ import urllib.parse
+ from typing import List, Optional as TOptional, Set
+
+ from pyparsing import (  # noqa
+     Combine,
+     Literal as L,
+     Optional,
+     ParseException,
+     Regex,
+     Word,
+     ZeroOrMore,
+     originalTextFor,
+     stringEnd,
+     stringStart,
+ )
+
+ from .markers import MARKER_EXPR, Marker
+ from .specifiers import LegacySpecifier, Specifier, SpecifierSet
+
+
+ class InvalidRequirement(ValueError):
+     """
+     An invalid requirement was found, users should refer to PEP 508.
+     """
+
+
+ ALPHANUM = Word(string.ascii_letters + string.digits)
+
+ LBRACKET = L("[").suppress()
+ RBRACKET = L("]").suppress()
+ LPAREN = L("(").suppress()
+ RPAREN = L(")").suppress()
+ COMMA = L(",").suppress()
+ SEMICOLON = L(";").suppress()
+ AT = L("@").suppress()
+
+ PUNCTUATION = Word("-_.")
+ IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
+ IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
+
+ NAME = IDENTIFIER("name")
+ EXTRA = IDENTIFIER
+
+ URI = Regex(r"[^ ]+")("url")
+ URL = AT + URI
+
+ EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
+ EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
+
+ VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
+ VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
+
+ VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
+ VERSION_MANY = Combine(
+     VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
+ )("_raw_spec")
+ _VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)
+ _VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")
+
+ VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
+ VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
+
+ MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
+ MARKER_EXPR.setParseAction(
+     lambda s, l, t: Marker(s[t._original_start : t._original_end])
+ )
+ MARKER_SEPARATOR = SEMICOLON
+ MARKER = MARKER_SEPARATOR + MARKER_EXPR
+
+ VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
+ URL_AND_MARKER = URL + Optional(MARKER)
+
+ NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
+
+ REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
+ # pyparsing isn't thread safe during initialization, so we do it eagerly, see
+ # issue #104
+ REQUIREMENT.parseString("x[]")
+
+
+ class Requirement:
+     """Parse a requirement.
+
+     Parse a given requirement string into its parts, such as name, specifier,
+     URL, and extras. Raises InvalidRequirement on a badly-formed requirement
+     string.
+     """
+
+     # TODO: Can we test whether something is contained within a requirement?
+     #       If so how do we do that? Do we need to test against the _name_ of
+     #       the thing as well as the version? What about the markers?
+     # TODO: Can we normalize the name and extra name?
+
+     def __init__(self, requirement_string: str) -> None:
+         try:
+             req = REQUIREMENT.parseString(requirement_string)
+         except ParseException as e:
+             raise InvalidRequirement(
+                 f'Parse error at "{ requirement_string[e.loc : e.loc + 8]!r}": {e.msg}'
+             )
+
+         self.name: str = req.name
+         if req.url:
+             parsed_url = urllib.parse.urlparse(req.url)
+             if parsed_url.scheme == "file":
+                 if urllib.parse.urlunparse(parsed_url) != req.url:
+                     raise InvalidRequirement("Invalid URL given")
+             elif not (parsed_url.scheme and parsed_url.netloc) or (
+                 not parsed_url.scheme and not parsed_url.netloc
+             ):
+                 raise InvalidRequirement(f"Invalid URL: {req.url}")
+             self.url: TOptional[str] = req.url
+         else:
+             self.url = None
+         self.extras: Set[str] = set(req.extras.asList() if req.extras else [])
+         self.specifier: SpecifierSet = SpecifierSet(req.specifier)
+         self.marker: TOptional[Marker] = req.marker if req.marker else None
+
+     def __str__(self) -> str:
+         parts: List[str] = [self.name]
+
+         if self.extras:
+             formatted_extras = ",".join(sorted(self.extras))
+             parts.append(f"[{formatted_extras}]")
+
+         if self.specifier:
+             parts.append(str(self.specifier))
+
+         if self.url:
+             parts.append(f"@ {self.url}")
+             if self.marker:
+                 parts.append(" ")
+
+         if self.marker:
+             parts.append(f"; {self.marker}")
+
+         return "".join(parts)
+
+     def __repr__(self) -> str:
+         return f"<Requirement('{self}')>"
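A doctest-style sketch of `Requirement` parsing (illustrative, not part of the diff; the requirement string itself is a hypothetical example):

    >>> from packaging.requirements import Requirement
    >>> r = Requirement("transformers[torch]>=4.0,<5; python_version >= '3.6'")
    >>> r.name, sorted(r.extras)
    ('transformers', ['torch'])
    >>> str(r.specifier)
    '<5,>=4.0'
    >>> r.marker is not None
    True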
public/gpt-2/packaging/specifiers.py ADDED
@@ -0,0 +1,828 @@
+ # This file is dual licensed under the terms of the Apache License, Version
+ # 2.0, and the BSD License. See the LICENSE file in the root of this repository
+ # for complete details.
+
+ import abc
+ import functools
+ import itertools
+ import re
+ import warnings
+ from typing import (
+     Callable,
+     Dict,
+     Iterable,
+     Iterator,
+     List,
+     Optional,
+     Pattern,
+     Set,
+     Tuple,
+     TypeVar,
+     Union,
+ )
+
+ from .utils import canonicalize_version
+ from .version import LegacyVersion, Version, parse
+
+ ParsedVersion = Union[Version, LegacyVersion]
+ UnparsedVersion = Union[Version, LegacyVersion, str]
+ VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion)
+ CallableOperator = Callable[[ParsedVersion, str], bool]
+
+
+ class InvalidSpecifier(ValueError):
+     """
+     An invalid specifier was found, users should refer to PEP 440.
+     """
+
+
+ class BaseSpecifier(metaclass=abc.ABCMeta):
+     @abc.abstractmethod
+     def __str__(self) -> str:
+         """
+         Returns the str representation of this Specifier-like object. This
+         should be representative of the Specifier itself.
+         """
+
+     @abc.abstractmethod
+     def __hash__(self) -> int:
+         """
+         Returns a hash value for this Specifier-like object.
+         """
+
+     @abc.abstractmethod
+     def __eq__(self, other: object) -> bool:
+         """
+         Returns a boolean representing whether or not the two Specifier-like
+         objects are equal.
+         """
+
+     @abc.abstractmethod
+     def __ne__(self, other: object) -> bool:
+         """
+         Returns a boolean representing whether or not the two Specifier-like
+         objects are not equal.
+         """
+
+     @abc.abstractproperty
+     def prereleases(self) -> Optional[bool]:
+         """
+         Returns whether or not pre-releases as a whole are allowed by this
+         specifier.
+         """
+
+     @prereleases.setter
+     def prereleases(self, value: bool) -> None:
+         """
+         Sets whether or not pre-releases as a whole are allowed by this
+         specifier.
+         """
+
+     @abc.abstractmethod
+     def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
+         """
+         Determines if the given item is contained within this specifier.
+         """
+
+     @abc.abstractmethod
+     def filter(
+         self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
+     ) -> Iterable[VersionTypeVar]:
+         """
+         Takes an iterable of items and filters them so that only items which
+         are contained within this specifier are allowed in it.
+         """
+
+
+ class _IndividualSpecifier(BaseSpecifier):
+
+     _operators: Dict[str, str] = {}
+     _regex: Pattern[str]
+
+     def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
+         match = self._regex.search(spec)
+         if not match:
+             raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
+
+         self._spec: Tuple[str, str] = (
+             match.group("operator").strip(),
+             match.group("version").strip(),
+         )
+
+         # Store whether or not this Specifier should accept prereleases
+         self._prereleases = prereleases
+
+     def __repr__(self) -> str:
+         pre = (
+             f", prereleases={self.prereleases!r}"
+             if self._prereleases is not None
+             else ""
+         )
+
+         return "<{}({!r}{})>".format(self.__class__.__name__, str(self), pre)
+
+     def __str__(self) -> str:
+         return "{}{}".format(*self._spec)
+
+     @property
+     def _canonical_spec(self) -> Tuple[str, str]:
+         return self._spec[0], canonicalize_version(self._spec[1])
+
+     def __hash__(self) -> int:
+         return hash(self._canonical_spec)
+
+     def __eq__(self, other: object) -> bool:
+         if isinstance(other, str):
+             try:
+                 other = self.__class__(str(other))
+             except InvalidSpecifier:
+                 return NotImplemented
+         elif not isinstance(other, self.__class__):
+             return NotImplemented
+
+         return self._canonical_spec == other._canonical_spec
+
+     def __ne__(self, other: object) -> bool:
+         if isinstance(other, str):
+             try:
+                 other = self.__class__(str(other))
+             except InvalidSpecifier:
+                 return NotImplemented
+         elif not isinstance(other, self.__class__):
+             return NotImplemented
+
+         return self._spec != other._spec
+
+     def _get_operator(self, op: str) -> CallableOperator:
+         operator_callable: CallableOperator = getattr(
+             self, f"_compare_{self._operators[op]}"
+         )
+         return operator_callable
+
+     def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion:
+         if not isinstance(version, (LegacyVersion, Version)):
+             version = parse(version)
+         return version
+
+     @property
+     def operator(self) -> str:
+         return self._spec[0]
+
+     @property
+     def version(self) -> str:
+         return self._spec[1]
+
+     @property
+     def prereleases(self) -> Optional[bool]:
+         return self._prereleases
+
+     @prereleases.setter
+     def prereleases(self, value: bool) -> None:
+         self._prereleases = value
+
+     def __contains__(self, item: str) -> bool:
+         return self.contains(item)
+
+     def contains(
+         self, item: UnparsedVersion, prereleases: Optional[bool] = None
+     ) -> bool:
+
+         # Determine if prereleases are to be allowed or not.
+         if prereleases is None:
+             prereleases = self.prereleases
+
+         # Normalize item to a Version or LegacyVersion, this allows us to have
+         # a shortcut for ``"2.0" in Specifier(">=2")``.
+         normalized_item = self._coerce_version(item)
+
+         # Determine if we should be supporting prereleases in this specifier
+         # or not. If we do not support prereleases then we can short-circuit
+         # the logic if this version is a prerelease.
+         if normalized_item.is_prerelease and not prereleases:
+             return False
+
+         # Actually do the comparison to determine if this item is contained
+         # within this Specifier or not.
+         operator_callable: CallableOperator = self._get_operator(self.operator)
+         return operator_callable(normalized_item, self.version)
+
+     def filter(
+         self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
+     ) -> Iterable[VersionTypeVar]:
+
+         yielded = False
+         found_prereleases = []
+
+         kw = {"prereleases": prereleases if prereleases is not None else True}
+
+         # Attempt to iterate over all the values in the iterable and if any of
+         # them match, yield them.
+         for version in iterable:
+             parsed_version = self._coerce_version(version)
+
+             if self.contains(parsed_version, **kw):
+                 # If our version is a prerelease, and we were not set to allow
+                 # prereleases, then we'll store it for later in case nothing
+                 # else matches this specifier.
+                 if parsed_version.is_prerelease and not (
+                     prereleases or self.prereleases
+                 ):
+                     found_prereleases.append(version)
+                 # Either this is not a prerelease, or we should have been
+                 # accepting prereleases from the beginning.
+                 else:
+                     yielded = True
+                     yield version
+
+         # Now that we've iterated over everything, determine if we've yielded
+         # any values, and if we have not and we have any prereleases stored up
+         # then we will go ahead and yield the prereleases.
+         if not yielded and found_prereleases:
+             for version in found_prereleases:
+                 yield version
+
+
+ class LegacySpecifier(_IndividualSpecifier):
+
+     _regex_str = r"""
+         (?P<operator>(==|!=|<=|>=|<|>))
+         \s*
+         (?P<version>
+             [^,;\s)]* # Since this is a "legacy" specifier, and the version
+                       # string can be just about anything, we match everything
+                       # except for whitespace, a semi-colon for marker support,
+                       # a closing paren since versions can be enclosed in
+                       # them, and a comma since it's a version separator.
+         )
+         """
+
+     _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+     _operators = {
+         "==": "equal",
+         "!=": "not_equal",
+         "<=": "less_than_equal",
+         ">=": "greater_than_equal",
+         "<": "less_than",
+         ">": "greater_than",
+     }
+
+     def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
+         super().__init__(spec, prereleases)
+
+         warnings.warn(
+             "Creating a LegacyVersion has been deprecated and will be "
+             "removed in the next major release",
+             DeprecationWarning,
+         )
+
+     def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion:
+         if not isinstance(version, LegacyVersion):
+             version = LegacyVersion(str(version))
+         return version
+
+     def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool:
+         return prospective == self._coerce_version(spec)
+
+     def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool:
+         return prospective != self._coerce_version(spec)
+
+     def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool:
+         return prospective <= self._coerce_version(spec)
+
+     def _compare_greater_than_equal(
+         self, prospective: LegacyVersion, spec: str
+     ) -> bool:
+         return prospective >= self._coerce_version(spec)
+
+     def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool:
+         return prospective < self._coerce_version(spec)
+
+     def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool:
+         return prospective > self._coerce_version(spec)
+
+
+ def _require_version_compare(
+     fn: Callable[["Specifier", ParsedVersion, str], bool]
+ ) -> Callable[["Specifier", ParsedVersion, str], bool]:
+     @functools.wraps(fn)
+     def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool:
+         if not isinstance(prospective, Version):
+             return False
+         return fn(self, prospective, spec)
+
+     return wrapped
+
+
+ class Specifier(_IndividualSpecifier):
+
+     _regex_str = r"""
+         (?P<operator>(~=|==|!=|<=|>=|<|>|===))
+         (?P<version>
+             (?:
+                 # The identity operators allow for an escape hatch that will
+                 # do an exact string match of the version you wish to install.
+                 # This will not be parsed by PEP 440 and we cannot determine
+                 # any semantic meaning from it. This operator is discouraged
+                 # but included entirely as an escape hatch.
+                 (?<====)  # Only match for the identity operator
+                 \s*
+                 [^\s]*    # We just match everything, except for whitespace
+                           # since we are only testing for strict identity.
+             )
+             |
+             (?:
+                 # The (non)equality operators allow for wild card and local
+                 # versions to be specified so we have to define these two
+                 # operators separately to enable that.
+                 (?<===|!=)            # Only match for equals and not equals
+
+                 \s*
+                 v?
+                 (?:[0-9]+!)?          # epoch
+                 [0-9]+(?:\.[0-9]+)*   # release
+                 (?:                   # pre release
+                     [-_\.]?
+                     (a|b|c|rc|alpha|beta|pre|preview)
+                     [-_\.]?
+                     [0-9]*
+                 )?
+                 (?:                   # post release
+                     (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                 )?
+
+                 # You cannot use a wild card and a dev or local version
+                 # together so group them with a | and make them optional.
+                 (?:
+                     (?:[-_\.]?dev[-_\.]?[0-9]*)?         # dev release
+                     (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
+                     |
+                     \.\*  # Wild card syntax of .*
+                 )?
+             )
+             |
+             (?:
+                 # The compatible operator requires at least two digits in the
+                 # release segment.
+                 (?<=~=)               # Only match for the compatible operator
+
+                 \s*
+                 v?
+                 (?:[0-9]+!)?          # epoch
+                 [0-9]+(?:\.[0-9]+)+   # release (We have a + instead of a *)
+                 (?:                   # pre release
+                     [-_\.]?
+                     (a|b|c|rc|alpha|beta|pre|preview)
+                     [-_\.]?
+                     [0-9]*
+                 )?
+                 (?:                   # post release
+                     (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                 )?
+                 (?:[-_\.]?dev[-_\.]?[0-9]*)?  # dev release
+             )
+             |
+             (?:
+                 # All other operators only allow a sub set of what the
+                 # (non)equality operators do. Specifically they do not allow
+                 # local versions to be specified nor do they allow the prefix
+                 # matching wild cards.
+                 (?<!==|!=|~=)         # We have special cases for these
+                                       # operators so we want to make sure they
+                                       # don't match here.
+
+                 \s*
+                 v?
+                 (?:[0-9]+!)?          # epoch
+                 [0-9]+(?:\.[0-9]+)*   # release
+                 (?:                   # pre release
+                     [-_\.]?
+                     (a|b|c|rc|alpha|beta|pre|preview)
+                     [-_\.]?
+                     [0-9]*
+                 )?
+                 (?:                   # post release
+                     (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+                 )?
+                 (?:[-_\.]?dev[-_\.]?[0-9]*)?  # dev release
+             )
+         )
+         """
+
+     _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+     _operators = {
+         "~=": "compatible",
+         "==": "equal",
+         "!=": "not_equal",
+         "<=": "less_than_equal",
+         ">=": "greater_than_equal",
+         "<": "less_than",
+         ">": "greater_than",
+         "===": "arbitrary",
+     }
+
+     @_require_version_compare
+     def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool:
+
+         # Compatible releases have an equivalent combination of >= and ==. That
+         # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
+         # implement this in terms of the other specifiers instead of
+         # implementing it ourselves. The only thing we need to do is construct
+         # the other specifiers.
+
+         # We want everything but the last item in the version, but we want to
+         # ignore suffix segments.
+         prefix = ".".join(
+             list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
+         )
+
+         # Add the prefix notation to the end of our string
+         prefix += ".*"
+
+         return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
+             prospective, prefix
+         )
+
+     @_require_version_compare
+     def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool:
+
+         # We need special logic to handle prefix matching
+         if spec.endswith(".*"):
+             # In the case of prefix matching we want to ignore the local segment.
+             prospective = Version(prospective.public)
+             # Split the spec out by dots, and pretend that there is an implicit
+             # dot in between a release segment and a pre-release segment.
+             split_spec = _version_split(spec[:-2])  # Remove the trailing .*
+
+             # Split the prospective version out by dots, and pretend that there
+             # is an implicit dot in between a release segment and a pre-release
+             # segment.
+             split_prospective = _version_split(str(prospective))
+
+             # Shorten the prospective version to be the same length as the spec
+             # so that we can determine if the specifier is a prefix of the
+             # prospective version or not.
+             shortened_prospective = split_prospective[: len(split_spec)]
+
+             # Pad out our two sides with zeros so that they both equal the same
+             # length.
+             padded_spec, padded_prospective = _pad_version(
+                 split_spec, shortened_prospective
+             )
+
+             return padded_prospective == padded_spec
+         else:
+             # Convert our spec string into a Version
+             spec_version = Version(spec)
+
+             # If the specifier does not have a local segment, then we want to
+             # act as if the prospective version also does not have a local
+             # segment.
+             if not spec_version.local:
+                 prospective = Version(prospective.public)
+
+             return prospective == spec_version
+
+     @_require_version_compare
+     def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool:
+         return not self._compare_equal(prospective, spec)
+
+     @_require_version_compare
+     def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool:
+
+         # NB: Local version identifiers are NOT permitted in the version
+         # specifier, so local version labels can be universally removed from
+         # the prospective version.
+         return Version(prospective.public) <= Version(spec)
+
+     @_require_version_compare
+     def _compare_greater_than_equal(
+         self, prospective: ParsedVersion, spec: str
+     ) -> bool:
+
+         # NB: Local version identifiers are NOT permitted in the version
+         # specifier, so local version labels can be universally removed from
+         # the prospective version.
+         return Version(prospective.public) >= Version(spec)
+
+     @_require_version_compare
+     def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
+
+         # Convert our spec to a Version instance, since we'll want to work with
+         # it as a version.
+         spec = Version(spec_str)
+
+         # Check to see if the prospective version is less than the spec
+         # version. If it's not we can short circuit and just return False now
+         # instead of doing extra unneeded work.
+         if not prospective < spec:
+             return False
+
+         # This special case is here so that, unless the specifier itself
+         # includes a pre-release version, we do not accept pre-release
+         # versions for the version mentioned in the specifier (e.g. <3.1 should
+         # not match 3.1.dev0, but should match 3.0.dev0).
+         if not spec.is_prerelease and prospective.is_prerelease:
+             if Version(prospective.base_version) == Version(spec.base_version):
+                 return False
+
+         # If we've gotten to here, it means that the prospective version is both
+         # less than the spec version *and* it's not a pre-release of the same
+         # version in the spec.
+         return True
+
+     @_require_version_compare
+     def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
+
+         # Convert our spec to a Version instance, since we'll want to work with
+         # it as a version.
+         spec = Version(spec_str)
+
+         # Check to see if the prospective version is greater than the spec
+         # version. If it's not we can short circuit and just return False now
+         # instead of doing extra unneeded work.
+         if not prospective > spec:
+             return False
+
+         # This special case is here so that, unless the specifier itself
+         # includes a post-release version, we do not accept
+         # post-release versions for the version mentioned in the specifier
+         # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
+         if not spec.is_postrelease and prospective.is_postrelease:
+             if Version(prospective.base_version) == Version(spec.base_version):
+                 return False
+
+         # Ensure that we do not allow a local version of the version mentioned
+         # in the specifier, which is technically greater than, to match.
+         if prospective.local is not None:
+             if Version(prospective.base_version) == Version(spec.base_version):
+                 return False
+
+         # If we've gotten to here, it means that the prospective version is both
+         # greater than the spec version *and* it's not a pre-release of the
+         # same version in the spec.
+         return True
+
+     def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
+         return str(prospective).lower() == str(spec).lower()
+
+     @property
+     def prereleases(self) -> bool:
+
+         # If there is an explicit prereleases set for this, then we'll just
+         # blindly use that.
+         if self._prereleases is not None:
+             return self._prereleases
+
+         # Look at the specifier's operator and determine whether it is an
+         # inclusive operator and, if so, whether the version it pins includes
+         # an explicit prerelease.
+         operator, version = self._spec
+         if operator in ["==", ">=", "<=", "~=", "==="]:
+             # The == specifier can include a trailing .*, if it does we
+             # want to remove it before parsing.
+             if operator == "==" and version.endswith(".*"):
+                 version = version[:-2]
+
+             # Parse the version, and if it is a pre-release then this
+             # specifier allows pre-releases.
+             if parse(version).is_prerelease:
+                 return True
+
+         return False
+
+     @prereleases.setter
+     def prereleases(self, value: bool) -> None:
+         self._prereleases = value
+
+
+ _prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
+
+
+ def _version_split(version: str) -> List[str]:
+     result: List[str] = []
+     for item in version.split("."):
+         match = _prefix_regex.search(item)
+         if match:
+             result.extend(match.groups())
+         else:
+             result.append(item)
+     return result
+
+
+ def _is_not_suffix(segment: str) -> bool:
+     return not any(
+         segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
+     )
+
+
+ def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
+     left_split, right_split = [], []
+
+     # Get the release segment of our versions
+     left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
+     right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
+
+     # Get the rest of our versions
+     left_split.append(left[len(left_split[0]) :])
+     right_split.append(right[len(right_split[0]) :])
+
+     # Insert our padding
+     left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
+     right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
+
+     return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
+
+
+ class SpecifierSet(BaseSpecifier):
+     def __init__(
+         self, specifiers: str = "", prereleases: Optional[bool] = None
+     ) -> None:
+
+         # Split on , to break each individual specifier into its own item, and
+         # strip each item to remove leading/trailing whitespace.
+         split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
+
+         # Parse each individual specifier, attempting first to make it a
+         # Specifier and falling back to a LegacySpecifier.
+         parsed: Set[_IndividualSpecifier] = set()
+         for specifier in split_specifiers:
+             try:
+                 parsed.add(Specifier(specifier))
+             except InvalidSpecifier:
+                 parsed.add(LegacySpecifier(specifier))
+
+         # Turn our parsed specifiers into a frozen set and save them for later.
+         self._specs = frozenset(parsed)
+
+         # Store our prereleases value so we can use it later to determine if
+         # we accept prereleases or not.
+         self._prereleases = prereleases
+
+     def __repr__(self) -> str:
+         pre = (
+             f", prereleases={self.prereleases!r}"
+             if self._prereleases is not None
+             else ""
+         )
+
+         return "<SpecifierSet({!r}{})>".format(str(self), pre)
+
+     def __str__(self) -> str:
+         return ",".join(sorted(str(s) for s in self._specs))
+
+     def __hash__(self) -> int:
+         return hash(self._specs)
+
+     def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
+         if isinstance(other, str):
+             other = SpecifierSet(other)
+         elif not isinstance(other, SpecifierSet):
+             return NotImplemented
+
+         specifier = SpecifierSet()
+         specifier._specs = frozenset(self._specs | other._specs)
+
+         if self._prereleases is None and other._prereleases is not None:
+             specifier._prereleases = other._prereleases
+         elif self._prereleases is not None and other._prereleases is None:
+             specifier._prereleases = self._prereleases
+         elif self._prereleases == other._prereleases:
+             specifier._prereleases = self._prereleases
+         else:
+             raise ValueError(
+                 "Cannot combine SpecifierSets with True and False prerelease "
+                 "overrides."
+             )
+
+         return specifier
+
+     def __eq__(self, other: object) -> bool:
+         if isinstance(other, (str, _IndividualSpecifier)):
+             other = SpecifierSet(str(other))
+         elif not isinstance(other, SpecifierSet):
+             return NotImplemented
+
+         return self._specs == other._specs
+
+     def __ne__(self, other: object) -> bool:
+         if isinstance(other, (str, _IndividualSpecifier)):
+             other = SpecifierSet(str(other))
+         elif not isinstance(other, SpecifierSet):
+             return NotImplemented
+
+         return self._specs != other._specs
+
+     def __len__(self) -> int:
+         return len(self._specs)
+
+     def __iter__(self) -> Iterator[_IndividualSpecifier]:
+         return iter(self._specs)
+
+     @property
+     def prereleases(self) -> Optional[bool]:
+
+         # If we have been given an explicit prerelease modifier, then we'll
+         # pass that through here.
+         if self._prereleases is not None:
+             return self._prereleases
+
+         # If we don't have any specifiers, and we don't have a forced value,
+         # then we'll just return None since we don't know if this should have
+         # pre-releases or not.
+         if not self._specs:
+             return None
+
+         # Otherwise we'll see if any of the given specifiers accept
+         # prereleases, if any of them do we'll return True, otherwise False.
+         return any(s.prereleases for s in self._specs)
+
+     @prereleases.setter
+     def prereleases(self, value: bool) -> None:
+         self._prereleases = value
+
+     def __contains__(self, item: UnparsedVersion) -> bool:
+         return self.contains(item)
+
+     def contains(
+         self, item: UnparsedVersion, prereleases: Optional[bool] = None
+     ) -> bool:
+
+         # Ensure that our item is a Version or LegacyVersion instance.
+         if not isinstance(item, (LegacyVersion, Version)):
+             item = parse(item)
+
+         # Determine if we're forcing a prerelease or not, if we're not forcing
+         # one for this particular filter call, then we'll use whatever the
+         # SpecifierSet thinks for whether or not we should support prereleases.
+         if prereleases is None:
+             prereleases = self.prereleases
+
+         # We can determine if we're going to allow pre-releases by looking to
+         # see if any of the underlying items supports them. If none of them do
+         # and this item is a pre-release then we do not allow it and we can
+         # short circuit that here.
+         # Note: This means that 1.0.dev1 would not be contained in something
+         # like >=1.0.devabc; however, it would be in >=1.0.devabc,>0.0.dev0
+         if not prereleases and item.is_prerelease:
+             return False
+
+         # We simply dispatch to the underlying specs here to make sure that the
+         # given version is contained within all of them.
+         # Note: This use of all() here means that an empty set of specifiers
+         # will always return True, this is an explicit design decision.
+         return all(s.contains(item, prereleases=prereleases) for s in self._specs)
+
+     def filter(
+         self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
+     ) -> Iterable[VersionTypeVar]:
+
+         # Determine if we're forcing a prerelease or not, if we're not forcing
+         # one for this particular filter call, then we'll use whatever the
+         # SpecifierSet thinks for whether or not we should support prereleases.
+         if prereleases is None:
+             prereleases = self.prereleases
+
+         # If we have any specifiers, then we want to wrap our iterable in the
+         # filter method for each one, this will act as a logical AND amongst
+         # each specifier.
+         if self._specs:
+             for spec in self._specs:
+                 iterable = spec.filter(iterable, prereleases=bool(prereleases))
+             return iterable
+         # If we do not have any specifiers, then we need to have a rough filter
+         # which will filter out any pre-releases, unless there are no final
+         # releases, and which will filter out LegacyVersion in general.
+         else:
+             filtered: List[VersionTypeVar] = []
+             found_prereleases: List[VersionTypeVar] = []
+
+             item: UnparsedVersion
+             parsed_version: Union[Version, LegacyVersion]
+
+             for item in iterable:
+                 # Ensure that we have some kind of Version class for this item.
+                 if not isinstance(item, (LegacyVersion, Version)):
+                     parsed_version = parse(item)
+                 else:
+                     parsed_version = item
+
+                 # Filter out any item which is parsed as a LegacyVersion
+                 if isinstance(parsed_version, LegacyVersion):
+                     continue
+
+                 # Store any item which is a pre-release for later unless we've
+                 # already found a final version or we are accepting prereleases
+                 if parsed_version.is_prerelease and not prereleases:
+                     if not filtered:
+                         found_prereleases.append(item)
+                 else:
+                     filtered.append(item)
+
+             # If we've found no items except for pre-releases, then we'll go
+             # ahead and use the pre-releases
+             if not filtered and found_prereleases and prereleases is None:
+                 return found_prereleases
+
+             return filtered
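A doctest-style sketch of the contains/filter behaviour and the prerelease handling described in the comments above (illustrative, not part of the diff):

    >>> from packaging.specifiers import SpecifierSet
    >>> spec = SpecifierSet(">=1.0,<2.0")
    >>> "1.5" in spec
    True
    >>> list(spec.filter(["0.9", "1.0", "1.5rc1", "2.0"]))
    ['1.0']
    >>> list(spec.filter(["0.9", "1.0", "1.5rc1", "2.0"], prereleases=True))
    ['1.0', '1.5rc1']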
public/gpt-2/packaging/tags.py ADDED
@@ -0,0 +1,484 @@
+ # This file is dual licensed under the terms of the Apache License, Version
+ # 2.0, and the BSD License. See the LICENSE file in the root of this repository
+ # for complete details.
+
+ import logging
+ import platform
+ import sys
+ import sysconfig
+ from importlib.machinery import EXTENSION_SUFFIXES
+ from typing import (
+     Dict,
+     FrozenSet,
+     Iterable,
+     Iterator,
+     List,
+     Optional,
+     Sequence,
+     Tuple,
+     Union,
+     cast,
+ )
+
+ from . import _manylinux, _musllinux
+
+ logger = logging.getLogger(__name__)
+
+ PythonVersion = Sequence[int]
+ MacVersion = Tuple[int, int]
+
+ INTERPRETER_SHORT_NAMES: Dict[str, str] = {
+     "python": "py",  # Generic.
+     "cpython": "cp",
+     "pypy": "pp",
+     "ironpython": "ip",
+     "jython": "jy",
+ }
+
+
+ _32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
+
+
+ class Tag:
+     """
+     A representation of the tag triple for a wheel.
+
+     Instances are considered immutable and thus are hashable. Equality checking
+     is also supported.
+     """
+
+     __slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
+
+     def __init__(self, interpreter: str, abi: str, platform: str) -> None:
+         self._interpreter = interpreter.lower()
+         self._abi = abi.lower()
+         self._platform = platform.lower()
+         # The __hash__ of every single element in a Set[Tag] will be evaluated
+         # each time that a set calls its `.disjoint()` method, which may be
+         # called hundreds of times when scanning a page of links for packages
+         # with tags matching that Set[Tag]. Pre-computing the value here
+         # produces significant speedups for downstream consumers.
+         self._hash = hash((self._interpreter, self._abi, self._platform))
+
+     @property
+     def interpreter(self) -> str:
+         return self._interpreter
+
+     @property
+     def abi(self) -> str:
+         return self._abi
+
+     @property
+     def platform(self) -> str:
+         return self._platform
+
+     def __eq__(self, other: object) -> bool:
+         if not isinstance(other, Tag):
+             return NotImplemented
+
+         return (
+             (self._hash == other._hash)  # Short-circuit ASAP for perf reasons.
+             and (self._platform == other._platform)
+             and (self._abi == other._abi)
+             and (self._interpreter == other._interpreter)
+         )
+
+     def __hash__(self) -> int:
+         return self._hash
+
+     def __str__(self) -> str:
+         return f"{self._interpreter}-{self._abi}-{self._platform}"
+
+     def __repr__(self) -> str:
+         return "<{self} @ {self_id}>".format(self=self, self_id=id(self))
+
+
+ def parse_tag(tag: str) -> FrozenSet[Tag]:
+     """
+     Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
+
+     Returning a set is required due to the possibility that the tag is a
+     compressed tag set.
+     """
+     tags = set()
+     interpreters, abis, platforms = tag.split("-")
+     for interpreter in interpreters.split("."):
+         for abi in abis.split("."):
+             for platform_ in platforms.split("."):
+                 tags.add(Tag(interpreter, abi, platform_))
+     return frozenset(tags)
+
+
+ def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
+     value = sysconfig.get_config_var(name)
+     if value is None and warn:
+         logger.debug(
+             "Config variable '%s' is unset, Python ABI tag may be incorrect", name
+         )
+     return value
+
+
+ def _normalize_string(string: str) -> str:
+     return string.replace(".", "_").replace("-", "_")
+
+
+ def _abi3_applies(python_version: PythonVersion) -> bool:
+     """
+     Determine if the Python version supports abi3.
+
+     PEP 384 was first implemented in Python 3.2.
+     """
+     return len(python_version) > 1 and tuple(python_version) >= (3, 2)
+
+
+ def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]:
+     py_version = tuple(py_version)  # To allow for version comparison.
+     abis = []
+     version = _version_nodot(py_version[:2])
+     debug = pymalloc = ucs4 = ""
+     with_debug = _get_config_var("Py_DEBUG", warn)
+     has_refcount = hasattr(sys, "gettotalrefcount")
+     # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
+     # extension modules is the best option.
+     # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
+     has_ext = "_d.pyd" in EXTENSION_SUFFIXES
+     if with_debug or (with_debug is None and (has_refcount or has_ext)):
+         debug = "d"
+     if py_version < (3, 8):
+         with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
+         if with_pymalloc or with_pymalloc is None:
+             pymalloc = "m"
+         if py_version < (3, 3):
+             unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
+             if unicode_size == 4 or (
+                 unicode_size is None and sys.maxunicode == 0x10FFFF
+             ):
+                 ucs4 = "u"
+     elif debug:
+         # Debug builds can also load "normal" extension modules.
+         # We can also assume no UCS-4 or pymalloc requirement.
+         abis.append(f"cp{version}")
+     abis.insert(
+         0,
+         "cp{version}{debug}{pymalloc}{ucs4}".format(
+             version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
+         ),
+     )
+     return abis
+
+
+ def cpython_tags(
+     python_version: Optional[PythonVersion] = None,
+     abis: Optional[Iterable[str]] = None,
+     platforms: Optional[Iterable[str]] = None,
+     *,
+     warn: bool = False,
+ ) -> Iterator[Tag]:
+     """
+     Yields the tags for a CPython interpreter.
+
+     The tags consist of:
+     - cp<python_version>-<abi>-<platform>
+     - cp<python_version>-abi3-<platform>
+     - cp<python_version>-none-<platform>
+     - cp<less than python_version>-abi3-<platform>  # Older Python versions down to 3.2.
+
+     If python_version only specifies a major version then user-provided ABIs and
+     the 'none' ABI tag will be used.
+
+     If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
+     their normal position and not at the beginning.
+     """
+     if not python_version:
+         python_version = sys.version_info[:2]
+
+     interpreter = "cp{}".format(_version_nodot(python_version[:2]))
+
+     if abis is None:
+         if len(python_version) > 1:
+             abis = _cpython_abis(python_version, warn)
+         else:
+             abis = []
+     abis = list(abis)
+     # 'abi3' and 'none' are explicitly handled later.
+     for explicit_abi in ("abi3", "none"):
+         try:
+             abis.remove(explicit_abi)
+         except ValueError:
+             pass
+
+     platforms = list(platforms or _platform_tags())
+     for abi in abis:
+         for platform_ in platforms:
+             yield Tag(interpreter, abi, platform_)
+     if _abi3_applies(python_version):
+         yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
+     yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
+
+     if _abi3_applies(python_version):
+         for minor_version in range(python_version[1] - 1, 1, -1):
+             for platform_ in platforms:
+                 interpreter = "cp{version}".format(
+                     version=_version_nodot((python_version[0], minor_version))
+                 )
+                 yield Tag(interpreter, "abi3", platform_)
+
+
+ def _generic_abi() -> Iterator[str]:
+     abi = sysconfig.get_config_var("SOABI")
+     if abi:
+         yield _normalize_string(abi)
+
+
+ def generic_tags(
+     interpreter: Optional[str] = None,
+     abis: Optional[Iterable[str]] = None,
+     platforms: Optional[Iterable[str]] = None,
+     *,
+     warn: bool = False,
+ ) -> Iterator[Tag]:
+     """
+     Yields the tags for a generic interpreter.
+
+     The tags consist of:
+     - <interpreter>-<abi>-<platform>
+
+     The "none" ABI will be added if it was not explicitly provided.
+     """
+     if not interpreter:
+         interp_name = interpreter_name()
+         interp_version = interpreter_version(warn=warn)
+         interpreter = "".join([interp_name, interp_version])
+     if abis is None:
+         abis = _generic_abi()
+     platforms = list(platforms or _platform_tags())
+     abis = list(abis)
+     if "none" not in abis:
+         abis.append("none")
+     for abi in abis:
+         for platform_ in platforms:
+             yield Tag(interpreter, abi, platform_)
+
+
+ def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
+     """
+     Yields Python versions in descending order.
+
+     After the latest version, the major-only version will be yielded, and then
+     all previous versions of that major version.
+     """
+     if len(py_version) > 1:
+         yield "py{version}".format(version=_version_nodot(py_version[:2]))
+     yield "py{major}".format(major=py_version[0])
+     if len(py_version) > 1:
+         for minor in range(py_version[1] - 1, -1, -1):
+             yield "py{version}".format(version=_version_nodot((py_version[0], minor)))
+
+
+ def compatible_tags(
+     python_version: Optional[PythonVersion] = None,
+     interpreter: Optional[str] = None,
+     platforms: Optional[Iterable[str]] = None,
+ ) -> Iterator[Tag]:
+     """
+     Yields the sequence of tags that are compatible with a specific version of Python.
+
+     The tags consist of:
+     - py*-none-<platform>
+     - <interpreter>-none-any  # ... if `interpreter` is provided.
+     - py*-none-any
+     """
+     if not python_version:
+         python_version = sys.version_info[:2]
+     platforms = list(platforms or _platform_tags())
+     for version in _py_interpreter_range(python_version):
+         for platform_ in platforms:
+             yield Tag(version, "none", platform_)
+     if interpreter:
+         yield Tag(interpreter, "none", "any")
+     for version in _py_interpreter_range(python_version):
+         yield Tag(version, "none", "any")
+
+
+ def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
+     if not is_32bit:
+         return arch
+
+     if arch.startswith("ppc"):
+         return "ppc"
+
+     return "i386"
+
+
+ def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]:
+     formats = [cpu_arch]
+     if cpu_arch == "x86_64":
+         if version < (10, 4):
+             return []
+         formats.extend(["intel", "fat64", "fat32"])
+
+     elif cpu_arch == "i386":
+         if version < (10, 4):
+             return []
+         formats.extend(["intel", "fat32", "fat"])
+
+     elif cpu_arch == "ppc64":
+         # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
+         if version > (10, 5) or version < (10, 4):
+             return []
+         formats.append("fat64")
+
+     elif cpu_arch == "ppc":
+         if version > (10, 6):
+             return []
+         formats.extend(["fat32", "fat"])
+
+     if cpu_arch in {"arm64", "x86_64"}:
+         formats.append("universal2")
+
+     if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
+         formats.append("universal")
+
+     return formats
+
+
+ def mac_platforms(
+     version: Optional[MacVersion] = None, arch: Optional[str] = None
+ ) -> Iterator[str]:
+     """
+     Yields the platform tags for a macOS system.
+
+     The `version` parameter is a two-item tuple specifying the macOS version to
+     generate platform tags for. The `arch` parameter is the CPU architecture to
+     generate platform tags for. Both parameters default to the appropriate value
+     for the current system.
+     """
+     version_str, _, cpu_arch = platform.mac_ver()
+     if version is None:
+         version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
+     else:
+         version = version
+     if arch is None:
+         arch = _mac_arch(cpu_arch)
+     else:
+         arch = arch
+
+     if (10, 0) <= version and version < (11, 0):
+         # Prior to Mac OS 11, each yearly release of Mac OS bumped the
+         # "minor" version number. The major version was always 10.
+         for minor_version in range(version[1], -1, -1):
+             compat_version = 10, minor_version
+             binary_formats = _mac_binary_formats(compat_version, arch)
+             for binary_format in binary_formats:
+                 yield "macosx_{major}_{minor}_{binary_format}".format(
+                     major=10, minor=minor_version, binary_format=binary_format
+                 )
+
+     if version >= (11, 0):
+         # Starting with Mac OS 11, each yearly release bumps the major version
+         # number. The minor versions are now the midyear updates.
+         for major_version in range(version[0], 10, -1):
+             compat_version = major_version, 0
+             binary_formats = _mac_binary_formats(compat_version, arch)
+             for binary_format in binary_formats:
+                 yield "macosx_{major}_{minor}_{binary_format}".format(
+                     major=major_version, minor=0, binary_format=binary_format
+                 )
+
+     if version >= (11, 0):
+         # Mac OS 11 on x86_64 is compatible with binaries from previous releases.
+         # Arm64 support was introduced in 11.0, so no Arm binaries from previous
+         # releases exist.
+         #
+         # However, the "universal2" binary format can have a
+         # macOS version earlier than 11.0 when the x86_64 part of the binary supports
+         # that version of macOS.
+         if arch == "x86_64":
+             for minor_version in range(16, 3, -1):
+                 compat_version = 10, minor_version
+                 binary_formats = _mac_binary_formats(compat_version, arch)
+                 for binary_format in binary_formats:
+                     yield "macosx_{major}_{minor}_{binary_format}".format(
+                         major=compat_version[0],
+                         minor=compat_version[1],
+                         binary_format=binary_format,
+                     )
+         else:
+             for minor_version in range(16, 3, -1):
+                 compat_version = 10, minor_version
+                 binary_format = "universal2"
+                 yield "macosx_{major}_{minor}_{binary_format}".format(
+                     major=compat_version[0],
+                     minor=compat_version[1],
+                     binary_format=binary_format,
+                 )
+
+
+ def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
+     linux = _normalize_string(sysconfig.get_platform())
+     if is_32bit:
+         if linux == "linux_x86_64":
+             linux = "linux_i686"
+         elif linux == "linux_aarch64":
+             linux = "linux_armv7l"
+     _, arch = linux.split("_", 1)
+     yield from _manylinux.platform_tags(linux, arch)
+     yield from _musllinux.platform_tags(arch)
+     yield linux
+
+
+ def _generic_platforms() -> Iterator[str]:
+     yield _normalize_string(sysconfig.get_platform())
+
+
+ def _platform_tags() -> Iterator[str]:
+     """
+     Provides the platform tags for this installation.
+     """
+     if platform.system() == "Darwin":
+         return mac_platforms()
440
+ elif platform.system() == "Linux":
441
+ return _linux_platforms()
442
+ else:
443
+ return _generic_platforms()
444
+
445
+
446
+ def interpreter_name() -> str:
447
+ """
448
+ Returns the name of the running interpreter.
449
+ """
450
+ name = sys.implementation.name
451
+ return INTERPRETER_SHORT_NAMES.get(name) or name
452
+
453
+
454
+ def interpreter_version(*, warn: bool = False) -> str:
455
+ """
456
+ Returns the version of the running interpreter.
457
+ """
458
+ version = _get_config_var("py_version_nodot", warn=warn)
459
+ if version:
460
+ version = str(version)
461
+ else:
462
+ version = _version_nodot(sys.version_info[:2])
463
+ return version
464
+
465
+
466
+ def _version_nodot(version: PythonVersion) -> str:
467
+ return "".join(map(str, version))
468
+
469
+
470
+ def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
471
+ """
472
+ Returns the sequence of tag triples for the running interpreter.
473
+
474
+ The order of the sequence corresponds to priority order for the
475
+ interpreter, from most to least important.
476
+ """
477
+
478
+ interp_name = interpreter_name()
479
+ if interp_name == "cp":
480
+ yield from cpython_tags(warn=warn)
481
+ else:
482
+ yield from generic_tags()
483
+
484
+ yield from compatible_tags()
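Editor's note: a minimal usage sketch of the tag helpers above, assuming this vendored `packaging` package is importable; the printed tags depend on the interpreter and OS running the snippet, so they are illustrative only.

```python
# Sketch: enumerate the wheel tags accepted by the running interpreter,
# in priority order (most specific first).
from packaging.tags import compatible_tags, sys_tags

for tag in list(sys_tags())[:3]:
    # Each Tag is an <interpreter>-<abi>-<platform> triple, e.g.
    # cp39-cp39-manylinux_2_17_x86_64 on a typical CPython 3.9 Linux box.
    print(tag)

# compatible_tags() yields only the generic py*-none-* fallbacks;
# pinning python_version and platforms makes the output deterministic.
print(next(iter(compatible_tags(python_version=(3, 9), platforms=["any"]))))
# -> py39-none-any
```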
public/gpt-2/packaging/utils.py ADDED
@@ -0,0 +1,136 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import re
+from typing import FrozenSet, NewType, Tuple, Union, cast
+
+from .tags import Tag, parse_tag
+from .version import InvalidVersion, Version
+
+BuildTag = Union[Tuple[()], Tuple[int, str]]
+NormalizedName = NewType("NormalizedName", str)
+
+
+class InvalidWheelFilename(ValueError):
+    """
+    An invalid wheel filename was found, users should refer to PEP 427.
+    """
+
+
+class InvalidSdistFilename(ValueError):
+    """
+    An invalid sdist filename was found, users should refer to the packaging user guide.
+    """
+
+
+_canonicalize_regex = re.compile(r"[-_.]+")
+# PEP 427: The build number must start with a digit.
+_build_tag_regex = re.compile(r"(\d+)(.*)")
+
+
+def canonicalize_name(name: str) -> NormalizedName:
+    # This is taken from PEP 503.
+    value = _canonicalize_regex.sub("-", name).lower()
+    return cast(NormalizedName, value)
+
+
+def canonicalize_version(version: Union[Version, str]) -> str:
+    """
+    This is very similar to Version.__str__, but has one subtle difference
+    with the way it handles the release segment.
+    """
+    if isinstance(version, str):
+        try:
+            parsed = Version(version)
+        except InvalidVersion:
+            # Legacy versions cannot be normalized
+            return version
+    else:
+        parsed = version
+
+    parts = []
+
+    # Epoch
+    if parsed.epoch != 0:
+        parts.append(f"{parsed.epoch}!")
+
+    # Release segment
+    # NB: This strips trailing '.0's to normalize
+    parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release)))
+
+    # Pre-release
+    if parsed.pre is not None:
+        parts.append("".join(str(x) for x in parsed.pre))
+
+    # Post-release
+    if parsed.post is not None:
+        parts.append(f".post{parsed.post}")
+
+    # Development release
+    if parsed.dev is not None:
+        parts.append(f".dev{parsed.dev}")
+
+    # Local version segment
+    if parsed.local is not None:
+        parts.append(f"+{parsed.local}")
+
+    return "".join(parts)
+
+
+def parse_wheel_filename(
+    filename: str,
+) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]:
+    if not filename.endswith(".whl"):
+        raise InvalidWheelFilename(
+            f"Invalid wheel filename (extension must be '.whl'): {filename}"
+        )
+
+    filename = filename[:-4]
+    dashes = filename.count("-")
+    if dashes not in (4, 5):
+        raise InvalidWheelFilename(
+            f"Invalid wheel filename (wrong number of parts): {filename}"
+        )
+
+    parts = filename.split("-", dashes - 2)
+    name_part = parts[0]
+    # See PEP 427 for the rules on escaping the project name
+    if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
+        raise InvalidWheelFilename(f"Invalid project name: {filename}")
+    name = canonicalize_name(name_part)
+    version = Version(parts[1])
+    if dashes == 5:
+        build_part = parts[2]
+        build_match = _build_tag_regex.match(build_part)
+        if build_match is None:
+            raise InvalidWheelFilename(
+                f"Invalid build number: {build_part} in '{filename}'"
+            )
+        build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
+    else:
+        build = ()
+    tags = parse_tag(parts[-1])
+    return (name, version, build, tags)
+
+
+def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
+    if filename.endswith(".tar.gz"):
+        file_stem = filename[: -len(".tar.gz")]
+    elif filename.endswith(".zip"):
+        file_stem = filename[: -len(".zip")]
+    else:
+        raise InvalidSdistFilename(
+            f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
+            f" {filename}"
+        )
+
+    # We are requiring a PEP 440 version, which cannot contain dashes,
+    # so we split on the last dash.
+    name_part, sep, version_part = file_stem.rpartition("-")
+    if not sep:
+        raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")
+
+    name = canonicalize_name(name_part)
+    version = Version(version_part)
+    return (name, version)
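Editor's note: a short usage sketch of the filename helpers above, assuming `packaging` is importable; the filenames are made-up examples, not real distributions.

```python
from packaging.utils import (
    canonicalize_name,
    parse_sdist_filename,
    parse_wheel_filename,
)

# PEP 503 normalization: runs of "-", "_", "." collapse to "-", lowercased.
assert canonicalize_name("Foo_Bar.baz") == "foo-bar-baz"

# A wheel filename splits into (name, version, build tag, tag set).
name, version, build, tags = parse_wheel_filename(
    "sampleproject-2.0.0-py3-none-any.whl"  # hypothetical filename
)
assert str(version) == "2.0.0" and build == ()
assert any(str(t) == "py3-none-any" for t in tags)

# Sdists split on the last dash, since PEP 440 versions contain no dashes.
assert parse_sdist_filename("sampleproject-2.0.0.tar.gz") == (name, version)
```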
public/gpt-2/packaging/version.py ADDED
@@ -0,0 +1,504 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import collections
+import itertools
+import re
+import warnings
+from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
+
+from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
+
+__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
+
+InfiniteTypes = Union[InfinityType, NegativeInfinityType]
+PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
+SubLocalType = Union[InfiniteTypes, int, str]
+LocalType = Union[
+    NegativeInfinityType,
+    Tuple[
+        Union[
+            SubLocalType,
+            Tuple[SubLocalType, str],
+            Tuple[NegativeInfinityType, SubLocalType],
+        ],
+        ...,
+    ],
+]
+CmpKey = Tuple[
+    int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
+]
+LegacyCmpKey = Tuple[int, Tuple[str, ...]]
+VersionComparisonMethod = Callable[
+    [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
+]
+
+_Version = collections.namedtuple(
+    "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
+)
+
+
+def parse(version: str) -> Union["LegacyVersion", "Version"]:
+    """
+    Parse the given version string and return either a :class:`Version` object
+    or a :class:`LegacyVersion` object depending on if the given version is
+    a valid PEP 440 version or a legacy version.
+    """
+    try:
+        return Version(version)
+    except InvalidVersion:
+        return LegacyVersion(version)
+
+
+class InvalidVersion(ValueError):
+    """
+    An invalid version was found, users should refer to PEP 440.
+    """
+
+
+class _BaseVersion:
+    _key: Union[CmpKey, LegacyCmpKey]
+
+    def __hash__(self) -> int:
+        return hash(self._key)
+
+    # Please keep the duplicated `isinstance` check
+    # in the six comparisons hereunder
+    # unless you find a way to avoid adding overhead function calls.
+    def __lt__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key < other._key
+
+    def __le__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key <= other._key
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key == other._key
+
+    def __ge__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key >= other._key
+
+    def __gt__(self, other: "_BaseVersion") -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key > other._key
+
+    def __ne__(self, other: object) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key != other._key
+
+
+class LegacyVersion(_BaseVersion):
+    def __init__(self, version: str) -> None:
+        self._version = str(version)
+        self._key = _legacy_cmpkey(self._version)
+
+        warnings.warn(
+            "Creating a LegacyVersion has been deprecated and will be "
+            "removed in the next major release",
+            DeprecationWarning,
+        )
+
+    def __str__(self) -> str:
+        return self._version
+
+    def __repr__(self) -> str:
+        return f"<LegacyVersion('{self}')>"
+
+    @property
+    def public(self) -> str:
+        return self._version
+
+    @property
+    def base_version(self) -> str:
+        return self._version
+
+    @property
+    def epoch(self) -> int:
+        return -1
+
+    @property
+    def release(self) -> None:
+        return None
+
+    @property
+    def pre(self) -> None:
+        return None
+
+    @property
+    def post(self) -> None:
+        return None
+
+    @property
+    def dev(self) -> None:
+        return None
+
+    @property
+    def local(self) -> None:
+        return None
+
+    @property
+    def is_prerelease(self) -> bool:
+        return False
+
+    @property
+    def is_postrelease(self) -> bool:
+        return False
+
+    @property
+    def is_devrelease(self) -> bool:
+        return False
+
+
+_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
+
+_legacy_version_replacement_map = {
+    "pre": "c",
+    "preview": "c",
+    "-": "final-",
+    "rc": "c",
+    "dev": "@",
+}
+
+
+def _parse_version_parts(s: str) -> Iterator[str]:
+    for part in _legacy_version_component_re.split(s):
+        part = _legacy_version_replacement_map.get(part, part)
+
+        if not part or part == ".":
+            continue
+
+        if part[:1] in "0123456789":
+            # pad for numeric comparison
+            yield part.zfill(8)
+        else:
+            yield "*" + part
+
+    # ensure that alpha/beta/candidate are before final
+    yield "*final"
+
+
+def _legacy_cmpkey(version: str) -> LegacyCmpKey:
+
+    # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
+    # greater than or equal to 0. This will effectively put the LegacyVersion,
+    # which uses the de facto standard originally implemented by setuptools,
+    # before all PEP 440 versions.
+    epoch = -1
+
+    # This scheme is taken from pkg_resources.parse_version in setuptools, prior
+    # to its adoption of the packaging library.
+    parts: List[str] = []
+    for part in _parse_version_parts(version.lower()):
+        if part.startswith("*"):
+            # remove "-" before a prerelease tag
+            if part < "*final":
+                while parts and parts[-1] == "*final-":
+                    parts.pop()
+
+            # remove trailing zeros from each series of numeric parts
+            while parts and parts[-1] == "00000000":
+                parts.pop()
+
+        parts.append(part)
+
+    return epoch, tuple(parts)
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+
+class Version(_BaseVersion):
+
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+    def __init__(self, version: str) -> None:
+
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: '{version}'")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self) -> str:
+        return f"<Version('{self}')>"
+
+    def __str__(self) -> str:
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join(str(x) for x in self.pre))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(f".post{self.post}")
+
+        # Development release
+        if self.dev is not None:
+            parts.append(f".dev{self.dev}")
+
+        # Local version segment
+        if self.local is not None:
+            parts.append(f"+{self.local}")
+
+        return "".join(parts)
+
+    @property
+    def epoch(self) -> int:
+        _epoch: int = self._version.epoch
+        return _epoch
+
+    @property
+    def release(self) -> Tuple[int, ...]:
+        _release: Tuple[int, ...] = self._version.release
+        return _release
+
+    @property
+    def pre(self) -> Optional[Tuple[str, int]]:
+        _pre: Optional[Tuple[str, int]] = self._version.pre
+        return _pre
+
+    @property
+    def post(self) -> Optional[int]:
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self) -> Optional[int]:
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self) -> Optional[str]:
+        if self._version.local:
+            return ".".join(str(x) for x in self._version.local)
+        else:
+            return None
+
+    @property
+    def public(self) -> str:
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self) -> str:
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self) -> bool:
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self) -> bool:
+        return self.post is not None
+
+    @property
+    def is_devrelease(self) -> bool:
+        return self.dev is not None
+
+    @property
+    def major(self) -> int:
+        return self.release[0] if len(self.release) >= 1 else 0
+
+    @property
+    def minor(self) -> int:
+        return self.release[1] if len(self.release) >= 2 else 0
+
+    @property
+    def micro(self) -> int:
+        return self.release[2] if len(self.release) >= 3 else 0
+
+
+def _parse_letter_version(
+    letter: str, number: Union[str, bytes, SupportsInt]
+) -> Optional[Tuple[str, int]]:
+
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: str) -> Optional[LocalType]:
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
+
+
+def _cmpkey(
+    epoch: int,
+    release: Tuple[int, ...],
+    pre: Optional[Tuple[str, int]],
+    post: Optional[Tuple[str, int]],
+    dev: Optional[Tuple[str, int]],
+    local: Optional[Tuple[SubLocalType]],
+) -> CmpKey:
+
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all the now-leading
+    # zeros until we come to something non-zero, then re-reverse the rest back
+    # into the correct order and make it a tuple to use as our sorting key.
+    _release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        _pre: PrePostDevType = NegativeInfinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        _pre = Infinity
+    else:
+        _pre = pre
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        _post: PrePostDevType = NegativeInfinity
+
+    else:
+        _post = post
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        _dev: PrePostDevType = Infinity
+
+    else:
+        _dev = dev
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        _local: LocalType = NegativeInfinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        _local = tuple(
+            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+        )
+
+    return epoch, _release, _pre, _post, _dev, _local
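Editor's note: a minimal sketch of the PEP 440 ordering that `_cmpkey` implements, assuming `packaging` is importable as above.

```python
import warnings

from packaging.version import LegacyVersion, Version, parse

# dev releases sort before pre-releases, which sort before the final release;
# post releases sort after it, and trailing zeros are insignificant.
versions = ["1.0.post1", "1.0", "1.0rc1", "1.0.dev0", "1.0a1"]
assert sorted(versions, key=Version) == [
    "1.0.dev0", "1.0a1", "1.0rc1", "1.0", "1.0.post1"
]
assert Version("1.0") == Version("1.0.0")

# parse() falls back to the deprecated LegacyVersion for non-PEP 440 strings.
with warnings.catch_warnings():
    warnings.simplefilter("ignore", DeprecationWarning)
    assert isinstance(parse("not.a.pep440.version"), LegacyVersion)
```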
public/gpt-2/transformers-4.9.1.dist-info/LICENSE ADDED
@@ -0,0 +1,203 @@
+Copyright 2018- The Hugging Face team. All rights reserved.
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
public/gpt-2/transformers-4.9.1.dist-info/METADATA ADDED
@@ -0,0 +1,547 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: transformers
3
+ Version: 4.9.1
4
+ Summary: State-of-the-art Natural Language Processing for TensorFlow 2.0 and PyTorch
5
+ Home-page: https://github.com/huggingface/transformers
6
+ Author: Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Sam Shleifer, Patrick von Platen, Sylvain Gugger, Suraj Patil, Stas Bekman, Google AI Language Team Authors, Open AI team Authors, Facebook AI Authors, Carnegie Mellon University Authors
7
+ Author-email: thomas@huggingface.co
8
+ License: Apache
9
+ Keywords: NLP deep learning transformer pytorch tensorflow BERT GPT GPT-2 google openai CMU
10
+ Platform: UNKNOWN
11
+ Classifier: Development Status :: 5 - Production/Stable
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: Intended Audience :: Education
14
+ Classifier: Intended Audience :: Science/Research
15
+ Classifier: License :: OSI Approved :: Apache Software License
16
+ Classifier: Operating System :: OS Independent
17
+ Classifier: Programming Language :: Python :: 3
18
+ Classifier: Programming Language :: Python :: 3.6
19
+ Classifier: Programming Language :: Python :: 3.7
20
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
21
+ Requires-Python: >=3.6.0
22
+ Description-Content-Type: text/markdown
23
+ Requires-Dist: filelock
24
+ Requires-Dist: huggingface-hub (==0.0.12)
25
+ Requires-Dist: numpy (>=1.17)
26
+ Requires-Dist: packaging
27
+ Requires-Dist: pyyaml (>=5.1)
28
+ Requires-Dist: regex (!=2019.12.17)
29
+ Requires-Dist: requests
30
+ Requires-Dist: sacremoses
31
+ Requires-Dist: tokenizers (<0.11,>=0.10.1)
32
+ Requires-Dist: tqdm (>=4.27)
33
+ Requires-Dist: dataclasses ; python_version < "3.7"
34
+ Requires-Dist: importlib-metadata ; python_version < "3.8"
35
+ Provides-Extra: all
36
+ Requires-Dist: tensorflow (>=2.3) ; extra == 'all'
37
+ Requires-Dist: onnxconverter-common ; extra == 'all'
38
+ Requires-Dist: keras2onnx ; extra == 'all'
39
+ Requires-Dist: torch (>=1.0) ; extra == 'all'
40
+ Requires-Dist: jax (>=0.2.8) ; extra == 'all'
41
+ Requires-Dist: jaxlib (>=0.1.65) ; extra == 'all'
42
+ Requires-Dist: flax (>=0.3.4) ; extra == 'all'
43
+ Requires-Dist: optax (>=0.0.8) ; extra == 'all'
44
+ Requires-Dist: sentencepiece (==0.1.91) ; extra == 'all'
45
+ Requires-Dist: protobuf ; extra == 'all'
46
+ Requires-Dist: tokenizers (<0.11,>=0.10.1) ; extra == 'all'
47
+ Requires-Dist: soundfile ; extra == 'all'
48
+ Requires-Dist: torchaudio ; extra == 'all'
49
+ Requires-Dist: Pillow ; extra == 'all'
50
+ Requires-Dist: optuna ; extra == 'all'
51
+ Requires-Dist: ray[tune] ; extra == 'all'
52
+ Requires-Dist: timm ; extra == 'all'
53
+ Requires-Dist: codecarbon (==1.2.0) ; extra == 'all'
54
+ Provides-Extra: codecarbon
55
+ Requires-Dist: codecarbon (==1.2.0) ; extra == 'codecarbon'
56
+ Provides-Extra: deepspeed
57
+ Requires-Dist: deepspeed (>=0.4.3) ; extra == 'deepspeed'
58
+ Provides-Extra: dev
59
+ Requires-Dist: tensorflow (>=2.3) ; extra == 'dev'
60
+ Requires-Dist: onnxconverter-common ; extra == 'dev'
61
+ Requires-Dist: keras2onnx ; extra == 'dev'
62
+ Requires-Dist: torch (>=1.0) ; extra == 'dev'
63
+ Requires-Dist: jax (>=0.2.8) ; extra == 'dev'
64
+ Requires-Dist: jaxlib (>=0.1.65) ; extra == 'dev'
65
+ Requires-Dist: flax (>=0.3.4) ; extra == 'dev'
66
+ Requires-Dist: optax (>=0.0.8) ; extra == 'dev'
67
+ Requires-Dist: sentencepiece (==0.1.91) ; extra == 'dev'
68
+ Requires-Dist: protobuf ; extra == 'dev'
69
+ Requires-Dist: tokenizers (<0.11,>=0.10.1) ; extra == 'dev'
70
+ Requires-Dist: soundfile ; extra == 'dev'
71
+ Requires-Dist: torchaudio ; extra == 'dev'
72
+ Requires-Dist: Pillow ; extra == 'dev'
73
+ Requires-Dist: optuna ; extra == 'dev'
74
+ Requires-Dist: ray[tune] ; extra == 'dev'
75
+ Requires-Dist: timm ; extra == 'dev'
76
+ Requires-Dist: codecarbon (==1.2.0) ; extra == 'dev'
77
+ Requires-Dist: pytest ; extra == 'dev'
78
+ Requires-Dist: pytest-xdist ; extra == 'dev'
79
+ Requires-Dist: timeout-decorator ; extra == 'dev'
80
+ Requires-Dist: parameterized ; extra == 'dev'
81
+ Requires-Dist: psutil ; extra == 'dev'
82
+ Requires-Dist: datasets ; extra == 'dev'
83
+ Requires-Dist: pytest-timeout ; extra == 'dev'
84
+ Requires-Dist: black (==21.4b0) ; extra == 'dev'
85
+ Requires-Dist: sacrebleu (>=1.4.12) ; extra == 'dev'
86
+ Requires-Dist: rouge-score ; extra == 'dev'
87
+ Requires-Dist: nltk ; extra == 'dev'
88
+ Requires-Dist: GitPython ; extra == 'dev'
89
+ Requires-Dist: faiss-cpu ; extra == 'dev'
90
+ Requires-Dist: cookiecutter (==1.7.2) ; extra == 'dev'
91
+ Requires-Dist: isort (>=5.5.4) ; extra == 'dev'
92
+ Requires-Dist: flake8 (>=3.8.3) ; extra == 'dev'
93
+ Requires-Dist: fugashi (>=1.0) ; extra == 'dev'
94
+ Requires-Dist: ipadic (<2.0,>=1.0.0) ; extra == 'dev'
95
+ Requires-Dist: unidic-lite (>=1.0.7) ; extra == 'dev'
96
+ Requires-Dist: unidic (>=1.0.2) ; extra == 'dev'
97
+ Requires-Dist: docutils (==0.16.0) ; extra == 'dev'
98
+ Requires-Dist: recommonmark ; extra == 'dev'
99
+ Requires-Dist: sphinx (==3.2.1) ; extra == 'dev'
100
+ Requires-Dist: sphinx-markdown-tables ; extra == 'dev'
101
+ Requires-Dist: sphinx-rtd-theme (==0.4.3) ; extra == 'dev'
102
+ Requires-Dist: sphinx-copybutton ; extra == 'dev'
103
+ Requires-Dist: sphinxext-opengraph (==0.4.1) ; extra == 'dev'
104
+ Requires-Dist: scikit-learn ; extra == 'dev'
105
+ Provides-Extra: docs
106
+ Requires-Dist: tensorflow (>=2.3) ; extra == 'docs'
107
+ Requires-Dist: onnxconverter-common ; extra == 'docs'
108
+ Requires-Dist: keras2onnx ; extra == 'docs'
109
+ Requires-Dist: torch (>=1.0) ; extra == 'docs'
110
+ Requires-Dist: jax (>=0.2.8) ; extra == 'docs'
111
+ Requires-Dist: jaxlib (>=0.1.65) ; extra == 'docs'
112
+ Requires-Dist: flax (>=0.3.4) ; extra == 'docs'
113
+ Requires-Dist: optax (>=0.0.8) ; extra == 'docs'
114
+ Requires-Dist: sentencepiece (==0.1.91) ; extra == 'docs'
115
+ Requires-Dist: protobuf ; extra == 'docs'
116
+ Requires-Dist: tokenizers (<0.11,>=0.10.1) ; extra == 'docs'
117
+ Requires-Dist: soundfile ; extra == 'docs'
118
+ Requires-Dist: torchaudio ; extra == 'docs'
119
+ Requires-Dist: Pillow ; extra == 'docs'
120
+ Requires-Dist: optuna ; extra == 'docs'
121
+ Requires-Dist: ray[tune] ; extra == 'docs'
122
+ Requires-Dist: timm ; extra == 'docs'
123
+ Requires-Dist: codecarbon (==1.2.0) ; extra == 'docs'
124
+ Requires-Dist: docutils (==0.16.0) ; extra == 'docs'
125
+ Requires-Dist: recommonmark ; extra == 'docs'
126
+ Requires-Dist: sphinx (==3.2.1) ; extra == 'docs'
127
+ Requires-Dist: sphinx-markdown-tables ; extra == 'docs'
128
+ Requires-Dist: sphinx-rtd-theme (==0.4.3) ; extra == 'docs'
129
+ Requires-Dist: sphinx-copybutton ; extra == 'docs'
130
+ Requires-Dist: sphinxext-opengraph (==0.4.1) ; extra == 'docs'
131
+ Provides-Extra: docs_specific
132
+ Requires-Dist: docutils (==0.16.0) ; extra == 'docs_specific'
133
+ Requires-Dist: recommonmark ; extra == 'docs_specific'
134
+ Requires-Dist: sphinx (==3.2.1) ; extra == 'docs_specific'
135
+ Requires-Dist: sphinx-markdown-tables ; extra == 'docs_specific'
136
+ Requires-Dist: sphinx-rtd-theme (==0.4.3) ; extra == 'docs_specific'
137
+ Requires-Dist: sphinx-copybutton ; extra == 'docs_specific'
138
+ Requires-Dist: sphinxext-opengraph (==0.4.1) ; extra == 'docs_specific'
139
+ Provides-Extra: fairscale
140
+ Requires-Dist: fairscale (>0.3) ; extra == 'fairscale'
141
+ Provides-Extra: flax
142
+ Requires-Dist: jax (>=0.2.8) ; extra == 'flax'
143
+ Requires-Dist: jaxlib (>=0.1.65) ; extra == 'flax'
144
+ Requires-Dist: flax (>=0.3.4) ; extra == 'flax'
145
+ Requires-Dist: optax (>=0.0.8) ; extra == 'flax'
146
+ Provides-Extra: integrations
147
+ Requires-Dist: optuna ; extra == 'integrations'
148
+ Requires-Dist: ray[tune] ; extra == 'integrations'
149
+ Provides-Extra: ja
150
+ Requires-Dist: fugashi (>=1.0) ; extra == 'ja'
151
+ Requires-Dist: ipadic (<2.0,>=1.0.0) ; extra == 'ja'
152
+ Requires-Dist: unidic-lite (>=1.0.7) ; extra == 'ja'
153
+ Requires-Dist: unidic (>=1.0.2) ; extra == 'ja'
154
+ Provides-Extra: modelcreation
155
+ Requires-Dist: cookiecutter (==1.7.2) ; extra == 'modelcreation'
156
+ Provides-Extra: onnx
157
+ Requires-Dist: onnxconverter-common ; extra == 'onnx'
158
+ Requires-Dist: keras2onnx ; extra == 'onnx'
159
+ Requires-Dist: onnxruntime (>=1.4.0) ; extra == 'onnx'
160
+ Requires-Dist: onnxruntime-tools (>=1.4.2) ; extra == 'onnx'
161
+ Provides-Extra: onnxruntime
162
+ Requires-Dist: onnxruntime (>=1.4.0) ; extra == 'onnxruntime'
163
+ Requires-Dist: onnxruntime-tools (>=1.4.2) ; extra == 'onnxruntime'
164
+ Provides-Extra: optuna
165
+ Requires-Dist: optuna ; extra == 'optuna'
166
+ Provides-Extra: quality
167
+ Requires-Dist: black (==21.4b0) ; extra == 'quality'
168
+ Requires-Dist: isort (>=5.5.4) ; extra == 'quality'
169
+ Requires-Dist: flake8 (>=3.8.3) ; extra == 'quality'
170
+ Provides-Extra: ray
171
+ Requires-Dist: ray[tune] ; extra == 'ray'
172
+ Provides-Extra: retrieval
173
+ Requires-Dist: faiss-cpu ; extra == 'retrieval'
174
+ Requires-Dist: datasets ; extra == 'retrieval'
175
+ Provides-Extra: sagemaker
176
+ Requires-Dist: sagemaker (>=2.31.0) ; extra == 'sagemaker'
177
+ Provides-Extra: sentencepiece
178
+ Requires-Dist: sentencepiece (==0.1.91) ; extra == 'sentencepiece'
179
+ Requires-Dist: protobuf ; extra == 'sentencepiece'
180
+ Provides-Extra: serving
181
+ Requires-Dist: pydantic ; extra == 'serving'
182
+ Requires-Dist: uvicorn ; extra == 'serving'
183
+ Requires-Dist: fastapi ; extra == 'serving'
184
+ Requires-Dist: starlette ; extra == 'serving'
185
+ Provides-Extra: sklearn
186
+ Requires-Dist: scikit-learn ; extra == 'sklearn'
187
+ Provides-Extra: speech
188
+ Requires-Dist: soundfile ; extra == 'speech'
189
+ Requires-Dist: torchaudio ; extra == 'speech'
190
+ Provides-Extra: testing
191
+ Requires-Dist: pytest ; extra == 'testing'
192
+ Requires-Dist: pytest-xdist ; extra == 'testing'
193
+ Requires-Dist: timeout-decorator ; extra == 'testing'
194
+ Requires-Dist: parameterized ; extra == 'testing'
195
+ Requires-Dist: psutil ; extra == 'testing'
196
+ Requires-Dist: datasets ; extra == 'testing'
197
+ Requires-Dist: pytest-timeout ; extra == 'testing'
198
+ Requires-Dist: black (==21.4b0) ; extra == 'testing'
199
+ Requires-Dist: sacrebleu (>=1.4.12) ; extra == 'testing'
200
+ Requires-Dist: rouge-score ; extra == 'testing'
201
+ Requires-Dist: nltk ; extra == 'testing'
202
+ Requires-Dist: GitPython ; extra == 'testing'
203
+ Requires-Dist: faiss-cpu ; extra == 'testing'
204
+ Requires-Dist: cookiecutter (==1.7.2) ; extra == 'testing'
205
+ Provides-Extra: tf
206
+ Requires-Dist: tensorflow (>=2.3) ; extra == 'tf'
207
+ Requires-Dist: onnxconverter-common ; extra == 'tf'
208
+ Requires-Dist: keras2onnx ; extra == 'tf'
209
+ Provides-Extra: tf-cpu
210
+ Requires-Dist: tensorflow-cpu (>=2.3) ; extra == 'tf-cpu'
211
+ Requires-Dist: onnxconverter-common ; extra == 'tf-cpu'
212
+ Requires-Dist: keras2onnx ; extra == 'tf-cpu'
213
+ Provides-Extra: timm
214
+ Requires-Dist: timm ; extra == 'timm'
215
+ Provides-Extra: tokenizers
216
+ Requires-Dist: tokenizers (<0.11,>=0.10.1) ; extra == 'tokenizers'
217
+ Provides-Extra: torch
218
+ Requires-Dist: torch (>=1.0) ; extra == 'torch'
219
+ Provides-Extra: torchhub
220
+ Requires-Dist: filelock ; extra == 'torchhub'
221
+ Requires-Dist: huggingface-hub (==0.0.12) ; extra == 'torchhub'
222
+ Requires-Dist: importlib-metadata ; extra == 'torchhub'
223
+ Requires-Dist: numpy (>=1.17) ; extra == 'torchhub'
224
+ Requires-Dist: packaging ; extra == 'torchhub'
225
+ Requires-Dist: protobuf ; extra == 'torchhub'
226
+ Requires-Dist: regex (!=2019.12.17) ; extra == 'torchhub'
227
+ Requires-Dist: requests ; extra == 'torchhub'
228
+ Requires-Dist: sacremoses ; extra == 'torchhub'
229
+ Requires-Dist: sentencepiece (==0.1.91) ; extra == 'torchhub'
230
+ Requires-Dist: torch (>=1.0) ; extra == 'torchhub'
231
+ Requires-Dist: tokenizers (<0.11,>=0.10.1) ; extra == 'torchhub'
232
+ Requires-Dist: tqdm (>=4.27) ; extra == 'torchhub'
233
+ Provides-Extra: vision
234
+ Requires-Dist: Pillow ; extra == 'vision'
235
+
236
+ <!---
237
+ Copyright 2020 The HuggingFace Team. All rights reserved.
238
+
239
+ Licensed under the Apache License, Version 2.0 (the "License");
240
+ you may not use this file except in compliance with the License.
241
+ You may obtain a copy of the License at
242
+
243
+ http://www.apache.org/licenses/LICENSE-2.0
244
+
245
+ Unless required by applicable law or agreed to in writing, software
246
+ distributed under the License is distributed on an "AS IS" BASIS,
247
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
248
+ See the License for the specific language governing permissions and
249
+ limitations under the License.
250
+ -->
251
+
252
+ <p align="center">
253
+ <br>
254
+ <img src="https://raw.githubusercontent.com/huggingface/transformers/master/docs/source/imgs/transformers_logo_name.png" width="400"/>
255
+ <br>
256
+ <p>
257
+ <p align="center">
258
+ <a href="https://circleci.com/gh/huggingface/transformers">
259
+ <img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/master">
260
+ </a>
261
+ <a href="https://github.com/huggingface/transformers/blob/master/LICENSE">
262
+ <img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue">
263
+ </a>
264
+ <a href="https://huggingface.co/transformers/index.html">
265
+ <img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/transformers/index.html.svg?down_color=red&down_message=offline&up_message=online">
266
+ </a>
267
+ <a href="https://github.com/huggingface/transformers/releases">
268
+ <img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg">
269
+ </a>
270
+ <a href="https://github.com/huggingface/transformers/blob/master/CODE_OF_CONDUCT.md">
271
+ <img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
272
+ </a>
273
+ <a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
274
+ </p>
275
+
276
+ <h4 align="center">
277
+ <p>
278
+ <b>English</b> |
279
+ <a href="https://github.com/huggingface/transformers/blob/master/README_zh-hans.md">简体中文</a> |
280
+ <a href="https://github.com/huggingface/transformers/blob/master/README_zh-hant.md">繁體中文</a>
281
+ <p>
282
+ </h4>
283
+
284
+ <h3 align="center">
285
+ <p>State-of-the-art Natural Language Processing for Jax, PyTorch and TensorFlow</p>
286
+ </h3>
287
+
288
+ <h3 align="center">
289
+ <a href="https://hf.co/course"><img src="https://raw.githubusercontent.com/huggingface/transformers/master/docs/source/imgs/course_banner.png"></a>
290
+ </h3>
291
+
292
+ 🤗 Transformers provides thousands of pretrained models to perform tasks on texts such as classification, information extraction, question answering, summarization, translation, text generation and more in over 100 languages. Its aim is to make cutting-edge NLP easier to use for everyone.
293
+
294
+ 🤗 Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets and then share them with the community on our [model hub](https://huggingface.co/models). At the same time, each Python module defining an architecture is fully standalone and can be modified to enable quick research experiments.
+
+ 🤗 Transformers is backed by the three most popular deep learning libraries — [Jax](https://jax.readthedocs.io/en/latest/), [PyTorch](https://pytorch.org/) and [TensorFlow](https://www.tensorflow.org/) — with a seamless integration between them. It's straightforward to train your models with one before loading them for inference with another.
+
+ ## Online demos
+
+ You can test most of our models directly on their pages from the [model hub](https://huggingface.co/models). We also offer [private model hosting, versioning, & an inference API](https://huggingface.co/pricing) for public and private models.
+
+ Here are a few examples:
+ - [Masked word completion with BERT](https://huggingface.co/bert-base-uncased?text=Paris+is+the+%5BMASK%5D+of+France)
+ - [Named Entity Recognition with Electra](https://huggingface.co/dbmdz/electra-large-discriminator-finetuned-conll03-english?text=My+name+is+Sarah+and+I+live+in+London+city)
+ - [Text generation with GPT-2](https://huggingface.co/gpt2?text=A+long+time+ago%2C+)
+ - [Natural Language Inference with RoBERTa](https://huggingface.co/roberta-large-mnli?text=The+dog+was+lost.+Nobody+lost+any+animal)
+ - [Summarization with BART](https://huggingface.co/facebook/bart-large-cnn?text=The+tower+is+324+metres+%281%2C063+ft%29+tall%2C+about+the+same+height+as+an+81-storey+building%2C+and+the+tallest+structure+in+Paris.+Its+base+is+square%2C+measuring+125+metres+%28410+ft%29+on+each+side.+During+its+construction%2C+the+Eiffel+Tower+surpassed+the+Washington+Monument+to+become+the+tallest+man-made+structure+in+the+world%2C+a+title+it+held+for+41+years+until+the+Chrysler+Building+in+New+York+City+was+finished+in+1930.+It+was+the+first+structure+to+reach+a+height+of+300+metres.+Due+to+the+addition+of+a+broadcasting+aerial+at+the+top+of+the+tower+in+1957%2C+it+is+now+taller+than+the+Chrysler+Building+by+5.2+metres+%2817+ft%29.+Excluding+transmitters%2C+the+Eiffel+Tower+is+the+second+tallest+free-standing+structure+in+France+after+the+Millau+Viaduct)
+ - [Question answering with DistilBERT](https://huggingface.co/distilbert-base-uncased-distilled-squad?text=Which+name+is+also+used+to+describe+the+Amazon+rainforest+in+English%3F&context=The+Amazon+rainforest+%28Portuguese%3A+Floresta+Amaz%C3%B4nica+or+Amaz%C3%B4nia%3B+Spanish%3A+Selva+Amaz%C3%B3nica%2C+Amazon%C3%ADa+or+usually+Amazonia%3B+French%3A+For%C3%AAt+amazonienne%3B+Dutch%3A+Amazoneregenwoud%29%2C+also+known+in+English+as+Amazonia+or+the+Amazon+Jungle%2C+is+a+moist+broadleaf+forest+that+covers+most+of+the+Amazon+basin+of+South+America.+This+basin+encompasses+7%2C000%2C000+square+kilometres+%282%2C700%2C000+sq+mi%29%2C+of+which+5%2C500%2C000+square+kilometres+%282%2C100%2C000+sq+mi%29+are+covered+by+the+rainforest.+This+region+includes+territory+belonging+to+nine+nations.+The+majority+of+the+forest+is+contained+within+Brazil%2C+with+60%25+of+the+rainforest%2C+followed+by+Peru+with+13%25%2C+Colombia+with+10%25%2C+and+with+minor+amounts+in+Venezuela%2C+Ecuador%2C+Bolivia%2C+Guyana%2C+Suriname+and+French+Guiana.+States+or+departments+in+four+nations+contain+%22Amazonas%22+in+their+names.+The+Amazon+represents+over+half+of+the+planet%27s+remaining+rainforests%2C+and+comprises+the+largest+and+most+biodiverse+tract+of+tropical+rainforest+in+the+world%2C+with+an+estimated+390+billion+individual+trees+divided+into+16%2C000+species)
+ - [Translation with T5](https://huggingface.co/t5-base?text=My+name+is+Wolfgang+and+I+live+in+Berlin)
+
+ **[Write With Transformer](https://transformer.huggingface.co)**, built by the Hugging Face team, is the official demo of this repo’s text generation capabilities.
+
+ ## If you are looking for custom support from the Hugging Face team
+
+ <a target="_blank" href="https://huggingface.co/support">
+ <img alt="HuggingFace Expert Acceleration Program" src="https://huggingface.co/front/thumbnails/support.png" style="max-width: 600px; border: 1px solid #eee; border-radius: 4px; box-shadow: 0 1px 2px 0 rgba(0, 0, 0, 0.05);">
+ </a><br>
+
+ ## Quick tour
+
+ To immediately use a model on a given text, we provide the `pipeline` API. Pipelines group together a pretrained model with the preprocessing that was used during that model's training. Here is how to quickly use a pipeline to classify positive versus negative texts:
+
+ ```python
+ >>> from transformers import pipeline
+
+ # Allocate a pipeline for sentiment-analysis
+ >>> classifier = pipeline('sentiment-analysis')
+ >>> classifier('We are very happy to introduce pipeline to the transformers repository.')
+ [{'label': 'POSITIVE', 'score': 0.9996980428695679}]
+ ```
+
+ The second line of code downloads and caches the pretrained model used by the pipeline, while the third evaluates it on the given text. Here the answer is "positive" with a confidence of 99.97%.
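+
+ A pipeline also accepts a list of texts and returns one prediction per text. A minimal sketch (the second sentence and the labels shown are illustrative, not output you are guaranteed to reproduce exactly):
+
+ ```python
+ >>> results = classifier(["We are very happy to introduce pipeline to the transformers repository.",
+ ...                       "We hope you don't hate it."])
+ >>> [result['label'] for result in results]
+ ['POSITIVE', 'NEGATIVE']
+ ```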
+
+ Many NLP tasks have a pre-trained `pipeline` ready to go. For example, we can easily extract question answers given context:
+
+ ```python
+ >>> from transformers import pipeline
+
+ # Allocate a pipeline for question-answering
+ >>> question_answerer = pipeline('question-answering')
+ >>> question_answerer({
+ ...     'question': 'What is the name of the repository ?',
+ ...     'context': 'Pipeline has been included in the huggingface/transformers repository'
+ ... })
+ {'score': 0.30970096588134766, 'start': 34, 'end': 58, 'answer': 'huggingface/transformers'}
+ ```
+
+ In addition to the answer, the pretrained model used here returned its confidence score, along with the start position and end position of the answer in the tokenized sentence. You can learn more about the tasks supported by the `pipeline` API in [this tutorial](https://huggingface.co/transformers/task_summary.html).
+
+ To download and use any of the pretrained models on your given task, all it takes is three lines of code. Here is the PyTorch version:
+ ```python
+ >>> from transformers import AutoTokenizer, AutoModel
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+ >>> model = AutoModel.from_pretrained("bert-base-uncased")
+
+ >>> inputs = tokenizer("Hello world!", return_tensors="pt")
+ >>> outputs = model(**inputs)
+ ```
+ And here is the equivalent code for TensorFlow:
+ ```python
+ >>> from transformers import AutoTokenizer, TFAutoModel
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+ >>> model = TFAutoModel.from_pretrained("bert-base-uncased")
+
+ >>> inputs = tokenizer("Hello world!", return_tensors="tf")
+ >>> outputs = model(**inputs)
+ ```
+
+ The tokenizer is responsible for all the preprocessing the pretrained model expects, and can be called directly on a single string (as in the above examples) or a list. It will output a dictionary that you can use in downstream code or simply pass directly to your model using the `**` argument-unpacking operator.
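+
+ For instance, tokenizing a batch only requires passing a list. A minimal sketch (`padding` and `truncation` are standard tokenizer arguments; the sentences are just examples):
+
+ ```python
+ >>> from transformers import AutoTokenizer
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+ # Pad to the longest sentence in the batch and truncate anything over the model's limit
+ >>> batch = tokenizer(["Hello world!", "A slightly longer second sentence."], padding=True, truncation=True, return_tensors="pt")
+ >>> list(batch.keys())
+ ['input_ids', 'token_type_ids', 'attention_mask']
+ ```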
+
+ The model itself is a regular [PyTorch `nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) or a [TensorFlow `tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) (depending on your backend) which you can use normally. [This tutorial](https://huggingface.co/transformers/training.html) explains how to integrate such a model into a classic PyTorch or TensorFlow training loop, or how to use our `Trainer` API to quickly fine-tune on a new dataset.
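+
+ For example, fine-tuning with the `Trainer` comes down to a few lines once you have a dataset of tokenized examples. A minimal sketch on a two-example toy dataset (`ToyDataset`, the example texts and the `"test_trainer"` output directory are illustrations, not part of the library):
+
+ ```python
+ >>> import torch
+ >>> from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+ >>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
+
+ # Any torch Dataset whose items are dicts of tensors (including a "labels" key) will do
+ >>> class ToyDataset(torch.utils.data.Dataset):
+ ...     def __init__(self, encodings, labels):
+ ...         self.encodings, self.labels = encodings, labels
+ ...     def __len__(self):
+ ...         return len(self.labels)
+ ...     def __getitem__(self, i):
+ ...         return {**{k: v[i] for k, v in self.encodings.items()}, "labels": torch.tensor(self.labels[i])}
+
+ >>> encodings = tokenizer(["I love this.", "I hate this."], padding=True, truncation=True, return_tensors="pt")
+ >>> trainer = Trainer(model=model, args=TrainingArguments(output_dir="test_trainer"), train_dataset=ToyDataset(encodings, [1, 0]))
+ >>> trainer.train()
+ ```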
+
+ ## Why should I use transformers?
+
+ 1. Easy-to-use state-of-the-art models:
+     - High performance on NLU and NLG tasks.
+     - Low barrier to entry for educators and practitioners.
+     - Few user-facing abstractions with just three classes to learn.
+     - A unified API for using all our pretrained models.
+
+ 1. Lower compute costs, smaller carbon footprint:
+     - Researchers can share trained models instead of always retraining.
+     - Practitioners can reduce compute time and production costs.
+     - Dozens of architectures with over 2,000 pretrained models, some in more than 100 languages.
+
+ 1. Choose the right framework for every part of a model's lifetime:
+     - Train state-of-the-art models in 3 lines of code.
+     - Move a single model between TF2.0/PyTorch frameworks at will (see the sketch after this list).
+     - Seamlessly pick the right framework for training, evaluation and production.
+
+ 1. Easily customize a model or an example to your needs:
+     - We provide examples for each architecture to reproduce the results published by its original authors.
+     - Model internals are exposed as consistently as possible.
+     - Model files can be used independently of the library for quick experiments.
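+
+ As an illustration of moving a single model between frameworks, here is a minimal sketch (the `"my-bert"` directory name is just an example):
+
+ ```python
+ >>> from transformers import AutoModel, TFAutoModel
+
+ # Load (or fine-tune) in PyTorch and save the checkpoint to disk...
+ >>> pt_model = AutoModel.from_pretrained("bert-base-uncased")
+ >>> pt_model.save_pretrained("my-bert")
+ # ...then reload the same weights in TensorFlow; from_pt=True converts them on the fly
+ >>> tf_model = TFAutoModel.from_pretrained("my-bert", from_pt=True)
+ ```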
+
+ ## Why shouldn't I use transformers?
+
+ - This library is not a modular toolbox of building blocks for neural nets. The code in the model files is not refactored with additional abstractions on purpose, so that researchers can quickly iterate on each of the models without diving into additional abstractions/files.
+ - The training API is not intended to work on any model but is optimized to work with the models provided by the library. For generic machine learning loops, you should use another library.
+ - While we strive to present as many use cases as possible, the scripts in our [examples folder](https://github.com/huggingface/transformers/tree/master/examples) are just that: examples. It is expected that they won't work out of the box on your specific problem and that you will be required to change a few lines of code to adapt them to your needs.
+
+ ## Installation
+
+ ### With pip
+
+ This repository is tested on Python 3.6+, Flax 0.3.2+, PyTorch 1.3.1+ and TensorFlow 2.3+.
+
+ You should install 🤗 Transformers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
+
+ First, create a virtual environment with the version of Python you're going to use and activate it.
+
+ Then, you will need to install at least one of Flax, PyTorch or TensorFlow.
+ Please refer to the [TensorFlow installation page](https://www.tensorflow.org/install/), the [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) and/or the [Flax installation page](https://github.com/google/flax#quick-install) for the specific install command for your platform.
+
+ When one of those backends has been installed, 🤗 Transformers can be installed using pip as follows:
+
+ ```bash
+ pip install transformers
+ ```
+
+ If you'd like to play with the examples or need the bleeding edge of the code and can't wait for a new release, you must [install the library from source](https://huggingface.co/transformers/installation.html#installing-from-source).
+
+ ### With conda
+
+ Since version v4.0.0, 🤗 Transformers has a conda channel: `huggingface`.
+
+ 🤗 Transformers can be installed using conda as follows:
+
+ ```bash
+ conda install -c huggingface transformers
+ ```
+
+ Follow the installation pages of Flax, PyTorch or TensorFlow to see how to install them with conda.
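+
+ For example, PyTorch can be installed from its official channel as sketched below (check the PyTorch installation page for the variant matching your CUDA setup):
+
+ ```bash
+ conda install pytorch -c pytorch
+ ```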
+
+ ## Model architectures
+
+ **[All the model checkpoints](https://huggingface.co/models)** provided by 🤗 Transformers are seamlessly integrated from the huggingface.co [model hub](https://huggingface.co) where they are uploaded directly by [users](https://huggingface.co/users) and [organizations](https://huggingface.co/organizations).
+
+ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://huggingface.co/api/shields/models&color=brightgreen)
+
+ 🤗 Transformers currently provides the following architectures (see [here](https://huggingface.co/transformers/model_summary.html) for a high-level summary of each of them):
+
+ 1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.
+ 1. **[BART](https://huggingface.co/transformers/model_doc/bart.html)** (from Facebook) released with the paper [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/pdf/1910.13461.pdf) by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer.
+ 1. **[BARThez](https://huggingface.co/transformers/model_doc/barthez.html)** (from École polytechnique) released with the paper [BARThez: a Skilled Pretrained French Sequence-to-Sequence Model](https://arxiv.org/abs/2010.12321) by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis.
+ 1. **[BERT](https://huggingface.co/transformers/model_doc/bert.html)** (from Google) released with the paper [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova.
+ 1. **[BERT For Sequence Generation](https://huggingface.co/transformers/model_doc/bertgeneration.html)** (from Google) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
+ 1. **[BigBird-RoBERTa](https://huggingface.co/transformers/model_doc/bigbird.html)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
+ 1. **[BigBird-Pegasus](https://huggingface.co/transformers/model_doc/bigbird_pegasus.html)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
+ 1. **[Blenderbot](https://huggingface.co/transformers/model_doc/blenderbot.html)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
+ 1. **[BlenderbotSmall](https://huggingface.co/transformers/model_doc/blenderbot_small.html)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
+ 1. **[BORT](https://huggingface.co/transformers/model_doc/bort.html)** (from Alexa) released with the paper [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) by Adrian de Wynter and Daniel J. Perry.
+ 1. **[ByT5](https://huggingface.co/transformers/model_doc/byt5.html)** (from Google Research) released with the paper [ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel.
+ 1. **[CamemBERT](https://huggingface.co/transformers/model_doc/camembert.html)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
+ 1. **[CANINE](https://huggingface.co/transformers/model_doc/canine.html)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
+ 1. **[CLIP](https://huggingface.co/transformers/model_doc/clip.html)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
+ 1. **[ConvBERT](https://huggingface.co/transformers/model_doc/convbert.html)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
+ 1. **[CPM](https://huggingface.co/transformers/model_doc/cpm.html)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
+ 1. **[CTRL](https://huggingface.co/transformers/model_doc/ctrl.html)** (from Salesforce) released with the paper [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
+ 1. **[DeBERTa](https://huggingface.co/transformers/model_doc/deberta.html)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
+ 1. **[DeBERTa-v2](https://huggingface.co/transformers/model_doc/deberta_v2.html)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
+ 1. **[DeiT](https://huggingface.co/transformers/model_doc/deit.html)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou.
+ 1. **[DETR](https://huggingface.co/transformers/model_doc/detr.html)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko.
+ 1. **[DialoGPT](https://huggingface.co/transformers/model_doc/dialogpt.html)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.
+ 1. **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/master/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/master/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/master/examples/distillation) and a German version of DistilBERT.
+ 1. **[DPR](https://huggingface.co/transformers/model_doc/dpr.html)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.
+ 1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
+ 1. **[FlauBERT](https://huggingface.co/transformers/model_doc/flaubert.html)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
+ 1. **[Funnel Transformer](https://huggingface.co/transformers/model_doc/funnel.html)** (from CMU/Google Brain) released with the paper [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
+ 1. **[GPT](https://huggingface.co/transformers/model_doc/gpt.html)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
+ 1. **[GPT-2](https://huggingface.co/transformers/model_doc/gpt2.html)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
+ 1. **[GPT Neo](https://huggingface.co/transformers/model_doc/gpt_neo.html)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.
+ 1. **[Hubert](https://huggingface.co/transformers/model_doc/hubert.html)** (from Facebook) released with the paper [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.
+ 1. **[I-BERT](https://huggingface.co/transformers/model_doc/ibert.html)** (from Berkeley) released with the paper [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer.
+ 1. **[LayoutLM](https://huggingface.co/transformers/model_doc/layoutlm.html)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou.
+ 1. **[LED](https://huggingface.co/transformers/model_doc/led.html)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
+ 1. **[Longformer](https://huggingface.co/transformers/model_doc/longformer.html)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
+ 1. **[LUKE](https://huggingface.co/transformers/model_doc/luke.html)** (from Studio Ousia) released with the paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.
+ 1. **[LXMERT](https://huggingface.co/transformers/model_doc/lxmert.html)** (from UNC Chapel Hill) released with the paper [LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal.
+ 1. **[M2M100](https://huggingface.co/transformers/model_doc/m2m_100.html)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
+ 1. **[MarianMT](https://huggingface.co/transformers/model_doc/marian.html)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
+ 1. **[MBart](https://huggingface.co/transformers/model_doc/mbart.html)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
+ 1. **[MBart-50](https://huggingface.co/transformers/model_doc/mbart.html)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
+ 1. **[Megatron-BERT](https://huggingface.co/transformers/model_doc/megatron_bert.html)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
+ 1. **[Megatron-GPT2](https://huggingface.co/transformers/model_doc/megatron_gpt2.html)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
+ 1. **[MPNet](https://huggingface.co/transformers/model_doc/mpnet.html)** (from Microsoft Research) released with the paper [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.
+ 1. **[MT5](https://huggingface.co/transformers/model_doc/mt5.html)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
+ 1. **[Pegasus](https://huggingface.co/transformers/model_doc/pegasus.html)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
+ 1. **[ProphetNet](https://huggingface.co/transformers/model_doc/prophetnet.html)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
+ 1. **[Reformer](https://huggingface.co/transformers/model_doc/reformer.html)** (from Google Research) released with the paper [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.
+ 1. **[RoBERTa](https://huggingface.co/transformers/model_doc/roberta.html)** (from Facebook), released together with the paper [RoBERTa: A Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
+ 1. **[RoFormer](https://huggingface.co/transformers/model_doc/roformer.html)** (from ZhuiyiTechnology), released together with the paper [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/pdf/2104.09864v1.pdf) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.
+ 1. **[SpeechToTextTransformer](https://huggingface.co/transformers/model_doc/speech_to_text.html)** (from Facebook), released together with the paper [fairseq S2T: Fast Speech-to-Text Modeling with fairseq](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino.
+ 1. **[SqueezeBert](https://huggingface.co/transformers/model_doc/squeezebert.html)** released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer.
+ 1. **[T5](https://huggingface.co/transformers/model_doc/t5.html)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
+ 1. **[TAPAS](https://huggingface.co/transformers/model_doc/tapas.html)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
+ 1. **[Transformer-XL](https://huggingface.co/transformers/model_doc/transformerxl.html)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
+ 1. **[Vision Transformer (ViT)](https://huggingface.co/transformers/model_doc/vit.html)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
+ 1. **[VisualBERT](https://huggingface.co/transformers/model_doc/visual_bert.html)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
+ 1. **[Wav2Vec2](https://huggingface.co/transformers/model_doc/wav2vec2.html)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
+ 1. **[XLM](https://huggingface.co/transformers/model_doc/xlm.html)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
+ 1. **[XLM-ProphetNet](https://huggingface.co/transformers/model_doc/xlmprophetnet.html)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
+ 1. **[XLM-RoBERTa](https://huggingface.co/transformers/model_doc/xlmroberta.html)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
+ 1. **[XLNet](https://huggingface.co/transformers/model_doc/xlnet.html)** (from Google/CMU) released with the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
+ 1. **[XLSR-Wav2Vec2](https://huggingface.co/transformers/model_doc/xlsr_wav2vec2.html)** (from Facebook AI) released with the paper [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.
+ 1. Want to contribute a new model? We have added a **detailed guide and templates** to guide you in the process of adding a new model. You can find them in the [`templates`](./templates) folder of the repository. Be sure to check the [contributing guidelines](./CONTRIBUTING.md) and contact the maintainers or open an issue to collect feedback before starting your PR.
+
+ To check if each model has an implementation in Flax, PyTorch or TensorFlow, or has an associated tokenizer backed by the 🤗 Tokenizers library, refer to [this table](https://huggingface.co/transformers/index.html#supported-frameworks).
+
+ These implementations have been tested on several datasets (see the example scripts) and should match the performance of the original implementations. You can find more details on performance in the Examples section of the [documentation](https://huggingface.co/transformers/examples.html).
+
+
+ ## Learn more
+
+ | Section | Description |
+ |-|-|
+ | [Documentation](https://huggingface.co/transformers/) | Full API documentation and tutorials |
+ | [Task summary](https://huggingface.co/transformers/task_summary.html) | Tasks supported by 🤗 Transformers |
+ | [Preprocessing tutorial](https://huggingface.co/transformers/preprocessing.html) | Using the `Tokenizer` class to prepare data for the models |
+ | [Training and fine-tuning](https://huggingface.co/transformers/training.html) | Using the models provided by 🤗 Transformers in a PyTorch/TensorFlow training loop and the `Trainer` API |
+ | [Quick tour: Fine-tuning/usage scripts](https://github.com/huggingface/transformers/tree/master/examples) | Example scripts for fine-tuning models on a wide range of tasks |
+ | [Model sharing and uploading](https://huggingface.co/transformers/model_sharing.html) | Upload and share your fine-tuned models with the community |
+ | [Migration](https://huggingface.co/transformers/migration.html) | Migrate to 🤗 Transformers from `pytorch-transformers` or `pytorch-pretrained-bert` |
+
+ ## Citation
+
+ We now have a [paper](https://www.aclweb.org/anthology/2020.emnlp-demos.6/) you can cite for the 🤗 Transformers library:
+ ```bibtex
+ @inproceedings{wolf-etal-2020-transformers,
+     title = "Transformers: State-of-the-Art Natural Language Processing",
+     author = "Thomas Wolf and Lysandre Debut and Victor Sanh and Julien Chaumond and Clement Delangue and Anthony Moi and Pierric Cistac and Tim Rault and Rémi Louf and Morgan Funtowicz and Joe Davison and Sam Shleifer and Patrick von Platen and Clara Ma and Yacine Jernite and Julien Plu and Canwen Xu and Teven Le Scao and Sylvain Gugger and Mariama Drame and Quentin Lhoest and Alexander M. Rush",
+     booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations",
+     month = oct,
+     year = "2020",
+     address = "Online",
+     publisher = "Association for Computational Linguistics",
+     url = "https://www.aclweb.org/anthology/2020.emnlp-demos.6",
+     pages = "38--45"
+ }
+ ```
+
+
public/gpt-2/transformers-4.9.1.dist-info/RECORD ADDED
@@ -0,0 +1,532 @@
+ transformers/__init__.py,sha256=vU0NfQd5SjZwkhl5ELrWBYcn5kVz59ShQOcm77qVOac,118787
+ transformers/activations.py,sha256=vdFvWTv26miTCn-ZK2Vx5h_st2TlM-8F2gHDCZskI34,3537
+ transformers/activations_tf.py,sha256=icsuyf137XXLTMUq0cTaJ50sPg0urhW5GcJYH37F4fM,2766
+ transformers/configuration_utils.py,sha256=CJV3_EbIMwKCrNNisbC3mpuHF_QG0TItf2Jl4h3BkxE,40373
+ transformers/convert_graph_to_onnx.py,sha256=lrCE1ar73gR8nEw4ehQUI3ZgrlvjoijmajPJ014Ef7Q,18640
+ transformers/convert_pytorch_checkpoint_to_tf2.py,sha256=bAyd-qCY8azDJU6DOen4V9sl7QYlirWheDDKoqm5EhI,16607
+ transformers/convert_slow_tokenizer.py,sha256=q_ApjpV94Hv2pkwg1RIRopZBlCgzyQussWu2dbUCbWs,29938
+ transformers/convert_slow_tokenizers_checkpoints_to_fast.py,sha256=DxQB-Ji8I0lZ4EUXBl0ucnoJxwE9rSp3IbfuyMbBxiQ,4955
+ transformers/convert_tf_hub_seq_to_seq_bert_to_pytorch.py,sha256=LHDZoAwfslnHT74gNbiW2GmYim89tPtKm4HQsxxyeck,2899
+ transformers/debug_utils.py,sha256=_CfKIueP-5q57XX7hg_kTcgPhttPWqDz93J-7mTZdzU,12950
+ transformers/deepspeed.py,sha256=x7DKIC0zS6kgM5rf0AF7czzAeOWR8C3Ocpsjq7IzbH8,17035
+ transformers/dependency_versions_check.py,sha256=z10phKu2S4bq0CeJrVa9PQbPdfpxnUTi-icmNpO0vFk,1772
+ transformers/dependency_versions_table.py,sha256=nHiPNJSveGek2KxiKaBEJkOVqP2-7IP6E0NMdCqex9w,2501
+ transformers/feature_extraction_sequence_utils.py,sha256=x-S2Emr2PNSDcc5FPKcuaFEOWx98CW3DFKUzaXhmkVM,15734
+ transformers/feature_extraction_utils.py,sha256=RjTc_QLUq0w-ojWrABguaam57hjGRuGA_ZSA7KgrBCQ,21973
+ transformers/file_utils.py,sha256=3aWh5UIRUKGQneeWy9U3y9VHDmjaLrzdF-1kxzh-suA,80956
+ transformers/generation_beam_search.py,sha256=XNk5Ffu5oL6CTa8ErpgfCWL7-oyhMX_C9DAUXnIX_Zw,17536
+ transformers/generation_flax_logits_process.py,sha256=Ou06C4Z4ZrVG5YNnmhAYKyxf3r-_9DCo_Dj5QRawAFQ,11805
+ transformers/generation_flax_utils.py,sha256=rrgQahkB3N9SBecjMdKmVpka05SVTZk6_W8AOmzeCYs,37887
+ transformers/generation_logits_process.py,sha256=S6U2CkBngnONw7uPtoTsJVBcHJmFwbnfFMsLBrgAYdY,26683
+ transformers/generation_stopping_criteria.py,sha256=CEYApRm9cbyufwAEoQPC4dCnVQwMJjl6ZFimBfuHgkU,5383
+ transformers/generation_tf_utils.py,sha256=XxEX2njlMeIIEQekyvGsTe-dqSrH-u4ZwdvOEoDBKV8,91769
+ transformers/generation_utils.py,sha256=qnlpMQv-LemDRgaUl9yToKtWLUa0dDQbo7bBXhOgO3U,143441
+ transformers/hf_api.py,sha256=hRt_F1SKUZIdXDg7Wc7PjyDJCi2iF2cRO9ixRatWxsg,7325
+ transformers/hf_argparser.py,sha256=wVVCpi6o9-Ygm159k3IWZQrl84Q7_lSy6MyOL3dXfU8,10711
+ transformers/image_utils.py,sha256=m_JcwpRK3o1pgrOCIOt02FTKhAjYI7sPA5hFtAeICp4,8871
+ transformers/integrations.py,sha256=8mSw7uUt7El0qFmFgvi0wSg_SzgxA1aH9nIv1mjBpzg,33602
+ transformers/modelcard.py,sha256=qq3Jh-rdPwgUU4JJo7F3Xcy2tpRE2yKIgEVv_y8D70k,29548
+ transformers/modeling_flax_outputs.py,sha256=C4zrBHH2jrACZE1PPPhlmitmO32Mw_DSkZobO9gr1wY,38385
+ transformers/modeling_flax_pytorch_utils.py,sha256=w99NEOcY4_yDwHj7IA0KIDoGOg7qIgKX7XCtufoIMd0,10482
+ transformers/modeling_flax_utils.py,sha256=TG-txCqARtkfG5ySJxtef95AzpiYH9mrysV4sJU7kUs,25432
+ transformers/modeling_outputs.py,sha256=6LzFctao2A01GNdEdJiq-9NrtnlCxldHFSThqjuMBnA,52392
+ transformers/modeling_tf_outputs.py,sha256=V_Zr7nvAqVs57_RSm6MaKus_o76QrSr8sTqYrgYFTaU,42017
+ transformers/modeling_tf_pytorch_utils.py,sha256=V8voosVTkPenT_5cjvvwdeC8dDeSMhbVGiA0nrWkZqk,17997
+ transformers/modeling_tf_utils.py,sha256=VpqGLJII3OFuu8DOMPthecSKU8r1K3xpsYs3qATApjw,77670
+ transformers/modeling_utils.py,sha256=hVKFvm8hUpirqIHzhQ3Py8FMN0LO1dLX9vXVGduHNHk,105294
+ transformers/optimization.py,sha256=0f2SCH0e5LoXONUtaj2QR0hdeWsbsBxDc6A2ILkSiRs,27375
+ transformers/optimization_tf.py,sha256=MgK90VS75nNh52PZ-ZA5IWPrgck6EF4zpdodQznZje4,15890
+ transformers/testing_utils.py,sha256=LsqU_0LCQ154iihKsiJo1dq2skfliDr_Ky8sp4Avk9Q,42795
+ transformers/tokenization_utils.py,sha256=2BiexNvdjmAZ0hWJKgPDOgxgT_NJLEjA_yb_KWZKVAk,32397
+ transformers/tokenization_utils_base.py,sha256=vHe0uSe9cSzG-Bfxje03kyPqXfV2Cs392Gkr79xyauc,165870
+ transformers/tokenization_utils_fast.py,sha256=KirTOghLX79mCCot9gvdGFw7b-COHO61mb1CR3xtWqY,32188
+ transformers/trainer.py,sha256=NKu22yK7RWvNiaLOuPdlaEE4Wa4OxmgiooA2VxlGmKE,126345
+ transformers/trainer_callback.py,sha256=irUebB5TtddePVIQuT6CuJKobhS-x8CgOwEVqU89amk,23488
+ transformers/trainer_pt_utils.py,sha256=65y5fH1FeqS0q1BCdOg6E3gHBjJoAS8hjaRUH49bAY4,44264
+ transformers/trainer_seq2seq.py,sha256=-2ITs8ibbKrGJCDBAiCF4BX1aqWYafqsjsT7GCS9p1k,9744
+ transformers/trainer_tf.py,sha256=ABdmYj9pYdrYwfC5IOrP5L7rKTPM1kvMaqg2TZcUHoI,34959
+ transformers/trainer_utils.py,sha256=-tLuHESLRZ3H5Npwv3AdcfWZM_vGlG_NPf6ngNdiPrw,15414
+ transformers/training_args.py,sha256=DN3CkrvEiIX1r5aKeVkgGqSxH79mi1Ja93XKE9jrlBM,57088
+ transformers/training_args_seq2seq.py,sha256=mHau-QBp-b0NMBRraU2MOS7ibQSF2YoL0iuuow4eM2o,1754
+ transformers/training_args_tf.py,sha256=4cBb-HvWkqclWDxHGUnYOTMwPNK6RjJioxz2JmOx2nE,14909
+ transformers/benchmark/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ transformers/benchmark/benchmark.py,sha256=j4iQWOlkmPKSgGF7ItFhI78VEkXKCyBJ1nKcDUng2A8,10613
+ transformers/benchmark/benchmark_args.py,sha256=TSaQ9z4kIOX27VzbFco7OGzdt0y2Rze7qgMj8oDrWFM,3777
+ transformers/benchmark/benchmark_args_tf.py,sha256=r84A8jMUserwHmuQc9hgfhh6zJMRagUhsdwHdFJ8a1I,4573
+ transformers/benchmark/benchmark_args_utils.py,sha256=bKmk1DYEXSVVCpqbcFuB6XeR_rcD_2joXSqkpVTHSc4,5895
+ transformers/benchmark/benchmark_tf.py,sha256=QHrBfRItUfCbjK8vWLbeovE1bzSHVFb1nau_FiBR_dk,12924
+ transformers/benchmark/benchmark_utils.py,sha256=3241wr2_bK1dHzw_-151XepeJl274YEwpOyE3Ewz_RY,37265
+ transformers/commands/__init__.py,sha256=aFO3I7C6G9OLA9JZSc_yMaZl0glOQtjNPjqMFfu9wfQ,923
+ transformers/commands/add_new_model.py,sha256=QufphyzDMFkiZODBe7_7XWSSUF5AmnSEjKYCVxGXErU,9393
+ transformers/commands/convert.py,sha256=r94i7Y3UgdhvC7SvJ2hFBgEro8jlKku3r7QJWlWOHqw,7555
+ transformers/commands/download.py,sha256=LgjPfIvyS4T_CUWyaEfTdzm70ukcmE088XcARc98SKc,1860
+ transformers/commands/env.py,sha256=F3CmAIc2zMGV7JYr1NW0ZMXHxkzMXj_YVRXOx8uq43I,3223
+ transformers/commands/lfs.py,sha256=MZtTExjpnzTH1WKBqnVzCharnagHYT990LLw7bfDf9g,7951
+ transformers/commands/run.py,sha256=YmVnwcl3ECHAGpJG8YnG-t1-SH1triqwkvydAllCvTI,4316
+ transformers/commands/serving.py,sha256=UqPfTf7ptECNO6Sbkz-0o6_jNgdXC-IWv7I5BqkbneU,8088
+ transformers/commands/train.py,sha256=ci3PIPifxzAPVuNrEo6V2CtFU8Can1lNLc0jsP7UkSA,6371
+ transformers/commands/transformers_cli.py,sha256=3SbLbWHB7YD5yAFlkiVPv7y2i2SD59ouQn132C_47uU,1837
+ transformers/commands/user.py,sha256=tSTrVFHnXH_OkjExNe9CK-Qs1LDtS-UdIM3rXb0XXSQ,15582
+ transformers/data/__init__.py,sha256=R-RTJ0SCSk2tAZmM7uiB_EeDrgMVfg0GxVkKIU58ikk,1271
+ transformers/data/data_collator.py,sha256=fwvMWLGVLOtvdSY-ttZxDqMZtSg-Taxyfwdf2e_gxJ0,36353
+ transformers/data/test_generation_utils.py,sha256=mImkKgzBwIyM4YyYU43EI_33giCE7RFOdx0AXyrrxIg,3438
+ transformers/data/datasets/__init__.py,sha256=w6nfIPWoQNrf9dn1fVK9WQGm2eHtZOw2HOKSACbWNlk,1080
+ transformers/data/datasets/glue.py,sha256=yBT_ZxcLJ8qP89OFYKG57kX6eXc13DVXLDBtahJhy0M,6139
+ transformers/data/datasets/language_modeling.py,sha256=dSA2_W4SL5PpzU6zREOZgpPkrFmlF6r0SRghRl6U0Tc,22637
+ transformers/data/datasets/squad.py,sha256=hZC70ZAtBVd7LsZMV11cFfcT7mkMTaqNu5DobraZEy0,9042
+ transformers/data/metrics/__init__.py,sha256=vn0Ia7TaHRC7uSF-trNGueJbh5QqbbtxwUagTPk4_ro,3783
+ transformers/data/metrics/squad_metrics.py,sha256=CtGdGGEKIweyyuwQw2NRwvNHIWwoKsHsuW9rTunwIXg,29617
+ transformers/data/processors/__init__.py,sha256=4AxJWnnlohFY0fgJ3f24tP3lQ4t4Z5ks9WypFH7F9Fk,1185
+ transformers/data/processors/glue.py,sha256=xmDyK07tmoc5Lwg3pivCTdLD8TKws4JoKCuBcYo1wIY,23289
+ transformers/data/processors/squad.py,sha256=5nQEqW043AvF8HdrIEAYy_SkpUOcuZY64C-8jds_hPY,33283
+ transformers/data/processors/utils.py,sha256=4dbyHPQIFX8x0O76xn-DtevfRFZcwo2nuqNBd_mh_nw,13833
+ transformers/data/processors/xnli.py,sha256=eylu-SFA0gn7ZIv7Rp1Eu8vdqai3RLFxysNiHAr45pc,3290
+ transformers/models/__init__.py,sha256=2U9ggegZWChHQep_GQ7VjH32nvO13epoj4HowfPD7uM,1656
+ transformers/models/albert/__init__.py,sha256=8J8D7gJk-iSbof8u9HqWk9BZ7vZ7st2aUW3M5WlFDsw,3549
+ transformers/models/albert/configuration_albert.py,sha256=xd3H4FpdoYn4jo2VC3EROrbnzzgsoe0F5BWkcUj63bA,8984
+ transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py,sha256=OLAsUH7e5hPQz68AbPqy_ZJm8QzueEYX15nk1YAbZCE,2155
+ transformers/models/albert/modeling_albert.py,sha256=ARYj-1vw4Vxr5_nL_fGxhCk2ZH-t0ViEv86gfOiZwBc,56453
+ transformers/models/albert/modeling_tf_albert.py,sha256=9es_iwVUVYEeQnymF4E3rzeJNP9afxcBV8Rm5C62TiQ,70275
+ transformers/models/albert/tokenization_albert.py,sha256=3BUdj8cApmJ3jzVGxTt6MaDmKEXJ65hlTPQsKpb2iok,14513
+ transformers/models/albert/tokenization_albert_fast.py,sha256=So4k2K_m_fSBvHf193o3bngbI_YszSkd4tQPIsnhpEI,10552
+ transformers/models/auto/__init__.py,sha256=UUszNWq7T1ls17EfOIzs9JYuJ5q34q55goKAFgtE2nw,8313
+ transformers/models/auto/auto_factory.py,sha256=ARmNHJ4ZD5vn5qxohsJMQq-6QKeQGzA0HN5ZpQ7-jlc,27603
+ transformers/models/auto/configuration_auto.py,sha256=82bx143ABrTfxZt1YMi9KUmnvc9InD2Ii2mwnmifJg4,22776
+ transformers/models/auto/feature_extraction_auto.py,sha256=FnJyoBpHdHh_PclNLvkWEFy9oRhtKx6rw1DNKv4JFG8,8260
+ transformers/models/auto/modeling_auto.py,sha256=y2k6_ZzfCGVtF7WTIDSxjB5GlX0bvY1YdTst2He49t8,33274
+ transformers/models/auto/modeling_flax_auto.py,sha256=HFSU-_kxLAmIshTP5up2_28vJYtz0HmjQWNyntHDFuk,9806
+ transformers/models/auto/modeling_tf_auto.py,sha256=-rn0-ulsFfMPcawLmfvexAUAI2oVXDrQBiJ1SD5qRPM,21745
+ transformers/models/auto/tokenization_auto.py,sha256=893OMQv9bzbmFFXHeNK7iUtN2naYytBs4sib-huNEkA,27427
+ transformers/models/bart/__init__.py,sha256=kziWliIcT1VPlGoReqFgTCD2pGvWjm1pF8ymu85ff7g,3116
+ transformers/models/bart/configuration_bart.py,sha256=n3P_Y82l7rA_AoCVr-Cahpzk3rrYtvYOQ3qt_ScbBeQ,10106
+ transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py,sha256=UbkbQ6_JRDqbMtk8kmwQ8LEX87fU_SxlyBDQCgrHNiI,5647
+ transformers/models/bart/modeling_bart.py,sha256=URn7nSgmP_CO97TsAIGpIZY6nlp8WT1ztcGGV12J5ns,83415
+ transformers/models/bart/modeling_flax_bart.py,sha256=VaMyYVSZn3fQHL30R2AmHRJ_aHZTKuCZBr1Q7n7xCCg,72353
+ transformers/models/bart/modeling_tf_bart.py,sha256=VC8LdpMVrh1mTeFM6qP3F6OD0ePhutCfkTykn_GE6rA,70569
+ transformers/models/bart/tokenization_bart.py,sha256=ewxV_QGr2VyJWyp3Qv4kV9GsnUiQnDvKyyrrHYgM5yg,3052
+ transformers/models/bart/tokenization_bart_fast.py,sha256=0PVvHIXOWMNfm-g2os7cjqTgFHvaVVDq45tGe-QrDtE,3942
+ transformers/models/barthez/__init__.py,sha256=-Fop4WQ7FLyBfQGhrDDJIj-DFCAyv83c-REkQXu0rrw,1475
+ transformers/models/barthez/tokenization_barthez.py,sha256=TtTltDUxNczCa9FnxDhaQYvGCKRpITpHd6UozYzrwZc,12497
+ transformers/models/barthez/tokenization_barthez_fast.py,sha256=yaxGNyt1hoKE8FnzG2tchLH7t-vVAhXFQfZg5QIEv1A,8748
+ transformers/models/bert/__init__.py,sha256=d7A-FZGgSOznzjFcvizRU80_75QJDW0fIvPXM4VcYQE,4611
+ transformers/models/bert/configuration_bert.py,sha256=uPrjdZZIHo3JFY9_qvfBjHbt-TiGbl7IYHqjAymLoVI,10322
+ transformers/models/bert/convert_bert_original_tf2_checkpoint_to_pytorch.py,sha256=PR3miUETPZEBnT5Q9alOijwT-iOifcR2rJXXhtN7u98,10253
+ transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py,sha256=NjnB6Ux0mnn_D7WeDBkNiMqEr2LYYztU8eByGVQBEec,2131
+ transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py,sha256=rOZ4Af0a3lEGdCR5ikswI67O2BcDnfL5eTtrXMk5jNw,4101
+ transformers/models/bert/modeling_bert.py,sha256=zdPoyaaa0lLc5tFiLdQs4w33IHnp-gCZQO_xi2LaymY,78800
+ transformers/models/bert/modeling_flax_bert.py,sha256=_XICp_qYmG2Wz_f0hiuBvwscNaW0eTkN3JpFzDxXiLs,42177
+ transformers/models/bert/modeling_tf_bert.py,sha256=cjF7Vbutv27RhPApThPJWXVB4SyaYbedY7_p-cz8HjE,82409
+ transformers/models/bert/tokenization_bert.py,sha256=SIMJJZ5eDquFAQjM-TUk33tCYCkBDgoBYMfOJ1D9kxQ,24552
+ transformers/models/bert/tokenization_bert_fast.py,sha256=y7tT0ngXQFhiPPWOwPYJWia4qW2VAJZJXBCdSnepGUE,14323
+ transformers/models/bert_generation/__init__.py,sha256=6y5LOb5FgJeEZLQx8GFVa66qjddp1qnx9f_8Vft0sDA,1908
+ transformers/models/bert_generation/configuration_bert_generation.py,sha256=QurVdo80hLC3Z4saVEjvTbfrNC4iNi6RDML8_Nss6BM,6324
+ transformers/models/bert_generation/modeling_bert_generation.py,sha256=WpIHBzssvF-an19oeHIhfa4FwXP00-j-cw44gZVRYf0,27566
+ transformers/models/bert_generation/tokenization_bert_generation.py,sha256=-nazgr01l2RabK2d9lUKzzqRQmeFx-i7aUeri9jtqDM,6513
+ transformers/models/bert_japanese/__init__.py,sha256=bxBT0C99Sf-kvvEMjBjN6J5SGsMIMKImFYzgMqPNurA,1214
+ transformers/models/bert_japanese/tokenization_bert_japanese.py,sha256=nO4woRqHo6xg4ClhpfI54JqDb6Fo983S2WJxmxYZVdI,13227
+ transformers/models/bertweet/__init__.py,sha256=FOpFC4vt8uqAtPfvKd5wTL2BBlmW4QmMgSayprpNvw4,1120
+ transformers/models/bertweet/tokenization_bertweet.py,sha256=iyvCaH0gW0PzSdg1WlPlvauj7Z74Vmkc3uFjZdbX8HE,27394
+ transformers/models/big_bird/__init__.py,sha256=nBBqD92FXQey1n0bharIf2VFM_u3fJTsVbKmrxSV_gQ,3590
+ transformers/models/big_bird/configuration_big_bird.py,sha256=GcbvK1Axpt2rwR2eDcjoRR2dDQcJB96GlcP7fwjXlOs,8037
+ transformers/models/big_bird/convert_bigbird_original_tf_checkpoint_to_pytorch.py,sha256=3dCKKeSoJK72D6YWYLL07MNSt4jmHygu0De249VWt5I,2466
+ transformers/models/big_bird/modeling_big_bird.py,sha256=5IwxsjkyanY8YsTJTcjCYoQ18Qo905QEPNv1uuV4gWE,135005
+ transformers/models/big_bird/modeling_flax_big_bird.py,sha256=RqxVMq35mC-kMgpM2gvncpFWG40Ab37USDGEg8qVUhY,85524
+ transformers/models/big_bird/tokenization_big_bird.py,sha256=e7TugIGmPO_cZP2sH0LTgFlPbA-CoWl5nYO42O43JOU,12261
+ transformers/models/big_bird/tokenization_big_bird_fast.py,sha256=s0n6bRd90Zwx3o29DLw7IGZdA3Ws3DF2oFd1qwTiScM,11146
+ transformers/models/bigbird_pegasus/__init__.py,sha256=vkNAvxkXV4vXQP5_wk2WqVIO0ENBBuyq0SgrvxXNKkk,2072
+ transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py,sha256=mCJhSplCvne8ll8SuyhOmW4cxDoNq3wt4UYoWLNUCyk,9748
+ transformers/models/bigbird_pegasus/convert_bigbird_pegasus_tf_to_pytorch.py,sha256=CsoYYIaepYmhoG7r3tQKlRsXKfKUc0pxzciN0hwxysU,6295
+ transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py,sha256=7rQJxMwPxpOYtcQ5Ay2i56S7n1SUU-mTskJMiBRLRls,142054
+ transformers/models/blenderbot/__init__.py,sha256=kmjdtkZ-eCkAdCZafgksddC86P1GVNs3ix5OT6AH9tY,2331
+ transformers/models/blenderbot/configuration_blenderbot.py,sha256=QzkhuNUkB1Hl4bf19QIu-tutw39fqUZlLDJYaXvhwZU,8590
+ transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py,sha256=bqHKTaU9P7BUedAtCHYeT_NaFxg4EpkEDgzNWX23Jcs,3678
+ transformers/models/blenderbot/modeling_blenderbot.py,sha256=cLkly7g8Ia8w5iF99txrXO4o5on19frzEBVeU1tIKvA,74735
+ transformers/models/blenderbot/modeling_tf_blenderbot.py,sha256=ucbzVV9XZFli4JdEEA3O5IbIguhAQGgR9lbkzk2S6kg,73218
+ transformers/models/blenderbot/tokenization_blenderbot.py,sha256=bezI1HOBQDD4iQeZQwSI3RhXxZsL0fiRq07hvhdRFoE,4134
+ transformers/models/blenderbot_small/__init__.py,sha256=0fA-n-rY7xRq3nV5gzJJc1aQ13-ySQ454M2o9NYzY3E,2490
+ transformers/models/blenderbot_small/configuration_blenderbot_small.py,sha256=bKICjyWf07HkDLAN-Ah3XAk9XY8EB4EgW9AeLV5Ga2Y,8603
+ transformers/models/blenderbot_small/modeling_blenderbot_small.py,sha256=LiM49wsZbeK8zZX-Ihk6aqOsAnUM3FjGhZR9Hpw7wRo,73753
+ transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py,sha256=8dmNwSSqG4uzrn_H-ze9CUm8-bPYEhx6AosSxpOwR6w,72131
+ transformers/models/blenderbot_small/tokenization_blenderbot_small.py,sha256=qubiFN1ZlIXVyJvItd_iBWLKztbn9P_lcXf9aRsQCFs,8679
+ transformers/models/blenderbot_small/tokenization_blenderbot_small_fast.py,sha256=nxmEHs9hgXc6SDNcNAUKtxiqMTmj-XeRPszEQWj-nCg,4063
+ transformers/models/byt5/__init__.py,sha256=Xtl2luQjzz0YllzgMvN-BKJBZGu20I1gxi6J0nMthSM,1103
+ transformers/models/byt5/convert_byt5_original_tf_checkpoint_to_pytorch.py,sha256=hTwfWSLeVT9i8l8lRQ9ofEv2ntGTvgfM0UVZsA3LvCk,2107
+ transformers/models/byt5/tokenization_byt5.py,sha256=o253pYWQAurF4JILp1-LRv5MNdylhOwA4VjpUsECdKE,11475
+ transformers/models/camembert/__init__.py,sha256=tFcuCF1aPIwwVmOJuEpu3fNDLetaeFJwaC43vZ_yjWw,3303
+ transformers/models/camembert/configuration_camembert.py,sha256=lxDxh3c6h8jpiCLHOmqupFvxGL7Ej35j4iXBvUgRMss,1499
+ transformers/models/camembert/modeling_camembert.py,sha256=MC24--K-qW-h2axQwIgVuoY0dXmM__8RZFV-0Ew60_A,5734
+ transformers/models/camembert/modeling_tf_camembert.py,sha256=6ho5nuIm0L9WyYfXKTbhjd-2gEJyVFwr-X_U3bnwQAg,6327
+ transformers/models/camembert/tokenization_camembert.py,sha256=P8-3XFuCXN6jriYkaroGg3ssQ8bCX4tTLV72GqDGHsA,12909
+ transformers/models/camembert/tokenization_camembert_fast.py,sha256=nr3MwkEiXXLDvRY9CVgX_xg8PTZrEcupq9V6deBvxwg,8554
+ transformers/models/canine/__init__.py,sha256=XUHB8nqcjQFzXG7mJn2qd-e7EHFZgJNdglslflPzJ3s,2146
+ transformers/models/canine/configuration_canine.py,sha256=AjkfV7-g1vU6qM15bcJaP6t4j5NM-UgMkUZDaj-yzvA,6959
+ transformers/models/canine/convert_canine_original_tf_checkpoint_to_pytorch.py,sha256=nydwXeyOU3vbMxHgqj14ln-VJFOfH4FZG_stSDUa5Y4,2118
+ transformers/models/canine/modeling_canine.py,sha256=B9o_ZzAZ9A1eJe71lAfMTxKxoO2GNF6F0__-5hU4uao,70648
+ transformers/models/canine/tokenization_canine.py,sha256=_Ie4bEc7IK_T6u4Fz-xHN7TPzKQXD6eVr8vg-XXnpPQ,9435
+ transformers/models/clip/__init__.py,sha256=wTM_SED5cW4Db4qkxhLX4yTUASrRaLfDbkwZ2m76Ygw,2995
+ transformers/models/clip/configuration_clip.py,sha256=TR3ESQgB7V43kZykmfDOzFH_luAm7cHgBH6WAWFiYpQ,13419
+ transformers/models/clip/convert_clip_original_pytorch_to_hf.py,sha256=QaPBO8oTgNDalO0YCXNnHSnMb0c28fhGX8bN9KtKQTc,5234
+ transformers/models/clip/feature_extraction_clip.py,sha256=7IuoTT-PVpu3oc2hwveQVK4MG-u4qLX8zT3z3lRIxM0,9548
+ transformers/models/clip/modeling_clip.py,sha256=Z5W53P8Gz5QKGH9_35J2Fvojjve3wkhSvFSU3rtoH_s,43692
+ transformers/models/clip/modeling_flax_clip.py,sha256=Gm9XCedk4EzS7h6X9a1r4QKnonlmA2PwFPtzl7hlzMs,44715
+ transformers/models/clip/processing_clip.py,sha256=wLI74ICxPbxZtTMYjgEVNh7zt7H_sSLKURhtFu10myA,9021
+ transformers/models/clip/tokenization_clip.py,sha256=Ex035V_am2Od91DLW4d30L0EDJz6rK3M2Ooj937jDLI,14668
+ transformers/models/clip/tokenization_clip_fast.py,sha256=SxAsstgsSsl7KVrGCV-gP0p-C1YOxK5e0nka_lbrITg,7022
+ transformers/models/convbert/__init__.py,sha256=RYoeKe0hTIWveA757hwUPKHFk3FqyFgt6pQBlGM_TC8,3380
+ transformers/models/convbert/configuration_convbert.py,sha256=RZKc_3ITMqU77hq5IEpRhjy1khdp8sQWrtxOMF_94M8,6767
+ transformers/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py,sha256=2j1QS0SBJPf1EA5Vulhre8iOvQ7nmFP-Dp6LsjVwwaE,2080
+ transformers/models/convbert/modeling_convbert.py,sha256=F0hdazX8I4hSlWRqf1zt4olSWr6gx-XRgtkCcEmLsf8,54697
+ transformers/models/convbert/modeling_tf_convbert.py,sha256=a_BCLiwT0Y1BgW9cGj5x3-JN6KU_M2iRoAU6LyljHEQ,58768
+ transformers/models/convbert/tokenization_convbert.py,sha256=E82NNDbKn2FwLyqu90wRFPbPQItz7bDPr1q8oGuottc,2216
+ transformers/models/convbert/tokenization_convbert_fast.py,sha256=3az76x_YtwSONZ973wqgCamLlsj3EXCHpECRIj1IG7w,2422
+ transformers/models/cpm/__init__.py,sha256=HRqcN66_579HtzB_p3CwyXvIK10ITOR02uIOyAUlns8,1100
+ transformers/models/cpm/tokenization_cpm.py,sha256=NDEa4PkPAjiHy1QcMMMcbbz1yIGBhGOZx9WrOY6rk-8,5653
+ transformers/models/ctrl/__init__.py,sha256=er5JdEvoPH-K--lojvel37JyzW0MAnX42JXDtLkKe24,2314
+ transformers/models/ctrl/configuration_ctrl.py,sha256=OQ-uZbfb-efDW_4F7oxKoL2ZFBMQfnN2fvWBJlmoXKA,5824
+ transformers/models/ctrl/modeling_ctrl.py,sha256=GjgrYEC29q-LJeABfjMyldJIRaK_J7DLSnBVGRQsScY,29015
+ transformers/models/ctrl/modeling_tf_ctrl.py,sha256=KMVavid4roBqwlBmDHJZSySGYHGaBzGdUgoX4DIDHlU,38193
+ transformers/models/ctrl/tokenization_ctrl.py,sha256=Gb1ogxdYT--TsNG8TIC0WGHsMtC4WgORO95XlBNIQOk,8504
+ transformers/models/deberta/__init__.py,sha256=ykQQ5_ilRz6rF1Au791TfvaqAp_1WGKa_Rzp1ACJeaE,2248
+ transformers/models/deberta/configuration_deberta.py,sha256=SNaXOHs1A9bo7r9_CBLpK-P55LTTVTdsDyQXMg3bZ6s,7628
+ transformers/models/deberta/modeling_deberta.py,sha256=0PslO1AjaiZzyx-C2pjT6fBieNWX6qQF0A88tgAKKYM,54099
+ transformers/models/deberta/tokenization_deberta.py,sha256=BsnRgwkYatUjm6lYrWW9mLmswkzV0zcgXP2Ll6o0T_I,10346
+ transformers/models/deberta/tokenization_deberta_fast.py,sha256=93HIVR1ZY9dRkC1OHQ1pDcTss34hsWDWIVvFDmRVtFk,9082
+ transformers/models/deberta_v2/__init__.py,sha256=2pwZbM4XA1xWqLJ9viVBmN3aqEic3efXcrmfxEYoomo,2073
+ transformers/models/deberta_v2/configuration_deberta_v2.py,sha256=qisVQyP_Wsx49WpXt-UPHwXEqKFT1Bh79iE2-yN7hxw,7372
+ transformers/models/deberta_v2/modeling_deberta_v2.py,sha256=jA3LJ1sh20Hb0ZoU-adVCbXku1QWYfq93legg3Yngo0,60591
+ transformers/models/deberta_v2/tokenization_deberta_v2.py,sha256=lEmSxZFxI2s3qNgT09mKp0a3PgBfgy_eWGmzjKT7HaQ,21489
+ transformers/models/deit/__init__.py,sha256=h34d4WRVvUcH2P48HWG0uK90tT9MOAR_ptYhBcw39q4,1947
+ transformers/models/deit/configuration_deit.py,sha256=q3SAkwL3APVQFG_58Q1TX8ldPkzciuiCAI-H4AOBu3U,5379
+ transformers/models/deit/convert_deit_timm_to_pytorch.py,sha256=L0oFx6LYxnMBLFW8McGKewqqGBXTGnoUShOm9yz7DGs,9000
+ transformers/models/deit/feature_extraction_deit.py,sha256=yc9q5mvGCpU1R1LZUnMoFdxM-JksHuODdGybZzD8kHU,7516
+ transformers/models/deit/modeling_deit.py,sha256=yWg5X3MLBWc0M0_VqVnXkGH-8Vvdwsi-mE3QRJYSfoQ,31573
+ transformers/models/detr/__init__.py,sha256=uLBDFKtezCM2UvNElWLhXTW2TpreaFR-tlQfIfeb0Kk,1900
+ transformers/models/detr/configuration_detr.py,sha256=GLD9JB5MpJzMJUDA0DOFu615xylVw34rtHNdynF2bU8,9986
+ transformers/models/detr/convert_detr_original_pytorch_checkpoint_to_pytorch.py,sha256=706Uraj_6meBTiNfJHHEcYfCHcSDzAYikQFI4glZRck,13304
+ transformers/models/detr/feature_extraction_detr.py,sha256=9zFrx7n3F_rDdK5AmINkcgr3CpR0HH8fLlZAaKTlt0Q,40828
+ transformers/models/detr/modeling_detr.py,sha256=vL7kL7UlmhqNZhjtyuIEvNjXy8HeV5Qj1XySNLcoUA0,107836
+ transformers/models/dialogpt/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py,sha256=7yVhPLmBPKPzH8DjJC76cc7b8LgMGF2ldBmYjxXBMRE,1542
+ transformers/models/distilbert/__init__.py,sha256=407Gmi_b1whR5xHLA2WFAmDxN7ZpAxIMEd-U7D-DWNs,3469
+ transformers/models/distilbert/configuration_distilbert.py,sha256=_l2dKOO0QsvZ4Ke_iA0tga_4XjYWKGhqNjgyIQkKzYw,7276
+ transformers/models/distilbert/modeling_distilbert.py,sha256=1OJuy34hXoX95KpaujrJpxUE4dMfLAcNuDjkUad3WzA,39882
+ transformers/models/distilbert/modeling_tf_distilbert.py,sha256=MLW7nCQepM678t35_-HHxqwfqn16X654HTtNAeshE1E,49790
+ transformers/models/distilbert/tokenization_distilbert.py,sha256=d8Z9iefTiTEsbwnr9XItqzskfEEnnOAB_-AOa4kzYp4,3035
+ transformers/models/distilbert/tokenization_distilbert_fast.py,sha256=-KNiynUuBa_5hdkygpqjs_k4vJvTvIWSjF-yGcw1ycc,4056
+ transformers/models/dpr/__init__.py,sha256=RsOLAw138qc1-FoQNdZL_NPWD6klnMycywm33PfLPkk,3826
+ transformers/models/dpr/configuration_dpr.py,sha256=MRADjsuWRpGZYTKiSTIFnBVSmrVJK-08Ma_RQ5_6OpM,6939
+ transformers/models/dpr/convert_dpr_original_checkpoint_to_pytorch.py,sha256=5qPWZSb44QhFzOySnfcBuT3K1V1-ZH8klukzxSuXcrQ,6043
+ transformers/models/dpr/modeling_dpr.py,sha256=KsQHIsHKogsG0yiT2TV11SSzME00P9ofmJF0sIwlAY8,28713
+ transformers/models/dpr/modeling_tf_dpr.py,sha256=VOIN8gaPjr2mYn1Cp0u8S-yhRtuY8TYmkHofjbEU-Hs,37860
+ transformers/models/dpr/tokenization_dpr.py,sha256=RZV9ZkZ9ryZ0VowJ5HiI2olJ24m76XaJtlo0F-0l6KM,20037
+ transformers/models/dpr/tokenization_dpr_fast.py,sha256=aCFb2XO0ckV7VcRVQcDNU9NB5BmxBqnkdVEDlSluRIc,20519
+ transformers/models/electra/__init__.py,sha256=aVSQVDWB1QMIrEkdfcoBYT8_tKiU687gvDu0F1dvs-Y,4186
+ transformers/models/electra/configuration_electra.py,sha256=76U1cLSNqzsvCjddyw85NzZxm2uOvJTMr_7NoJ86AI0,9202
+ transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py,sha256=MOEFIHFKfG2WxwXM0TMQl7AmiSf5U7NPL-vVbFIyABs,2845
+ transformers/models/electra/modeling_electra.py,sha256=r1cwGcYqkPRW4r9-2S6WpACBP32RMsDFW7pg8POD4dI,61975
+ transformers/models/electra/modeling_flax_electra.py,sha256=s84SC1_aVLzNue5qpjL_arK4gmBvKf5KLP8ltiBAUXE,42701
+ transformers/models/electra/modeling_tf_electra.py,sha256=anOcNiG9waHqTLujFkyBg-xQwV-lJ9nKkD63It6JTBk,63334
+ transformers/models/electra/tokenization_electra.py,sha256=rb3nAbMxmJyq-cb44Ca-rCCeXPeIjSd3jO0WFZsK83g,2932
+ transformers/models/electra/tokenization_electra_fast.py,sha256=ophZMO96tYRDSwLutR-o5TULc0k8FWMF0k5EZVx99Co,3958
+ transformers/models/encoder_decoder/__init__.py,sha256=s_wPAF0AY_XJNDiI5bDmFd0S96wmxbYomZG27Odf_PM,1360
+ transformers/models/encoder_decoder/configuration_encoder_decoder.py,sha256=4p5_bAUE0Et_mbmPr2aDjJ3KTFum68DZ04fIyubvSlE,5040
+ transformers/models/encoder_decoder/modeling_encoder_decoder.py,sha256=kLGt2RE1ceRrjOdaNxkzK6MGCpC1NZnw3SysCDaqIs0,26150
+ transformers/models/flaubert/__init__.py,sha256=kJtlXCOOIlakrMxWr9hVFvxPSoIG6g_9Ir5Bqgitohc,3000
+ transformers/models/flaubert/configuration_flaubert.py,sha256=NC4Z-6_1POjLFq56ZraXbsugMF6gg_Mk614X6ER8CSQ,8932
+ transformers/models/flaubert/modeling_flaubert.py,sha256=szgNid8rpx23BW-AckFZIxh5iIrseLSuO1qIMhcX88s,17590
+ transformers/models/flaubert/modeling_tf_flaubert.py,sha256=QxxE7CFiHErUaND9YQA0y1EjeCwagQmtEjcrs4FVZc0,40272
+ transformers/models/flaubert/tokenization_flaubert.py,sha256=AclXmXd5bo932ikHjE3SMx3O8XdwizDROutYNXOUkOc,5634
+ transformers/models/fsmt/__init__.py,sha256=n-FV1sHqqhsYDfSunvpB01kVbctRQ15oqxfVk290vz0,1549
240
+ transformers/models/fsmt/configuration_fsmt.py,sha256=WgzZf_tLEyZc2DChFcI3i9QtZE6nUECicDzbulIUyCg,10332
241
+ transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py,sha256=1BmcjXkhypX_wZJA-ioao5V4MFGkSFjw89IQWTWt224,11261
242
+ transformers/models/fsmt/modeling_fsmt.py,sha256=oEpqmehcGXmfwlI9zbIUom-P80OeQ0bBpWPKRv5ktQo,53914
243
+ transformers/models/fsmt/tokenization_fsmt.py,sha256=2jqQC7bm8xkl20Zr_1VWzO2flXRgFMmRAqn8ULf3KF0,19528
244
+ transformers/models/funnel/__init__.py,sha256=cKF9oId3IFbf3jZTnvaRTfoHjQx2jo6m76X8UUFyW78,3479
245
+ transformers/models/funnel/configuration_funnel.py,sha256=9IfjsdHuM7BfYqV5KNV3e7O1aiAMkpJ_OvZ7QPywRJk,9492
246
+ transformers/models/funnel/convert_funnel_original_tf_checkpoint_to_pytorch.py,sha256=iKNN_afXPjNMcPX0MYvmabhyAvsIcVnMiYuifogXhwE,2346
247
+ transformers/models/funnel/modeling_funnel.py,sha256=6Keja81WqGU-rAAyBgAcJ5i9BbrfHUPQFwTYPwc5ewA,66858
248
+ transformers/models/funnel/modeling_tf_funnel.py,sha256=4c4x74bel9DCr1Gkl-uvc3kH9VBzaZ_pSlXgW9eW3R4,78480
249
+ transformers/models/funnel/tokenization_funnel.py,sha256=Bm5zf8wblvVBkBAsXoQBrkrm-w1A115cqpB5f7xC4_w,5374
250
+ transformers/models/funnel/tokenization_funnel_fast.py,sha256=I_bcEn4TG7EePyDGInyJ0pmFW0gR-giKqSujyCfI9Lo,6927
251
+ transformers/models/gpt2/__init__.py,sha256=8-D8VXEsbHWD9tmqweyqq5CnOYX6VkNxGfzQZaRZDdE,3123
252
+ transformers/models/gpt2/configuration_gpt2.py,sha256=kUpRhtADZAbjU2ySXzuSq6ON8wORzxwcdFHcDAOiiMM,11777
253
+ transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py,sha256=vBV5t7JmzbZIiSDviZ7FXshxp3clEIkRLEsn74Dt8ZM,2539
254
+ transformers/models/gpt2/modeling_flax_gpt2.py,sha256=rFctE-bgEx-yXL781trg8I7rBWhAQke-npNa98VCpG8,25996
255
+ transformers/models/gpt2/modeling_gpt2.py,sha256=n3tCxoqn0lw8mW_7SivNwxMHTuV0oP9Bvjmsz9rdzWc,57110
256
+ transformers/models/gpt2/modeling_tf_gpt2.py,sha256=CUKfIwdt0KPqCbzMLioKMcWhkyd1en1-k0ZRAbVB5B0,45855
257
+ transformers/models/gpt2/tokenization_gpt2.py,sha256=AXkEtOyFMOvPNHhC3emnyyc5_tiywhRZIdh1Q6YhMZQ,12364
258
+ transformers/models/gpt2/tokenization_gpt2_fast.py,sha256=98egXbJ50ZooJHWYGX3btf1S5N8JW2E0gP4SaONa93Y,8131
259
+ transformers/models/gpt_neo/__init__.py,sha256=UsPoQamm04s36zzOWbr_y0UHkp9sw4_lPru4DAvrnCA,2146
260
+ transformers/models/gpt_neo/configuration_gpt_neo.py,sha256=17JmCAz6fg_q7P5r_vb7di06SmpVUQcD_YiySfw-j7k,8477
261
+ transformers/models/gpt_neo/convert_gpt_neo_mesh_tf_to_pytorch.py,sha256=yzNqtgcRsn8Yzqyj_fPr2LtNAneGGMLimOr0vd2QY1Y,2555
262
+ transformers/models/gpt_neo/modeling_flax_gpt_neo.py,sha256=D-69-XsN2kf-y-hej4BSpvpP3lOsfDNV46DpgsTWq7Q,27203
263
+ transformers/models/gpt_neo/modeling_gpt_neo.py,sha256=epo0FIDYt5Kq3Q6bUL6U1Mpauf2uLWGVxNEnwsPuaj4,48638
264
+ transformers/models/herbert/__init__.py,sha256=JGTluXj6Vu7J_PbT96u4fZn747W-kgDxds2XhZUiGgo,1353
265
+ transformers/models/herbert/tokenization_herbert.py,sha256=JUq0RLMRWVexD-mhH0BlIGwY5FZo4eJ42N38zQeSHDU,3318
266
+ transformers/models/herbert/tokenization_herbert_fast.py,sha256=CgFQgfanNDVcoVvlKy41mc-wcsRria0klVhlAvt4dKk,6637
267
+ transformers/models/hubert/__init__.py,sha256=uuaj_2QHmHGy_mF-vJa8hOUPX-YYjsJ6_30XeTXpAdo,2081
268
+ transformers/models/hubert/configuration_hubert.py,sha256=iLIWTsULEUKHp9yBwkXBw9ieNyCDFnMCH-SsMe8z4Rg,12793
269
+ transformers/models/hubert/convert_hubert_original_pytorch_checkpoint_to_pytorch.py,sha256=98f1RkezhoFw1o3DBc1JDn1RyPbgSXztIXvZf-PoyeU,10289
270
+ transformers/models/hubert/modeling_hubert.py,sha256=l57EfHdLE0f-sc8GnnKoh74T_QuOHXOs2JlV2CYmtPU,46180
271
+ transformers/models/hubert/modeling_tf_hubert.py,sha256=yfCBs7_g7vP-LlkjEFvspNoLc9NXRbfwDijpDNJ6qbg,69660
272
+ transformers/models/ibert/__init__.py,sha256=6m7pRKEpPY3whQX_tMykyj6GKISmN6EUL8j8muWcH8U,1931
273
+ transformers/models/ibert/configuration_ibert.py,sha256=oRekTvZ9n4RlAtSccsSwjwE_ixiqwrkWtOPXyv0mK6c,6985
274
+ transformers/models/ibert/modeling_ibert.py,sha256=04xNFgchSF2y29amqxBVdvesQHSZ9VPxxt77TCwNX2c,55422
275
+ transformers/models/ibert/quant_modules.py,sha256=PLC0kaGCLpba2U9KVSnKgr3TXHArzDn2KDSa-X9jb4E,30518
276
+ transformers/models/layoutlm/__init__.py,sha256=ty6xok63JL_NvbGJ526pApD9wRIlBHtg_w5HEGkRI0I,3074
277
+ transformers/models/layoutlm/configuration_layoutlm.py,sha256=8wyEXlhMbjtFSj22Um_alHP2s8jp4DLtR-hJhQ6xQUw,6160
278
+ transformers/models/layoutlm/modeling_layoutlm.py,sha256=DYnhFQo-p910IO8IPkbCyL_bwmNhcScTFaxM4I0nLDo,50874
279
+ transformers/models/layoutlm/modeling_tf_layoutlm.py,sha256=oyyha6WduufuzUSQZf1B-vXrhh16oNZt7nIRKSn6Huo,58199
280
+ transformers/models/layoutlm/tokenization_layoutlm.py,sha256=kMS2shyz8IHQeGOnrj73t7EfbFoyP9qXFuT55WWb8E8,2088
281
+ transformers/models/layoutlm/tokenization_layoutlm_fast.py,sha256=MfxzusXtkSkmF81M7uV6iO9cqjOB9Fyxg9Czu-AzoIk,2550
282
+ transformers/models/led/__init__.py,sha256=c_bqXSjH3VMk8RAZPB0KaYoiasfZTlX69Jj8107T_n0,2361
283
+ transformers/models/led/configuration_led.py,sha256=68helyVz8SbpwRRsU5htER8jv1M1yYmjS1fGnKwDqf0,8324
284
+ transformers/models/led/modeling_led.py,sha256=f7sa7jRQCO8oS8K4NrGjGCzFvj3Xitt31M3pbxUXxQc,134093
285
+ transformers/models/led/modeling_tf_led.py,sha256=5CCMf3b5AMQpGYZH-C2qRkjFSS9w6R4mUmvxzMJgGPc,120358
286
+ transformers/models/led/tokenization_led.py,sha256=ZVYzi7oCyeUICFvJs7EjiEOTq0uN7ZAX75hul7UKj7E,1864
287
+ transformers/models/led/tokenization_led_fast.py,sha256=S9p6bPl0oCT98TKS1-1adPuR3Kar_LqHZSIFnCISiTE,2031
288
+ transformers/models/longformer/__init__.py,sha256=2uyO1tL0mK-FFx1Nklnt_f_8x11njwTddQDST7E-jkc,3549
289
+ transformers/models/longformer/configuration_longformer.py,sha256=8C_Qxph7CC74BAkyXULGxaS5i9m6ZlTIATMAoB_eyg0,4123
290
+ transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py,sha256=ZfHX4qCCudmvGCxJcOOIisAo2-1EKbW8SyR9UPhCxPI,3027
291
+ transformers/models/longformer/modeling_longformer.py,sha256=lgeIES6cTEsMCrKPIHSxw0A9enGviwf-vB_KPNDmrCo,110628
292
+ transformers/models/longformer/modeling_tf_longformer.py,sha256=mkPh_c-hLaSUb8sE0Ixf_v2IC9XiLHQTgDhG6BvYMVg,127235
293
+ transformers/models/longformer/tokenization_longformer.py,sha256=Gt_6dB7x3RbqYRPk8l-dmZ9ZRl4fUKe89GeRUFeHO4s,3160
294
+ transformers/models/longformer/tokenization_longformer_fast.py,sha256=RA73Key0gDYua8XSn6OJs9BRq6yW4ZUvO9vR-Faoh4g,4188
295
+ transformers/models/luke/__init__.py,sha256=2WlFJ8I6s1mN2FCQfiHN6uVV9Xxxsm2rZYcKsP_hlRs,1897
296
+ transformers/models/luke/configuration_luke.py,sha256=H1NBju4NqeJdE5jxfrOrbLj67SRMz57JNTEWaT0SXW4,6702
297
+ transformers/models/luke/convert_luke_original_pytorch_checkpoint_to_pytorch.py,sha256=RYSLt__QN6rXqeEYdBI-ON8GEA4Hi0f5xR-T2dGlQT4,6719
298
+ transformers/models/luke/modeling_luke.py,sha256=pnDPHVWAyYjx5SAUvmYHGmRHFdcN2nrimEMw1GxphK4,63091
299
+ transformers/models/luke/tokenization_luke.py,sha256=WTbawV4mP61BtIHEVz8FaSDE6HDhUJT3JLPIK5KS8nw,78060
300
+ transformers/models/lxmert/__init__.py,sha256=VSCoPWUxei8l0-Az8O8obTGn0MqGwbxtrjEq6TfxsTI,2749
301
+ transformers/models/lxmert/configuration_lxmert.py,sha256=Sg8R2l-M0P3EKLep-hPTCvlXguwRTrTnrMlhLqQlNUg,9843
302
+ transformers/models/lxmert/convert_lxmert_original_tf_checkpoint_to_pytorch.py,sha256=TLkhtn4gzn5Y0iFYvuIEv1IKya2hmydLuVF13imKBTE,2120
303
+ transformers/models/lxmert/modeling_lxmert.py,sha256=1qgzPq7dnR4pXzODORGCdqj4FBYF_9YkLYxGHoG2yZI,64442
304
+ transformers/models/lxmert/modeling_tf_lxmert.py,sha256=VcIQaVSg6gwVzA3raIay7HOABdF7gmgu3qBxPTXsY0s,66786
305
+ transformers/models/lxmert/tokenization_lxmert.py,sha256=mE4AnbatqGoQlBkgG7PZbOOZmB6YswiZLhNAEOAEIRQ,1736
306
+ transformers/models/lxmert/tokenization_lxmert_fast.py,sha256=isspPJGsydl6KZAJ529-Q2j1TxejJy--Batg4rvMcfM,2098
307
+ transformers/models/m2m_100/__init__.py,sha256=-PDNEt423yFNbzYDsLATTK17_eWBWe4AyKknWsar784,1828
308
+ transformers/models/m2m_100/configuration_m2m_100.py,sha256=LNRqyjg0HROfou-W0BuxZnTyjxpHUwVI5lsPBdxfz2A,7857
309
+ transformers/models/m2m_100/convert_m2m100_original_checkpoint_to_pytorch.py,sha256=j9TXiOt9GCxiEnXIaJ_LoO4VbOgxoJ-M9LA3ln4IjCg,3118
310
+ transformers/models/m2m_100/modeling_m2m_100.py,sha256=VqEXq2MKESmIwxuGRLbL7WmYuXFC8OqQXY3UvON_ABk,63602
311
+ transformers/models/m2m_100/tokenization_m2m_100.py,sha256=3ZkGd7pt7lrptKjpz5waSkjYBJi4oGBBoxSg0eEt3jM,16130
312
+ transformers/models/marian/__init__.py,sha256=mI62_GvVBm3iC7JhbJXFZZTD5v8lWL0BTL-9dyXmM4E,2531
313
+ transformers/models/marian/configuration_marian.py,sha256=cBlNeGokBQ8mrXwA9k2Wh6gZVUgIhZQXM9QRF_WNA0w,8388
314
+ transformers/models/marian/convert_marian_tatoeba_to_pytorch.py,sha256=Qq7SMe7zVkUSMzAchyYqUNfSovsbGsf2E37r6Sx69tM,33875
315
+ transformers/models/marian/convert_marian_to_pytorch.py,sha256=rWSml1KxoloTJVRH_7lQLJRToc9sOXVEkLaCMgwtw44,23461
316
+ transformers/models/marian/modeling_flax_marian.py,sha256=_GO-tImUJ7k_sb0vpQPQuop2SVMs7CqA3sIw9sLmq4w,63842
317
+ transformers/models/marian/modeling_marian.py,sha256=ilHieeEScWHFIfbc8PRGbIy0QiK7aiRg8aQdbAuR0QY,73840
318
+ transformers/models/marian/modeling_tf_marian.py,sha256=-QhRuT9u0dtZ2yAzBZIijUMPhvEAXnjbpx_CJmPX7-w,73427
319
+ transformers/models/marian/tokenization_marian.py,sha256=eieC6xC-MSE5ZFBZz6F9KFNAa9QFxJth-QM0_HP4f8M,15286
320
+ transformers/models/mbart/__init__.py,sha256=ttany1sj06obpWDO9H6a4K4k0uMtl_dANYCKH3H1jRs,3526
321
+ transformers/models/mbart/configuration_mbart.py,sha256=oBN2FZfyVs9Dx7TJyrmXaRsAiSKhOD1rj5pZChg8kqQ,8333
322
+ transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py,sha256=xVW9Mj-jd7X_MImJCgS52Aok1CGPf-E6u8ptvG1hK8o,3035
323
+ transformers/models/mbart/modeling_flax_mbart.py,sha256=eH-43cISAda25vLcYpUND4NtXxN1AzpAr6klg3rNWzU,74350
324
+ transformers/models/mbart/modeling_mbart.py,sha256=mp3KT291lUFW0lUVYq-RjWh7dQEBXAzuOQmFvABMl60,84451
325
+ transformers/models/mbart/modeling_tf_mbart.py,sha256=ro1TBzHE4u4jHz_PLVSiL2Xgk2s7snXI5s8I4zGPstQ,72804
326
+ transformers/models/mbart/tokenization_mbart.py,sha256=nN3DBCY5qFG7WgEGLlNa-1C0F_dOr7teewsr3ebLAwU,9967
327
+ transformers/models/mbart/tokenization_mbart50.py,sha256=eVmRDQ7_KjS3LX4JJ7FjG_ytHG-pUmWBvpJg9pFVB9U,16070
328
+ transformers/models/mbart/tokenization_mbart50_fast.py,sha256=e0AzTg9Ph_TUWD5nJDgpBB6BLNdqNEM_DZ6edpFcXBg,12145
329
+ transformers/models/mbart/tokenization_mbart_fast.py,sha256=nJ3d87Fvc9bVjnd0a5ya7HOSX4aMxPE09h9VlghiT8g,9921
330
+ transformers/models/megatron_bert/__init__.py,sha256=J74QYURqS4nS3uGTVzNGQE0JFbpEgc7I6aVAJarNspQ,2380
331
+ transformers/models/megatron_bert/configuration_megatron_bert.py,sha256=Gn8_FxqEy647wOATwJOlDP5qrbXwsekqU0uIduAclck,6956
332
+ transformers/models/megatron_bert/convert_megatron_bert_checkpoint.py,sha256=PGwSEw2cfoJ9-zmJQubu8HkbkUO4my6vZxTW9GdcBAQ,9641
333
+ transformers/models/megatron_bert/modeling_megatron_bert.py,sha256=8UglDfPpqfCx9MDWoG1TG0-M-R65kXbTH0qb1lTs2Fc,78672
334
+ transformers/models/mmbt/__init__.py,sha256=oKjujs58kqQU1rd0pKmdk3BVoTE3Y0lD2UPtJN5k2ck,1360
335
+ transformers/models/mmbt/configuration_mmbt.py,sha256=2XO4AtN7FORz_C4DFB5jcey3ej8Ne--nBLILTt_FlAk,1654
336
+ transformers/models/mmbt/modeling_mmbt.py,sha256=UdF3xGAeZPQa7YBSmfXsXn2BFDOmwvnqswhHkhg-Deg,19250
337
+ transformers/models/mobilebert/__init__.py,sha256=pTyay2Q8edgio0nCKPPPGSmTuIkUoujHGEArTzwtfSY,3847
338
+ transformers/models/mobilebert/configuration_mobilebert.py,sha256=9S8PazcjFWLGS1EwhoaLYI5aw6LpZCdlwoGAdZBot20,7804
339
+ transformers/models/mobilebert/convert_mobilebert_original_tf_checkpoint_to_pytorch.py,sha256=BuvaIR8XQQ0qxhqS6YLvJi8xTtLurInIjq-oX8Rz3yQ,2172
340
+ transformers/models/mobilebert/modeling_mobilebert.py,sha256=TpYisJ0_g_GtP0_OtelIVJ0DMYKjn0JufUYyaYrR2M8,66006
341
+ transformers/models/mobilebert/modeling_tf_mobilebert.py,sha256=EQUO46rqydkCe1Wmjl_Pv6DgMA0b5CMp5GkjoLmdHCQ,76628
342
+ transformers/models/mobilebert/tokenization_mobilebert.py,sha256=x1j_FozyvSuL6M25gOBVi0mapuqN-do7jdHPgfg84Qw,1729
343
+ transformers/models/mobilebert/tokenization_mobilebert_fast.py,sha256=w_PmDTnuGZuOT6wo0R0Kuyc-3gYT4FbZYlMGPsfyCE4,2094
344
+ transformers/models/mpnet/__init__.py,sha256=z93A_sDkyFENbpDASU-c0IahNLgZQ6oCyytI__rzZio,3224
345
+ transformers/models/mpnet/configuration_mpnet.py,sha256=Z37GxUh0QYaJ5D2xbyTJk9iMMpzkuQBACWGyBQ-mBSE,5659
346
+ transformers/models/mpnet/modeling_mpnet.py,sha256=UsIbL1FaF1aU8DFBRcqhtk3BSjqV7lTvnrlzUoFzuP0,41065
347
+ transformers/models/mpnet/modeling_tf_mpnet.py,sha256=oCSNAWZzXNyA0-qD9t-tqLw-4bKMB02ETc4IhsNdI7Q,55732
348
+ transformers/models/mpnet/tokenization_mpnet.py,sha256=XgFHruMI-lBd9s9-Tv8dZmDh6YoM-t_i1V6mJjTQL9A,22284
349
+ transformers/models/mpnet/tokenization_mpnet_fast.py,sha256=_qBHq94iKpu4npvZFkFjhce7rIKuKY7N22TI3u3RlIQ,9053
350
+ transformers/models/mt5/__init__.py,sha256=BpRQBwIvoDRY-RYkiwRxUHuefHthVL8kACYf4_4_Ji8,2221
351
+ transformers/models/mt5/configuration_mt5.py,sha256=pWs4pj25oQPyjbGOSjjS83ZCo1jIHqWHZ-7WjYGLCs0,5595
352
+ transformers/models/mt5/modeling_mt5.py,sha256=_-v2WcfNZscy_vhiMh_cYjjRfK3qd-gJ3zpv4jfBlw8,4250
353
+ transformers/models/mt5/modeling_tf_mt5.py,sha256=rqTMVqQjhG9aWwtqb-oPa2MSUerLr9uZxYeTsMO8riQ,3614
354
+ transformers/models/openai/__init__.py,sha256=Kuu_25SnPCLTik1aphtT0t9z0vEHcB8imx54lnIPLzg,3011
355
+ transformers/models/openai/configuration_openai.py,sha256=O3IMC-CClGaDXuiMUu2VlngEnrG1RsrgoI1gIkeIj4s,8395
356
+ transformers/models/openai/convert_openai_original_tf_checkpoint_to_pytorch.py,sha256=eY_jCzyK6B4SqOkD1SxrEJTjT08d4pPmziROb6vGHy4,2673
357
+ transformers/models/openai/modeling_openai.py,sha256=XXfjZJk49RwEomzSsbexpz8IMdqPcjPv6niodMkVtKc,35876
358
+ transformers/models/openai/modeling_tf_openai.py,sha256=PyfzXvlJ2GxJ-5ABM7jjjqk6vR5w-_Nkj0ZG-g7YYp8,41855
359
+ transformers/models/openai/tokenization_openai.py,sha256=7iPywP1lGU2kiLR9DGsI2y2vf9jxb7UllJE0FGN8Xeo,8507
360
+ transformers/models/openai/tokenization_openai_fast.py,sha256=-fV3Ltk1sc4Ak0fk9zD3FJXOPTu-Tbboa0j13k5Asl0,3085
361
+ transformers/models/pegasus/__init__.py,sha256=xMRWkit1zpQ0Gd6M1IN12ZzH9TfG0jmE7rcKMPDe4Bk,2575
362
+ transformers/models/pegasus/configuration_pegasus.py,sha256=BztBPBbZaSIW6c-BcB65eRfVuJY4bBQrByC9jrNOWIY,8344
363
+ transformers/models/pegasus/convert_pegasus_tf_to_pytorch.py,sha256=LgO0UB7YBsgZdJYPylRrLbUnRf8k3eRDKaw0JA8Glp8,5362
364
+ transformers/models/pegasus/modeling_pegasus.py,sha256=TUhLOW61xO53rwkPcTM_wB2VvfNXtwecj7HianhGpqw,73813
365
+ transformers/models/pegasus/modeling_tf_pegasus.py,sha256=-heh1S459xL_BDiGV8_CmeTFkAG1EF6A0Brj-OAWDgc,73429
366
+ transformers/models/pegasus/tokenization_pegasus.py,sha256=StexgdFd8CpLUPFUoagzddPhfLjrQNGkMgtJ6h8cRRE,13079
367
+ transformers/models/pegasus/tokenization_pegasus_fast.py,sha256=_T4R4TCDQi07dT921tLlOxEhzsjSDnrOUXeeKPdCtiw,9639
368
+ transformers/models/phobert/__init__.py,sha256=_V2Ueixxil_f1RBXGUwvMzf_6Yaw1lCKqYkzTIAgS-I,1116
369
+ transformers/models/phobert/tokenization_phobert.py,sha256=4TEecbfM4X8VrJq57Y9wLKcd0pmMyVIhbhwNmkTN4m8,13718
370
+ transformers/models/prophetnet/__init__.py,sha256=bIUI8PIPJkFOQAxFZBghnDMnsPj0f2_ZnwRYICtB4Ak,2031
371
+ transformers/models/prophetnet/configuration_prophetnet.py,sha256=-AnExbIck6EB4ROTThlTg6Adqa4JAbANbqbNvQMz_bU,8652
372
+ transformers/models/prophetnet/convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py,sha256=_oqlYxlvWPR_9BwJBmEQ58B1bg4P-yf3Sko9wX3lP40,7055
373
+ transformers/models/prophetnet/modeling_prophetnet.py,sha256=5z736aQsgS9bJ86VpYQtjQ4LXJHslabn20gYBh7CL8M,113056
374
+ transformers/models/prophetnet/tokenization_prophetnet.py,sha256=xakQyVlV_m4NqRE0r1dHJKlrwkNsROVDPPEMHDDTz_U,12509
375
+ transformers/models/rag/__init__.py,sha256=a0u0DIF98xLoV5VoTAHy24ZD9mM7I7NhqhaZPW3fACo,2052
376
+ transformers/models/rag/configuration_rag.py,sha256=GOnhkJ6WGHw77pMpN6xf4SpZ8lzyS9xXaOs64DVC6EA,9289
377
+ transformers/models/rag/modeling_rag.py,sha256=3EwNUQozuuvAL0d7KF79MrF7Do3yMjpRUXgx2fN_XxY,89845
378
+ transformers/models/rag/modeling_tf_rag.py,sha256=Su0pPCrQ0NmLUm5cWMYSb5K5qr8Ba1tzi6c0VCYtTmQ,97101
379
+ transformers/models/rag/retrieval_rag.py,sha256=xMTDJwBDUOw01X_9yHpK_wfGXg6uxwj4sNE9n-PN08k,29159
380
+ transformers/models/rag/tokenization_rag.py,sha256=08-s-tiVRhB0Yqxoea1Ep-gOaL2R7z2ia91773xXBD8,4898
381
+ transformers/models/reformer/__init__.py,sha256=nNXoNdsf884qBloou0TQCFapCiLv898FN_k4CYzyMck,2499
382
+ transformers/models/reformer/configuration_reformer.py,sha256=bNJM7XsYQSmk_ZHPoBBVVHQ7LPg3z0W14Gmcw_efIaM,13521
383
+ transformers/models/reformer/convert_reformer_trax_checkpoint_to_pytorch.py,sha256=DcGRSF2im7h_TO8l0rFBk66d_6OhngYeSfyrGyutWuY,7790
384
+ transformers/models/reformer/modeling_reformer.py,sha256=7VWx7_MXLlQxjgtstNw7IgNSo2PRafS-wbTrgtq2PVY,110884
385
+ transformers/models/reformer/tokenization_reformer.py,sha256=gBxaBZmJoEFHjzk2P1xODP3O32uL10pjhINcgIPuJg4,6679
386
+ transformers/models/reformer/tokenization_reformer_fast.py,sha256=EMGT7cCWJFbD82G4IE59N4Tx-7GhJ54oXAdvO3XUXmU,4552
387
+ transformers/models/retribert/__init__.py,sha256=Db7873ksFIGPFJx1nEz7xGE8EHhsNnaFFKd7vdQnuWk,1976
388
+ transformers/models/retribert/configuration_retribert.py,sha256=W_myqgj6wt7Mfjyp2lh3sAKZKySDTHb5cyv2mC7eLHI,5357
389
+ transformers/models/retribert/modeling_retribert.py,sha256=98oQX_Bgy6vblcsDOC34IPY6UAYBeJPbPrwEuFHW8xQ,9449
390
+ transformers/models/retribert/tokenization_retribert.py,sha256=BcwImDIPRPUC9gtyTbhS9ebH2ucBmdTeVSyK-Zq1Xhg,1884
391
+ transformers/models/retribert/tokenization_retribert_fast.py,sha256=cuuATXGrP06EBv8m97qCcT1-ofFoogVHhSVuvzTrvss,2264
392
+ transformers/models/roberta/__init__.py,sha256=tulPJ-NSPHTfoK_NdcwhQV3EkPpVI3gzJmfxHAEEc2Q,4054
393
+ transformers/models/roberta/configuration_roberta.py,sha256=gRzEkedL_dw6shbgbr4GuoYVPb4VbelA3mdiqfevF_I,3454
394
+ transformers/models/roberta/convert_roberta_original_pytorch_checkpoint_to_pytorch.py,sha256=MmHtq9AhcXXd-V8Fz0XWC8n-PL-S1MSdFhTCVM6Cksk,8002
395
+ transformers/models/roberta/modeling_flax_roberta.py,sha256=ZvxHbBTTFS6ecp4cmRuCPq49uclwal48HQR77iZbmAI,37120
396
+ transformers/models/roberta/modeling_roberta.py,sha256=t_D8BPYqIHs6nQJ2xeVo0viiR79tTIN-Kc4prpFAKd8,67588
397
+ transformers/models/roberta/modeling_tf_roberta.py,sha256=lH0JfsAIKSHvVr46PhLKPLKWvW7xElGENUOiGg4tofs,59857
398
+ transformers/models/roberta/tokenization_roberta.py,sha256=inJ9xhlSsMcv2cTQ_apS6Xuk8GkvwsFXPgWKc82aJNg,12006
399
+ transformers/models/roberta/tokenization_roberta_fast.py,sha256=uxe_9O6ANxFLw1t0gaFfn9iUiTd6X4WaEFSHS3ahKdE,10916
400
+ transformers/models/roformer/__init__.py,sha256=6d-ATp-OlOjDenpwvwYkMZnpq1Y1mZEtX2os6yfhaSE,3512
401
+ transformers/models/roformer/configuration_roformer.py,sha256=dBFEhEwM-ebYom_a2_R23fhBlcCfJRvg4G-2E62Uox0,7442
402
+ transformers/models/roformer/convert_roformer_original_tf_checkpoint_to_pytorch.py,sha256=wXcik_5PQnfFOCQKCk7SEdWIOshUA8Fn-bus3FLcxCg,2212
403
+ transformers/models/roformer/modeling_roformer.py,sha256=voeQwiUx1Q1ck7vKRpAGYs3oUjYOiTBIMpai2rbbzAM,67008
404
+ transformers/models/roformer/modeling_tf_roformer.py,sha256=r1IVhCoowUm-n9nu8yOoOiKVqdjQOfwm81zGJnqFZo4,67531
405
+ transformers/models/roformer/tokenization_roformer.py,sha256=t-nm-1Yjrb5PgosmQZ2hXtj0qKsucK2TFAoQGeZMv8k,14478
406
+ transformers/models/roformer/tokenization_roformer_fast.py,sha256=IgBPYrE0KsD-HrKU1MusxY_CHRFcTcis161OgpPpRZk,8273
407
+ transformers/models/roformer/tokenization_utils.py,sha256=LGRWPOJf2U2YLOpsjzaR6Y1NtWNjHliHIKfVSd6ZuNw,2651
408
+ transformers/models/speech_to_text/__init__.py,sha256=1qy--WQ03zDxoF-kCi2RkZz_hmXa3DmaJPfXXdSzOow,2527
409
+ transformers/models/speech_to_text/configuration_speech_to_text.py,sha256=s1hj5wJouybXduJcSQ3bwfKYYYQhZxXZxYlaqHorGl4,9928
410
+ transformers/models/speech_to_text/convert_s2t_fairseq_to_tfms.py,sha256=asrE04TN3OWd5yT7L9yDqPYhuNqB82FGw56L5VbG_EA,4071
411
+ transformers/models/speech_to_text/feature_extraction_speech_to_text.py,sha256=rbtDO7OqjF5FGTGjwmDPU89Pvxy6n_Qx-BZiI58owls,10108
412
+ transformers/models/speech_to_text/modeling_speech_to_text.py,sha256=BOB1D16o00hhEBWZZz1awYROb_ozU63cQp_kbkkbRPM,64733
413
+ transformers/models/speech_to_text/processing_speech_to_text.py,sha256=4cZZgbmP1Crbc9XYGJX5Z72PYHOxlY8mnxw4Pbyv7iU,6929
414
+ transformers/models/speech_to_text/tokenization_speech_to_text.py,sha256=m3zNK0xuOS37FkcsZfmlw5DzQ3IX2vAQYlyXwWOAmTQ,10949
415
+ transformers/models/squeezebert/__init__.py,sha256=4gCRM4FVU2XKhsSf_VbYxyxLUoDqkc-KxgpsZDWrRWM,2510
416
+ transformers/models/squeezebert/configuration_squeezebert.py,sha256=M4M57OqBRwIe_Y71ecT9hUKrz9CO-bxiB_yFDcErBKE,7210
417
+ transformers/models/squeezebert/modeling_squeezebert.py,sha256=dCooKrhCdCE2osHmCVIUaoycPcZdPGaxsoZt4NsK2Wc,44244
418
+ transformers/models/squeezebert/tokenization_squeezebert.py,sha256=-sWtxf57A52qlfS7XwIfewRppAr8OftrxfV0xEs8UsM,2337
419
+ transformers/models/squeezebert/tokenization_squeezebert_fast.py,sha256=JXQVbkOSiaEgFtMvaWLG6M2cVL22y85crB4sTNGt6E8,2989
420
+ transformers/models/t5/__init__.py,sha256=-u3A6ueGC8rpxKjcKlrsRlDooSboWYssZLWrcHxq2IE,3042
421
+ transformers/models/t5/configuration_t5.py,sha256=Vg7u9e8DWojoz_PYZkrNUiL_O23x5eXgbjRqpyvQDZ8,8940
422
+ transformers/models/t5/convert_t5_original_tf_checkpoint_to_pytorch.py,sha256=hTwfWSLeVT9i8l8lRQ9ofEv2ntGTvgfM0UVZsA3LvCk,2107
423
+ transformers/models/t5/modeling_flax_t5.py,sha256=4RMRMKJ4ptzOkIBBYEZxEy4-u4xS8TNJM9cdslihjK4,68939
424
+ transformers/models/t5/modeling_t5.py,sha256=EQcKb0BnOTx8z1yRuDYG7fx3comU4Uj4zjASFFCzZdE,81418
425
+ transformers/models/t5/modeling_tf_t5.py,sha256=Q9HTjJebfchBEUEzGLwunueb_377OJbfdGGJqAjGiiY,74076
426
+ transformers/models/t5/tokenization_t5.py,sha256=qjvLzzMh5lCIkZtyPv4Y5e-vEjTpQnnvZQR9GyJU9KM,13155
427
+ transformers/models/t5/tokenization_t5_fast.py,sha256=bO8fzOuAZXWTI1o1mqiA1UisIZdXJ0aLs7CY_b5C5Wo,8622
428
+ transformers/models/tapas/__init__.py,sha256=jBUhjUUwsiSqjk1AgeP6XkxFMNkdRJffB9N_3AhHVxE,1879
429
+ transformers/models/tapas/configuration_tapas.py,sha256=ovPtzJgLjW4ppDSDvcocRVGmiXv9XoizwtdbKY9T2w8,13459
430
+ transformers/models/tapas/convert_tapas_original_tf_checkpoint_to_pytorch.py,sha256=UDEBfWljuOIQexm-N3pChKLyg7gx-IQEsSHW7JomQ9s,5093
431
+ transformers/models/tapas/modeling_tapas.py,sha256=0UqlVo_UmZukhObFmIgSgNOgiAwYgrrfoQsPq0S5Od0,106883
432
+ transformers/models/tapas/tokenization_tapas.py,sha256=yFO0S_mB74wUIUVQ1puI9BTy6QeokM7TXohPEgX80Y0,119284
433
+ transformers/models/transfo_xl/__init__.py,sha256=uYpLphZEHNnLiRsbCNEYoRBLdhO3doyW9rETM69JDZY,2808
434
+ transformers/models/transfo_xl/configuration_transfo_xl.py,sha256=2bODKg3NEjwIXbjMiZ22SrsFKINOBR_C2-K9_G07ENs,8128
435
+ transformers/models/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py,sha256=lAVYBSr2ab3uDAhMw_8n33-4aoDyYQO9A-PNcAU0b5E,4923
436
+ transformers/models/transfo_xl/modeling_tf_transfo_xl.py,sha256=4OYvrmRvue_Ae5eZsNCHP2B80FZ74rJpf8iz5kxi4OY,48370
437
+ transformers/models/transfo_xl/modeling_tf_transfo_xl_utilities.py,sha256=iwocQ-kpV_dqAnUB6jPv76yHatVUYEXHPem3XFDAQAk,7588
438
+ transformers/models/transfo_xl/modeling_transfo_xl.py,sha256=iXD4CfbOyV4C4a8KM5IXMKMEq3LTquC4F4KdUqVQSlE,53495
439
+ transformers/models/transfo_xl/modeling_transfo_xl_utilities.py,sha256=sheazQjoBvIksNMffYTADjaMC6X1gSWr9hR_zQpCjUY,10694
440
+ transformers/models/transfo_xl/tokenization_transfo_xl.py,sha256=ZQmNR3exoYZboIxYs7HZsQSn386dEZYASkQRx6Rlclg,30667
441
+ transformers/models/visual_bert/__init__.py,sha256=flKX03g9KU9mf6v2LWDlWCeJMVAJZHjGnDOJBtUYTdc,2116
442
+ transformers/models/visual_bert/configuration_visual_bert.py,sha256=4wsh4LrnXNbGQ3lPdBkP4qxqPy0zaXhpGh2K2iB7cNo,8153
443
+ transformers/models/visual_bert/convert_visual_bert_original_pytorch_checkpoint_to_pytorch.py,sha256=BpXgEZ-5LdGIa0NK6BDZd_5VhKCqeWuu2oOQyUqcSRQ,5158
444
+ transformers/models/visual_bert/modeling_visual_bert.py,sha256=Rc43knmNLD6uI66vyZNK0fTHEOosTwrjhcopW-quAr0,67162
445
+ transformers/models/vit/__init__.py,sha256=rdATzgA1CfceD2RFwYyfD_Bs-OcSkAsXA0jsIINjPug,2159
446
+ transformers/models/vit/configuration_vit.py,sha256=LZ_aYYvTfKkd2HtwUD9w0Hk4Q_wNpUfr3j4JcFRGKy0,5272
447
+ transformers/models/vit/convert_vit_timm_to_pytorch.py,sha256=83K2Hli6kDDtSbzZVHMIAw9jZ0AAcukDUJMAq4Aiez0,9959
448
+ transformers/models/vit/feature_extraction_vit.py,sha256=-s5cwMYhHyov9LXIh145JIyWxiWlGHmaUJ-5uIA7vGI,6646
449
+ transformers/models/vit/modeling_flax_vit.py,sha256=vV__cL9a9Lguh2_ptAfjwtqavCJKdq6z8o-TZl-d-HY,22844
450
+ transformers/models/vit/modeling_vit.py,sha256=I4lK4yN84p206VqA-JjCleac2ctKQzZf0X3KGro_ygw,24782
451
+ transformers/models/wav2vec2/__init__.py,sha256=sR97F-tPBYyac6sgjD7rWD_jCIZgUrFVvOEnNQxTHGI,3145
452
+ transformers/models/wav2vec2/configuration_wav2vec2.py,sha256=rqIJsWPvw6xnHf_Q3EMn4Pbg28n0GLgMswKvA5ozwm8,15046
453
+ transformers/models/wav2vec2/convert_wav2vec2_original_pytorch_checkpoint_to_pytorch.py,sha256=jzAZ6w7PsLx5gS--97KRVqu9GUB9RAA3N4DRHdVLm5E,10612
454
+ transformers/models/wav2vec2/feature_extraction_wav2vec2.py,sha256=laPduyLSgI7_o-U2YWbuC1o8fMAKJGJvmrNKWK7Y1g0,9625
455
+ transformers/models/wav2vec2/modeling_flax_wav2vec2.py,sha256=tPCN8hMXDXb0wgxue04ylUdMSA62AahUxubGiE6zXT0,50889
456
+ transformers/models/wav2vec2/modeling_tf_wav2vec2.py,sha256=aMx22BkJ_zmhDde-CDmnknaYmp0rpAHfARBZdR-8vS4,68358
457
+ transformers/models/wav2vec2/modeling_wav2vec2.py,sha256=l2N3HuSaCTE6Ew5RI3gwunz_2M8MYpBks1Hik2Lifsg,66594
458
+ transformers/models/wav2vec2/processing_wav2vec2.py,sha256=XIwqEzES6C3KFQ_psUdZAx3xzWTfcG_FWDlXg8toUg8,7492
459
+ transformers/models/wav2vec2/tokenization_wav2vec2.py,sha256=H_n0WsQXC2w8uMmxw4nUBoiHouhRMWwea9I-K7V_QTA,24737
460
+ transformers/models/xlm/__init__.py,sha256=k9072VbB3P_guUxyLYurfp3i-i_742Hhv2AB9zfXUBg,2886
461
+ transformers/models/xlm/configuration_xlm.py,sha256=PQghZwWIoZHY_1Zpb-372ex47yjXiMbU_VrlyXX-7yM,11891
462
+ transformers/models/xlm/convert_xlm_original_pytorch_checkpoint_to_pytorch.py,sha256=ixvKniYiujC5ZQCW3xYpAcaUSWTFKJTILkqoxX6374A,2981
463
+ transformers/models/xlm/modeling_tf_xlm.py,sha256=VhnIH5d3_iRBWPk_fxer0oEjq74kJlXaTZqeBr5TYgc,59142
464
+ transformers/models/xlm/modeling_xlm.py,sha256=F7pqqiUZg-7ewEp_R0Wxdc6O02DLar3mmmfDtXP_jUY,53247
465
+ transformers/models/xlm/tokenization_xlm.py,sha256=RbOclBOmU7Osy6PRQUxKkyt317mrjgJkzS1D76gjzzg,34429
466
+ transformers/models/xlm_prophetnet/__init__.py,sha256=6L2lF2laIfmnFUQgjiqWlX6Z2NINTURmFYVw2M5FJaI,1360
467
+ transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py,sha256=p2_MrOWHYRk9EoKvhunKbGlrJhReI67IzWbXkqmcG0s,1262
468
+ transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py,sha256=NuHvNMLA48Dp16WvauAWdRatIOO3e2BcY0AxUze7Vqk,7309
469
+ transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py,sha256=5Ac0wV1ZGTVPjhWhu9v1vOZ_mOgK5yiHPqqxvAt_RC4,13840
470
+ transformers/models/xlm_roberta/__init__.py,sha256=bzi2itEbJxOmtX3z-cTUInyi78i9-q7ZQAgluZhmZcw,3477
471
+ transformers/models/xlm_roberta/configuration_xlm_roberta.py,sha256=a5oPK_7AkzVvQLDV3PelW1XfT4P4f2MQaeJxXi5IN58,2620
472
+ transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py,sha256=jYZyJRO_SxqQyvs9oLZY0sdvaBlFFlQatPO_sanhmHo,6353
473
+ transformers/models/xlm_roberta/modeling_xlm_roberta.py,sha256=qURTcMrDr7VyG4hsQu0TojmZXtVJj29Z1fWIczQc_pw,5877
474
+ transformers/models/xlm_roberta/tokenization_xlm_roberta.py,sha256=YlJJLNWLfRc7MKQhaJzE5R5uuZQUUGzOQhfrvmww1nA,14051
475
+ transformers/models/xlm_roberta/tokenization_xlm_roberta_fast.py,sha256=NemGhCUw3gmt1TSkPSYSs8A3XTjQJ91rfGEyajPEITQ,9965
476
+ transformers/models/xlnet/__init__.py,sha256=ZOms6ohgB2FUsXhd2qxHQZZAjF4mUKk_Jo-BKWfE3PM,3421
477
+ transformers/models/xlnet/configuration_xlnet.py,sha256=lgX39bUkziVb4TcTvPKxSz2o2V0oHTfitfC3XErmSGc,11248
478
+ transformers/models/xlnet/convert_xlnet_original_tf_checkpoint_to_pytorch.py,sha256=3ozPE5V-X4QuheBEt1JzISTO22RrOHgKcVC8qL5HIAA,3695
479
+ transformers/models/xlnet/modeling_tf_xlnet.py,sha256=aUoiK307bvGvX0RftNTVfV7v1DnGcotYNwx12YLwQBI,81280
480
+ transformers/models/xlnet/modeling_xlnet.py,sha256=6EKSptAt-adpjO0YKqYz_9E3vI44uO8zYj8QkwG3EMw,91661
481
+ transformers/models/xlnet/tokenization_xlnet.py,sha256=cHoHZYvhLo2dsEbPqBbumLjpT6RHq7qDdYj6Pi89P70,14406
482
+ transformers/models/xlnet/tokenization_xlnet_fast.py,sha256=6DqHIr5Mv70OTPYqVZPjbXGVgu3bOwvDzTuW1zWbeX4,9944
483
+ transformers/onnx/__init__.py,sha256=rRHBpjRxesvUHnf2NTUJL16L9-SVGNkR1YmqRBkuDLE,829
484
+ transformers/onnx/__main__.py,sha256=KrSH8cy16dJKz_oeFHDBmvytrR65FNtC5weTAIZNiOg,5822
485
+ transformers/onnx/config.py,sha256=Kt7U7CUc4ZAX5LrdyCGATQsYKwK9IpCbdNFLE9u-qnM,7860
486
+ transformers/onnx/convert.py,sha256=eQGmvm1eJUfkjcVUFWCIBaiPVG7sJRdxSvbvSc5qd6c,8698
487
+ transformers/onnx/utils.py,sha256=bElAB-C3AN2l2A8lwUkG4purxXkEGBWStCM0AXVwREQ,2377
488
+ transformers/pipelines/__init__.py,sha256=ZUNpX9UwRFgTWzrMjpol48vWWUnkKic9W1X57d3FlhY,24467
489
+ transformers/pipelines/automatic_speech_recognition.py,sha256=S2LU1tl8-b1zrXgiqX7EU5DH6sshGNgCdzNuovSOADo,6526
490
+ transformers/pipelines/base.py,sha256=hYjw8-7Oti5PYkfkFCnU9y3814r2rO3i-SEM9JNWtts,30282
491
+ transformers/pipelines/conversational.py,sha256=akS3oT73dsJIdZHyXO_QKq5TEq3KXwfpv6Mj4zd_EyM,14841
492
+ transformers/pipelines/feature_extraction.py,sha256=y0R5TPrUc_A5tbl-5f1eSUDLt6JIotTof0kTIRpBHLA,3684
493
+ transformers/pipelines/fill_mask.py,sha256=FYeYCauzvK7QY8dn6iwTHxZRZZ2DSe1gTWApXvs3OeI,8780
494
+ transformers/pipelines/image_classification.py,sha256=8WsSma4gYtkPwyweMALIInPQ03npLsMPYmMR7SjZOXo,5155
495
+ transformers/pipelines/question_answering.py,sha256=RjPzKUt6quyA6cOrZRxfMJ8lWOffTaoZzmeGE-nYXR4,24463
496
+ transformers/pipelines/table_question_answering.py,sha256=QBg7iKJdGZwCVybM0xiGdUJtwVkKm2peHE62AWcvVzo,14037
497
+ transformers/pipelines/text2text_generation.py,sha256=SroS3fkdEZ7vxE735M0b-b7MoI8sHAHBhUij5JR6TU8,14756
498
+ transformers/pipelines/text_classification.py,sha256=LwWE0GL5BWrOv10aquipEXCuY56q9LRBz4aC52uVqFo,3194
499
+ transformers/pipelines/text_generation.py,sha256=6WDJi524pD9GdquT1q6TQko0frLVYs9IGCBp7bWDqUk,8974
500
+ transformers/pipelines/token_classification.py,sha256=e8tOlIsiHmxEjxXK8AXL7gZoG0KwJkdjYDclo_9n9ik,19373
501
+ transformers/pipelines/zero_shot_classification.py,sha256=rEkjPSHYE8Il-bTwyGTVDwVRACug-zHzde1Ag80eTfM,8455
502
+ transformers/sagemaker/__init__.py,sha256=WmEfdtVOQN3cizez_7qbfK_hVmDE1oTqQhV0Q7fNVuM,901
503
+ transformers/sagemaker/trainer_sm.py,sha256=7GsKLtjdMfKp98OwHD7RcBsl745OOwHAaBswkfLkfsE,1044
504
+ transformers/sagemaker/training_args_sm.py,sha256=fREG6PvrbXe0rmTiUHO0eAgISz7qAWXf5Ei3Ala2m6Y,4926
505
+ transformers/utils/__init__.py,sha256=pxGlUMJU0WSxDi6ULwroVNk8hgByUoEXqrCx22mnDPk,1520
506
+ transformers/utils/coco_classes.py,sha256=48U3Klkr1VryAxpimugM6YTRqhRXpK1u7X4btPXbjPs,1715
507
+ transformers/utils/dummy_flax_objects.py,sha256=JUvMktNEF-zUMxyf6se2i2uJ79p6R9zP1S64-oakZqI,19181
508
+ transformers/utils/dummy_pt_objects.py,sha256=NAaDJ6t2ZTMG5Nhy9pEfn27OuhMV2G7uXRPc6dZDCGU,88488
509
+ transformers/utils/dummy_sentencepiece_and_speech_objects.py,sha256=Vh24cqmfXyyo2XtduItNfznyVtP62-TYOSWVZaEmmaY,376
510
+ transformers/utils/dummy_sentencepiece_and_tokenizers_objects.py,sha256=99nhSTTd-ghn6A2Rxe8UOl18MsajH1y6KFXuyy07WhU,278
511
+ transformers/utils/dummy_sentencepiece_objects.py,sha256=Zzk98SIWHNWIEMMYycmBTP6IKNnxygyg2d4vzNFVaoE,4089
512
+ transformers/utils/dummy_speech_objects.py,sha256=j2XILitMMdU0AEtewjINfTUKfD3Qv2P2WSCGBizImaA,241
513
+ transformers/utils/dummy_tf_objects.py,sha256=HFmjmxg61GUb9cYtHS8bU-MAufWwJWLu3zYvMsDJ_eA,47447
514
+ transformers/utils/dummy_timm_and_vision_objects.py,sha256=Vu9aXQBtBXMIq9x91oYtajP2yJt6VYX6iNdzjM5c2PQ,1108
515
+ transformers/utils/dummy_timm_objects.py,sha256=LVLYwLIWD-7ck2WMJJYwxIWGiMwhRzIENBpE40YnPPw,810
516
+ transformers/utils/dummy_tokenizers_objects.py,sha256=BkWRVCqQPcd41jB4ecIEOEFKIFcQCscNJ9pYYMoFf9g,8684
517
+ transformers/utils/dummy_vision_objects.py,sha256=t_FHiZIy_gKDeChR9BtQVSyMW-VbzHPVa3R8kVn0D_E,916
518
+ transformers/utils/fx.py,sha256=8pdtfR560ZwOXlL0xTDmwOLjellDEjBrph9-tkfWQdk,14869
519
+ transformers/utils/hp_naming.py,sha256=kTCCyv7RT8cQJ3rb_o7MLtO3yhN0bcG72ZzN2M2mcOw,4971
520
+ transformers/utils/imagenet_classes.py,sha256=VHr_mLGsXZ6LWxC8N8dff0WkRbHoQ2NWz3DtDm52uSg,33616
521
+ transformers/utils/logging.py,sha256=huC6tvT0RixnkTdfcIsPcREVN0NoJYKrDS0Qkev4R90,7701
522
+ transformers/utils/model_parallel_utils.py,sha256=seImhvNcDKwtWL6-G7wPBZOw5Q2m6ZPLZvzSePidV2Y,2186
523
+ transformers/utils/modeling_auto_mapping.py,sha256=XXbRSLCxlgStQqz1dWcXPJiTUvQ6F1xJAIGyFtdGaOs,16415
524
+ transformers/utils/notebook.py,sha256=3aA2tIbtdiCoyLo4wDZ6w5MY7vqJ6_EbwztGkN4n9qw,14431
525
+ transformers/utils/sentencepiece_model_pb2.py,sha256=X9U2bJld-kTtVXLB_EVdSc3AVubf9_s1At9WXyA_JP8,39607
526
+ transformers/utils/versions.py,sha256=LH0KEy0FXVeyE7pv6LR-lBlVqVJUBy55KNpmiHWO2hY,4381
527
+ transformers-4.9.1.dist-info/LICENSE,sha256=d_1HEN757DwPYiWADgI18VpCWr1KiwNVkSf814JhIEk,11418
528
+ transformers-4.9.1.dist-info/METADATA,sha256=F3ivBbwrRTNdbyYmGutYGFyd0MsgZYbVKXPhvcaNbds,49509
529
+ transformers-4.9.1.dist-info/WHEEL,sha256=EVRjI69F5qVjm_YgqcTXPnTAv3BfSUr0WVAHuSP3Xoo,92
530
+ transformers-4.9.1.dist-info/entry_points.txt,sha256=NC_VjQxHu59c5WStu_7imUSlBjuk86IvLxhEtlrO-2k,82
531
+ transformers-4.9.1.dist-info/top_level.txt,sha256=GLBaeTo_CSdhnHvbxQ0kzpEHdlLuA_33foIogaWxntI,13
532
+ transformers-4.9.1.dist-info/RECORD,,
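Each RECORD row above follows the wheel manifest format: path,sha256=<digest>,size-in-bytes, where the digest is the file's SHA-256 hash encoded as urlsafe base64 with the "=" padding stripped (the RECORD file itself carries no hash or size). A minimal Python sketch for recomputing one entry's hash (the path argument is illustrative):

import base64
import hashlib

def record_hash(path):
    # sha256 digest, urlsafe-base64 encoded with padding stripped, as the wheel spec requires
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

print(record_hash("transformers/utils/versions.py"))  # should match that file's RECORD row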
public/gpt-2/transformers-4.9.1.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: bdist_wheel (0.35.1)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
public/gpt-2/transformers-4.9.1.dist-info/entry_points.txt ADDED
@@ -0,0 +1,3 @@
+ [console_scripts]
+ transformers-cli = transformers.commands.transformers_cli:main
+
public/gpt-2/transformers-4.9.1.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ transformers
public/gpt-2/transformers/__init__.py ADDED
The diff for this file is too large to render. See raw diff
 
public/gpt-2/transformers/__init__.py.orig ADDED
The diff for this file is too large to render. See raw diff
 
public/gpt-2/transformers/activations.py ADDED
@@ -0,0 +1,113 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import math
+
+ import torch
+ from packaging import version
+ from torch import nn
+
+ from .utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ def _gelu_python(x):
+     """
+     Original Implementation of the GELU activation function in Google BERT repo when initially created. For
+     information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 +
+     torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in nn.functional
+     Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
+     """
+     return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
+
+
+ def gelu_new(x):
+     """
+     Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
+     the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
+     """
+     return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
+
+
+ if version.parse(torch.__version__) < version.parse("1.4"):
+     gelu = _gelu_python
+ else:
+     gelu = nn.functional.gelu
+
+
+ def gelu_fast(x):
+     return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))
+
+
+ def quick_gelu(x):
+     return x * torch.sigmoid(1.702 * x)
+
+
+ def _silu_python(x):
+     """
+     See Gaussian Error Linear Units (Hendrycks et al., https://arxiv.org/abs/1606.08415) where the SiLU (Sigmoid Linear
+     Unit) was originally introduced and coined, and see Sigmoid-Weighted Linear Units for Neural Network Function
+     Approximation in Reinforcement Learning (Elfwing et al., https://arxiv.org/abs/1702.03118) and Swish: a Self-Gated
+     Activation Function (Ramachandran et al., https://arxiv.org/abs/1710.05941v1) where the SiLU was experimented with
+     later.
+     """
+     return x * torch.sigmoid(x)
+
+
+ if version.parse(torch.__version__) < version.parse("1.7"):
+     silu = _silu_python
+ else:
+     silu = nn.functional.silu
+
+
+ def _mish_python(x):
+     """
+     See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra., https://arxiv.org/abs/1908.08681). Also
+     visit the official repository for the paper: https://github.com/digantamisra98/Mish
+     """
+     return x * torch.tanh(nn.functional.softplus(x))
+
+
+ if version.parse(torch.__version__) < version.parse("1.9"):
+     mish = _mish_python
+ else:
+     mish = nn.functional.mish
+
+
+ def linear_act(x):
+     return x
+
+
+ ACT2FN = {
+     "relu": nn.functional.relu,
+     "silu": silu,
+     "swish": silu,
+     "gelu": gelu,
+     "tanh": torch.tanh,
+     "gelu_new": gelu_new,
+     "gelu_fast": gelu_fast,
+     "quick_gelu": quick_gelu,
+     "mish": mish,
+     "linear": linear_act,
+     "sigmoid": torch.sigmoid,
+ }
+
+
+ def get_activation(activation_string):
+     if activation_string in ACT2FN:
+         return ACT2FN[activation_string]
+     else:
+         raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
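A minimal usage sketch for the activation registry above, assuming transformers and torch are installed (the input values are illustrative):

import torch
from transformers.activations import get_activation

act = get_activation("gelu_new")  # the activation selected by gpt2/config.json above
x = torch.linspace(-3.0, 3.0, steps=5)
print(act(x))  # applies the tanh-approximated GELU elementwise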
public/gpt-2/transformers/activations_tf.py ADDED
@@ -0,0 +1,94 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import math
+
+ import tensorflow as tf
+ from packaging import version
+
+
+ def _gelu(x):
+     """
+     Gaussian Error Linear Unit. Original Implementation of the gelu activation function in Google Bert repo when
+     initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
+     0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see
+     https://arxiv.org/abs/1606.08415
+     """
+     x = tf.convert_to_tensor(x)
+     cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
+
+     return x * cdf
+
+
+ def _gelu_new(x):
+     """
+     Gaussian Error Linear Unit. This is a smoother version of the GELU. Original paper: https://arxiv.org/abs/1606.08415
+
+     Args:
+         x: float Tensor to perform activation
+
+     Returns:
+         `x` with the GELU activation applied.
+     """
+     x = tf.convert_to_tensor(x)
+     pi = tf.cast(math.pi, x.dtype)
+     coeff = tf.cast(0.044715, x.dtype)
+     cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
+
+     return x * cdf
+
+
+ def mish(x):
+     x = tf.convert_to_tensor(x)
+
+     return x * tf.tanh(tf.math.softplus(x))
+
+
+ def gelu_fast(x):
+     x = tf.convert_to_tensor(x)
+     coeff1 = tf.cast(0.044715, x.dtype)
+     coeff2 = tf.cast(0.7978845608, x.dtype)
+
+     return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))
+
+
+ if version.parse(tf.version.VERSION) >= version.parse("2.4"):
+
+     def approximate_gelu_wrap(x):
+         return tf.keras.activations.gelu(x, approximate=True)
+
+     gelu = tf.keras.activations.gelu
+     gelu_new = approximate_gelu_wrap
+ else:
+     gelu = _gelu
+     gelu_new = _gelu_new
+
+
+ ACT2FN = {
+     "gelu": gelu,
+     "relu": tf.keras.activations.relu,
+     "swish": tf.keras.activations.swish,
+     "silu": tf.keras.activations.swish,
+     "gelu_new": gelu_new,
+     "mish": mish,
+     "tanh": tf.keras.activations.tanh,
+     "gelu_fast": gelu_fast,
+ }
+
+
+ def get_tf_activation(activation_string):
+     if activation_string in ACT2FN:
+         return ACT2FN[activation_string]
+     else:
+         raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
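A matching sketch for the TF registry, assuming tensorflow 2.x is installed; per the version branch above, on TF >= 2.4 "gelu_new" resolves to Keras's approximate GELU:

import tensorflow as tf
from transformers.activations_tf import get_tf_activation

act = get_tf_activation("gelu_new")
x = tf.constant([-1.0, 0.0, 1.0])
print(act(x).numpy())  # elementwise approximate GELU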
public/gpt-2/transformers/benchmark/__init__.py ADDED
File without changes
public/gpt-2/transformers/benchmark/benchmark.py ADDED
@@ -0,0 +1,267 @@
+ # coding=utf-8
+ # Copyright 2018 The HuggingFace Inc. team.
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """
+ Benchmarking the library on inference and training in PyTorch.
+ """
+
+
+ import timeit
+ from typing import Callable, Optional, Tuple
+
+ from ..configuration_utils import PretrainedConfig
+ from ..file_utils import is_py3nvml_available, is_torch_available
+ from ..models.auto.modeling_auto import MODEL_MAPPING, MODEL_WITH_LM_HEAD_MAPPING
+ from ..utils import logging
+ from .benchmark_utils import (
+     Benchmark,
+     Memory,
+     MemorySummary,
+     measure_peak_memory_cpu,
+     start_memory_tracing,
+     stop_memory_tracing,
+ )
+
+
+ if is_torch_available():
+     import torch
+
+     from .benchmark_args import PyTorchBenchmarkArguments
+
+
+ if is_py3nvml_available():
+     import py3nvml.py3nvml as nvml
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class PyTorchBenchmark(Benchmark):
+
+     args: PyTorchBenchmarkArguments
+     configs: PretrainedConfig
+     framework: str = "PyTorch"
+
+     @property
+     def framework_version(self):
+         return torch.__version__
+
+     def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
+         _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
+         return self._measure_speed(_inference)
+
+     def _inference_memory(
+         self, model_name: str, batch_size: int, sequence_length: int
+     ) -> Tuple[Memory, Optional[MemorySummary]]:
+         _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
+         return self._measure_memory(_inference)
+
+     def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
+         _train = self._prepare_train_func(model_name, batch_size, sequence_length)
+         return self._measure_speed(_train)
+
+     def _train_memory(
+         self, model_name: str, batch_size: int, sequence_length: int
+     ) -> Tuple[Memory, Optional[MemorySummary]]:
+         _train = self._prepare_train_func(model_name, batch_size, sequence_length)
+         return self._measure_memory(_train)
+
+     def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
+         config = self.config_dict[model_name]
+
+         if self.args.torchscript:
+             config.torchscript = True
+
+         has_model_class_in_config = (
+             hasattr(config, "architectures")
+             and isinstance(config.architectures, list)
+             and len(config.architectures) > 0
+         )
+         if not self.args.only_pretrain_model and has_model_class_in_config:
+             try:
+                 model_class = config.architectures[0]
+                 transformers_module = __import__("transformers", fromlist=[model_class])
+                 model_cls = getattr(transformers_module, model_class)
+                 model = model_cls(config)
+             except ImportError:
+                 raise ImportError(
+                     f"{model_class} does not exist. If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`."
+                 )
+         else:
+             model = MODEL_MAPPING[config.__class__](config)
+
+         model.eval()
+         model.to(self.args.device)
+
+         # encoder-decoder has vocab size saved differently
+         vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
+         input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=self.args.device)
+
+         if self.args.fp16:
+             logger.info("Running inference in Mixed Precision...")
+             assert self.args.is_gpu, "Mixed precision is possible only for GPU."
+             # amp seems to have memory leaks so that memory usage
+             # is measured using .half() for now https://github.com/NVIDIA/apex/issues/439
+             model.half()
+
+         if self.args.torchscript:
+             with torch.no_grad():
+                 inference_model = torch.jit.trace(model, input_ids)
+         else:
+             inference_model = model
+
+         def encoder_decoder_forward():
+             with torch.no_grad():
+                 outputs = inference_model(input_ids, decoder_input_ids=input_ids)
+             return outputs
+
+         def encoder_forward():
+             with torch.no_grad():
+                 outputs = inference_model(input_ids)
+             return outputs
+
+         _forward = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
+         return _forward
+
+     def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
+         config = self.config_dict[model_name]
+
+         has_model_class_in_config = (
+             hasattr(config, "architectures")
+             and isinstance(config.architectures, list)
+             and len(config.architectures) > 0
+         )
+         if not self.args.only_pretrain_model and has_model_class_in_config:
+             try:
+                 model_class = config.architectures[0]
+                 transformers_module = __import__("transformers", fromlist=[model_class])
+                 model_cls = getattr(transformers_module, model_class)
+                 model = model_cls(config)
+             except ImportError:
+                 raise ImportError(
+                     f"{model_class} does not exist. If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`."
+                 )
+         else:
+             model = MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)
+
+         if self.args.torchscript:
+             raise NotImplementedError("Training for torchscript is currently not implemented")
+         else:
+             train_model = model
+
+         model.train()
+         model.to(self.args.device)
+
+         # encoder-decoder has vocab size saved differently
+         vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
+         input_ids = torch.randint(vocab_size, (batch_size, sequence_length), dtype=torch.long, device=self.args.device)
+
+         if self.args.fp16:
+             logger.info("Running training in Mixed Precision...")
+             assert self.args.is_gpu, "Mixed precision is possible only for GPU."
+
+             # amp seems to have memory leaks so that memory usage
+             # is measured using .half() for now https://github.com/NVIDIA/apex/issues/439
+             model.half()
+
+         def compute_loss_and_backprop_encoder():
+             loss = train_model(input_ids, labels=input_ids)[0]
+             loss.backward()
+             return loss
+
+         def compute_loss_and_backprop_encoder_decoder():
+             loss = train_model(input_ids, decoder_input_ids=input_ids, labels=input_ids)[0]
+             loss.backward()
+             return loss
+
+         _train = (
+             compute_loss_and_backprop_encoder_decoder
+             if config.is_encoder_decoder
+             else compute_loss_and_backprop_encoder
+         )
+         return _train
+
+     def _measure_speed(self, func) -> float:
+         try:
+             if self.args.is_tpu or self.args.torchscript:
+                 # run an additional 5 times to stabilize compilation for tpu and torchscript
+                 logger.info("Do inference on TPU or torchscript. Running model 5 times to stabilize compilation")
+                 timeit.repeat(
+                     func,
+                     repeat=1,
+                     number=5,
+                 )
+
+             # as written in https://docs.python.org/3/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
+             runtimes = timeit.repeat(
+                 func,
+                 repeat=self.args.repeat,
+                 number=10,
+             )
+
+             if self.args.is_tpu and self.args.torch_xla_tpu_print_metrics:
+                 import torch_xla.debug.metrics as met
+
+                 self.print_fn(met.metrics_report())
+
+             return min(runtimes) / 10.0
+         except RuntimeError as e:
+             self.print_fn(f"Doesn't fit on GPU. {e}")
+             return "N/A"
+
+     def _measure_memory(self, func: Callable[[], None]) -> Tuple[Memory, Optional[MemorySummary]]:
+         try:
+             if self.args.trace_memory_line_by_line:
+                 trace = start_memory_tracing("transformers")
+
+             if self.args.is_tpu:
+                 # tpu
+                 raise NotImplementedError(
+                     "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking with `--no-memory` or `args.memory=False`"
+                 )
+             elif self.args.is_gpu:
+                 if not is_py3nvml_available():
+                     logger.warning(
+                         "py3nvml not installed, we won't log GPU memory usage. "
+                         "Install py3nvml (pip install py3nvml) to log information about GPU."
+                     )
+                     memory = "N/A"
+                 else:
+                     logger.info(
+                         "Measuring total GPU usage on GPU device. Make sure to not have additional processes running on the same GPU."
+                     )
+                     # init nvml
+                     nvml.nvmlInit()
+                     func()
+                     handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
+                     meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
+                     max_bytes_in_use = meminfo.used
+                     memory = Memory(max_bytes_in_use)
+                     # shutdown nvml
+                     nvml.nvmlShutdown()
+             else:
+                 # cpu
+                 memory_bytes = measure_peak_memory_cpu(func)
+                 memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
+
+             if self.args.trace_memory_line_by_line:
+                 summary = stop_memory_tracing(trace)
+             else:
+                 summary = None
+
+             return memory, summary
+         except RuntimeError as e:
+             self.print_fn(f"Doesn't fit on GPU. {e}")
+             return "N/A", None
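For orientation, a hedged sketch of how this benchmark class is usually driven through the library's public API (the model id and sizes are illustrative; fetching the gpt2 config requires network access):

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

# illustrative settings; any hub model id whose config is available works
args = PyTorchBenchmarkArguments(models=["gpt2"], batch_sizes=[1], sequence_lengths=[8])
benchmark = PyTorchBenchmark(args)
results = benchmark.run()  # reports speed (and memory) per model/batch size/sequence length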
public/gpt-2/transformers/benchmark/benchmark_args.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from dataclasses import dataclass, field
18
+ from typing import Tuple
19
+
20
+ from ..file_utils import cached_property, is_torch_available, is_torch_tpu_available, torch_required
21
+ from ..utils import logging
22
+ from .benchmark_args_utils import BenchmarkArguments
23
+
24
+
25
+ if is_torch_available():
26
+ import torch
27
+
28
+ if is_torch_tpu_available():
29
+ import torch_xla.core.xla_model as xm
30
+
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+
35
+ @dataclass
36
+ class PyTorchBenchmarkArguments(BenchmarkArguments):
37
+
38
+ deprecated_args = [
39
+ "no_inference",
40
+ "no_cuda",
41
+ "no_tpu",
42
+ "no_speed",
43
+ "no_memory",
44
+ "no_env_print",
45
+ "no_multi_process",
46
+ ]
47
+
48
+ def __init__(self, **kwargs):
49
+ """
50
+ This __init__ is here for legacy code. Once the deprecated args are removed completely, it can simply be
51
+ deleted.
52
+ """
53
+ for deprecated_arg in self.deprecated_args:
54
+ if deprecated_arg in kwargs:
55
+ positive_arg = deprecated_arg[3:]
56
+ kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
57
+ logger.warning(
58
+ f"{deprecated_arg} is depreciated. Please use --no_{positive_arg} or {positive_arg}={kwargs[positive_arg]}"
59
+ )
60
+
61
+ self.torchscript = kwargs.pop("torchscript", self.torchscript)
62
+ self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
63
+ self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
64
+ super().__init__(**kwargs)
65
+
66
+ torchscript: bool = field(default=False, metadata={"help": "Trace the models using TorchScript"})
67
+ torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print XLA/PyTorch TPU metrics"})
68
+ fp16_opt_level: str = field(
69
+ default="O1",
70
+ metadata={
71
+ "help": (
72
+ "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
73
+ "See details at https://nvidia.github.io/apex/amp.html"
74
+ )
75
+ },
76
+ )
77
+
78
+ @cached_property
79
+ @torch_required
80
+ def _setup_devices(self) -> Tuple["torch.device", int]:
81
+ logger.info("PyTorch: setting up devices")
82
+ if not self.cuda:
83
+ device = torch.device("cpu")
84
+ n_gpu = 0
85
+ elif is_torch_tpu_available():
86
+ device = xm.xla_device()
87
+ n_gpu = 0
88
+ else:
89
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
90
+ n_gpu = torch.cuda.device_count()
91
+ return device, n_gpu
92
+
93
+ @property
94
+ def is_tpu(self):
95
+ return is_torch_tpu_available() and self.tpu
96
+
97
+ @property
98
+ @torch_required
99
+ def device_idx(self) -> int:
100
+ # TODO(PVP): currently only single GPU is supported
101
+ return torch.cuda.current_device()
102
+
103
+ @property
104
+ @torch_required
105
+ def device(self) -> "torch.device":
106
+ return self._setup_devices[0]
107
+
108
+ @property
109
+ @torch_required
110
+ def n_gpu(self):
111
+ return self._setup_devices[1]
112
+
113
+ @property
114
+ def is_gpu(self):
115
+ return self.n_gpu > 0
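A sketch of the legacy-argument handling above: with the kwargs-based translation shown in __init__, a deprecated no_* keyword becomes its positive counterpart before the dataclass __init__ runs (transformers 4.9 assumed):

from transformers import PyTorchBenchmarkArguments

# emits a deprecation warning and is equivalent to cuda=False
args = PyTorchBenchmarkArguments(models=["gpt2"], no_cuda=True)
assert args.cuda is False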
public/gpt-2/transformers/benchmark/benchmark_args_tf.py ADDED
@@ -0,0 +1,136 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ from dataclasses import dataclass, field
18
+ from typing import Optional
19
+
20
+ from ..file_utils import cached_property, is_tf_available, tf_required
21
+ from ..utils import logging
22
+ from .benchmark_args_utils import BenchmarkArguments
23
+
24
+
25
+ if is_tf_available():
26
+ import tensorflow as tf
27
+
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
+
32
+ @dataclass
33
+ class TensorFlowBenchmarkArguments(BenchmarkArguments):
34
+
35
+ deprecated_args = [
36
+ "no_inference",
37
+ "no_cuda",
38
+ "no_tpu",
39
+ "no_speed",
40
+ "no_memory",
41
+ "no_env_print",
42
+ "no_multi_process",
43
+ ]
44
+
45
+ def __init__(self, **kwargs):
46
+ """
47
+ This __init__ is here for legacy code. Once the deprecated args are removed completely, it can simply be
48
+ deleted.
49
+ """
50
+ for deprecated_arg in self.deprecated_args:
51
+ if deprecated_arg in kwargs:
52
+ positive_arg = deprecated_arg[3:]
53
+ kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
54
+ logger.warning(
55
+ f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or {positive_arg}={kwargs[positive_arg]}"
56
+ )
57
+ self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
58
+ self.device_idx = kwargs.pop("device_idx", self.device_idx)
59
+ self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
60
+ self.use_xla = kwargs.pop("use_xla", self.use_xla)
61
+ super().__init__(**kwargs)
62
+
63
+ tpu_name: str = field(
64
+ default=None,
65
+ metadata={"help": "Name of TPU"},
66
+ )
67
+ device_idx: int = field(
68
+ default=0,
69
+ metadata={"help": "CPU / GPU device index. Defaults to 0."},
70
+ )
71
+ eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager mode."})
72
+ use_xla: bool = field(
73
+ default=False,
74
+ metadata={
75
+ "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
76
+ },
77
+ )
78
+
79
+ @cached_property
80
+ @tf_required
81
+ def _setup_tpu(self) -> Optional["tf.distribute.cluster_resolver.TPUClusterResolver"]:
82
+ if self.tpu:
83
+ try:
84
+ if self.tpu_name:
85
+ tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
86
+ else:
87
+ tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
88
+ except ValueError:
89
+ tpu = None
90
+ return tpu
91
+
92
+ @cached_property
93
+ @tf_required
94
+ def _setup_strategy(self) -> "tf.distribute.Strategy":
95
+ if self.is_tpu:
96
+ tf.config.experimental_connect_to_cluster(self._setup_tpu)
97
+ tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
98
+
99
+ strategy = tf.distribute.TPUStrategy(self._setup_tpu)
100
+ else:
101
+ # currently no multi gpu is allowed
102
+ if self.is_gpu:
103
+ # TODO: Currently only single GPU is supported
104
+ tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
105
+ strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
106
+ else:
107
+ tf.config.set_visible_devices([], "GPU") # disable GPU
108
+ strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
109
+
110
+ return strategy
111
+
112
+ @property
113
+ @tf_required
114
+ def is_tpu(self) -> bool:
115
+ return self._setup_tpu is not None
116
+
117
+ @property
118
+ @tf_required
119
+ def strategy(self) -> "tf.distribute.Strategy":
120
+ return self._setup_strategy
121
+
122
+ @property
123
+ @tf_required
124
+ def gpu_list(self):
125
+ return tf.config.list_physical_devices("GPU")
126
+
127
+ @property
128
+ @tf_required
129
+ def n_gpu(self) -> int:
130
+ if self.cuda:
131
+ return len(self.gpu_list)
132
+ return 0
133
+
134
+ @property
135
+ def is_gpu(self) -> bool:
136
+ return self.n_gpu > 0
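A sketch of how the properties above resolve devices (assumes TensorFlow 2.x installed and no TPU attached, so _setup_tpu resolves to None):

from transformers import TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(models=["gpt2"], cuda=False)  # force CPU
print(args.is_tpu)    # False: no TPUClusterResolver could be created
print(args.n_gpu)     # 0, because cuda=False
print(args.strategy)  # tf.distribute.OneDeviceStrategy pinned to /cpu:0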
public/gpt-2/transformers/benchmark/benchmark_args_utils.py ADDED
@@ -0,0 +1,145 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import dataclasses
18
+ import json
19
+ from dataclasses import dataclass, field
20
+ from time import time
21
+ from typing import List
22
+
23
+ from ..utils import logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ def list_field(default=None, metadata=None):
30
+ return field(default_factory=lambda: default, metadata=metadata)
31
+
32
+
33
+ @dataclass
34
+ class BenchmarkArguments:
35
+ """
36
+ BenchmarkArguments are arguments we use in our benchmark scripts **which relate to the training loop itself**.
37
+
38
+ Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command
39
+ line.
40
+ """
41
+
42
+ models: List[str] = list_field(
43
+ default=[],
44
+ metadata={
45
+ "help": "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version of all available models"
46
+ },
47
+ )
48
+
49
+ batch_sizes: List[int] = list_field(
50
+ default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
51
+ )
52
+
53
+ sequence_lengths: List[int] = list_field(
54
+ default=[8, 32, 128, 512],
55
+ metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
56
+ )
57
+
58
+ inference: bool = field(
59
+ default=True,
60
+ metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
61
+ )
62
+ cuda: bool = field(
63
+ default=True,
64
+ metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
65
+ )
66
+ tpu: bool = field(
67
+ default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
68
+ )
69
+ fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
70
+ training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
71
+ verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
72
+ speed: bool = field(
73
+ default=True,
74
+ metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
75
+ )
76
+ memory: bool = field(
77
+ default=True,
78
+ metadata={
79
+ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
80
+ },
81
+ )
82
+ trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
83
+ save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
84
+ log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
85
+ env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
86
+ multi_process: bool = field(
87
+ default=True,
88
+ metadata={
89
+ "help": "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled for debugging / testing and on TPU."
90
+ },
91
+ )
92
+ inference_time_csv_file: str = field(
93
+ default=f"inference_time_{round(time())}.csv",
94
+ metadata={"help": "CSV filename used if saving time results to csv."},
95
+ )
96
+ inference_memory_csv_file: str = field(
97
+ default=f"inference_memory_{round(time())}.csv",
98
+ metadata={"help": "CSV filename used if saving memory results to csv."},
99
+ )
100
+ train_time_csv_file: str = field(
101
+ default=f"train_time_{round(time())}.csv",
102
+ metadata={"help": "CSV filename used if saving time results to csv for training."},
103
+ )
104
+ train_memory_csv_file: str = field(
105
+ default=f"train_memory_{round(time())}.csv",
106
+ metadata={"help": "CSV filename used if saving memory results to csv for training."},
107
+ )
108
+ env_info_csv_file: str = field(
109
+ default=f"env_info_{round(time())}.csv",
110
+ metadata={"help": "CSV filename used if saving environment information."},
111
+ )
112
+ log_filename: str = field(
113
+ default=f"log_{round(time())}.csv",
114
+ metadata={"help": "Log filename used if print statements are saved in log."},
115
+ )
116
+ repeat: int = field(default=3, metadata={"help": "Number of times an experiment will be run."})
117
+ only_pretrain_model: bool = field(
118
+ default=False,
119
+ metadata={
120
+ "help": "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain model weights."
121
+ },
122
+ )
123
+
124
+ def to_json_string(self):
125
+ """
126
+ Serializes this instance to a JSON string.
127
+ """
128
+ return json.dumps(dataclasses.asdict(self), indent=2)
129
+
130
+ @property
131
+ def model_names(self):
132
+ assert (
133
+ len(self.models) > 0
134
+ ), "Please make sure you provide at least one model name / model identifier, *e.g.* `--models bert-base-cased` or `args.models = ['bert-base-cased']."
135
+ return self.models
136
+
137
+ @property
138
+ def do_multi_processing(self):
139
+ if not self.multi_process:
140
+ return False
141
+ elif self.is_tpu:
142
+ logger.info("Multiprocessing is currently not possible on TPU.")
143
+ return False
144
+ else:
145
+ return True
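Because BenchmarkArguments is a dataclass, it can be exposed on the command line via HfArgumentParser, as the docstring above notes. A minimal sketch (the script name run_benchmark.py is hypothetical):

# invoke as e.g.: python run_benchmark.py --models gpt2 --batch_sizes 1 8
from transformers import HfArgumentParser, PyTorchBenchmarkArguments

parser = HfArgumentParser(PyTorchBenchmarkArguments)
benchmark_args = parser.parse_args_into_dataclasses()[0]
print(benchmark_args.to_json_string())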
public/gpt-2/transformers/benchmark/benchmark_tf.py ADDED
@@ -0,0 +1,294 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Benchmarking the library on inference and training in TensorFlow.
18
+ """
19
+
20
+
21
+ import random
22
+ import timeit
23
+ from functools import wraps
24
+ from typing import Callable, Optional
25
+
26
+ from ..configuration_utils import PretrainedConfig
27
+ from ..file_utils import is_py3nvml_available, is_tf_available
28
+ from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
29
+ from ..utils import logging
30
+ from .benchmark_utils import (
31
+ Benchmark,
32
+ Memory,
33
+ MemorySummary,
34
+ measure_peak_memory_cpu,
35
+ start_memory_tracing,
36
+ stop_memory_tracing,
37
+ )
38
+
39
+
40
+ if is_tf_available():
41
+ import tensorflow as tf
42
+ from tensorflow.python.framework.errors_impl import ResourceExhaustedError
43
+
44
+ from .benchmark_args_tf import TensorFlowBenchmarkArguments
45
+
46
+ if is_py3nvml_available():
47
+ import py3nvml.py3nvml as nvml
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+
52
+ def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
53
+ def run_func(func):
54
+ @wraps(func)
55
+ def run_in_eager_mode(*args, **kwargs):
56
+ return func(*args, **kwargs)
57
+
58
+ @wraps(func)
59
+ @tf.function(experimental_compile=use_xla)
60
+ def run_in_graph_mode(*args, **kwargs):
61
+ return func(*args, **kwargs)
62
+
63
+ if do_eager_mode is True:
64
+ assert (
65
+ use_xla is False
66
+ ), "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
67
+ return run_in_eager_mode
68
+ else:
69
+ return run_in_graph_mode
70
+
71
+ return run_func
72
+
73
+
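# Illustrative only: how the decorator factory above is typically applied.
# With do_eager_mode=False, the wrapped callable is compiled via tf.function,
# optionally with XLA (this mirrors the @run_with_tf_optimizations usages below):
#
# @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
# def _forward():
#     return model(input_ids, training=False)
#
# _forward()  # the first call traces and compiles; later calls reuse the graph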
74
+ def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> "tf.Tensor":
75
+ rng = random.Random()
76
+ values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
77
+ return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
78
+
79
+
80
+ class TensorFlowBenchmark(Benchmark):
81
+
82
+ args: TensorFlowBenchmarkArguments
83
+ configs: PretrainedConfig
84
+ framework: str = "TensorFlow"
85
+
86
+ @property
87
+ def framework_version(self):
88
+ return tf.__version__
89
+
90
+ def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
91
+ # initialize GPU on separate process
92
+ strategy = self.args.strategy
93
+ assert strategy is not None, "A device strategy has to be initialized before using TensorFlow."
94
+ _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
95
+ return self._measure_speed(_inference)
96
+
97
+ def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
98
+ strategy = self.args.strategy
99
+ assert strategy is not None, "A device strategy has to be initialized before using TensorFlow."
100
+ _train = self._prepare_train_func(model_name, batch_size, sequence_length)
101
+ return self._measure_speed(_train)
102
+
103
+ def _inference_memory(
104
+ self, model_name: str, batch_size: int, sequence_length: int
105
+ ) -> [Memory, Optional[MemorySummary]]:
106
+ # initialize GPU on separate process
107
+ if self.args.is_gpu:
108
+ tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
109
+ strategy = self.args.strategy
110
+ assert strategy is not None, "A device strategy has to be initialized before using TensorFlow."
111
+ _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
112
+ return self._measure_memory(_inference)
113
+
114
+ def _train_memory(
115
+ self, model_name: str, batch_size: int, sequence_length: int
116
+ ) -> [Memory, Optional[MemorySummary]]:
117
+ if self.args.is_gpu:
118
+ tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
119
+ strategy = self.args.strategy
120
+ assert strategy is not None, "A device strategy has to be initialized before using TensorFlow."
121
+
122
+ _train = self._prepare_train_func(model_name, batch_size, sequence_length)
123
+ return self._measure_memory(_train)
124
+
125
+ def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
126
+ config = self.config_dict[model_name]
127
+
128
+ if self.args.fp16:
129
+ raise NotImplementedError("Mixed precision is currently not supported.")
130
+
131
+ has_model_class_in_config = (
132
+ hasattr(config, "architectures")
133
+ and isinstance(config.architectures, list)
134
+ and len(config.architectures) > 0
135
+ )
136
+ if not self.args.only_pretrain_model and has_model_class_in_config:
137
+ try:
138
+ model_class = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
139
+ transformers_module = __import__("transformers", fromlist=[model_class])
140
+ model_cls = getattr(transformers_module, model_class)
141
+ model = model_cls(config)
142
+ except ImportError:
143
+ raise ImportError(
144
+ f"{model_class} does not exist. If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`."
145
+ )
146
+ else:
147
+ model = TF_MODEL_MAPPING[config.__class__](config)
148
+
149
+ # encoder-decoder has vocab size saved differently
150
+ vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
151
+ input_ids = random_input_ids(batch_size, sequence_length, vocab_size)
152
+
153
+ @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
154
+ def encoder_decoder_forward():
155
+ return model(input_ids, decoder_input_ids=input_ids, training=False)
156
+
157
+ @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
158
+ def encoder_forward():
159
+ return model(input_ids, training=False)
160
+
161
+ _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
162
+
163
+ return _inference
164
+
165
+ def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
166
+ config = self.config_dict[model_name]
167
+
168
+ assert (
169
+ self.args.eager_mode is False
170
+ ), "Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`."
171
+
172
+ if self.args.fp16:
173
+ raise NotImplementedError("Mixed precision is currently not supported.")
174
+
175
+ has_model_class_in_config = (
176
+ hasattr(config, "architectures")
177
+ and isinstance(config.architectures, list)
178
+ and len(config.architectures) > 0
179
+ )
180
+ if not self.args.only_pretrain_model and has_model_class_in_config:
181
+ try:
182
+ model_class = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
183
+ transformers_module = __import__("transformers", fromlist=[model_class])
184
+ model_cls = getattr(transformers_module, model_class)
185
+ model = model_cls(config)
186
+ except ImportError:
187
+ raise ImportError(
188
+ f"{model_class} does not exist. If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`."
189
+ )
190
+ else:
191
+ model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)
192
+
193
+ # encoder-decoder has vocab size saved differently
194
+ vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
195
+ input_ids = random_input_ids(batch_size, sequence_length, vocab_size)
196
+
197
+ @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
198
+ def encoder_decoder_train():
199
+ loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
200
+ gradients = tf.gradients(loss, model.trainable_variables)
201
+ return gradients
202
+
203
+ @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
204
+ def encoder_train():
205
+ loss = model(input_ids, labels=input_ids, training=True)[0]
206
+ gradients = tf.gradients(loss, model.trainable_variables)
207
+ return gradients
208
+
209
+ _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
210
+
211
+ return _train
212
+
213
+ def _measure_speed(self, func) -> float:
214
+ with self.args.strategy.scope():
215
+ try:
216
+ if self.args.is_tpu or self.args.use_xla:
217
+ # run the model an additional 5 times to stabilize compilation for TPU and XLA
218
+ logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
219
+ timeit.repeat(func, repeat=1, number=5)
220
+
221
+ # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
222
+ runtimes = timeit.repeat(
223
+ func,
224
+ repeat=self.args.repeat,
225
+ number=10,
226
+ )
227
+
228
+ return min(runtimes) / 10.0
229
+ except ResourceExhaustedError as e:
230
+ self.print_fn(f"Doesn't fit on GPU. {e}")
231
+
232
+ def _measure_memory(self, func: Callable[[], None]) -> [Memory, Optional[MemorySummary]]:
233
+ logger.info(
234
+ "Note that TensorFlow allocates more memory than"
235
+ "it might need to speed up computation."
236
+ "The memory reported here corresponds to the memory"
237
+ "reported by `nvidia-smi`, which can vary depending"
238
+ "on total available memory on the GPU that is used."
239
+ )
240
+ with self.args.strategy.scope():
241
+ try:
242
+ if self.args.trace_memory_line_by_line:
243
+ assert (
244
+ self.args.eager_mode
245
+ ), "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory consumption line by line."
246
+ trace = start_memory_tracing("transformers")
247
+
248
+ if self.args.is_tpu:
249
+ # tpu
250
+ raise NotImplementedError(
251
+ "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking with `args.memory=False`"
252
+ )
253
+ elif self.args.is_gpu:
254
+ # gpu
255
+ if not is_py3nvml_available():
256
+ logger.warning(
257
+ "py3nvml not installed, we won't log GPU memory usage. "
258
+ "Install py3nvml (pip install py3nvml) to log information about GPU."
259
+ )
260
+ memory = "N/A"
261
+ else:
262
+ logger.info(
263
+ "Measuring total GPU usage on GPU device. Make sure to not have additional processes running on the same GPU."
264
+ )
265
+ # init nvml
266
+ nvml.nvmlInit()
267
+ func()
268
+ handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
269
+ meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
270
+ max_bytes_in_use = meminfo.used
271
+ memory = Memory(max_bytes_in_use)
272
+ # shutdown nvml
273
+ nvml.nvmlShutdown()
274
+ else:
275
+ # cpu
276
+ if self.args.trace_memory_line_by_line:
277
+ logger.info(
278
+ "When enabling line by line tracing, the max peak memory for CPU is inaccurate in TensorFlow."
279
+ )
280
+ memory = None
281
+ else:
282
+ memory_bytes = measure_peak_memory_cpu(func)
283
+ memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
284
+ if self.args.trace_memory_line_by_line:
285
+ summary = stop_memory_tracing(trace)
286
+ if memory is None:
287
+ memory = summary.total
288
+ else:
289
+ summary = None
290
+
291
+ return memory, summary
292
+ except ResourceExhaustedError as e:
293
+ self.print_fn(f"Doesn't fit on GPU. {e}")
294
+ return "N/A", None
public/gpt-2/transformers/benchmark/benchmark_utils.py ADDED
@@ -0,0 +1,909 @@
1
+ # This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
2
+
3
+ # Copyright 2020 The HuggingFace Team and the AllenNLP authors. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Utilities for benchmarking the speed and memory consumption of models in PyTorch and TensorFlow.
18
+ """
19
+
20
+ import copy
21
+ import csv
22
+ import linecache
23
+ import os
24
+ import platform
25
+ import sys
26
+ from abc import ABC, abstractmethod
27
+ from collections import defaultdict, namedtuple
28
+ from datetime import datetime
29
+ from multiprocessing import Pipe, Process, Queue
30
+ from multiprocessing.connection import Connection
31
+ from typing import Callable, Iterable, List, NamedTuple, Optional, Union
32
+
33
+ from .. import AutoConfig, PretrainedConfig
34
+ from .. import __version__ as version
35
+ from ..file_utils import is_psutil_available, is_py3nvml_available, is_tf_available, is_torch_available
36
+ from ..utils import logging
37
+ from .benchmark_args_utils import BenchmarkArguments
38
+
39
+
40
+ if is_torch_available():
41
+ from torch.cuda import empty_cache as torch_empty_cache
42
+
43
+ if is_tf_available():
44
+ from tensorflow.python.eager import context as tf_context
45
+
46
+ if is_psutil_available():
47
+ import psutil
48
+
49
+ if is_py3nvml_available():
50
+ import py3nvml.py3nvml as nvml
51
+
52
+ if platform.system() == "Windows":
53
+ from signal import CTRL_C_EVENT as SIGKILL
54
+ else:
55
+ from signal import SIGKILL
56
+
57
+
58
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
59
+
60
+
61
+ _is_memory_tracing_enabled = False
62
+
63
+ BenchmarkOutput = namedtuple(
64
+ "BenchmarkOutput",
65
+ [
66
+ "time_inference_result",
67
+ "memory_inference_result",
68
+ "time_train_result",
69
+ "memory_train_result",
70
+ "inference_summary",
71
+ "train_summary",
72
+ ],
73
+ )
74
+
75
+
76
+ def separate_process_wrapper_fn(func: Callable[[], None], do_multi_processing: bool) -> Callable[[], None]:
77
+ """
78
+ This function wraps another function into its own separate process. In order to ensure accurate memory
79
+ measurements, it is important that the function is executed in a separate process.
80
+
81
+ Args:
82
+
83
+ - `func`: (`callable`): function() -> ... generic function which will be executed in its own separate process
84
+ - `do_multi_processing`: (`bool`) Whether to run function on separate process or not
85
+ """
86
+
87
+ def multi_process_func(*args, **kwargs):
88
+ # run function in an individual
89
+ # process to get correct memory
90
+ def wrapper_func(queue: Queue, *args):
91
+ try:
92
+ result = func(*args)
93
+ except Exception as e:
94
+ logger.error(e)
95
+ print(e)
96
+ result = "N/A"
97
+ queue.put(result)
98
+
99
+ queue = Queue()
100
+ p = Process(target=wrapper_func, args=[queue] + list(args))
101
+ p.start()
102
+ result = queue.get()
103
+ p.join()
104
+ return result
105
+
106
+ if do_multi_processing:
107
+ logger.info(f"Function {func} is executed in its own process...")
108
+ return multi_process_func
109
+ else:
110
+ return func
111
+
112
+
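# Illustrative only: isolating a measurement in its own process.
#
# def job():
#     return sum(range(10**6))
#
# wrapped = separate_process_wrapper_fn(job, do_multi_processing=True)
# print(wrapped())  # job runs in a fresh Process; the result comes back via the Queue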
113
+ def is_memory_tracing_enabled():
114
+ global _is_memory_tracing_enabled
115
+ return _is_memory_tracing_enabled
116
+
117
+
118
+ class Frame(NamedTuple):
119
+ """
120
+ `Frame` is a NamedTuple used to gather the current frame state. `Frame` has the following fields:
121
+
122
+ - 'filename' (string): Name of the file currently executed
123
+ - 'module' (string): Name of the module currently executed
124
+ - 'line_number' (int): Number of the line currently executed
125
+ - 'event' (string): Event that triggered the tracing (default will be "line")
126
+ - 'line_text' (string): Text of the line in the python script
127
+ """
128
+
129
+ filename: str
130
+ module: str
131
+ line_number: int
132
+ event: str
133
+ line_text: str
134
+
135
+
136
+ class UsedMemoryState(NamedTuple):
137
+ """
138
+ `UsedMemoryState` are named tuples with the following fields:
139
+
140
+ - 'frame': a `Frame` namedtuple (see below) storing information on the current tracing frame (current file,
141
+ location in current file)
142
+ - 'cpu_memory': CPU RSS memory state *before* executing the line
143
+ - 'gpu_memory': GPU used memory *before* executing the line (sum for all GPUs or for only `gpus_to_trace` if
144
+ provided)
145
+ """
146
+
147
+ frame: Frame
148
+ cpu_memory: int
149
+ gpu_memory: int
150
+
151
+
152
+ class Memory(NamedTuple):
153
+ """
154
+ `Memory` NamedTuple has a single field `bytes`; you can get a human readable str of the number of mega bytes by
155
+ calling `__repr__`
156
+
157
+ - `bytes` (integer): number of bytes,
158
+ """
159
+
160
+ bytes: int
161
+
162
+ def __repr__(self) -> str:
163
+ return str(bytes_to_mega_bytes(self.bytes))
164
+
165
+
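# For example, Memory(3 * 2**20).bytes == 3145728 while repr(Memory(3 * 2**20)) == "3",
# since bytes_to_mega_bytes (defined below) shifts the byte count right by 20 bits.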
166
+ class MemoryState(NamedTuple):
167
+ """
168
+ `MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields:
169
+
170
+ - `frame` (`Frame`): the current frame (see above)
171
+ - `cpu`: CPU memory consumed during the current frame as a `Memory` named tuple
172
+ - `gpu`: GPU memory consumed during the current frame as a `Memory` named tuple
173
+ - `cpu_gpu`: CPU + GPU memory consumed during the current frame as a `Memory` named tuple
174
+ """
175
+
176
+ frame: Frame
177
+ cpu: Memory
178
+ gpu: Memory
179
+ cpu_gpu: Memory
180
+
181
+
182
+ class MemorySummary(NamedTuple):
183
+ """
184
+ `MemorySummary` namedtuple with the following fields:
185
+
186
+ - `sequential`: a list of `MemoryState` namedtuple (see below) computed from the provided `memory_trace` by
187
+ subtracting the memory after executing each line from the memory before executing said line.
188
+ - `cumulative`: a list of `MemoryState` namedtuple (see below) with cumulative increase in memory for each line
189
+ obtained by summing repeated memory increase for a line if it's executed several times. The list is sorted
190
+ from the frame with the largest memory consumption to the frame with the smallest (can be negative if memory
191
+ is released)
192
+ - `current`: a list of `MemoryState` namedtuple (see below) with the absolute memory recorded at each traced
+ line, sorted from the frame with the largest CPU + GPU consumption to the smallest.
+ - `total`: total memory increase during the full tracing as a `Memory` named tuple (see below). Lines with
193
+ memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default).
194
+ """
195
+
196
+ sequential: List[MemoryState]
197
+ cumulative: List[MemoryState]
198
+ current: List[MemoryState]
199
+ total: Memory
200
+
201
+
202
+ MemoryTrace = List[UsedMemoryState]
203
+
204
+
205
+ def measure_peak_memory_cpu(function: Callable[[], None], interval=0.5, device_idx=None) -> int:
206
+ """
207
+ measures peak cpu memory consumption of a given `function` running the function for at least interval seconds and
208
+ at most 20 * interval seconds. This function is heavily inspired by: `memory_usage` of the package
209
+ `memory_profiler`:
210
+ https://github.com/pythonprofilers/memory_profiler/blob/895c4ac7a08020d66ae001e24067da6dcea42451/memory_profiler.py#L239
211
+
212
+ Args:
213
+
214
+ - `function`: (`callable`): function() -> ... function without any arguments for which to measure
215
+ the peak memory
216
+
217
+ - `interval`: (`float`, `optional`, defaults to `0.5`) interval in seconds between two memory measurements
218
+
219
+ - `device_idx`: (`int`, `optional`, defaults to `None`) device id for which to measure gpu usage
220
+
221
+ Returns:
222
+
223
+ - `max_memory`: (`int`) consumed memory peak in Bytes
224
+ """
225
+
226
+ def get_cpu_memory(process_id: int) -> int:
227
+ """
228
+ measures current cpu memory usage of a given `process_id`
229
+
230
+ Args:
231
+
232
+ - `process_id`: (`int`) process_id for which to measure memory
233
+
234
+ Returns
235
+
236
+ - `memory`: (`int`) consumed memory in Bytes
237
+ """
238
+ process = psutil.Process(process_id)
239
+ try:
240
+ meminfo_attr = "memory_info" if hasattr(process, "memory_info") else "get_memory_info"
241
+ memory = getattr(process, meminfo_attr)()[0]
242
+ except psutil.AccessDenied:
243
+ raise ValueError("Error with Psutil.")
244
+ return memory
245
+
246
+ if not is_psutil_available():
247
+ logger.warning(
248
+ "Psutil not installed, we won't log CPU memory usage. "
249
+ "Install Psutil (pip install psutil) to use CPU memory tracing."
250
+ )
251
+ max_memory = "N/A"
252
+ else:
253
+
254
+ class MemoryMeasureProcess(Process):
255
+
256
+ """
257
+ `MemoryMeasureProcess` inherits from `Process` and overwrites its `run()` method. Used to measure the
258
+ memory usage of a process
259
+ """
260
+
261
+ def __init__(self, process_id: int, child_connection: Connection, interval: float):
262
+ super().__init__()
263
+ self.process_id = process_id
264
+ self.interval = interval
265
+ self.connection = child_connection
266
+ self.num_measurements = 1
267
+ self.mem_usage = get_cpu_memory(self.process_id)
268
+
269
+ def run(self):
270
+ self.connection.send(0)
271
+ stop = False
272
+ while True:
273
+ self.mem_usage = max(self.mem_usage, get_cpu_memory(self.process_id))
274
+ self.num_measurements += 1
275
+
276
+ if stop:
277
+ break
278
+
279
+ stop = self.connection.poll(self.interval)
280
+
281
+ # send results to parent pipe
282
+ self.connection.send(self.mem_usage)
283
+ self.connection.send(self.num_measurements)
284
+
285
+ while True:
286
+ # create child, parent connection
287
+ child_connection, parent_connection = Pipe()
288
+
289
+ # instantiate process
290
+ mem_process = MemoryMeasureProcess(os.getpid(), child_connection, interval)
291
+ mem_process.start()
292
+
293
+ # wait until we get memory
294
+ parent_connection.recv()
295
+
296
+ try:
297
+ # execute function
298
+ function()
299
+
300
+ # start parent connection
301
+ parent_connection.send(0)
302
+
303
+ # receive memory and num measurements
304
+ max_memory = parent_connection.recv()
305
+ num_measurements = parent_connection.recv()
306
+ except Exception:
307
+ # kill process in a clean way
308
+ parent = psutil.Process(os.getpid())
309
+ for child in parent.children(recursive=True):
310
+ os.kill(child.pid, SIGKILL)
311
+ mem_process.join(0)
312
+ raise RuntimeError("Process killed. Error in Process")
313
+
314
+ # wait for the measuring process to finish, at most 20 * interval seconds
315
+ mem_process.join(20 * interval)
316
+
317
+ if (num_measurements > 4) or (interval < 1e-6):
318
+ break
319
+
320
+ # reduce interval
321
+ interval /= 10
322
+
323
+ return max_memory
324
+
325
+
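# Illustrative only (requires psutil): peak RSS of an allocation-heavy callable.
#
# def allocate():
#     big = [0] * (10**7)  # roughly 80 MB of list slots on CPython
#     return len(big)
#
# peak_bytes = measure_peak_memory_cpu(allocate)
# print(peak_bytes >> 20, "MB")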
326
+ def start_memory_tracing(
327
+ modules_to_trace: Optional[Union[str, Iterable[str]]] = None,
328
+ modules_not_to_trace: Optional[Union[str, Iterable[str]]] = None,
329
+ events_to_trace: str = "line",
330
+ gpus_to_trace: Optional[List[int]] = None,
331
+ ) -> MemoryTrace:
332
+ """
333
+ Set up line-by-line tracing to record rss mem (RAM) at each line of a module or sub-module. See `./benchmark.py` for
334
+ usage examples. Current memory consumption is returned using psutil and in particular is the RSS memory "Resident
335
+ Set Size" (the non-swapped physical memory the process is using). See
336
+ https://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info
337
+
338
+ Args:
339
+
340
+ - `modules_to_trace`: (None, string, list/tuple of string) if None, all events are recorded; if string or list
341
+ of strings: only events from the listed module/sub-module will be recorded (e.g. 'fairseq' or
342
+ 'transformers.models.gpt2.modeling_gpt2')
343
+ - `modules_not_to_trace`: (None, string, list/tuple of string) if None, no module is avoided; if string or list
344
+ of strings: events from the listed module/sub-module will not be recorded (e.g. 'torch')
345
+ - `events_to_trace`: string or list of string of events to be recorded (see official python doc for
346
+ `sys.settrace` for the list of events); defaults to "line"
347
+ - `gpus_to_trace`: (optional list, default None) list of GPUs to trace. Defaults to tracing all GPUs
348
+
349
+ Return:
350
+
351
+ - `memory_trace` is a list of `UsedMemoryState` for each event (default each line of the traced script).
352
+
353
+ - `UsedMemoryState` are named tuples with the following fields:
354
+
355
+ - 'frame': a `Frame` namedtuple (see below) storing information on the current tracing frame (current
356
+ file, location in current file)
357
+ - 'cpu_memory': CPU RSS memory state *before* executing the line
358
+ - 'gpu_memory': GPU used memory *before* executing the line (sum for all GPUs or for only
359
+ `gpus_to_trace` if provided)
360
+
361
+ `Frame` is a namedtuple used by `UsedMemoryState` to list the current frame state. `Frame` has the following fields:
+
+ - 'filename' (string): Name of the file currently executed
+ - 'module' (string): Name of the module currently executed
+ - 'line_number' (int): Number of the line currently executed
+ - 'event' (string): Event that triggered the tracing (default will be "line")
+ - 'line_text' (string): Text of the line in the python script
365
+
366
+ """
367
+ if is_psutil_available():
368
+ process = psutil.Process(os.getpid())
369
+ else:
370
+ logger.warning(
371
+ "Psutil not installed, we won't log CPU memory usage. "
372
+ "Install psutil (pip install psutil) to use CPU memory tracing."
373
+ )
374
+ process = None
375
+
376
+ if is_py3nvml_available():
377
+ try:
378
+ nvml.nvmlInit()
379
+ devices = list(range(nvml.nvmlDeviceGetCount())) if gpus_to_trace is None else gpus_to_trace
380
+ nvml.nvmlShutdown()
381
+ except (OSError, nvml.NVMLError):
382
+ logger.warning("Error while initializing communication with GPU. " "We won't perform GPU memory tracing.")
383
+ log_gpu = False
384
+ else:
385
+ log_gpu = is_torch_available() or is_tf_available()
386
+ else:
387
+ logger.warning(
388
+ "py3nvml not installed, we won't log GPU memory usage. "
389
+ "Install py3nvml (pip install py3nvml) to use GPU memory tracing."
390
+ )
391
+ log_gpu = False
392
+
393
+ memory_trace = []
394
+
395
+ def traceit(frame, event, args):
396
+ """
397
+ Tracing method executed before running each line in a module or sub-module. Records the memory allocated in a list
398
+ with debugging information.
399
+ """
400
+ global _is_memory_tracing_enabled
401
+
402
+ if not _is_memory_tracing_enabled:
403
+ return traceit
404
+
405
+ # Filter events
406
+ if events_to_trace is not None:
407
+ if isinstance(events_to_trace, str) and event != events_to_trace:
408
+ return traceit
409
+ elif isinstance(events_to_trace, (list, tuple)) and event not in events_to_trace:
410
+ return traceit
411
+
412
+ if "__name__" not in frame.f_globals:
413
+ return traceit
414
+
415
+ # Filter modules
416
+ name = frame.f_globals["__name__"]
417
+ if not isinstance(name, str):
418
+ return traceit
419
+ else:
420
+ # Filter whitelist of modules to trace
421
+ if modules_to_trace is not None:
422
+ if isinstance(modules_to_trace, str) and modules_to_trace not in name:
423
+ return traceit
424
+ elif isinstance(modules_to_trace, (list, tuple)) and all(m not in name for m in modules_to_trace):
425
+ return traceit
426
+
427
+ # Filter blacklist of modules not to trace
428
+ if modules_not_to_trace is not None:
429
+ if isinstance(modules_not_to_trace, str) and modules_not_to_trace in name:
430
+ return traceit
431
+ elif isinstance(modules_not_to_trace, (list, tuple)) and any(m in name for m in modules_not_to_trace):
432
+ return traceit
433
+
434
+ # Record current tracing state (file, location in file...)
435
+ lineno = frame.f_lineno
436
+ filename = frame.f_globals["__file__"]
437
+ if filename.endswith(".pyc") or filename.endswith(".pyo"):
438
+ filename = filename[:-1]
439
+ line = linecache.getline(filename, lineno).rstrip()
440
+ traced_state = Frame(filename, name, lineno, event, line)
441
+
442
+ # Record current memory state (rss memory) and compute difference with previous memory state
443
+ cpu_mem = 0
444
+ if process is not None:
445
+ mem = process.memory_info()
446
+ cpu_mem = mem.rss
447
+
448
+ gpu_mem = 0
449
+ if log_gpu:
450
+ # Clear GPU caches
451
+ if is_torch_available():
452
+ torch_empty_cache()
453
+ if is_tf_available():
454
+ tf_context.context()._clear_caches() # See https://github.com/tensorflow/tensorflow/issues/20218#issuecomment-416771802
455
+
456
+ # Sum used memory for all GPUs
457
+ nvml.nvmlInit()
458
+
459
+ for i in devices:
460
+ handle = nvml.nvmlDeviceGetHandleByIndex(i)
461
+ meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
462
+ gpu_mem += meminfo.used
463
+
464
+ nvml.nvmlShutdown()
465
+
466
+ mem_state = UsedMemoryState(traced_state, cpu_mem, gpu_mem)
467
+ memory_trace.append(mem_state)
468
+
469
+ return traceit
470
+
471
+ sys.settrace(traceit)
472
+
473
+ global _is_memory_tracing_enabled
474
+ _is_memory_tracing_enabled = True
475
+
476
+ return memory_trace
477
+
478
+
479
+ def stop_memory_tracing(
480
+ memory_trace: Optional[MemoryTrace] = None, ignore_released_memory: bool = True
481
+ ) -> Optional[MemorySummary]:
482
+ """
483
+ Stop memory tracing cleanly and return a summary of the memory trace if a trace is given.
484
+
485
+ Args:
486
+
487
+ `memory_trace` (optional output of start_memory_tracing, default: None):
488
+ memory trace to convert in summary
489
+ `ignore_released_memory` (boolean, default: True):
490
+ if True we only sum memory increase to compute total memory
491
+
492
+ Return:
493
+
494
+ - None if `memory_trace` is None
495
+ - `MemorySummary` namedtuple otherwise with the fields:
496
+
497
+ - `sequential`: a list of `MemoryState` namedtuple (see below) computed from the provided `memory_trace` by
498
+ subtracting the memory after executing each line from the memory before executing said line.
499
+ - `cumulative`: a list of `MemoryState` namedtuple (see below) with cumulative increase in memory for each
500
+ line obtained by summing repeated memory increase for a line if it's executed several times. The list is
501
+ sorted from the frame with the largest memory consumption to the frame with the smallest (can be negative
502
+ if memory is released)
503
+ - `total`: total memory increase during the full tracing as a `Memory` named tuple (see below). Lines with
504
+ memory release (negative consumption) are ignored if `ignore_released_memory` is `True` (default).
505
+
506
+ `Memory` named tuples have a single field
507
+
508
+ - `bytes` (integer): number of bytes; `__repr__` renders it as a human readable number of megabytes (ex: "3")
510
+
511
+ `Frame` are namedtuple used to list the current frame state and have the following fields:
512
+
513
+ - 'filename' (string): Name of the file currently executed
514
+ - 'module' (string): Name of the module currently executed
515
+ - 'line_number' (int): Number of the line currently executed
516
+ - 'event' (string): Event that triggered the tracing (default will be "line")
517
+ - 'line_text' (string): Text of the line in the python script
518
+
519
+ `MemoryState` are namedtuples listing frame + CPU/GPU memory with the following fields:
520
+
521
+ - `frame` (`Frame`): the current frame (see above)
522
+ - `cpu`: CPU memory consumed during the current frame as a `Memory` named tuple
523
+ - `gpu`: GPU memory consumed during the current frame as a `Memory` named tuple
524
+ - `cpu_gpu`: CPU + GPU memory consumed during the current frame as a `Memory` named tuple
525
+ """
526
+ global _is_memory_tracing_enabled
527
+ _is_memory_tracing_enabled = False
528
+
529
+ if memory_trace is not None and len(memory_trace) > 1:
530
+ memory_diff_trace = []
531
+ memory_curr_trace = []
532
+
533
+ cumulative_memory_dict = defaultdict(lambda: [0, 0, 0])
534
+
535
+ for (
536
+ (frame, cpu_mem, gpu_mem),
537
+ (next_frame, next_cpu_mem, next_gpu_mem),
538
+ ) in zip(memory_trace[:-1], memory_trace[1:]):
539
+ cpu_mem_inc = next_cpu_mem - cpu_mem
540
+ gpu_mem_inc = next_gpu_mem - gpu_mem
541
+ cpu_gpu_mem_inc = cpu_mem_inc + gpu_mem_inc
542
+ memory_diff_trace.append(
543
+ MemoryState(
544
+ frame=frame,
545
+ cpu=Memory(cpu_mem_inc),
546
+ gpu=Memory(gpu_mem_inc),
547
+ cpu_gpu=Memory(cpu_gpu_mem_inc),
548
+ )
549
+ )
550
+
551
+ memory_curr_trace.append(
552
+ MemoryState(
553
+ frame=frame,
554
+ cpu=Memory(next_cpu_mem),
555
+ gpu=Memory(next_gpu_mem),
556
+ cpu_gpu=Memory(next_gpu_mem + next_cpu_mem),
557
+ )
558
+ )
559
+
560
+ cumulative_memory_dict[frame][0] += cpu_mem_inc
561
+ cumulative_memory_dict[frame][1] += gpu_mem_inc
562
+ cumulative_memory_dict[frame][2] += cpu_gpu_mem_inc
563
+
564
+ cumulative_memory = sorted(
565
+ list(cumulative_memory_dict.items()), key=lambda x: x[1][2], reverse=True
566
+ ) # order by the total CPU + GPU memory increase
567
+ cumulative_memory = list(
568
+ MemoryState(
569
+ frame=frame,
570
+ cpu=Memory(cpu_mem_inc),
571
+ gpu=Memory(gpu_mem_inc),
572
+ cpu_gpu=Memory(cpu_gpu_mem_inc),
573
+ )
574
+ for frame, (cpu_mem_inc, gpu_mem_inc, cpu_gpu_mem_inc) in cumulative_memory
575
+ )
576
+
577
+ memory_curr_trace = sorted(memory_curr_trace, key=lambda x: x.cpu_gpu.bytes, reverse=True)
578
+
579
+ if ignore_released_memory:
580
+ total_memory = sum(max(0, step_trace.cpu_gpu.bytes) for step_trace in memory_diff_trace)
581
+ else:
582
+ total_memory = sum(step_trace.cpu_gpu.bytes for step_trace in memory_diff_trace)
583
+
584
+ total_memory = Memory(total_memory)
585
+
586
+ return MemorySummary(
587
+ sequential=memory_diff_trace,
588
+ cumulative=cumulative_memory,
589
+ current=memory_curr_trace,
590
+ total=total_memory,
591
+ )
592
+
593
+ return None
594
+
595
+
596
+ def bytes_to_mega_bytes(memory_amount: int) -> int:
597
+ """Utility to convert a number of bytes (int) into a number of mega bytes (int)"""
598
+ return memory_amount >> 20
599
+
600
+
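# Illustrative only: the tracing helpers above are used in pairs.
#
# trace = start_memory_tracing("transformers")
# ...  # run the code to profile
# summary = stop_memory_tracing(trace)
# if summary is not None:
#     print("total increase:", summary.total)  # a Memory namedtuple
#     worst = summary.cumulative[0]            # line with the largest cumulative increase
#     print(worst.frame.filename, worst.frame.line_number, worst.cpu_gpu)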
601
+ class Benchmark(ABC):
602
+ """
603
+ Benchmark is a simple but feature-complete benchmarking base class used to compare memory and time performance of models in
604
+ Transformers.
605
+ """
606
+
607
+ args: BenchmarkArguments
608
+ configs: PretrainedConfig
609
+ framework: str
610
+
611
+ def __init__(self, args: BenchmarkArguments = None, configs: PretrainedConfig = None):
612
+ self.args = args
613
+ if configs is None:
614
+ self.config_dict = {
615
+ model_name: AutoConfig.from_pretrained(model_name) for model_name in self.args.model_names
616
+ }
617
+ else:
618
+ self.config_dict = {model_name: config for model_name, config in zip(self.args.model_names, configs)}
619
+
620
+ if self.args.memory and os.getenv("TRANSFORMERS_USE_MULTIPROCESSING") == "0":  # env var values are strings
621
+ logger.warning(
622
+ "Memory consumption will not be measured accurately if `args.multi_process` is set to `False.` The flag 'TRANSFORMERS_USE_MULTIPROCESSING' should only be disabled for debugging / testing."
623
+ )
624
+
625
+ self._print_fn = None
626
+ self._framework_version = None
627
+ self._environment_info = None
628
+
629
+ @property
630
+ def print_fn(self):
631
+ if self._print_fn is None:
632
+ if self.args.log_print:
633
+
634
+ def print_and_log(*args):
635
+ with open(self.args.log_filename, "a") as log_file:
636
+ log_file.write("".join(args) + "\n")
637
+ print(*args)
638
+
639
+ self._print_fn = print_and_log
640
+ else:
641
+ self._print_fn = print
642
+ return self._print_fn
643
+
644
+ @property
645
+ @abstractmethod
646
+ def framework_version(self):
647
+ pass
648
+
649
+ @abstractmethod
650
+ def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
651
+ pass
652
+
653
+ @abstractmethod
654
+ def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
655
+ pass
656
+
657
+ @abstractmethod
658
+ def _inference_memory(
659
+ self, model_name: str, batch_size: int, sequence_length: int
660
+ ) -> [Memory, Optional[MemorySummary]]:
661
+ pass
662
+
663
+ @abstractmethod
664
+ def _train_memory(
665
+ self, model_name: str, batch_size: int, sequence_length: int
666
+ ) -> [Memory, Optional[MemorySummary]]:
667
+ pass
668
+
669
+ def inference_speed(self, *args, **kwargs) -> float:
670
+ return separate_process_wrapper_fn(self._inference_speed, self.args.do_multi_processing)(*args, **kwargs)
671
+
672
+ def train_speed(self, *args, **kwargs) -> float:
673
+ return separate_process_wrapper_fn(self._train_speed, self.args.do_multi_processing)(*args, **kwargs)
674
+
675
+ def inference_memory(self, *args, **kwargs) -> [Memory, Optional[MemorySummary]]:
676
+ return separate_process_wrapper_fn(self._inference_memory, self.args.do_multi_processing)(*args, **kwargs)
677
+
678
+ def train_memory(self, *args, **kwargs) -> [Memory, Optional[MemorySummary]]:
679
+ return separate_process_wrapper_fn(self._train_memory, self.args.do_multi_processing)(*args, **kwargs)
680
+
681
+ def run(self):
682
+ result_dict = {model_name: {} for model_name in self.args.model_names}
683
+ inference_result_time = copy.deepcopy(result_dict)
684
+ inference_result_memory = copy.deepcopy(result_dict)
685
+ train_result_time = copy.deepcopy(result_dict)
686
+ train_result_memory = copy.deepcopy(result_dict)
687
+
688
+ for c, model_name in enumerate(self.args.model_names):
689
+ self.print_fn(f"{c + 1} / {len(self.args.model_names)}")
690
+
691
+ model_dict = {
692
+ "bs": self.args.batch_sizes,
693
+ "ss": self.args.sequence_lengths,
694
+ "result": {i: {} for i in self.args.batch_sizes},
695
+ }
696
+ inference_result_time[model_name] = copy.deepcopy(model_dict)
697
+ inference_result_memory[model_name] = copy.deepcopy(model_dict)
698
+ train_result_time[model_name] = copy.deepcopy(model_dict)
699
+ train_result_memory[model_name] = copy.deepcopy(model_dict)
700
+
701
+ inference_summary = train_summary = None
702
+
703
+ for batch_size in self.args.batch_sizes:
704
+ for sequence_length in self.args.sequence_lengths:
705
+ if self.args.inference:
706
+ if self.args.memory:
707
+ memory, inference_summary = self.inference_memory(model_name, batch_size, sequence_length)
708
+ inference_result_memory[model_name]["result"][batch_size][sequence_length] = memory
709
+ if self.args.speed:
710
+ time = self.inference_speed(model_name, batch_size, sequence_length)
711
+ inference_result_time[model_name]["result"][batch_size][sequence_length] = time
712
+
713
+ if self.args.training:
714
+ if self.args.memory:
715
+ memory, train_summary = self.train_memory(model_name, batch_size, sequence_length)
716
+ train_result_memory[model_name]["result"][batch_size][sequence_length] = memory
717
+ if self.args.speed:
718
+ time = self.train_speed(model_name, batch_size, sequence_length)
719
+ train_result_time[model_name]["result"][batch_size][sequence_length] = time
720
+
721
+ if self.args.inference:
722
+ if self.args.speed:
723
+ self.print_fn("\n" + 20 * "=" + ("INFERENCE - SPEED - RESULT").center(40) + 20 * "=")
724
+ self.print_results(inference_result_time, type_label="Time in s")
725
+ self.save_to_csv(inference_result_time, self.args.inference_time_csv_file)
726
+ if self.args.is_tpu:
727
+ self.print_fn(
728
+ "TPU was used for inference. Note that the time after compilation stabilized (after ~10 inferences model.forward(..) calls) was measured."
729
+ )
730
+
731
+ if self.args.memory:
732
+ self.print_fn("\n" + 20 * "=" + ("INFERENCE - MEMORY - RESULT").center(40) + 20 * "=")
733
+ self.print_results(inference_result_memory, type_label="Memory in MB")
734
+ self.save_to_csv(inference_result_memory, self.args.inference_memory_csv_file)
735
+
736
+ if self.args.trace_memory_line_by_line:
737
+ self.print_fn("\n" + 20 * "=" + ("INFERENCE - MEMOMRY - LINE BY LINE - SUMMARY").center(40) + 20 * "=")
738
+ self.print_memory_trace_statistics(inference_summary)
739
+
740
+ if self.args.training:
741
+ if self.args.speed:
742
+ self.print_fn("\n" + 20 * "=" + ("TRAIN - SPEED - RESULTS").center(40) + 20 * "=")
743
+ self.print_results(train_result_time, "Time in s")
744
+ self.save_to_csv(train_result_time, self.args.train_time_csv_file)
745
+ if self.args.is_tpu:
746
+ self.print_fn(
747
+ "TPU was used for training. Note that the time after compilation stabilized (after ~10 train loss=model.forward(...) + loss.backward() calls) was measured."
748
+ )
749
+
750
+ if self.args.memory:
751
+ self.print_fn("\n" + 20 * "=" + ("TRAIN - MEMORY - RESULTS").center(40) + 20 * "=")
752
+ self.print_results(train_result_memory, type_label="Memory in MB")
753
+ self.save_to_csv(train_result_memory, self.args.train_memory_csv_file)
754
+
755
+ if self.args.trace_memory_line_by_line:
756
+ self.print_fn("\n" + 20 * "=" + ("TRAIN - MEMOMRY - LINE BY LINE - SUMMARY").center(40) + 20 * "=")
757
+ self.print_memory_trace_statistics(train_summary)
758
+
759
+ if self.args.env_print:
760
+ self.print_fn("\n" + 20 * "=" + ("ENVIRONMENT INFORMATION").center(40) + 20 * "=")
761
+ self.print_fn("\n".join([f"- {prop}: {val}" for prop, val in self.environment_info.items()]) + "\n")
762
+
763
+ if self.args.save_to_csv:
764
+ with open(self.args.env_info_csv_file, mode="w", newline="") as csv_file:
765
+ writer = csv.writer(csv_file)
766
+ for key, value in self.environment_info.items():
767
+ writer.writerow([key, value])
768
+
769
+ return BenchmarkOutput(
770
+ inference_result_time,
771
+ inference_result_memory,
772
+ train_result_time,
773
+ train_result_memory,
774
+ inference_summary,
775
+ train_summary,
776
+ )
777
+
778
+ @property
779
+ def environment_info(self):
780
+ if self._environment_info is None:
781
+ info = {}
782
+ info["transformers_version"] = version
783
+ info["framework"] = self.framework
784
+ if self.framework == "PyTorch":
785
+ info["use_torchscript"] = self.args.torchscript
786
+ if self.framework == "TensorFlow":
787
+ info["eager_mode"] = self.args.eager_mode
788
+ info["use_xla"] = self.args.use_xla
789
+ info["framework_version"] = self.framework_version
790
+ info["python_version"] = platform.python_version()
791
+ info["system"] = platform.system()
792
+ info["cpu"] = platform.processor()
793
+ info["architecture"] = platform.architecture()[0]
794
+ info["date"] = datetime.date(datetime.now())
795
+ info["time"] = datetime.time(datetime.now())
796
+ info["fp16"] = self.args.fp16
797
+ info["use_multiprocessing"] = self.args.do_multi_processing
798
+ info["only_pretrain_model"] = self.args.only_pretrain_model
799
+
800
+ if is_psutil_available():
801
+ info["cpu_ram_mb"] = bytes_to_mega_bytes(psutil.virtual_memory().total)
802
+ else:
803
+ logger.warning(
804
+ "Psutil not installed, we won't log available CPU memory."
805
+ "Install psutil (pip install psutil) to log available CPU memory."
806
+ )
807
+ info["cpu_ram_mb"] = "N/A"
808
+
809
+ info["use_gpu"] = self.args.is_gpu
810
+ if self.args.is_gpu:
811
+ info["num_gpus"] = 1 # TODO(PVP) Currently only single GPU is supported
812
+ if is_py3nvml_available():
813
+ nvml.nvmlInit()
814
+ handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
815
+ info["gpu"] = nvml.nvmlDeviceGetName(handle)
816
+ info["gpu_ram_mb"] = bytes_to_mega_bytes(nvml.nvmlDeviceGetMemoryInfo(handle).total)
817
+ info["gpu_power_watts"] = nvml.nvmlDeviceGetPowerManagementLimit(handle) / 1000
818
+ info["gpu_performance_state"] = nvml.nvmlDeviceGetPerformanceState(handle)
819
+ nvml.nvmlShutdown()
820
+ else:
821
+ logger.warning(
822
+ "py3nvml not installed, we won't log GPU memory usage. "
823
+ "Install py3nvml (pip install py3nvml) to log information about GPU."
824
+ )
825
+ info["gpu"] = "N/A"
826
+ info["gpu_ram_mb"] = "N/A"
827
+ info["gpu_power_watts"] = "N/A"
828
+ info["gpu_performance_state"] = "N/A"
829
+
830
+ info["use_tpu"] = self.args.is_tpu
831
+ # TODO(PVP): See if we can add more information about TPU
832
+ # see: https://github.com/pytorch/xla/issues/2180
833
+
834
+ self._environment_info = info
835
+ return self._environment_info
836
+
837
+ def print_results(self, result_dict, type_label):
838
+ self.print_fn(80 * "-")
839
+ self.print_fn(
840
+ "Model Name".center(30) + "Batch Size".center(15) + "Seq Length".center(15) + type_label.center(15)
841
+ )
842
+ self.print_fn(80 * "-")
843
+ for model_name in self.args.model_names:
844
+ for batch_size in result_dict[model_name]["bs"]:
845
+ for sequence_length in result_dict[model_name]["ss"]:
846
+ result = result_dict[model_name]["result"][batch_size][sequence_length]
847
+ if isinstance(result, float):
848
+ result = round(1000 * result) / 1000
849
+ result = "< 0.001" if result == 0.0 else str(result)
850
+ else:
851
+ result = str(result)
852
+ self.print_fn(
853
+ model_name[:30].center(30) + str(batch_size).center(15),
854
+ str(sequence_length).center(15),
855
+ result.center(15),
856
+ )
857
+ self.print_fn(80 * "-")
858
+
859
+ def print_memory_trace_statistics(self, summary: MemorySummary):
860
+ self.print_fn(
861
+ "\nLine by line memory consumption:\n"
862
+ + "\n".join(
863
+ f"{state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}"
864
+ for state in summary.sequential
865
+ )
866
+ )
867
+ self.print_fn(
868
+ "\nLines with top memory consumption:\n"
869
+ + "\n".join(
870
+ f"=> {state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}"
871
+ for state in summary.cumulative[:6]
872
+ )
873
+ )
874
+ self.print_fn(
875
+ "\nLines with lowest memory consumption:\n"
876
+ + "\n".join(
877
+ f"=> {state.frame.filename}:{state.frame.line_number}: mem {state.cpu_gpu}: {state.frame.line_text}"
878
+ for state in summary.cumulative[-6:]
879
+ )
880
+ )
881
+ self.print_fn(f"\nTotal memory increase: {summary.total}")
882
+
883
+ def save_to_csv(self, result_dict, filename):
884
+ if not self.args.save_to_csv:
885
+ return
886
+ self.print_fn("Saving results to csv.")
887
+ with open(filename, mode="w") as csv_file:
888
+
889
+ assert len(self.args.model_names) > 0, f"At least 1 model should be defined, but got {self.model_names}"
890
+
891
+ fieldnames = ["model", "batch_size", "sequence_length"]
892
+ writer = csv.DictWriter(csv_file, fieldnames=fieldnames + ["result"])
893
+ writer.writeheader()
894
+
895
+ for model_name in self.args.model_names:
896
+ result_dict_model = result_dict[model_name]["result"]
897
+ for bs in result_dict_model:
898
+ for ss in result_dict_model[bs]:
899
+ result_model = result_dict_model[bs][ss]
900
+ writer.writerow(
901
+ {
902
+ "model": model_name,
903
+ "batch_size": bs,
904
+ "sequence_length": ss,
905
+ "result": ("{}" if not isinstance(result_model, float) else "{:.4f}").format(
906
+ result_model
907
+ ),
908
+ }
909
+ )
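For context, a minimal sketch of how this `run()` loop is typically driven (illustrative only, not part of this commit; assumes torch is installed and that `gpt2` resolves from the local cache or the Hub):

    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

    args = PyTorchBenchmarkArguments(models=["gpt2"], batch_sizes=[1], sequence_lengths=[64])
    output = PyTorchBenchmark(args).run()
    # run() returns the BenchmarkOutput shown above; index 0 is inference_result_time,
    # keyed as [model_name]["result"][batch_size][sequence_length] -> seconds.
    print(output[0]["gpt2"]["result"][1][64])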
public/gpt-2/transformers/commands/__init__.py ADDED
@@ -0,0 +1,27 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from abc import ABC, abstractmethod
+ from argparse import ArgumentParser
+
+
+ class BaseTransformersCLICommand(ABC):
+     @staticmethod
+     @abstractmethod
+     def register_subcommand(parser: ArgumentParser):
+         raise NotImplementedError()
+
+     @abstractmethod
+     def run(self):
+         raise NotImplementedError()
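Every command registered below follows this two-hook contract: `register_subcommand` wires up an argparse subparser whose `func` default builds the command, and `main()` in transformers_cli.py later calls `args.func(args).run()`. A toy subclass, for illustration only (not shipped in this commit):

    from argparse import ArgumentParser

    class HelloCommand(BaseTransformersCLICommand):
        @staticmethod
        def register_subcommand(parser: ArgumentParser):
            hello_parser = parser.add_parser("hello")
            hello_parser.add_argument("--name", type=str, default="world")
            # argparse dispatch: main() will call args.func(args).run()
            hello_parser.set_defaults(func=lambda args: HelloCommand(args.name))

        def __init__(self, name: str):
            self._name = name

        def run(self):
            print(f"Hello, {self._name}!")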
public/gpt-2/transformers/commands/add_new_model.py ADDED
@@ -0,0 +1,228 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import json
+ import os
+ import shutil
+ from argparse import ArgumentParser, Namespace
+ from pathlib import Path
+ from typing import List
+
+ from ..utils import logging
+ from . import BaseTransformersCLICommand
+
+
+ try:
+     from cookiecutter.main import cookiecutter
+
+     _has_cookiecutter = True
+ except ImportError:
+     _has_cookiecutter = False
+
+ logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+ def add_new_model_command_factory(args: Namespace):
+     return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
+
+
+ class AddNewModelCommand(BaseTransformersCLICommand):
+     @staticmethod
+     def register_subcommand(parser: ArgumentParser):
+         add_new_model_parser = parser.add_parser("add-new-model")
+         add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
+         add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
+         add_new_model_parser.add_argument(
+             "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
+         )
+         add_new_model_parser.set_defaults(func=add_new_model_command_factory)
+
+     def __init__(self, testing: bool, testing_file: str, path=None, *args):
+         self._testing = testing
+         self._testing_file = testing_file
+         self._path = path
+
+     def run(self):
+         if not _has_cookiecutter:
+             raise ImportError(
+                 "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
+                 "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
+             )
+         # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
+         directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
+         if len(directories) > 0:
+             raise ValueError(
+                 "Several directories starting with `cookiecutter-template-` in current working directory. "
+                 "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
+                 "change your working directory."
+             )
+
+         path_to_transformer_root = (
+             Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
+         )
+         path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
+
+         # Execute cookiecutter
+         if not self._testing:
+             cookiecutter(str(path_to_cookiecutter))
+         else:
+             with open(self._testing_file, "r") as configuration_file:
+                 testing_configuration = json.load(configuration_file)
+
+             cookiecutter(
+                 str(path_to_cookiecutter if self._path is None else self._path),
+                 no_input=True,
+                 extra_context=testing_configuration,
+             )
+
+         directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
+
+         # Retrieve configuration
+         with open(directory + "/configuration.json", "r") as configuration_file:
+             configuration = json.load(configuration_file)
+
+         lowercase_model_name = configuration["lowercase_modelname"]
+         pytorch_or_tensorflow = configuration["generate_tensorflow_and_pytorch"]
+         os.remove(f"{directory}/configuration.json")
+
+         output_pytorch = "PyTorch" in pytorch_or_tensorflow
+         output_tensorflow = "TensorFlow" in pytorch_or_tensorflow
+
+         model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
+         os.makedirs(model_dir, exist_ok=True)
+
+         shutil.move(
+             f"{directory}/__init__.py",
+             f"{model_dir}/__init__.py",
+         )
+         shutil.move(
+             f"{directory}/configuration_{lowercase_model_name}.py",
+             f"{model_dir}/configuration_{lowercase_model_name}.py",
+         )
+
+         def remove_copy_lines(path):
+             with open(path, "r") as f:
+                 lines = f.readlines()
+             with open(path, "w") as f:
+                 for line in lines:
+                     if "# Copied from transformers." not in line:
+                         f.write(line)
+
+         if output_pytorch:
+             if not self._testing:
+                 remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")
+
+             shutil.move(
+                 f"{directory}/modeling_{lowercase_model_name}.py",
+                 f"{model_dir}/modeling_{lowercase_model_name}.py",
+             )
+
+             shutil.move(
+                 f"{directory}/test_modeling_{lowercase_model_name}.py",
+                 f"{path_to_transformer_root}/tests/test_modeling_{lowercase_model_name}.py",
+             )
+         else:
+             os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
+             os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")
+
+         if output_tensorflow:
+             if not self._testing:
+                 remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")
+
+             shutil.move(
+                 f"{directory}/modeling_tf_{lowercase_model_name}.py",
+                 f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
+             )
+
+             shutil.move(
+                 f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
+                 f"{path_to_transformer_root}/tests/test_modeling_tf_{lowercase_model_name}.py",
+             )
+         else:
+             os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
+             os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")
+
+         shutil.move(
+             f"{directory}/{lowercase_model_name}.rst",
+             f"{path_to_transformer_root}/docs/source/model_doc/{lowercase_model_name}.rst",
+         )
+
+         shutil.move(
+             f"{directory}/tokenization_{lowercase_model_name}.py",
+             f"{model_dir}/tokenization_{lowercase_model_name}.py",
+         )
+
+         shutil.move(
+             f"{directory}/tokenization_fast_{lowercase_model_name}.py",
+             f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
+         )
+
+         from os import fdopen, remove
+         from shutil import copymode, move
+         from tempfile import mkstemp
+
+         def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
+             # Create temp file
+             fh, abs_path = mkstemp()
+             line_found = False
+             with fdopen(fh, "w") as new_file:
+                 with open(original_file) as old_file:
+                     for line in old_file:
+                         new_file.write(line)
+                         if line_to_copy_below in line:
+                             line_found = True
+                             for line_to_copy in lines_to_copy:
+                                 new_file.write(line_to_copy)
+
+             if not line_found:
+                 raise ValueError(f"Line {line_to_copy_below} was not found in file.")
+
+             # Copy the file permissions from the old file to the new file
+             copymode(original_file, abs_path)
+             # Remove original file
+             remove(original_file)
+             # Move new file
+             move(abs_path, original_file)
+
+         def skip_units(line):
+             return ("generating PyTorch" in line and not output_pytorch) or (
+                 "generating TensorFlow" in line and not output_tensorflow
+             )
+
+         def replace_in_files(path_to_datafile):
+             with open(path_to_datafile) as datafile:
+                 lines_to_copy = []
+                 skip_file = False
+                 skip_snippet = False
+                 for line in datafile:
+                     if "# To replace in: " in line and "##" not in line:
+                         file_to_replace_in = line.split('"')[1]
+                         skip_file = skip_units(line)
+                     elif "# Below: " in line and "##" not in line:
+                         line_to_copy_below = line.split('"')[1]
+                         skip_snippet = skip_units(line)
+                     elif "# End." in line and "##" not in line:
+                         if not skip_file and not skip_snippet:
+                             replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
+
+                         lines_to_copy = []
+                     elif "# Replace with" in line and "##" not in line:
+                         lines_to_copy = []
+                     elif "##" not in line:
+                         lines_to_copy.append(line)
+
+             remove(path_to_datafile)
+
+         replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
+         os.rmdir(directory)
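`replace_in_files` drives the final templating step by scanning the generated `to_replace_<model>.py` for `# To replace in:`, `# Below:`, `# Replace with` and `# End.` markers. A snippet the parser above would accept looks roughly like this (illustrative; `brandnew` and the paths are made up):

    # To replace in: "src/transformers/__init__.py"
    # Below: "from .models.bert import BertConfig"
    from .models.brandnew import BrandNewConfig
    # End.

Everything between the `# Below:` anchor and `# End.` is copied into the target file directly under the first line that contains the anchor string.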
public/gpt-2/transformers/commands/convert.py ADDED
@@ -0,0 +1,179 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from argparse import ArgumentParser, Namespace
+
+ from ..utils import logging
+ from . import BaseTransformersCLICommand
+
+
+ def convert_command_factory(args: Namespace):
+     """
+     Factory function used to convert a model TF 1.0 checkpoint into a PyTorch checkpoint.
+
+     Returns: ConvertCommand
+     """
+     return ConvertCommand(
+         args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
+     )
+
+
+ IMPORT_ERROR_MESSAGE = """
+ transformers can only be used from the command line to convert TensorFlow models to PyTorch if TensorFlow is
+ installed. Please see https://www.tensorflow.org/install/ for installation instructions.
+ """
+
+
+ class ConvertCommand(BaseTransformersCLICommand):
+     @staticmethod
+     def register_subcommand(parser: ArgumentParser):
+         """
+         Register this command to argparse so it's available for the transformers-cli
+
+         Args:
+             parser: Root parser to register command-specific arguments
+         """
+         train_parser = parser.add_parser(
+             "convert",
+             help="CLI tool to convert a model from original author checkpoints to Transformers PyTorch checkpoints.",
+         )
+         train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
+         train_parser.add_argument(
+             "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
+         )
+         train_parser.add_argument(
+             "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
+         )
+         train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
+         train_parser.add_argument(
+             "--finetuning_task_name",
+             type=str,
+             default=None,
+             help="Optional fine-tuning task name if the TF model was a finetuned model.",
+         )
+         train_parser.set_defaults(func=convert_command_factory)
+
+     def __init__(
+         self,
+         model_type: str,
+         tf_checkpoint: str,
+         pytorch_dump_output: str,
+         config: str,
+         finetuning_task_name: str,
+         *args
+     ):
+         self._logger = logging.get_logger("transformers-cli/converting")
+
+         self._logger.info(f"Loading model {model_type}")
+         self._model_type = model_type
+         self._tf_checkpoint = tf_checkpoint
+         self._pytorch_dump_output = pytorch_dump_output
+         self._config = config
+         self._finetuning_task_name = finetuning_task_name
+
+     def run(self):
+         if self._model_type == "albert":
+             try:
+                 from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
+                     convert_tf_checkpoint_to_pytorch,
+                 )
+             except ImportError:
+                 raise ImportError(IMPORT_ERROR_MESSAGE)
+
+             convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
+         elif self._model_type == "bert":
+             try:
+                 from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
+                     convert_tf_checkpoint_to_pytorch,
+                 )
+             except ImportError:
+                 raise ImportError(IMPORT_ERROR_MESSAGE)
+
+             convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
+         elif self._model_type == "funnel":
+             try:
+                 from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
+                     convert_tf_checkpoint_to_pytorch,
+                 )
+             except ImportError:
+                 raise ImportError(IMPORT_ERROR_MESSAGE)
+
+             convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
+         elif self._model_type == "t5":
+             try:
+                 from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
+             except ImportError:
+                 raise ImportError(IMPORT_ERROR_MESSAGE)
+
+             convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
+         elif self._model_type == "gpt":
+             from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
+                 convert_openai_checkpoint_to_pytorch,
+             )
+
+             convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
+         elif self._model_type == "transfo_xl":
+             try:
+                 from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
+                     convert_transfo_xl_checkpoint_to_pytorch,
+                 )
+             except ImportError:
+                 raise ImportError(IMPORT_ERROR_MESSAGE)
+
+             if "ckpt" in self._tf_checkpoint.lower():
+                 TF_CHECKPOINT = self._tf_checkpoint
+                 TF_DATASET_FILE = ""
+             else:
+                 TF_DATASET_FILE = self._tf_checkpoint
+                 TF_CHECKPOINT = ""
+             convert_transfo_xl_checkpoint_to_pytorch(
+                 TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
+             )
+         elif self._model_type == "gpt2":
+             try:
+                 from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
+                     convert_gpt2_checkpoint_to_pytorch,
+                 )
+             except ImportError:
+                 raise ImportError(IMPORT_ERROR_MESSAGE)
+
+             convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
+         elif self._model_type == "xlnet":
+             try:
+                 from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
+                     convert_xlnet_checkpoint_to_pytorch,
+                 )
+             except ImportError:
+                 raise ImportError(IMPORT_ERROR_MESSAGE)
+
+             convert_xlnet_checkpoint_to_pytorch(
+                 self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
+             )
+         elif self._model_type == "xlm":
+             from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
+                 convert_xlm_checkpoint_to_pytorch,
+             )
+
+             convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
+         elif self._model_type == "lxmert":
+             from ..models.lxmert.convert_lxmert_original_pytorch_checkpoint_to_pytorch import (
+                 convert_lxmert_checkpoint_to_pytorch,
+             )
+
+             convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
+         else:
+             raise ValueError(
+                 "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, "
+                 "xlnet, xlm, lxmert]"
+             )
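The command is normally reached via `transformers-cli convert`, but the class can also be driven programmatically; a hedged sketch (all file paths below are placeholders, and TensorFlow must be installed for the TF-checkpoint model types):

    cmd = ConvertCommand(
        model_type="bert",
        tf_checkpoint="/tmp/bert_model.ckpt",
        pytorch_dump_output="/tmp/pytorch_model.bin",
        config="/tmp/bert_config.json",
        finetuning_task_name=None,
    )
    cmd.run()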
public/gpt-2/transformers/commands/download.py ADDED
@@ -0,0 +1,46 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from argparse import ArgumentParser
+
+ from . import BaseTransformersCLICommand
+
+
+ def download_command_factory(args):
+     return DownloadCommand(args.model, args.cache_dir, args.force)
+
+
+ class DownloadCommand(BaseTransformersCLICommand):
+     @staticmethod
+     def register_subcommand(parser: ArgumentParser):
+         download_parser = parser.add_parser("download")
+         download_parser.add_argument(
+             "--cache-dir", type=str, default=None, help="Path to the location where the models are stored."
+         )
+         download_parser.add_argument(
+             "--force", action="store_true", help="Force the model to be downloaded even if it is already in cache-dir."
+         )
+         download_parser.add_argument("model", type=str, help="Name of the model to download.")
+         download_parser.set_defaults(func=download_command_factory)
+
+     def __init__(self, model: str, cache: str, force: bool):
+         self._model = model
+         self._cache = cache
+         self._force = force
+
+     def run(self):
+         from ..models.auto import AutoModel, AutoTokenizer
+
+         AutoModel.from_pretrained(self._model, cache_dir=self._cache, force_download=self._force)
+         AutoTokenizer.from_pretrained(self._model, cache_dir=self._cache, force_download=self._force)
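`run()` is just a cache warm-up; the equivalent inline calls for, say, `transformers-cli download gpt2 --cache-dir ./models` would be (sketch; `./models` is an arbitrary path):

    from transformers import AutoModel, AutoTokenizer

    AutoModel.from_pretrained("gpt2", cache_dir="./models", force_download=False)
    AutoTokenizer.from_pretrained("gpt2", cache_dir="./models", force_download=False)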
public/gpt-2/transformers/commands/env.py ADDED
@@ -0,0 +1,89 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import platform
+ from argparse import ArgumentParser
+
+ from .. import __version__ as version
+ from ..file_utils import is_flax_available, is_tf_available, is_torch_available
+ from . import BaseTransformersCLICommand
+
+
+ def info_command_factory(_):
+     return EnvironmentCommand()
+
+
+ class EnvironmentCommand(BaseTransformersCLICommand):
+     @staticmethod
+     def register_subcommand(parser: ArgumentParser):
+         download_parser = parser.add_parser("env")
+         download_parser.set_defaults(func=info_command_factory)
+
+     def run(self):
+         pt_version = "not installed"
+         pt_cuda_available = "NA"
+         if is_torch_available():
+             import torch
+
+             pt_version = torch.__version__
+             pt_cuda_available = torch.cuda.is_available()
+
+         tf_version = "not installed"
+         tf_cuda_available = "NA"
+         if is_tf_available():
+             import tensorflow as tf
+
+             tf_version = tf.__version__
+             try:
+                 # deprecated in v2.1
+                 tf_cuda_available = tf.test.is_gpu_available()
+             except AttributeError:
+                 # returns list of devices, convert to bool
+                 tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))
+
+         flax_version = "not installed"
+         jax_version = "not installed"
+         jaxlib_version = "not installed"
+         jax_backend = "NA"
+         if is_flax_available():
+             import flax
+             import jax
+             import jaxlib
+
+             flax_version = flax.__version__
+             jax_version = jax.__version__
+             jaxlib_version = jaxlib.__version__
+             jax_backend = jax.lib.xla_bridge.get_backend().platform
+
+         info = {
+             "`transformers` version": version,
+             "Platform": platform.platform(),
+             "Python version": platform.python_version(),
+             "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
+             "TensorFlow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
+             "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
+             "Jax version": f"{jax_version}",
+             "JaxLib version": f"{jaxlib_version}",
+             "Using GPU in script?": "<fill in>",
+             "Using distributed or parallel set-up in script?": "<fill in>",
+         }
+
+         print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the last two points.\n")
+         print(self.format_dict(info))
+
+         return info
+
+     @staticmethod
+     def format_dict(d):
+         return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
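`format_dict` renders one bullet per entry, which is exactly the block users paste into GitHub issues; a quick check with illustrative values:

    print(EnvironmentCommand.format_dict({"Platform": "Linux", "Python version": "3.8.10"}))
    # - Platform: Linux
    # - Python version: 3.8.10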
public/gpt-2/transformers/commands/lfs.py ADDED
@@ -0,0 +1,227 @@
+ """
+ Implementation of a custom transfer agent for the transfer type "multipart" for git-lfs.
+
+ Inspired by: github.com/cbartz/git-lfs-swift-transfer-agent/blob/master/git_lfs_swift_transfer.py
+
+ Spec is: github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md
+
+ To launch a debugger while developing, add this to your git config:
+
+     [lfs "customtransfer.multipart"]
+     path = /path/to/transformers/.env/bin/python
+     args = -m debugpy --listen 5678 --wait-for-client /path/to/transformers/src/transformers/commands/transformers_cli.py lfs-multipart-upload
+ """
+
+ import json
+ import os
+ import subprocess
+ import sys
+ import warnings
+ from argparse import ArgumentParser
+ from contextlib import AbstractContextManager
+ from typing import Dict, List, Optional
+
+ import requests
+
+ from ..utils import logging
+ from . import BaseTransformersCLICommand
+
+
+ logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+ LFS_MULTIPART_UPLOAD_COMMAND = "lfs-multipart-upload"
+
+
+ class LfsCommands(BaseTransformersCLICommand):
+     """
+     Implementation of a custom transfer agent for the transfer type "multipart" for git-lfs. This lets users upload
+     large files >5GB 🔥. The spec for the LFS custom transfer agent is:
+     https://github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md
+
+     This introduces two commands to the CLI:
+
+     1. $ transformers-cli lfs-enable-largefiles
+
+     This should be executed once for each model repo that contains a model file >5GB. It's documented in the error
+     message you get if you just try to git push a 5GB file without having enabled it before.
+
+     2. $ transformers-cli lfs-multipart-upload
+
+     This command is called by lfs directly and is not meant to be called by the user.
+     """
+
+     @staticmethod
+     def register_subcommand(parser: ArgumentParser):
+         enable_parser = parser.add_parser(
+             "lfs-enable-largefiles",
+             help="Deprecated: use `huggingface-cli` instead. "
+             "Configure your repository to enable upload of files > 5GB.",
+         )
+         enable_parser.add_argument("path", type=str, help="Local path to repository you want to configure.")
+         enable_parser.set_defaults(func=lambda args: LfsEnableCommand(args))
+
+         upload_parser = parser.add_parser(
+             LFS_MULTIPART_UPLOAD_COMMAND,
+             help="Deprecated: use `huggingface-cli` instead. "
+             "Command will get called by git-lfs, do not call it directly.",
+         )
+         upload_parser.set_defaults(func=lambda args: LfsUploadCommand(args))
+
+
+ class LfsEnableCommand:
+     def __init__(self, args):
+         self.args = args
+
+     def run(self):
+         warnings.warn(
+             "Managing repositories through transformers-cli is deprecated. Please use `huggingface-cli` instead."
+         )
+         local_path = os.path.abspath(self.args.path)
+         if not os.path.isdir(local_path):
+             print("This does not look like a valid git repo.")
+             exit(1)
+         subprocess.run(
+             "git config lfs.customtransfer.multipart.path transformers-cli".split(), check=True, cwd=local_path
+         )
+         subprocess.run(
+             f"git config lfs.customtransfer.multipart.args {LFS_MULTIPART_UPLOAD_COMMAND}".split(),
+             check=True,
+             cwd=local_path,
+         )
+         print("Local repo set up for largefiles")
+
+
+ def write_msg(msg: Dict):
+     """Write out the message in Line delimited JSON."""
+     msg = json.dumps(msg) + "\n"
+     sys.stdout.write(msg)
+     sys.stdout.flush()
+
+
+ def read_msg() -> Optional[Dict]:
+     """Read Line delimited JSON from stdin."""
+     msg = json.loads(sys.stdin.readline().strip())
+
+     if "terminate" in (msg.get("type"), msg.get("event")):
+         # terminate message received
+         return None
+
+     if msg.get("event") not in ("download", "upload"):
+         logger.critical("Received unexpected message")
+         sys.exit(1)
+
+     return msg
+
+
+ class FileSlice(AbstractContextManager):
+     """
+     File-like object that only reads a slice of a file.
+
+     Inspired by stackoverflow.com/a/29838711/593036
+     """
+
+     def __init__(self, filepath: str, seek_from: int, read_limit: int):
+         self.filepath = filepath
+         self.seek_from = seek_from
+         self.read_limit = read_limit
+         self.n_seen = 0
+
+     def __enter__(self):
+         self.f = open(self.filepath, "rb")
+         self.f.seek(self.seek_from)
+         return self
+
+     def __len__(self):
+         total_length = os.fstat(self.f.fileno()).st_size
+         return min(self.read_limit, total_length - self.seek_from)
+
+     def read(self, n=-1):
+         if self.n_seen >= self.read_limit:
+             return b""
+         remaining_amount = self.read_limit - self.n_seen
+         data = self.f.read(remaining_amount if n < 0 else min(n, remaining_amount))
+         self.n_seen += len(data)
+         return data
+
+     def __iter__(self):
+         yield self.read(n=4 * 1024 * 1024)
+
+     def __exit__(self, *args):
+         self.f.close()
+
+
+ class LfsUploadCommand:
+     def __init__(self, args):
+         self.args = args
+
+     def run(self):
+         # Immediately after invoking a custom transfer process, git-lfs
+         # sends initiation data to the process over stdin.
+         # This tells the process useful information about the configuration.
+         init_msg = json.loads(sys.stdin.readline().strip())
+         if not (init_msg.get("event") == "init" and init_msg.get("operation") == "upload"):
+             write_msg({"error": {"code": 32, "message": "Wrong lfs init operation"}})
+             sys.exit(1)
+
+         # The transfer process should use the information it needs from the
+         # initiation structure, and also perform any one-off setup tasks it
+         # needs to do. It should then respond on stdout with a simple empty
+         # confirmation structure, as follows:
+         write_msg({})
+
+         # After the initiation exchange, git-lfs will send any number of
+         # transfer requests to the stdin of the transfer process, in a serial sequence.
+         while True:
+             msg = read_msg()
+             if msg is None:
+                 # When all transfers have been processed, git-lfs will send
+                 # a terminate event to the stdin of the transfer process.
+                 # On receiving this message the transfer process should
+                 # clean up and terminate. No response is expected.
+                 sys.exit(0)
+
+             oid = msg["oid"]
+             filepath = msg["path"]
+             completion_url = msg["action"]["href"]
+             header = msg["action"]["header"]
+             chunk_size = int(header.pop("chunk_size"))
+             presigned_urls: List[str] = list(header.values())
+
+             parts = []
+             for i, presigned_url in enumerate(presigned_urls):
+                 with FileSlice(filepath, seek_from=i * chunk_size, read_limit=chunk_size) as data:
+                     r = requests.put(presigned_url, data=data)
+                     r.raise_for_status()
+                     parts.append(
+                         {
+                             "etag": r.headers.get("etag"),
+                             "partNumber": i + 1,
+                         }
+                     )
+                     # In order to support progress reporting while data is uploading / downloading,
+                     # the transfer process should post messages to stdout
+                     write_msg(
+                         {
+                             "event": "progress",
+                             "oid": oid,
+                             "bytesSoFar": (i + 1) * chunk_size,
+                             "bytesSinceLast": chunk_size,
+                         }
+                     )
+                     # Not precise but that's ok.
+
+             r = requests.post(
+                 completion_url,
+                 json={
+                     "oid": oid,
+                     "parts": parts,
+                 },
+             )
+             r.raise_for_status()
+
+             write_msg({"event": "complete", "oid": oid})
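`FileSlice` is what keeps the multipart upload constant-memory: each PUT streams at most `read_limit` bytes starting at `seek_from`, so a >5GB file never has to fit in RAM. A standalone sketch (the file name is a placeholder):

    # Read bytes [1024, 2048) of a local file without touching the rest of it.
    with FileSlice("pytorch_model.bin", seek_from=1024, read_limit=1024) as part:
        chunk = part.read()       # at most 1024 bytes
        assert len(part) <= 1024  # __len__ is the slice size, capped at end-of-file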
public/gpt-2/transformers/commands/run.py ADDED
@@ -0,0 +1,112 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from argparse import ArgumentParser
+
+ from ..pipelines import SUPPORTED_TASKS, TASK_ALIASES, Pipeline, PipelineDataFormat, pipeline
+ from ..utils import logging
+ from . import BaseTransformersCLICommand
+
+
+ logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+
+
+ def try_infer_format_from_ext(path: str):
+     if not path:
+         return "pipe"
+
+     for ext in PipelineDataFormat.SUPPORTED_FORMATS:
+         if path.endswith(ext):
+             return ext
+
+     raise Exception(
+         f"Unable to determine file format from file extension {path}. "
+         f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
+     )
+
+
+ def run_command_factory(args):
+     nlp = pipeline(
+         task=args.task,
+         model=args.model if args.model else None,
+         config=args.config,
+         tokenizer=args.tokenizer,
+         device=args.device,
+     )
+     format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
+     reader = PipelineDataFormat.from_str(
+         format=format,
+         output_path=args.output,
+         input_path=args.input,
+         column=args.column if args.column else nlp.default_input_names,
+         overwrite=args.overwrite,
+     )
+     return RunCommand(nlp, reader)
+
+
+ class RunCommand(BaseTransformersCLICommand):
+     def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
+         self._nlp = nlp
+         self._reader = reader
+
+     @staticmethod
+     def register_subcommand(parser: ArgumentParser):
+         run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
+         run_parser.add_argument(
+             "--task", choices=list(SUPPORTED_TASKS.keys()) + list(TASK_ALIASES.keys()), help="Task to run"
+         )
+         run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
+         run_parser.add_argument("--output", type=str, help="Path to the file where results will be written.")
+         run_parser.add_argument("--model", type=str, help="Name or path of the model to instantiate.")
+         run_parser.add_argument("--config", type=str, help="Name or path of the model's config to instantiate.")
+         run_parser.add_argument(
+             "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
+         )
+         run_parser.add_argument(
+             "--column",
+             type=str,
+             help="Name of the column to use as input. (For multi-column inputs such as QA, use column1,column2)",
+         )
+         run_parser.add_argument(
+             "--format",
+             type=str,
+             default="infer",
+             choices=PipelineDataFormat.SUPPORTED_FORMATS,
+             help="Input format to read from",
+         )
+         run_parser.add_argument(
+             "--device",
+             type=int,
+             default=-1,
+             help="Device to run on: -1 for CPU, >= 0 for the corresponding GPU (default: -1)",
+         )
+         run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
+         run_parser.set_defaults(func=run_command_factory)
+
+     def run(self):
+         nlp, outputs = self._nlp, []
+
+         for entry in self._reader:
+             output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
+             if isinstance(output, dict):
+                 outputs.append(output)
+             else:
+                 outputs += output
+
+         # Saving data
+         if self._nlp.binary_output:
+             binary_path = self._reader.save_binary(outputs)
+             logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
+         else:
+             self._reader.save(outputs)
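Under the hood, `run` is a thin loop over a `PipelineDataFormat` reader feeding a standard `pipeline`; the same thing by hand (sketch; downloads a default model on first use):

    from transformers import pipeline

    nlp = pipeline("sentiment-analysis")
    print([nlp(text) for text in ["I love this.", "This is terrible."]])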
public/gpt-2/transformers/commands/serving.py ADDED
@@ -0,0 +1,231 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from argparse import ArgumentParser, Namespace
+ from typing import Any, List, Optional
+
+ from ..pipelines import SUPPORTED_TASKS, TASK_ALIASES, Pipeline, pipeline
+ from ..utils import logging
+ from . import BaseTransformersCLICommand
+
+
+ try:
+     from fastapi import Body, FastAPI, HTTPException
+     from fastapi.routing import APIRoute
+     from pydantic import BaseModel
+     from starlette.responses import JSONResponse
+     from uvicorn import run
+
+     _serve_dependencies_installed = True
+ except (ImportError, AttributeError):
+     BaseModel = object
+
+     def Body(*x, **y):
+         pass
+
+     _serve_dependencies_installed = False
+
+
+ logger = logging.get_logger("transformers-cli/serving")
+
+
+ def serve_command_factory(args: Namespace):
+     """
+     Factory function used to instantiate the serving server from provided command line arguments.
+
+     Returns: ServeCommand
+     """
+     nlp = pipeline(
+         task=args.task,
+         model=args.model if args.model else None,
+         config=args.config,
+         tokenizer=args.tokenizer,
+         device=args.device,
+     )
+     return ServeCommand(nlp, args.host, args.port, args.workers)
+
+
+ class ServeModelInfoResult(BaseModel):
+     """
+     Expose model information
+     """
+
+     infos: dict
+
+
+ class ServeTokenizeResult(BaseModel):
+     """
+     Tokenize result model
+     """
+
+     tokens: List[str]
+     tokens_ids: Optional[List[int]]
+
+
+ class ServeDeTokenizeResult(BaseModel):
+     """
+     DeTokenize result model
+     """
+
+     text: str
+
+
+ class ServeForwardResult(BaseModel):
+     """
+     Forward result model
+     """
+
+     output: Any
+
+
+ class ServeCommand(BaseTransformersCLICommand):
+     @staticmethod
+     def register_subcommand(parser: ArgumentParser):
+         """
+         Register this command to argparse so it's available for the transformers-cli
+
+         Args:
+             parser: Root parser to register command-specific arguments
+         """
+         serve_parser = parser.add_parser(
+             "serve", help="CLI tool to run inference requests through REST endpoints."
+         )
+         serve_parser.add_argument(
+             "--task",
+             type=str,
+             choices=list(SUPPORTED_TASKS.keys()) + list(TASK_ALIASES.keys()),
+             help="The task to run the pipeline on",
+         )
+         serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
+         serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
+         serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
+         serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
+         serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
+         serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
+         serve_parser.add_argument(
+             "--device",
+             type=int,
+             default=-1,
+             help="Device to run on: -1 for CPU, >= 0 for the corresponding GPU (default: -1)",
+         )
+         serve_parser.set_defaults(func=serve_command_factory)
+
+     def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
+         self._pipeline = pipeline
+
+         self.host = host
+         self.port = port
+         self.workers = workers
+
+         if not _serve_dependencies_installed:
+             raise RuntimeError(
+                 "Using serve command requires FastAPI and uvicorn. "
+                 'Please install transformers with [serving]: pip install "transformers[serving]". '
+                 "Or install FastAPI and uvicorn separately."
+             )
+         else:
+             logger.info(f"Serving model over {host}:{port}")
+             self._app = FastAPI(
+                 routes=[
+                     APIRoute(
+                         "/",
+                         self.model_info,
+                         response_model=ServeModelInfoResult,
+                         response_class=JSONResponse,
+                         methods=["GET"],
+                     ),
+                     APIRoute(
+                         "/tokenize",
+                         self.tokenize,
+                         response_model=ServeTokenizeResult,
+                         response_class=JSONResponse,
+                         methods=["POST"],
+                     ),
+                     APIRoute(
+                         "/detokenize",
+                         self.detokenize,
+                         response_model=ServeDeTokenizeResult,
+                         response_class=JSONResponse,
+                         methods=["POST"],
+                     ),
+                     APIRoute(
+                         "/forward",
+                         self.forward,
+                         response_model=ServeForwardResult,
+                         response_class=JSONResponse,
+                         methods=["POST"],
+                     ),
+                 ],
+                 timeout=600,
+             )
+
+     def run(self):
+         run(self._app, host=self.host, port=self.port, workers=self.workers)
+
+     def model_info(self):
+         return ServeModelInfoResult(infos=vars(self._pipeline.model.config))
+
+     def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
+         """
+         Tokenize the provided input and optionally return the corresponding token ids:
+
+         - **text_input**: String to tokenize
+         - **return_ids**: Boolean flag indicating whether the tokens should be converted to their integer mapping
+         """
+         try:
+             tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
+
+             if return_ids:
+                 tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
+                 return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
+             else:
+                 return ServeTokenizeResult(tokens=tokens_txt)
+
+         except Exception as e:
+             raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
+
+     def detokenize(
+         self,
+         tokens_ids: List[int] = Body(None, embed=True),
+         skip_special_tokens: bool = Body(False, embed=True),
+         cleanup_tokenization_spaces: bool = Body(True, embed=True),
+     ):
+         """
+         Detokenize the provided token ids into readable text:
+
+         - **tokens_ids**: List of token ids
+         - **skip_special_tokens**: Flag indicating that special tokens should not be decoded
+         - **cleanup_tokenization_spaces**: Flag indicating whether to remove leading/trailing and extra intermediate spaces
+         """
+         try:
+             decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
+             return ServeDeTokenizeResult(model="", text=decoded_str)
+         except Exception as e:
+             raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
+
+     async def forward(self, inputs=Body(None, embed=True)):
+         """
+         Run the provided inputs through the underlying pipeline:
+
+         - **inputs**: Input to run the pipeline on
+         """
+         # Check we don't have empty string
+         if len(inputs) == 0:
+             return ServeForwardResult(output=[], attention=[])
+
+         try:
+             # Forward through the model
+             output = self._pipeline(inputs)
+             return ServeForwardResult(output=output)
+         except Exception as e:
+             raise HTTPException(500, {"error": str(e)})
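Because every route parameter uses `Body(..., embed=True)`, clients POST a JSON object keyed by parameter name. Against a server started with e.g. `transformers-cli serve --task feature-extraction` on the defaults above, a request would look like this (sketch):

    import requests

    resp = requests.post(
        "http://localhost:8888/tokenize",
        json={"text_input": "Hello world", "return_ids": True},
    )
    print(resp.json())  # {"tokens": [...], "tokens_ids": [...]}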
public/gpt-2/transformers/commands/train.py ADDED
@@ -0,0 +1,160 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import os
+ from argparse import ArgumentParser, Namespace
+
+ from ..data import SingleSentenceClassificationProcessor as Processor
+ from ..file_utils import is_tf_available, is_torch_available
+ from ..pipelines import TextClassificationPipeline
+ from ..utils import logging
+ from . import BaseTransformersCLICommand
+
+
+ if not is_tf_available() and not is_torch_available():
+     raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
+
+ # TF training parameters
+ USE_XLA = False
+ USE_AMP = False
+
+
+ def train_command_factory(args: Namespace):
+     """
+     Factory function used to instantiate the training command from provided command line arguments.
+
+     Returns: TrainCommand
+     """
+     return TrainCommand(args)
+
+
+ class TrainCommand(BaseTransformersCLICommand):
+     @staticmethod
+     def register_subcommand(parser: ArgumentParser):
+         """
+         Register this command to argparse so it's available for the transformers-cli
+
+         Args:
+             parser: Root parser to register command-specific arguments
+         """
+         train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
+
+         train_parser.add_argument(
+             "--train_data",
+             type=str,
+             required=True,
+             help="Path to the training (and optionally evaluation) dataset, as a csv with tab-separated labels and sentences.",
+         )
+         train_parser.add_argument(
+             "--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
+         )
+         train_parser.add_argument(
+             "--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
+         )
+         train_parser.add_argument(
+             "--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
+         )
+         train_parser.add_argument(
+             "--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
+         )
+
+         train_parser.add_argument("--validation_data", type=str, default="", help="Path to the validation dataset.")
+         train_parser.add_argument(
+             "--validation_split",
+             type=float,
+             default=0.1,
+             help="If no validation dataset is provided, fraction of the train dataset to use as validation.",
+         )
+
+         train_parser.add_argument("--output", type=str, default="./", help="Path to save the trained model.")
+
+         train_parser.add_argument(
+             "--task", type=str, default="text_classification", help="Task to train the model on."
+         )
+         train_parser.add_argument(
+             "--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
+         )
+         train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
+         train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
+         train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
+         train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
+         train_parser.set_defaults(func=train_command_factory)
+
+     def __init__(self, args: Namespace):
+         self.logger = logging.get_logger("transformers-cli/training")
+
+         self.framework = "tf" if is_tf_available() else "torch"
+
+         os.makedirs(args.output, exist_ok=True)
+         self.output = args.output
+
+         self.column_label = args.column_label
+         self.column_text = args.column_text
+         self.column_id = args.column_id
+
+         self.logger.info(f"Loading {args.task} pipeline for {args.model}")
+         if args.task == "text_classification":
+             self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
+         elif args.task == "token_classification":
+             raise NotImplementedError
+         elif args.task == "question_answering":
+             raise NotImplementedError
+
+         self.logger.info(f"Loading dataset from {args.train_data}")
+         self.train_dataset = Processor.create_from_csv(
+             args.train_data,
+             column_label=args.column_label,
+             column_text=args.column_text,
+             column_id=args.column_id,
+             skip_first_row=args.skip_first_row,
+         )
+         self.valid_dataset = None
+         if args.validation_data:
+             self.logger.info(f"Loading validation dataset from {args.validation_data}")
+             self.valid_dataset = Processor.create_from_csv(
+                 args.validation_data,
+                 column_label=args.column_label,
+                 column_text=args.column_text,
+                 column_id=args.column_id,
+                 skip_first_row=args.skip_first_row,
+             )
+
+         self.validation_split = args.validation_split
+         self.train_batch_size = args.train_batch_size
+         self.valid_batch_size = args.valid_batch_size
+         self.learning_rate = args.learning_rate
+         self.adam_epsilon = args.adam_epsilon
+
+     def run(self):
+         if self.framework == "tf":
+             return self.run_tf()
+         return self.run_torch()
+
+     def run_torch(self):
+         raise NotImplementedError
+
+     def run_tf(self):
+         self.pipeline.fit(
+             self.train_dataset,
+             validation_data=self.valid_dataset,
+             validation_split=self.validation_split,
+             learning_rate=self.learning_rate,
+             adam_epsilon=self.adam_epsilon,
+             train_batch_size=self.train_batch_size,
+             valid_batch_size=self.valid_batch_size,
+         )
+
+         # Save trained pipeline
+         self.pipeline.save_pretrained(self.output)
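The processor reads rows whose columns line up with `--column_label`/`--column_text`/`--column_id` (defaults 0, 1 and 2); a toy tab-separated dataset matching those defaults (illustrative only, file name is arbitrary):

    rows = [
        "0\tThis movie was dull.\t1",
        "1\tAbsolutely loved it.\t2",
    ]
    with open("train.tsv", "w") as f:
        f.write("\n".join(rows) + "\n")
    # then: transformers-cli train --train_data train.tsv --model bert-base-uncased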
public/gpt-2/transformers/commands/transformers_cli.py ADDED
@@ -0,0 +1,55 @@
+ #!/usr/bin/env python
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from argparse import ArgumentParser
+
+ from .add_new_model import AddNewModelCommand
+ from .convert import ConvertCommand
+ from .download import DownloadCommand
+ from .env import EnvironmentCommand
+ from .lfs import LfsCommands
+ from .run import RunCommand
+ from .serving import ServeCommand
+ from .user import UserCommands
+
+
+ def main():
+     parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
+     commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
+
+     # Register commands
+     ConvertCommand.register_subcommand(commands_parser)
+     DownloadCommand.register_subcommand(commands_parser)
+     EnvironmentCommand.register_subcommand(commands_parser)
+     RunCommand.register_subcommand(commands_parser)
+     ServeCommand.register_subcommand(commands_parser)
+     UserCommands.register_subcommand(commands_parser)
+     AddNewModelCommand.register_subcommand(commands_parser)
+     LfsCommands.register_subcommand(commands_parser)
+
+     # Let's go
+     args = parser.parse_args()
+
+     if not hasattr(args, "func"):
+         parser.print_help()
+         exit(1)
+
+     # Run
+     service = args.func(args)
+     service.run()
+
+
+ if __name__ == "__main__":
+     main()