PetraAI committed on
Commit
8eddc47
1 Parent(s): ec0f90b

Upload 5 files

Files changed (5)
  1. .gitignore +34 -3
  2. LICENSE +21 -201
  3. README.md +279 -276
  4. README_zh.md +330 -0
  5. setup.py +135 -6
.gitignore CHANGED
@@ -20,7 +20,6 @@ parts/
  sdist/
  var/
  wheels/
- pip-wheel-metadata/
  share/python-wheels/
  *.egg-info/
  .installed.cfg
@@ -50,6 +49,7 @@ coverage.xml
  *.py,cover
  .hypothesis/
  .pytest_cache/
+ cover/
 
  # Translations
  *.mo
@@ -72,6 +72,7 @@ instance/
  docs/_build/
 
  # PyBuilder
+ .pybuilder/
  target/
 
  # Jupyter Notebook
@@ -82,7 +83,9 @@ profile_default/
  ipython_config.py
 
  # pyenv
- .python-version
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
 
  # pipenv
  # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
@@ -91,7 +94,22 @@ ipython_config.py
  # install all needed dependencies.
  #Pipfile.lock
 
- # PEP 582; used by e.g. github.com/David-OConnor/pyflow
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
  __pypackages__/
 
  # Celery stuff
@@ -127,3 +145,16 @@ dmypy.json
 
  # Pyre type checker
  .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
LICENSE CHANGED
@@ -1,201 +1,21 @@
1
- Apache License
2
- Version 2.0, January 2004
3
- http://www.apache.org/licenses/
4
-
5
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
-
7
- 1. Definitions.
8
-
9
- "License" shall mean the terms and conditions for use, reproduction,
10
- and distribution as defined by Sections 1 through 9 of this document.
11
-
12
- "Licensor" shall mean the copyright owner or entity authorized by
13
- the copyright owner that is granting the License.
14
-
15
- "Legal Entity" shall mean the union of the acting entity and all
16
- other entities that control, are controlled by, or are under common
17
- control with that entity. For the purposes of this definition,
18
- "control" means (i) the power, direct or indirect, to cause the
19
- direction or management of such entity, whether by contract or
20
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
- outstanding shares, or (iii) beneficial ownership of such entity.
22
-
23
- "You" (or "Your") shall mean an individual or Legal Entity
24
- exercising permissions granted by this License.
25
-
26
- "Source" form shall mean the preferred form for making modifications,
27
- including but not limited to software source code, documentation
28
- source, and configuration files.
29
-
30
- "Object" form shall mean any form resulting from mechanical
31
- transformation or translation of a Source form, including but
32
- not limited to compiled object code, generated documentation,
33
- and conversions to other media types.
34
-
35
- "Work" shall mean the work of authorship, whether in Source or
36
- Object form, made available under the License, as indicated by a
37
- copyright notice that is included in or attached to the work
38
- (an example is provided in the Appendix below).
39
-
40
- "Derivative Works" shall mean any work, whether in Source or Object
41
- form, that is based on (or derived from) the Work and for which the
42
- editorial revisions, annotations, elaborations, or other modifications
43
- represent, as a whole, an original work of authorship. For the purposes
44
- of this License, Derivative Works shall not include works that remain
45
- separable from, or merely link (or bind by name) to the interfaces of,
46
- the Work and Derivative Works thereof.
47
-
48
- "Contribution" shall mean any work of authorship, including
49
- the original version of the Work and any modifications or additions
50
- to that Work or Derivative Works thereof, that is intentionally
51
- submitted to Licensor for inclusion in the Work by the copyright owner
52
- or by an individual or Legal Entity authorized to submit on behalf of
53
- the copyright owner. For the purposes of this definition, "submitted"
54
- means any form of electronic, verbal, or written communication sent
55
- to the Licensor or its representatives, including but not limited to
56
- communication on electronic mailing lists, source code control systems,
57
- and issue tracking systems that are managed by, or on behalf of, the
58
- Licensor for the purpose of discussing and improving the Work, but
59
- excluding communication that is conspicuously marked or otherwise
60
- designated in writing by the copyright owner as "Not a Contribution."
61
-
62
- "Contributor" shall mean Licensor and any individual or Legal Entity
63
- on behalf of whom a Contribution has been received by Licensor and
64
- subsequently incorporated within the Work.
65
-
66
- 2. Grant of Copyright License. Subject to the terms and conditions of
67
- this License, each Contributor hereby grants to You a perpetual,
68
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
- copyright license to reproduce, prepare Derivative Works of,
70
- publicly display, publicly perform, sublicense, and distribute the
71
- Work and such Derivative Works in Source or Object form.
72
-
73
- 3. Grant of Patent License. Subject to the terms and conditions of
74
- this License, each Contributor hereby grants to You a perpetual,
75
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
- (except as stated in this section) patent license to make, have made,
77
- use, offer to sell, sell, import, and otherwise transfer the Work,
78
- where such license applies only to those patent claims licensable
79
- by such Contributor that are necessarily infringed by their
80
- Contribution(s) alone or by combination of their Contribution(s)
81
- with the Work to which such Contribution(s) was submitted. If You
82
- institute patent litigation against any entity (including a
83
- cross-claim or counterclaim in a lawsuit) alleging that the Work
84
- or a Contribution incorporated within the Work constitutes direct
85
- or contributory patent infringement, then any patent licenses
86
- granted to You under this License for that Work shall terminate
87
- as of the date such litigation is filed.
88
-
89
- 4. Redistribution. You may reproduce and distribute copies of the
90
- Work or Derivative Works thereof in any medium, with or without
91
- modifications, and in Source or Object form, provided that You
92
- meet the following conditions:
93
-
94
- (a) You must give any other recipients of the Work or
95
- Derivative Works a copy of this License; and
96
-
97
- (b) You must cause any modified files to carry prominent notices
98
- stating that You changed the files; and
99
-
100
- (c) You must retain, in the Source form of any Derivative Works
101
- that You distribute, all copyright, patent, trademark, and
102
- attribution notices from the Source form of the Work,
103
- excluding those notices that do not pertain to any part of
104
- the Derivative Works; and
105
-
106
- (d) If the Work includes a "NOTICE" text file as part of its
107
- distribution, then any Derivative Works that You distribute must
108
- include a readable copy of the attribution notices contained
109
- within such NOTICE file, excluding those notices that do not
110
- pertain to any part of the Derivative Works, in at least one
111
- of the following places: within a NOTICE text file distributed
112
- as part of the Derivative Works; within the Source form or
113
- documentation, if provided along with the Derivative Works; or,
114
- within a display generated by the Derivative Works, if and
115
- wherever such third-party notices normally appear. The contents
116
- of the NOTICE file are for informational purposes only and
117
- do not modify the License. You may add Your own attribution
118
- notices within Derivative Works that You distribute, alongside
119
- or as an addendum to the NOTICE text from the Work, provided
120
- that such additional attribution notices cannot be construed
121
- as modifying the License.
122
-
123
- You may add Your own copyright statement to Your modifications and
124
- may provide additional or different license terms and conditions
125
- for use, reproduction, or distribution of Your modifications, or
126
- for any such Derivative Works as a whole, provided Your use,
127
- reproduction, and distribution of the Work otherwise complies with
128
- the conditions stated in this License.
129
-
130
- 5. Submission of Contributions. Unless You explicitly state otherwise,
131
- any Contribution intentionally submitted for inclusion in the Work
132
- by You to the Licensor shall be under the terms and conditions of
133
- this License, without any additional terms or conditions.
134
- Notwithstanding the above, nothing herein shall supersede or modify
135
- the terms of any separate license agreement you may have executed
136
- with Licensor regarding such Contributions.
137
-
138
- 6. Trademarks. This License does not grant permission to use the trade
139
- names, trademarks, service marks, or product names of the Licensor,
140
- except as required for reasonable and customary use in describing the
141
- origin of the Work and reproducing the content of the NOTICE file.
142
-
143
- 7. Disclaimer of Warranty. Unless required by applicable law or
144
- agreed to in writing, Licensor provides the Work (and each
145
- Contributor provides its Contributions) on an "AS IS" BASIS,
146
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
- implied, including, without limitation, any warranties or conditions
148
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
- PARTICULAR PURPOSE. You are solely responsible for determining the
150
- appropriateness of using or redistributing the Work and assume any
151
- risks associated with Your exercise of permissions under this License.
152
-
153
- 8. Limitation of Liability. In no event and under no legal theory,
154
- whether in tort (including negligence), contract, or otherwise,
155
- unless required by applicable law (such as deliberate and grossly
156
- negligent acts) or agreed to in writing, shall any Contributor be
157
- liable to You for damages, including any direct, indirect, special,
158
- incidental, or consequential damages of any character arising as a
159
- result of this License or out of the use or inability to use the
160
- Work (including but not limited to damages for loss of goodwill,
161
- work stoppage, computer failure or malfunction, or any and all
162
- other commercial damages or losses), even if such Contributor
163
- has been advised of the possibility of such damages.
164
-
165
- 9. Accepting Warranty or Additional Liability. While redistributing
166
- the Work or Derivative Works thereof, You may choose to offer,
167
- and charge a fee for, acceptance of support, warranty, indemnity,
168
- or other liability obligations and/or rights consistent with this
169
- License. However, in accepting such obligations, You may act only
170
- on Your own behalf and on Your sole responsibility, not on behalf
171
- of any other Contributor, and only if You agree to indemnify,
172
- defend, and hold each Contributor harmless for any liability
173
- incurred by, or claims asserted against, such Contributor by reason
174
- of your accepting any such warranty or additional liability.
175
-
176
- END OF TERMS AND CONDITIONS
177
-
178
- APPENDIX: How to apply the Apache License to your work.
179
-
180
- To apply the Apache License to your work, attach the following
181
- boilerplate notice, with the fields enclosed by brackets "[]"
182
- replaced with your own identifying information. (Don't include
183
- the brackets!) The text should be enclosed in the appropriate
184
- comment syntax for the file format. We also recommend that a
185
- file or class name and description of purpose be included on the
186
- same "printed page" as the copyright notice for easier
187
- identification within third-party archives.
188
-
189
- Copyright [yyyy] [name of copyright owner]
190
-
191
- Licensed under the Apache License, Version 2.0 (the "License");
192
- you may not use this file except in compliance with the License.
193
- You may obtain a copy of the License at
194
-
195
- http://www.apache.org/licenses/LICENSE-2.0
196
-
197
- Unless required by applicable law or agreed to in writing, software
198
- distributed under the License is distributed on an "AS IS" BASIS,
199
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
- See the License for the specific language governing permissions and
201
- limitations under the License.
 
1
+ MIT License
2
+
3
+ Copyright (c) 2023 潘其威(William)
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
README.md CHANGED
@@ -1,336 +1,339 @@
1
- # StableLM: Stability AI Language Models
2
-
3
- ![Stochastic Parrot](/assets/mascot.png)
4
- <br/>*“A Stochastic Parrot, flat design, vector art” — [Stable Diffusion XL](https://clipdrop.co/stable-diffusion)*
5
-
6
- This repository contains Stability AI's ongoing development of the StableLM series of language models and will be continuously updated with new checkpoints. The following provides an overview of all currently available models. More coming soon.
7
-
8
- ## News
9
-
10
- *April 28, 2023*
11
-
12
- - Released StableVicuna-13B, our RLHF fine-tune of [Vicuna-13B v0](https://huggingface.co/lmsys/vicuna-13b-delta-v0), which itself is a fine-tune of [LLaMA-13B](https://github.com/facebookresearch/llama). Delta weights over the original Llama model is released under ([CC BY-NC-SA-4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/)).
13
-
14
- *April 20, 2023*
15
-
16
- - Released initial set of StableLM-alpha models, with 3B and 7B parameters. 15B and 30B models are on the way. Base models are released under [CC BY-SA-4.0](https://creativecommons.org/licenses/by-sa/4.0/).
17
-
18
- - Try to chat with our 7B model, `StableLM-Tuned-Alpha-7B`, on [Hugging Face Spaces](https://huggingface.co/spaces/stabilityai/stablelm-tuned-alpha-chat).
19
-
20
- ## Models
21
-
22
- ### StableVicuna
23
-
24
- StableVicuna is an RLHF fine-tune of [Vicuna-13B v0](https://huggingface.co/lmsys/vicuna-13b-delta-v0), which itself is a fine-tune of [LLaMA-13B](https://github.com/facebookresearch/llama). It is our attempt at creating an open-source RLHF LLM Chatbot. This model is developed by StabilityAI's CarperAI team, with [Duy V. Phung](https://github.com/PhungVanDuy) leading the training effort.
25
-
26
- Due to the original non-commercial license of LLaMA, we can only release the weights of our model as deltas over the original model's weights. StableVicuna's delta weights are released under ([CC BY-NC-SA-4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/)).
27
-
28
- Please visit HuggingFace checkpoint for more information about how to combine our delta weights with the original model.
29
-
30
- | Model | Download | Web Demo | Cite |
31
- | ---------------- | ---------------------------------------------------------------------- | -------------------------------------------------------------------- |------|
32
- | StableVicuna-13B | [checkpoint](https://huggingface.co/CarperAI/stable-vicuna-13b-delta/) | [Hugging Face](https://huggingface.co/spaces/CarperAI/StableVicuna/) | [![DOI:10.57967/hf/0588](https://zenodo.org/badge/DOI/10.1007/978-3-319-76207-4_15.svg)](https://doi.org/10.57967/hf/0588) |
33
-
34
- ### StableLM-Alpha
35
- StableLM-Alpha models are trained on the new dataset that build on [The Pile](https://pile.eleuther.ai/), which contains 1.5 trillion tokens, roughly 3x the size of The Pile. These models will be trained on up to 1.5 trillion tokens. The context length for these models is 4096 tokens.
36
-
37
- An upcoming technical report will document the model specifications and the training settings.
38
-
39
- As a proof-of-concept, we also fine-tuned the model with [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca)'s procedure using a combination of five recent datasets for conversational agents: Stanford's [Alpaca](https://github.com/tatsu-lab/stanford_alpaca), Nomic-AI's [gpt4all](https://github.com/nomic-ai/gpt4all), RyokoAI's [ShareGPT52K](https://huggingface.co/datasets/RyokoAI/ShareGPT52K) datasets, Databricks labs' [Dolly](https://github.com/databrickslabs/dolly), and Anthropic's [HH](https://github.com/anthropics/hh-rlhf). We will be releasing these models as StableLM-Tuned-Alpha.
40
-
41
- | Size | StableLM-Base-Alpha | StableLM-Tuned-Alpha | Training Tokens | Parameters | Web Demo |
42
- |------|--------------------------------------------------------------------------|---------------------------------------------------------------------------|-----------------|---------------|------------------------------------------------------------------------------------|
43
- | 3B | [checkpoint](https://huggingface.co/stabilityai/stablelm-base-alpha-3b/) | [checkpoint](https://huggingface.co/stabilityai/stablelm-tuned-alpha-3b/) | 800B | 3,638,525,952 | |
44
- | 7B | [checkpoint](https://huggingface.co/stabilityai/stablelm-base-alpha-7b) | [checkpoint](https://huggingface.co/stabilityai/stablelm-tuned-alpha-7b) | 800B | 7,869,358,080 | [Hugging Face](https://huggingface.co/spaces/stabilityai/stablelm-tuned-alpha-chat) |
45
- | 15B | (in progress) | (pending) | | | |
46
- | 30B | (in progress) | (pending) | | | |
47
- | 65B | (in progress) | (pending) | | | |
48
- | 175B | (planned) | | | | |
49
-
50
- ## Quickstart
51
-
52
- All StableLM models are hosted on [the Hugging Face hub](https://huggingface.co/StabilityAI). Check out this [notebook](https://github.com/Stability-AI/StableLM/blob/main/notebooks/stablelm-alpha.ipynb) to run inference with limited GPU capabilities.
53
-
54
- Get started chatting with `StableLM-Tuned-Alpha` by using the following code snippet:
55
-
56
- ```python
57
- import torch
58
- from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList
59
-
60
- tokenizer = AutoTokenizer.from_pretrained("stabilityai/stablelm-tuned-alpha-7b")
61
- model = AutoModelForCausalLM.from_pretrained("stabilityai/stablelm-tuned-alpha-7b")
62
- model.half().cuda()
63
-
64
- class StopOnTokens(StoppingCriteria):
65
- def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
66
- stop_ids = set([50278, 50279, 50277, 1, 0])
67
- return input_ids[0][-1] in stop_ids
68
-
69
- system_prompt = """<|SYSTEM|># StableLM Tuned (Alpha version)
70
- - StableLM is a helpful and harmless open-source AI language model developed by StabilityAI.
71
- - StableLM is excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
72
- - StableLM is more than just an information source, StableLM is also able to write poetry, short stories, and make jokes.
73
- - StableLM will refuse to participate in anything that could harm a human.
74
- """
75
-
76
- prompt = f"{system_prompt}<|USER|>What's your mood today?<|ASSISTANT|>"
77
-
78
- inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
79
- tokens = model.generate(
80
- **inputs,
81
- max_new_tokens=64,
82
- temperature=0.7,
83
- do_sample=True,
84
- stopping_criteria=StoppingCriteriaList([StopOnTokens()])
85
- )
86
- print(tokenizer.decode(tokens[0], skip_special_tokens=True))
87
- ```
88
-
89
- StableLM Tuned should be used with prompts formatted to `<|SYSTEM|>...<|USER|>...<|ASSISTANT|>...`
90
- The system prompt is
91
  ```
92
- <|SYSTEM|># StableLM Tuned (Alpha version)
93
- - StableLM is a helpful and harmless open-source AI language model developed by StabilityAI.
94
- - StableLM is excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user.
95
- - StableLM is more than just an information source, StableLM is also able to write poetry, short stories, and make jokes.
96
- - StableLM will refuse to participate in anything that could harm a human.
97
  ```
98
 
99
- ## Fun with StableLM-Tuned-Alpha
100
- This section contains a collection of fun cherry-picked examples of what you can do with `stablelm-tuned-alpha`.
 
101
 
102
- <details>
103
- <summary>Chit-Chat</summary>
104
-
105
  ```
106
- User: Hey! My name is John nice to meet you!
107
-
108
- StableLM: Hello John! It’s always a pleasure to meet new people.
109
-
110
- User: The pleasure is mine. Hey my grandma is in Toronto. I want to plan a surprise visit for her. She’s a person who loves gardens and flowers.
111
-
112
- StableLM: That’s wonderful! It’s great to hear that you’ll be visiting your grandma in Toronto. Can you tell me a little bit more about your plans?
113
-
114
- User: Ya I need to book a flight there and a hotel. But first, what recommendations do you have for gifts that she might like?
115
-
116
- StableLM: I think that’s a very thoughtful idea! It’s a very personal gift that would be sure to make her very happy. I can think of a few examples that she might really enjoy. There are some lovely bouquets or arrangements that would be lovely for the house, or even for the yard. Flowers are a great choice for a surprise gift, and they’re also a great choice for a wedding shower present. Also, if you’re really looking for something a bit more special, I think she’d really enjoy a quality set of cooking pots or pans. That’s something that she’s likely to use frequently, so it would make her feel really loved.
117
-
118
- User: Thanks for the suggestion! Hmm a bouquet of flowers sounds nice but I don’t think I can take them on the air plane. What do you think I should do?
119
-
120
- StableLM: There are many possible solutions! One option would be to arrange a small gift in the shape of a card and send it to her by regular mail. Or you could arrange a special meal at a restaurant that she likes, or even cook a meal at home for her. Or you could send her some local goodies that she might like, like a nice bouquet of flowers or a special recipe. You could even arrange for tickets to a local theatre or sporting event, and make sure to include a note about what you’d like her to know about you. Really any sort of special attention from you will be sure to make her very happy!
121
-
122
- User: Ah! Right. I can order a bouquet for her online and get it delivered! Thanks!
123
-
124
- StableLM: You’re very welcome! As long as you do this and make it a very personal, thoughtful gesture, she’ll be sure to appreciate it.
125
- ```
126
-
127
- </details>
128
 
 
129
  <details>
130
- <summary>Formal Writing</summary>
131
-
132
- ```
133
- User: What would you say to a friend who is graduating high school?
134
-
135
- StableLM: I would say that they have every right to feel proud of themselves and their accomplishments. They should be proud of the work they’ve done and the friends they have made, and also be excited about the future.
136
-
137
- User: Please write an email.
138
- StableLM: Okay, here’s an example of what could be written:
139
-
140
- Dear [Friend’s Name],
141
-
142
- I hope this email finds you well. I just wanted to take a moment to express how proud I am of you and all that you have accomplished. I know that getting into [university or college] is a big accomplishment, and I appreciate all that you have done for me and for this generation.
143
 
144
- I hope that you are having a good day. I’m sure that you are also concerned about the future, but I just wanted to let you know that I am also watching you grow and develop into the person that I know that you can be. Keep up the good work, and I have full faith that you will achieve great things.
145
-
146
- Thank you again for everything that you do.
147
-
148
- Best,
149
- [Your Name]
150
-
151
- ```
152
-
153
- </details>
154
-
155
- <details>
156
- <summary>Creative Writing</summary>
157
-
158
- ### Rap Battles
159
  ```
160
- User: Write an epic rap battle song between deep neural networks and symbolic AI
161
-
162
- StableLM: [Beat drops]
163
-
164
- Deep Neural Networks:
165
- Listen up everyone, it’s time to witness
166
- The power of deep neural networks, it’s just a matter of abstract
167
- We’ve trained these networks to solve complex problems with ease
168
- While symbolic AI is limited to simple math and simple flows
169
-
170
- Symbolic AI:
171
- Hold up, hold up, let me interrupt
172
- It’s been a while since I’ve heard, some new thoughts you interrupt
173
- But let me tell you, my friend, it’s not all about math and flow
174
- It’s about making the world a better place, you know
175
-
176
- Deep Neural Networks:
177
- Well, I can see where you’re coming from, it’s clear
178
- But deep neural networks, they never get the same gear
179
- We can use them to analyze and predict with ease
180
- And prove the superiority of machine learning
181
-
182
- Symbolic AI:
183
- You’re right, it’s not just math or numbers
184
- But it’s the essence of our lives, it’s what counts, we’re numbers
185
- I don’t need a complicated math formula, just a simple device
186
- To make the world a better place, it’s a noble price
187
-
188
- Deep Neural Networks:
189
- I’ll give you that, my friend, you have a point
190
- But let’s not forget, we’re making an ever-increasing joint
191
- Our progress is not bound by logic, it’s a journey of the mind
192
- So let’s work together, let’s strive, let’s leave all troubles behind
193
-
194
- Symbolic AI:
195
- Agreed, let’s work together, let’s not falter
196
- We’re both making history, we’re both from the fader
197
- So let’s make the world a better place, let’s end this debate
198
- Cause it’s time to join forces, it’s time to leave no stone unturned
199
-
200
- [Beat fades out]
201
 
202
- Who won this epic rap battle between deep neural networks and symbolic AI? Well, its up to you to decide!
203
- ```
204
 
205
- ### Fictional Stories
206
 
207
  ```
208
- User: Write a story about a programmer who fell in love with a deep neural network.
 
209
 
210
- StableLM: Once upon a time, there was a programmer named John. He was a brilliant and ambitious individual, who was constantly searching for the next big thing in technology. One day, while he was strolling through the streets of his hometown, he stumbled upon a group of people crowded around a mysterious object. Intrigued, he approached to find out what the fuss was about.
 
 
211
 
212
- As he got closer, he realized that it was none other than a mysterious deep neural network. John’s mind was blown. He had never seen anything like it before. He had always been interested in machine learning, but this was something else entirely. He had to learn more about it.
213
 
214
- John spent the next few weeks holed up in his hotel room, trying to learn everything he could about deep neural networks. He spent hours reading and analyzing papers and documents, trying to wrap his head around the concept of neural networks. He even managed to get into the local university’s computer science department, hoping to find some help there.
 
215
 
216
- But, it wasn’t long before John realized that he was on his own. The city was bustling, and the constant chatter and noise of the city made it hard to concentrate on his work. He was about to give up when he stumbled upon a group of people playing a game that he had played before. They were having a good time, and John couldn’t help but join in.
 
 
 
 
217
 
218
- The group welcomed him into their group, and before he knew it, John was swept away in the excitement. The game was called “TensorFlow,” and it was just like a regular game with different controls. John played along with the group, trying to make sense of it all. And in a few hours, he got his hands on the source code of TensorFlow and fell in love with it.
 
 
219
 
220
- John was now a part of a team of programmers who loved using TensorFlow for their work. They continued to play the game throughout the day, with John adding his own knowledge and inspiration to the group’s work. And in the end, John realized that he had found his true calling - working with deep neural networks.
 
221
 
222
- He now knows the ins and outs of the TensorFlow programming language, and knows how to use it to create some of the most beautiful and complex machine learning models that anyone could dream of. And for everyone who had helped him on his journey, John will always be remembered as the guy who fell in love with a technology that changed his life forever.
 
 
 
 
 
223
 
224
- ```
 
 
 
 
225
 
226
- ### Humor
 
227
 
228
- ```
229
- User: What do you know about the League of Legends champion Zed? Write a short bio
230
 
231
- StableLM: Zed is a powerful and highly skilled player of the League of Legends (LoL) universe. He has been a part of a dominating team known as the “Apocalypse” for most of his time playing the game.
 
232
 
233
- Zed was born in the year 2013 and started playing as a child when he started League of Legends. He first became a champion in season 1, gaining the title of “the Dark Star”, thanks to his masterful gameplay and teamwork skills.
 
234
 
235
- Throughout his time playing the game, Zed has mastered several powerful techniques, such as QWQ combos, one-shot kills, and the ability to outmaneuver his enemies. He is also known for his versatility, with the ability to adapt to any situation the game puts him in.
 
 
 
 
 
 
236
 
237
- Despite his formidable skills and agility, Zed is not invincible. He can be CCed and killed, and he relies on his team to protect him. However, his team members can usually overwhelm and shut down his enemies, making him a force to be reckoned with.
 
 
 
 
238
 
239
- Zed has been a part of many memorable games, and his success in these games helped solidify his legacy as one of the most influential players of the LoL universe.
 
240
 
241
- User: Write a funny conversation after a team fight between Zed and Teemo
 
242
 
243
- StableLM:
244
- Teemo: “Hey Zed, great game today. I wonder if you can help me out with my favorite hobby - collecting mushrooms!”
245
- Zed: “Teemo, please. I’m not a mushroom collector.”
246
- Teemo: “But of course, you are. You have the best taste in the game.”
247
- Zed: “Thanks, but can I at least get some of your mushrooms to be a little more useful?”
248
- Teemo: “I’d love to help you, but I’m afraid they’re all mine. Have you noticed my tunnels are always so well-guarded?”
249
- Zed: “Ha! Well, maybe I can start my own tunnel. Helpful or not, you can never tell what I might have stored in them.”
250
- Teemo: “Good luck with that. Maybe I’ll just create some extra for my garden.”
251
- Zed: “I’m not sure I can keep up with you there.”
252
- Teemo: “Ha! If only you had some more decent weapons. I could really see this as a challenge, though!”
253
- Zed: “Teemo, I think your luck has finally run out.”
254
- Teemo: “Well, at least I’m the one with the mushrooms now.”
255
 
 
 
 
256
  ```
257
 
258
- </details>
259
 
 
260
  <details>
261
- <summary>Writing Code</summary>
262
 
263
- ```
264
- User: Write a program that solves the fib sequence in Python.
265
- ```
266
- StableLM:
267
 
268
  ```python
269
- def fib_sequence(n):
270
- if n==0:
271
- return 0
272
- elif n==1:
273
- return 1
274
- else:
275
- return fib(n-1)+fib(n-2)
276
-
277
- fib_sequence(10)
 
278
  ```
 
279
 
280
  </details>
281
 
282
- ## Selected failure modes of StableLM-Tuned-Alpha
283
- This section contains a collection of selected failure modes of `stablelm-tuned-alpha`.
284
 
285
- <details>
286
- <summary>Yann LeCun gears (linear)</summary>
287
-
288
- ```
289
- User: In front of us are six gears numbered 1 to 6, mounted on axles in a row. Each gear is engaged with the next gear. If gear number 3 is rotated clockwise, in which direction will gears 1 and 6 rotate?
290
 
291
- StableLM: Each gear is engaged with the next gear after every other gear, so that gears 1 and 6 will rotate together. Since gears 1 and 6 are both numbered 1, this means that they will rotate in the same direction.
292
 
293
- To determine the direction in which gears 1 and 6 will rotate, we need to look at the direction of the input force. Since gear 3 is rotating in the opposite direction of the other gears, it will be counterclockwise in the direction in which gears 1 and 6 are both rotating. Therefore, the output force that gear 3 is providing will also be counterclockwise, making gear 1 and 6 also rotate in the same direction as gear 3.
294
- ```
295
-
296
- </details>
297
 
298
- <details>
299
- <summary>Yann LeCun gears (circular)</summary>
300
-
301
  ```
302
- User: 7 axles are equally spaced around a circle. A gear is placed on each axle such that each gear is engaged with the gear to its left and the gear to its right. The gears are numbered 1 to 7 around the circle. If gear 3 were rotated clockwise, in which direction would gear 7 rotate?
303
 
304
- StableLM: Gear 3 is the smallest gear, so it is not affected by the circular pattern. Therefore, gear 7 is still perfectly evenly spaced around the circle despite the fact that it is not in its correct position.
305
-
306
- To find the angle that gear 3 would rotate in, you need to know the distance (d) between the center of the circle and the gear, as well as the direction of rotation (in this case, clockwise).
307
 
308
- According to the problem, gear 3 is situated at (d/2, 2 degrees) and the gears are numbered 1 to 7. Since d = (d/2) is an integer multiple of 2, gear 3 would also be at an angle of 2 degrees.
 
309
 
310
- Therefore, the answer is d = (7/2) * 2 = 5 degrees in total.
311
- ```
312
-
313
- </details>
314
 
315
- ## Request for Help
316
 
317
- Want to get involved?
 
 
318
 
319
- - We would love to port [llama.cpp](https://github.com/ggerganov/llama.cpp) to work with StableLMs
320
- - Integration into [Open Assistant](https://github.com/LAION-AI/Open-Assistant) from LAION-AI to collect high quality human-generated feedback data
321
- - ... Reach out to us with ideas on our [Discord](https://discord.com/invite/stablediffusion)
322
 
323
- ## Potential issues
324
- As is typical for any pretrained Large Language Model without additional finetuning and reinforcement learning, the responses a user gets might be of varying quality and might potentially include offensive language and views. This is expected to be improved with scale, better data, community feedback, and optimisation.
325
 
326
- ## Acknowledgements
327
 
328
- - `StableLM-Tuned-Alpha` would not have been possible without the helpful hand of Dakota Mahan [@dmayhem93](https://huggingface.co/dmayhem93).
329
 
330
- ## Licenses
 
 
331
 
332
- - Base model checkpoints (`StableLM-Base-Alpha`) are licensed under the Creative Commons license ([CC BY-SA-4.0](https://creativecommons.org/licenses/by-sa/4.0/)). Under the license, you must give [credit](https://creativecommons.org/licenses/by/4.0/#) to Stability AI, provide a link to the license, and [indicate if changes were made](https://creativecommons.org/licenses/by/4.0/#). You may do so in any reasonable manner, but not in any way that suggests the Stability AI endorses you or your use.
 
 
333
 
334
- - Fine-tuned checkpoints (`StableLM-Tuned-Alpha`) are licensed under the Non-Commercial Creative Commons license ([CC BY-NC-SA-4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/)), in-line with the original non-commercial license specified by [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca).
335
 
336
- - All code in this repository is licensed under the Apache License 2.0 license.
 
1
+ <h1 align="center">AutoGPTQ</h1>
2
+ <p align="center">An easy-to-use LLM quantization package with user-friendly APIs, based on the GPTQ algorithm.</p>
3
+ <p align="center">
4
+ <a href="https://github.com/PanQiWei/AutoGPTQ/releases">
5
+ <img alt="GitHub release" src="https://img.shields.io/github/release/PanQiWei/AutoGPTQ.svg">
6
+ </a>
7
+ <a href="https://pypi.org/project/auto-gptq/">
8
+ <img alt="PyPI - Downloads" src="https://img.shields.io/pypi/dd/auto-gptq">
9
+ </a>
10
+ </p>
11
+ <h4 align="center">
12
+ <p>
13
+ <b>English</b> |
14
+ <a href="https://github.com/PanQiWei/AutoGPTQ/blob/main/README_zh.md">中文</a>
15
+ </p>
16
+ </h4>
17
+
18
+ *<center>📣 Long time no see! 👋 Architecture upgrades, performance optimizations and more new features are coming in July and August, stay tuned! 🥂</center>*
19
+
20
+ ## News or Update
21
+
22
+ - 2023-08-21 - (News) - The Qwen team officially released a 4-bit quantized version of Qwen-7B based on `auto-gptq`, and provided [detailed benchmark results](https://huggingface.co/Qwen/Qwen-7B-Chat-Int4#%E9%87%8F%E5%8C%96-quantization)
23
+ - 2023-08-06 - (Update) - Support exllama's q4 CUDA kernel, giving at least a 1.3x inference speed-up for int4 quantized models.
24
+ - 2023-08-04 - (Update) - Support RoCm so that AMD GPU users can use auto-gptq with its CUDA extensions.
25
+ - 2023-07-26 - (Update) - Add an elegant [PPL benchmark script](examples/benchmark/perplexity.py) to get results that can be fairly compared with other libraries such as `llama.cpp`.
26
+ - 2023-06-05 - (Update) - Integrate with 🤗 peft to train adapters on top of GPTQ-quantized models; LoRA, AdaLoRA, AdaptionPrompt, etc. are supported (see the sketch below the list).
27
+ - 2023-05-30 - (Update) - Support downloading/uploading quantized models from/to the 🤗 Hub.
28
+
29
+ *For the full history, please turn to [here](docs/NEWS_OR_UPDATE.md)*
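The peft integration mentioned in the list above can be sketched roughly as follows. This is a minimal, illustrative sketch only: the helpers `GPTQLoraConfig` and `get_gptq_peft_model` under `auto_gptq.utils.peft_utils` (as used in this repository's `examples/peft` scripts) and their signatures are assumptions here and may differ between versions, so check the examples before relying on them.

```python
# Hedged sketch: attach a LoRA adapter to a GPTQ-quantized model via the peft integration.
# `GPTQLoraConfig` / `get_gptq_peft_model` are assumed to come from auto_gptq.utils.peft_utils;
# verify against examples/peft in this repository for your installed version.
from peft import TaskType
from auto_gptq import AutoGPTQForCausalLM
from auto_gptq.utils.peft_utils import GPTQLoraConfig, get_gptq_peft_model

# load an already-quantized model (placeholder path)
model = AutoGPTQForCausalLM.from_quantized("opt-125m-4bit", device="cuda:0", use_triton=False)

peft_config = GPTQLoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    task_type=TaskType.CAUSAL_LM,
)
model = get_gptq_peft_model(model, peft_config=peft_config, train_mode=True)
model.print_trainable_parameters()  # only the LoRA weights should be trainable
```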
30
+
31
+ ## Performance Comparison
32
+
33
+ ### Inference Speed
34
+ > The results are generated using [this script](examples/benchmark/generation_speed.py): the input batch size is 1, the decoding strategy is beam search, the model is forced to generate 512 tokens, and the speed metric is tokens/s (higher is better). A rough sketch of such a measurement is shown after the table.
35
+ >
36
+ > The quantized model is loaded using the setup that gives the fastest inference speed.
37
+
38
+ | model | GPU | num_beams | fp16 | gptq-int4 |
39
+ |---------------|---------------|-----------|-------|-----------|
40
+ | llama-7b | 1xA100-40G | 1 | 18.87 | 25.53 |
41
+ | llama-7b | 1xA100-40G | 4 | 68.79 | 91.30 |
42
+ | moss-moon 16b | 1xA100-40G | 1 | 12.48 | 15.25 |
43
+ | moss-moon 16b | 1xA100-40G | 4 | OOM | 42.67 |
44
+ | moss-moon 16b | 2xA100-40G | 1 | 06.83 | 06.78 |
45
+ | moss-moon 16b | 2xA100-40G | 4 | 13.10 | 10.80 |
46
+ | gpt-j 6b | 1xRTX3060-12G | 1 | OOM | 29.55 |
47
+ | gpt-j 6b | 1xRTX3060-12G | 4 | OOM | 47.36 |
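The numbers above come from the linked benchmark script. The snippet below is only a rough, self-contained sketch of how such a tokens/s figure can be measured with plain `transformers` APIs; the model id and generation settings are placeholders, not the exact benchmark configuration.

```python
# Rough sketch of a tokens/s measurement (beam search, 512 forced new tokens).
# Not the benchmark script itself; use examples/benchmark/generation_speed.py for real numbers.
import time
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "facebook/opt-125m"  # placeholder model for illustration
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda:0")

inputs = tokenizer("auto-gptq is", return_tensors="pt").to(model.device)
torch.cuda.synchronize()
start = time.time()
output = model.generate(**inputs, num_beams=4, min_new_tokens=512, max_new_tokens=512)
torch.cuda.synchronize()
elapsed = time.time() - start

new_tokens = output.shape[1] - inputs["input_ids"].shape[1]
print(f"{new_tokens / elapsed:.2f} tokens/s")
```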
48
+
49
+
50
+ ### Perplexity
51
+ For perplexity comparisons, see [here](https://github.com/qwopqwop200/GPTQ-for-LLaMa#result) and [here](https://github.com/qwopqwop200/GPTQ-for-LLaMa#gptq-vs-bitsandbytes)
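For intuition, a perplexity evaluation boils down to averaging the model's negative log-likelihood over held-out text and exponentiating it. The sketch below is a simplified illustration with a placeholder model and text (no sliding window), not the repository's `examples/benchmark/perplexity.py`.

```python
# Simplified perplexity sketch: PPL = exp(mean cross-entropy of the labels under the model).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "facebook/opt-125m"  # placeholder model for illustration
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id).eval()

text = "auto-gptq is an easy-to-use model quantization library based on the GPTQ algorithm."
enc = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    # when labels are provided, the returned loss is the mean cross-entropy over predicted tokens
    loss = model(**enc, labels=enc["input_ids"]).loss
print(f"perplexity: {torch.exp(loss).item():.2f}")
```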
52
+
53
+ ## Installation
54
+
55
+ ### Quick Installation
56
+ You can install the latest stable release of AutoGPTQ from pip with pre-built wheels compatible with PyTorch 2.0.1:
57
+
58
+ * For CUDA 11.7: `pip install auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu117/`
59
+ * For CUDA 11.8: `pip install auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/`
60
+ * For RoCm 5.4.2: `pip install auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/rocm542/`
61
+
62
+ **Warning:** These wheels are not expected to work on PyTorch nightly. Please install AutoGPTQ from source when using PyTorch nightly.
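After installing a wheel, a quick import check like the following (illustrative only) confirms the package resolved correctly in your environment:

```python
# Optional post-install sanity check: the import only succeeds if auto-gptq installed cleanly.
import importlib.metadata

import auto_gptq  # noqa: F401

print("auto-gptq version:", importlib.metadata.version("auto-gptq"))
```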
63
+
64
+ #### Disable CUDA extensions
65
+ By default, the CUDA extensions are built and installed when `torch` and CUDA are already present on your machine. If you don't want to use them, install with:
66
+ ```shell
67
+ BUILD_CUDA_EXT=0 pip install auto-gptq
68
  ```
69
+ And to make sure `autogptq_cuda` is not left in your virtual environment, run:
70
+ ```shell
71
+ pip uninstall autogptq_cuda -y
72
  ```
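If you are unsure whether the compiled extension ended up in your environment, a small check like the one below can tell you. This is illustrative only; the module name `autogptq_cuda` is taken from the uninstall command above.

```python
# Illustrative check: is the compiled CUDA extension module importable in this environment?
import importlib.util

has_cuda_ext = importlib.util.find_spec("autogptq_cuda") is not None
print("autogptq_cuda extension available:", has_cuda_ext)
```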
73
 
74
+ #### To support Triton speedup
75
+ To integrate with `triton`, install with:
76
+ > Warning: currently triton only supports Linux; 3-bit quantization is not supported when using triton.
77
 
78
+ ```shell
79
+ pip install auto-gptq[triton]
 
80
  ```
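Once the extra is installed, the Triton kernels are opted into at load time via the `use_triton` flag of `from_quantized`, which also appears in the Quick Tour below. The model path here is a placeholder; this is a minimal sketch rather than a full recipe.

```python
# Load an already-quantized model with the Triton kernels enabled (Linux only, not for 3-bit models).
from auto_gptq import AutoGPTQForCausalLM

model = AutoGPTQForCausalLM.from_quantized(
    "opt-125m-4bit",   # placeholder path to a quantized model
    device="cuda:0",
    use_triton=True,   # set to False to fall back to the CUDA kernels
)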
81
 
82
+ ### Install from source
83
  <details>
84
+ <summary>click to see details</summary>
85
 
86
+ Clone the source code:
87
+ ```shell
88
+ git clone https://github.com/PanQiWei/AutoGPTQ.git && cd AutoGPTQ
89
  ```
90
+ Then, install from source:
91
+ ```shell
92
+ pip install .
93
+ ```
94
+ As with the quick installation, you can set `BUILD_CUDA_EXT=0` to disable building the PyTorch CUDA extension.
95
 
96
+ Use `.[triton]` if you want to integrate with triton and it's available on your operating system.
 
97
 
98
+ To install from source for AMD GPUs supporting RoCm, please specify the `ROCM_VERSION` environment variable. The compilation can be sped up by specifying the `PYTORCH_ROCM_ARCH` variable ([reference](https://github.com/pytorch/pytorch/blob/7b73b1e8a73a1777ebe8d2cd4487eb13da55b3ba/setup.py#L132)), for example `gfx90a` for MI200 series devices. Example:
99
 
100
  ```
101
+ ROCM_VERSION=5.6 pip install .
102
+ ```
103
 
104
+ For RoCm systems, the packages `rocsparse-dev`, `hipsparse-dev`, `rocthrust-dev`, `rocblas-dev` and `hipblas-dev` are required to build.
105
+
106
+ </details>
107
 
108
+ ## Quick Tour
109
 
110
+ ### Quantization and Inference
111
+ > Warning: this is just a showcase of the basic APIs in AutoGPTQ, which uses only one sample to quantize a very small model; the quality of a model quantized with so few samples may not be good.
112
 
113
+ Below is an example of the simplest way to use `auto_gptq` to quantize a model and run inference after quantization:
114
+ ```python
115
+ from transformers import AutoTokenizer, TextGenerationPipeline
116
+ from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
117
+ import logging
118
 
119
+ logging.basicConfig(
120
+ format="%(asctime)s %(levelname)s [%(name)s] %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S"
121
+ )
122
 
123
+ pretrained_model_dir = "facebook/opt-125m"
124
+ quantized_model_dir = "opt-125m-4bit"
125
 
126
+ tokenizer = AutoTokenizer.from_pretrained(pretrained_model_dir, use_fast=True)
127
+ examples = [
128
+ tokenizer(
129
+ "auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm."
130
+ )
131
+ ]
132
 
133
+ quantize_config = BaseQuantizeConfig(
134
+ bits=4, # quantize model to 4-bit
135
+ group_size=128, # it is recommended to set the value to 128
136
+ desc_act=False, # set to False can significantly speed up inference but the perplexity may be slightly worse
137
+ )
138
 
139
+ # load un-quantized model, by default, the model will always be loaded into CPU memory
140
+ model = AutoGPTQForCausalLM.from_pretrained(pretrained_model_dir, quantize_config)
141
 
142
+ # quantize model, the examples should be list of dict whose keys can only be "input_ids" and "attention_mask"
143
+ model.quantize(examples)
144
 
145
+ # save quantized model
146
+ model.save_quantized(quantized_model_dir)
147
 
148
+ # save quantized model using safetensors
149
+ model.save_quantized(quantized_model_dir, use_safetensors=True)
150
 
151
+ # push quantized model to Hugging Face Hub.
152
+ # to use use_auth_token=True, Login first via huggingface-cli login.
153
+ # or pass an explicit token with: use_auth_token="hf_xxxxxxx"
154
+ # (uncomment the following three lines to enable this feature)
155
+ # repo_id = f"YourUserName/{quantized_model_dir}"
156
+ # commit_message = f"AutoGPTQ model for {pretrained_model_dir}: {quantize_config.bits}bits, gr{quantize_config.group_size}, desc_act={quantize_config.desc_act}"
157
+ # model.push_to_hub(repo_id, commit_message=commit_message, use_auth_token=True)
158
 
159
+ # alternatively you can save and push at the same time
160
+ # (uncomment the following three lines to enable this feature)
161
+ # repo_id = f"YourUserName/{quantized_model_dir}"
162
+ # commit_message = f"AutoGPTQ model for {pretrained_model_dir}: {quantize_config.bits}bits, gr{quantize_config.group_size}, desc_act={quantize_config.desc_act}"
163
+ # model.push_to_hub(repo_id, save_dir=quantized_model_dir, use_safetensors=True, commit_message=commit_message, use_auth_token=True)
164
 
165
+ # load quantized model to the first GPU
166
+ model = AutoGPTQForCausalLM.from_quantized(quantized_model_dir, device="cuda:0")
167
 
168
+ # download quantized model from Hugging Face Hub and load to the first GPU
169
+ # model = AutoGPTQForCausalLM.from_quantized(repo_id, device="cuda:0", use_safetensors=True, use_triton=False)
170
 
171
+ # inference with model.generate
172
+ print(tokenizer.decode(model.generate(**tokenizer("auto_gptq is", return_tensors="pt").to(model.device))[0]))
173
 
174
+ # or you can also use pipeline
175
+ pipeline = TextGenerationPipeline(model=model, tokenizer=tokenizer)
176
+ print(pipeline("auto-gptq is")[0]["generated_text"])
177
  ```
178
 
179
+ For more advanced features of model quantization, please refer to [this script](examples/quantization/quant_with_alpaca.py)
180
 
181
+ ### Customize Model
182
  <details>
 
183
 
184
+ <summary>Below is an example of extending `auto_gptq` to support the `OPT` model; as you will see, it's very easy:</summary>
 
 
 
185
 
186
  ```python
187
+ from auto_gptq.modeling import BaseGPTQForCausalLM
188
+
189
+
190
+ class OPTGPTQForCausalLM(BaseGPTQForCausalLM):
191
+ # chained attribute name of transformer layer block
192
+ layers_block_name = "model.decoder.layers"
193
+ # chained attribute names of other nn modules that are at the same level as the transformer layer block
194
+ outside_layer_modules = [
195
+ "model.decoder.embed_tokens", "model.decoder.embed_positions", "model.decoder.project_out",
196
+ "model.decoder.project_in", "model.decoder.final_layer_norm"
197
+ ]
198
+ # chained attribute names of linear layers in transformer layer module
199
+ # normally there are four sub-lists; the modules in each sub-list can be seen as one operation,
200
+ # and the order should match the order in which they are actually executed; in this case (and usually in most cases),
201
+ # they are: attention q_k_v projection, attention output projection, MLP project input, MLP project output
202
+ inside_layer_modules = [
203
+ ["self_attn.k_proj", "self_attn.v_proj", "self_attn.q_proj"],
204
+ ["self_attn.out_proj"],
205
+ ["fc1"],
206
+ ["fc2"]
207
+ ]
208
  ```
209
+ After this, you can use `OPTGPTQForCausalLM.from_pretrained` and the other methods shown in the Quick Tour.
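For completeness, the custom class defined above can be dropped into the same flow as the Quick Tour. The sketch below assumes the snippet above has been run; `auto_gptq` already ships OPT support, so this is purely illustrative, and the model and output directories are placeholders.

```python
# Illustrative: quantize with the custom OPTGPTQForCausalLM defined above, mirroring the Quick Tour flow.
from transformers import AutoTokenizer
from auto_gptq import BaseQuantizeConfig

pretrained_model_dir = "facebook/opt-125m"
tokenizer = AutoTokenizer.from_pretrained(pretrained_model_dir, use_fast=True)
examples = [tokenizer("auto-gptq is an easy-to-use model quantization library.")]

model = OPTGPTQForCausalLM.from_pretrained(
    pretrained_model_dir,
    BaseQuantizeConfig(bits=4, group_size=128),
)
model.quantize(examples)
model.save_quantized("opt-125m-4bit-custom")
```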
210
 
211
  </details>
212
 
213
+ ### Evaluation on Downstream Tasks
214
+ You can use the tasks defined in `auto_gptq.eval_tasks` to evaluate a model's performance on a specific downstream task before and after quantization.
215
 
216
+ The predefined tasks support all causal language models implemented in [🤗 transformers](https://github.com/huggingface/transformers) and in this project.
 
 
 
 
217
 
218
+ <details>
219
 
220
+ <summary>Below is an example of evaluating `EleutherAI/gpt-j-6b` on a sequence-classification task using the `cardiffnlp/tweet_sentiment_multilingual` dataset:</summary>
 
 
 
221
 
222
+ ```python
223
+ from functools import partial
224
+
225
+ import datasets
226
+ from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
227
+
228
+ from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
229
+ from auto_gptq.eval_tasks import SequenceClassificationTask
230
+
231
+
232
+ MODEL = "EleutherAI/gpt-j-6b"
233
+ DATASET = "cardiffnlp/tweet_sentiment_multilingual"
234
+ TEMPLATE = "Question:What's the sentiment of the given text? Choices are {labels}.\nText: {text}\nAnswer:"
235
+ ID2LABEL = {
236
+ 0: "negative",
237
+ 1: "neutral",
238
+ 2: "positive"
239
+ }
240
+ LABELS = list(ID2LABEL.values())
241
+
242
+
243
+ def ds_refactor_fn(samples):
244
+ text_data = samples["text"]
245
+ label_data = samples["label"]
246
+
247
+ new_samples = {"prompt": [], "label": []}
248
+ for text, label in zip(text_data, label_data):
249
+ prompt = TEMPLATE.format(labels=LABELS, text=text)
250
+ new_samples["prompt"].append(prompt)
251
+ new_samples["label"].append(ID2LABEL[label])
252
+
253
+ return new_samples
254
+
255
+
256
+ # model = AutoModelForCausalLM.from_pretrained(MODEL).eval().half().to("cuda:0")
257
+ model = AutoGPTQForCausalLM.from_pretrained(MODEL, BaseQuantizeConfig())
258
+ tokenizer = AutoTokenizer.from_pretrained(MODEL)
259
+
260
+ task = SequenceClassificationTask(
261
+ model=model,
262
+ tokenizer=tokenizer,
263
+ classes=LABELS,
264
+ data_name_or_path=DATASET,
265
+ prompt_col_name="prompt",
266
+ label_col_name="label",
267
+ **{
268
+ "num_samples": 1000, # how many samples will be sampled to evaluation
269
+ "sample_max_len": 1024, # max tokens for each sample
270
+ "block_max_len": 2048, # max tokens for each data block
271
+ # function to load dataset, one must only accept data_name_or_path as input
272
+ # and return datasets.Dataset
273
+ "load_fn": partial(datasets.load_dataset, name="english"),
274
+ # function to preprocess dataset, which is used for datasets.Dataset.map,
275
+ # must return Dict[str, list] with only two keys: [prompt_col_name, label_col_name]
276
+ "preprocess_fn": ds_refactor_fn,
277
+ # truncate label when sample's length exceeds sample_max_len
278
+ "truncate_prompt": False
279
+ }
280
+ )
281
+
282
+ # note that max_new_tokens will be automatically specified internally based on given classes
283
+ print(task.run())
284
+
285
+ # self-consistency
286
+ print(
287
+ task.run(
288
+ generation_config=GenerationConfig(
289
+ num_beams=3,
290
+ num_return_sequences=3,
291
+ do_sample=True
292
+ )
293
+ )
294
+ )
295
  ```
 
296
 
297
+ </details>
 
 
298
 
299
+ ## Learn More
300
+ The [tutorials](docs/tutorial) provide step-by-step guidance for integrating `auto_gptq` into your own project, along with some best-practice principles.
301
 
302
+ The [examples](examples/README.md) provide plenty of example scripts for using `auto_gptq` in different ways.
 
 
 
303
 
304
+ ## Supported Models
305
 
306
+ > You can compare `model.config.model_type` with the table below to check whether the model you use is supported by `auto_gptq` (see the snippet after the table).
307
+ >
308
+ > For example, the model_type of `WizardLM`, `vicuna` and `gpt4all` is `llama`, hence they are all supported by `auto_gptq`.
309
 
310
+ | model type | quantization | inference | peft-lora | peft-ada-lora | peft-adaption_prompt |
311
+ |------------------------------------|--------------|-----------|-----------|---------------|-------------------------------------------------------------------------------------------------|
312
+ | bloom | ✅ | ✅ | ✅ | ✅ | |
313
+ | gpt2 | ✅ | ✅ | ✅ | ✅ | |
314
+ | gpt_neox | ✅ | ✅ | ✅ | ✅ | ✅[requires this peft branch](https://github.com/PanQiWei/peft/tree/multi_modal_adaption_prompt) |
315
+ | gptj | ✅ | ✅ | ✅ | ✅ | ✅[requires this peft branch](https://github.com/PanQiWei/peft/tree/multi_modal_adaption_prompt) |
316
+ | llama | ✅ | ✅ | ✅ | ✅ | ✅ |
317
+ | moss | ✅ | ✅ | ✅ | ✅ | ✅[requires this peft branch](https://github.com/PanQiWei/peft/tree/multi_modal_adaption_prompt) |
318
+ | opt | ✅ | ✅ | ✅ | ✅ | |
319
+ | gpt_bigcode | ✅ | ✅ | ✅ | ✅ | |
320
+ | codegen | ✅ | ✅ | ✅ | ✅ | |
321
+ | falcon(RefinedWebModel/RefinedWeb) | ✅ | ✅ | ✅ | ✅ | |
322
 
323
+ ## Supported Evaluation Tasks
324
+ Currently, `auto_gptq` supports: `LanguageModelingTask`, `SequenceClassificationTask` and `TextSummarizationTask`; more tasks are coming soon!
325
 
326
+ ## Running tests
327
 
328
+ Tests can be run with:
329
 
330
+ ```
331
+ pytest tests/ -s
332
+ ```
333
 
334
+ ## Acknowledgement
335
+ - Specially thanks **Elias Frantar**, **Saleh Ashkboos**, **Torsten Hoefler** and **Dan Alistarh** for proposing **GPTQ** algorithm and open source the [code](https://github.com/IST-DASLab/gptq).
336
+ - Specially thanks **qwopqwop200**, for code in this project that relevant to quantization are mainly referenced from [GPTQ-for-LLaMa](https://github.com/qwopqwop200/GPTQ-for-LLaMa/tree/cuda).
337
 
 
338
 
339
+ [![Star History Chart](https://api.star-history.com/svg?repos=PanQiwei/AutoGPTQ&type=Date)](https://star-history.com/#PanQiWei/AutoGPTQ&Date)
README_zh.md ADDED
@@ -0,0 +1,330 @@
1
+ <h1 align="center">AutoGPTQ</h1>
2
+ <p align="center">一个基于 GPTQ 算法,简单易用且拥有用户友好型接口的大语言模型量化工具包。</p>
3
+ <p align="center">
4
+ <a href="https://github.com/PanQiWei/AutoGPTQ/releases">
5
+ <img alt="GitHub release" src="https://img.shields.io/github/release/PanQiWei/AutoGPTQ.svg">
6
+ </a>
7
+ <a href="https://pypi.org/project/auto-gptq/">
8
+ <img alt="PyPI - Downloads" src="https://img.shields.io/pypi/dd/auto-gptq">
9
+ </a>
10
+ </p>
11
+ <h4 align="center">
12
+ <p>
13
+ <a href="https://github.com/PanQiWei/AutoGPTQ/blob/main/README.md">English</a> |
14
+ <b>中文</b>
15
+ </p>
16
+ </h4>
17
+
18
+ *<center>📣 好久不见!👋 七月和八月将会迎来架构升级,性能优化和新特性,敬请关注!🥂</center>*
19
+
20
+ ## 新闻或更新
21
+
22
+ - 2023-08-21 - (新闻) - 通义千问团队发布了基于 `auto-gptq` 的 Qwen-7B 4bit 量化版本模型,并提供了[详尽的测评结果](https://huggingface.co/Qwen/Qwen-7B-Chat-Int4#%E9%87%8F%E5%8C%96-quantization)
23
+ - 2023-08-06 - (更新) - 支持 exllama 的 q4 CUDA 算子使得 int4 量化模型能够获得至少1.3倍的推理速度提升.
24
+ - 2023-08-04 - (更新) - 支持 RoCm 使得 AMD GPU 的用户能够使用 auto-gptq 的 CUDA 拓展.
25
+ - 2023-07-26 - (更新) - 一个优雅的 [PPL 测评脚本](examples/benchmark/perplexity.py)以获得可以与诸如 `llama.cpp` 等代码库进行公平比较的结果。
26
+ - 2023-06-05 - (更新) - 集成 🤗 peft 来使用 gptq 量化过的模型训练适应层,支持 LoRA,AdaLoRA,AdaptionPrompt 等。
27
+ - 2023-05-30 - (更新) - 支持从 🤗 Hub 下载量化好的模型或上次量化好的模型到 🤗 Hub。
28
+
29
+ *获取更多的历史信息,请转至[这里](docs/NEWS_OR_UPDATE.md)*
30
+
31
+ ## 性能对比
32
+
33
+ ### 推理速度
34
+ > 以下结果通过[这个脚本](examples/benchmark/generation_speed.py)生成,文本输入的 batch size 为1,解码策略为 beam search 并且强制模型生成512个 token,速度的计量单位为 tokens/s(越大越好)。
35
+ >
36
+ > 量化模型通过能够最大化推理速度的方式加载。
37
+
38
+ | model | GPU | num_beams | fp16 | gptq-int4 |
39
+ |---------------|---------------|-----------|-------|-----------|
40
+ | llama-7b | 1xA100-40G | 1 | 18.87 | 25.53 |
41
+ | llama-7b | 1xA100-40G | 4 | 68.79 | 91.30 |
42
+ | moss-moon 16b | 1xA100-40G | 1 | 12.48 | 15.25 |
43
+ | moss-moon 16b | 1xA100-40G | 4 | OOM | 42.67 |
44
+ | moss-moon 16b | 2xA100-40G | 1 | 06.83 | 06.78 |
45
+ | moss-moon 16b | 2xA100-40G | 4 | 13.10 | 10.80 |
46
+ | gpt-j 6b | 1xRTX3060-12G | 1 | OOM | 29.55 |
47
+ | gpt-j 6b | 1xRTX3060-12G | 4 | OOM | 47.36 |
48
+
49
+
50
+ ### 困惑度(PPL)
51
+ 对于困惑度的对比, 你可以参考 [这里](https://github.com/qwopqwop200/GPTQ-for-LLaMa#result) 和 [这里](https://github.com/qwopqwop200/GPTQ-for-LLaMa#gptq-vs-bitsandbytes)
52
+
53
+ ## 安装
54
+
55
+ ### 快速安装
56
+ 你可以通过 pip 来安装 AutoGPTQ 当前最新的稳定版本:
57
+ ```shell
58
+ pip install auto-gptq
59
+ ```
60
+ 从 0.2.0 版本开始,你可以从每次版本发布的资产文件列表中下载预构建好的符合你系统配置情况的轮子文件,并通过安装这些轮子文件来跳过漫长的构建过程以达到最快的安装速度。如下是一个例子:
61
+ ```shell
62
+ # 首先,进入轮子文件存放的目录,然后执行下面的命令
63
+ pip install auto_gptq-0.2.0+cu118-cp310-cp310-linux_x86_64.whl # 在 linux 操作系统的一个 python=3.10 且 cuda=11.8 的环境下安装 0.2.0 版本的 auto_gptq
64
+ ```
65
+ #### 取消 cuda 拓展的安装
66
+ 默认情况下,在 `torch` 和 `cuda` 已经于你的机器上被安装时,cuda 拓展将被自动安装,如果你不想要这些拓展的话,采用以下安装命令:
67
+ ```shell
68
+ BUILD_CUDA_EXT=0 pip install auto-gptq
69
+ ```
70
+ 同时为确保该拓展——`autogptq_cuda` 不再存在于你的虚拟环境,执行以下命令:
71
+ ```shell
72
+ pip uninstall autogptq_cuda -y
73
+ ```
74
+
75
+ #### 支持使用 triton 加速
76
+ 若想使用 `triton` 加速模型推理,使用以下命令:
77
+ > 警告:目前 triton 仅支持 linux 操作系统;当使用 triton 时 3-bit 数值类型的量化将不被支持
78
+
79
+ ```shell
80
+ pip install auto-gptq[triton]
81
+ ```
82
+
83
+ ### 从源码安装
84
+ <details>
85
+ <summary>点击以查看详情</summary>
86
+
87
+ 克隆源码:
88
+ ```shell
89
+ git clone https://github.com/PanQiWei/AutoGPTQ.git && cd AutoGPTQ
90
+ ```
91
+ 然后,从项目目录安装:
92
+ ```shell
93
+ pip install .
94
+ ```
95
+ 正如在快速安装一节,你可以使用 `BUILD_CUDA_EXT=0` 来取消构建 cuda 拓展。
96
+
97
+ 如果你想要使用 triton 加速且其能够被你的操作系统所支持,请使用 `.[triton]`。
98
+
99
+ 对于 AMD GPUs,为了从源码安装以支持 RoCm,请设置 `ROCM_VERSION` 环境变量。同时通过设置 `PYTORCH_ROCM_ARCH` ([reference](https://github.com/pytorch/pytorch/blob/7b73b1e8a73a1777ebe8d2cd4487eb13da55b3ba/setup.py#L132)) 可提升编译速度,例如:对于 MI200 系列设备,该变量可设为 `gfx90a`。例子:
100
+
101
+ ```
102
+ ROCM_VERSION=5.6 pip install .
103
+ ```
104
+
105
+ 对于 RoCm 系统,在从源码安装时额外需要提前安装以下包:`rocsparse-dev`, `hipsparse-dev`, `rocthrust-dev`, `rocblas-dev` and `hipblas-dev`。
106
+
107
+ </details>
108
+
109
+ ## 快速开始
110
+
111
+ ### 量化和推理
112
+ > 警告:这里仅是对 AutoGPTQ 中基本接口的用法展示,只使用了一条文本来量化一个特别小的模型,因此其结果的表现可能不如在大模型上执行量化后预期的那样好。
113
+
114
+ 以下展示了使用 `auto_gptq` 进行量化和推理的最简单用法:
115
+ ```python
116
+ from transformers import AutoTokenizer, TextGenerationPipeline
117
+ from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
118
+
119
+
120
+ pretrained_model_dir = "facebook/opt-125m"
121
+ quantized_model_dir = "opt-125m-4bit"
122
+
123
+
124
+ tokenizer = AutoTokenizer.from_pretrained(pretrained_model_dir, use_fast=True)
125
+ examples = [
126
+ tokenizer(
127
+ "auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm."
128
+ )
129
+ ]
130
+
131
+ quantize_config = BaseQuantizeConfig(
132
+ bits=4, # 将模型量化为 4-bit 数值类型
133
+ group_size=128, # 一般推荐将此参数的值设置为 128
134
+ desc_act=False, # 设为 False 可以显著提升推理速度,但是 ppl 可能会轻微地变差
135
+ )
136
+
137
+ # 加载未量化的模型,默认情况下,模型总是会被加载到 CPU 内存中
138
+ model = AutoGPTQForCausalLM.from_pretrained(pretrained_model_dir, quantize_config)
139
+
140
+ # 量化模型, 样本的数据类型应该为 List[Dict],其中字典的键有且仅有 input_ids 和 attention_mask
141
+ model.quantize(examples)
142
+
143
+ # 保存量化好的模型
144
+ model.save_quantized(quantized_model_dir)
145
+
146
+ # 使用 safetensors 保存量化好的模型
147
+ model.save_quantized(quantized_model_dir, use_safetensors=True)
148
+
149
+ # 将量化好的模型直接上传至 Hugging Face Hub
150
+ # 当使用 use_auth_token=True 时, 确保你已经首先使用 huggingface-cli login 进行了登录
151
+ # 或者可以使用 use_auth_token="hf_xxxxxxx" 来显式地添加账户认证 token
152
+ # (取消下面三行代码的注释来使用该功能)
153
+ # repo_id = f"YourUserName/{quantized_model_dir}"
154
+ # commit_message = f"AutoGPTQ model for {pretrained_model_dir}: {quantize_config.bits}bits, gr{quantize_config.group_size}, desc_act={quantize_config.desc_act}"
155
+ # model.push_to_hub(repo_id, commit_message=commit_message, use_auth_token=True)
156
+
157
+ # 或者你也可以同时将量化好的模型保存到本地并上传至 Hugging Face Hub
158
+ # (取消下面三行代码的注释来使用该功能)
159
+ # repo_id = f"YourUserName/{quantized_model_dir}"
160
+ # commit_message = f"AutoGPTQ model for {pretrained_model_dir}: {quantize_config.bits}bits, gr{quantize_config.group_size}, desc_act={quantize_config.desc_act}"
161
+ # model.push_to_hub(repo_id, save_dir=quantized_model_dir, use_safetensors=True, commit_message=commit_message, use_auth_token=True)
162
+
163
+ # 加载量化好的模型到能被识别到的第一块显卡中
164
+ model = AutoGPTQForCausalLM.from_quantized(quantized_model_dir, device="cuda:0")
165
+
166
+ # 从 Hugging Face Hub 下载量化好的模型并加载到能被识别到的第一块显卡中
167
+ # model = AutoGPTQForCausalLM.from_quantized(repo_id, device="cuda:0", use_safetensors=True, use_triton=False)
168
+
169
+ # 使用 model.generate 执行推理
170
+ print(tokenizer.decode(model.generate(**tokenizer("auto_gptq is", return_tensors="pt").to(model.device))[0]))
171
+
172
+ # 或者使用 TextGenerationPipeline
173
+ pipeline = TextGenerationPipeline(model=model, tokenizer=tokenizer)
174
+ print(pipeline("auto-gptq is")[0]["generated_text"])
175
+ ```
176
+
177
+ 参考 [此样例脚本](examples/quantization/quant_with_alpaca.py) 以了解进阶的用法。
178
+
179
+ ### 自定义模型
180
+
181
+ <details>
182
+
183
+ <summary>以下展示了如何拓展 `auto_gptq` 以支持 `OPT` 模型,如你所见,这非常简单:</summary>
184
+
185
+ ```python
186
+ from auto_gptq.modeling import BaseGPTQForCausalLM
187
+
188
+
189
+ class OPTGPTQForCausalLM(BaseGPTQForCausalLM):
190
+ # chained attribute name of transformer layer block
191
+ layers_block_name = "model.decoder.layers"
192
+ # chained attribute names of other nn modules that in the same level as the transformer layer block
193
+ outside_layer_modules = [
194
+ "model.decoder.embed_tokens", "model.decoder.embed_positions", "model.decoder.project_out",
195
+ "model.decoder.project_in", "model.decoder.final_layer_norm"
196
+ ]
197
+ # chained attribute names of linear layers in transformer layer module
198
+ # normally, there are four sub lists, for each one the modules in it can be seen as one operation,
199
+ # and the order should be the order when they are truly executed, in this case (and usually in most cases),
200
+ # they are: attention q_k_v projection, attention output projection, MLP project input, MLP project output
201
+ inside_layer_modules = [
202
+ ["self_attn.k_proj", "self_attn.v_proj", "self_attn.q_proj"],
203
+ ["self_attn.out_proj"],
204
+ ["fc1"],
205
+ ["fc2"]
206
+ ]
207
+ ```
208
+ 然后, 你就可以像在基本用法一节中展示的那样使用 `OPTGPTQForCausalLM.from_pretrained` 和其他方法。
209
+
210
+ </details>
211
+
212
+
213
+ ### 在下游任务上执行评估
214
+ 你可以使用在 `auto_gptq.eval_tasks` 中定义的任务来评估量化前后的模型在某个特定下游任务上的表现。
215
+
216
+ 这些预定义的任务支持所有在 [🤗 transformers](https://github.com/huggingface/transformers) 和本项目中被实现了的 causal-language-models。
217
+
218
+ <details>
219
+
220
+ <summary>以下是使用 `cardiffnlp/tweet_sentiment_multilingual` 数据集在序列分类(文本分类)任务上评估 `EleutherAI/gpt-j-6b` 模型的示例:</summary>
221
+
222
+ ```python
223
+ from functools import partial
224
+
225
+ import datasets
226
+ from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
227
+
228
+ from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
229
+ from auto_gptq.eval_tasks import SequenceClassificationTask
230
+
231
+
232
+ MODEL = "EleutherAI/gpt-j-6b"
233
+ DATASET = "cardiffnlp/tweet_sentiment_multilingual"
234
+ TEMPLATE = "Question:What's the sentiment of the given text? Choices are {labels}.\nText: {text}\nAnswer:"
235
+ ID2LABEL = {
236
+ 0: "negative",
237
+ 1: "neutral",
238
+ 2: "positive"
239
+ }
240
+ LABELS = list(ID2LABEL.values())
241
+
242
+
243
+ def ds_refactor_fn(samples):
244
+ text_data = samples["text"]
245
+ label_data = samples["label"]
246
+
247
+ new_samples = {"prompt": [], "label": []}
248
+ for text, label in zip(text_data, label_data):
249
+ prompt = TEMPLATE.format(labels=LABELS, text=text)
250
+ new_samples["prompt"].append(prompt)
251
+ new_samples["label"].append(ID2LABEL[label])
252
+
253
+ return new_samples
254
+
255
+
256
+ # model = AutoModelForCausalLM.from_pretrained(MODEL).eval().half().to("cuda:0")
257
+ model = AutoGPTQForCausalLM.from_pretrained(MODEL, BaseQuantizeConfig())
258
+ tokenizer = AutoTokenizer.from_pretrained(MODEL)
259
+
260
+ task = SequenceClassificationTask(
261
+ model=model,
262
+ tokenizer=tokenizer,
263
+ classes=LABELS,
264
+ data_name_or_path=DATASET,
265
+ prompt_col_name="prompt",
266
+ label_col_name="label",
267
+ **{
268
+ "num_samples": 1000, # how many samples will be sampled to evaluation
269
+ "sample_max_len": 1024, # max tokens for each sample
270
+ "block_max_len": 2048, # max tokens for each data block
271
+ # function to load dataset, one must only accept data_name_or_path as input
272
+ # and return datasets.Dataset
273
+ "load_fn": partial(datasets.load_dataset, name="english"),
274
+ # function to preprocess dataset, which is used for datasets.Dataset.map,
275
+ # must return Dict[str, list] with only two keys: [prompt_col_name, label_col_name]
276
+ "preprocess_fn": ds_refactor_fn,
277
+ # truncate label when sample's length exceeds sample_max_len
278
+ "truncate_prompt": False
279
+ }
280
+ )
281
+
282
+ # note that max_new_tokens will be automatically specified internally based on given classes
283
+ print(task.run())
284
+
285
+ # self-consistency
286
+ print(
287
+ task.run(
288
+ generation_config=GenerationConfig(
289
+ num_beams=3,
290
+ num_return_sequences=3,
291
+ do_sample=True
292
+ )
293
+ )
294
+ )
295
+ ```
296
+
297
+ </details>
298
+
299
+ ## 了解更多
300
+ [教程](docs/tutorial) 提供了将 `auto_gptq` 集成到你的项目中的手把手指导和最佳实践准则。
301
+
302
+ [示例](examples/README.md) 提供了大量示例脚本以将 `auto_gptq` 用于不同领域。
303
+
304
+ ## 支持的模型
305
+
306
+ > 你可以使用 `model.config.model_type` 来对照下表以检查你正在使用的一个模型是否被 `auto_gptq` 所支持。
307
+ >
308
+ > 比如, `WizardLM`,`vicuna` 和 `gpt4all` 模型的 `model_type` 皆为 `llama`, 因此这些模型皆被 `auto_gptq` 所支持。
309
+
310
+ | model type | quantization | inference | peft-lora | peft-ada-lora | peft-adaption_prompt |
311
+ |------------------------------------|--------------|-----------|-----------|---------------|-----------------------------------------------------------------------------------|
312
+ | bloom | ✅ | ✅ | ✅ | ✅ | |
313
+ | gpt2 | ✅ | ✅ | ✅ | ✅ | |
314
+ | gpt_neox | ✅ | ✅ | ✅ | ✅ | ✅[要求该分支的 peft](https://github.com/PanQiWei/peft/tree/multi_modal_adaption_prompt) |
315
+ | gptj | ✅ | ✅ | ✅ | ✅ | ✅[要求该分支的 peft](https://github.com/PanQiWei/peft/tree/multi_modal_adaption_prompt) |
316
+ | llama | ✅ | ✅ | ✅ | ✅ | ✅ |
317
+ | moss | ✅ | ✅ | ✅ | ✅ | ✅[要求该分支的 peft](https://github.com/PanQiWei/peft/tree/multi_modal_adaption_prompt) |
318
+ | opt | ✅ | ✅ | ✅ | ✅ | |
319
+ | gpt_bigcode | ✅ | ✅ | ✅ | ✅ | |
320
+ | codegen | ✅ | ✅ | ✅ | ✅ | |
321
+ | falcon(RefinedWebModel/RefinedWeb) | ✅ | ✅ | ✅ | ✅ | |
322
+
323
+ ## 支持的评估任务
324
+ 目前, `auto_gptq` 支持以下评估任务: `LanguageModelingTask`, `SequenceClassificationTask` 和 `TextSummarizationTask`;更多的评估任务即将到来!
325
+
326
+ ## 致谢
327
+ - 特别感谢 **Elias Frantar**, **Saleh Ashkboos**, **Torsten Hoefler** 和 **Dan Alistarh** 提出 **GPTQ** 算法并开源[代码](https://github.com/IST-DASLab/gptq)。
328
+ - 特别感谢 **qwopqwop200**, 本项目中涉及到模型量化的代码主要参考自 [GPTQ-for-LLaMa](https://github.com/qwopqwop200/GPTQ-for-LLaMa/tree/cuda)。
329
+
330
+ [![Star History Chart](https://api.star-history.com/svg?repos=PanQiwei/AutoGPTQ&type=Date)](https://star-history.com/#PanQiWei/AutoGPTQ&Date)
setup.py CHANGED
@@ -1,10 +1,139 @@
1
- #!/usr/bin/env python
2
 
3
 
4
- from distutils.core import setup
5
 
6
- setup(name='HF API',
7
- version='0.1',
8
- description='Hugging Face Python API',
9
- packages=['hfapi']
10
  )
 
1
+ import os
2
+ import sys
3
+ from pathlib import Path
4
+ from setuptools import setup, find_packages
5
 
6
 
7
+ common_setup_kwargs = {
8
+ "version": "0.4.1",
9
+ "name": "auto_gptq",
10
+ "author": "PanQiWei",
11
+ "description": "An easy-to-use LLMs quantization package with user-friendly apis, based on GPTQ algorithm.",
12
+ "long_description": (Path(__file__).parent / "README.md").read_text(encoding="UTF-8"),
13
+ "long_description_content_type": "text/markdown",
14
+ "url": "https://github.com/PanQiWei/AutoGPTQ",
15
+ "keywords": ["gptq", "quantization", "large-language-models", "transformers"],
16
+ "platforms": ["windows", "linux"],
17
+ "classifiers": [
18
+ "Environment :: GPU :: NVIDIA CUDA :: 11.7",
19
+ "Environment :: GPU :: NVIDIA CUDA :: 11.8",
20
+ "Environment :: GPU :: NVIDIA CUDA :: 12.0",
21
+ "License :: OSI Approved :: MIT License",
22
+ "Natural Language :: Chinese (Simplified)",
23
+ "Natural Language :: English",
24
+ "Programming Language :: Python :: 3.8",
25
+ "Programming Language :: Python :: 3.9",
26
+ "Programming Language :: Python :: 3.10",
27
+ "Programming Language :: Python :: 3.11",
28
+ "Programming Language :: C++",
29
+ ]
30
+ }
31
 
32
+
33
+ BUILD_CUDA_EXT = int(os.environ.get('BUILD_CUDA_EXT', '1')) == 1
34
+ if BUILD_CUDA_EXT:
35
+ try:
36
+ import torch
37
+ except ImportError:
38
+ print("Building cuda extension requires PyTorch(>=1.13.0) been installed, please install PyTorch first!")
39
+ sys.exit(-1)
40
+
41
+ CUDA_VERSION = None
42
+ ROCM_VERSION = os.environ.get('ROCM_VERSION', None)
43
+ if ROCM_VERSION and not torch.version.hip:
44
+ print(
45
+ f"Trying to compile auto-gptq for RoCm, but PyTorch {torch.__version__} "
46
+ "is installed without RoCm support."
47
+ )
48
+ sys.exit(-1)
49
+
50
+ if not ROCM_VERSION:
51
+ default_cuda_version = torch.version.cuda
52
+ CUDA_VERSION = "".join(os.environ.get("CUDA_VERSION", default_cuda_version).split("."))
53
+
54
+ if ROCM_VERSION:
55
+ common_setup_kwargs['version'] += f"+rocm{ROCM_VERSION}"
56
+ else:
57
+ if not CUDA_VERSION:
58
+ print(
59
+ f"Trying to compile auto-gptq for CUDA, byt Pytorch {torch.__version__} "
60
+ "is installed without CUDA support."
61
+ )
62
+ sys.exit(-1)
63
+ common_setup_kwargs['version'] += f"+cu{CUDA_VERSION}"
64
+
65
+
66
+ requirements = [
67
+ "accelerate>=0.19.0",
68
+ "datasets",
69
+ "numpy",
70
+ "rouge",
71
+ "torch>=1.13.0",
72
+ "safetensors",
73
+ "transformers>=4.31.0",
74
+ "peft"
75
+ ]
76
+
77
+ extras_require = {
78
+ "triton": ["triton==2.0.0"],
79
+ "test": ["parameterized"]
80
+ }
81
+
82
+ include_dirs = ["autogptq_cuda"]
83
+
84
+ additional_setup_kwargs = dict()
85
+ if BUILD_CUDA_EXT:
86
+ from torch.utils import cpp_extension
87
+
88
+ if not ROCM_VERSION:
89
+ from distutils.sysconfig import get_python_lib
90
+ conda_cuda_include_dir = os.path.join(get_python_lib(), "nvidia/cuda_runtime/include")
91
+
92
+ print("conda_cuda_include_dir", conda_cuda_include_dir)
93
+ if os.path.isdir(conda_cuda_include_dir):
94
+ include_dirs.append(conda_cuda_include_dir)
95
+ print(f"appending conda cuda include dir {conda_cuda_include_dir}")
96
+ extensions = [
97
+ cpp_extension.CUDAExtension(
98
+ "autogptq_cuda_64",
99
+ [
100
+ "autogptq_cuda/autogptq_cuda_64.cpp",
101
+ "autogptq_cuda/autogptq_cuda_kernel_64.cu"
102
+ ]
103
+ ),
104
+ cpp_extension.CUDAExtension(
105
+ "autogptq_cuda_256",
106
+ [
107
+ "autogptq_cuda/autogptq_cuda_256.cpp",
108
+ "autogptq_cuda/autogptq_cuda_kernel_256.cu"
109
+ ]
110
+ )
111
+ ]
112
+
113
+ if os.environ.get("INCLUDE_EXLLAMA_KERNELS", "1") == "1": # TODO: improve github action to always compile exllama_kernels
114
+ extensions.append(
115
+ cpp_extension.CUDAExtension(
116
+ "exllama_kernels",
117
+ [
118
+ "autogptq_cuda/exllama/exllama_ext.cpp",
119
+ "autogptq_cuda/exllama/cuda_buffers.cu",
120
+ "autogptq_cuda/exllama/cuda_func/column_remap.cu",
121
+ "autogptq_cuda/exllama/cuda_func/q4_matmul.cu",
122
+ "autogptq_cuda/exllama/cuda_func/q4_matrix.cu"
123
+ ]
124
+ )
125
+ )
126
+
127
+ additional_setup_kwargs = {
128
+ "ext_modules": extensions,
129
+ "cmdclass": {'build_ext': cpp_extension.BuildExtension}
130
+ }
131
+ common_setup_kwargs.update(additional_setup_kwargs)
132
+ setup(
133
+ packages=find_packages(),
134
+ install_requires=requirements,
135
+ extras_require=extras_require,
136
+ include_dirs=include_dirs,
137
+ python_requires=">=3.8.0",
138
+ **common_setup_kwargs
139
  )
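
The new `setup.py` above reads a few environment variables at build time (`BUILD_CUDA_EXT`, `CUDA_VERSION`, `ROCM_VERSION`, `INCLUDE_EXLLAMA_KERNELS`). A sketch of how a source install might use them; the version numbers are illustrative:

```shell
# skip building the CUDA extension entirely
BUILD_CUDA_EXT=0 pip install .

# override the CUDA version recorded in the package's local version tag (e.g. +cu118)
CUDA_VERSION=118 pip install .

# build the RoCm variant (requires a RoCm-enabled PyTorch build)
ROCM_VERSION=5.6 pip install .

# skip compiling the exllama q4 kernels
INCLUDE_EXLLAMA_KERNELS=0 pip install .
```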