---
license: mit
---

## Introduction

Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. Compared with the previously released Qwen, the improvements include:

* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B, and 72B dense models, and an MoE model of 14B with 2.7B activated;
* Significant performance improvement in Chat models;
* Multilingual support of both base and chat models;
* Stable support of 32K context length for models of all sizes;
* No need for `trust_remote_code`.

For more details, please refer to our blog post and GitHub repo.

## Model Details

Qwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, a mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adapted to multiple natural languages and code. For the beta version, we have temporarily not included GQA (except for the 32B model) or the mixture of SWA and full attention.
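
Many of these design choices can be read straight off the model configuration. Below is a minimal sketch; the fields shown are standard `transformers` config attributes, the choice of which ones to print is illustrative, and the exact values are whatever the checkpoint reports:

```python
# Illustrative sketch: inspect architecture fields reported by the checkpoint's config.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("Qwen/Qwen1.5-1.8B")

print(config.model_type)               # "qwen2" for Qwen1.5 checkpoints
print(config.hidden_size)              # model width
print(config.num_attention_heads)      # number of query heads
print(config.num_key_value_heads)      # fewer than num_attention_heads when GQA is used
print(config.max_position_embeddings)  # supported context length
print(config.vocab_size)               # vocabulary size of the improved tokenizer
```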

## Requirements

The code of Qwen1.5 is included in the latest Hugging Face `transformers`, and we advise you to install `transformers>=4.37.0`; otherwise you might encounter the following error: `KeyError: 'qwen2'`.
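
With `transformers>=4.37.0` installed, the base model can be loaded and run directly, without `trust_remote_code`. A minimal sketch; the prompt and generation settings here are only illustrative:

```python
# Minimal sketch: load Qwen/Qwen1.5-1.8B with plain transformers and generate a continuation.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "Qwen/Qwen1.5-1.8B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",   # pick the dtype stored in the checkpoint
    device_map="auto",    # requires the accelerate package
)

inputs = tokenizer("GPTQ is a post-training quantization method that", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```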

## Quantization with AutoGPTQ

The following example quantizes `Qwen/Qwen1.5-1.8B` to 4-bit with AutoGPTQ, saves the quantized checkpoint, and runs inference with the quantized weights:

```python
from transformers import AutoTokenizer, TextGenerationPipeline
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
import logging

logging.basicConfig(
    format="%(asctime)s %(levelname)s [%(name)s] %(message)s", level=logging.INFO, datefmt="%Y-%m-%d %H:%M:%S"
)

pretrained_model_dir = "Qwen/Qwen1.5-1.8B"
quantized_model_dir = "local"

tokenizer = AutoTokenizer.from_pretrained(pretrained_model_dir, use_fast=True)
examples = [
    tokenizer(
        "auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm."
    )
]

quantize_config = BaseQuantizeConfig(
    bits=4,          # quantize the model to 4-bit
    group_size=128,  # it is recommended to set the value to 128
    desc_act=False,  # setting this to False can significantly speed up inference, but the perplexity may be slightly worse
)

# load the un-quantized model; by default, the model is loaded into CPU memory
model = AutoGPTQForCausalLM.from_pretrained(pretrained_model_dir, quantize_config)

# quantize the model; the examples should be a list of dicts whose keys can only be "input_ids" and "attention_mask"
model.quantize(examples)

# save the quantized model
model.save_quantized(quantized_model_dir)

# save the quantized model using safetensors
model.save_quantized(quantized_model_dir, use_safetensors=True)

# push the quantized model to the Hugging Face Hub.
# to use use_auth_token=True, log in first via `huggingface-cli login`,
# or pass an explicit token with use_auth_token="hf_xxxxxxx"
# (uncomment the following three lines to enable this feature)
# repo_id = f"YourUserName/{quantized_model_dir}"
# commit_message = f"AutoGPTQ model for {pretrained_model_dir}: {quantize_config.bits}bits, gr{quantize_config.group_size}, desc_act={quantize_config.desc_act}"
# model.push_to_hub(repo_id, commit_message=commit_message, use_auth_token=True)

# alternatively, you can save and push at the same time
# (uncomment the following three lines to enable this feature)
# repo_id = f"YourUserName/{quantized_model_dir}"
# commit_message = f"AutoGPTQ model for {pretrained_model_dir}: {quantize_config.bits}bits, gr{quantize_config.group_size}, desc_act={quantize_config.desc_act}"
# model.push_to_hub(repo_id, save_dir=quantized_model_dir, use_safetensors=True, commit_message=commit_message, use_auth_token=True)

# load the quantized model onto the first GPU
model = AutoGPTQForCausalLM.from_quantized(quantized_model_dir, device="cuda:0")

# download the quantized model from the Hugging Face Hub and load it onto the first GPU
# model = AutoGPTQForCausalLM.from_quantized(repo_id, device="cuda:0", use_safetensors=True, use_triton=False)

# inference with model.generate
print(tokenizer.decode(model.generate(**tokenizer("auto_gptq is", return_tensors="pt").to(model.device))[0]))

# or you can also use the pipeline
pipeline = TextGenerationPipeline(model=model, tokenizer=tokenizer)
print(pipeline("auto-gptq is")[0]["generated_text"])
```
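
Depending on the AutoGPTQ version, the exported folder may also carry a `quantization_config` that plain `transformers` understands. If it does, and `optimum` plus `auto-gptq` are installed, the 4-bit weights can be loaded without the AutoGPTQ-specific classes. This is a hedged sketch under those assumptions, not guaranteed for every export:

```python
# Hedged sketch: load the GPTQ folder with plain transformers.
# Assumes the saved checkpoint exposes a transformers-readable quantization_config
# and that optimum and auto-gptq are installed; older AutoGPTQ exports may not qualify.
from transformers import AutoModelForCausalLM, AutoTokenizer

quantized_model_dir = "local"  # the directory written by save_quantized above

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-1.8B")
model = AutoModelForCausalLM.from_pretrained(quantized_model_dir, device_map="auto")

inputs = tokenizer("auto-gptq is", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=32)[0], skip_special_tokens=True))
```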