aifeifei798 committed on
Commit
66bca5e
·
verified ·
1 Parent(s): 23f8374

Upload README.md

Browse files
Files changed (1) hide show
  1. README.md +1 -12
README.md CHANGED
@@ -85,16 +85,6 @@ model, tokenizer = FastLanguageModel.from_pretrained(
85
  # token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf
86
  )
87
 
88
- if False:
89
- from unsloth import FastLanguageModel
90
- model, tokenizer = FastLanguageModel.from_pretrained(
91
- model_name = "Llama-3.2-1B-Instruct-bnb-4bit-lora", # YOUR MODEL YOU USED FOR TRAINING
92
- max_seq_length = max_seq_length,
93
- dtype = dtype,
94
- load_in_4bit = load_in_4bit,
95
- )
96
- FastLanguageModel.for_inference(model) # Enable native 2x faster inference
97
-
98
  # alpaca_prompt = You MUST copy from above!
99
 
100
  inputs = tokenizer(
@@ -120,8 +110,7 @@ from transformers import TextStreamer
120
  text_streamer = TextStreamer(tokenizer)
121
  _ = model.generate(**inputs, streamer = text_streamer, max_new_tokens = 1024)
122
 
123
- #model.save_pretrained_merged("Llama-3.2-1B-Instruct-bnb-4bit-end", tokenizer)
124
-
125
  ```
126
 
127
  ### Trainer Program
 
85
  # token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf
86
  )
87
 
 
 
 
 
 
 
 
 
 
 
88
  # alpaca_prompt = You MUST copy from above!
89
 
90
  inputs = tokenizer(
 
110
  text_streamer = TextStreamer(tokenizer)
111
  _ = model.generate(**inputs, streamer = text_streamer, max_new_tokens = 1024)
112
 
113
+
 
114
  ```
115
 
116
  ### Trainer Program