Added and updated prompt_template-related code in the instructions
#14 opened by stabledelusion

README.md CHANGED
```diff
@@ -99,6 +99,10 @@ model = AutoGPTQForCausalLM.from_quantized(model_name_or_path,
 
 print("\n\n*** Generate:")
 
+prompt = "Tell me about AI"
+prompt_template=f'''### Instruction: {prompt}
+### Response:'''
+
 input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda()
 output = model.generate(inputs=input_ids, temperature=0.7, max_new_tokens=512)
 print(tokenizer.decode(output[0]))
@@ -109,8 +113,8 @@ print(tokenizer.decode(output[0]))
 logging.set_verbosity(logging.CRITICAL)
 
 prompt = "Tell me about AI"
-prompt_template=f'''###
-###
+prompt_template=f'''### Instruction: {prompt}
+### Response:'''
 
 print("*** Pipeline:")
 pipe = pipeline(
```
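For context, this is roughly how the README's "Generate" section reads once the first hunk is applied. The loading boilerplate at the top is not part of this diff: the repo name and the `from_quantized` arguments are placeholders assumed for the sake of a self-contained sketch, not taken from this PR.

```python
from transformers import AutoTokenizer
from auto_gptq import AutoGPTQForCausalLM

# Hypothetical repo name -- substitute the quantized model this README documents.
model_name_or_path = "TheBloke/Some-Model-GPTQ"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
model = AutoGPTQForCausalLM.from_quantized(model_name_or_path,
                                           device="cuda:0",
                                           use_safetensors=True)

print("\n\n*** Generate:")

# The lines added by this PR: build the Instruction/Response template
# before it is referenced below.
prompt = "Tell me about AI"
prompt_template = f'''### Instruction: {prompt}
### Response:'''

input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda()
output = model.generate(inputs=input_ids, temperature=0.7, max_new_tokens=512)
print(tokenizer.decode(output[0]))
```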
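The second hunk applies the same template to the pipeline example. The diff cuts off at `pipe = pipeline(`, so the arguments below are an assumption based on typical `transformers` text-generation usage, not taken from this PR; `model` and `tokenizer` are the objects loaded in the sketch above.

```python
from transformers import pipeline, logging

# Silence transformers warnings, as in the README.
logging.set_verbosity(logging.CRITICAL)

prompt = "Tell me about AI"
prompt_template = f'''### Instruction: {prompt}
### Response:'''

print("*** Pipeline:")
# Assumed arguments; the actual call is truncated in the diff.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=512,
    temperature=0.7,
)
print(pipe(prompt_template)[0]['generated_text'])
```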