TheBloke committed on
Commit
c3a0b06
1 Parent(s): a44beb2

Upload README.md

Browse files
Files changed (1) hide show
  1. README.md +10 -5
README.md CHANGED
@@ -7,7 +7,9 @@ license: mit
7
  model_creator: Nobody.png
8
  model_name: Yi 34B GiftedConvo Llama
9
  model_type: llama
10
- prompt_template: '{prompt}
 
 
11
 
12
  '
13
  quantized_by: TheBloke
@@ -55,10 +57,11 @@ These files were quantised using hardware kindly provided by [Massed Compute](ht
55
  <!-- repositories-available end -->
56
 
57
  <!-- prompt-template start -->
58
- ## Prompt template: Unknown
59
 
60
  ```
61
- {prompt}
 
62
 
63
  ```
64
 
@@ -236,7 +239,8 @@ from huggingface_hub import InferenceClient
236
  endpoint_url = "https://your-endpoint-url-here"
237
 
238
  prompt = "Tell me about AI"
239
- prompt_template=f'''{prompt}
 
240
  '''
241
 
242
  client = InferenceClient(endpoint_url)
@@ -289,7 +293,8 @@ model = AutoModelForCausalLM.from_pretrained(model_name_or_path,
289
  tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
290
 
291
  prompt = "Tell me about AI"
292
- prompt_template=f'''{prompt}
 
293
  '''
294
 
295
  print("\n\n*** Generate:")
 
7
  model_creator: Nobody.png
8
  model_name: Yi 34B GiftedConvo Llama
9
  model_type: llama
10
+ prompt_template: 'USER: {prompt}
11
+
12
+ ASSISTANT:
13
 
14
  '
15
  quantized_by: TheBloke
 
57
  <!-- repositories-available end -->
58
 
59
  <!-- prompt-template start -->
60
+ ## Prompt template: User-Assistant
61
 
62
  ```
63
+ USER: {prompt}
64
+ ASSISTANT:
65
 
66
  ```
67
 
 
239
  endpoint_url = "https://your-endpoint-url-here"
240
 
241
  prompt = "Tell me about AI"
242
+ prompt_template=f'''USER: {prompt}
243
+ ASSISTANT:
244
  '''
245
 
246
  client = InferenceClient(endpoint_url)
 
293
  tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
294
 
295
  prompt = "Tell me about AI"
296
+ prompt_template=f'''USER: {prompt}
297
+ ASSISTANT:
298
  '''
299
 
300
  print("\n\n*** Generate:")