theonerichy committed on
Commit
cc042e5
1 Parent(s): 6448781

fix prompt

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -8,7 +8,7 @@ llm = Llama(model_path="ggml-model-f16-q4_0.bin", n_ctx=2048)
8
  def generate_text_instruct(input_text):
9
  response = ""
10
  txt2tag_prompt = f"You are a tool that helps tag danbooru images when given a textual image description. Provide me with danbooru tags that accurately fit the following description. {input_text}"
11
- for output in llm(f"A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: {txt2tag_prompt} ASSISTANT: ",
12
  echo=False, stream=True, max_tokens=64, stop=["</s>", "\n", "User:", "<unk>"]):
13
  answer = output['choices'][0]['text']
14
  response += answer
@@ -22,7 +22,7 @@ instruct_interface = gr.Interface(
22
 
23
  with gr.Blocks() as demo:
24
  with gr.Tab("Instruct"):
25
- gr.Markdown("# GGML Spaces Instruct Demo")
26
  instruct_interface.render()
27
 
28
  demo.queue(max_size=16, concurrency_count=1).launch(debug=True)
 
8
  def generate_text_instruct(input_text):
9
  response = ""
10
  txt2tag_prompt = f"You are a tool that helps tag danbooru images when given a textual image description. Provide me with danbooru tags that accurately fit the following description. {input_text}"
11
+ for output in llm(f" A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: {txt2tag_prompt} ASSISTANT:",
12
  echo=False, stream=True, max_tokens=64, stop=["</s>", "\n", "User:", "<unk>"]):
13
  answer = output['choices'][0]['text']
14
  response += answer
 
22
 
23
  with gr.Blocks() as demo:
24
  with gr.Tab("Instruct"):
25
+ gr.Markdown("# GGML Booru Txt2Tag Demo")
26
  instruct_interface.render()
27
 
28
  demo.queue(max_size=16, concurrency_count=1).launch(debug=True)