theonerichy committed on
Commit 8c3676a
1 Parent(s): 18aaf23
Files changed (1)
  1. app.py +19 -12
app.py CHANGED
@@ -5,17 +5,24 @@ MAX_TOKENS = 64

  llm = Llama(model_path="ggml-model-f16-q4_0.bin", n_ctx=2048)

- def generate_text(prompt):
-     output = llm(prompt, max_tokens=MAX_TOKENS, echo=False, stop=["</s>", "\n", "User:"])
-     output_text = output['choices'][0]['text']
-     return output_text
+ def generate_text_instruct(input_text):
+     response = ""
+     txt2tag_prompt = f"Provide me with danbooru tags that accurately describe the following description. {input_text}"
+     for output in llm(f"A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: {txt2tag_prompt} ASSISTANT: ",
+                       echo=False, stream=True, stop=["</s>", "\n", "User:"]):
+         answer = output['choices'][0]['text']
+         response += answer
+         yield response

- description = f"txt2tag 13B base model, max_tokens={MAX_TOKENS}"
-
- gradio_interface = gr.Interface(
-     fn=generate_text,
-     inputs="text",
-     outputs="text",
-     title="txt2tag API",
+ instruct_interface = gr.Interface(
+     fn=generate_text_instruct,
+     inputs=gr.inputs.Textbox(lines=10, label="Enter your instruction text"),
+     outputs=gr.outputs.Textbox(label="Output text"),
  )
- gradio_interface.launch()
+
+ with gr.Blocks() as demo:
+     with gr.Tab("Instruct"):
+         gr.Markdown("# GGML Spaces Instruct Demo")
+         instruct_interface.render()
+
+ demo.queue(max_size=16, concurrency_count=1).launch(debug=True)
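
Note: generate_text_instruct is now a generator, so the Gradio Interface streams the accumulating response into the output textbox as each chunk arrives; also, max_tokens=MAX_TOKENS is no longer passed to llm(), so the library's default limit applies. A minimal sketch of exercising the generator outside Gradio, assuming the llm instance defined in app.py (the prompt text below is only an illustrative example, not part of the commit):

# Hypothetical smoke test for the streaming generator; each yielded value is
# the full response accumulated so far, not an individual token.
for partial in generate_text_instruct("a girl with long silver hair holding an umbrella in the rain"):
    last = partial
print(last)  # the final yield holds the complete tag list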