hikinegi committed on
Commit
5c71e83
·
1 Parent(s): b9eae2b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -6
app.py CHANGED
@@ -1,5 +1,7 @@
1
  import torch
2
  from transformers import LlamaForCausalLM, LlamaTokenizer
 
 
3
 
4
  # Hugging Face model_path
5
  model_path = 'psmathur/orca_mini_3b'
@@ -10,7 +12,7 @@ model = LlamaForCausalLM.from_pretrained(
10
 
11
 
12
  #generate text function
13
- def predict(system, instruction, input=None):
14
 
15
  if input:
16
  prompt = f"### System:\n{system}\n\n### User:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
@@ -38,8 +40,6 @@ def predict(system, instruction, input=None):
38
  string = tokenizer.decode(output, skip_special_tokens=True)
39
  return f'[!] Response: {string}'
40
 
41
- import gradio as gr
42
-
43
  # Define input components
44
  prompt_input = gr.inputs.Textbox(label="System")
45
  instruction_input = gr.inputs.Textbox(label="Instruction")
@@ -49,7 +49,6 @@ context_input = gr.inputs.Textbox(label="Context")
49
  output_text = gr.outputs.Textbox(label="Output")
50
 
51
  # Create the interface
52
- iface=gr.Interface(fn=predict,
53
  inputs=[prompt_input, instruction_input, context_input],
54
- outputs=output_text,enable_queue=True)
55
- iface.launch()
 
1
  import torch
2
  from transformers import LlamaForCausalLM, LlamaTokenizer
3
+ import gradio as gr
4
+
5
 
6
  # Hugging Face model_path
7
  model_path = 'psmathur/orca_mini_3b'
 
12
 
13
 
14
  #generate text function
15
+ def generate_text(system, instruction, input=None):
16
 
17
  if input:
18
  prompt = f"### System:\n{system}\n\n### User:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n"
 
40
  string = tokenizer.decode(output, skip_special_tokens=True)
41
  return f'[!] Response: {string}'
42
 
 
 
43
  # Define input components
44
  prompt_input = gr.inputs.Textbox(label="System")
45
  instruction_input = gr.inputs.Textbox(label="Instruction")
 
49
  output_text = gr.outputs.Textbox(label="Output")
50
 
51
  # Create the interface
52
+ gr.Interface(fn=generate_text,
53
  inputs=[prompt_input, instruction_input, context_input],
54
+ outputs=output_text,enable_queue=True).launch(debug=True)