WICKED4950 committed
Commit 827c0a2 · verified · 1 Parent(s): 44f5e72

Update app.py

Files changed (1)
app.py +12 -18
app.py CHANGED
@@ -1,26 +1,20 @@
 import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer
-# Load your model and tokenizer
-model_name = "WICKED4950/Irisbetterprecise" # Replace with your model
+# Load your model
+model_name = "WICKED4950/Esther400K_1epoch"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name,from_tf=True)
+model = AutoModelForCausalLM.from_pretrained(model_name)
 
 # Define the chatbot function
-def chatbot_response(input_text):
-    inputs = tokenizer.encode(input_text, return_tensors="pt")
-    outputs = model.generate(inputs, max_length=50, num_return_sequences=1)
+def chatbot(input_text):
+    inputs = tokenizer(input_text, return_tensors="pt")
+    outputs = model.generate(inputs["input_ids"], max_length=100)
     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
     return response
 
-# Create Gradio interface
-interface = gr.Interface(
-    fn=chatbot_response,
-    inputs=gr.Textbox(label="Ask me anything!"),
-    outputs=gr.Textbox(label="Response"),
-    title="My Chatbot",
-    description="A simple chatbot deployed using Hugging Face Spaces and Gradio!"
-)
-
-# Launch the app
-if __name__ == "__main__":
-    interface.launch()
+# Gradio interface
+iface = gr.Interface(fn=chatbot,
+                     inputs="text",
+                     outputs="text",
+                     title="Your Chatbot")
+iface.launch()
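
For anyone pulling this change locally, the snippet below is a minimal smoke test of the new generation path without launching the Gradio UI. It is a sketch rather than part of the commit: it assumes torch is installed (return_tensors="pt" produces PyTorch tensors), and the prompt string is purely illustrative.

# Sketch: exercise the updated model/tokenizer setup outside Gradio.
# Assumes torch is available; the prompt below is illustrative only.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "WICKED4950/Esther400K_1epoch"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

inputs = tokenizer("Hello, how are you?", return_tensors="pt")
# Same call pattern as chatbot(): total output capped at 100 tokens, prompt included.
outputs = model.generate(inputs["input_ids"], max_length=100)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))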