# Hugging Face Space by vihaan43 — app.py (commit 5e8a2ba, verified)
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
# Load the GPT-NeoX model and tokenizer.
# NOTE(review): EleutherAI/gpt-neox-20b is a 20B-parameter checkpoint; loading
# it in default full precision needs tens of GB of memory — confirm the host
# can hold it (or consider dtype/device_map options at deployment time).
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neox-20b")
# Define system instructions.
# This prompt is prepended to every user message in generate_response().
SYSTEM_INSTRUCTIONS = (
"You are a helpful assistant specializing in gaming system instructions. "
"Follow all commands precisely. Provide step-by-step details for each task."
)
# Define the function for querying the model
def generate_response(user_input: str) -> str:
    """Generate an assistant reply for *user_input*.

    The system instructions are prepended to the user's message, the model
    completes the prompt via sampling, and only the text after the final
    "Assistant:" marker is returned.
    """
    # Prepend the system instructions to the user's input
    prompt = SYSTEM_INSTRUCTIONS + "\nUser: " + user_input + "\nAssistant:"
    inputs = tokenizer(prompt, return_tensors="pt")
    # FIX: max_length counts the prompt tokens too, so a long user input left
    # little or no room for the reply (and could even exceed the cap before
    # generation started). max_new_tokens bounds only the generated
    # continuation. pad_token_id is set explicitly because GPT-NeoX defines
    # no pad token, which otherwise triggers a runtime warning.
    outputs = model.generate(
        **inputs,
        max_new_tokens=300,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # The decoded text includes the prompt; keep only what follows the last
    # "Assistant:" marker, i.e. the model's reply.
    return response.split("Assistant:")[-1].strip()
# Wire the model up to a minimal Gradio UI: one text box in, one text box out.
demo = gr.Interface(
    fn=generate_response,
    inputs="text",
    outputs="text",
)
# Start the web server (blocks until the app is stopped).
demo.launch()