File size: 1,064 Bytes
f7cb302
5e8a2ba
f7cb302
5e8a2ba
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the GPT-NeoX model and tokenizer once at module import time so every
# Gradio request reuses the same in-memory instances.
# NOTE(review): gpt-neox-20b is a 20B-parameter checkpoint (~40 GB in fp32);
# confirm the host has the memory, or consider torch_dtype / device_map here.
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neox-20b")

# Persona prompt prepended to every user message before generation.
SYSTEM_INSTRUCTIONS = (
    "You are a helpful assistant specializing in gaming system instructions."
    " Follow all commands precisely. Provide step-by-step details for each task."
)

# Define the function for querying the model
def generate_response(user_input):
    """Generate an assistant reply to *user_input* with the GPT-NeoX model.

    The module-level system instructions are prepended so the model answers
    in the configured persona; the prompt echo is stripped from the output.

    Args:
        user_input: Raw text entered by the user.

    Returns:
        The assistant's reply as a plain string.
    """
    # Prepend the system instructions to the user's input
    prompt = SYSTEM_INSTRUCTIONS + "\nUser: " + user_input + "\nAssistant:"
    inputs = tokenizer(prompt, return_tensors="pt")
    # Fix: the original used max_length=300, which counts the PROMPT tokens
    # too — a long prompt left little or no budget for the reply.
    # max_new_tokens bounds only the generated continuation.
    # pad_token_id is set explicitly because GPT-NeoX defines no pad token,
    # which otherwise triggers a warning on every sampled generation.
    outputs = model.generate(
        **inputs,
        max_new_tokens=300,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # The decoded text echoes the whole prompt; keep only what follows the
    # final "Assistant:" marker.
    return response.split("Assistant:")[-1].strip()

# Create a Gradio interface: a single text box in, a single text box out,
# wired to generate_response. launch() starts the local web server and
# blocks, so it must be the last statement in the script.
interface = gr.Interface(fn=generate_response, inputs="text", outputs="text")
interface.launch()