import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments, DataCollatorForLanguageModeling
from datasets import load_dataset
# Load and prepare the dataset
dataset = load_dataset("daily_dialog")
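# daily_dialog provides train/validation/test splits; each example holds one
# multi-turn conversation as a list of utterance strings under the "dialog" key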
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")

# DialoGPT's tokenizer ships without a pad token; reuse the EOS token so that
# padding="max_length" below does not fail
tokenizer.pad_token = tokenizer.eos_token
# Define training arguments
training_args = TrainingArguments(
    output_dir="./results",
    num_train_epochs=3,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    warmup_steps=500,
    weight_decay=0.01,
    logging_dir='./logs',
    logging_steps=10,
)
# Prepare the data for training: daily_dialog stores each example's turns as a
# list of strings under "dialog", so join the turns with the EOS token first
def tokenize_function(examples):
    texts = [tokenizer.eos_token.join(turns) for turns in examples["dialog"]]
    return tokenizer(texts, padding="max_length", truncation=True)

tokenized_datasets = dataset.map(tokenize_function, batched=True, remove_columns=dataset["train"].column_names)
# Causal-LM fine-tuning needs a "labels" field; this collator copies the
# input_ids into labels for each batch (mlm=False)
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

# Initialize the Trainer
trainer = Trainer(
    model=model,
    args=training_args,
    data_collator=data_collator,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["test"],
)
# Train the model; Streamlit re-runs this script on every interaction, so cache
# the training step to make sure fine-tuning happens only once per process
@st.cache_resource
def train_model():
    trainer.train()
    return trainer.model

model = train_model()
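# Optional: persist the fine-tuned weights so later runs could reload them
# instead of retraining (the output directory is illustrative; this could also
# live inside train_model() so it only runs once)
trainer.save_model("./fine_tuned_dialogpt")
tokenizer.save_pretrained("./fine_tuned_dialogpt")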
# Streamlit interface
st.title('Simple Chatbot')
user_input = st.text_input("You: ")
if user_input:
    # Encode the user input and generate a response
    inputs = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors='pt')
    reply_ids = model.generate(inputs, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    # Decode only the newly generated tokens so the echoed prompt is not shown
    reply = tokenizer.decode(reply_ids[0, inputs.shape[-1]:], skip_special_tokens=True)
    st.write("Bot:", reply)
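# Optional extension (sketch, not wired in): DialoGPT can condition on earlier
# turns by concatenating the running token history, which could be kept in
# st.session_state across Streamlit reruns, e.g. (requires `import torch`):
#
#   history = st.session_state.get("history")
#   context = torch.cat([history, inputs], dim=-1) if history is not None else inputs
#   st.session_state["history"] = model.generate(context, max_length=1000,
#                                                pad_token_id=tokenizer.eos_token_id)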