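"""Two-Model Debate System.

Gradio app in which facebook/bart-large-cnn drafts an opening argument
for a topic and google/flan-t5-base replies with a counter-argument.
"""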
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM

# Load models
model1_name = "facebook/bart-large-cnn"
model2_name = "google/flan-t5-base"

tokenizer1 = AutoTokenizer.from_pretrained(model1_name)
model1 = AutoModelForSeq2SeqLM.from_pretrained(model1_name)
summarizer = pipeline("summarization", model=model1, tokenizer=tokenizer1)

tokenizer2 = AutoTokenizer.from_pretrained(model2_name)
model2 = AutoModelForSeq2SeqLM.from_pretrained(model2_name)
generator = pipeline("text2text-generation", model=model2, tokenizer=tokenizer2)
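# Note: both pipelines are built once at module load, so model weights are
# downloaded and initialized when the app starts rather than on every request.
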
def model1_response(query):
    # Use BART's summarization as the first position ("logical explanation") in the debate.
    # truncation=True keeps inputs within BART's 1024-token limit; for very short
    # queries the pipeline only warns that min_length/max_length exceed the input.
    summary = summarizer(query, max_length=150, min_length=50, do_sample=False,
                         truncation=True)
    return summary[0]['summary_text']
def model2_response(query, model1_output):
    # Use T5 to generate a counter-argument from the query and model 1's output
    prompt = f"Given the topic '{query}' and the argument '{model1_output}', provide a counter-argument:"
    response = generator(prompt, max_length=150, do_sample=True, temperature=0.7)
    return response[0]['generated_text']
def debate(query):
    response1 = model1_response(query)
    response2 = model2_response(query, response1)
    return response1, response2
# Define the Gradio interface
iface = gr.Interface(
    fn=debate,
    inputs=gr.Textbox(lines=2, placeholder="Enter your question or topic for debate here..."),
    outputs=[
        gr.Textbox(label="Model 1 Response (BART)"),
        gr.Textbox(label="Model 2 Counter-argument (T5)"),
    ],
    title="Two-Model Debate System",
    description="Enter a question or topic, and two AI models will debate it.",
    examples=[
        ["What are the pros and cons of renewable energy?"],
        ["Is artificial intelligence beneficial or harmful to society?"],
        ["Should governments implement a universal basic income?"],
    ],
)
# Launch the interface
iface.launch()
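# When this script is run directly (as on a Hugging Face Space), launch()
# serves the UI on Gradio's default local address, typically http://127.0.0.1:7860.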