import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Load GPT-2 for both the initial argument and the counter-argument
model = GPT2LMHeadModel.from_pretrained("gpt2-medium")
tokenizer = GPT2Tokenizer.from_pretrained("gpt2-medium")

def generate_argument(prompt, max_length=200, temperature=0.7):
    """Generate text from a prompt and return it with the prompt stripped out."""
    try:
        inputs = tokenizer.encode(prompt, return_tensors="pt")
        outputs = model.generate(
            inputs,
            max_length=max_length,
            temperature=temperature,
            num_return_sequences=1,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id
        )
        return tokenizer.decode(outputs[0], skip_special_tokens=True).replace(prompt, "").strip()
    except Exception as e:
        return f"An error occurred: {str(e)}"

def generate_initial_argument(query):
    # Prompt the model to argue in support of the topic
    prompt = f"Topic: {query}\n\nProvide a logical explanation supporting this topic:\n"
    return generate_argument(prompt, max_length=150)

def generate_counter_argument(query, initial_argument):
    # Feed the initial argument back in and ask for a rebuttal,
    # with a slightly higher temperature for more varied output
    prompt = f"Topic: {query}\n\nInitial argument: {initial_argument}\n\nProvide a well-reasoned counter-argument:\n"
    return generate_argument(prompt, max_length=200, temperature=0.8)

def debate(query):
    initial_argument = generate_initial_argument(query)
    counter_argument = generate_counter_argument(query, initial_argument)
    return initial_argument, counter_argument

# Define the Gradio interface
iface = gr.Interface(
    fn=debate,
    inputs=gr.Textbox(lines=2, placeholder="Enter your question or topic for debate here..."),
    outputs=[
        gr.Textbox(label="Initial Argument (GPT-2)"),
        gr.Textbox(label="Counter-Argument (GPT-2)")
    ],
    title="Two-Perspective Debate System",
    description="Enter a question or topic. GPT-2 will provide an initial argument and a counter-argument.",
    examples=[
        ["Is it good for kids to go to schools, or are they wasting their time?"],
        ["Should governments prioritize space exploration or addressing climate change?"],
        ["Is genetic engineering in humans ethical for disease prevention?"]
    ]
)

# Launch the interface
iface.launch()