Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,39 +1,43 @@
|
|
1 |
import gradio as gr
|
2 |
-
from transformers import
|
3 |
|
4 |
-
# Load the
|
5 |
-
model1 =
|
6 |
-
tokenizer1 =
|
7 |
|
8 |
-
|
9 |
-
|
|
|
10 |
|
11 |
-
def
|
12 |
try:
|
13 |
-
|
14 |
-
|
15 |
-
|
|
|
16 |
max_length=max_length,
|
17 |
-
num_return_sequences=1,
|
18 |
temperature=temperature,
|
19 |
-
|
|
|
|
|
20 |
)
|
21 |
-
return
|
22 |
except Exception as e:
|
23 |
return f"An error occurred: {str(e)}"
|
24 |
|
25 |
-
def
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
-
|
32 |
-
|
|
|
33 |
|
34 |
def debate(query):
|
35 |
-
initial_argument =
|
36 |
-
counter_argument =
|
37 |
return initial_argument, counter_argument
|
38 |
|
39 |
# Define the Gradio interface
|
@@ -41,11 +45,11 @@ iface = gr.Interface(
|
|
41 |
fn=debate,
|
42 |
inputs=gr.Textbox(lines=2, placeholder="Enter your question or topic for debate here..."),
|
43 |
outputs=[
|
44 |
-
gr.Textbox(label="Initial Argument (
|
45 |
-
gr.Textbox(label="Counter-Argument (
|
46 |
],
|
47 |
title="Two-Model Debate System",
|
48 |
-
description="Enter a question or topic.
|
49 |
examples=[
|
50 |
["What are the long-term implications of artificial intelligence on employment?"],
|
51 |
["Should governments prioritize space exploration or addressing climate change?"],
|
|
|
# --- Model setup -------------------------------------------------------------
# Two pretrained checkpoints back the debate app: GPT-2 writes the opening
# argument, BERT is used for the rebuttal.
import gradio as gr
from transformers import (
    BertForMaskedLM,
    BertTokenizer,
    GPT2LMHeadModel,
    GPT2Tokenizer,
)

# GPT-2 for the initial argument
model1 = GPT2LMHeadModel.from_pretrained("gpt2")
tokenizer1 = GPT2Tokenizer.from_pretrained("gpt2")

# BERT for the counter-argument
model2 = BertForMaskedLM.from_pretrained("bert-base-uncased")
tokenizer2 = BertTokenizer.from_pretrained("bert-base-uncased")
|
11 |
|
def generate_gpt2_argument(query, max_length=200, temperature=0.7):
    """Generate the opening debate argument for *query* with GPT-2.

    Args:
        query: Topic or question to argue about.
        max_length: Maximum total token length (prompt + continuation).
        temperature: Sampling temperature passed to ``generate``.

    Returns:
        The generated argument text, or a human-readable error message
        string if generation fails (the Gradio UI displays either as-is).
    """
    try:
        prompt = f"Provide a logical explanation for the following topic: {query}\n\nExplanation:"
        inputs = tokenizer1.encode(prompt, return_tensors="pt")
        outputs = model1.generate(
            inputs,
            max_length=max_length,
            temperature=temperature,
            num_return_sequences=1,
            do_sample=True,
            # GPT-2 has no pad token; reuse EOS to silence the generate warning.
            pad_token_id=tokenizer1.eos_token_id,
        )
        # Slice the prompt tokens off before decoding instead of
        # str.replace(prompt, ""): decoding can normalize whitespace, so the
        # decoded text need not contain the prompt verbatim and the replace
        # could silently leave the prompt echoed in the output.
        continuation = outputs[0][inputs.shape[1]:]
        return tokenizer1.decode(continuation, skip_special_tokens=True).strip()
    except Exception as e:
        # Best-effort: surface the failure in the UI rather than crash the app.
        return f"An error occurred: {str(e)}"
|
27 |
|
def generate_bert_argument(query, initial_argument, max_length=200):
    """Produce the counter-argument text for *query* via BERT.

    NOTE(review): ``BertForMaskedLM`` is a masked language model, not an
    autoregressive generator. Taking the argmax over the logits of a fully
    unmasked input largely reconstructs the input tokens, so the result tends
    to echo the prompt rather than compose a new argument — consider a causal
    or seq2seq model if a real counter-argument is wanted.

    Args:
        query: The debate topic.
        initial_argument: GPT-2's opening argument to respond to.
        max_length: Truncation limit (in tokens) for the encoded prompt.

    Returns:
        The decoded BERT output with the prompt stripped, or an error
        message string if anything fails.
    """
    try:
        import torch  # local import: only needed for the no-grad context

        prompt = f"Topic: {query}. Initial argument: {initial_argument}. Counter-argument:"
        inputs = tokenizer2.encode(prompt, return_tensors="pt", max_length=max_length, truncation=True)
        # Inference only — avoid building an autograd graph (saves memory/compute).
        with torch.no_grad():
            logits = model2(inputs).logits
        generated_tokens = logits.argmax(dim=-1)
        return tokenizer2.decode(generated_tokens[0], skip_special_tokens=True).replace(prompt, "").strip()
    except Exception as e:
        # Best-effort: surface the failure in the UI rather than crash the app.
        return f"An error occurred: {str(e)}"
|
37 |
|
38 |
def debate(query):
    """Run one debate round on *query*: GPT-2 opens, BERT responds.

    Returns a ``(initial_argument, counter_argument)`` pair of strings,
    in the order the Gradio output textboxes expect.
    """
    opening = generate_gpt2_argument(query)
    rebuttal = generate_bert_argument(query, opening)
    return opening, rebuttal
|
42 |
|
43 |
# Define the Gradio interface
|
|
|
45 |
fn=debate,
|
46 |
inputs=gr.Textbox(lines=2, placeholder="Enter your question or topic for debate here..."),
|
47 |
outputs=[
|
48 |
+
gr.Textbox(label="Initial Argument (GPT-2)"),
|
49 |
+
gr.Textbox(label="Counter-Argument (BERT)")
|
50 |
],
|
51 |
title="Two-Model Debate System",
|
52 |
+
description="Enter a question or topic. GPT-2 will provide an initial argument, and BERT will generate a counter-argument.",
|
53 |
examples=[
|
54 |
["What are the long-term implications of artificial intelligence on employment?"],
|
55 |
["Should governments prioritize space exploration or addressing climate change?"],
|