Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,12 +1,10 @@
|
|
1 |
import gradio as gr
|
2 |
from transformers import pipeline
|
3 |
|
4 |
-
|
5 |
-
model1 = gr.load("models/microsoft/GRIN-MoE")
|
6 |
|
7 |
-
|
8 |
-
|
9 |
-
model2 = gr.load("models/microsoft/GRIN-MoE")
|
10 |
|
11 |
def generate_initial_argument(query):
|
12 |
prompt = f"Provide a logical explanation for the following topic: {query}"
|
@@ -14,11 +12,20 @@ def generate_initial_argument(query):
|
|
14 |
return response
|
15 |
|
16 |
def generate_counter_argument(query, initial_argument):
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
22 |
|
23 |
def debate(query):
|
24 |
initial_argument = generate_initial_argument(query)
|
|
|
1 |
import gradio as gr
|
2 |
from transformers import pipeline
|
3 |
|
4 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
|
|
# Load the debate model once at startup. NOTE: this is a large MoE checkpoint;
# from_pretrained downloads/loads it from the Hugging Face hub.
model1 = AutoModelForCausalLM.from_pretrained("microsoft/GRIN-MoE")
tokenizer1 = AutoTokenizer.from_pretrained("microsoft/GRIN-MoE")

# FIX: generate_counter_argument references model2/tokenizer2, but this revision
# removed their definitions (previously `model2 = gr.load("models/microsoft/GRIN-MoE")`),
# which makes every counter-argument call fail with a NameError. The old code
# loaded the *same* checkpoint for both roles, so alias instead of loading a
# second multi-GB copy into memory.
model2 = model1
tokenizer2 = tokenizer1
|
|
|
8 |
|
9 |
def generate_initial_argument(query):
|
10 |
prompt = f"Provide a logical explanation for the following topic: {query}"
|
|
|
12 |
return response
|
13 |
|
14 |
def generate_counter_argument(query, initial_argument):
|
15 |
+
try:
|
16 |
+
prompt = f"Given the topic '{query}' and the initial argument '{initial_argument}', provide a well-reasoned counter-argument:"
|
17 |
+
inputs = tokenizer2(prompt, return_tensors="pt")
|
18 |
+
outputs = model2.generate(
|
19 |
+
**inputs,
|
20 |
+
max_length=200,
|
21 |
+
num_return_sequences=1,
|
22 |
+
temperature=0.7,
|
23 |
+
do_sample=True
|
24 |
+
)
|
25 |
+
counter_argument = tokenizer2.decode(outputs[0], skip_special_tokens=True)
|
26 |
+
return counter_argument.replace(prompt, "").strip()
|
27 |
+
except Exception as e:
|
28 |
+
return f"An error occurred: {str(e)}"
|
29 |
|
30 |
def debate(query):
|
31 |
initial_argument = generate_initial_argument(query)
|