kprsnt committed on
Commit
c36c40a
1 Parent(s): fc9c323

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +30 -26
app.py CHANGED
@@ -1,39 +1,43 @@
1
  import gradio as gr
2
- from transformers import AutoModelForCausalLM, AutoTokenizer
3
 
4
- # Load the models with trust_remote_code=True
5
- model1 = AutoModelForCausalLM.from_pretrained("microsoft/GRIN-MoE", trust_remote_code=True)
6
- tokenizer1 = AutoTokenizer.from_pretrained("microsoft/GRIN-MoE", trust_remote_code=True)
7
 
8
- model2 = AutoModelForCausalLM.from_pretrained("microsoft/GRIN-MoE", trust_remote_code=True)
9
- tokenizer2 = AutoTokenizer.from_pretrained("microsoft/GRIN-MoE", trust_remote_code=True)
 
10
 
11
- def generate_argument(model, tokenizer, prompt, max_length=200, temperature=0.7):
12
  try:
13
- inputs = tokenizer(prompt, return_tensors="pt")
14
- outputs = model.generate(
15
- **inputs,
 
16
  max_length=max_length,
17
- num_return_sequences=1,
18
  temperature=temperature,
19
- do_sample=True
 
 
20
  )
21
- return tokenizer.decode(outputs[0], skip_special_tokens=True)
22
  except Exception as e:
23
  return f"An error occurred: {str(e)}"
24
 
25
- def generate_initial_argument(query):
26
- prompt = f"Provide a logical explanation for the following topic: {query}"
27
- return generate_argument(model1, tokenizer1, prompt)
28
-
29
- def generate_counter_argument(query, initial_argument):
30
- prompt = f"Given the topic '{query}' and the initial argument '{initial_argument}', provide a well-reasoned counter-argument:"
31
- response = generate_argument(model2, tokenizer2, prompt)
32
- return response.replace(prompt, "").strip()
 
33
 
34
  def debate(query):
35
- initial_argument = generate_initial_argument(query)
36
- counter_argument = generate_counter_argument(query, initial_argument)
37
  return initial_argument, counter_argument
38
 
39
  # Define the Gradio interface
@@ -41,11 +45,11 @@ iface = gr.Interface(
41
  fn=debate,
42
  inputs=gr.Textbox(lines=2, placeholder="Enter your question or topic for debate here..."),
43
  outputs=[
44
- gr.Textbox(label="Initial Argument (GRIN-MoE)"),
45
- gr.Textbox(label="Counter-Argument (GRIN-MoE)")
46
  ],
47
  title="Two-Model Debate System",
48
- description="Enter a question or topic. GRIN-MoE will provide an initial argument and a counter-argument.",
49
  examples=[
50
  ["What are the long-term implications of artificial intelligence on employment?"],
51
  ["Should governments prioritize space exploration or addressing climate change?"],
 
1
  import gradio as gr
2
+ from transformers import GPT2LMHeadModel, GPT2Tokenizer, BertForMaskedLM, BertTokenizer
3
 
4
# Load GPT-2 for the initial argument.
# NOTE: from_pretrained downloads weights on first run and loads them at
# import time, so app startup blocks until both models are in memory.
model1 = GPT2LMHeadModel.from_pretrained("gpt2")
tokenizer1 = GPT2Tokenizer.from_pretrained("gpt2")

# Load BERT for the counter-argument.
# NOTE(review): BertForMaskedLM is a masked LM, not a text generator —
# see generate_bert_argument below for how it is (ab)used here.
model2 = BertForMaskedLM.from_pretrained("bert-base-uncased")
tokenizer2 = BertTokenizer.from_pretrained("bert-base-uncased")
11
 
12
def generate_gpt2_argument(query, max_length=200, temperature=0.7):
    """Generate an initial argument for *query* using GPT-2.

    Args:
        query: Topic or question to argue about.
        max_length: Total token budget (prompt + continuation) passed to
            ``generate``; the prompt's tokens count against it.
        temperature: Sampling temperature (``do_sample=True``).

    Returns:
        The generated continuation (prompt removed) as a string, or an
        ``"An error occurred: ..."`` message if anything raises.
    """
    try:
        prompt = f"Provide a logical explanation for the following topic: {query}\n\nExplanation:"
        inputs = tokenizer1.encode(prompt, return_tensors="pt")
        outputs = model1.generate(
            inputs,
            max_length=max_length,
            temperature=temperature,
            num_return_sequences=1,
            do_sample=True,
            # GPT-2 has no pad token; reusing EOS silences the generate() warning.
            pad_token_id=tokenizer1.eos_token_id,
        )
        # Fix: strip the prompt by token count. The previous
        # decode(...).replace(prompt, "") silently left the prompt in the
        # output whenever decoding did not reproduce it byte-for-byte
        # (GPT-2's BPE round-trip does not guarantee that).
        continuation = outputs[0][inputs.shape[-1]:]
        return tokenizer1.decode(continuation, skip_special_tokens=True).strip()
    except Exception as e:  # surfaced in the UI instead of crashing the app
        return f"An error occurred: {str(e)}"
27
 
28
def generate_bert_argument(query, initial_argument, max_length=200):
    """Produce the "counter-argument" text for *query* using BERT.

    NOTE(review): BertForMaskedLM is a masked language model, not an
    autoregressive generator. Taking the argmax of the logits for a fully
    unmasked input mostly reconstructs the input itself, so the result is
    not a genuine counter-argument — a causal or seq2seq model would be
    needed to do this properly. Original behaviour is kept here.

    Args:
        query: The debate topic.
        initial_argument: The GPT-2 argument to respond to.
        max_length: Truncation limit (in tokens) for the encoded prompt.

    Returns:
        The decoded argmax tokens with the prompt text stripped, or an
        ``"An error occurred: ..."`` message if anything raises.
    """
    import torch  # local import: only needed here, for inference mode

    try:
        prompt = f"Topic: {query}. Initial argument: {initial_argument}. Counter-argument:"
        inputs = tokenizer2.encode(prompt, return_tensors="pt", max_length=max_length, truncation=True)
        # Fix: inference only — no_grad avoids building an autograd graph
        # (saves memory and time on every request).
        with torch.no_grad():
            logits = model2(inputs).logits
        generated_tokens = logits.argmax(dim=-1)
        return tokenizer2.decode(generated_tokens[0], skip_special_tokens=True).replace(prompt, "").strip()
    except Exception as e:  # surfaced in the UI instead of crashing the app
        return f"An error occurred: {str(e)}"
37
 
38
def debate(query):
    """Run one debate round: GPT-2 opens, BERT responds.

    Returns a ``(opening, rebuttal)`` pair of strings for the two
    Gradio output textboxes.
    """
    opening = generate_gpt2_argument(query)
    rebuttal = generate_bert_argument(query, opening)
    return opening, rebuttal
42
 
43
  # Define the Gradio interface
 
45
  fn=debate,
46
  inputs=gr.Textbox(lines=2, placeholder="Enter your question or topic for debate here..."),
47
  outputs=[
48
+ gr.Textbox(label="Initial Argument (GPT-2)"),
49
+ gr.Textbox(label="Counter-Argument (BERT)")
50
  ],
51
  title="Two-Model Debate System",
52
+ description="Enter a question or topic. GPT-2 will provide an initial argument, and BERT will generate a counter-argument.",
53
  examples=[
54
  ["What are the long-term implications of artificial intelligence on employment?"],
55
  ["Should governments prioritize space exploration or addressing climate change?"],