kprsnt committed
Commit: 24c8493
1 Parent(s): c36c40a

Update app.py

Files changed (1):
  app.py +22 -29
app.py CHANGED
@@ -1,43 +1,36 @@
 import gradio as gr
-from transformers import GPT2LMHeadModel, GPT2Tokenizer, BertForMaskedLM, BertTokenizer
+from transformers import GPT2LMHeadModel, GPT2Tokenizer
 
-# Load GPT-2 for the initial argument
-model1 = GPT2LMHeadModel.from_pretrained("gpt2")
-tokenizer1 = GPT2Tokenizer.from_pretrained("gpt2")
+# Load GPT-2 for both initial argument and counter-argument
+model = GPT2LMHeadModel.from_pretrained("gpt2-medium")
+tokenizer = GPT2Tokenizer.from_pretrained("gpt2-medium")
 
-# Load BERT for the counter-argument
-model2 = BertForMaskedLM.from_pretrained("bert-base-uncased")
-tokenizer2 = BertTokenizer.from_pretrained("bert-base-uncased")
-
-def generate_gpt2_argument(query, max_length=200, temperature=0.7):
+def generate_argument(prompt, max_length=200, temperature=0.7):
     try:
-        prompt = f"Provide a logical explanation for the following topic: {query}\n\nExplanation:"
-        inputs = tokenizer1.encode(prompt, return_tensors="pt")
-        outputs = model1.generate(
+        inputs = tokenizer.encode(prompt, return_tensors="pt")
+        outputs = model.generate(
            inputs,
            max_length=max_length,
            temperature=temperature,
            num_return_sequences=1,
            do_sample=True,
-            pad_token_id=tokenizer1.eos_token_id
+            pad_token_id=tokenizer.eos_token_id
        )
-        return tokenizer1.decode(outputs[0], skip_special_tokens=True).replace(prompt, "").strip()
+        return tokenizer.decode(outputs[0], skip_special_tokens=True).replace(prompt, "").strip()
     except Exception as e:
         return f"An error occurred: {str(e)}"
 
-def generate_bert_argument(query, initial_argument, max_length=200):
-    try:
-        prompt = f"Topic: {query}. Initial argument: {initial_argument}. Counter-argument:"
-        inputs = tokenizer2.encode(prompt, return_tensors="pt", max_length=max_length, truncation=True)
-        outputs = model2(inputs).logits
-        generated_tokens = outputs.argmax(dim=-1)
-        return tokenizer2.decode(generated_tokens[0], skip_special_tokens=True).replace(prompt, "").strip()
-    except Exception as e:
-        return f"An error occurred: {str(e)}"
+def generate_initial_argument(query):
+    prompt = f"Topic: {query}\n\nProvide a logical explanation supporting this topic:\n"
+    return generate_argument(prompt, max_length=150)
+
+def generate_counter_argument(query, initial_argument):
+    prompt = f"Topic: {query}\n\nInitial argument: {initial_argument}\n\nProvide a well-reasoned counter-argument:\n"
+    return generate_argument(prompt, max_length=200, temperature=0.8)
 
 def debate(query):
-    initial_argument = generate_gpt2_argument(query)
-    counter_argument = generate_bert_argument(query, initial_argument)
+    initial_argument = generate_initial_argument(query)
+    counter_argument = generate_counter_argument(query, initial_argument)
     return initial_argument, counter_argument
 
 # Define the Gradio interface
@@ -46,12 +39,12 @@ iface = gr.Interface(
     inputs=gr.Textbox(lines=2, placeholder="Enter your question or topic for debate here..."),
     outputs=[
         gr.Textbox(label="Initial Argument (GPT-2)"),
-        gr.Textbox(label="Counter-Argument (BERT)")
+        gr.Textbox(label="Counter-Argument (GPT-2)")
     ],
-    title="Two-Model Debate System",
-    description="Enter a question or topic. GPT-2 will provide an initial argument, and BERT will generate a counter-argument.",
+    title="Two-Perspective Debate System",
+    description="Enter a question or topic. GPT-2 will provide an initial argument and a counter-argument.",
     examples=[
-        ["What are the long-term implications of artificial intelligence on employment?"],
+        ["Is it good for kids to go to schools, or are they wasting their time?"],
         ["Should governments prioritize space exploration or addressing climate change?"],
         ["Is genetic engineering in humans ethical for disease prevention?"]
     ]
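
A minimal sketch for locally exercising the refactored generation path outside the Gradio UI, assuming the updated app.py from this commit is importable from the working directory (importing it downloads the gpt2-medium weights on first run); the query string below is just one of the app's own examples:

# Minimal smoke test for the single-model debate flow in this commit.
# Assumes app.py (as updated above) is on the import path.
# Note: if app.py calls iface.launch() at module scope, importing it will
# start the Gradio server; guard that call under __main__ for this kind of test.
from app import debate

if __name__ == "__main__":
    query = "Should governments prioritize space exploration or addressing climate change?"
    initial_argument, counter_argument = debate(query)
    print("Initial argument:\n", initial_argument)
    print("\nCounter-argument:\n", counter_argument)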