File size: 2,145 Bytes
741d230
 
 
 
 
dc59cf6
f8b8183
0267b0d
d88f51e
dc59cf6
f8b8183
b3e2437
7f1f7ae
d88f51e
7f1f7ae
 
 
 
d88f51e
 
 
 
 
 
 
 
 
 
7f1f7ae
 
 
 
 
 
 
 
0267b0d
7f1f7ae
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
"""
Module for detecting fallacies in text.

"""

import gradio as gr
from utils.core import HamburgerStyle
from context.palm import rebuttal_generator
from single.GPT4 import rebuttal_gen

rebuttal = HamburgerStyle()

# Variant 1: one comprehensive prompt that assigns GPT-4 an expert
# "climate change analyst" persona. Takes the myth plus optional OpenAI
# credentials; outputs the four-part rebuttal as text.
gpt4 = gr.Interface(
    fn=rebuttal_gen,
    inputs=[
        gr.Textbox(
            label="input myth", lines=4, placeholder="climate change misinformation"
        ),
        gr.Textbox(
            label="openai_api_key",
            lines=1,
            # Fixed placeholder typo: "OpenAi" -> "OpenAI".
            placeholder="your OpenAI API key, default to None",
        ),
        gr.Textbox(
            label="openai_organization",
            lines=1,
            # Fixed placeholder typo: "OpenAi" -> "OpenAI".
            placeholder="your OpenAI Organization, default to None",
        ),
    ],
    outputs=gr.Textbox(
        lines=4, placeholder="## Fact:\n## Myth:\n## Fallacy:\n## Fact:\n"
    ),
    allow_flagging="never",  # demo app: hide Gradio's flagging button
    description="Single, comprehensive prompt which assigns GPT-4 the role of a climate change analyst as an expert persona to debunk misinformation",
)
# Variant 2: single prompt augmented with context retrieved for the input
# myth, answered by PaLM 2. Only the myth textbox is needed as input.
palm = gr.Interface(
    fn=rebuttal_generator,
    inputs=gr.Textbox(
        label="input myth", lines=4, placeholder="climate change misinformation"
    ),
    outputs=gr.Textbox(
        lines=4, placeholder="## Fact:\n## Myth:\n## Fallacy:\n## Fact:\n"
    ),
    allow_flagging="never",  # demo app: hide Gradio's flagging button
    # Fixed description typo: "dinamic" -> "dynamic".
    description="Single prompt with dynamic context relevant to the input myth, uses Palm2 LLM to debunk misinformation",
)
# Variant 3: structured prompting — one prompt per rebuttal component —
# served by the Mixtral-backed HamburgerStyle generator.
_mix_input = gr.Textbox(
    label="input myth", lines=4, placeholder="climate change misinformation"
)
_mix_output = gr.Textbox(
    lines=4, placeholder="## Fact:\n## Myth:\n## Fallacy:\n## Fact:\n"
)
mix = gr.Interface(
    fn=rebuttal.rebuttal_generator,
    inputs=_mix_input,
    outputs=_mix_output,
    allow_flagging="never",
    description="Four separate prompts, one per component of the output debunking, uses Mixtral LLM to debunk misinformation",
)


# Assemble the three variants into a single tabbed UI, ordered from the
# simplest prompting strategy to the most structured one.
_tab_titles = [
    "Single prompt, no context",
    "Single prompt, with context",
    "Structured prompt, with context",
]
demo = gr.TabbedInterface(
    [gpt4, palm, mix],
    _tab_titles,
    title="Generative Debunking of Climate Misinformation",
)

if __name__ == "__main__":
    demo.queue().launch()