flicc-agent / app.py
"""
Module for detecting fallacies in text.
"""
import os
import gradio as gr
from utils.core import HamburgerStyle
from context.palm import rebuttal_generator
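
# HamburgerStyle (from utils.core) provides the structured rebuttal pipeline used
# by the Mixtral tab below: four separate prompts, one per component of the
# fact / myth / fallacy / fact "hamburger" debunking.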
rebuttal = HamburgerStyle()


def textgen(text, credentials):
    # Placeholder for the GPT-4 pipeline: it receives the input myth and the
    # OpenAI API key (the "credentials" textbox) but currently returns a canned
    # response.
    return "Hi there"
gpt4 = gr.Interface(
    fn=textgen,
    inputs=[
        gr.Textbox(
            label="input myth", lines=4, placeholder="climate change misinformation"
        ),
        gr.Textbox(label="credentials", lines=1, placeholder="your OpenAI API key"),
    ],
    outputs=gr.Textbox(
        lines=4, placeholder="## Fact:\n## Myth:\n## Fallacy:\n## Fact:\n"
    ),
    allow_flagging="never",
    description="A single, comprehensive prompt that assigns GPT-4 the expert persona of a climate change analyst to debunk misinformation.",
)
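
# Tab 2: a single prompt with dynamic context relevant to the input myth,
# answered by PaLM 2 (see context.palm.rebuttal_generator).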
palm = gr.Interface(
    fn=rebuttal_generator,
    inputs=gr.Textbox(
        label="input myth", lines=4, placeholder="climate change misinformation"
    ),
    outputs=gr.Textbox(
        lines=4, placeholder="## Fact:\n## Myth:\n## Fallacy:\n## Fact:\n"
    ),
    allow_flagging="never",
    description="A single prompt with dynamic context relevant to the input myth; uses the PaLM 2 LLM to debunk misinformation.",
)
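
# Tab 3: a structured approach with four separate prompts, one per component of
# the rebuttal, answered by the Mixtral LLM via HamburgerStyle.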
mix = gr.Interface(
    fn=rebuttal.rebuttal_generator,
    inputs=gr.Textbox(
        label="input myth", lines=4, placeholder="climate change misinformation"
    ),
    outputs=gr.Textbox(
        lines=4, placeholder="## Fact:\n## Myth:\n## Fallacy:\n## Fact:\n"
    ),
    allow_flagging="never",
    description="Four separate prompts, one per component of the output debunking; uses the Mixtral LLM to debunk misinformation.",
)
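
# Present the three debunking pipelines side by side as tabs of a single Gradio app.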
demo = gr.TabbedInterface(
    [gpt4, palm, mix],
    [
        "Single prompt, no context",
        "Single prompt, with context",
        "Structured prompt, with context",
    ],
    title="Generative Debunking of Climate Misinformation",
)
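
# queue() enables Gradio's request queue so that long-running LLM calls are
# handled in order rather than timing out.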
if __name__ == "__main__":
    demo.queue().launch()