# Author: Francisco Zanartu — commit 0267b0d ("add palm methods"), file size 5.36 kB
# (Header reconstructed from Hugging Face page metadata captured with the file.)
"""
Module for detecting fallacies in text.
Functions:
- rebuttal_generator: Detects fallacies in a text input by utilizing models for fallacy
detection and semantic textual similarity and generates a rebuttal for the fallacious claim.
- query: Sends a query to a specified API endpoint with the provided payload and returns
the response.
- demo: Launches a Gradio interface for interactively detecting fallacies in text.
Dependencies:
- os: Provides a portable way of using operating system dependent functionality.
- json: Provides functions for encoding and decoding JSON data.
- requests: Allows sending HTTP requests easily.
- gradio: Facilitates the creation of customizable UI components for machine learning models.
- langchain_google_genai: Wrapper for Google Generative AI language models.
- auxiliar: Contains auxiliary data used in the fallacy detection process.
Environment Variables:
- HF_API_KEY: API key for accessing Hugging Face model APIs.
- GOOGLE_API_KEY: API key for accessing Google APIs.
Constants:
- FLICC_MODEL: API endpoint for the FLICC model used for fallacy detection.
- CARDS_MODEL: API endpoint for the CARDS model used for fallacy detection.
- SEMANTIC_TEXTUAL_SIMILARITY: API endpoint for the model used for semantic textual similarity.
Global Variables:
- hf_api_key: API key for accessing Hugging Face model APIs.
- google_key: API key for accessing Google APIs.
- safety_settings: Settings for safety measures in the Google Generative AI model.
- llm: Instance of the GoogleGenerativeAI class for text generation.
- similarity_template: Template for generating prompts for similarity comparison.
- FALLACY_CLAIMS: Dictionary containing fallacy labels and corresponding claims.
- DEBUNKINGS: Dictionary containing debunkings for fallacy claims.
- DEFINITIONS: Dictionary containing definitions for fallacy labels.
"""
import os
import json
import requests
from langchain_google_genai import GoogleGenerativeAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from auxiliar import (
FALLACY_CLAIMS,
DEBUNKINGS,
DEFINITIONS,
SIMILARITY_TEMPLATE,
)
# Credentials are read eagerly at import time; a missing variable raises
# KeyError here rather than failing later inside a request.
hf_api_key = os.environ["HF_API_KEY"]
google_key = os.environ["GOOGLE_API_KEY"]
# Deterministic (temperature=0) PaLM text model used to phrase the rebuttal.
llm = GoogleGenerativeAI(
    model="models/text-bison-001",
    google_api_key=google_key,
    temperature=0,
    # safety_settings=safety_settings,
)
# Prompt template (defined in auxiliar.SIMILARITY_TEMPLATE) expecting the
# variables: claim, fallacy, definition, example, text.
similarity_template = PromptTemplate.from_template(SIMILARITY_TEMPLATE)
def query(payload, api_url, api_token=hf_api_key, timeout=30):
    """
    Send a payload to a Hugging Face inference API endpoint and return the
    decoded JSON response.

    The payload is wrapped as ``{"inputs": payload, "options": ...}`` with
    caching disabled and ``wait_for_model`` enabled, so cold models are
    loaded before answering instead of returning a 503.

    Args:
        payload: The inputs to send (a string or a dict, depending on the
            target model's task).
        api_url (str): The URL of the inference API endpoint.
        api_token (str, optional): Bearer token for authentication.
            Defaults to the module-level ``hf_api_key``.
        timeout (float, optional): Seconds to wait for the HTTP response
            before raising. Defaults to 30. (Fix: the original call had no
            timeout and could block forever.)

    Returns:
        The JSON-decoded response body (typically a list or dict).

    Raises:
        requests.exceptions.Timeout: If the endpoint does not answer in time.
        json.JSONDecodeError: If the response body is not valid JSON.

    Example:
        >>> query({"text": "example text"}, "https://api.example.com")
        {'status': 'success', 'result': 'example result'}
    """
    headers = {"Authorization": f"Bearer {api_token}"}
    # wait_for_model avoids 503s while the hosted model spins up.
    options = {"use_cache": False, "wait_for_model": True}
    body = {"inputs": payload, "options": options}
    response = requests.post(api_url, headers=headers, json=body, timeout=timeout)
    # Decode explicitly as UTF-8 rather than trusting header-based detection.
    return json.loads(response.content.decode("utf-8"))
# Hosted inference endpoints used by rebuttal_generator:
# FLICC taxonomy classifier — labels the specific fallacy technique.
FLICC_MODEL = "https://api-inference.huggingface.co/models/fzanartu/flicc"
# Binary CARDS classifier — decides whether the text is "Contrarian" at all.
CARDS_MODEL = (
    "https://api-inference.huggingface.co/models/crarojasca/BinaryAugmentedCARDS"
)
# Sentence-transformers model scoring similarity between the input text and
# each known fallacious claim.
SEMANTIC_TEXTUAL_SIMILARITY = (
    "https://api-inference.huggingface.co/models/sentence-transformers/all-MiniLM-L6-v2"
)
def rebuttal_generator(text):
    """
    Detect a fallacy in *text* and generate a rebuttal for it.

    Pipeline: the CARDS model first screens the text; only "Contrarian"
    texts go on to the FLICC model for a fallacy label. The most similar
    known claim for that label is then found via semantic textual
    similarity, and the LLM writes a rebuttal from the matching debunking.

    Args:
        text (str): The input text containing a potentially fallacious claim.

    Returns:
        str: A rebuttal for the fallacious claim, or a message stating that
        no fallacy was detected.

    Raises:
        ValueError: If no similar sentence is found.

    Example:
        >>> rebuttal_generator("This is a text containing a fallacy.")
        'A rebuttal to the fallacy of [fallacy label]: [rebuttal]'
    """
    # Guard: anything the binary classifier does not flag is left alone.
    cards_response = query(text, api_url=CARDS_MODEL)
    if cards_response[0][0].get("label") != "Contrarian":
        return "No fallacy has been detected in your text."

    # Identify the specific fallacy technique.
    flicc_response = query(text, api_url=FLICC_MODEL)
    label = flicc_response[0][0].get("label")
    claims = FALLACY_CLAIMS.get(label, None)
    if not claims:
        raise ValueError("No similar sentence found")

    # Rank the known claims for this fallacy by similarity to the input.
    scores = query(
        {"source_sentence": text, "sentences": claims},
        api_url=SEMANTIC_TEXTUAL_SIMILARITY,
    )
    best_idx = scores.index(max(scores))
    best_claim = claims[best_idx]

    # Ask the LLM to adapt the stored debunking of the closest claim.
    chain = LLMChain(llm=llm, prompt=similarity_template, verbose=True)
    return chain.run(
        {
            "claim": best_claim,
            "fallacy": label,
            "definition": DEFINITIONS.get(label),
            "example": DEBUNKINGS.get(best_claim),
            "text": text,
        }
    )