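"""Gradio demo app for the ConfIA phishing-detection models (English and Spanish)."""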
import os

import gradio as gr
from huggingface_hub import login, CommitScheduler

from lib.result import Result
from lib.data_entry import DataEntry
from lib.telemetry import TelemetryManager
from lib.model import Model
from lib.lang import Language
from schemas.request import Request

# Authenticate with the Hugging Face Hub using the token from the HF_TOKEN environment variable
login(os.environ["HF_TOKEN"])

# One phishing-detection model per supported language
models: dict[Language, Model] = {
    Language.ENGLISH: Model.get_english_model(),
    Language.SPANISH: Model.get_spanish_model(),
}

# Telemetry sink that records every analyzed request
telemetry = TelemetryManager()

async def app_func(text: str, language: str) -> int:
    """Validate the input, run the model for the selected language, and return a phishing probability in percent."""
    try:
        request = Request(text=text, language=language)
    except ValueError as e:
        # Surface validation problems to the user instead of crashing the app
        raise gr.Error(str(e))

    result = models[request.language].analyze(request.text)
    telemetry.write_data(DataEntry(text, result))

    # Convert the model score to a percentage and clamp it to the 0-100 range
    percentage = round(result.percentage * 100)
    return min(max(percentage, 0), 100)


# Single-function UI: text and language in, phishing probability out
demo = gr.Interface(
    fn=app_func,
    inputs=[
        gr.Text(label="Texto"),
        gr.Radio(label="Idioma", choices=[Language.ENGLISH.value, Language.SPANISH.value]),
    ],
    outputs=gr.Label(num_top_classes=1, label="Probabilidad de phishing"),
    title="ConfIA Model Demo",
    description="Demo que te permite probar nuestros modelos de forma muy sencilla",
    examples=[
        ["You have just Woned a free iPhone 16!! FOR FREE!!!", Language.ENGLISH.value],
        ["When will you review that PR? It's kinda urgent", Language.ENGLISH.value],
    ],
    cache_examples=True,
)

if __name__ == "__main__":
    # Cap the request queue at 5 pending jobs and launch with a public share link
    demo.queue(max_size=5)
    demo.launch(share=True, debug=True)