import re

import gradio as gr
from transformers import BartTokenizer, BartForConditionalGeneration

# Load the BART model and tokenizer for text generation
model_name = "facebook/bart-base"
tokenizer = BartTokenizer.from_pretrained(model_name)
model = BartForConditionalGeneration.from_pretrained(model_name)

def detect_questions(email_text):
    # Simple heuristic: split the email into sentences and keep those ending with a question mark
    sentences = re.split(r"(?<=[.!?])\s+", email_text)
    return [sentence.strip() for sentence in sentences if sentence.strip().endswith("?")]

def generate_answers(question):
    # Generate an answer for the given question using the BART model
    inputs = tokenizer(question, return_tensors="pt", max_length=1024, truncation=True)
    output_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=50, early_stopping=True)
    answer = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    return answer

def process_email(email_text):
    questions = detect_questions(email_text)
    if not questions:
        return "No questions detected in the email."

    responses = []
    for question in questions:
        answer = generate_answers(question)
        responses.append(f"Q: {question}\nA: {answer}")

    return "\n\n".join(responses)

iface = gr.Interface(
    fn=process_email,
    inputs="textbox",
    outputs="text",
    title="Email Question Responder",
    description="Input an email, and the AI will detect questions and provide possible answers.",
)

iface.launch()
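
# Hypothetical usage example (illustration only, not part of the original app):
#   process_email("Hi team, can we move the meeting to Friday? The slides are attached.")
# would detect the first sentence as a question and return a "Q: ...\nA: ..." string;
# answer quality is limited because the BART checkpoint is not fine-tuned for question answering.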