MNGames committed on
Commit
feee0c8
1 Parent(s): f73ee36

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +60 -39
app.py CHANGED
@@ -1,43 +1,64 @@
1
- import gradio as gr
2
- from transformers import pipeline, BartTokenizer, BartForConditionalGeneration
3
 
4
# Checkpoint used for both question detection and answering.
model_name = "facebook/bart-base"

# Load the BART tokenizer/model pair once at import time so every
# request reuses the same in-memory weights.
tokenizer = BartTokenizer.from_pretrained(model_name)
model = BartForConditionalGeneration.from_pretrained(model_name)
9
def detect_questions(email_text):
    """Extract candidate questions from *email_text* with the BART model.

    The decoded generation is split on the "##" delimiter and returned
    as a list of strings.
    """
    task_input = "generate questions: " + email_text
    encoded = tokenizer(
        task_input,
        return_tensors="pt",
        max_length=1024,
        truncation=True,
    )
    output_ids = model.generate(
        encoded["input_ids"],
        num_beams=4,
        max_length=50,
        early_stopping=True,
    )
    decoded = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    return decoded.split("##")
15
-
16
def generate_responses(questions):
    """Map each question string to a BART-generated answer string."""

    def _answer(question):
        # One beam-search generation per question.
        encoded = tokenizer(
            question, return_tensors="pt", max_length=1024, truncation=True
        )
        output_ids = model.generate(
            encoded["input_ids"], num_beams=4, max_length=200, early_stopping=True
        )
        return tokenizer.decode(output_ids[0], skip_special_tokens=True)

    # Insertion order of the dict matches the order of the input questions.
    return {question: _answer(question) for question in questions}
25
-
26
def process_email(email_text):
    """End-to-end pipeline: detect the email's questions, then answer them.

    Returns the {question: response} mapping produced by
    ``generate_responses``.
    """
    return generate_responses(detect_questions(email_text))
34
-
35
# Wire the pipeline into a minimal Gradio UI and start serving it.
_ui_config = {
    "fn": process_email,
    "inputs": "textbox",
    "outputs": "text",
    "title": "Email Question Detector and Responder",
    "description": "Input an email, and the AI will detect questions and provide possible responses.",
}
iface = gr.Interface(**_ui_config)

iface.launch()
 
 
1
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
2
+ from gradio import Interface
3
 
4
# Checkpoint to load (change if desired).
model_name = "facebook/bart-base"

# Instantiate the tokenizer and seq2seq model once at import time via the
# Auto* factories so the rest of the app reuses the same objects.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
10
+
11
def generate_questions(email):
    """Generates questions based on the input email.

    Bug fix: ``model.generate`` has no ``prompt`` keyword argument —
    transformers rejects unused generation kwargs at call time — so the
    instruction text is prepended to the email and encoded together with it.

    Returns the decoded generation as a single string.
    """
    instruction = "What are the important questions or things that need to be addressed in this email:\n"

    # Encode instruction + email; truncate so long emails fit the model's window.
    inputs = tokenizer(
        instruction + email, return_tensors="pt", truncation=True, max_length=1024
    )

    # Generate questions with beam search.
    generation = model.generate(
        input_ids=inputs["input_ids"],
        max_length=256,  # Adjust max length as needed
        num_beams=5,  # Adjust beam search for better quality (slower)
        early_stopping=True,
    )

    # Decode the generated token ids back to text.
    return tokenizer.decode(generation[0], skip_special_tokens=True)
27
+
28
def generate_answers(questions):
    """Generates possible answers to the input questions.

    Bug fix: the invalid ``prompt`` keyword to ``model.generate`` (rejected
    as an unused kwarg) is replaced by prepending the instruction to the
    text that gets encoded.

    Returns an iterator of (question, answer) pairs.
    """
    instruction = "Here are some possible answers to the questions:\n"

    # Encode instruction + newline-joined questions; truncate to fit the model.
    inputs = tokenizer(
        instruction + "\n".join(questions),
        return_tensors="pt",
        truncation=True,
        max_length=1024,
    )

    # Generate answers with beam search.
    generation = model.generate(
        input_ids=inputs["input_ids"],
        max_length=512,  # Adjust max length as needed
        num_beams=3,  # Adjust beam search for better quality (slower)
        early_stopping=True,
    )

    # Decode and split the generation into one candidate answer per line.
    answers = tokenizer.decode(generation[0], skip_special_tokens=True).split("\n")
    return zip(questions, answers[1:])  # Skip the first answer (prompt repetition)
45
+
46
def gradio_app(email):
    """Gradio interface function: returns (questions, answers) for two text outputs.

    Bug fix: the second output component is "text", which expects a string;
    the original returned a Python list, whose repr would be rendered
    verbatim. Answers are joined one-per-line instead.
    """
    questions = generate_questions(email)
    qa_pairs = generate_answers(questions.split("\n"))
    answers_text = "\n".join(answer for _, answer in qa_pairs)
    return questions, answers_text
51
+
52
# Gradio interface definition.
# Bug fix: ``Interface`` accepts no ``label``/``elem_id`` keyword arguments
# (those belong to individual components), so the original call raised a
# TypeError before the app could launch; the invalid kwargs are dropped.
interface = Interface(
    fn=gradio_app,
    inputs="textbox",
    outputs=["text", "text"],
    title="AI Email Assistant",
    description="Enter a long email and get questions and possible answers generated by an AI model.",
)

# Launch the Gradio interface
interface.launch()