"""FastAPI service that summarizes emails with TinyLlama via a chat-style prompt."""
import os

import torch
import transformers
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from huggingface_hub import login
from pydantic import BaseModel

# Hugging Face read token (env var DS4); needed to download gated/private models.
access_token_read = os.getenv("DS4")
# SECURITY: never print the raw token — it would land in stdout/log capture.
# Only report whether it is configured.
print("HF token configured:", access_token_read is not None)
if access_token_read:
    # Guard: login(token=None) would fall back to an interactive prompt,
    # which hangs/fails inside a server process.
    login(token=access_token_read)

# Define the FastAPI app
app = FastAPI()

# NOTE(review): wildcard CORS is acceptable for development, but lock down
# allow_origins before exposing this service publicly.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load the model once at import time; bfloat16 + device_map="auto" places it
# on GPU when available, CPU otherwise.
pipe = transformers.pipeline(
    "text-generation",
    model="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)


# Define the request model for email input
class EmailRequest(BaseModel):
    """Request payload describing the email to summarize."""

    subject: str
    sender: str
    recipients: str
    body: str


def create_email_prompt(subject: str, sender: str, recipients: str, body: str) -> str:
    """Build a chat-template prompt asking the model to summarize one email.

    Args:
        subject: Email subject line.
        sender: Sender address/name.
        recipients: Recipient list as a single string.
        body: Full email body text.

    Returns:
        The rendered prompt string (system + user turns) produced by the
        model's chat template, ready to feed to the text-generation pipeline.
    """
    messages = [
        {
            "role": "system",
            "content": "You are an email summarizer. Your goal is to provide a concise summary by focusing on key points, action items, and urgency."
        },
        {
            "role": "user",
            "content": f"""
    Summarize the following email by focusing on the key points, action items, and urgency.

    Email Details:
    Subject: {subject}
    Sender: {sender}
    Recipients: {recipients}

    Body:
    {body}

    Provide a concise summary of email body in points that includes important information, if any actions are required, and the priority of the email.
    """
        }
    ]
    prompt = pipe.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    return prompt


# Define the FastAPI endpoint for email summarization
@app.post("/summarize-email/")
async def summarize_email(email: EmailRequest):
    """Summarize an email.

    Returns:
        ``{"summary": <str>}`` — the model's generated summary text.
    """
    prompt = create_email_prompt(email.subject, email.sender, email.recipients, email.body)

    # Use the pipeline to generate the summary
    outputs = pipe(
        prompt,
        max_new_tokens=256,
        do_sample=True,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
    )
    # FIX: the text-generation pipeline echoes the full prompt at the start of
    # "generated_text"; the original returned the raw pipeline output, leaking
    # the entire prompt (and email) back to the client. Strip the prompt so
    # only the model's summary is returned.
    generated = outputs[0]["generated_text"]
    summary = generated[len(prompt):].strip()
    return {"summary": summary}