import os
from io import BytesIO

import gradio as gr
import torch
from langchain_together import ChatTogether
from langchain_community.llms import Together
from langchain_pinecone import PineconeVectorStore
from langchain_openai import OpenAIEmbeddings
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.document_loaders import PyPDFLoader
from elevenlabs.client import ElevenLabs
from transformers import AutoTokenizer, AutoModelForCausalLM

# Environment Variables (set these to your own keys; never hard-code real secrets)
os.environ['TOGETHER_API_KEY'] = 'YOUR_TOGETHER_API_KEY'
os.environ['PINECONE_API_KEY'] = 'YOUR_PINECONE_API_KEY'

# Define your functions and classes here based on your original code
# (extract_text_from_pdf, topic_chain, summary_pdf_chain, evaluate_summary,
#  text_to_speech_stream, generate_gif, animate_image)

# Example function to process questions
def process_question(file):
    # Use your existing functions to process the PDF and generate outputs
    pdffile = extract_text_from_pdf(file)
    three_topics = topic_chain.invoke({"context": pdffile})
    summary = summary_pdf_chain.invoke(pdffile)
    evaluation = evaluate_summary(summary)
    audio_file = text_to_speech_stream(summary)
    shape = generate_gif(three_topics)        # image prompt built from the extracted topics
    ai_assistant = animate_image(audio_file)  # animated assistant driven by the narration audio
    return summary, evaluation, ai_assistant, shape

# Define Gradio Interface (Gradio 4.x components; the gr.inputs / gr.outputs namespaces were removed)
iface = gr.Interface(
    fn=process_question,
    inputs=gr.File(label="Upload PDF File"),
    outputs=[
        gr.Textbox(label="Summary"),
        gr.Textbox(label="Evaluation"),
        gr.Audio(label="AI Assistant"),
        gr.Image(label="3D Shape"),
    ],
)

if __name__ == "__main__":
    iface.launch()
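
# ---------------------------------------------------------------------------
# Hedged sketch: the PDF-extraction helper and the two LCEL chains referenced
# in process_question(). These are illustrative only and belong in the
# "Define your functions and classes here" section above the Gradio wiring.
# The Together model name and the prompt wording are assumptions; swap in
# whatever the original project used.
# ---------------------------------------------------------------------------
def extract_text_from_pdf(file):
    """Load an uploaded PDF with PyPDFLoader and join all pages into one string."""
    # Gradio's File component passes a temp-file object; fall back to a plain path.
    path = file.name if hasattr(file, "name") else file
    pages = PyPDFLoader(path).load()
    return "\n".join(page.page_content for page in pages)

_llm = ChatTogether(
    model="meta-llama/Llama-3-70b-chat-hf",  # assumed model; replace with your own
    temperature=0,
)

# Chain that extracts three key topics from the document text.
topic_chain = (
    ChatPromptTemplate.from_template(
        "List the three main topics covered in the following document:\n\n{context}"
    )
    | _llm
    | StrOutputParser()
)

# Chain that produces a concise summary of the document text.
summary_pdf_chain = (
    ChatPromptTemplate.from_template(
        "Write a concise summary of the following document:\n\n{context}"
    )
    | _llm
    | StrOutputParser()
)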
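
# ---------------------------------------------------------------------------
# Hedged sketch: a simple LLM-as-judge version of evaluate_summary(). The
# rubric wording and the reuse of the same Together chat model (_llm above)
# are assumptions; the original evaluation logic may differ.
# ---------------------------------------------------------------------------
_evaluation_chain = (
    ChatPromptTemplate.from_template(
        "Rate the following summary for clarity, coverage, and conciseness "
        "on a 1-10 scale, briefly justifying each score:\n\n{summary}"
    )
    | _llm
    | StrOutputParser()
)

def evaluate_summary(summary):
    """Return a short LLM-generated critique of the summary."""
    return _evaluation_chain.invoke({"summary": summary})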
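
# ---------------------------------------------------------------------------
# Hedged sketch: text_to_speech_stream() built on the ElevenLabs v1 Python
# SDK. The ELEVENLABS_API_KEY environment variable, the placeholder voice id,
# and the model id are assumptions; adjust them to your own account settings.
# ---------------------------------------------------------------------------
_eleven_client = ElevenLabs(api_key=os.environ.get("ELEVENLABS_API_KEY"))

def text_to_speech_stream(text):
    """Convert the summary text to speech and return the MP3 bytes in a BytesIO buffer."""
    audio_chunks = _eleven_client.text_to_speech.convert(
        voice_id="YOUR_VOICE_ID",           # placeholder; use a voice id from your account
        text=text,
        model_id="eleven_multilingual_v2",  # assumed model id
    )
    buffer = BytesIO()
    for chunk in audio_chunks:
        buffer.write(chunk)
    buffer.seek(0)
    return buffer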