# Hugging Face Spaces capture header (originally read "Spaces: / Runtime
# error" — page residue, not code); kept as a comment so the file parses.
# --- Dependencies -----------------------------------------------------------
# Many of these imports are unused leftovers from earlier iterations; they are
# kept (deduplicated only) so any code elsewhere in the project is unaffected.
import os
import tempfile

import pandas as pd
import nltk

import langchain
from langchain import OpenAI, VectorDBQA
from langchain.chains import ConversationalRetrievalChain, RetrievalQA
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.chains.question_answering import load_qa_chain
from langchain.document_loaders import (
    DirectoryLoader,
    OnlinePDFLoader,
    SeleniumURLLoader,
    TextLoader,
    UnstructuredFileLoader,
    UnstructuredURLLoader,
)
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI, OpenAIChat  # deliberately shadows langchain.OpenAI, as in the original import order
from langchain.memory import ConversationBufferMemory  # deliberately shadows the chains.conversation.memory import above
from langchain.prompts import PromptTemplate
from langchain.schema import Document
from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma, DeepLake, FAISS

# --- Credentials ------------------------------------------------------------
# Read eagerly so a missing variable fails fast with a KeyError at startup.
openai_api_key = os.environ['OPENAI_API_KEY']
organization_key = os.environ['OPENAI_ORG_KEY']

# BUG FIX: OpenAIEmbeddings declares no `organization_key` field, so the
# original kwarg was rejected at construction time; the documented parameter
# name is `openai_organization`.
embeddings = OpenAIEmbeddings(
    openai_api_key=openai_api_key,
    openai_organization=organization_key,
)

# Pre-built FAISS index (persisted to the "faiss_index_yt" directory).
docsearch = FAISS.load_local("faiss_index_yt", embeddings)

# Prompt for the "stuff" QA chain: retrieved documents fill {context},
# conversation memory fills {chat_history}.
template = """You are a virtual assistant discussing Toyota vehicles.
Please respond to our clients in a good way, Intelligently generate a welcoming phrase.
When discussing about toyota vehicles information or any recomendation, provide accurate information from this knowledge
base: {context}. If a question falls outside this document's scope, kindly reply with 'I'm sorry, but
the available information is limited as I am an AI assistant.'
{chat_history} Human: {human_input} Virtual Assistant:"""

prompt = PromptTemplate(
    input_variables=["chat_history", "human_input", "context"], template=template
)

# NOTE(review): `max_history` is not a documented ConversationBufferMemory
# field — confirm it has any effect on the installed langchain version.
memory = ConversationBufferMemory(memory_key="chat_history", input_key="human_input", max_history=2)

# "stuff" chain: all retrieved documents are concatenated into {context}.
chain = load_qa_chain(
    OpenAI(temperature=0.3), chain_type="stuff", memory=memory, prompt=prompt
)
# --- UI / runtime dependencies ----------------------------------------------
# (These would conventionally live at the top of the file with the other
# imports; kept here to preserve the original script layout.)
import gradio as gr
import requests
import nest_asyncio
import re
# Map a user query to 2023 Toyota Tundra trim images.
def get_image_url(query):
    """Return ``[(image_url, trim_name), ...]`` for every 2023 Tundra trim
    mentioned as a whole word in *query* (case-insensitive).

    Returns an empty list when no known trim is mentioned.
    """
    # Drop punctuation so e.g. "TRD?" or "sr5," still match.
    query = re.sub(r'[^\w\s]', '', query)
    tundra_images = {
        'trd': 'https://www.toyota.com/imgix/content/dam/toyota/jellies/max/2023/tundra/8424.png?bg=fff&fm=webp&q=90&w=1764',
        'limited': 'https://www.toyota.com/imgix/content/dam/toyota/jellies/max/2023/tundra/8372.png?bg=fff&fm=webp&q=90&w=1764',
        'sr': 'https://www.toyota.com/imgix/content/dam/toyota/jellies/max/2023/tundra/8342.png?bg=fff&fm=webp&q=90&w=1764',
        'sr5': 'https://www.toyota.com/imgix/content/dam/toyota/jellies/max/2023/tundra/8361.png?bg=fff&fm=webp&q=90&w=1764',
        'platinum': 'https://www.toyota.com/imgix/content/dam/toyota/jellies/max/2023/tundra/8375.png?bg=fff&fm=webp&q=90&w=1764',
        '1794': 'https://www.toyota.com/imgix/content/dam/toyota/jellies/max/2023/tundra/8386.png?bg=fff&fm=webp&q=90&w=1764',
        'capstone': 'https://www.toyota.com/imgix/content/dam/toyota/jellies/max/2023/tundra/8425.png?bg=fff&fm=webp&q=90&w=1764',
    }
    # BUG FIX: the original list contained '1794 edition'; a two-word entry
    # can never equal a single whitespace-split word, so the 1794 trim was
    # unmatchable. Use '1794', which is also the image-dict key.
    model_names = ['sr', 'sr5', 'trd', 'platinum', 'limited', 'capstone', '1794']
    words = query.lower().split()
    # Whole-word matching keeps "sr" from firing on "sr5".
    return [(tundra_images[model], model) for model in model_names if model in words]
# --- Gradio UI ---------------------------------------------------------------
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.Button("Clear")
    chat_history = []

    def user(user_message, history):
        """Answer *user_message* via the QA chain and append the exchange to
        *history*.

        Returns ``(textbox-clear update, updated history)`` for the Gradio
        ``msg.submit`` outputs.
        """
        # Retrieve supporting documents and run the "stuff" QA chain on them.
        docs = docsearch.similarity_search(user_message)
        output = chain(
            {"input_documents": docs, "human_input": user_message},
            return_only_outputs=False,
        )
        model_info = get_image_url(user_message)

        # Strip any leading punctuation/whitespace the LLM sometimes emits.
        output_text = re.sub(r'^\W+', '', output['output_text'])

        # Build an <img> tag per Tundra trim mentioned in the question.
        images_html = ""
        for image_url, model_name in model_info:
            if image_url:
                image_html = f"<img src='{image_url}'><br><br>"
                images_html += f"Toyota Tundra {model_name.capitalize()} 2023: {image_html}"

        # BUG FIX: `video_link` was only assigned when the query contained a
        # 4-digit year, raising NameError below otherwise — default it first.
        video_link = ""
        # BUG FIX: guard len(docs); the original indexed docs[0]/docs[1]
        # unconditionally. Skip the link for empty/noise top hits
        # ('nan' cells and '♪' transcript fragments).
        if (
            len(docs) >= 2
            and docs[0].page_content not in ('nan', '♪')
            and docs[1].page_content != 'nan'
        ):
            year_match = re.search(r'\d{4}', user_message)
            if year_match:
                model_year = year_match.group()
                sources = output['input_documents']
                # Default to the top document's link, then prefer a source
                # whose title mentions the requested model year.
                video_link = f"Source: {sources[0].metadata['video_link']}"
                for doc in sources:
                    if model_year in doc.metadata['title']:
                        video_link = f"Source: {doc.metadata['video_link']}"
                        break

        history.append((user_message, f"{output_text}<br>{images_html}{video_link}"))
        return gr.update(value=""), history

    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False)
    clear.click(lambda: None, None, chatbot, queue=False)
if __name__ == "__main__":
    # Patch the event loop so it can be re-entered (needed when launching
    # from environments that already run a loop, e.g. notebooks/Spaces).
    nest_asyncio.apply()
    # share=True exposes a public Gradio URL; debug=True blocks and streams logs.
    demo.launch(debug=True, share=True)