from langchain.llms import OpenAI
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
import pickle
import gradio as gr

# Using a vector space for our search (these imports are needed when
# building the FAISS index that is unpickled below).
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.faiss import FAISS
from langchain.text_splitter import CharacterTextSplitter

# Load the prebuilt FAISS search index from disk.
with open("search_index.pickle", "rb") as f:
    search_index = pickle.load(f)


# Get the GPT-3 response using LangChain.
def print_answer(question, openai):
    chain = load_qa_with_sources_chain(openai)
    response = chain(
        {
            "input_documents": search_index.similarity_search(question, k=4),
            "question": question,
        },
        return_only_outputs=True,
    )["output_text"]
    # The last line of the chain output lists the sources; replace the raw
    # URLs with numbered "Click Link" labels.
    answer = response.split("\n")[0]
    sources = response.split("\n")[-1].split()
    if len(sources) > 2:
        response = answer + ", ".join(
            [" Click Link" + str(i) for i in range(1, len(sources))]
        )
    else:
        response = answer + " Click Link"
    return response


def chat(message, history, openai_api_key):
    openai = OpenAI(temperature=0, openai_api_key=openai_api_key)
    history = history or []
    message = message.lower()
    response = print_answer(message, openai)
    history.append((message, response))
    return history, history


with gr.Blocks() as demo:
    gr.HTML("""
<p>Hi, I'm a Q and A $RepoName expert bot. Start by typing in your OpenAI API key, then the questions or issues you are facing in your $RepoName implementation, and press enter.</p>
<p>Duplicate the Space with a GPU upgrade for fast inference and no queue.</p>
<p>Built using LangChain and Gradio for the $RepoName repo.</p>
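    """)

    # A minimal sketch of the chat UI wiring, assuming a simple single-column
    # layout: the component labels and the input-clearing handler below are
    # illustrative additions, not taken from the original Space.
    openai_api_key = gr.Textbox(
        type="password", label="Enter your OpenAI API key here"
    )
    chatbot = gr.Chatbot()
    message = gr.Textbox(label="What's your question?")
    state = gr.State()
    # chat() returns the updated history twice: once to render in the
    # Chatbot component and once to carry forward as session state.
    message.submit(
        chat, inputs=[message, state, openai_api_key], outputs=[chatbot, state]
    )
    message.submit(lambda: "", None, message)  # clear the input box after submit

demo.launch()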