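"""Hello-World LangChain app: a Gradio Blocks chat UI that sends each message
through a LangChain ConversationChain backed either by OpenAI or by the
tiiuae/falcon-7b-instruct model on the Hugging Face Hub."""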
import os
from threading import Lock

import gradio as gr
from langchain.chains import ConversationChain
from langchain.llms import HuggingFaceHub, OpenAI
def load_chain_openai(api_key: str):
    """Build a ConversationChain backed by the OpenAI completion API."""
    # The key is read from the environment when the LLM is constructed,
    # so the variable can be cleared again right afterwards.
    os.environ["OPENAI_API_KEY"] = api_key
    llm = OpenAI(temperature=0)
    chain = ConversationChain(llm=llm)
    os.environ["OPENAI_API_KEY"] = ""
    return chain


def load_chain_falcon(api_key: str):
    """Build a ConversationChain backed by falcon-7b-instruct on the Hugging Face Hub."""
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = api_key
    llm = HuggingFaceHub(repo_id="tiiuae/falcon-7b-instruct", model_kwargs={"temperature": 0.9})
    chain = ConversationChain(llm=llm)
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = ""
    return chain
class ChatWrapper:
    """Holds the active chain, the chat history, and a lock for thread safety."""

    def __init__(self, chain_type: str, api_key: str = ''):
        self.api_key = api_key
        self.chain_type = chain_type
        self.history = []
        self.lock = Lock()
        if self.api_key:
            if chain_type == 'openai':
                self.chain = load_chain_openai(self.api_key)
            elif chain_type == 'falcon':
                self.chain = load_chain_falcon(self.api_key)
            else:
                raise ValueError(f'Invalid chain_type: {chain_type}')
        else:
            self.chain = None

    def __call__(self, inp: str):
        """Run one turn of the conversation and return the updated history."""
        with self.lock:
            if self.chain is None:
                self.history.append((inp, "Please add your API key to proceed."))
                return self.history
            try:
                output = self.chain.run(input=inp)
                self.history.append((inp, output))
            except Exception as e:
                self.history.append((inp, f"An error occurred: {e}"))
        return self.history
chat_wrapper = ChatWrapper('openai')  # default chain_type is 'openai'; no key yet, so no chain


def update_chain(api_key: str, selection: str):
    """Rebuild the chat wrapper whenever the API key or agent selection changes."""
    global chat_wrapper
    chat_wrapper = ChatWrapper(chain_type=selection, api_key=api_key)


def chat(message):
    """Gradio callback: run the message through the chain and return the history
    twice (once for the Chatbot component, once for the State)."""
    history = chat_wrapper(message)
    return history, history
block = gr.Blocks(css=".gradio-container {background-color: lightgray}")

with block:
    with gr.Row():
        gr.Markdown("<h3><center>Hello-World LangChain App</center></h3>")
        selection = gr.Dropdown(label="Select Agent", choices=["falcon", "openai"], value="openai")
        api_key_textbox = gr.Textbox(
            label="API Key",
            placeholder="Paste your OpenAI API key (sk-...) or Hugging Face Hub token",
            show_label=True,
            lines=1,
            type="password",
        )
    chatbot = gr.Chatbot()

    with gr.Row():
        message = gr.Textbox(
            label="What's your question?",
            placeholder="What's the answer to life, the universe, and everything?",
            lines=1,
        )
        submit = gr.Button(value="Send", variant="secondary").style(full_width=False)

    gr.Examples(
        examples=[
            "Hi! How's it going?",
            "What should I do tonight?",
            "What's 2 + 2?",
        ],
        inputs=message,
    )
    gr.HTML("Demo application of a LangChain chain.")

    state = gr.State()
    agent_state = gr.State()

    # Send a message via the button or by pressing Enter in the textbox.
    submit.click(chat, inputs=[message], outputs=[chatbot, state])
    message.submit(chat, inputs=[message], outputs=[chatbot, state])
    # Rebuild the chain when either the API key or the agent selection changes.
    api_key_textbox.change(update_chain, inputs=[api_key_textbox, selection], outputs=[agent_state])
    selection.change(update_chain, inputs=[api_key_textbox, selection], outputs=[agent_state])

block.launch(debug=True)