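# RAG chatbot over an uploaded Excel file, built with LlamaIndex (legacy, pre-0.10 API)
# and Gradio. Retrieval uses BAAI/bge-base-en-v1.5 embeddings; generation uses a
# quantized Zephyr-7B-beta GGUF model served locally through llama.cpp on CPU.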
# Importing Necessary Libraries
import gradio as gr
from llama_index import download_loader, ServiceContext, VectorStoreIndex
from llama_index.embeddings import HuggingFaceEmbedding
from llama_index import Prompt
import torch

device = torch.device("cpu")
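# The quantized (Q5_K_M) GGUF build of Zephyr-7B-beta is downloaded from model_url on
# first run and executed through llama-cpp-python; n_gpu_layers=0 keeps inference on
# CPU, matching the torch device above.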
# Loading the Zephyr Model using Llama CPP
from llama_index.llms import LlamaCPP

llm = LlamaCPP(
    model_url='https://huggingface.co/TheBloke/zephyr-7B-beta-GGUF/resolve/main/zephyr-7b-beta.Q5_K_M.gguf?download=true',
    model_path=None,
    temperature=0.3,
    max_new_tokens=2000,
    context_window=3900,
    # set to at least 1 to use GPU
    model_kwargs={"n_gpu_layers": 0},
    verbose=True,
)
# Loading Embedding Model
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5")
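# The same embedding model is used both to embed the document chunks at index time and
# to embed user queries at retrieval time.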
# Defining a custom Prompt
TEMPLATE_STR = (
    '''You are a helpful and responsible AI assistant who is excited to help the user but will never harm humans or engage in any activity that causes harm to anyone. Use the context below to help the user with their query.
{context_str}
<|user|>\n
{query_str}\n
<|assistant|>\n'''
)
QA_TEMPLATE = Prompt(TEMPLATE_STR)
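# The prompt follows Zephyr's chat format (<|user|> / <|assistant|> markers) and fills
# in the standard text-QA variables {context_str} and {query_str}; it is passed to the
# query engine below as its text_qa_template.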
# User Interface functions
def build_the_bot(file):
    global service_context, index
    if file is not None:
        # Loading Data from the uploaded Excel file
        PandasExcelReader = download_loader("PandasExcelReader")
        loader = PandasExcelReader(pandas_config={"header": 0})
        documents = loader.load_data(file=file)
        service_context = ServiceContext.from_defaults(
            chunk_size=150, chunk_overlap=10,
            llm=llm, embed_model=embed_model,
        )
        index = VectorStoreIndex.from_documents(documents, service_context=service_context)
        return 'Index saved successfully!!!'
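# Each chat turn queries the index built above and appends the (user, bot) pair to the
# Chatbot history that Gradio passes back in.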
def chat(chat_history, user_input):
    global service_context, index
    query_engine = index.as_query_engine(streaming=False, text_qa_template=QA_TEMPLATE)
    bot_response = query_engine.query(user_input)
    bot_response = str(bot_response)
    return chat_history + [(user_input, bot_response)]
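# Two-tab Gradio app: the first tab uploads the Excel file and builds the index, the
# second tab chats against it.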
# User Interface
with gr.Blocks() as demo:
    gr.Markdown('# Marketing Email Generator')
    with gr.Tab("Input Text Document"):
        upload = gr.File(label="Upload Your Excel")
        upload.upload(fn=build_the_bot, inputs=[upload], show_progress='full')
    with gr.Tab("Knowledge Bot"):
        chatbot = gr.Chatbot()
        message = gr.Textbox()
        submit_button = gr.Button("Submit")
        submit_button.click(chat, [chatbot, message], chatbot)
        message.submit(chat, [chatbot, message], chatbot)

demo.queue().launch(debug=True, share=True)
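# Note: queue() enables request queuing and share=True exposes a temporary public link.
# Rough dependencies (exact pins depend on your environment): gradio, llama-index<0.10,
# llama-cpp-python, torch, transformers, pandas, openpyxl.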