import os
from groq import Groq
from langchain_groq import ChatGroq
# Chroma and the BGE embeddings now live in langchain_community
from langchain_community.vectorstores import Chroma
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import RetrievalQA
from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain.prompts import PromptTemplate
import streamlit as st
GROQ_API_KEY = os.getenv('GROQ_API_KEY')
if not GROQ_API_KEY:
    raise ValueError("GROQ_API_KEY environment variable is not set.")
# Initialize Groq Client
groq_client = Groq(api_key=GROQ_API_KEY)
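# Note: the chain below talks to Groq through ChatGroq, so this raw client is
# otherwise unused here. A hedged sketch of a direct call with it (model name
# and prompt are illustrative, not part of the original app):
#   chat = groq_client.chat.completions.create(
#       model="llama-3.3-70b-versatile",
#       messages=[{"role": "user", "content": "Say hello."}],
#   )
#   print(chat.choices[0].message.content)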
# Configure the Llama 3.3 LLM served via Groq
def configure_groq_llm(
    model_name="llama-3.3-70b-versatile",
    temperature=0.7,
    max_tokens=2048
):
    return ChatGroq(
        groq_api_key=GROQ_API_KEY,
        model_name=model_name,
        temperature=temperature,
        max_tokens=max_tokens
    )
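# The defaults above are illustrative trade-offs: temperature=0.7 allows
# moderate variety in answers, while max_tokens=2048 caps completion length.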
# Embedding Configuration
def get_embeddings(model_name="BAAI/bge-base-en"):
    encode_kwargs = {'normalize_embeddings': True}
    return HuggingFaceBgeEmbeddings(
        model_name=model_name,
        encode_kwargs=encode_kwargs
    )
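# Illustrative usage (not called by the app): embed_query returns a
# unit-length float vector, since normalize_embeddings=True above.
#   vec = get_embeddings().embed_query("What models does Groq serve?")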
# Prompt Template
def create_llama_prompt():
    template = """
Use the following context to answer the question:
Context: {context}
Question: {question}
Helpful Answer:"""
    return PromptTemplate(
        template=template,
        input_variables=["context", "question"]
    )
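# Illustrative (the chain fills these variables in automatically; the example
# strings are hypothetical):
#   create_llama_prompt().format(context="Groq builds LPUs.",
#                                question="What does Groq build?")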
# Initialize Components
embeddings = get_embeddings()
llm = configure_groq_llm()
vectordb = Chroma(persist_directory='db', embedding_function=embeddings)
retriever = vectordb.as_retriever(search_kwargs={"k": 5})
prompt = create_llama_prompt()
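# Hedged sketch (not part of the original app): one way the 'db' directory
# loaded above could be populated before the app runs. Assumes source
# documents arrive as plain strings; chunk sizes are illustrative. This is
# also what the RecursiveCharacterTextSplitter import above is for.
def build_vectorstore(texts, persist_directory='db'):
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    chunks = [chunk for text in texts for chunk in splitter.split_text(text)]
    # from_texts embeds every chunk and persists the index to disk
    return Chroma.from_texts(chunks, embedding=embeddings,
                             persist_directory=persist_directory)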
# QA Chain Configuration
qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=retriever,
    chain_type_kwargs={"prompt": prompt},
    return_source_documents=True
)
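# With return_source_documents=True, qa_chain.invoke({"query": ...}) returns a
# dict containing 'result' (the answer) and 'source_documents' (the retrieved
# chunks).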
# Streamlit Interface
def groq_nlp_chatbot():
    st.title("Groq Llama 3.3 Chatbot")
    user_input = st.text_input("Your Question:")
    if user_input:
        try:
            # invoke() replaces the deprecated __call__ interface on chains
            response = qa_chain.invoke({"query": user_input})
            st.text_area("Bot's Response:", response['result'])
        except Exception as e:
            st.error(f"Error processing request: {e}")
if __name__ == "__main__":
    groq_nlp_chatbot()
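# Launch locally (assuming this file is saved as app.py):
#   streamlit run app.py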