from typing import Dict

import chromadb
import groq
import streamlit as st
from chromadb.utils import embedding_functions
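# Rough dependency notes (not part of the original source): this script appears to
# need the streamlit, chromadb, groq, and sentence-transformers packages, the last
# one because SentenceTransformerEmbeddingFunction loads the
# "jinaai/jina-embeddings-v2-base-en" model through sentence-transformers.
# Something like `pip install streamlit chromadb groq sentence-transformers` should
# cover them, but pin versions to whatever the project actually uses.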
class CourseAdvisor:
    def __init__(self, db_path: str = "./chroma_db"):
        """Initialize the course advisor with an existing ChromaDB database."""
        self.chroma_client = chromadb.PersistentClient(path=db_path)

        # Use the same embedding model that was used when the collection was built,
        # otherwise query embeddings will not line up with the stored vectors.
        self.embedding_function = embedding_functions.SentenceTransformerEmbeddingFunction(
            model_name="jinaai/jina-embeddings-v2-base-en"
        )

        # get_collection (rather than get_or_create_collection) assumes the
        # "courses" collection already exists and raises if it does not.
        self.collection = self.chroma_client.get_collection(
            name="courses",
            embedding_function=self.embedding_function
        )

    def query_courses(self, query_text: str, chat_history: str, api_key: str, n_results: int = 3) -> Dict:
        """Query the vector database and get course recommendations."""
        groq_client = groq.Groq(api_key=api_key)

        # Retrieve the most relevant course documents for the current query.
        try:
            results = self.collection.query(
                query_texts=[query_text],
                n_results=min(n_results, self.collection.count()),
                include=['documents', 'metadatas']
            )
            docs_context = "\n\n".join(results['documents'][0])
        except Exception as e:
            st.error(f"Error querying database: {str(e)}")
            return {
                'llm_response': "I encountered an error while searching the course database. Please try again.",
                'retrieved_courses': []
            }

        # Build a single prompt combining recent chat history, the new query,
        # and the retrieved course documents.
        prompt = f"""Previous conversation:
{chat_history}

Current user query: {query_text}

Relevant course information:
{docs_context}

Please provide course recommendations based on the entire conversation context. Format your response as:
1. Understanding of the user's needs (based on conversation history)
2. Overall recommendation with reasoning
3. Specific benefits of each recommended course
4. Learning path suggestion (if applicable)
5. Any prerequisites or important notes"""

        try:
            completion = groq_client.chat.completions.create(
                messages=[
                    {"role": "system", "content": "You are a helpful course advisor who provides detailed, relevant course recommendations based on the user's needs and conversation history. Keep responses clear and well-structured."},
                    {"role": "user", "content": prompt}
                ],
                # Groq retires models over time; if this one is no longer served,
                # swap in a currently supported chat model.
                model="mixtral-8x7b-32768",
                temperature=0.7,
            )

            return {
                'llm_response': completion.choices[0].message.content,
                'retrieved_courses': results['metadatas'][0]
            }
        except Exception as e:
            st.error(f"Error with Groq API: {str(e)}")
            return {
                'llm_response': "I encountered an error while generating recommendations. Please check your API key and try again.",
                'retrieved_courses': []
            }
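# The app assumes the "courses" collection in ./chroma_db was built ahead of time
# by a separate ingestion step that is not shown in this file. As a rough
# illustration only (the real ingestion script and exact metadata schema are not
# given here), the metadata keys that display_course_card() reads -- title,
# categories, lessons, price, url -- would be attached to each document roughly
# like this:
#
#   client = chromadb.PersistentClient(path="./chroma_db")
#   collection = client.get_or_create_collection(
#       name="courses",
#       embedding_function=embedding_functions.SentenceTransformerEmbeddingFunction(
#           model_name="jinaai/jina-embeddings-v2-base-en"
#       ),
#   )
#   collection.add(
#       ids=["course-001"],
#       documents=["Full course description used for semantic search ..."],
#       metadatas=[{
#           "title": "Intro to Python",
#           "categories": "Programming, Data Science",
#           "lessons": 24,
#           "price": "$49",
#           "url": "https://example.com/intro-to-python",
#       }],
#   )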
def initialize_session_state():
    """Initialize session state variables."""
    if 'messages' not in st.session_state:
        st.session_state.messages = []
    if 'course_advisor' not in st.session_state:
        # Created once per session; this also loads the embedding model.
        st.session_state.course_advisor = CourseAdvisor()
    if 'api_key' not in st.session_state:
        st.session_state.api_key = ""


def get_chat_history() -> str:
    """Format the recent chat history for LLM context."""
    history = []
    # Only the last five messages are passed along, to keep the prompt short.
    for message in st.session_state.messages[-5:]:
        role = message["role"]
        content = message["content"]
        history.append(f"{role}: {content}")
    return "\n".join(history)
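# For example, with two prior turns stored in st.session_state.messages,
# get_chat_history() would return a plain-text transcript such as:
#
#   user: I want to learn data analysis
#   assistant: Based on your interest in data analysis, I recommend ...
#
# (Illustrative content only; the "user"/"assistant" roles come from the
# messages appended in main().)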
def display_course_card(course: Dict):
    """Display a single course recommendation in a card format."""
    with st.container():
        # Inject the card CSS. Note that the <div> opening and closing tags below
        # are emitted in separate st.markdown calls, so depending on the Streamlit
        # version the styling may not actually wrap the widgets rendered in between.
        st.markdown("""
        <style>
        .course-card {
            background-color: #f8f9fa;
            padding: 1rem;
            border-radius: 0.5rem;
            margin-bottom: 1rem;
        }
        </style>
        """, unsafe_allow_html=True)

        st.markdown('<div class="course-card">', unsafe_allow_html=True)
        st.markdown(f"### {course.get('title', 'Untitled course')}")

        col1, col2 = st.columns(2)

        with col1:
            # Categories may be stored as a comma-separated string, a list, or a
            # scalar; normalize to a list before rendering.
            categories = course.get('categories', 'N/A')
            if isinstance(categories, str):
                categories = [cat.strip() for cat in categories.split(',')]
            elif not isinstance(categories, list):
                categories = [str(categories)]

            if len(categories) > 1:
                st.markdown("**Categories:**")
                for category in categories:
                    st.markdown(f"- {category}")
            else:
                st.markdown(f"**Category:** {categories[0]}")

            st.markdown(f"**Lessons:** {course.get('lessons', 'N/A')}")

        with col2:
            st.markdown(f"**Price:** {course.get('price', 'N/A')}")
            if 'url' in course:
                st.markdown(f"**[Visit Course]({course['url']})**")

        st.markdown('</div>', unsafe_allow_html=True)

    st.markdown("---")
def main():
    st.set_page_config(
        page_title="Course Recommender",
        page_icon="π",
        layout="wide"
    )

    st.title("π AI Course Recommender")

    initialize_session_state()

    # Show how many courses the advisor can draw on.
    collection = st.session_state.course_advisor.collection
    st.sidebar.info(f"Connected to database with {collection.count()} courses")

    with st.sidebar:
        st.header("Settings")

        api_key = st.text_input("Enter Groq API Key",
                                type="password",
                                value=st.session_state.api_key)
        if api_key != st.session_state.api_key:
            st.session_state.api_key = api_key

        if st.button("Clear Chat History"):
            st.session_state.messages = []

    st.header("Chat with AI Course Advisor")

    # Replay the conversation so far.
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.markdown(message["content"])

    if prompt := st.chat_input("What would you like to learn?"):
        if not st.session_state.api_key:
            st.error("Please enter your Groq API key in the sidebar.")
            return

        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.markdown(prompt)

        with st.chat_message("assistant"):
            with st.spinner("Thinking..."):
                chat_history = get_chat_history()

                response = st.session_state.course_advisor.query_courses(
                    prompt,
                    chat_history,
                    st.session_state.api_key
                )

                st.markdown(response['llm_response'])

                if response['retrieved_courses']:
                    st.markdown("### π Recommended Courses")
                    for course in response['retrieved_courses']:
                        display_course_card(course)

        # Store the assistant turn; only append the course list when courses
        # were actually retrieved.
        assistant_content = response['llm_response']
        if response['retrieved_courses']:
            assistant_content += "\n\n### Recommended Courses\n" + "\n".join(
                f"- {course.get('title', 'Untitled course')}"
                for course in response['retrieved_courses']
            )
        st.session_state.messages.append({
            "role": "assistant",
            "content": assistant_content
        })


if __name__ == "__main__":
    main()
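# To try the app locally (assuming the file is saved as app.py -- the original
# filename is not given -- and that ./chroma_db already contains the "courses"
# collection), a typical invocation would be:
#
#   streamlit run app.py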