"""Conversation-history manager for an OpenAI-compatible chat client."""
import json
import os


class ConversationManager:
    """Persist a chat conversation and generate AI responses through an
    OpenAI-compatible chat-completions client.

    The history is a list of ``{"role": ..., "content": ...}`` dicts kept in
    memory and mirrored to a JSON file after every mutation, so a session can
    be resumed by constructing a new manager with the same ``history_file``.
    """

    # Single source of truth for the model name (was duplicated inline in
    # generate_ai_response and summarize_history).
    DEFAULT_MODEL = "meta/llama-3.3-70b-instruct"

    def __init__(self, client, history_file="conversation_history.json",
                 model=DEFAULT_MODEL):
        """
        Args:
            client: Client exposing ``chat.completions.create`` (OpenAI-style).
            history_file: Path of the JSON file used to persist the history.
            model: Model identifier sent with every completion request.
        """
        self.client = client
        self.history_file = history_file
        self.model = model
        self.conversation_history = self.load_history()

    def load_history(self):
        """Load conversation history from ``self.history_file``.

        Returns an empty list when the file is missing, unreadable, or does
        not contain a JSON list.  (The original crashed with an unhandled
        ``json.JSONDecodeError`` on a corrupt or truncated file.)
        """
        if not os.path.exists(self.history_file):
            return []
        try:
            with open(self.history_file, "r", encoding="utf-8") as file:
                history = json.load(file)
        except (OSError, json.JSONDecodeError):
            # A corrupt or unreadable history file should not kill the
            # session; start fresh instead.
            return []
        # Guard against a file that holds valid JSON of the wrong shape.
        return history if isinstance(history, list) else []

    def save_history(self):
        """Write the in-memory history to ``self.history_file`` as JSON."""
        with open(self.history_file, "w", encoding="utf-8") as file:
            json.dump(self.conversation_history, file, ensure_ascii=False)

    def add_user_message(self, message):
        """Append a user message to the history and persist it."""
        self._append("user", message)

    def add_ai_message(self, message):
        """Append an assistant message to the history and persist it."""
        self._append("assistant", message)

    def _append(self, role, content):
        # Shared helper: both public add_* methods did the same append+save.
        self.conversation_history.append({"role": role, "content": content})
        self.save_history()

    def clear_history(self):
        """Drop all stored messages and persist the empty history."""
        self.conversation_history = []
        self.save_history()

    def check_warning(self, max_prompts=4):
        """Return True once the user has sent ``max_prompts`` or more
        messages — a hint that the history should be summarized."""
        user_prompts = sum(
            1 for msg in self.conversation_history if msg["role"] == "user"
        )
        return user_prompts >= max_prompts

    def generate_ai_response(self, prompt):
        """Send the stored history plus ``prompt`` to the model.

        Note: ``prompt`` is NOT added to the stored history here; callers
        are expected to record it via ``add_user_message`` /
        ``add_ai_message``.

        Returns:
            The assistant's reply text, or ``None`` on any API error.
        """
        messages = [{"role": "system", "content": "You are a Django programming assistant."}]
        messages += self.conversation_history
        messages.append({"role": "user", "content": prompt})
        try:
            completion = self.client.chat.completions.create(
                model=self.model,
                messages=messages,
                max_tokens=1024,
                temperature=0.5,
            )
            return completion.choices[0].message.content
        except Exception as e:  # API boundary: report and degrade gracefully
            print(f"Error generating response: {e}")
            return None

    def summarize_history(self):
        """Collapse the stored history into a single summary message.

        On success the history is replaced by one ``system`` message holding
        the summary (then persisted) and the summary text is returned.  On
        API failure the history is left untouched and an error string is
        returned.
        """
        if not self.conversation_history:
            return "No history to summarize."
        summary_prompt = "Summarize the following conversation history into a concise summary for context:"
        full_history = "\n".join(
            f"{msg['role'].capitalize()}: {msg['content']}"
            for msg in self.conversation_history
        )
        try:
            completion = self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": "You are a summarization assistant."},
                    {"role": "user", "content": f"{summary_prompt}\n\n{full_history}"},
                ],
                max_tokens=8000,
            )
            summary = completion.choices[0].message.content
            # Replace the full transcript with the summary to shrink context.
            self.conversation_history = [{"role": "system", "content": summary}]
            self.save_history()
            return summary
        except Exception as e:  # API boundary: report and degrade gracefully
            print(f"Error summarizing history: {e}")
            return "An error occurred while summarizing the conversation history."