"""Conversation history manager for an LLM chat assistant.

Persists the transcript to a JSON file and routes prompts through an
OpenAI-compatible client.
"""
import json
import os
class ConversationManager:
    """Manage a persistent chat transcript and route prompts to an LLM client.

    The transcript is a list of ``{"role": ..., "content": ...}`` dicts kept
    in memory and mirrored to ``history_file`` as JSON after every mutation.
    """

    # Single source of truth for the model name; previously duplicated in
    # generate_ai_response and summarize_history.
    MODEL = "meta/llama-3.3-70b-instruct"

    def __init__(self, client, history_file="conversation_history.json"):
        """
        Args:
            client: An OpenAI-compatible client exposing
                ``chat.completions.create`` (assumed from usage; confirm
                against the caller).
            history_file: Path of the JSON file used to persist the transcript.
        """
        self.client = client
        self.history_file = history_file
        self.conversation_history = self.load_history()

    def load_history(self):
        """Load conversation history from a file.

        Returns an empty list when the file is missing, unreadable, or
        contains invalid JSON, so a damaged history file no longer crashes
        construction.
        """
        if os.path.exists(self.history_file):
            try:
                with open(self.history_file, "r", encoding="utf-8") as file:
                    return json.load(file)
            except (OSError, json.JSONDecodeError) as e:
                print(f"Error loading history: {e}")
        return []

    def save_history(self):
        """Save conversation history to a file."""
        with open(self.history_file, "w", encoding="utf-8") as file:
            json.dump(self.conversation_history, file)

    def _append_message(self, role, content):
        """Append one message to the transcript and persist immediately."""
        self.conversation_history.append({"role": role, "content": content})
        self.save_history()

    def add_user_message(self, message):
        """Add a user message to the conversation history."""
        self._append_message("user", message)

    def add_ai_message(self, message):
        """Add an AI message to the conversation history."""
        self._append_message("assistant", message)

    def clear_history(self):
        """Clear the conversation history (in memory and on disk)."""
        self.conversation_history = []
        self.save_history()

    def check_warning(self, max_prompts=4):
        """Return True when at least ``max_prompts`` user prompts are stored."""
        user_prompts = sum(
            1 for msg in self.conversation_history if msg["role"] == "user"
        )
        return user_prompts >= max_prompts

    def generate_ai_response(self, prompt):
        """Generate a response from the AI.

        The stored history plus ``prompt`` are sent behind a fixed system
        message. Returns the assistant's text, or None when the API call
        fails (the error is printed — best-effort contract preserved).
        Note: ``prompt`` is NOT added to the stored history here; callers
        use add_user_message/add_ai_message for that.
        """
        messages = [
            {"role": "system", "content": "You are a Django programming assistant."}
        ]
        messages += self.conversation_history
        messages.append({"role": "user", "content": prompt})
        try:
            completion = self.client.chat.completions.create(
                model=self.MODEL,
                messages=messages,
                max_tokens=1024,
                temperature=0.5,
            )
            return completion.choices[0].message.content
        except Exception as e:
            print(f"Error generating response: {e}")
            return None

    def summarize_history(self):
        """Summarize the conversation history.

        On success the whole transcript is REPLACED by a single system
        message holding the summary, and the file is rewritten. Returns the
        summary text, a notice string when there is no history, or an error
        string when the API call fails.
        """
        if not self.conversation_history:
            return "No history to summarize."
        summary_prompt = "Summarize the following conversation history into a concise summary for context:"
        full_history = "\n".join(
            f"{msg['role'].capitalize()}: {msg['content']}"
            for msg in self.conversation_history
        )
        try:
            completion = self.client.chat.completions.create(
                model=self.MODEL,
                messages=[
                    {"role": "system", "content": "You are a summarization assistant."},
                    {"role": "user", "content": f"{summary_prompt}\n\n{full_history}"},
                ],
                max_tokens=8000,
            )
            summary = completion.choices[0].message.content
            # Replace history with the summary to shrink future prompt sizes.
            self.conversation_history = [{"role": "system", "content": summary}]
            self.save_history()
            return summary
        except Exception as e:
            print(f"Error summarizing history: {e}")
            return "An error occurred while summarizing the conversation history."