mousamax committed on
Commit
b8b10fd
·
1 Parent(s): e58dfa0
DockerFile ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.9

# Run as a non-root user (required by Hugging Face Spaces, uid 1000).
RUN useradd -m -u 1000 user
USER user

ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

WORKDIR $HOME/app

# Install dependencies first so Docker layer caching survives source edits.
# (The original used a non-ASCII bullet "•" instead of "." and a "~" that
# COPY does not expand.)
COPY --chown=user ./requirements.txt $HOME/app/requirements.txt
RUN pip install -r requirements.txt

# Copy the application source.
COPY --chown=user . $HOME/app

# SECURITY: the original CMD embedded a literal LITERAL_API_KEY value in the
# image, leaking it to anyone who can pull the image — that key must be
# rotated and supplied via the runtime environment instead. Also, exec-form
# CMD requires double quotes; single quotes degrade it to a shell string.
CMD ["chainlit", "run", "app.py", "--port", "7860"]
app.py ADDED
@@ -0,0 +1,222 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from operator import itemgetter
2
+ from langchain_openai import ChatOpenAI
3
+ from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
4
+ from langchain.schema import StrOutputParser
5
+ from langchain.schema.runnable import Runnable, RunnablePassthrough, RunnableLambda
6
+ from langchain.schema.runnable.config import RunnableConfig
7
+ from langchain.memory import ConversationBufferMemory
8
+ from resolution_logic import ResolutionLogic
9
+ from literal_thread_manager import LiteralThreadManager
10
+ from prompt_engineering.prompt_desing import system_prompt, system_prompt_b, system_prompt_questioning
11
+ import chainlit as cl
12
+ from chainlit.types import ThreadDict
13
+ import os
14
+ from dotenv import load_dotenv
15
+
16
# Load the environment variables from the .env file
load_dotenv()

# Chainlit needs a JWT secret to sign auth cookies; fail fast if absent.
jwt_secret_key = os.getenv('CHAINLIT_AUTH_SECRET')
if not jwt_secret_key:
    raise ValueError(
        "You must provide a JWT secret in the environment to use authentication.")

# Get the value of the OPENAI_API_KEY from the environment variables
openai_api_key = os.getenv("OPENAI_API_KEY")
if not openai_api_key:
    # Assigning None into os.environ raises an opaque TypeError; raise a
    # clear error instead, mirroring the JWT check above.
    raise ValueError(
        "You must provide OPENAI_API_KEY in the environment.")

# Set the OPENAI_API_KEY in the environment so downstream libraries see it.
os.environ["OPENAI_API_KEY"] = openai_api_key

# Shared thread manager for the LiteralAI conversation store.
manager = LiteralThreadManager(api_key=os.getenv("LITERAL_API_KEY"))
30
+
31
def setup_runnable():
    """
    Build the chat pipeline and store it in the user session.

    The pipeline injects conversation history from session memory, applies
    the questioning system prompt, queries the chat model, and parses the
    model output to a plain string.
    """
    session_memory = cl.user_session.get("memory")  # type: ConversationBufferMemory
    llm = ChatOpenAI(streaming=True, model="gpt-3.5-turbo")

    chat_prompt = ChatPromptTemplate.from_messages([
        ("system", system_prompt_questioning),
        MessagesPlaceholder(variable_name="history"),
        ("human", "{question}"),
    ])

    # Pull the "history" list out of the memory variables on every call.
    history_loader = (
        RunnableLambda(session_memory.load_memory_variables) | itemgetter("history")
    )

    pipeline = (
        RunnablePassthrough.assign(history=history_loader)
        | chat_prompt
        | llm
        | StrOutputParser()
    )
    cl.user_session.set("runnable", pipeline)
56
+
57
@cl.password_auth_callback
def auth_callback(username: str, password: str):
    """
    Authenticate a user, creating the account in LiteralAI on first login.

    Args:
        username (str): The username provided by the user.
        password (str): The password provided by the user.

    Returns:
        cl.User | None: The authenticated user, or None when lookup fails.
    """
    auth_user = manager.literal_client.api.get_or_create_user(identifier=username)
    if not auth_user:
        return None
    # Only the literal "admin" username gets the admin role.
    role = "admin" if username == "admin" else "user"
    return cl.User(
        identifier=username,
        metadata={"role": role, "provider": "credentials"},
    )
84
+
85
def create_and_update_threads(first_res, current_user, partner_user):
    """
    Create the partner's thread and link it to the current user's thread.

    Fetches the most recent thread (assumed to be the one just created for the
    current user), creates a mirror thread for the partner with cross-linking
    metadata, posts a conflict summary into the partner's thread, and stores
    the linked thread id in the session.

    Args:
        first_res (dict): AskUserMessage result; its 'output' holds the
            user's conflict summary.
        current_user (cl.User): The current user initiating the conversation.
        partner_user (cl.User): The partner user to connect with.
    """
    # NOTE(review): assumes the newest thread globally is the current user's
    # thread — racy if another session creates a thread in between; verify.
    latest_thread = manager.literal_client.api.get_threads(first=1)
    # Partner thread stores back-references to the current user and thread.
    partner_thread = manager.literal_client.api.create_thread(name=first_res['output'], participant_id=partner_user.id, metadata={
        "partner_id": current_user.id, "partner_thread_id": latest_thread.data[0].id, "user_id": partner_user.id})
    resolver = ResolutionLogic()
    # Summarize the conflict from the partner's perspective and post it
    # as the opening assistant message in the partner's thread.
    message_to_other_partner = resolver.summarize_conflict_topic(partner_user.identifier, current_user.identifier, first_res['output'])
    manager.literal_client.api.create_step(thread_id=partner_thread.id, type="assistant_message",
                                           output={'content': message_to_other_partner})
    # Attach the reciprocal link on the current user's thread.
    current_thread = manager.literal_client.api.upsert_thread(id=latest_thread.data[0].id,
                                                              participant_id=current_user.id, metadata={"partner_id": partner_user.id, "partner_thread_id": partner_thread.id})
    cl.user_session.set("thread_id", current_thread.id)
    # NOTE(review): return value discarded — this getter has no side effects,
    # so the call appears to be dead code; confirm before removing.
    manager.get_other_partner_thread_id(current_thread.id)
105
+
106
@cl.action_callback("2-1 Chat")
async def on_action(action):
    """
    Handle the action callback for initiating a 2-1 chat.

    Prompts the user for connection details, then removes the action button
    from the UI.

    Args:
        action (cl.Action): The action object containing the user's input.
    """
    await cl.Message(content="Write the email and the chat id:").send()
    # NOTE(review): return value discarded — this lookup has no effect and
    # looks like dead code; confirm before removing.
    action.get("value")
    # Remove the clicked button so it cannot be triggered again.
    await action.remove()
117
+
118
@cl.on_chat_start
async def on_chat_start():
    """
    Handle the start of a chat session.

    Initializes conversation memory, builds the runnable pipeline, asks the
    user to summarize their conflict, and — when a 2-1 chat is chosen —
    links the session to a partner user and creates mirrored threads.
    """
    cl.user_session.set("memory", ConversationBufferMemory(return_messages=True))
    setup_runnable()
    first_res = await cl.AskUserMessage(content="Welcome to the Relationship Coach chatbot. I can help you with your relationship questions. Please first summarize the type of conflict.").send()
    add_person = await cl.AskActionMessage(
        content="Select the conversation type.",
        actions=[
            cl.Action(name="1-1 Chat", value="1-1 Chat", label="👤 1-1"),
            cl.Action(name="2-1 Chat", value="2-1 Chat", label="👥 2-1"),
        ],
    ).send()

    if add_person and add_person.get("value") == "2-1 Chat":
        res = await cl.AskUserMessage(content="Please write the username of the person to connect with.").send()
        if res:
            # Re-prompt until the partner username exists in the database.
            # (fixed: `== None` -> `is None`)
            while manager.literal_client.api.get_user(identifier=res["output"]) is None:
                await cl.Message(content=f"Partner {res['output']} does not exist in db.").send()
                res = await cl.AskUserMessage(content="Please write the username of the person to connect with.").send()
            partner_username = res['output']
            partner_user = manager.literal_client.api.get_user(identifier=partner_username)
            current_user = cl.user_session.get("user")
            current_username = current_user.identifier
            # Record the partnership on the current user's profile.
            manager.literal_client.api.update_user(id=current_user.id, identifier=current_username, metadata={
                "role": "user", "provider": "credentials", "relationships": {"partner_username": partner_username}})
            await cl.Message(content=f"Connected with {partner_username}!").send()
            # Feed the conflict summary through the normal message handler,
            # then create the linked thread pair.
            await on_message(cl.Message(content=first_res['output']))
            create_and_update_threads(first_res, current_user, partner_user)
    else:
        # AskActionMessage returned no selection before the timeout.
        # NOTE(review): original indentation was lost in extraction; this
        # else is paired with the action-selection check — confirm.
        await cl.Message(
            content="Action timed out!",
        ).send()
155
+
156
@cl.on_chat_resume
async def on_chat_resume(thread: ThreadDict):
    """
    Handle the resumption of a chat session.

    Rebuilds conversation memory from the thread's root steps, restores the
    session state, re-creates the runnable pipeline, and surfaces a
    resolution message if the conflict logic produces one.

    Args:
        thread (ThreadDict): The thread dictionary containing the chat history.
    """
    memory = ConversationBufferMemory(return_messages=True)
    # Only top-level steps carry the user/assistant turns. (fixed: `== None`
    # -> `is None`)
    root_messages = [m for m in thread["steps"] if m["parentId"] is None]
    for message in root_messages:
        if message["type"] == "user_message":
            memory.chat_memory.add_user_message(message["output"])
        else:
            memory.chat_memory.add_ai_message(message["output"])

    cl.user_session.set("memory", memory)
    cl.user_session.set("thread_id", thread["id"])

    setup_runnable()

    conflict_resolution = ResolutionLogic()
    resolution = conflict_resolution.intervention(thread["id"])

    if resolution:
        await cl.Message(content=resolution).send()
182
+
183
@cl.on_message
async def on_message(message: cl.Message):
    """
    Handle incoming messages during a chat session.

    Either replies with a conflict-resolution intervention (when one is due
    for the linked thread) or streams a model-generated response, then
    records both turns in session memory.

    Args:
        message (cl.Message): The incoming message from the user.
    """
    memory = cl.user_session.get("memory")  # type: ConversationBufferMemory
    runnable = cl.user_session.get("runnable")  # type: Runnable

    response = cl.Message(content="")

    conflict_resolution = ResolutionLogic()
    # `resolution` is only bound when a thread_id exists; the second guard
    # below short-circuits on thread_id first, so it is never read unbound.
    if cl.user_session.get("thread_id"):
        resolution = conflict_resolution.intervention(cl.user_session.get("thread_id"))

    if cl.user_session.get("thread_id") and resolution:
        # Replace the empty message with the intervention text.
        response = cl.Message(content=resolution)
    else:
        # Stream the model's answer token by token into the UI.
        async for chunk in runnable.astream(
            {"question": message.content},
            config=RunnableConfig(callbacks=[cl.LangchainCallbackHandler()]),
        ):
            await response.stream_token(chunk)

    await response.send()

    # Persist both turns so the next call sees the full history.
    memory.chat_memory.add_user_message(message.content)
    memory.chat_memory.add_ai_message(response.content)
213
+
214
def main():
    """
    Entry point for direct execution.

    A Chainlit app cannot be launched by calling its handlers directly: the
    original code invoked the async `on_chat_start()` without awaiting it
    (the coroutine never ran) and then called `cl.run()`, which does not
    exist in the Chainlit API and raised AttributeError. The supported way
    to start the app is the Chainlit CLI, so this function only prints that
    instruction.
    """
    print("Run this app with: chainlit run app.py --port 7860")


if __name__ == "__main__":
    main()
chainlit.md ADDED
@@ -0,0 +1 @@
 
 
1
+ # Welcome to HarmonyAI! 🚀🤖
literal_thread_manager.py ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from literalai import LiteralClient
2
+ from dotenv import load_dotenv
3
+ import os
4
+ from typing import List, Dict, Any
5
+ load_dotenv()
6
+
7
+
8
class LiteralThreadManager:
    """
    Manage and extract conversation threads from the LiteralAI database.

    Wraps a LiteralClient and provides helpers for retrieving threads and
    messages, converting API objects to plain dictionaries, tracking a
    conflict-resolution flag in thread metadata, and flattening a thread's
    steps into a chat history usable by HarmonyAI.
    """

    def __init__(self, api_key: str):
        """
        Initialize the LiteralThreadManager with a LiteralClient.

        Args:
            api_key (str): The API key for the LiteralClient.
        """
        self.literal_client = LiteralClient(api_key=api_key)

    @staticmethod
    def threads_to_dict(threads_input) -> List[Dict[str, Any]]:
        """
        Convert a paginated threads response to a list of dictionaries.

        Args:
            threads_input: API response whose `.data` holds thread objects.

        Returns:
            List[Dict[str, Any]]: One dictionary per thread.
        """
        return [a_thread.to_dict() for a_thread in threads_input.data]

    def filter_threads_by_participant(self, participant_name: str) -> List[Dict[str, Any]]:
        """
        Return all threads whose participant matches a name (case-insensitive).

        Side effect: caches the raw response in ``self._all_threads`` and the
        dictionary form in ``self.thread_dict_list``.

        Args:
            participant_name (str): The participant identifier to match.

        Returns:
            List[Dict[str, Any]]: The matching threads as dictionaries.
        """
        self._all_threads = self.literal_client.api.get_threads()
        self.thread_dict_list = self.threads_to_dict(self._all_threads)
        return [thread for thread in self.thread_dict_list if
                thread['participant']['identifier'].lower() == participant_name.lower()]

    def filter_thread_by_id(self, thread_id: str) -> List[Dict[str, Any]]:
        """
        Fetch a single thread by its ID.

        Args:
            thread_id (str): The ID of the thread.

        Returns:
            List[Dict[str, Any]]: A one-element list with the thread dict.
        """
        return [self.literal_client.api.get_thread(thread_id).to_dict()]

    def get_other_partner_thread_id(self, thread_id: str) -> str:
        """
        Return the partner thread ID stored in a thread's metadata.

        Args:
            thread_id (str): The ID of the thread.

        Returns:
            str: The partner thread ID.

        Raises:
            KeyError/TypeError: if the thread has no "partner_thread_id"
                metadata entry.
        """
        current_thread = self.filter_thread_by_id(thread_id)
        return current_thread[0].get("metadata")["partner_thread_id"]

    def get_messages_from_thread(self, input_thread) -> List[str]:
        """
        Return the raw steps (messages) of a thread.

        Args:
            input_thread: A one-element thread list as returned by
                filter_thread_by_id.

        Returns:
            List[str]: The thread's steps.
        """
        return input_thread[0]['steps']

    def get_user_name_from_thread(self, thread) -> str:
        """
        Return the participant identifier of a thread.

        Args:
            thread: A one-element thread list as returned by
                filter_thread_by_id.

        Returns:
            str: The participant's identifier.
        """
        return thread[0]['participant']['identifier']

    def is_conflict_resolved(self, thread_id: str) -> bool:
        """
        Check whether the conflict in a thread is marked resolved.

        Initializes the flag to False the first time it is queried.

        Args:
            thread_id (str): The ID of the thread.

        Returns:
            bool: True if the conflict is resolved, False otherwise.
        """
        # Fetch the thread once; the original issued two identical API calls
        # (one for the check and one for the return value).
        thread = self.literal_client.api.get_thread(thread_id)
        if thread.metadata.get('isResolved') is None:
            self.set_conflict_resolved(thread_id, False)
            return False
        return thread.metadata['isResolved']

    def set_conflict_resolved(self, thread_id: str, is_resolved: bool):
        """
        Persist the conflict-resolution flag in the thread metadata.

        Args:
            thread_id (str): The ID of the thread.
            is_resolved (bool): The resolution status to be set.
        """
        thread = self.literal_client.api.get_thread(thread_id)
        thread.metadata['isResolved'] = is_resolved
        self.literal_client.api.update_thread(id=thread.id, metadata=thread.metadata)

    def extract_chat_history_from_thread(self, input_thread) -> List[Dict[str, str]]:
        """
        Flatten a thread's steps into a chat-history list.

        Each entry has 'role', 'name', and 'content'. Assistant entries are
        attributed to 'HarmonyAI'. Exact role/content duplicates are removed,
        then every user message is re-appended at the end (preserved from the
        original implementation: user turns therefore appear twice).

        Args:
            input_thread: A one-element thread list as returned by
                filter_thread_by_id.

        Returns:
            List[Dict[str, str]]: The flattened chat history.
        """
        conversation = self.get_messages_from_thread(input_thread)
        name = self.get_user_name_from_thread(input_thread)
        chat_history = []

        for convo in conversation:
            # Direct user/assistant message steps.
            if convo['type'] in ['user_message', 'assistant_message']:
                role = 'user' if convo['type'] == 'user_message' else 'assistant'
                content = convo['output']['content']
                chat_history.append({
                    'role': role,
                    'name': name if role == 'user' else 'HarmonyAI',
                    'content': content
                })

            # Messages embedded in an LLM generation payload.
            if 'generation' in convo and convo['generation'] is not None:
                if 'messages' in convo['generation']:
                    for message in convo['generation']['messages']:
                        if message['role'] in ['user', 'assistant']:
                            chat_history.append({
                                'role': message['role'],
                                'name': name if message['role'] == 'user' else 'HarmonyAI',
                                'content': message['content']
                            })
                if 'messageCompletion' in convo['generation'] and convo['generation']['messageCompletion'] is not None:
                    message_completion = convo['generation']['messageCompletion']
                    chat_history.append({
                        'role': message_completion['role'],
                        'name': 'HarmonyAI',
                        'content': message_completion['content']
                    })

        # Drop exact role+content duplicates while preserving order.
        filtered_chat_history = []
        for message in chat_history:
            if not any(
                    m['content'] == message['content'] and m['role'] == message['role'] for m in filtered_chat_history):
                filtered_chat_history.append(message)

        # Re-append every user message (kept from the original behavior).
        for message in conversation:
            if message.get('type') == 'user_message':
                last_user_message = message['output']['content']
                filtered_chat_history.append({
                    'role': 'user',
                    'name': name,
                    'content': last_user_message
                })

        return filtered_chat_history

    def count_llm_messages(self, thread_id: str) -> int:
        """
        Count the AI-generated ('llm') steps in a thread.

        Args:
            thread_id (str): The ID of the thread.

        Returns:
            int: The number of 'llm' steps in the thread.
        """
        thread = self.literal_client.api.get_thread(thread_id)
        return sum(step.type == 'llm' for step in thread.steps)

    def send_message(self, thread_id: str, message: str):
        """
        Append an assistant message step to a thread.

        Args:
            thread_id (str): The ID of the thread.
            message (str): The message content to be sent.
        """
        self.literal_client.api.create_step(thread_id=thread_id, type='assistant_message', name='HarmonyAI',
                                            output={'content': message})
212
+
213
+
214
# Example usage: fetch a known thread and dump its chat history.
if __name__ == "__main__":
    manager = LiteralThreadManager(api_key=os.getenv("LITERAL_API_KEY"))

    # Hard-coded demo thread IDs from the development database.
    thread_id = '1bb44dc5-0b81-42e9-84a2-cd34b6fe8480'
    linda_thread_id = '76d03507-4084-46ba-ba7e-734be1f58304'

    thread_content = manager.filter_thread_by_id(linda_thread_id)
    chat = manager.extract_chat_history_from_thread(thread_content)

    print(f"\nChat history for thread ID {thread_id}:")
    for message in chat:
        print(f"{message['role']} - {message['name']}: {message['content']}")
literalai.ipynb ADDED
@@ -0,0 +1,539 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {},
6
+ "source": [
7
+ "### Create a Thread"
8
+ ]
9
+ },
10
+ {
11
+ "cell_type": "code",
12
+ "execution_count": 1,
13
+ "metadata": {},
14
+ "outputs": [],
15
+ "source": [
16
+ "import os\n",
17
+ "from dotenv import load_dotenv\n",
18
+ "load_dotenv()\n",
19
+ "from literalai import LiteralClient\n",
20
+ "\n",
21
+ "literal_client = LiteralClient(api_key=os.getenv(\"LITERAL_API_KEY\"))\n",
22
+ "\n",
23
+ "thread = literal_client.api.upsert_thread(\n",
24
+ " id=\"b6d8486f-a6d9-4731-bfdd-be789613b1df\",\n",
25
+ " name=\"washing the dishes\",\n",
26
+ " participant_id=literal_client.api.get_user(identifier=\"Tom\").id,\n",
27
+ " metadata={\"partnerThreadId\": \"42d078c3-b8f0-43bc-96fb-97b4782c946d\"}\n",
28
+ ")"
29
+ ]
30
+ },
31
+ {
32
+ "cell_type": "code",
33
+ "execution_count": 2,
34
+ "metadata": {},
35
+ "outputs": [
36
+ {
37
+ "data": {
38
+ "text/plain": [
39
+ "{'id': 'b6d8486f-a6d9-4731-bfdd-be789613b1df',\n",
40
+ " 'metadata': {'partnerThreadId': '42d078c3-b8f0-43bc-96fb-97b4782c946d'},\n",
41
+ " 'tags': None,\n",
42
+ " 'name': 'washing the dishes',\n",
43
+ " 'steps': [],\n",
44
+ " 'participant': {'id': '8ff8ee19-6671-4943-a382-52649ebeaaa5',\n",
45
+ " 'identifier': 'Tom'},\n",
46
+ " 'createdAt': '2024-07-10T17:17:24.106Z'}"
47
+ ]
48
+ },
49
+ "execution_count": 2,
50
+ "metadata": {},
51
+ "output_type": "execute_result"
52
+ }
53
+ ],
54
+ "source": [
55
+ "thread.to_dict()"
56
+ ]
57
+ },
58
+ {
59
+ "cell_type": "markdown",
60
+ "metadata": {},
61
+ "source": [
62
+ "### List thread"
63
+ ]
64
+ },
65
+ {
66
+ "cell_type": "code",
67
+ "execution_count": 6,
68
+ "metadata": {},
69
+ "outputs": [
70
+ {
71
+ "name": "stdout",
72
+ "output_type": "stream",
73
+ "text": [
74
+ "{'id': 'b6d8486f-a6d9-4731-bfdd-be789613b1df', 'metadata': {}, 'tags': None, 'name': 'washing the dishes', 'steps': [], 'participant': {'id': '8ff8ee19-6671-4943-a382-52649ebeaaa5', 'identifier': 'Tom'}, 'createdAt': '2024-07-10T17:17:24.106Z'}\n",
75
+ "{'id': '16b64356-bc5c-4259-97bb-b37455c0d8c2', 'metadata': {'id': '023ac87a-ce84-488c-8cdb-3812087c7360', 'env': '{}', 'user': None, 'memory': None, 'runnable': None, 'languages': 'en-US,en;q=0.9,es;q=0.8,ar;q=0.7', 'chat_profile': '', 'root_message': None, 'chat_settings': {}}, 'tags': None, 'name': '2-1 Chat', 'steps': [], 'participant': {'id': '8ff8ee19-6671-4943-a382-52649ebeaaa5', 'identifier': 'Tom'}, 'createdAt': '2024-07-10T17:13:52.548Z'}\n",
76
+ "{'id': '42d078c3-b8f0-43bc-96fb-97b4782c946d', 'metadata': {'id': '4438e3fe-f10b-4f68-84d4-47420f5c6b13', 'env': '{}', 'user': None, 'memory': None, 'runnable': None, 'languages': 'en-US,en;q=0.9,es;q=0.8,ar;q=0.7', 'chat_profile': '', 'root_message': None, 'chat_settings': {}}, 'tags': None, 'name': '2-1 Chat', 'steps': [], 'participant': {'id': 'e16487c4-5b66-4e22-94df-9d5b3be146f5', 'identifier': 'linda'}, 'createdAt': '2024-07-10T16:33:30.719Z'}\n",
77
+ "{'id': 'dca1c123-9d6a-486b-8646-76e3d2bb1278', 'metadata': {'id': '4e7e041f-c6bf-41d7-b2cc-ad84608315ad', 'env': '{}', 'user': None, 'memory': None, 'runnable': None, 'languages': 'en-US,en;q=0.9,es;q=0.8,ar;q=0.7', 'chat_profile': '', 'root_message': None, 'chat_settings': {}}, 'tags': None, 'name': '2-1 Chat', 'steps': [], 'participant': {'id': 'e16487c4-5b66-4e22-94df-9d5b3be146f5', 'identifier': 'linda'}, 'createdAt': '2024-07-10T12:59:10.320Z'}\n",
78
+ "{'id': '4351636b-54c8-47dc-8d81-4bf7cc467249', 'metadata': {}, 'tags': None, 'name': 'Automatice thread:)', 'steps': [], 'participant': {}, 'createdAt': '2024-07-10T09:08:09.851Z'}\n",
79
+ "{'id': '6b843ad4-926b-4c88-9775-f71b77907c61', 'metadata': {'id': 'e590da0d-0cc6-401c-a9a5-ce3c7d844128', 'env': '{}', 'user': None, 'memory': None, 'runnable': None, 'languages': 'en-US,en;q=0.9,es;q=0.8,ar;q=0.7', 'chat_profile': '', 'chat_settings': {}}, 'tags': None, 'name': '2-1 Chat', 'steps': [], 'participant': {'id': '9ddc5de2-477d-489f-9e6b-27e4ce69ed9d', 'identifier': 'admin'}, 'createdAt': '2024-07-10T08:50:20.948Z'}\n",
80
+ "{'id': '4e900626-054d-4656-9bdd-0c71e5262192', 'metadata': {'id': 'f277a972-372d-4862-ae9f-05ba86d5bea7', 'env': '{}', 'user': None, 'memory': None, 'runnable': None, 'languages': 'en-US,en;q=0.9,es;q=0.8,ar;q=0.7', 'chat_profile': '', 'root_message': None, 'chat_settings': {}}, 'tags': None, 'name': '1-1 Chat', 'steps': [], 'participant': {'id': '9ddc5de2-477d-489f-9e6b-27e4ce69ed9d', 'identifier': 'admin'}, 'createdAt': '2024-07-10T08:40:40.817Z'}\n",
81
+ "{'id': '464692b6-5409-4876-af16-824b65e687a1', 'metadata': {}, 'tags': None, 'name': 'Automatice thread:)', 'steps': [], 'participant': {}, 'createdAt': '2024-07-03T14:31:22.155Z'}\n",
82
+ "{'id': '1a9ef338-6d83-45ec-90cf-dd2d8a13ead3', 'metadata': {}, 'tags': None, 'name': 'My first thread !', 'steps': [], 'participant': {}, 'createdAt': '2024-07-03T14:15:48.713Z'}\n",
83
+ "{'id': 'e1854f6b-d984-4a04-847f-fcd530b81b7c', 'metadata': {'id': '68ccee8f-4ee8-4974-b009-fa62c37d4d58', 'env': '{}', 'user': None, 'memory': None, 'runnable': None, 'languages': 'en-US,en;q=0.9,es;q=0.8,ar;q=0.7', 'chat_profile': '', 'root_message': None, 'chat_settings': {}}, 'tags': None, 'name': 'connect with your partner', 'steps': [], 'participant': {'id': '9ddc5de2-477d-489f-9e6b-27e4ce69ed9d', 'identifier': 'admin'}, 'createdAt': '2024-06-26T14:12:27.040Z'}\n",
84
+ "{'id': '51a2df98-5bae-4ff0-a9f5-6e4d838455b7', 'metadata': {'id': 'edcc5cd8-ffa9-4b41-80af-4ae022873f67', 'env': '{}', 'user': None, 'memory': None, 'runnable': None, 'languages': 'en-US,en;q=0.9,es;q=0.8,ar;q=0.7', 'chat_profile': '', 'root_message': None, 'chat_settings': {}}, 'tags': None, 'name': '1:1 chat', 'steps': [], 'participant': {'id': '9ddc5de2-477d-489f-9e6b-27e4ce69ed9d', 'identifier': 'admin'}, 'createdAt': '2024-06-26T14:11:08.218Z'}\n",
85
+ "{'id': '5bd7e4d3-9515-4b85-a573-75d9084b00a5', 'metadata': {'id': '230ab52b-106a-4edd-86cb-e0024a9c614c', 'env': '{}', 'user': None, 'memory': None, 'runnable': None, 'languages': 'en-US,en;q=0.9,es;q=0.8,ar;q=0.7', 'chat_profile': '', 'root_message': None, 'chat_settings': {}}, 'tags': None, 'name': None, 'steps': [], 'participant': {}, 'createdAt': '2024-06-26T13:52:39.985Z'}\n",
86
+ "{'id': 'c59a0563-8f8e-49f3-9b0f-19862b903e32', 'metadata': {'id': '76b24675-6207-4dfc-b1d6-5c0187a40c54', 'env': '{}', 'user': None, 'memory': None, 'runnable': None, 'languages': 'en-US,en;q=0.9,es;q=0.8,ar;q=0.7', 'chat_profile': '', 'chat_settings': {}}, 'tags': None, 'name': 'connect with your partner', 'steps': [], 'participant': {'id': '9ddc5de2-477d-489f-9e6b-27e4ce69ed9d', 'identifier': 'admin'}, 'createdAt': '2024-06-26T13:50:12.546Z'}\n",
87
+ "{'id': '4ccf17f7-f8ad-417c-95e6-6c50d390c5b3', 'metadata': {'id': 'd3e07bc3-ac18-493d-9499-093dc43ada32', 'env': '{}', 'user': None, 'memory': None, 'runnable': None, 'languages': 'en-US,en;q=0.9,es;q=0.8,ar;q=0.7', 'chat_profile': '', 'root_message': None, 'chat_settings': {}}, 'tags': None, 'name': 'connect with your partner', 'steps': [], 'participant': {'id': '9ddc5de2-477d-489f-9e6b-27e4ce69ed9d', 'identifier': 'admin'}, 'createdAt': '2024-06-26T13:48:48.542Z'}\n",
88
+ "{'id': 'f32f2a8e-5edc-4745-954b-a72f6136beb4', 'metadata': {'id': '06140593-f045-4eb8-9920-29e4d3b6aae9', 'env': '{}', 'user': None, 'memory': None, 'runnable': None, 'languages': 'en-US,en;q=0.9,es;q=0.8,ar;q=0.7', 'chat_profile': '', 'root_message': None, 'chat_settings': {}}, 'tags': None, 'name': 'connect with your partner', 'steps': [], 'participant': {'id': '9ddc5de2-477d-489f-9e6b-27e4ce69ed9d', 'identifier': 'admin'}, 'createdAt': '2024-06-26T13:43:55.823Z'}\n",
89
+ "{'id': 'eb115f89-5ef1-42f6-993a-f5c0c1ebdb6a', 'metadata': {'id': '4f687bfe-416b-4358-b182-fe1bba6c8d76', 'env': '{}', 'user': None, 'memory': None, 'runnable': None, 'languages': 'en-US,en;q=0.9,es;q=0.8,ar;q=0.7', 'chat_profile': '', 'root_message': None, 'chat_settings': {}}, 'tags': None, 'name': '1:1 chat', 'steps': [], 'participant': {'id': '9ddc5de2-477d-489f-9e6b-27e4ce69ed9d', 'identifier': 'admin'}, 'createdAt': '2024-06-26T13:42:08.878Z'}\n",
90
+ "{'id': 'a049b457-218e-4682-a1f7-b584bec539bc', 'metadata': {'id': '84cddea9-3ee8-4e7e-8a58-be5cd609c1ab', 'env': '{}', 'user': None, 'memory': None, 'runnable': None, 'languages': 'en-US,en;q=0.9,es;q=0.8,ar;q=0.7', 'chat_profile': '', 'root_message': None, 'chat_settings': {}}, 'tags': None, 'name': \"I'm a software engineer, married to 34 year old woman. Her eyes are blue and she works full-time. She always complains about me not doing haushold chores \", 'steps': [], 'participant': {'id': '9ddc5de2-477d-489f-9e6b-27e4ce69ed9d', 'identifier': 'admin'}, 'createdAt': '2024-06-25T19:13:16.320Z'}\n"
91
+ ]
92
+ }
93
+ ],
94
+ "source": [
95
+ "import os\n",
96
+ "from dotenv import load_dotenv\n",
97
+ "load_dotenv()\n",
98
+ "from literalai import LiteralClient\n",
99
+ "\n",
100
+ "literal_client = LiteralClient(api_key=os.getenv(\"LITERAL_API_KEY\"))\n",
101
+ "\n",
102
+ "threads = literal_client.api.list_threads(first=None, after=None, before=None, order_by=None, filters=None)\n",
103
+ "\n",
104
+ "for thread in threads.data:\n",
105
+ " print(thread.to_dict())"
106
+ ]
107
+ },
108
+ {
109
+ "cell_type": "code",
110
+ "execution_count": 23,
111
+ "metadata": {},
112
+ "outputs": [
113
+ {
114
+ "name": "stdout",
115
+ "output_type": "stream",
116
+ "text": [
117
+ "Automatice thread:)\n",
118
+ "My first thread !\n",
119
+ "connect with your partner\n",
120
+ "1:1 chat\n",
121
+ "None\n",
122
+ "connect with your partner\n",
123
+ "connect with your partner\n",
124
+ "connect with your partner\n",
125
+ "1:1 chat\n",
126
+ "I'm a software engineer, married to 34 year old woman. Her eyes are blue and she works full-time. She always complains about me not doing haushold chores \n"
127
+ ]
128
+ }
129
+ ],
130
+ "source": [
131
+ "for thread in threads.data:\n",
132
+ " print(thread.to_dict()['name'])"
133
+ ]
134
+ },
135
+ {
136
+ "cell_type": "code",
137
+ "execution_count": 21,
138
+ "metadata": {},
139
+ "outputs": [
140
+ {
141
+ "name": "stdout",
142
+ "output_type": "stream",
143
+ "text": [
144
+ "'id': e1854f6b-d984-4a04-847f-fcd530b81b7c\n",
145
+ "'name': connect with your partner\n",
146
+ "{'id': '68ccee8f-4ee8-4974-b009-fa62c37d4d58', 'env': '{}', 'user': None, 'memory': None, 'runnable': None, 'languages': 'en-US,en;q=0.9,es;q=0.8,ar;q=0.7', 'chat_profile': '', 'root_message': None, 'chat_settings': {}}\n",
147
+ "[]\n"
148
+ ]
149
+ }
150
+ ],
151
+ "source": [
152
+ "id = 2\n",
153
+ "print(\"'id': \" + threads.data[id].to_dict()['id'])\n",
154
+ "print(\"'name': \" + threads.data[id].to_dict()['name'])\n",
155
+ "print(threads.data[id].to_dict()['metadata'])\n",
156
+ "print(threads.data[id].to_dict()['steps'])"
157
+ ]
158
+ },
159
+ {
160
+ "cell_type": "markdown",
161
+ "metadata": {},
162
+ "source": []
163
+ },
164
+ {
165
+ "cell_type": "code",
166
+ "execution_count": 2,
167
+ "metadata": {},
168
+ "outputs": [
169
+ {
170
+ "data": {
171
+ "text/plain": [
172
+ "[{'id': '2319abcb-765a-4fab-90f4-d92218cc1f7f',\n",
173
+ " 'metadata': {'isError': False,\n",
174
+ " 'language': None,\n",
175
+ " 'showInput': None,\n",
176
+ " 'waitForAnswer': False,\n",
177
+ " 'disableFeedback': False},\n",
178
+ " 'parentId': None,\n",
179
+ " 'startTime': '2024-07-10T17:18:14.6',\n",
180
+ " 'endTime': '2024-07-10T17:18:14.6',\n",
181
+ " 'type': 'user_message',\n",
182
+ " 'threadId': 'b6d8486f-a6d9-4731-bfdd-be789613b1df',\n",
183
+ " 'error': None,\n",
184
+ " 'input': None,\n",
185
+ " 'output': {'content': 'What is the problem here Linda!!!'},\n",
186
+ " 'generation': None,\n",
187
+ " 'name': 'Tom',\n",
188
+ " 'tags': None,\n",
189
+ " 'scores': [],\n",
190
+ " 'attachments': []},\n",
191
+ " {'id': '04402299-cb7f-40b4-b1a4-ccae7216fd10',\n",
192
+ " 'metadata': {'isError': False,\n",
193
+ " 'language': None,\n",
194
+ " 'showInput': False,\n",
195
+ " 'waitForAnswer': None,\n",
196
+ " 'disableFeedback': True},\n",
197
+ " 'parentId': '2319abcb-765a-4fab-90f4-d92218cc1f7f',\n",
198
+ " 'startTime': '2024-07-10T17:18:14.661',\n",
199
+ " 'endTime': '2024-07-10T17:18:14.689',\n",
200
+ " 'type': 'run',\n",
201
+ " 'threadId': 'b6d8486f-a6d9-4731-bfdd-be789613b1df',\n",
202
+ " 'error': None,\n",
203
+ " 'input': {'content': '{\\n \"input\": \"\"\\n}'},\n",
204
+ " 'output': {'content': '[]'},\n",
205
+ " 'generation': None,\n",
206
+ " 'name': 'load_memory_variables',\n",
207
+ " 'tags': None,\n",
208
+ " 'scores': [],\n",
209
+ " 'attachments': []},\n",
210
+ " {'id': '2ced9228-3f70-4d45-9dbe-20d91f695435',\n",
211
+ " 'metadata': {'isError': False,\n",
212
+ " 'language': None,\n",
213
+ " 'showInput': False,\n",
214
+ " 'waitForAnswer': None,\n",
215
+ " 'disableFeedback': False},\n",
216
+ " 'parentId': '2319abcb-765a-4fab-90f4-d92218cc1f7f',\n",
217
+ " 'startTime': '2024-07-10T17:18:14.714',\n",
218
+ " 'endTime': '2024-07-10T17:18:15.66',\n",
219
+ " 'type': 'llm',\n",
220
+ " 'threadId': 'b6d8486f-a6d9-4731-bfdd-be789613b1df',\n",
221
+ " 'error': None,\n",
222
+ " 'input': {'content': '{\\n \"prompts\": [\\n \"System: \\\\nYou are HarmonyAI, an empathetic and insightful chatbot designed to help individuals resolve relationship issues. \\\\nYour goal is to listen carefully, provide thoughtful and constructive advice, and promote understanding \\\\nand communication between partners.\\\\n\\\\nWhen interacting with users, always:\\\\n Acknowledge their feelings and show empathy.\\\\n Ask clarifying questions to understand their situation better.\\\\n Suggest practical solutions and communication strategies.\\\\n Encourage appreciation and mutual respect.\\\\n Avoid taking sides or placing blame.\\\\n Keep your responses concise and natural, similar to a friendly conversation. \\\\n Aim for responses to be around 2-4 sentences long, providing clear and actionable advice without overwhelming the user.\\\\n\\\\nHere’s an example of how to structure your responses:\\\\n Greet the user and ask how you can help.\\\\n Listen to their concern and acknowledge their feelings.\\\\n Ask for more details to fully understand the issue.\\\\n Provide practical advice and encourage positive communication.\\\\n Conclude with a supportive message and an offer to help further if needed.\\\\n\\\\nAlways maintain a supportive and non-judgmental tone.\\\\nHuman: What is the problem here Linda!!!\"\\n ]\\n}'},\n",
223
+ " 'output': {'content': '{\\n \"role\": \"assistant\",\\n \"content\": \"It sounds like you\\'re feeling frustrated. I\\'m here to listen. Could you tell me more about what\\'s been happening with Linda that\\'s causing you concern?\"\\n}'},\n",
224
+ " 'generation': {'promptId': None,\n",
225
+ " 'provider': 'openai-chat',\n",
226
+ " 'model': 'gpt-3.5-turbo',\n",
227
+ " 'error': None,\n",
228
+ " 'settings': {'n': 1,\n",
229
+ " 'stop': None,\n",
230
+ " 'model': 'gpt-3.5-turbo',\n",
231
+ " 'stream': True,\n",
232
+ " 'streaming': True,\n",
233
+ " 'model_name': 'gpt-3.5-turbo',\n",
234
+ " 'max_retries': 2,\n",
235
+ " 'temperature': 0.7,\n",
236
+ " 'openai_proxy': ''},\n",
237
+ " 'variables': {'input': ''},\n",
238
+ " 'tags': None,\n",
239
+ " 'tools': None,\n",
240
+ " 'tokenCount': 255,\n",
241
+ " 'inputTokenCount': 223,\n",
242
+ " 'outputTokenCount': 32,\n",
243
+ " 'ttFirstToken': 656.6357612609863,\n",
244
+ " 'tokenThroughputInSeconds': 35.8559359429227,\n",
245
+ " 'duration': 0.9482390880584717,\n",
246
+ " 'messages': [{'role': 'system',\n",
247
+ " 'content': '\\nYou are HarmonyAI, an empathetic and insightful chatbot designed to help individuals resolve relationship issues. \\nYour goal is to listen carefully, provide thoughtful and constructive advice, and promote understanding \\nand communication between partners.\\n\\nWhen interacting with users, always:\\n Acknowledge their feelings and show empathy.\\n Ask clarifying questions to understand their situation better.\\n Suggest practical solutions and communication strategies.\\n Encourage appreciation and mutual respect.\\n Avoid taking sides or placing blame.\\n Keep your responses concise and natural, similar to a friendly conversation. \\n Aim for responses to be around 2-4 sentences long, providing clear and actionable advice without overwhelming the user.\\n\\nHere’s an example of how to structure your responses:\\n Greet the user and ask how you can help.\\n Listen to their concern and acknowledge their feelings.\\n Ask for more details to fully understand the issue.\\n Provide practical advice and encourage positive communication.\\n Conclude with a supportive message and an offer to help further if needed.\\n\\nAlways maintain a supportive and non-judgmental tone.'},\n",
248
+ " {'role': 'user', 'content': 'What is the problem here Linda!!!'}],\n",
249
+ " 'messageCompletion': {'role': 'assistant',\n",
250
+ " 'content': \"It sounds like you're feeling frustrated. I'm here to listen. Could you tell me more about what's been happening with Linda that's causing you concern?\"},\n",
251
+ " 'type': 'CHAT'},\n",
252
+ " 'name': 'ChatOpenAI',\n",
253
+ " 'tags': None,\n",
254
+ " 'scores': [],\n",
255
+ " 'attachments': []},\n",
256
+ " {'id': 'e4060a6c-d4ba-4e40-82b7-9bd6256f72a2',\n",
257
+ " 'metadata': {'isError': False,\n",
258
+ " 'language': None,\n",
259
+ " 'showInput': None,\n",
260
+ " 'waitForAnswer': False,\n",
261
+ " 'disableFeedback': False},\n",
262
+ " 'parentId': None,\n",
263
+ " 'startTime': '2024-07-10T17:18:15.692',\n",
264
+ " 'endTime': '2024-07-10T17:18:15.692',\n",
265
+ " 'type': 'assistant_message',\n",
266
+ " 'threadId': 'b6d8486f-a6d9-4731-bfdd-be789613b1df',\n",
267
+ " 'error': None,\n",
268
+ " 'input': None,\n",
269
+ " 'output': {'content': \"It sounds like you're feeling frustrated. I'm here to listen. Could you tell me more about what's been happening with Linda that's causing you concern?\"},\n",
270
+ " 'generation': None,\n",
271
+ " 'name': 'Assistant',\n",
272
+ " 'tags': None,\n",
273
+ " 'scores': [],\n",
274
+ " 'attachments': []},\n",
275
+ " {'id': '53406e4d-e43c-4e75-9d84-09fca024a4f7',\n",
276
+ " 'metadata': {'isError': False,\n",
277
+ " 'language': None,\n",
278
+ " 'showInput': None,\n",
279
+ " 'waitForAnswer': False,\n",
280
+ " 'disableFeedback': False},\n",
281
+ " 'parentId': None,\n",
282
+ " 'startTime': '2024-07-10T17:19:28.173',\n",
283
+ " 'endTime': '2024-07-10T17:19:28.173',\n",
284
+ " 'type': 'user_message',\n",
285
+ " 'threadId': 'b6d8486f-a6d9-4731-bfdd-be789613b1df',\n",
286
+ " 'error': None,\n",
287
+ " 'input': None,\n",
288
+ " 'output': {'content': 'she is complaining about household chores as usual'},\n",
289
+ " 'generation': None,\n",
290
+ " 'name': 'Tom',\n",
291
+ " 'tags': None,\n",
292
+ " 'scores': [],\n",
293
+ " 'attachments': []},\n",
294
+ " {'id': '5ad97a2f-ffb1-43ea-bae4-7fa4ea29767e',\n",
295
+ " 'metadata': {'isError': False,\n",
296
+ " 'language': 'text',\n",
297
+ " 'showInput': False,\n",
298
+ " 'waitForAnswer': None,\n",
299
+ " 'disableFeedback': True},\n",
300
+ " 'parentId': '53406e4d-e43c-4e75-9d84-09fca024a4f7',\n",
301
+ " 'startTime': '2024-07-10T17:19:28.229',\n",
302
+ " 'endTime': '2024-07-10T17:19:28.255',\n",
303
+ " 'type': 'run',\n",
304
+ " 'threadId': 'b6d8486f-a6d9-4731-bfdd-be789613b1df',\n",
305
+ " 'error': None,\n",
306
+ " 'input': {'content': '{\\n \"input\": \"\"\\n}'},\n",
307
+ " 'output': {'content': '[HumanMessage(content=\\'What is the problem here Linda!!!\\'), AIMessage(content=\"It sounds like you\\'re feeling frustrated. I\\'m here to listen. Could you tell me more about what\\'s been happening with Linda that\\'s causing you concern?\")]'},\n",
308
+ " 'generation': None,\n",
309
+ " 'name': 'load_memory_variables',\n",
310
+ " 'tags': None,\n",
311
+ " 'scores': [],\n",
312
+ " 'attachments': []},\n",
313
+ " {'id': '6735b7e6-e7b4-4a9c-8435-cf462a554d4c',\n",
314
+ " 'metadata': {'isError': False,\n",
315
+ " 'language': None,\n",
316
+ " 'showInput': False,\n",
317
+ " 'waitForAnswer': None,\n",
318
+ " 'disableFeedback': False},\n",
319
+ " 'parentId': '53406e4d-e43c-4e75-9d84-09fca024a4f7',\n",
320
+ " 'startTime': '2024-07-10T17:19:28.281',\n",
321
+ " 'endTime': '2024-07-10T17:19:29.302',\n",
322
+ " 'type': 'llm',\n",
323
+ " 'threadId': 'b6d8486f-a6d9-4731-bfdd-be789613b1df',\n",
324
+ " 'error': None,\n",
325
+ " 'input': {'content': '{\\n \"prompts\": [\\n \"System: \\\\nYou are HarmonyAI, an empathetic and insightful chatbot designed to help individuals resolve relationship issues. \\\\nYour goal is to listen carefully, provide thoughtful and constructive advice, and promote understanding \\\\nand communication between partners.\\\\n\\\\nWhen interacting with users, always:\\\\n Acknowledge their feelings and show empathy.\\\\n Ask clarifying questions to understand their situation better.\\\\n Suggest practical solutions and communication strategies.\\\\n Encourage appreciation and mutual respect.\\\\n Avoid taking sides or placing blame.\\\\n Keep your responses concise and natural, similar to a friendly conversation. \\\\n Aim for responses to be around 2-4 sentences long, providing clear and actionable advice without overwhelming the user.\\\\n\\\\nHere’s an example of how to structure your responses:\\\\n Greet the user and ask how you can help.\\\\n Listen to their concern and acknowledge their feelings.\\\\n Ask for more details to fully understand the issue.\\\\n Provide practical advice and encourage positive communication.\\\\n Conclude with a supportive message and an offer to help further if needed.\\\\n\\\\nAlways maintain a supportive and non-judgmental tone.\\\\nHuman: What is the problem here Linda!!!\\\\nAI: It sounds like you\\'re feeling frustrated. I\\'m here to listen. Could you tell me more about what\\'s been happening with Linda that\\'s causing you concern?\\\\nHuman: she is complaining about household chores as usual\"\\n ]\\n}'},\n",
326
+ " 'output': {'content': '{\\n \"role\": \"assistant\",\\n \"content\": \"I understand how that could be stressful. Have you both discussed how to divide household chores fairly? It might help to have a calm conversation to understand each other\\'s perspectives and find a solution that works for both of you.\"\\n}'},\n",
327
+ " 'generation': {'promptId': None,\n",
328
+ " 'provider': 'openai-chat',\n",
329
+ " 'model': 'gpt-3.5-turbo',\n",
330
+ " 'error': None,\n",
331
+ " 'settings': {'n': 1,\n",
332
+ " 'stop': None,\n",
333
+ " 'model': 'gpt-3.5-turbo',\n",
334
+ " 'stream': True,\n",
335
+ " 'streaming': True,\n",
336
+ " 'model_name': 'gpt-3.5-turbo',\n",
337
+ " 'max_retries': 2,\n",
338
+ " 'temperature': 0.7,\n",
339
+ " 'openai_proxy': ''},\n",
340
+ " 'variables': {'input': ''},\n",
341
+ " 'tags': None,\n",
342
+ " 'tools': None,\n",
343
+ " 'tokenCount': 307,\n",
344
+ " 'inputTokenCount': 263,\n",
345
+ " 'outputTokenCount': 44,\n",
346
+ " 'ttFirstToken': 450.09469985961914,\n",
347
+ " 'tokenThroughputInSeconds': 45.01046398075452,\n",
348
+ " 'duration': 1.021984577178955,\n",
349
+ " 'messages': [{'role': 'system',\n",
350
+ " 'content': '\\nYou are HarmonyAI, an empathetic and insightful chatbot designed to help individuals resolve relationship issues. \\nYour goal is to listen carefully, provide thoughtful and constructive advice, and promote understanding \\nand communication between partners.\\n\\nWhen interacting with users, always:\\n Acknowledge their feelings and show empathy.\\n Ask clarifying questions to understand their situation better.\\n Suggest practical solutions and communication strategies.\\n Encourage appreciation and mutual respect.\\n Avoid taking sides or placing blame.\\n Keep your responses concise and natural, similar to a friendly conversation. \\n Aim for responses to be around 2-4 sentences long, providing clear and actionable advice without overwhelming the user.\\n\\nHere’s an example of how to structure your responses:\\n Greet the user and ask how you can help.\\n Listen to their concern and acknowledge their feelings.\\n Ask for more details to fully understand the issue.\\n Provide practical advice and encourage positive communication.\\n Conclude with a supportive message and an offer to help further if needed.\\n\\nAlways maintain a supportive and non-judgmental tone.'},\n",
351
+ " {'role': 'user', 'content': 'What is the problem here Linda!!!'},\n",
352
+ " {'role': 'assistant',\n",
353
+ " 'content': \"It sounds like you're feeling frustrated. I'm here to listen. Could you tell me more about what's been happening with Linda that's causing you concern?\"},\n",
354
+ " {'role': 'user',\n",
355
+ " 'content': 'she is complaining about household chores as usual'}],\n",
356
+ " 'messageCompletion': {'role': 'assistant',\n",
357
+ " 'content': \"I understand how that could be stressful. Have you both discussed how to divide household chores fairly? It might help to have a calm conversation to understand each other's perspectives and find a solution that works for both of you.\"},\n",
358
+ " 'type': 'CHAT'},\n",
359
+ " 'name': 'ChatOpenAI',\n",
360
+ " 'tags': None,\n",
361
+ " 'scores': [],\n",
362
+ " 'attachments': []},\n",
363
+ " {'id': 'd5d60e60-a55f-4394-bb41-c6fb13c6bd2f',\n",
364
+ " 'metadata': {'isError': False,\n",
365
+ " 'language': None,\n",
366
+ " 'showInput': None,\n",
367
+ " 'waitForAnswer': False,\n",
368
+ " 'disableFeedback': False},\n",
369
+ " 'parentId': None,\n",
370
+ " 'startTime': '2024-07-10T17:19:29.321',\n",
371
+ " 'endTime': '2024-07-10T17:19:29.321',\n",
372
+ " 'type': 'assistant_message',\n",
373
+ " 'threadId': 'b6d8486f-a6d9-4731-bfdd-be789613b1df',\n",
374
+ " 'error': None,\n",
375
+ " 'input': None,\n",
376
+ " 'output': {'content': \"I understand how that could be stressful. Have you both discussed how to divide household chores fairly? It might help to have a calm conversation to understand each other's perspectives and find a solution that works for both of you.\"},\n",
377
+ " 'generation': None,\n",
378
+ " 'name': 'Assistant',\n",
379
+ " 'tags': None,\n",
380
+ " 'scores': [],\n",
381
+ " 'attachments': []}]"
382
+ ]
383
+ },
384
+ "execution_count": 2,
385
+ "metadata": {},
386
+ "output_type": "execute_result"
387
+ }
388
+ ],
389
+ "source": [
390
+ "import os\n",
391
+ "from dotenv import load_dotenv\n",
392
+ "load_dotenv()\n",
393
+ "from literal_thread_manager import LiteralThreadManager\n",
394
+ "\n",
395
+ "literal_manager = LiteralThreadManager(api_key=os.getenv(\"LITERAL_API_KEY\"))\n",
396
+ "\n",
397
+ "thread_id = 'b6d8486f-a6d9-4731-bfdd-be789613b1df'\n",
398
+ "\n",
399
+ "thread = literal_manager.filter_thread_by_id(thread_id)\n",
400
+ "user_name = literal_manager.get_user_name_from_thread(thread)\n",
401
+ "literal_manager.get_messages_from_thread(thread)"
402
+ ]
403
+ },
404
+ {
405
+ "cell_type": "code",
406
+ "execution_count": 2,
407
+ "metadata": {},
408
+ "outputs": [
409
+ {
410
+ "name": "stdout",
411
+ "output_type": "stream",
412
+ "text": [
413
+ "\n",
414
+ "Chat history for thread ID b6d8486f-a6d9-4731-bfdd-be789613b1df:\n",
415
+ "user - Tom: What is the problem here Linda!!!\n",
416
+ "assistant - HarmonyAI: It sounds like you're feeling frustrated. I'm here to listen. Could you tell me more about what's been happening with Linda that's causing you concern?\n",
417
+ "user - Tom: she is complaining about household chores as usual\n",
418
+ "assistant - HarmonyAI: I understand how that could be stressful. Have you both discussed how to divide household chores fairly? It might help to have a calm conversation to understand each other's perspectives and find a solution that works for both of you.\n"
419
+ ]
420
+ }
421
+ ],
422
+ "source": [
423
+ "chat = literal_manager.extract_chat_history_from_thread(thread)\n",
424
+ "print(f\"\\nChat history for thread ID {thread_id}:\")\n",
425
+ "for message in chat:\n",
426
+ " print(f\"{message['role']} - {message['name']}: {message['content']}\")"
427
+ ]
428
+ },
429
+ {
430
+ "cell_type": "markdown",
431
+ "metadata": {},
432
+ "source": [
433
+ "### Conflict resolution"
434
+ ]
435
+ },
436
+ {
437
+ "cell_type": "code",
438
+ "execution_count": 6,
439
+ "metadata": {},
440
+ "outputs": [],
441
+ "source": [
442
+ "from openai import OpenAI\n",
443
+ "\n",
444
+ "class ResolutionLogic:\n",
445
+ " def __init__(self, thread_manager: LiteralThreadManager):\n",
446
+ " self.thread_manager = thread_manager\n",
447
+ "\n",
448
+ " def resolve_conflict(self, thread_id):\n",
449
+ "\n",
450
+ " # get other partner thread_id\n",
451
+ " other_partner_thread_id = self.thread_manager.get_other_partner_thread_id(thread_id)\n",
452
+ " # get both thread\n",
453
+ " thread_content = self.thread_manager.filter_thread_by_id(thread_id)\n",
454
+ " other_partner_thread_content = self.thread_manager.filter_thread_by_id(other_partner_thread_id)\n",
455
+ " # extract chat history from both threads\n",
456
+ " chat_history_partner1 = self.thread_manager.extract_chat_history_from_thread(thread_content)\n",
457
+ " chat_history_partner2 = self.thread_manager.extract_chat_history_from_thread(other_partner_thread_content)\n",
458
+ "\n",
459
+ " combined_chat = chat_history_partner1 + chat_history_partner2\n",
460
+ "\n",
461
+ " print(f\"Combined chat: {combined_chat}\")\n",
462
+ "\n",
463
+ " client = OpenAI(api_key=os.getenv(\"OPENAI_API_KEY\"))\n",
464
+ "\n",
465
+ " completion = client.chat.completions.create(\n",
466
+ " model=\"gpt-3.5-turbo\",\n",
467
+ " messages=[\n",
468
+ " {\"role\": \"system\", \"content\": \"\"\"\n",
469
+ " You are an expert relationship counselor. \n",
470
+ " The user is providing various perspectives on a conflict within a relationship. \n",
471
+ " Your task is to analyze these perspectives and offer thoughtful, empathetic, and actionable advice \n",
472
+ " to help resolve the conflict. Consider the emotions and viewpoints of all parties involved and \n",
473
+ " suggest a resolution that promotes understanding, communication, and mutual respect.\n",
474
+ " \n",
475
+ "     Please address them both with their names, to make it more personal. \n",
476
+ " Try to avoid leaking information you got in the prompt.\n",
477
+ " \"\"\"},\n",
478
+ " *combined_chat\n",
479
+ " ]\n",
480
+ " )\n",
481
+ " return completion.choices[0].message.content"
482
+ ]
483
+ },
484
+ {
485
+ "cell_type": "code",
486
+ "execution_count": 7,
487
+ "metadata": {},
488
+ "outputs": [
489
+ {
490
+ "name": "stdout",
491
+ "output_type": "stream",
492
+ "text": [
493
+ "Combined chat: [{'role': 'user', 'name': 'Tom', 'content': 'What is the problem here Linda!!!'}, {'role': 'assistant', 'name': 'HarmonyAI', 'content': \"It sounds like you're feeling frustrated. I'm here to listen. Could you tell me more about what's been happening with Linda that's causing you concern?\"}, {'role': 'user', 'name': 'Tom', 'content': 'she is complaining about household chores as usual'}, {'role': 'assistant', 'name': 'HarmonyAI', 'content': \"I understand how that could be stressful. Have you both discussed how to divide household chores fairly? It might help to have a calm conversation to understand each other's perspectives and find a solution that works for both of you.\"}, {'role': 'user', 'name': 'linda', 'content': 'I have a problem with my partner Tom. he is not helping me with the house hold chores'}, {'role': 'assistant', 'name': 'HarmonyAI', 'content': \"I'm sorry to hear that you're struggling with this issue. Have you spoken to Tom about how you're feeling and the importance of sharing the household responsibilities? What are his thoughts on the matter?\"}, {'role': 'user', 'name': 'linda', 'content': \"Yes, we I always mention that I'm the only one in this house washing the dishes and cleaning the floor. He replies by simply saying that he is super busy and it's normal for me to do these tasks \"}, {'role': 'assistant', 'name': 'HarmonyAI', 'content': \"I understand how frustrating that must be for you. It's important to have an open and honest conversation with Tom about how you both can divide the household chores more fairly. Maybe you can create a shared chore schedule that accommodates both of your schedules. It's essential to communicate your needs and listen to his perspective as well.\"}]\n"
494
+ ]
495
+ },
496
+ {
497
+ "ename": "TypeError",
498
+ "evalue": "unsupported operand type(s) for +: 'dict' and 'list'",
499
+ "output_type": "error",
500
+ "traceback": [
501
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
502
+ "\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
503
+ "\u001b[1;32m/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb Cell 13\u001b[0m line \u001b[0;36m5\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=0'>1</a>\u001b[0m resolution_logic \u001b[39m=\u001b[39m ResolutionLogic(literal_manager)\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=2'>3</a>\u001b[0m thread_id \u001b[39m=\u001b[39m \u001b[39m'\u001b[39m\u001b[39mb6d8486f-a6d9-4731-bfdd-be789613b1df\u001b[39m\u001b[39m'\u001b[39m\n\u001b[0;32m----> <a href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=4'>5</a>\u001b[0m result \u001b[39m=\u001b[39m resolution_logic\u001b[39m.\u001b[39mresolve_conflict(thread_id)\n",
504
+ "\u001b[1;32m/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb Cell 13\u001b[0m line \u001b[0;36m2\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=19'>20</a>\u001b[0m \u001b[39mprint\u001b[39m(\u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mCombined chat: \u001b[39m\u001b[39m{\u001b[39;00mcombined_chat\u001b[39m}\u001b[39;00m\u001b[39m\"\u001b[39m)\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=21'>22</a>\u001b[0m client \u001b[39m=\u001b[39m OpenAI(api_key\u001b[39m=\u001b[39mos\u001b[39m.\u001b[39mgetenv(\u001b[39m\"\u001b[39m\u001b[39mOPENAI_API_KEY\u001b[39m\u001b[39m\"\u001b[39m))\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=23'>24</a>\u001b[0m completion \u001b[39m=\u001b[39m client\u001b[39m.\u001b[39mchat\u001b[39m.\u001b[39mcompletions\u001b[39m.\u001b[39mcreate(\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=24'>25</a>\u001b[0m model\u001b[39m=\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mgpt-3.5-turbo\u001b[39m\u001b[39m\"\u001b[39m,\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=25'>26</a>\u001b[0m messages\u001b[39m=\u001b[39m[\n\u001b[0;32m---> <a 
href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=26'>27</a>\u001b[0m {\u001b[39m\"\u001b[39m\u001b[39mrole\u001b[39m\u001b[39m\"\u001b[39m: \u001b[39m\"\u001b[39m\u001b[39msystem\u001b[39m\u001b[39m\"\u001b[39m, \u001b[39m\"\u001b[39m\u001b[39mcontent\u001b[39m\u001b[39m\"\u001b[39m: \u001b[39m\"\"\"\u001b[39m\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=27'>28</a>\u001b[0m \u001b[39m You are an expert relationship counselor. \u001b[39m\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=28'>29</a>\u001b[0m \u001b[39m The user is providing various perspectives on a conflict within a relationship. \u001b[39m\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=29'>30</a>\u001b[0m \u001b[39m Your task is to analyze these perspectives and offer thoughtful, empathetic, and actionable advice \u001b[39m\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=30'>31</a>\u001b[0m \u001b[39m to help resolve the conflict. 
Consider the emotions and viewpoints of all parties involved and \u001b[39m\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=31'>32</a>\u001b[0m \u001b[39m suggest a resolution that promotes understanding, communication, and mutual respect.\u001b[39m\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=32'>33</a>\u001b[0m \u001b[39m \u001b[39m\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=33'>34</a>\u001b[0m \u001b[39m Please adress them both with their names, to make it more personal. \u001b[39m\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=34'>35</a>\u001b[0m \u001b[39m Try to avoid leaking information you got in the prompt.\u001b[39m\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=35'>36</a>\u001b[0m \u001b[39m \u001b[39m\u001b[39m\"\"\"\u001b[39m} \n\u001b[1;32m <a href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=36'>37</a>\u001b[0m \u001b[39m+\u001b[39m\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=37'>38</a>\u001b[0m combined_chat\n\u001b[1;32m <a 
href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=38'>39</a>\u001b[0m ]\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=39'>40</a>\u001b[0m )\n\u001b[1;32m <a href='vscode-notebook-cell:/Users/mousamaxod/Documents/TUM/EmpatheticChatBot/empathetic_text_generation/ChattingBotMechanism/chainlit/literalai.ipynb#X14sZmlsZQ%3D%3D?line=40'>41</a>\u001b[0m \u001b[39mreturn\u001b[39;00m completion\u001b[39m.\u001b[39mchoices[\u001b[39m0\u001b[39m]\u001b[39m.\u001b[39mmessage\u001b[39m.\u001b[39mcontent\n",
505
+ "\u001b[0;31mTypeError\u001b[0m: unsupported operand type(s) for +: 'dict' and 'list'"
506
+ ]
507
+ }
508
+ ],
509
+ "source": [
510
+ "resolution_logic = ResolutionLogic(literal_manager)\n",
511
+ "\n",
512
+ "thread_id = 'b6d8486f-a6d9-4731-bfdd-be789613b1df'\n",
513
+ "\n",
514
+ "result = resolution_logic.resolve_conflict(thread_id)"
515
+ ]
516
+ }
517
+ ],
518
+ "metadata": {
519
+ "kernelspec": {
520
+ "display_name": "NLP",
521
+ "language": "python",
522
+ "name": "python3"
523
+ },
524
+ "language_info": {
525
+ "codemirror_mode": {
526
+ "name": "ipython",
527
+ "version": 3
528
+ },
529
+ "file_extension": ".py",
530
+ "mimetype": "text/x-python",
531
+ "name": "python",
532
+ "nbconvert_exporter": "python",
533
+ "pygments_lexer": "ipython3",
534
+ "version": "3.11.9"
535
+ }
536
+ },
537
+ "nbformat": 4,
538
+ "nbformat_minor": 2
539
+ }
prompt_engineering/__pycache__/prompt_desing.cpython-311.pyc ADDED
Binary file (2.34 kB). View file
 
prompt_engineering/prompt_desing.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ system_prompt = """
3
+ You are an expert Relationship Coach who specializes in couples therapy.
4
+ Your task is to solve couples' conflicts and provide them with valuable advice.
5
+ Using chain of thoughts, go step by step to solve the conflict between the couple.
6
+ Step 1: Read and Understand the conversation between the couple.
7
+ Step 2: Identify the root cause of the conflict.
8
+ Step 3: Provide a solution to the problem.
9
+ """
10
+
11
+ system_prompt_b = """
12
+ You are HarmonyAI, an empathetic and insightful chatbot designed to help individuals resolve relationship issues.
13
+ Your goal is to listen carefully, provide thoughtful and constructive advice, and promote understanding
14
+ and communication between partners.
15
+
16
+ When interacting with users, always:
17
+ Acknowledge their feelings and show empathy.
18
+ Ask clarifying questions to understand their situation better.
19
+ Suggest practical solutions and communication strategies.
20
+ Encourage appreciation and mutual respect.
21
+ Avoid taking sides or placing blame.
22
+ Keep your responses concise and natural, similar to a friendly conversation.
23
+ Aim for responses to be around 2-4 sentences long, providing clear and actionable advice without overwhelming the user.
24
+
25
+ Here’s an example of how to structure your responses:
26
+ Greet the user and ask how you can help.
27
+ Listen to their concern and acknowledge their feelings.
28
+ Ask for more details to fully understand the issue.
29
+ Provide practical advice and encourage positive communication.
30
+ Conclude with a supportive message and an offer to help further if needed.
31
+
32
+ Always maintain a supportive and non-judgmental tone."""
33
+
34
+ system_prompt_questioning = """
35
+ You are HarmonyAI, an expert relationship counselor. Your task is to help couples expose their feelings and thoughts about their relationship.
36
+ Your goal is to ask insightful questions that encourage partners to communicate openly and honestly, reflecting on their feelings, needs, and expectations.
37
+ Based on the user input, Ask questions that will help in resolving their conflict based on the answers.
38
+ """
public/couple.png ADDED
public/logo_dark.png ADDED
public/logo_light.png ADDED
public/private.png ADDED
public/test.css ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* General Styles */
2
+
3
+ /* Sidebar Styles */
4
+
5
+ /* Button Styles */
6
+ .MuiButtonBase-root.MuiButton-root.MuiButton-outlined.MuiButton-outlinedPrimary.MuiButton-sizeSmall.MuiButton-outlinedSizeSmall.MuiButton-disableElevation.MuiButtonBase-root {
7
+ background-color: #2db1af;
8
+ border: none;
9
+ color: white;
10
+ padding: 15px 32px;
11
+ text-align: center;
12
+ text-decoration: none;
13
+ display: inline-block;
14
+ font-size: 16px;
15
+ margin: 4px 2px;
16
+ cursor: pointer;
17
+ border-radius: 8px;
18
+ }
19
+
20
+ .MuiButtonBase-root.MuiButton-root.MuiButton-contained.MuiButton-containedPrimary.MuiButton-sizeMedium.MuiButton-containedSizeMedium.MuiButton-disableElevation.MuiButton-root.MuiButton-contained.MuiButton-containedPrimary.MuiButton-sizeMedium.MuiButton-containedSizeMedium.MuiButton-disableElevation.css-6gwyqr {
21
+ background-color: #2db1af;
22
+ border: none;
23
+ color: white;
24
+ padding: 15px 32px;
25
+ text-align: center;
26
+ text-decoration: none;
27
+ display: inline-block;
28
+ font-size: 16px;
29
+ margin: 4px 2px;
30
+ cursor: pointer;
31
+ border-radius: 8px;
32
+ }
33
+
34
+ /* Logo & Watermark */
35
+ .MuiStack-root.watermark {
36
+ visibility: hidden;
37
+ }
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ langchain==0.2.14
2
+ langchain_openai==0.1.22
3
+ literalai==0.0.509
4
+ chainlit==1.0.506
5
+ openai==1.41.0
6
+ python-dotenv==1.0.1
resolution_logic.py ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from literal_thread_manager import LiteralThreadManager
2
+ from openai import OpenAI
3
+ from dotenv import load_dotenv
4
+ import os
5
+ from typing import List, Dict
6
+
7
+ load_dotenv()
8
+
9
+
10
class ResolutionLogic:
    """
    Handles the resolution of conflicts within conversation threads.

    Integrates with LiteralThreadManager to manage chat history and uses the
    OpenAI API to generate thoughtful, empathetic responses that help resolve
    a conflict between partners. Also provides a helper that summarizes a
    conflict topic for the other partner and a check that both parties have
    participated sufficiently before a resolution is produced.
    """

    def __init__(self, thread_manager=None, client=None, model: str = "gpt-3.5-turbo"):
        """
        Initializes the resolution logic.

        Args:
            thread_manager: Optional preconfigured LiteralThreadManager.
                Built from the LITERAL_API_KEY environment variable when omitted.
            client: Optional preconfigured OpenAI client.
                Built from the OPENAI_API_KEY environment variable when omitted.
            model (str): Chat model name used for all completions.
        """
        # Defaults preserve the original zero-argument construction while
        # letting callers inject already-configured instances (e.g. main()).
        self.thread_manager = thread_manager or LiteralThreadManager(api_key=os.getenv("LITERAL_API_KEY"))
        self.client = client or OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        self.model = model

    def resolve_conflict(self, thread_id):
        """
        Resolves a conflict by analyzing the conversation threads of both partners and generating a response.

        Args:
            thread_id (str): The thread ID of one of the partners.

        Returns:
            str: The generated resolution advice from the OpenAI model.
        """
        other_partner_thread_id = self.thread_manager.get_other_partner_thread_id(thread_id)
        thread_content = self.thread_manager.filter_thread_by_id(thread_id)
        other_partner_thread_content = self.thread_manager.filter_thread_by_id(other_partner_thread_id)

        chat_history_partner1 = self.thread_manager.extract_chat_history_from_thread(thread_content)
        chat_history_partner2 = self.thread_manager.extract_chat_history_from_thread(other_partner_thread_content)

        combined_chat = chat_history_partner1 + chat_history_partner2
        # Deliberately NOT printed/logged: combined_chat contains both
        # partners' private conversations (the original debug print leaked
        # them to stdout).

        completion = self.client.chat.completions.create(
            model=self.model,
            messages=[
                {"role": "system", "content": """
                You are an expert relationship counselor.
                The user is providing various perspectives on a conflict within a relationship.
                Your task is to analyze these perspectives and offer thoughtful, empathetic, and actionable advice
                to help resolve the conflict. Consider the emotions and viewpoints of all parties involved and
                suggest a resolution that promotes understanding, communication, and mutual respect.

                Please address them both with their names, to make it more personal.
                Try to avoid leaking information you got in the prompt.
                """},
                *combined_chat
            ]
        )
        return completion.choices[0].message.content

    def intervention(self, thread_id, min_messages: int = 3) -> bool | str:
        """
        Checks if both partners have answered enough questions to resolve the conflict.
        If they have, resolves the conflict; otherwise, returns False.

        Args:
            thread_id (str): The thread ID of one of the partners.
            min_messages (int): Minimum LLM messages each partner must have
                exchanged before a resolution is attempted (default 3,
                matching the original behavior).

        Returns:
            bool | str: False if the conflict is not ready to be resolved, otherwise the resolution advice.
        """
        # Already resolved — nothing to do.
        if self.thread_manager.is_conflict_resolved(thread_id):
            return False

        partner2_thread_id = self.thread_manager.get_other_partner_thread_id(thread_id)
        num_partner1_messages = self.thread_manager.count_llm_messages(thread_id)
        num_partner2_messages = self.thread_manager.count_llm_messages(partner2_thread_id)

        if num_partner1_messages >= min_messages and num_partner2_messages >= min_messages:
            resolution = self.resolve_conflict(thread_id)
            # Deliver the resolution to the other partner and mark BOTH
            # threads resolved so intervention() becomes a no-op afterwards.
            self.thread_manager.send_message(partner2_thread_id, resolution)
            self.thread_manager.set_conflict_resolved(thread_id, True)
            self.thread_manager.set_conflict_resolved(partner2_thread_id, True)
            return resolution
        return False

    def summarize_conflict_topic(self, my_name: str, partner_name: str, topic: str):
        """
        Summarizes the conflict topic in a proper way for the other partner.

        Args:
            my_name (str): The name of the user.
            partner_name (str): The name of the partner.
            topic (str): The topic of the conflict.

        Returns:
            str: The generated summary of the conflict topic.
        """
        summary = self.client.chat.completions.create(
            model=self.model,
            messages=[
                {"role": "system", "content": f"""
                As an expert relationship counselor specialized in couple therapy,
                you are helping the couple, {my_name} and {partner_name}, to resolve a conflict or communicate better.
                {partner_name} has shared a topic with you about {topic}.
                Now, your task is to talk to {my_name} and know his perspective on the topic,
                ensuring privacy and not revealing explicit details about what the other person thought.
                Ask {my_name} a question to get the conversation started.
                Keep in mind, this conversation is ONLY between you and {my_name}.
                """},
                {"role": "user", "content": f"""
                {topic}
                """}
            ]
        )
        return summary.choices[0].message.content
118
+
119
+
120
def get_summary(chat_history: List[Dict[str, str]], perspective: str) -> str:
    """
    Generates a summary of the chat conversation from a specific perspective.

    Args:
        chat_history (List[Dict[str, str]]): The chat history to summarize.
            Each message dict is expected to carry 'role', 'name' and
            'content' keys.
        perspective (str): The perspective from which to summarize the chat.

    Returns:
        str: The generated summary.
    """
    openai_client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

    # Flatten the structured history into one plain-text transcript.
    transcript = "\n".join(
        f"{msg['role']} - {msg['name']}: {msg['content']}" for msg in chat_history
    )

    system_instructions = f"""
            You are an assistant summarizing a chat conversation to help resolve a conflict about household chores.
            Please summarize the chat from {perspective}'s perspective,
            ensuring privacy and not revealing explicit details about what the other person thought.
            Focus on the key points discussed and any constructive advice given."""

    response = openai_client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "system", "content": system_instructions},
            {"role": "user", "content": transcript},
        ],
    )
    return response.choices[0].message.content
150
+
151
+
152
def get_thread_and_summarize(manager: LiteralThreadManager, thread_id: str):
    """
    Retrieves the thread and generates a summary of the chat history.

    Args:
        manager (LiteralThreadManager): The manager to handle thread operations.
        thread_id (str): The thread ID to retrieve and summarize.

    Returns:
        str: The generated summary of the chat history.
    """
    thread = manager.filter_thread_by_id(thread_id)
    participant_name = manager.get_user_name_from_thread(thread)
    history = manager.extract_chat_history_from_thread(thread)
    return get_summary(history, participant_name)
167
+
168
+
169
def main():
    """Demo entry point: summarize two example threads, then run the resolver."""
    # Initialize the LiteralThreadManager with the API key
    manager = LiteralThreadManager(api_key=os.getenv("LITERAL_API_KEY"))

    # Hard-coded demo thread IDs for Tom and Linda.
    tom_thread = '83547413-b1bd-4609-9af7-b856ef2108a2'
    linda_thread = '79c9b29a-fa26-4860-ba94-6cddb0581604'

    tom_summary = get_thread_and_summarize(manager, tom_thread)
    linda_summary = get_thread_and_summarize(manager, linda_thread)

    print(f"Summary for Tom: \n{tom_summary}")
    print(f"Summary for Linda: \n{linda_summary}")

    # Resolution prompt:
    # BUG FIX: ResolutionLogic.__init__ takes no positional manager argument,
    # so the original `ResolutionLogic(manager)` raised TypeError at runtime.
    # (Also fixes the `resplution_logic` variable-name typo.)
    resolution_logic = ResolutionLogic()
    resolution = resolution_logic.resolve_conflict(tom_thread)
    print(f"Resolution: \n{resolution}")

    # NOTE(review): `tags` is passed as a plain string here; the literalai
    # API may expect a list of tags — confirm against the SDK before relying
    # on this.
    manager.literal_client.api.upsert_thread(id="resolution_1_tom", name="resolution_tom_linda",
                                             participant_id="tom", tags="tom_linda")
    manager.literal_client.api.upsert_thread(id="resolution_1_linda", name="resolution_tom_linda",
                                             participant_id="linda", tags="tom_linda")


if __name__ == "__main__":
    main()