update
- app.py +130 -89
- local_config_example.json +1 -0
app.py
CHANGED
@@ -57,6 +57,7 @@ if is_env_local:
     OPEN_AI_KEY = config["OPEN_AI_KEY"]
     OPEN_AI_ASSISTANT_ID_GPT4_BOT1 = config["OPEN_AI_ASSISTANT_ID_GPT4_BOT1"]
     OPEN_AI_ASSISTANT_ID_GPT3_BOT1 = config["OPEN_AI_ASSISTANT_ID_GPT3_BOT1"]
+    OPEN_AI_MODERATION_BOT1 = config["OPEN_AI_MODERATION_BOT1"]
     OPEN_AI_KEY_BOT2 = config["OPEN_AI_KEY_BOT2"]
     OPEN_AI_ASSISTANT_ID_GPT4_BOT2 = config["OPEN_AI_ASSISTANT_ID_GPT4_BOT2"]
     OPEN_AI_ASSISTANT_ID_GPT3_BOT2 = config["OPEN_AI_ASSISTANT_ID_GPT3_BOT2"]
@@ -76,6 +77,7 @@ else:
     OPEN_AI_KEY = os.getenv("OPEN_AI_KEY")
     OPEN_AI_ASSISTANT_ID_GPT4_BOT1 = os.getenv("OPEN_AI_ASSISTANT_ID_GPT4_BOT1")
     OPEN_AI_ASSISTANT_ID_GPT3_BOT1 = os.getenv("OPEN_AI_ASSISTANT_ID_GPT3_BOT1")
+    OPEN_AI_MODERATION_BOT1 = os.getenv("OPEN_AI_MODERATION_BOT1")
     OPEN_AI_KEY_BOT2 = os.getenv("OPEN_AI_KEY_BOT2")
     OPEN_AI_ASSISTANT_ID_GPT4_BOT2 = os.getenv("OPEN_AI_ASSISTANT_ID_GPT4_BOT2")
     OPEN_AI_ASSISTANT_ID_GPT3_BOT2 = os.getenv("OPEN_AI_ASSISTANT_ID_GPT3_BOT2")
@@ -122,8 +124,20 @@ def check_open_ai_access(open_ai_api_key):
         return False
 
 open_ai_api_key_assistant_id_list = [
-    {
-
+    {
+        "account":"bot1",
+        "open_ai_api_key": OPEN_AI_KEY,
+        "assistant_gpt4_id": OPEN_AI_ASSISTANT_ID_GPT4_BOT1,
+        "assistant_gpt3_id": OPEN_AI_ASSISTANT_ID_GPT3_BOT1,
+        "moderation": OPEN_AI_MODERATION_BOT1
+    },
+    {
+        "account":"bot2",
+        "open_ai_api_key": OPEN_AI_KEY_BOT2,
+        "assistant_gpt4_id": OPEN_AI_ASSISTANT_ID_GPT4_BOT2,
+        "assistant_gpt3_id": OPEN_AI_ASSISTANT_ID_GPT3_BOT2,
+        "moderation": OPEN_AI_MODERATION_BOT1
+    },
 ]
 for open_ai_api_key_assistant_id in open_ai_api_key_assistant_id_list:
     account = open_ai_api_key_assistant_id["account"]
@@ -132,6 +146,7 @@ for open_ai_api_key_assistant_id in open_ai_api_key_assistant_id_list:
     OPEN_AI_CLIENT = OpenAI(api_key=open_ai_api_key)
     OPEN_AI_ASSISTANT_ID_GPT4 = open_ai_api_key_assistant_id["assistant_gpt4_id"]
     OPEN_AI_ASSISTANT_ID_GPT3 = open_ai_api_key_assistant_id["assistant_gpt3_id"]
+    OPEN_AI_MODERATION_CLIENT = OpenAI(api_key=open_ai_api_key_assistant_id["moderation"])
     print(f"OpenAI access is OK, account: {account}")
     break
 
@@ -2177,6 +2192,18 @@ def get_instructions(content_subject, content_grade, key_moments, socratic_mode=
     """
     return instructions
 
+def get_chat_moderation(user_content):
+    # response = client.moderations.create(input=text)
+    response = OPEN_AI_MODERATION_CLIENT.moderations.create(input=user_content)
+    response_dict = response.model_dump()
+    is_flagged = response_dict['results'][0]['flagged']
+    print("========get_chat_moderation==========")
+    print(f"is_flagged: {is_flagged}")
+    print(response_dict)
+    print("========get_chat_moderation==========")
+
+    return is_flagged, response_dict
+
 def chat_with_any_ai(ai_type, password, video_id, user_data, transcript_state, key_moments, user_message, chat_history, content_subject, content_grade, questions_answers_json, socratic_mode=False, thread_id=None, ai_name=None):
     print(f"ai_type: {ai_type}")
     print(f"user_data: {user_data}")
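
For reference, here is a minimal standalone sketch of the moderation call this hunk introduces. It uses the openai Python SDK's moderations endpoint the same way get_chat_moderation does; the OPEN_AI_MODERATION_KEY environment variable and the is_message_flagged helper are assumptions made for the sketch, not names used by the Space itself.

import os
from openai import OpenAI

# Hypothetical standalone equivalent of get_chat_moderation.
moderation_client = OpenAI(api_key=os.getenv("OPEN_AI_MODERATION_KEY"))

def is_message_flagged(user_content: str):
    # The moderations endpoint returns one result per input string.
    response = moderation_client.moderations.create(input=user_content)
    result = response.results[0]
    # result.flagged is True when any moderation category triggers.
    return result.flagged, response.model_dump()

if __name__ == "__main__":
    flagged, details = is_message_flagged("hello, can you explain photosynthesis?")
    print(flagged)  # expected: False for benign input
    print(details["results"][0]["categories"])
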
@@ -2194,10 +2221,14 @@ def chat_with_any_ai(ai_type, password, video_id, user_data, transcript_state, k
 
     verify_chat_limit(chat_history, CHAT_LIMIT)
 
+    is_flagged, response_dict = get_chat_moderation(user_message)
     if ai_type == "chat_completions":
-
-
-
+        if is_flagged:
+            response_text = "您的留言已被標記為不當內容,請重新發送。"
+        else:
+            chatbot_config = get_chatbot_config(ai_name, transcript_state, key_moments, content_subject, content_grade, video_id, socratic_mode)
+            chatbot = Chatbot(chatbot_config)
+            response_text = chatbot.chat(user_message, chat_history)
         # if thread_id is none, create random thread_id + timestamp
         if thread_id is None or thread_id == "":
             thread_id = "thread_" + str(uuid.uuid4()) + str(int(time.time()))
@@ -2212,22 +2243,12 @@ def chat_with_any_ai(ai_type, password, video_id, user_data, transcript_state, k
             "socratic_mode": str(socratic_mode),
             "assistant_id": ai_name,
             "is_streaming": "false",
+            "moderation_is_flagged": str(is_flagged),
+            "moderation_response_dict": str(response_dict)
         }
     elif ai_type == "assistant":
         client = OPEN_AI_CLIENT
-        assistant_id = OPEN_AI_ASSISTANT_ID_GPT4
-        if isinstance(key_moments, str):
-            key_moments_json = json.loads(key_moments)
-        else:
-            key_moments_json = key_moments
-        # key_moments_json remove images
-        for moment in key_moments_json:
-            moment.pop('images', None)
-            moment.pop('end', None)
-            moment.pop('transcript', None)
-        key_moments_text = json.dumps(key_moments_json, ensure_ascii=False)
-        instructions = get_instructions(content_subject, content_grade, key_moments_text, socratic_mode)
-        print(f"=== instructions:{instructions} ===")
+        assistant_id = OPEN_AI_ASSISTANT_ID_GPT4
         metadata={
             "video_id": video_id,
             "user_data": user_data,
@@ -2236,10 +2257,28 @@ def chat_with_any_ai(ai_type, password, video_id, user_data, transcript_state, k
             "socratic_mode": str(socratic_mode),
             "assistant_id": assistant_id,
             "is_streaming": "false",
+            "moderation_is_flagged": str(is_flagged),
+            "moderation_response_dict": str(response_dict)
         }
-
-
-
+
+        if is_flagged:
+            response_text = "您的留言已被標記為不當內容,請重新發送。"
+        else:
+            if isinstance(key_moments, str):
+                key_moments_json = json.loads(key_moments)
+            else:
+                key_moments_json = key_moments
+            # key_moments_json remove images
+            for moment in key_moments_json:
+                moment.pop('images', None)
+                moment.pop('end', None)
+                moment.pop('transcript', None)
+            key_moments_text = json.dumps(key_moments_json, ensure_ascii=False)
+            instructions = get_instructions(content_subject, content_grade, key_moments_text, socratic_mode)
+            print(f"=== instructions:{instructions} ===")
+            user_message_note = "/n 請嚴格遵循instructions,擔任一位蘇格拉底家教,絕對不要重複 user 的問句,請用引導的方式指引方向,請一定要用繁體中文回答 zh-TW,並用台灣人的禮貌口語表達,回答時不要特別說明這是台灣人的語氣,請在回答的最後標註【參考:(時):(分):(秒)】,(如果是反問學生,就只問一個問題,請幫助學生更好的理解資料,字數在100字以內,回答時如果講到數學專有名詞,請用數學符號代替文字(Latex 用 $ 字號 render, ex: $x^2$)"
+            user_content = user_message + user_message_note
+            response_text, thread_id = handle_conversation_by_open_ai_assistant(client, user_content, instructions, assistant_id, thread_id, metadata, fallback=True)
 
     # 更新聊天历史
     chat_history = update_chat_history(user_message, response_text, chat_history)
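
One caveat on the metadata written in this hunk: thread metadata values are plain strings with a documented size cap (to the best of my recollection of the Assistants API docs, 16 key-value pairs and 512 characters per value), so str(response_dict) from the full moderation response can overflow it. A hedged sketch of trimming the value before the update; truncate_metadata_value is a hypothetical helper, not part of app.py.

MAX_METADATA_VALUE_LEN = 512  # assumed per-value limit from the API docs

def truncate_metadata_value(value) -> str:
    # Thread metadata only accepts strings; cut long payloads instead of risking a failed update.
    return str(value)[:MAX_METADATA_VALUE_LEN]

# usage inside the metadata dict built above (sketch):
# "moderation_response_dict": truncate_metadata_value(response_dict),
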
@@ -2632,77 +2671,79 @@ def chat_with_opan_ai_assistant_streaming(user_message, chat_history, password,
     if chat_history is not None and len(chat_history) > CHAT_LIMIT:
         error_msg = f"此次對話超過上限(對話一輪{CHAT_LIMIT}次)"
         raise gr.Error(error_msg)
+
+    print("===chat_with_opan_ai_assistant_streaming===")
+    print(user_message)
+
 
-
-
-
-
-
-
-
-
-
-
-
-
-    #
-
-    if isinstance(key_moments, str):
-        key_moments_json = json.loads(key_moments)
-    else:
-        key_moments_json = key_moments
-    # key_moments_json remove images
-    for moment in key_moments_json:
-        moment.pop('images', None)
-        moment.pop('end', None)
-        moment.pop('transcript', None)
-    key_moments_text = json.dumps(key_moments_json, ensure_ascii=False)
-
-    instructions = get_instructions(content_subject, content_grade, key_moments_text, socratic_mode)
-    # 创建线程
-    if not thread_id:
-        thread = client.beta.threads.create()
-        thread_id = thread.id
-        print(f"new thread_id: {thread_id}")
-    else:
-        thread = client.beta.threads.retrieve(thread_id)
-        print(f"old thread_id: {thread_id}")
-    metadata = {
-        "youtube_id": video_id,
-        "user_data": user_data,
-        "content_subject": content_subject,
-        "content_grade": content_grade,
-        "assistant_id": assistant_id,
-        "is_streaming": "true",
-    }
-    client.beta.threads.update(
-        thread_id=thread_id,
-        metadata=metadata
-    )
-
-    # 向线程添加用户的消息
-    client.beta.threads.messages.create(
-        thread_id=thread.id,
-        role="user",
-        content=user_message + "/n 請嚴格遵循instructions,擔任一位蘇格拉底家教,請一定要用繁體中文回答 zh-TW,並用台灣人的禮貌口語表達,回答時不要特別說明這是台灣人的語氣,不用提到「逐字稿」這個詞,用「內容」代替)),請在回答的最後標註【參考資料:(時):(分):(秒)】,(如果是反問學生,就只問一個問題,請幫助學生更好的理解資料,字數在100字以內)"
-    )
+    is_flagged, response_dict = get_chat_moderation(user_message)
+    assistant_id = OPEN_AI_ASSISTANT_ID_GPT4 #GPT 4 turbo
+    # assistant_id = OPEN_AI_ASSISTANT_ID_GPT3 #GPT 3.5 turbo
+    client = OPEN_AI_CLIENT
+    metadata = {
+        "youtube_id": video_id,
+        "user_data": user_data,
+        "content_subject": content_subject,
+        "content_grade": content_grade,
+        "assistant_id": assistant_id,
+        "is_streaming": "true",
+        "moderation_is_flagged": str(is_flagged),
+        # "moderation_response_dict": str(response_dict)
+    }
 
-
-
-
-
-
-
-
-
-
-
-
+    if is_flagged:
+        partial_messages = "您的留言已被標記為不當內容,請重新發送。"
+        yield partial_messages
+    else:
+        try:
+            if isinstance(key_moments, str):
+                key_moments_json = json.loads(key_moments)
+            else:
+                key_moments_json = key_moments
+            # key_moments_json remove images
+            for moment in key_moments_json:
+                moment.pop('images', None)
+                moment.pop('end', None)
+                moment.pop('transcript', None)
+            key_moments_text = json.dumps(key_moments_json, ensure_ascii=False)
+
+            instructions = get_instructions(content_subject, content_grade, key_moments_text, socratic_mode)
+            # 创建线程
+            if not thread_id:
+                thread = client.beta.threads.create()
+                thread_id = thread.id
+                print(f"new thread_id: {thread_id}")
+            else:
+                thread = client.beta.threads.retrieve(thread_id)
+                print(f"old thread_id: {thread_id}")
+
+            client.beta.threads.update(
+                thread_id=thread_id,
+                metadata=metadata
+            )
+
+            # 向线程添加用户的消息
+            client.beta.threads.messages.create(
+                thread_id=thread.id,
+                role="user",
+                content=user_message + "/n 請嚴格遵循instructions,擔任一位蘇格拉底家教,請一定要用繁體中文回答 zh-TW,並用台灣人的禮貌口語表達,回答時不要特別說明這是台灣人的語氣,不用提到「逐字稿」這個詞,用「內容」代替)),請在回答的最後標註【參考資料:(時):(分):(秒)】,(如果是反問學生,就只問一個問題,請幫助學生更好的理解資料,字數在100字以內)"
+            )
 
-
-
-
-
+            with client.beta.threads.runs.stream(
+                thread_id=thread.id,
+                assistant_id=assistant_id,
+                instructions=instructions,
+            ) as stream:
+                partial_messages = ""
+                for event in stream:
+                    if event.data and event.data.object == "thread.message.delta":
+                        message = event.data.delta.content[0].text.value
+                        partial_messages += message
+                        yield partial_messages
+        except Exception as e:
+            print(f"Error: {e}")
+            raise gr.Error(f"Error: {e}")
+
     user_id = user_data
     route = "chat_with_opan_ai_assistant_streaming"
     endpoint = "assistant_streaming"
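
Taken together, the streaming branch follows a gate-then-stream pattern: yield the canned refusal once when the message is flagged, otherwise stream assistant deltas as they arrive. Below is a condensed sketch of that generator shape, assuming a client, assistant_id, thread, and instructions prepared as in the hunk above; stream_reply is a hypothetical wrapper, not a function defined in app.py.

def stream_reply(client, assistant_id, thread, instructions, is_flagged):
    # Moderation gate: emit a single canned message and stop.
    if is_flagged:
        yield "您的留言已被標記為不當內容,請重新發送。"
        return
    # Otherwise stream partial text, accumulating deltas so the UI can re-render the growing answer.
    partial = ""
    with client.beta.threads.runs.stream(
        thread_id=thread.id,
        assistant_id=assistant_id,
        instructions=instructions,
    ) as stream:
        for event in stream:
            if event.data and event.data.object == "thread.message.delta":
                partial += event.data.delta.content[0].text.value
                yield partial
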
local_config_example.json
CHANGED
@@ -7,6 +7,7 @@
     "OPEN_AI_KEY": "sk-proj-xxxxxxxxxx",
     "OPEN_AI_ASSISTANT_ID_GPT4_BOT1": "asst_3cxxxxxxxxxxjrQio9",
     "OPEN_AI_ASSISTANT_ID_GPT3_BOT1": "asst_mcuxxxxxxxxxx5L4e",
+    "OPEN_AI_MODERATION_BOT1": "sk-proj-xxxxxxxxxx",
     "OPEN_AI_KEY_BOT2": "sk-proj-5HsYxxxxxxxxxxkH4pShu",
     "OPEN_AI_ASSISTANT_ID_GPT4_BOT2": "asst_3xxxxxxxxxxrQio9",
     "OPEN_AI_ASSISTANT_ID_GPT3_BOT2": "asst_mxxxxxxxxxx6vg5L4e",