Update app.py
app.py CHANGED

@@ -115,7 +115,7 @@ def rag_chain(llm, prompt, db):
     completion = rag_chain({"query": prompt})
     return completion
 
-def wandb_trace(rag_option, prompt, prompt_template, result, completion, chain_name, status_msg):
+def wandb_trace(rag_option, prompt, prompt_template, result, completion, chain_name, status_msg, start_time_ms, end_time_ms):
     wandb.init(project = "openai-llm-rag")
     trace = Trace(
         name = chain_name,
@@ -129,8 +129,8 @@ def wandb_trace(rag_option, prompt, prompt_template, result, completion, chain_n
             "model": config["model"],
             "temperature": config["temperature"],
         },
-
-
+        start_time_ms = start_time_ms,
+        end_time_ms = end_time_ms,
         inputs = {"rag_option": rag_option, "prompt": prompt, "prompt_template": prompt_template},
         outputs = {"result": str(result), "completion": str(completion)},
     )
@@ -145,7 +145,11 @@ def invoke(openai_api_key, rag_option, prompt):
     if (prompt == ""):
         raise gr.Error("Prompt is required.")
     completion = ""
+    result = ""
+    prompt_template = ""
+    chain_name = ""
     status_msg = ""
+    start_time_ms = round(time.time() * 1000)
     try:
         llm = ChatOpenAI(model_name = config["model"],
                          openai_api_key = openai_api_key,
@@ -175,7 +179,8 @@ def invoke(openai_api_key, rag_option, prompt):
         status_msg = e
         raise gr.Error(e)
     finally:
-
+        end_time_ms = round(time.time() * 1000)
+        wandb_trace(rag_option, prompt, prompt_template, result, completion, chain_name, status_msg, start_time_ms, end_time_ms)
     return result
 
 description = """<strong>Overview:</strong> Context-aware multimodal reasoning application using a <strong>large language model (LLM)</strong> with
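For readers skimming the diff: the commit threads wall-clock timestamps through `invoke` into `wandb_trace` so each request is logged to Weights & Biases with its duration, and it performs the trace call in `finally` so a trace is recorded even when the chain raises. Below is a minimal, self-contained sketch of that pattern, not the app's exact code: the hunks don't show the file's imports, so it assumes `Trace` is the W&B Prompts class from `wandb.sdk.data_types.trace_tree`, and `run_chain` is a hypothetical stand-in for the app's rag_chain/llm_chain call.

```python
import time

import wandb
from wandb.sdk.data_types.trace_tree import Trace  # assumed import; not shown in the diff


def run_chain(prompt):
    # Hypothetical stand-in for the app's LangChain invocation.
    return f"echo: {prompt}"


def wandb_trace(prompt, result, status_msg, start_time_ms, end_time_ms):
    wandb.init(project = "openai-llm-rag")
    trace = Trace(
        name = "hypothetical-chain",
        status_code = "success" if str(status_msg) == "" else "error",
        status_message = str(status_msg),
        start_time_ms = start_time_ms,  # captured before the chain ran
        end_time_ms = end_time_ms,      # captured in the finally block
        inputs = {"prompt": prompt},
        outputs = {"result": str(result)},
    )
    trace.log("evaluation")  # attach the span to the active run
    wandb.finish()


def invoke(prompt):
    # Initialize everything the trace needs *before* the try block, so the
    # finally block can always log a complete trace, even on failure.
    result = ""
    status_msg = ""
    start_time_ms = round(time.time() * 1000)  # wall-clock start, in ms
    try:
        result = run_chain(prompt)
    except Exception as e:
        status_msg = e
        raise
    finally:
        end_time_ms = round(time.time() * 1000)  # wall-clock end, in ms
        wandb_trace(prompt, result, status_msg, start_time_ms, end_time_ms)
    return result
```

Logging from `finally` is also why the commit pre-initializes `result`, `prompt_template`, and `chain_name` before `try`: if the chain raised before assigning them, the `wandb_trace` call would otherwise fail with a `NameError` and mask the original exception.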