bstraehle committed on
Commit
6cb1c29
1 Parent(s): a627434

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -18
app.py CHANGED
@@ -115,6 +115,29 @@ def rag_chain(llm, prompt, db):
115
  completion = rag_chain({"query": prompt})
116
  return completion
117
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
118
  def invoke(openai_api_key, rag_option, prompt):
119
  if (openai_api_key == ""):
120
  raise gr.Error("OpenAI API Key is required.")
@@ -122,7 +145,6 @@ def invoke(openai_api_key, rag_option, prompt):
122
  raise gr.Error("Retrieval Augmented Generation is required.")
123
  if (prompt == ""):
124
  raise gr.Error("Prompt is required.")
125
- wandb.init(project = "openai-llm-rag")
126
  completion = ""
127
  try:
128
  llm = ChatOpenAI(model_name = config["model"],
@@ -146,23 +168,7 @@ def invoke(openai_api_key, rag_option, prompt):
146
  completion = e
147
  raise gr.Error(e)
148
  finally:
149
- trace = Trace(
150
- name="test",
151
- kind="chain",
152
- #status_code=status,
153
- #status_message=status_message,
154
- metadata={
155
- "temperature": config["temperature"],
156
- #"token_usage": token_usage,
157
- #"model_name": model_name,
158
- },
159
- #start_time_ms=start_time_ms,
160
- #end_time_ms=end_time_ms,
161
- #inputs={"system_prompt": system_message, "query": query},
162
- #outputs={"response": response_text},
163
- )
164
- trace.log("test")
165
- wandb.finish()
166
  return completion
167
 
168
  description = """<strong>Overview:</strong> Context-aware multimodal reasoning application using a <strong>large language model (LLM)</strong> with
 
115
  completion = rag_chain({"query": prompt})
116
  return completion
117
 
118
def wandb_trace(prompt, completion, rag_option):
    """Log one prompt/completion round-trip to Weights & Biases as a Trace.

    Starts a W&B run in the "openai-llm-rag" project, records the prompt as
    the trace input, the completion as the trace output, and the selected RAG
    option plus the relevant `config` values as metadata, then finishes the run.

    Args:
        prompt: The user prompt sent to the LLM.
        completion: The model's response (or the exception, when called from
            an error path — see the caller's ``finally`` block).
        rag_option: Which Retrieval Augmented Generation mode was selected.
    """
    # fixed: the committed version was missing the ":" after the def signature
    wandb.init(project = "openai-llm-rag")
    trace = Trace(
        name="test",
        kind="chain",
        #status_code=status,
        #status_message=status_message,
        metadata={
            # fixed: missing comma after rag_option was a SyntaxError
            "rag_option": rag_option,
            "chunk_overlap": config["chunk_overlap"],
            "chunk_size": config["chunk_size"],
            "k": config["k"],
            "model": config["model"],
            "temperature": config["temperature"],
        },
        #start_time_ms=start_time_ms,
        #end_time_ms=end_time_ms,
        inputs={"prompt": prompt},
        outputs={"completion": completion},
    )
    trace.log("test")
    wandb.finish()
140
+
141
  def invoke(openai_api_key, rag_option, prompt):
142
  if (openai_api_key == ""):
143
  raise gr.Error("OpenAI API Key is required.")
 
145
  raise gr.Error("Retrieval Augmented Generation is required.")
146
  if (prompt == ""):
147
  raise gr.Error("Prompt is required.")
 
148
  completion = ""
149
  try:
150
  llm = ChatOpenAI(model_name = config["model"],
 
168
  completion = e
169
  raise gr.Error(e)
170
  finally:
171
+ wandb_trace(prompt, completion, rag_option)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
172
  return completion
173
 
174
  description = """<strong>Overview:</strong> Context-aware multimodal reasoning application using a <strong>large language model (LLM)</strong> with