bstraehle committed on
Commit a627434
Parent: e64fbab

Update app.py

Files changed (1)
  1. app.py +19 -4
app.py CHANGED
@@ -16,6 +16,8 @@ from langchain.vectorstores import MongoDBAtlasVectorSearch
 
 from pymongo import MongoClient
 
+from wandb.sdk.data_types.trace_tree import Trace
+
 from dotenv import load_dotenv, find_dotenv
 _ = load_dotenv(find_dotenv())
 
@@ -120,8 +122,7 @@ def invoke(openai_api_key, rag_option, prompt):
         raise gr.Error("Retrieval Augmented Generation is required.")
     if (prompt == ""):
         raise gr.Error("Prompt is required.")
-    os.environ["WANDB_PROJECT"] = "openai-llm-rag"
-    os.environ["LANGCHAIN_WANDB_TRACING"] = "true"
+    wandb.init(project = "openai-llm-rag")
     completion = ""
     try:
         llm = ChatOpenAI(model_name = config["model"],
@@ -145,9 +146,23 @@ def invoke(openai_api_key, rag_option, prompt):
         completion = e
         raise gr.Error(e)
     finally:
+        trace = Trace(
+            name="test",
+            kind="chain",
+            #status_code=status,
+            #status_message=status_message,
+            metadata={
+                "temperature": config["temperature"],
+                #"token_usage": token_usage,
+                #"model_name": model_name,
+            },
+            #start_time_ms=start_time_ms,
+            #end_time_ms=end_time_ms,
+            #inputs={"system_prompt": system_message, "query": query},
+            #outputs={"response": response_text},
+        )
+        trace.log("test")
         wandb.finish()
-        del os.environ["LANGCHAIN_WANDB_TRACING"]
-        del os.environ["WANDB_PROJECT"]
     return completion
 
 description = """<strong>Overview:</strong> Context-aware multimodal reasoning application using a <strong>large language model (LLM)</strong> with