bstraehle committed
Commit 38ee3ac
1 Parent(s): bb0e9c1

Update app.py

Files changed (1):
  1. app.py +20 -9
app.py CHANGED
@@ -28,6 +28,16 @@ MONGODB_COLLECTION_NAME = "gpt-4"
 MONGODB_COLLECTION = client[MONGODB_DB_NAME][MONGODB_COLLECTION_NAME]
 MONGODB_INDEX_NAME = "default"
 
+config = {
+    "model": "gpt-4",
+    "temperature": 0,
+}
+
+wandb_api_key = os.environ["WANDB_API_KEY"]
+wandb.login(key = wandb_api_key)
+wandb.init(project = "openai-llm-rag", config = config)
+config = wandb.config
+
 template = """If you don't know the answer, just say that you don't know, don't try to make up an answer. Keep the answer as concise as possible. Always say
 "🧠 Thanks for using the app - Bernd" at the end of the answer. """
 
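The added block is the standard wandb run-setup pattern: hyperparameters are passed to wandb.init() as a config dict and read back through wandb.config, so every run records the exact model settings it used. A minimal standalone sketch of the same pattern (the project name and key handling mirror the diff; the final print line is illustrative):

import os
import wandb

config = {
    "model": "gpt-4",
    "temperature": 0,
}

# Authenticate with the key from the environment, as the diff does.
wandb.login(key = os.environ["WANDB_API_KEY"])

# Start a run; the dict is recorded as the run's config.
wandb.init(project = "openai-llm-rag", config = config)

# Rebinding to wandb.config means later code reads the values the run
# actually recorded (including any overrides, e.g. from a sweep).
config = wandb.config
print(config.model, config.temperature)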
@@ -48,8 +58,6 @@ YOUTUBE_URL_1 = "https://www.youtube.com/watch?v=--khbXchTeE"
 YOUTUBE_URL_2 = "https://www.youtube.com/watch?v=hdhZwyf24mE"
 YOUTUBE_URL_3 = "https://www.youtube.com/watch?v=vw-KWfKwvTQ"
 
-MODEL_NAME = "gpt-4"
-
 def document_loading_splitting():
     # Document loading
     docs = []
@@ -96,16 +104,18 @@ def document_retrieval_mongodb(llm, prompt):
 
 def llm_chain(llm, prompt):
     llm_chain = LLMChain(llm = llm, prompt = LLM_CHAIN_PROMPT)
-    result = llm_chain.run({"question": prompt})
-    return result
+    completion = llm_chain.run({"question": prompt})
+    wandb.log({"prompt": prompt, "completion": completion})
+    return completion
 
 def rag_chain(llm, prompt, db):
     rag_chain = RetrievalQA.from_chain_type(llm,
                                             chain_type_kwargs = {"prompt": RAG_CHAIN_PROMPT},
                                             retriever = db.as_retriever(search_kwargs = {"k": 3}),
                                             return_source_documents = True)
-    result = rag_chain({"query": prompt})
-    return result["result"]
+    completion = rag_chain({"query": prompt})
+    wandb.log({"prompt": prompt, "completion": completion})
+    return completion["result"]
 
 def invoke(openai_api_key, rag_option, prompt):
     if (openai_api_key == ""):
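Both chain helpers now log each prompt/completion pair to the active run; wandb.log() appends one row to the run history per call, with the dict keys becoming columns in the W&B UI. A minimal sketch of that call in isolation, assuming a run is already open (log_exchange and the sample strings are hypothetical; note that in the RAG branch the diff logs the full chain result dict, which also carries the retrieved source documents):

import wandb

def log_exchange(prompt, completion):
    # Appends one step to the open run's history.
    wandb.log({"prompt": prompt, "completion": completion})

log_exchange("What is RAG?", "Retrieval-augmented generation combines retrieval with an LLM ...")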
@@ -115,9 +125,9 @@ def invoke(openai_api_key, rag_option, prompt):
     if (prompt == ""):
         raise gr.Error("Prompt is required.")
     try:
-        llm = ChatOpenAI(model_name = MODEL_NAME,
+        llm = ChatOpenAI(model_name = config.model,
                          openai_api_key = openai_api_key,
-                         temperature = 0)
+                         temperature = config.temperature)
         if (rag_option == "Chroma"):
             #splits = document_loading_splitting()
             #document_storage_chroma(splits)
@@ -155,7 +165,8 @@ description = """<strong>Overview:</strong> Context-aware multimodal reasoning a
 <a href='https://www.mongodb.com/blog/post/introducing-atlas-vector-search-build-intelligent-applications-semantic-search-ai'>MongoDB</a> vector search.
 <strong>Speech-to-text</strong> via <a href='https://openai.com/research/whisper'>whisper-1</a> model, <strong>text embedding</strong> via
 <a href='https://openai.com/blog/new-and-improved-embedding-model'>text-embedding-ada-002</a> model, and <strong>text generation</strong> via
-<a href='""" + WEB_URL + """'>gpt-4</a> model. Implementation via AI-first <a href='https://www.langchain.com/'>LangChain</a> toolkit."""
+<a href='""" + WEB_URL + """'>gpt-4</a> model. Implementation via AI-first <a href='https://www.langchain.com/'>LangChain</a> toolkit.
+Model performance evaluation via <a href='https://wandb.ai/bstraehle'>Weights & Biases</a>."""
 
 gr.close_all()
 demo = gr.Interface(fn=invoke,
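A caveat when reusing this pattern outside the app: the commit never closes the run, which suits a long-lived Gradio process that keeps streaming wandb.log() calls, but a short-lived script would normally end the run explicitly. A hypothetical teardown sketch:

import wandb

run = wandb.init(project = "openai-llm-rag")
try:
    wandb.log({"prompt": "...", "completion": "..."})
finally:
    # Flushes buffered history and marks the run as finished.
    wandb.finish()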
 