lillybak committed on
Commit dc576de
1 Parent(s): 7b580a0

Getting rid of the reranker to see if it was the cause of the app's time-out.

Files changed (1): app.py +32 -25
app.py CHANGED
@@ -7,7 +7,7 @@ load_dotenv()
 import os
 import sys
 import getpass
-import nest_asyncio
+# import nest_asyncio
 # import pandas as pd
 import faiss
 import openai
@@ -25,12 +25,14 @@ from llama_index.core import set_global_handler
 from llama_index.core.node_parser import MarkdownElementNodeParser
 from llama_index.llms.openai import OpenAI
 from llama_index.embeddings.openai import OpenAIEmbedding
-from llama_index.postprocessor.flag_embedding_reranker import FlagEmbeddingReranker
+# from llama_index.postprocessor.flag_embedding_reranker import FlagEmbeddingReranker
 from llama_parse import LlamaParse
 
 from openai import AsyncOpenAI  # importing openai for API usage
 
-os.environ["CUDA_VISIBLE_DEVICES"] = ""
+# The following line is needed to run locally. Without it, the app finds the GPU cards of my PC.
+# os.environ["CUDA_VISIBLE_DEVICES"] = ""
+
 # GET KEYS
 LLAMA_CLOUD_API_KEY = os.getenv('LLAMA_CLOUD_API_KEY')
 OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
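Aside: rather than commenting the CUDA guard in and out between local runs and deploys, the same effect can be had by gating it on an environment variable. A minimal sketch, assuming a hypothetical RUN_LOCAL flag that nothing in this app currently sets:

import os

# RUN_LOCAL is a made-up flag for illustration: export RUN_LOCAL=1 only on the
# dev machine, so the deployed Space never hides its devices.
if os.getenv("RUN_LOCAL") == "1":
    os.environ["CUDA_VISIBLE_DEVICES"] = ""  # hide local GPUs, as the removed line did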
@@ -41,9 +43,9 @@ os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API Key:")
 # os.environ["WANDB_API_KEY"] = getpass.getpass("WandB API Key: ")
 """
 
-nest_asyncio.apply()
+# nest_asyncio.apply()  # not needed for the app
 
-# PARSING the pdf file
+# PARSING the pdf file with LlamaParse
 parser = LlamaParse(
     result_type="markdown",
     verbose=True,
@@ -53,7 +55,7 @@ parser = LlamaParse(
 
 nvidia_docs = parser.load_data(["./nvidia_2tables.pdf"])
 # Note: nvidia_docs contains only one file (it could contain more). nvidia_docs[0] is the pdf we loaded.
-print(nvidia_docs[0].text[:1000])
+# print(nvidia_docs[0].text[:1000])
 
 # Getting Settings out of llama_index.core, which is a major part of their v0.10 update!
 Settings.llm = OpenAI(model="gpt-3.5-turbo")
@@ -61,24 +63,29 @@ Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small")
 
 # Using MarkdownElementNodeParser to help make sense of our Markdown objects so we can leverage the potentially structured information in the parsed documents.
 
+# Unclear if the following is needed, as I do not know if there are Markdown objects.
 node_parser = MarkdownElementNodeParser(llm=OpenAI(model="gpt-3.5-turbo"), num_workers=8)
-
 nodes = node_parser.get_nodes_from_documents(documents=[nvidia_docs[0]])
+
+"""
 # Let's see what's in the metadata of the nodes:
 for nd in nodes:
     print(nd.metadata)
     for k, v in nd:
         if k == 'table_df':
             print(nd)
+"""
 # Now we extract our `base_nodes` and `objects` to create the `VectorStoreIndex`.
 base_nodes, objects = node_parser.get_nodes_and_objects(nodes)
 
 # We could use the VectorStoreIndex from llama_index.core
 # Or we can use the llama_index FAISS vector store (llama-index-vector-stores-faiss)
-# Trying faiss, and setting its vectors' dimension.
+# Here we will use faiss, setting its vectors' dimension.
+
 faiss_dim = 1536
 faiss_index = faiss.IndexFlatL2(faiss_dim)  # default param overwrite=False, so it will append new vectors
+
+# Parameter "overwrite=True" suppresses appending a vector.
 
 # Creating the FaissVectorStore and its recursive_index_faiss
 llama_faiss_vector_store = FaissVectorStore(faiss_index=faiss_index)
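Note that the hard-coded faiss_dim = 1536 must match the embedding model's output size; text-embedding-3-small does emit 1536-dimensional vectors by default. A quick sanity check, sketched with the embedding class the app already imports:

from llama_index.embeddings.openai import OpenAIEmbedding

# Embed a probe string and confirm its length equals faiss_dim, so that
# faiss.IndexFlatL2(faiss_dim) will accept the app's vectors.
embed_model = OpenAIEmbedding(model="text-embedding-3-small")
probe = embed_model.get_text_embedding("dimension probe")
assert len(probe) == 1536, f"unexpected embedding dim: {len(probe)}"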
@@ -91,14 +98,16 @@ recursive_index_faiss = VectorStoreIndex(nodes=base_nodes+objects, storage_conte
 # 1. Initialize our reranker using `FlagEmbeddingReranker` powered by `BAAI/bge-reranker-large`.
 # 2. Set up our recursive query engine!
 
-reranker = FlagEmbeddingReranker(
-    top_n=5,
-    model="BAAI/bge-reranker-large",
-)
+# Will attempt to run without the reranker, to see if the app stops timing out on Hugging Face.
+# reranker = FlagEmbeddingReranker(
+#     top_n=5,
+#     model="BAAI/bge-reranker-large",
+# )
 
 recursive_query_engine = recursive_index_faiss.as_query_engine(
-    similarity_top_k=15,
-    node_postprocessors=[reranker],
+    similarity_top_k=5,
+    # We will not post-process the answer with the reranker: it takes too long...
+    # node_postprocessors=[reranker],
     verbose=True
 )
 
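To test the commit's hypothesis before redeploying, it may be worth timing one query locally with and without the reranker. A sketch, assuming the index built above is in scope and the reranker lines are temporarily uncommented:

import time

def timed_query(engine, label: str, question: str) -> None:
    # Run one query and report wall-clock latency.
    t0 = time.time()
    engine.query(question)
    print(f"{label}: {time.time() - t0:.1f}s")

q = "Who are the E-VP, Operations - and how old are they?"
timed_query(recursive_index_faiss.as_query_engine(similarity_top_k=5), "no reranker", q)
# With the reranker re-enabled (needs the FlagEmbeddingReranker import restored):
# timed_query(recursive_index_faiss.as_query_engine(
#     similarity_top_k=15, node_postprocessors=[reranker]), "bge reranker", q)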
@@ -119,26 +128,24 @@ user_template = """ Think through your response step by step."""
 
 #user_query = "Who are the E-VP, Operations - and how old are they?"
 
-#response = recursive_query_engine.query(system_template + user_query + user_template)
-
-#str_resp = "{}".format(response)
-
-
+""" test function
 def retriever_resp(prompt):
     import time
     response = "this is my response"
     time.sleep(5)
     return response
+"""
 
 @cl.on_message  # marks a function that should be run each time the chatbot receives a message from a user
 async def main(message: cl.Message):
     settings = cl.user_session.get("settings")
 
+    # user_query is populated from what the user types
     user_query = message.content
-    # prompt = system_template+user_query+user_template
-    response = recursive_query_engine.query(system_template + user_query + user_template)
-    # response = retriever_resp(prompt)
-    # print("AAA",user_query)
+    # Add instructions before and after the user query; they will not show in the app.
+    prompt = system_template + user_query + user_template
+
+    response = recursive_query_engine.query(prompt)
     str_resp = "{}".format(response)
     msg = cl.Message(content=str_resp)
-    await msg.send()
+    await msg.send()
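Separately from the reranker, recursive_query_engine.query() is synchronous, so each request blocks Chainlit's event loop for the whole retrieval, which can also present as a hang. One possible mitigation, sketched with llama_index's async counterpart aquery() and assuming the app's objects are in scope:

@cl.on_message
async def main(message: cl.Message):
    prompt = system_template + message.content + user_template
    # aquery() awaits the same pipeline without blocking the event loop.
    response = await recursive_query_engine.aquery(prompt)
    await cl.Message(content=str(response)).send()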
 