debug embeddings
Browse files
app.py
CHANGED
@@ -6,11 +6,15 @@ from llama_index.llms.gemini import Gemini
|
|
6 |
from llama_index.llms.huggingface import HuggingFaceLLM
|
7 |
from llama_index.llms.mistralai import MistralAI
|
8 |
from llama_index.llms.openai import OpenAI
|
|
|
|
|
|
|
9 |
from llama_index.core import (
|
10 |
VectorStoreIndex,
|
11 |
Settings,
|
12 |
)
|
13 |
|
|
|
14 |
from llama_parse import LlamaParse
|
15 |
|
16 |
from streamlit_pdf_viewer import pdf_viewer
|
@@ -81,6 +85,7 @@ with st.sidebar:
|
|
81 |
Settings.tokenizer = tiktoken.encoding_for_model(llm_name).encode
|
82 |
Settings.num_output = max_tokens
|
83 |
Settings.context_window = 4096 # max possible
|
|
|
84 |
|
85 |
|
86 |
# Enter LLM Token
|
|
|
6 |
from llama_index.llms.huggingface import HuggingFaceLLM
|
7 |
from llama_index.llms.mistralai import MistralAI
|
8 |
from llama_index.llms.openai import OpenAI
|
9 |
+
|
10 |
+
from llama_index.embeddings.openai import OpenAIEmbedding
|
11 |
+
|
12 |
from llama_index.core import (
|
13 |
VectorStoreIndex,
|
14 |
Settings,
|
15 |
)
|
16 |
|
17 |
+
|
18 |
from llama_parse import LlamaParse
|
19 |
|
20 |
from streamlit_pdf_viewer import pdf_viewer
|
|
|
85 |
Settings.tokenizer = tiktoken.encoding_for_model(llm_name).encode
|
86 |
Settings.num_output = max_tokens
|
87 |
Settings.context_window = 4096 # max possible
|
88 |
+
Settings.embed_model = OpenAIEmbedding()
|
89 |
|
90 |
|
91 |
# Enter LLM Token
|