debug embeddings
app.py
@@ -14,7 +14,7 @@ from llama_index.core import (
     Settings,
 )
 
-os.environ["OPENAI_API_KEY"] = "sk-proj-<redacted>"
+# os.environ["OPENAI_API_KEY"] = "sk-proj-<redacted>"
 
 from llama_parse import LlamaParse
 
@@ -38,7 +38,7 @@ def main():
     provider = st.selectbox(
         label="Select LLM Provider",
         options=['google', 'huggingface', 'mistralai', 'openai'],
-        index=
+        index=3
     )
 
     # Select LLM
@@ -83,15 +83,14 @@ def main():
         Settings.llm = OpenAI(
             model=llm_name,
             temperature=temperature,
-            max_tokens=
-            api_key=os.environ.get("OPENAI_API_KEY")
+            max_tokens=max_output_tokens
         )
         # Global tokenization needs to be consistent with LLM
         # https://docs.llamaindex.ai/en/stable/module_guides/models/llms/
         Settings.tokenizer = tiktoken.encoding_for_model(llm_name).encode
         Settings.num_output = max_tokens
         Settings.context_window = 4096 # max possible
-        Settings.embed_model = OpenAIEmbedding(
+        Settings.embed_model = OpenAIEmbedding()
     elif provider == 'huggingface':
         os.environ['HFTOKEN'] = str(llm_token)
 
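After this commit the hardcoded key is gone, so the OpenAI branch only works if OPENAI_API_KEY is set in the process environment: both OpenAI(...) and a bare OpenAIEmbedding() fall back to that variable when no api_key argument is given. Below is a minimal sketch of the resulting branch as a standalone function. The name configure_openai is hypothetical; llm_name, temperature, and max_output_tokens are assumed to come from the Streamlit sidebar elsewhere in app.py, and the sketch assumes the max_tokens used for Settings.num_output in the diff refers to the same sidebar value as max_output_tokens.

import os

import tiktoken
from llama_index.core import Settings
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI

def configure_openai(llm_name: str, temperature: float, max_output_tokens: int) -> None:
    # Hypothetical helper mirroring the 'openai' branch after this commit.
    # The key now comes from the environment instead of a hardcoded literal.
    if "OPENAI_API_KEY" not in os.environ:
        raise RuntimeError("Export OPENAI_API_KEY before selecting the OpenAI provider")

    Settings.llm = OpenAI(
        model=llm_name,
        temperature=temperature,
        max_tokens=max_output_tokens,
    )
    # Keep global tokenization consistent with the selected model.
    Settings.tokenizer = tiktoken.encoding_for_model(llm_name).encode
    Settings.num_output = max_output_tokens
    Settings.context_window = 4096  # max possible
    # With no arguments, OpenAIEmbedding reads OPENAI_API_KEY from the
    # environment, which is why dropping the explicit api_key argument
    # (and the dangling OpenAIEmbedding( call) still works here.
    Settings.embed_model = OpenAIEmbedding()

Note that commenting out a committed key does not revoke it; a key that has appeared anywhere in a commit history should be rotated.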