mahynski committed
Commit db64926
1 Parent(s): 1e49352

debug embeddings

Files changed (1)
  1. app.py +2 -4
app.py CHANGED
@@ -15,8 +15,6 @@ from llama_index.core import (
     Settings,
 )
 
-# os.environ["OPENAI_API_KEY"] = "sk-proj-<REDACTED>"
-
 from llama_parse import LlamaParse
 
 from streamlit_pdf_viewer import pdf_viewer
@@ -84,12 +82,12 @@ def main():
         Settings.llm = OpenAI(
             model=llm_name,
             temperature=temperature,
-            max_tokens=max_output_tokens
+            # max_tokens=max_output_tokens
         )
         # Global tokenization needs to be consistent with LLM
         # https://docs.llamaindex.ai/en/stable/module_guides/models/llms/
         Settings.tokenizer = tiktoken.encoding_for_model(llm_name).encode
-        Settings.num_output = max_tokens
+        Settings.num_output = max_output_tokens
         Settings.context_window = 4096 # max possible
         Settings.embed_model = OpenAIEmbedding()
     elif provider == 'huggingface':
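
For context, a minimal sketch of the post-commit OpenAI branch of main() is below. The names llm_name, temperature, and max_output_tokens come from the diff itself; the function wrapper, the default values, and the import paths (standard llama-index package layout) are assumptions for illustration, not the app's exact code.

    # Minimal sketch of the settings block after this commit (assumed context).
    import tiktoken
    from llama_index.core import Settings
    from llama_index.llms.openai import OpenAI                 # assumed import path
    from llama_index.embeddings.openai import OpenAIEmbedding  # assumed import path

    def configure_openai(llm_name: str = "gpt-3.5-turbo",      # hypothetical defaults
                         temperature: float = 0.0,
                         max_output_tokens: int = 512) -> None:
        # The API key is now read from the OPENAI_API_KEY environment variable;
        # this commit removed the hardcoded key from app.py.
        Settings.llm = OpenAI(
            model=llm_name,
            temperature=temperature,
            # max_tokens=max_output_tokens  # disabled by this commit
        )
        # Global tokenization needs to be consistent with the LLM:
        # https://docs.llamaindex.ai/en/stable/module_guides/models/llms/
        Settings.tokenizer = tiktoken.encoding_for_model(llm_name).encode
        # Fix: this previously read max_tokens, a name that appears nowhere
        # else in the hunk and so looks like a NameError waiting to happen.
        Settings.num_output = max_output_tokens
        Settings.context_window = 4096  # max possible
        Settings.embed_model = OpenAIEmbedding()

Settings.num_output tells LlamaIndex how much room to reserve for the response when it packs prompts against Settings.context_window, so pointing it at max_output_tokens (rather than the apparently undefined max_tokens) keeps the token budget consistent with the app's output-length setting.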