mahynski committed
Commit f04afcf · 1 Parent(s): 64b8f12

debug embeddings

Files changed (1)
app.py  +16 -16
app.py CHANGED

@@ -71,22 +71,7 @@ with st.sidebar:
         step=0.05,
     )
 
-    max_output_tokens = 4096
-
-    # Create LLM
-    if provider == 'openai':
-        llm = OpenAI(
-            model=llm_name,
-            temperature=temperature,
-            max_tokens=max_tokens
-        )
-        # Global tokenization needs to be consistent with LLM
-        # https://docs.llamaindex.ai/en/stable/module_guides/models/llms/
-        Settings.tokenizer = tiktoken.encoding_for_model(llm_name).encode
-        Settings.num_output = max_tokens
-        Settings.context_window = 4096 # max possible
-        Settings.embed_model = OpenAIEmbedding()
-
+    max_output_tokens = 4096
 
     # Enter LLM Token
     llm_token = st.text_input(
@@ -98,6 +83,21 @@ with st.sidebar:
     elif provider == 'huggingface':
         os.environ['HFTOKEN'] = llm_token
 
+    # Create LLM
+    if llm_token is not None:
+        if provider == 'openai':
+            llm = OpenAI(
+                model=llm_name,
+                temperature=temperature,
+                max_tokens=max_tokens
+            )
+            # Global tokenization needs to be consistent with LLM
+            # https://docs.llamaindex.ai/en/stable/module_guides/models/llms/
+            Settings.tokenizer = tiktoken.encoding_for_model(llm_name).encode
+            Settings.num_output = max_tokens
+            Settings.context_window = 4096 # max possible
+            Settings.embed_model = OpenAIEmbedding()
+
     # Enter parsing Token
     parse_token = st.text_input(
         "Enter your LlamaParse token",