Update app.py
app.py CHANGED
@@ -28,12 +28,15 @@ import re
 list_llm = ["mistralai/Mistral-7B-Instruct-v0.2", "mistralai/Mixtral-8x7B-Instruct-v0.1", "mistralai/Mistral-7B-Instruct-v0.1", \
     "google/gemma-7b-it","google/gemma-2b-it", \
     "HuggingFaceH4/zephyr-7b-beta", "HuggingFaceH4/zephyr-7b-gemma-v0.1", \
-    "meta-llama/Llama-2-7b-chat-hf", "microsoft/phi-2", \
+    "meta-llama/Meta-Llama-3.1-8B-Instruct", "microsoft/phi-2", \
     "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "mosaicml/mpt-7b-instruct", "tiiuae/falcon-7b-instruct", \
     "google/flan-t5-xxl"
 ]
 list_llm_simple = [os.path.basename(llm) for llm in list_llm]
 
+
+
+
 @spaces.GPU
 # Load PDF document and create doc splits
 def load_doc(list_file_path, chunk_size, chunk_overlap):
@@ -139,7 +142,7 @@ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, pr
             max_new_tokens = 250,
             top_k = top_k,
         )
-    elif llm_model == "meta-llama/Llama-2-7b-chat-hf":
+    elif llm_model == "meta-llama/Meta-Llama-3.1-8B-Instruct":
         raise gr.Error("Llama-2-7b-chat-hf model requires a Pro subscription...")
         llm = HuggingFaceEndpoint(
             repo_id=llm_model,
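For readers without the full file: the changed lines follow the Space's pattern of driving the model choice from a flat list of Hub repo ids, shortening them with os.path.basename for display, and handing the selected id to a HuggingFaceEndpoint. The sketch below only illustrates that pattern; the import path, the build_llm helper name, and the parameter values are assumptions for illustration and are not part of this commit.

import os
from langchain_huggingface import HuggingFaceEndpoint  # assumed import; app.py's import block is not shown in this diff

# Subset of the updated repo-id list
list_llm = [
    "mistralai/Mistral-7B-Instruct-v0.2",
    "meta-llama/Meta-Llama-3.1-8B-Instruct",
    "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
]
# Short display names, e.g. "Meta-Llama-3.1-8B-Instruct"
list_llm_simple = [os.path.basename(llm) for llm in list_llm]

def build_llm(llm_model, temperature, max_tokens, top_k):
    # Hypothetical helper mirroring the per-model branch visible in the diff:
    # the selected repo id is passed to HuggingFaceEndpoint along with sampling parameters.
    return HuggingFaceEndpoint(
        repo_id=llm_model,
        temperature=temperature,
        max_new_tokens=max_tokens,
        top_k=top_k,
    )

A dropdown built from list_llm_simple can then be mapped back to the full repo id by index before calling such a helper, which is presumably why the commit only needs to swap the entry in list_llm and the matching elif in initialize_llmchain.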