Update app.py
Browse files
app.py
CHANGED
@@ -46,10 +46,10 @@ model_info = {
|
|
46 |
"repo_id": "TheBloke/CodeLlama-7B-GGUF",
|
47 |
"filename": "codellama-7b.Q4_K_M.gguf",
|
48 |
},
|
49 |
- "Falcon-7B-Instruct": {
|
50 |
- "repo_id": "TheBloke/Falcon-7B-Instruct-GGML",
|
51 |
- "filename": "falcon-7b-instruct.ggccv1.q4_1.bin",
|
52 |
- },
|
53 |
|
54 |
}
|
55 |
for model_name in models:
|
@@ -106,9 +106,9 @@ def model_initialization(model):
|
|
106 |
elif(model=="vicuna-7B-v1.5"):
|
107 |
repo_id="TheBloke/vicuna-7B-v1.5-GGUF"
|
108 |
filename="vicuna-7b-v1.5.Q4_K_M.gguf"
|
109 |
- elif(model=="Falcon-7B-Instruct"):
|
110 |
- repo_id="TheBloke/Falcon-7B-Instruct-GGML"
|
111 |
- filename="falcon-7b-instruct.ggccv1.q4_1.bin"
|
112 |
elif(model=="CodeLlama-7B"):
|
113 |
repo_id="TheBloke/CodeLlama-7B-GGUF"
|
114 |
filename="codellama-7b.Q4_K_M.gguf"
|
@@ -155,8 +155,8 @@ def predict(message, history):
|
|
155 |
|
156 |
with gr.Blocks() as UI:
|
157 |
|
158 |
- models=gr.Dropdown(["CodeLlama-7B","Llama-2-13B-chat"
|
159 |
- "vicuna-7B-v1.5"],value=["CodeLlama-7B","Llama-2-13B-chat",
|
160 |
"vicuna-7B-v1.5"], label="please select at least one model", info="default model is Mistral-7B-Instruct-v0.2")
|
161 |
textInfo = gr.Textbox(value="current model is Mistral-7B-Instruct-v0.2",label="Model Status");
|
162 |
# Chatbot interface
|
|
|
46 |
"repo_id": "TheBloke/CodeLlama-7B-GGUF",
|
47 |
"filename": "codellama-7b.Q4_K_M.gguf",
|
48 |
},
|
49 |
+ # "Falcon-7B-Instruct": {
|
50 |
+ # "repo_id": "TheBloke/Falcon-7B-Instruct-GGML",
|
51 |
+ # "filename": "falcon-7b-instruct.ggccv1.q4_1.bin",
|
52 |
+ # },
|
53 |
|
54 |
}
|
55 |
for model_name in models:
|
|
|
106 |
elif(model=="vicuna-7B-v1.5"):
|
107 |
repo_id="TheBloke/vicuna-7B-v1.5-GGUF"
|
108 |
filename="vicuna-7b-v1.5.Q4_K_M.gguf"
|
109 |
+ # elif(model=="Falcon-7B-Instruct"):
|
110 |
+ # repo_id="TheBloke/Falcon-7B-Instruct-GGML"
|
111 |
+ # filename="falcon-7b-instruct.ggccv1.q4_1.bin"
|
112 |
elif(model=="CodeLlama-7B"):
|
113 |
repo_id="TheBloke/CodeLlama-7B-GGUF"
|
114 |
filename="codellama-7b.Q4_K_M.gguf"
|
|
|
155 |
|
156 |
with gr.Blocks() as UI:
|
157 |
|
158 |
+ models=gr.Dropdown(["CodeLlama-7B","Llama-2-13B-chat" ,"Mistral-7B-Instruct-v0.2", "zephyr-7B-beta",
|
159 |
+ "vicuna-7B-v1.5"],value=["CodeLlama-7B","Llama-2-13B-chat", "Mistral-7B-Instruct-v0.2", "zephyr-7B-beta",
|
160 |
"vicuna-7B-v1.5"], label="please select at least one model", info="default model is Mistral-7B-Instruct-v0.2")
|
161 |
textInfo = gr.Textbox(value="current model is Mistral-7B-Instruct-v0.2",label="Model Status");
|
162 |
# Chatbot interface
|