Spaces:
Sleeping
Sleeping
abdullahalzubaer
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -16,7 +16,7 @@ API_KEY = os.environ.get('HUGGINGFACE_API_KEY')
|
|
16 |
|
17 |
model_links ={
|
18 |
"Mistral-7B":base_url+"mistralai/Mistral-7B-Instruct-v0.2",
|
19 |
-
"
|
20 |
# "Gemma-2B":base_url+"google/gemma-2b-it",
|
21 |
# "Zephyr-7B-β":base_url+"HuggingFaceH4/zephyr-7b-beta",
|
22 |
# "Llama-2":"meta-llama/Llama-2-7b-chat-hf"
|
@@ -28,9 +28,8 @@ model_info ={
|
|
28 |
{'description':"""The Mistral model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
|
29 |
\nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team as has over **7 billion parameters.** \n""",
|
30 |
'logo':'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},
|
31 |
-
"
|
32 |
-
{'description':"""The
|
33 |
-
\nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-22b/) team as has over **22 billion parameters.** \n""",
|
34 |
'logo':'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'}
|
35 |
|
36 |
# "Gemma-7B":
|
|
|
16 |
|
17 |
model_links ={
|
18 |
"Mistral-7B":base_url+"mistralai/Mistral-7B-Instruct-v0.2",
|
19 |
+
"Phi-3.5":base_url+"microsoft/Phi-3.5-mini-instruct",
|
20 |
# "Gemma-2B":base_url+"google/gemma-2b-it",
|
21 |
# "Zephyr-7B-β":base_url+"HuggingFaceH4/zephyr-7b-beta",
|
22 |
# "Llama-2":"meta-llama/Llama-2-7b-chat-hf"
|
|
|
28 |
{'description':"""The Mistral model is a **Large Language Model (LLM)** that's able to have question and answer interactions.\n \
|
29 |
\nIt was created by the [**Mistral AI**](https://mistral.ai/news/announcing-mistral-7b/) team as has over **7 billion parameters.** \n""",
|
30 |
'logo':'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'},
|
31 |
+
"Phi-3.5":
|
32 |
+
{'description':"""Phi-3.5-mini is a lightweight, state-of-the-art open model built upon datasets used for Phi-3 - synthetic data and filtered publicly available websites - with a focus on very high-quality, reasoning dense data. The model belongs to the Phi-3 model family and supports 128K token context length. The model underwent a rigorous enhancement process, incorporating both supervised fine-tuning, proximal policy optimization, and direct preference optimization to ensure precise instruction adherence and robust safety measures.""",
|
|
|
33 |
'logo':'https://mistral.ai/images/logo_hubc88c4ece131b91c7cb753f40e9e1cc5_2589_256x0_resize_q97_h2_lanczos_3.webp'}
|
34 |
|
35 |
# "Gemma-7B":
|