import os

import together_gradio

from utils import get_app

# Build a multimodal chat demo over Together AI's hosted models via the
# together-gradio registry; get_app is this Space's local helper from utils.py.
demo = get_app(
    models=[
        "meta-llama/Llama-Vision-Free",
        "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
        "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
        "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
        "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
        "meta-llama/Meta-Llama-3-8B-Instruct-Turbo",
        "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
        "meta-llama/Llama-3.2-3B-Instruct-Turbo",
        "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
        "meta-llama/Meta-Llama-3-70B-Instruct-Lite",
        "meta-llama/Llama-3-8b-chat-hf",
        "meta-llama/Llama-3-70b-chat-hf",
        "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
        "Qwen/Qwen2.5-Coder-32B-Instruct",
        "microsoft/WizardLM-2-8x22B",
        "google/gemma-2-27b-it",
        "google/gemma-2-9b-it",
        "databricks/dbrx-instruct",
        "mistralai/Mixtral-8x7B-Instruct-v0.1",
        "mistralai/Mixtral-8x22B-Instruct-v0.1",
        "Qwen/Qwen2.5-7B-Instruct-Turbo",
        "Qwen/Qwen2.5-72B-Instruct-Turbo",
        "Qwen/Qwen2-72B-Instruct",
        "deepseek-ai/deepseek-llm-67b-chat",
        "google/gemma-2b-it",
        "Gryphe/MythoMax-L2-13b",
        "meta-llama/Llama-2-13b-chat-hf",
        "mistralai/Mistral-7B-Instruct-v0.1",
        "mistralai/Mistral-7B-Instruct-v0.2",
        "mistralai/Mistral-7B-Instruct-v0.3",
        "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
        "togethercomputer/StripedHyena-Nous-7B",
        "upstage/SOLAR-10.7B-Instruct-v1.0",
    ],
    default_model="meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
    src=together_gradio.registry,
    # Only ask for a Together API key in the UI when none is set in the environment.
    accept_token=not os.getenv("TOGETHER_API_KEY"),
    multimodal=True,
)

if __name__ == "__main__":
    demo.launch()
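The get_app helper comes from this Space's local utils.py, which is not shown in this file. As a rough sketch of what it might do (an assumption, not the Space's actual implementation), the registry pattern boils down to handing a model name to gr.load with together_gradio.registry as the source; the real helper presumably also adds a dropdown for switching between the listed models:

# Hypothetical minimal stand-in for utils.get_app (not the Space's real code).
import gradio as gr
import together_gradio

def get_app(models, default_model, src, accept_token=False, multimodal=False):
    # Load a single registry-backed chat UI for the default model; `models` is
    # ignored in this sketch, whereas the real helper presumably builds a selector
    # over it. Extra keyword arguments are assumed to be forwarded by gr.load to
    # the registry callable.
    return gr.load(
        name=default_model,
        src=src,
        accept_token=accept_token,
        multimodal=multimodal,
    )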