update demo
Files changed:
- gradio_demo/seed_llama_flask.py (+1 -1)
- start.sh (+0 -1)
gradio_demo/seed_llama_flask.py
CHANGED
@@ -51,7 +51,7 @@ class Arguments:
     llm_device: Optional[str] = field(default='cuda:0', metadata={"help": "llm device"})
     tokenizer_device: Optional[str] = field(default='cuda:0', metadata={"help": "tokenizer device"})
     offload_encoder: Optional[bool] = field(default=False, metadata={"help": "offload image tokenizer"})
-    offload_decoder: Optional[bool] = field(default=
+    offload_decoder: Optional[bool] = field(default=False, metadata={"help": "offload image tokenizer"})
 
 
 parser = transformers.HfArgumentParser(Arguments)
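For context, a minimal sketch (not part of this commit, with the Arguments dataclass trimmed to the fields visible in the hunk) of how transformers.HfArgumentParser consumes these fields: each field becomes a command-line flag, so with the new default=False the image decoder stays on the GPU unless --offload_decoder is passed explicitly, as start.sh does below.

# Minimal, self-contained sketch; the real seed_llama_flask.py defines more
# fields than shown here.
from dataclasses import dataclass, field
from typing import Optional

import transformers


@dataclass
class Arguments:
    llm_device: Optional[str] = field(default='cuda:0', metadata={"help": "llm device"})
    tokenizer_device: Optional[str] = field(default='cuda:0', metadata={"help": "tokenizer device"})
    offload_encoder: Optional[bool] = field(default=False, metadata={"help": "offload image tokenizer"})
    offload_decoder: Optional[bool] = field(default=False, metadata={"help": "offload image tokenizer"})


parser = transformers.HfArgumentParser(Arguments)
# Boolean fields become optional flags: passing `--offload_decoder` with no
# value sets it to True; omitting it keeps the new default of False.
args, = parser.parse_args_into_dataclasses()
print(args)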
start.sh
CHANGED
@@ -6,5 +6,4 @@ python3 gradio_demo/seed_llama_flask.py \
     --port 7890 \
     --llm_device cuda:0 \
     --tokenizer_device cuda:0 \
-    --offload_encoder \
     --offload_decoder
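The combined effect of the two files: encoder offloading is no longer requested at launch, while decoder offloading remains opt-in via the flag start.sh still passes. A small, self-contained sketch of that flag behavior (the dataclass name and trimmed field list here are assumptions, not code from the repo):

# Sketch of how the updated start.sh flags parse, assuming only the two
# boolean offload fields; --offload_encoder is no longer passed, so it
# keeps its default of False.
from dataclasses import dataclass, field
from typing import Optional

import transformers


@dataclass
class OffloadArgs:  # hypothetical, trimmed stand-in for Arguments
    offload_encoder: Optional[bool] = field(default=False)
    offload_decoder: Optional[bool] = field(default=False)


parser = transformers.HfArgumentParser(OffloadArgs)
# Mirror the updated start.sh invocation, which only keeps --offload_decoder.
(args,) = parser.parse_args_into_dataclasses(args=["--offload_decoder"])
assert args.offload_encoder is False and args.offload_decoder is True
print(args)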