Update app.py
app.py CHANGED
@@ -1,5 +1,5 @@
 import gradio as gr
-from transformers import AutoProcessor,
+from transformers import AutoProcessor, AutoModel, TextIteratorStreamer
 from threading import Thread
 import re
 import time
@@ -10,8 +10,12 @@ import spaces
 #subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
 
 
-processor = AutoProcessor.from_pretrained(
-
+processor = AutoProcessor.from_pretrained(
+    "rhymes-ai/Aria",
+    revision="0e2a8319",
+)
+model = AutoModel.from_pretrained("rhymes-ai/Aria",
+    revision="f7c92bba",
     torch_dtype=torch.bfloat16,
     #_attn_implementation="flash_attention_2"
 ).to("cuda")
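Net effect of the second hunk: the processor and the model are now loaded separately from the rhymes-ai/Aria repo, each pinned to its own revision. For reference, a minimal sketch of the resulting loading block after this commit; it assumes, as elsewhere in app.py, that torch is imported and a CUDA device is available. Note that depending on the installed transformers version, a custom-code checkpoint like this one may additionally require trust_remote_code=True, which this commit does not add.

import torch
from transformers import AutoProcessor, AutoModel, TextIteratorStreamer

# Pin processor and weights to fixed upstream commits so the Space
# keeps working even if the model repo changes later.
processor = AutoProcessor.from_pretrained(
    "rhymes-ai/Aria",
    revision="0e2a8319",
)
model = AutoModel.from_pretrained(
    "rhymes-ai/Aria",
    revision="f7c92bba",
    torch_dtype=torch.bfloat16,
    # _attn_implementation="flash_attention_2"  # optional flash-attn path, left disabled
).to("cuda")

Pinning revision on both calls is the point of the change: the processor config and the model weights come from different commits of the same repo, so each is fixed independently.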