Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -6,15 +6,15 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
|
|
6 |
import gradio as gr
|
7 |
from threading import Thread
|
8 |
|
9 |
-
MODEL_LIST = ["
|
10 |
HF_TOKEN = os.environ.get("HF_TOKEN", None)
|
11 |
MODEL = os.environ.get("MODEL_ID")
|
12 |
|
13 |
-
TITLE = "<h1><center>
|
14 |
|
15 |
PLACEHOLDER = """
|
16 |
<center>
|
17 |
-
<p>
|
18 |
</center>
|
19 |
"""
|
20 |
|
@@ -75,7 +75,8 @@ def stream_chat(
|
|
75 |
top_k = top_k,
|
76 |
temperature = temperature,
|
77 |
streamer=streamer,
|
78 |
-
pad_token_id =
|
|
|
79 |
)
|
80 |
|
81 |
with torch.no_grad():
|
@@ -109,7 +110,7 @@ with gr.Blocks(css=CSS, theme="soft") as demo:
|
|
109 |
),
|
110 |
gr.Slider(
|
111 |
minimum=128,
|
112 |
-
maximum=
|
113 |
step=1,
|
114 |
value=1024,
|
115 |
label="Max new tokens",
|
|
|
6 |
import gradio as gr
|
7 |
from threading import Thread
|
8 |
|
9 |
+
MODEL_LIST = ["LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct"]
|
10 |
HF_TOKEN = os.environ.get("HF_TOKEN", None)
|
11 |
MODEL = os.environ.get("MODEL_ID")
|
12 |
|
13 |
+
TITLE = "<h1><center>EXAONE-3.0-7.8B-Instruct</center></h1>"
|
14 |
|
15 |
PLACEHOLDER = """
|
16 |
<center>
|
17 |
+
<p>EXAONE-3.0-7.8B-Instruct is a pre-trained and instruction-tuned bilingual (English and Korean) generative model with 7.8 billion parameters</p>
|
18 |
</center>
|
19 |
"""
|
20 |
|
|
|
75 |
top_k = top_k,
|
76 |
temperature = temperature,
|
77 |
streamer=streamer,
|
78 |
+
pad_token_id = 0,
|
79 |
+
eos_token_id = tokenizer.eos_token_id # 361
|
80 |
)
|
81 |
|
82 |
with torch.no_grad():
|
|
|
110 |
),
|
111 |
gr.Slider(
|
112 |
minimum=128,
|
113 |
+
maximum=4096,
|
114 |
step=1,
|
115 |
value=1024,
|
116 |
label="Max new tokens",
|