lkm2835 committed on
Commit
ca87bc9
β€’
1 Parent(s): 16065eb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -20,9 +20,9 @@ DESCRIPTION = """\
20
  <center>👋 For more details, please check <a href=https://www.lgresearch.ai/blog/view?seq=507>our blog</a> or <a href=https://arxiv.org/abs/2412.04862>technical report</a></center>
21
  """
22
 
23
- MAX_MAX_NEW_TOKENS = 4096
24
  DEFAULT_MAX_NEW_TOKENS = 512
25
- MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "3840"))
26
 
27
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
28
 
@@ -93,7 +93,7 @@ def generate(
93
  BOT_AVATAR = "EXAONE_logo.png"
94
 
95
  chatbot = gr.Chatbot(
96
- label="EXAONE-3.0-7.8B-Instruct",
97
  avatar_images=[None, BOT_AVATAR],
98
  layout="bubble",
99
  bubble_full_width=False
@@ -111,7 +111,7 @@ chat_interface = gr.ChatInterface(
111
  gr.Slider(
112
  label="Max new tokens",
113
  minimum=1,
114
- maximum=MAX_MAX_NEW_TOKENS,
115
  step=1,
116
  value=DEFAULT_MAX_NEW_TOKENS,
117
  ),
 
20
  <center>👋 For more details, please check <a href=https://www.lgresearch.ai/blog/view?seq=507>our blog</a> or <a href=https://arxiv.org/abs/2412.04862>technical report</a></center>
21
  """
22
 
23
+ MAX_NEW_TOKENS = 2048
24
  DEFAULT_MAX_NEW_TOKENS = 512
25
+ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "16384"))
26
 
27
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
28
 
 
93
  BOT_AVATAR = "EXAONE_logo.png"
94
 
95
  chatbot = gr.Chatbot(
96
+ label="EXAONE-3.5-7.8B-Instruct",
97
  avatar_images=[None, BOT_AVATAR],
98
  layout="bubble",
99
  bubble_full_width=False
 
111
  gr.Slider(
112
  label="Max new tokens",
113
  minimum=1,
114
+ maximum=MAX_NEW_TOKENS,
115
  step=1,
116
  value=DEFAULT_MAX_NEW_TOKENS,
117
  ),