kabbi committed on
Commit
61f72c5
1 Parent(s): cc17248

Update app.py

Files changed (1)
  1. app.py +8 -2
app.py CHANGED
@@ -43,6 +43,7 @@ model = AutoModelForCausalLM.from_pretrained(
 
 @spaces.GPU()
 def stream_chat(
+    system_prompt: str,
     message: str,
     history: list,
     temperature: float = 0.3,
@@ -54,7 +55,7 @@ def stream_chat(
     print(f'message: {message}')
     print(f'history: {history}')
 
-    conversation = [{"role": "system", "content": "You are EXAONE model from LG AI Research, a helpful assistant."}]
+    conversation = [{"role": "system", "content": system_prompt}]
     for prompt, answer in history:
         conversation.extend([
             {"role": "user", "content": prompt},
@@ -105,6 +106,11 @@ with gr.Blocks(css=CSS, theme="soft") as demo:
         fill_height=True,
         additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
         additional_inputs=[
+            gr.Textbox(
+                value="You are EXAONE model from LG AI Research, a helpful assistant.",
+                label="System Prompt",
+                render=False,
+            ),
             gr.Slider(
                 minimum=0,
                 maximum=1,
@@ -117,7 +123,7 @@ with gr.Blocks(css=CSS, theme="soft") as demo:
                 minimum=128,
                 maximum=4096,
                 step=1,
-                value=256,
+                value=1024,
                 label="Max new tokens",
                 render=False,
             ),
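For orientation: in Gradio, components listed in additional_inputs are rendered in the Parameters accordion, and their current values are passed to the chat function in addition to message and history. Below is a minimal, self-contained sketch of that wiring under those assumptions; the stream_chat stub, its parameter order, and the echoed reply are placeholders and do not reproduce this Space's full app.py or its model call.

import gradio as gr

# Hypothetical stub: echoes the settings instead of running the EXAONE model.
def stream_chat(message, history, system_prompt, max_new_tokens):
    return f"[system={system_prompt!r}, max_new_tokens={max_new_tokens}] {message}"

demo = gr.ChatInterface(
    fn=stream_chat,
    fill_height=True,
    additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
    additional_inputs=[
        # Same System Prompt textbox and Max new tokens default as in the commit.
        gr.Textbox(
            value="You are EXAONE model from LG AI Research, a helpful assistant.",
            label="System Prompt",
            render=False,
        ),
        gr.Slider(minimum=128, maximum=4096, step=1, value=1024,
                  label="Max new tokens", render=False),
    ],
)

if __name__ == "__main__":
    demo.launch()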