Illia56 committed on
Commit
2e97054
·
1 Parent(s): 9dc5d2a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -3
app.py CHANGED
@@ -11,15 +11,22 @@ a Llama 2 model with 70B parameters fine-tuned for chat instructions.
11
  # Initialize client
12
  client = Client("https://ysharma-explore-llamav2-with-tgi.hf.space/")
13
 
 
 
 
 
 
 
 
14
  # Prediction function
15
  def predict(message, system_prompt="", temperature=0.9, max_new_tokens=4096):
16
  with st.status("Requesting LLama-2"):
17
  st.write("Requesting API")
18
  response = client.predict(
19
  message, # str in 'Message' Textbox component
20
- system_prompt, # str in 'Optional system prompt' Textbox component
21
- temperature, # int | float (numeric value between 0.0 and 1.0)
22
- max_new_tokens, # int | float (numeric value between 0 and 4096)
23
  0.3, # int | float (numeric value between 0.0 and 1)
24
  1, # int | float (numeric value between 1.0 and 2.0)
25
  api_name="/chat"
 
11
  # Initialize client
12
  client = Client("https://ysharma-explore-llamav2-with-tgi.hf.space/")
13
 
14
+
15
+ with st.sidebar:
16
+ system_promptSide = st.text_input("Optional system prompt:")
17
+ temperatureSide = st.slider("Temperature", min_value=0.0, max_value=1.0, value=0.9, step=0.05)
18
+ max_new_tokensSide = st.slider("Max new tokens", min_value=0, max_value=4096, value=4096, step=32)
19
+
20
+
21
  # Prediction function
22
  def predict(message, system_prompt="", temperature=0.9, max_new_tokens=4096):
23
  with st.status("Requesting LLama-2"):
24
  st.write("Requesting API")
25
  response = client.predict(
26
  message, # str in 'Message' Textbox component
27
+ system_promptSide, # str in 'Optional system prompt' Textbox component
28
+ temperatureSide, # int | float (numeric value between 0.0 and 1.0)
29
+ max_new_tokensSide, # int | float (numeric value between 0 and 4096)
30
  0.3, # int | float (numeric value between 0.0 and 1)
31
  1, # int | float (numeric value between 1.0 and 2.0)
32
  api_name="/chat"