awacke1 committed on
Commit
3bb2801
1 Parent(s): 9849bac

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -10
app.py CHANGED
@@ -227,18 +227,19 @@ def StreamLLMChatResponse(prompt):
227
  except:
228
  st.write('Llama model is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).')
229
 
230
- def query(filename):
231
- with open(filename, "rb") as f:
232
- data = f.read
233
- st.write('Posting request to model ' + API_URL_IE)
234
- response = requests.post(API_URL_IE, headers=headers, data=data)
235
- return response.json()
236
 
237
  # 4. Run query with payload
238
- #def query(payload):
239
- # response = requests.post(API_URL, headers=headers, json=payload)
240
- # st.markdown(response.json())
241
- # return response.json()
 
242
 
243
  def get_output(prompt):
244
  return query({"inputs": prompt})
 
227
  except:
228
  st.write('Llama model is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).')
229
 
230
+ #def query(filename):
231
+ # with open(filename, "rb") as f:
232
+ # data = f.read
233
+ # st.write('Posting request to model ' + API_URL_IE)
234
+ # response = requests.post(API_URL_IE, headers=headers, data=data)
235
+ # return response.json()
236
 
237
  # 4. Run query with payload
238
+ def query(payload):
239
+ st.write('Posting request to model ' + API_URL_IE)
240
+ response = requests.post(API_URL, headers=headers, json=payload)
241
+ st.markdown(response.json())
242
+ return response.json()
243
 
244
  def get_output(prompt):
245
  return query({"inputs": prompt})