Madhuri123 committed
Commit 5254778 · verified · 1 Parent(s): 194b12e

Update app.py

Files changed (1)
1. app.py +15 -16
app.py CHANGED
@@ -18,23 +18,22 @@ user_input = st.chat_input("Enter a text prompt:")
 client = InferenceClient(token=hf_token)
 
 # Button to trigger the inference
-if st.button("Generate Text"):
-    if user_input:
-        with st.spinner(f"Generating text using {model_id}..."):
-            # Perform inference using the selected model
-            response = client.chat.completions.create(
-                model=model_id,
-                messages=[
-                    {"role": "system", "content": "You are a question answering assistant."},
-                    {"role": "user", "content": user_input}
-                ],
-                max_tokens=500,
-                stream=False
-            )
-            st.success("Text generated!")
-            st.write(response['choices'][0]['message']['content'])
-    else:
-        st.warning("Please enter a prompt to generate text.")
+if user_input:
+    with st.spinner(f"Generating text using {model_id}..."):
+        # Perform inference using the selected model
+        response = client.chat.completions.create(
+            model=model_id,
+            messages=[
+                {"role": "system", "content": "You are a question answering assistant."},
+                {"role": "user", "content": user_input}
+            ],
+            max_tokens=500,
+            stream=False
+        )
+        st.success("Text generated!")
+        st.write(response['choices'][0]['message']['content'])
+else:
+    st.warning("Please enter a prompt to generate text.")
 
 
 
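For context, below is a minimal sketch of how this part of app.py reads after the commit: the "Generate Text" button is removed, so inference fires as soon as st.chat_input returns a prompt. The imports, the HF_TOKEN lookup via os.environ, and the model_id value are assumptions added only to make the sketch self-contained; the real file presumably defines hf_token and model_id in the unchanged lines above this hunk.

import os

import streamlit as st
from huggingface_hub import InferenceClient

# Assumptions for this sketch: the token source and model_id are placeholders,
# not the values used in the actual Space.
hf_token = os.environ.get("HF_TOKEN")
model_id = "HuggingFaceH4/zephyr-7b-beta"  # hypothetical model id for illustration

user_input = st.chat_input("Enter a text prompt:")
client = InferenceClient(token=hf_token)

# Inference is now driven directly by the chat input instead of a button.
if user_input:
    with st.spinner(f"Generating text using {model_id}..."):
        # Perform inference using the selected model
        response = client.chat.completions.create(
            model=model_id,
            messages=[
                {"role": "system", "content": "You are a question answering assistant."},
                {"role": "user", "content": user_input},
            ],
            max_tokens=500,
            stream=False,
        )
        st.success("Text generated!")
        # Attribute access on the returned chat completion; the commit reads the
        # same field with a dict-style lookup.
        st.write(response.choices[0].message.content)
else:
    st.warning("Please enter a prompt to generate text.")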