Rename streamlit_s1.py to app.py

streamlit_s1.py → app.py (renamed, +6 -2)
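Context for the rename: a Streamlit Space runs app.py as its entry point by default (the file can be overridden with app_file in the README front matter), which is presumably why the script is renamed here.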
```diff
@@ -2,7 +2,10 @@ import streamlit as st
 from huggingface_hub import InferenceClient
 
 # Access your Hugging Face token (stored as a secret in the environment)
+hf_token = st.secrets["HF_Token"]
 
+# Initialize the Hugging Face Inference Client with the token
+client = InferenceClient(token=hf_token)
 
 # Streamlit App UI
 st.title("LLM Model Inference with Streamlit")
@@ -15,14 +18,15 @@ model_id = st.selectbox(
     "Select a LLaMA model",
     ["meta-llama/Meta-Llama-3-2B", "meta-llama/Meta-Llama-3-3B"]  # Replace with the correct model names
 )
-
+
 # Button to trigger the inference
 if st.button("Generate Text"):
     if user_input:
         with st.spinner(f"Generating text using {model_id}..."):
             # Perform inference using the selected model
-            response = client.text_generation(inputs=user_input)
+            response = client.text_generation(model=model_id, inputs=user_input)
             st.success("Text generated!")
             st.write(response)
     else:
         st.warning("Please enter a prompt to generate text.")
+
```
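For reference, here is a sketch of the complete app.py as it stands after this commit. The diff shows only the changed hunks, so the prompt widget (a st.text_area bound to user_input, consistent with the if user_input: check) and the max_new_tokens value are assumptions. One caveat about the committed call itself: huggingface_hub's InferenceClient.text_generation takes the prompt as its first positional argument (prompt) and has no inputs= keyword, so as committed the call would likely raise a TypeError; the sketch adjusts that line.

```python
import streamlit as st
from huggingface_hub import InferenceClient

# Access your Hugging Face token (stored as a secret in the environment)
hf_token = st.secrets["HF_Token"]

# Initialize the Hugging Face Inference Client with the token
client = InferenceClient(token=hf_token)

# Streamlit App UI
st.title("LLM Model Inference with Streamlit")

# Prompt widget: not visible in the diff, so this line is an assumption
user_input = st.text_area("Enter your prompt")

model_id = st.selectbox(
    "Select a LLaMA model",
    ["meta-llama/Meta-Llama-3-2B", "meta-llama/Meta-Llama-3-3B"]  # Replace with the correct model names
)

# Button to trigger the inference
if st.button("Generate Text"):
    if user_input:
        with st.spinner(f"Generating text using {model_id}..."):
            # text_generation takes the prompt positionally; the committed
            # inputs= keyword is not part of the InferenceClient API
            response = client.text_generation(user_input, model=model_id, max_new_tokens=256)
            st.success("Text generated!")
            st.write(response)
    else:
        st.warning("Please enter a prompt to generate text.")
```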
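A deployment note on the new st.secrets["HF_Token"] line: on a Streamlit Space, a secret named exactly HF_Token must be defined in the Space settings (Hugging Face exposes Space secrets to st.secrets), and for local runs the same key would live in a .streamlit/secrets.toml file; if the key is absent, the app fails at startup when that lookup is evaluated. Separately, the gated meta-llama models require the token's account to have been granted access, or the Inference API will return an authorization error, in addition to the placeholder model IDs needing to be replaced as the in-code comment already notes.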