# app.py
import streamlit as st
from huggingface_hub import InferenceClient
# Access your Hugging Face token (stored as a secret in the environment)
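# A minimal guard (an addition, not in the original app): stop with a readable
# message if the "Hf_Token" secret has not been configured for this deployment,
# instead of raising a bare KeyError on the lookup below.
if "Hf_Token" not in st.secrets:
    st.error('Missing "Hf_Token" secret; add it in the app settings.')
    st.stop()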
hf_token = st.secrets["Hf_Token"]
# Initialize the Hugging Face Inference Client with the token
client = InferenceClient(token=hf_token)
# Streamlit App UI
st.title("LLM Model Inference with Streamlit")
# Input text box for user prompt
user_input = st.text_input("Enter a text prompt:", "")
# Dropdown for selecting between Llama 3.2 models (these repos are gated:
# the token's account must have accepted Meta's license to use them)
model_id = st.selectbox(
    "Select a Llama model",
    ["meta-llama/Llama-3.2-1B-Instruct", "meta-llama/Llama-3.2-3B-Instruct"],
)
# Button to trigger the inference
if st.button("Generate Text"):
    if user_input:
        with st.spinner(f"Generating text using {model_id}..."):
            # Perform inference using the selected model; text_generation
            # takes the prompt positionally, not as an `inputs` keyword
            response = client.text_generation(user_input, model=model_id)
        st.success("Text generated!")
        st.write(response)
    else:
        st.warning("Please enter a prompt to generate text.")
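
# Illustrative variant (an addition, not in the original): text_generation also
# accepts sampling controls such as max_new_tokens and temperature, e.g.
#   client.text_generation(user_input, model=model_id,
#                          max_new_tokens=256, temperature=0.7)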