import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer


# Load the model and tokenizer once; st.cache_resource keeps them in memory
# across Streamlit reruns so the weights are not reloaded on every interaction.
@st.cache_resource
def load_model():
    model_name = "microsoft/Phi-3-mini-4k-instruct"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return model, tokenizer


def generate_text(prompt, model, tokenizer):
    inputs = tokenizer(prompt, return_tensors="pt")
    # do_sample=True is required for temperature/top_k/top_p to take effect;
    # without it, generate() decodes greedily and ignores those settings.
    # max_new_tokens caps only the generated continuation, whereas max_length
    # would also count the prompt tokens toward the limit.
    outputs = model.generate(
        inputs.input_ids,
        attention_mask=inputs.attention_mask,
        max_new_tokens=300,
        do_sample=True,
        temperature=0.7,
        top_k=50,
        top_p=0.95,
    )
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response


# Streamlit app
def main():
    st.title("Instruction-Following Model: Phi-3-Mini-4k-Instruct")
    st.write("Ask a question or give an instruction to get a response.")

    model, tokenizer = load_model()

    prompt = st.text_input(
        "Enter your prompt:",
        "Explain the concept of machine learning in simple terms.",
    )

    if st.button("Generate Response"):
        response = generate_text(prompt, model, tokenizer)
        st.write(response)


# Launch with: streamlit run <this_file>.py
if __name__ == "__main__":
    main()