import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Set the title of the app
st.title('LLaMA2Glenda')

# Load the model and tokenizer once and cache them, so Streamlit does not
# re-download and reload the 7B weights on every rerun of the script
@st.cache_resource
def load_model_and_tokenizer():
    model = AutoModelForCausalLM.from_pretrained("tminh/llama-2-7b-glenda")
    tokenizer = AutoTokenizer.from_pretrained("TinyPixel/Llama-2-7B-bf16-sharded")
    return model, tokenizer

model, tokenizer = load_model_and_tokenizer()

# Create a text input for the prompt
prompt = st.text_input('Enter your prompt:')

# Create a button to trigger the inference
if st.button('Generate Answer'):
    # Run the text-generation pipeline, wrapping the prompt in the
    # LLaMA-2 instruction template ([INST] ... [/INST])
    pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=200)
    result = pipe(f"[INST] {prompt} [/INST]")
    # Display the generated text (includes the prompt echoed back)
    st.write(result[0]['generated_text'])
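
# A minimal usage note: assuming this script is saved as app.py (a
# hypothetical filename), launch it with Streamlit's CLI:
#   streamlit run app.py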