import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the fine-tuned model and tokenizer
model_path = "path/to/your/fine-tuned-model"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path)

# Streamlit app layout
st.title("🤖 Fine-tuned Arabic Mistral Model 🧙")

# Input text area for user query
user_query = st.text_area("✨ Enter your query in Arabic:", height=100)

# Sliders for temperature and max length (ranges/defaults below are
# placeholders standing in for the values from your original code)
temperature = st.slider("Temperature", min_value=0.1, max_value=2.0, value=0.7, step=0.1)
max_length = st.slider("Max length", min_value=32, max_value=1024, value=256, step=32)

# Button to trigger the query
if st.button("🪄 Generate Response"):
    if user_query:
        # Tokenize input and generate response
        inputs = tokenizer(user_query, return_tensors="pt")
        outputs = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,
            max_length=max_length,
            do_sample=True,  # temperature only takes effect when sampling
            temperature=temperature,
        )
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Display the response
        st.markdown("🔮 Response from Fine-tuned Arabic Model:")
        st.write(response)

        # Save query and response to session state (as in your original code)
    else:
        st.write("🚨 Please enter a query.")

# History display and clear button (as in your original code)
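
# --- Minimal sketch of the elided session-state pieces ---
# The two "(as in your original code)" comments above stand in for code that
# was not shown in this snippet. Below is one plausible implementation,
# assuming a st.session_state key named "history" that holds (query, response)
# pairs; the key name, widget labels, and layout are assumptions, not taken
# from the original.

# Inside the `if user_query:` branch, after displaying the response, the
# original would append the pair to the history, e.g.:
#     st.session_state.setdefault("history", []).append((user_query, response))

# History display and clear button:
if st.session_state.get("history"):
    st.markdown("📜 Query History:")
    for past_query, past_response in st.session_state["history"]:
        st.write(f"**Q:** {past_query}")
        st.write(f"**A:** {past_response}")
    if st.button("🧹 Clear History"):
        st.session_state["history"] = []
        st.rerun()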