import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
from datasets import load_dataset

# Authenticate with Hugging Face before running this script, since the Llama
# models are gated: run `huggingface-cli login` in your terminal first.

# Load LLaMA model and tokenizer from Hugging Face
model_name = "meta-llama/Llama-3.2-1B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Load financial dataset to enrich responses
dataset = load_dataset("gbharti/finance-alpaca")

# Helper function to extract dataset info (optional enhancement)
def get_insight_from_dataset():
    sample = dataset["train"].shuffle(seed=42).select([0])[0]
    return f"Example insight: {sample['text']}"

# Function to process user input and generate financial advice
def financial_advisor(user_input):
    # Tokenize the user input
    inputs = tokenizer(user_input, return_tensors="pt")

    # Generate a response with the LLaMA model; max_new_tokens bounds the
    # generated continuation rather than the prompt plus continuation
    outputs = model.generate(
        **inputs,
        max_new_tokens=256,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )

    # Decode only the newly generated tokens, skipping the echoed prompt
    advice = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True
    )

    # Get additional insight from dataset to enrich advice (optional)
    insight = get_insight_from_dataset()

    # Combine the advice and the insight
    full_response = f"Advice: {advice}\n\n{insight}"
    return full_response

# Create Gradio Interface
interface = gr.Interface(
    fn=financial_advisor,
    inputs=gr.Textbox(lines=5, placeholder="Enter your financial question..."),
    outputs="text",
    title="AI Financial Advisor",
    description="Ask me anything related to finance, investments, savings, and more.",
    examples=[
        "Should I invest in stocks or real estate?",
        "How can I save more money on a tight budget?",
        "What are some good investment options for retirement?",
    ],
)

# Launch the Gradio app
if __name__ == "__main__":
    interface.launch()
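
# Optional: a quick way to sanity-check the pipeline without launching the web
# UI is to call the handler directly from a REPL (a sketch, not part of the
# original script; it assumes this file is saved as app.py, a hypothetical
# filename, and the question shown is only an illustrative example):
#
#   from app import financial_advisor  # safe: launch() is guarded by __main__
#   print(financial_advisor("Should I pay off debt before investing?"))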