from langchain_ollama import ChatOllama

# Define the local LLM served by Ollama
local_llm = "llama3.2:3b-instruct-fp16"

# Instantiate the models: one for plain text, one constrained to JSON output
llm = ChatOllama(model=local_llm, temperature=0)
llm_json_mode = ChatOllama(model=local_llm, temperature=0, format="json")

# Test the model; invoke() returns an AIMessage, so print its content
response = llm.invoke("What is AI?")
print(response.content)
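
# A minimal sketch (not from the original) of how llm_json_mode might be used:
# format="json" makes Ollama emit valid JSON, which can be parsed with the
# standard library. The prompt and key names below are illustrative only, and
# the model is not guaranteed to produce these exact keys, so .get() is used.
import json

json_response = llm_json_mode.invoke(
    "Return a JSON object with keys 'question' and 'answer' for: What is AI?"
)
parsed = json.loads(json_response.content)
print(parsed.get("answer"))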