from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_community.llms import Ollama
import streamlit as st
import os
from dotenv import load_dotenv
# Load environment variables from a local .env file
load_dotenv()
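# A minimal .env for this app might look like the following; both values are
# placeholders, and OLLAMA_SERVER_URL only needs to be set for a remote server:
#   LANGCHAIN_API_KEY=<your-langsmith-api-key>
#   OLLAMA_SERVER_URL=http://localhost:11434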
# Enable LangSmith tracing; default to an empty key so a missing
# LANGCHAIN_API_KEY doesn't raise a TypeError on assignment
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_API_KEY"] = os.getenv("LANGCHAIN_API_KEY", "")
# Prompt template: a system instruction plus the user's question
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant. Please respond to the user's queries."),
        ("user", "Question: {question}")
    ]
)
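# For illustration: prompt.format_messages(question="What is LangChain?")
# yields a SystemMessage plus a HumanMessage with content
# "Question: What is LangChain?"; the chain below does this implicitly.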
# Streamlit app
st.title("Langchain Demo With Ollama Llama2 API")
input_text = st.text_input("Search the topic you want")
# Ollama Llama2 LLM with local or remote settings; point OLLAMA_SERVER_URL
# at a reachable Ollama instance if one is not running locally
llm = Ollama(
    model="llama2",
    base_url=os.getenv("OLLAMA_SERVER_URL", "http://localhost:11434")  # the parameter is base_url, not server_url
)
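# Assumes the llama2 model has already been pulled into the Ollama server,
# e.g. with `ollama pull llama2`; otherwise requests will fail at runtime.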
output_parser = StrOutputParser()
chain = prompt | llm | output_parser
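# LCEL sketch: invoke() fills {question} in the prompt, sends the formatted
# prompt to the Ollama model, and StrOutputParser passes the reply through
# as a plain string suitable for st.write().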
# Display result when user inputs text
if input_text:
    try:
        response = chain.invoke({"question": input_text})
        st.write(response)
    except Exception as e:
        st.error(f"Error: {e}")