import os

import streamlit as st
from langchain.chains import LLMChain
from langchain.llms import HuggingFaceHub
from langchain.prompts import ChatPromptTemplate
# Initialize the HuggingFaceHub LLM with an access token read from the environment
llm = HuggingFaceHub(
    repo_id="meta-llama/Llama-2-7b-chat-hf",
    huggingfacehub_api_token=os.getenv("HUGGINGFACE_API_KEY"),
    model_kwargs={
        "temperature": 0.7,     # sampling temperature; higher means more varied output
        "max_new_tokens": 512,  # cap on the length of the generated answer
    },
)
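# Note: meta-llama/Llama-2-7b-chat-hf is a gated model on the Hugging Face Hub,
# so the account behind HUGGINGFACE_API_KEY must have been granted access to it.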
# Define the prompt template
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        ("user", "Question: {question}"),
    ]
)
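# ChatPromptTemplate renders these (role, text) pairs into a chat-formatted
# prompt; the {question} placeholder is filled in when the chain runs.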
# Create the LLM chain
chain = LLMChain(llm=llm, prompt=prompt, output_key="response")
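# output_key names the field that holds the generated text in the chain's
# output dict; with a single output, chain.run returns that value directly.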
# Streamlit app interface
st.title('LangChain Demo with LLaMA 2 on Hugging Face')

# User input
input_text = st.text_input("Enter your question:")

# Run the chain and display the response
if input_text:
    try:
        response = chain.run({"question": input_text})
        st.write(response)
    except Exception as e:
        st.error(f"Error: {e}")
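To run the app, save the script (for example as app.py, a placeholder name), set HUGGINGFACE_API_KEY in your environment, and launch it with streamlit run app.py.

HuggingFaceHub and LLMChain are deprecated in recent LangChain releases. Below is a minimal sketch of the same chain using the langchain-huggingface package and the prompt | llm pipe syntax; the class and parameter names are assumptions to verify against your installed version:

import os
from langchain_huggingface import HuggingFaceEndpoint
from langchain.prompts import ChatPromptTemplate

# Sketch only: assumes the langchain-huggingface package is installed and
# that HuggingFaceEndpoint accepts these generation parameters directly.
llm = HuggingFaceEndpoint(
    repo_id="meta-llama/Llama-2-7b-chat-hf",
    huggingfacehub_api_token=os.getenv("HUGGINGFACE_API_KEY"),
    temperature=0.7,
    max_new_tokens=512,
)
prompt = ChatPromptTemplate.from_messages(
    [("system", "You are a helpful assistant."), ("user", "Question: {question}")]
)
chain = prompt | llm  # LCEL: pipe the rendered prompt into the model
response = chain.invoke({"question": "What is LangChain?"})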