langchain-QA / app.py
import os

import streamlit as st
from dotenv import load_dotenv
from langchain.llms import OpenAI

# Load environment variables (including the OpenAI API key) from a local .env file.
load_dotenv()
def get_openai_response(question):
    """Send the question to the OpenAI completion model and return its answer."""
    print(question)  # log the incoming question to the console
    # The API key is read from the OPEN_API_KEY entry defined in the .env file.
    llm = OpenAI(
        openai_api_key=os.getenv("OPEN_API_KEY"),
        model_name="text-davinci-003",
        temperature=0.6,
    )
    response = llm(question)
    return response
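
# Quick-check usage (an assumption, not part of the Streamlit flow): the helper
# can be called directly, e.g.
#   answer = get_openai_response("What is LangChain?")
#   print(answer)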
# Basic Streamlit UI: a text box for the question and a button to submit it.
st.set_page_config(page_title="QA demo")
st.header("LangChain APP")

user_question = st.text_input("Input: ", key="input")
submit = st.button("Ask the question")

# Call the model only after the button is pressed, so the API is not hit on every rerun.
if submit:
    response = get_openai_response(user_question)
    st.subheader("The response is: ")
    st.write(response)
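
# --- Running the app (a minimal sketch; file and package names are assumptions) ---
# The code above expects a .env file next to app.py that defines the API key, e.g.:
#   OPEN_API_KEY=sk-...your-openai-key...
# With the dependencies installed (streamlit, langchain, openai, python-dotenv),
# the demo can be launched with:
#   streamlit run app.py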