from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
import os
import streamlit as st

# Load OPENAI_API_KEY from a local .env file; never hard-code API keys in source.
load_dotenv()


def get_openai_response(question):
    # gpt-4 is a chat model, so ChatOpenAI is used rather than the legacy
    # completions-style OpenAI class.
    llm = ChatOpenAI(model='gpt-4', temperature=0.6)
    response = llm.invoke(question)
    return response.content


st.set_page_config(page_title='QA demo')
st.header('LangChain App')

user_input = st.text_input('Input: ', key='input')
submit = st.button('Ask the question')

# Only call the model after the button is pressed and the input is non-empty,
# so the API is not hit on every Streamlit rerun.
if submit and user_input:
    st.subheader('The response is:')
    st.write(get_openai_response(user_input))
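# To try the app locally, run it through the Streamlit CLI (assuming the file
# is saved as app.py; the filename is illustrative):
#   streamlit run app.py
# OPENAI_API_KEY should be defined in a .env file next to the script so that
# load_dotenv() can pick it up.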