KvrParaskevi
committed on
Update chatbot_bedrock.py
chatbot_bedrock.py +5 -3
chatbot_bedrock.py
CHANGED
@@ -1,14 +1,16 @@
 import os
-from langchain import PromptTemplate, HuggingFaceHub, LLMChain
+#from langchain import PromptTemplate, HuggingFaceHub, LLMChain
 from langchain.memory import ConversationBufferMemory
 from langchain.chains import ConversationChain
 import langchain.globals
 from transformers import AutoModelForCausalLM, AutoTokenizer
+import streamlit as st
 
-
+@st.cache_resource
+def load_model():
 tokenizer = AutoTokenizer.from_pretrained("KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b")
 model = AutoModelForCausalLM.from_pretrained("KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b")
-return model
+return tokenizer,model
 
 
 #Write function to connect to Bedrock
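
For context, a minimal sketch of how the cached loader introduced in this commit might be consumed elsewhere in the Streamlit app. The prompt text, generation parameters, and the single-turn flow below are illustrative assumptions, not part of the commit:

import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

@st.cache_resource  # load the weights once per Streamlit process and reuse them across reruns
def load_model():
    tokenizer = AutoTokenizer.from_pretrained("KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b")
    model = AutoModelForCausalLM.from_pretrained("KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b")
    return tokenizer, model

tokenizer, model = load_model()

# Illustrative single-turn generation; prompt and max_new_tokens are assumed values
prompt = "Do you have a double room available for tonight?"
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
st.write(tokenizer.decode(outputs[0], skip_special_tokens=True))

With @st.cache_resource, repeated script reruns (which Streamlit triggers on every user interaction) reuse the already-loaded tokenizer and model instead of reloading the 7B checkpoint each time.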