from langchain import PromptTemplate, HuggingFaceHub, LLMChain
from langchain.llms import HuggingFacePipeline
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationChain
import langchain.globals
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
def get_Model():
    tokenizer = AutoTokenizer.from_pretrained("KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b")
    model = AutoModelForCausalLM.from_pretrained("KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b")
    # ConversationChain below expects a LangChain LLM, not a bare transformers
    # model, so wrap model + tokenizer in a text-generation pipeline.
    hf_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=256)
    return HuggingFacePipeline(pipeline=hf_pipeline)
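# A minimal sketch (an assumption, not part of the original code) of loading the
# same checkpoint in half precision with automatic device placement, which is
# usually needed to fit a 7B model on one GPU; requires `torch` and `accelerate`:
# import torch
# model = AutoModelForCausalLM.from_pretrained(
#     "KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b",
#     torch_dtype=torch.float16,
#     device_map="auto",
# )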
# Earlier experiment: a function to connect to Bedrock, later reworked to call
# the HuggingFace Hub instead. Kept commented out for reference.
# def demo_chatbot():
#     # client = boto3.client('bedrock-runtime')
#     template = """Question: {question}
#     Answer: Let's think step by step."""
#     prompt = PromptTemplate(template=template, input_variables=["question"])
#     demo_llm = HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 1e-10})
#     llm_chain = LLMChain(prompt=prompt, llm=demo_llm)
#     question = "When was Google founded?"
#     print(llm_chain.run(question))
#     return demo_llm
# Test the returned LLM with the predict method:
# response = demo_chatbot().predict('What is the temperature in Nuremberg today?')
# print(response)
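# A minimal runnable sketch of what the commented-out demo_chatbot attempts,
# assuming a valid HUGGINGFACEHUB_API_TOKEN is set in the environment.
# demo_hub_chatbot is a hypothetical helper name, not part of the original.
def demo_hub_chatbot(question):
    prompt = PromptTemplate(
        template="Question: {question}\nAnswer: Let's think step by step.",
        input_variables=["question"],
    )
    llm = HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 1e-10})
    return LLMChain(prompt=prompt, llm=llm).run(question)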
def demo_miny_memory(model):
    # ConversationBufferMemory stores the raw chat history and takes no llm or
    # token-limit arguments; for a capped, summarized history you would use
    # ConversationSummaryBufferMemory(llm=model, max_token_limit=512) instead.
    memory = ConversationBufferMemory()
    return memory
def demo_chain(input_text, memory, model):
    llm_conversation = ConversationChain(
        llm=model,
        memory=memory,
        verbose=langchain.globals.get_verbose(),
    )
    chat_reply = llm_conversation.predict(input=input_text)
    return chat_reply
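# Example wiring of the pieces above (a sketch; the sample question is an
# illustration, and downloading the checkpoint requires network access):
if __name__ == "__main__":
    llm = get_Model()
    memory = demo_miny_memory(llm)
    print(demo_chain("Do you have a double room free tonight?", memory, llm))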