# LLM loader helpers: OpenAI chat model, local Llama-2 pipeline, and
# HuggingFace Hub hosted models.
import os
import torch
from langchain import HuggingFacePipeline
from transformers import AutoTokenizer
import transformers
from langchain import HuggingFaceHub
def get_openai_chat_model(API_key):
    """Return a LangChain ``OpenAI`` LLM configured with the given API key.

    Args:
        API_key: OpenAI API key; exported via the ``OPENAI_API_KEY``
            environment variable, which the OpenAI client reads.

    Returns:
        A ``langchain.llms.OpenAI`` instance.

    Raises:
        ImportError: if the ``langchain``/``openai`` dependencies are
            not installed.
    """
    try:
        from langchain.llms import OpenAI
    except ImportError as err:
        # Python 3 cannot raise a plain string (it is a TypeError) —
        # raise a real exception and chain the original cause.
        raise ImportError(
            f"{err}, unable to load OpenAI. "
            "Please install openai and set OPENAI_API_KEY"
        ) from err
    os.environ["OPENAI_API_KEY"] = API_key
    return OpenAI()
def get_llama_model(temperature=0, api_key=None, max_tokens=2048):
    """Load Llama-2-7b-chat locally through a transformers text-generation
    pipeline and wrap it in a LangChain ``HuggingFacePipeline``.

    Args:
        temperature: sampling temperature forwarded via ``model_kwargs``.
        api_key: accepted for interface compatibility; not used here —
            NOTE(review): gated-model auth appears to rely on the ambient
            HF credentials (e.g. ``huggingface-cli login``), confirm.
        max_tokens: generation length cap, passed as the pipeline's
            ``max_length`` (previously hardcoded to 1000 and ignored).

    Returns:
        A ``HuggingFacePipeline`` LLM instance.

    Raises:
        RuntimeError: if the model cannot be loaded (e.g. the user is not
            authorized to access the gated checkpoint).
    """
    model_id = "meta-llama/Llama-2-7b-chat-hf"
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        pipeline = transformers.pipeline(
            "text-generation",
            model=model_id,
            tokenizer=tokenizer,
            torch_dtype=torch.bfloat16,
            trust_remote_code=True,
            device_map="auto",
            # honor the caller-supplied cap instead of a hardcoded 1000
            max_length=max_tokens,
            eos_token_id=tokenizer.eos_token_id,
        )
        return HuggingFacePipeline(
            pipeline=pipeline,
            model_kwargs={"temperature": temperature},
        )
    except Exception as err:
        # Narrowed from a bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit); a plain string cannot be raised
        # in Python 3, so raise a real exception chained to the cause.
        raise RuntimeError(
            "User not authorized to access the Model"
        ) from err
def get_model_from_hub(api_key, temperature=0.1, max_tokens=2048,
                       model_id="meta-llama/Llama-2-7b-chat-hf"):
    """Return a LangChain LLM backed by the hosted HuggingFace Hub API.

    Args:
        api_key: HuggingFace Hub API token.
        temperature: sampling temperature forwarded in ``model_kwargs``.
        max_tokens: forwarded as ``max_new_tokens`` in ``model_kwargs``.
        model_id: Hub repo id of the model to use.

    Returns:
        A ``HuggingFaceHub`` LLM instance.
    """
    # (Removed a stray trailing ``|`` after the return statement, which
    # was a syntax error introduced by copy/paste residue.)
    llm = HuggingFaceHub(
        huggingfacehub_api_token=api_key,
        repo_id=model_id,
        model_kwargs={"temperature": temperature,
                      "max_new_tokens": max_tokens},
    )
    return llm