Arthur Angelici
Create README.md
b7e22c4
|
raw
history blame
578 Bytes

## Usage

# Usage example: load a LoRA (PEFT) adapter checkpoint on top of its base
# seq2seq model. Reconstructed as runnable Python: in the published README the
# '#' comment markers were stripped and statements were fused onto single lines.
import torch
from peft import PeftModel, PeftConfig
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Load peft config for pre-trained checkpoint etc.
# The config stores base_model_name_or_path, used below to fetch the base model.
peft_model_id = "results"
config = PeftConfig.from_pretrained(peft_model_id)

# load base LLM model and tokenizer
# load_in_8bit=True quantizes weights to 8-bit; device_map={"": 0} places the
# whole model on GPU 0.
model = AutoModelForSeq2SeqLM.from_pretrained(
    config.base_model_name_or_path,
    load_in_8bit=True,
    device_map={"": 0},
)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Load the Lora model: wraps the base model with the adapter weights stored
# in peft_model_id.
model = PeftModel.from_pretrained(model, peft_model_id, device_map={"": 0})