import torch
import torch.amp.autocast_mode
import os
import sys
import logging
import warnings
import argparse
from PIL import Image
from pathlib import Path
from tqdm import tqdm
from torch import nn
from transformers import AutoModel, AutoProcessor, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast, AutoModelForCausalLM
from typing import List, Union
import torchvision.transforms.functional as TVF
from peft import PeftModel
import gc
# Constants
HF_TOKEN = os.environ.get("HF_TOKEN", None)
BASE_DIR = Path(__file__).resolve().parent # Define the base directory
CLIP_PATH = "google/siglip-so400m-patch14-384"
DEFAULT_MODEL_PATH = "unsloth/Meta-Llama-3.1-8B-bnb-4bit"
#DEFAULT_MODEL_PATH = "Orenguteng/Llama-3.1-8B-Lexi-Uncensored-V2" # Works better, but full-precision weights.
CHECKPOINT_PATH = BASE_DIR / Path("9em124t2-499968")
LORA_PATH = CHECKPOINT_PATH / "text_model"
CAPTION_TYPE_MAP = {
("descriptive", "formal", False, False): ["Write a descriptive caption for this image in a formal tone."],
("descriptive", "formal", False, True): ["Write a descriptive caption for this image in a formal tone within {word_count} words."],
("descriptive", "formal", True, False): ["Write a {length} descriptive caption for this image in a formal tone."],
("descriptive", "informal", False, False): ["Write a descriptive caption for this image in a casual tone."],
("descriptive", "informal", False, True): ["Write a descriptive caption for this image in a casual tone within {word_count} words."],
("descriptive", "informal", True, False): ["Write a {length} descriptive caption for this image in a casual tone."],
("training_prompt", "formal", False, False): ["Write a stable diffusion prompt for this image."],
("training_prompt", "formal", False, True): ["Write a stable diffusion prompt for this image within {word_count} words."],
("training_prompt", "formal", True, False): ["Write a {length} stable diffusion prompt for this image."],
("rng-tags", "formal", False, False): ["Write a list of Booru tags for this image."],
("rng-tags", "formal", False, True): ["Write a list of Booru tags for this image within {word_count} words."],
("rng-tags", "formal", True, False): ["Write a {length} list of Booru tags for this image."],
}
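# Keys are (caption_type, tone, length_is_word, length_is_number); see the
# prompt_key construction in stream_chat(). Illustrative lookup (not executed):
#   CAPTION_TYPE_MAP[("descriptive", "formal", True, False)][0].format(length="short", word_count="short")
#   -> "Write a short descriptive caption for this image in a formal tone."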
IMAGE_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.bmp', '.webp')
# Global Variables
IS_NF4 = True
IS_LORA = True
MODEL_PATH = DEFAULT_MODEL_PATH
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Running on {device}")
warnings.filterwarnings("ignore", category=UserWarning)
logging.getLogger("transformers").setLevel(logging.ERROR)
class ImageAdapter(nn.Module):
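    """Projects vision-tower hidden states into the LLM's embedding space.

    The projected image tokens are wrapped with learned <|image_start|> and
    <|image_end|> embeddings; a third learned embedding stands in for
    <|eot_id|> (see get_eot_embedding).
    """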
def __init__(self, input_features: int, output_features: int, ln1: bool, pos_emb: bool, num_image_tokens: int, deep_extract: bool):
super().__init__()
self.deep_extract = deep_extract
if self.deep_extract:
input_features = input_features * 5
self.linear1 = nn.Linear(input_features, output_features)
self.activation = nn.GELU()
self.linear2 = nn.Linear(output_features, output_features)
self.ln1 = nn.Identity() if not ln1 else nn.LayerNorm(input_features)
self.pos_emb = None if not pos_emb else nn.Parameter(torch.zeros(num_image_tokens, input_features))
# Mode token
#self.mode_token = nn.Embedding(n_modes, output_features)
#self.mode_token.weight.data.normal_(mean=0.0, std=0.02) # Matches HF's implementation of llama3
# Other tokens (<|image_start|>, <|image_end|>, <|eot_id|>)
self.other_tokens = nn.Embedding(3, output_features)
self.other_tokens.weight.data.normal_(mean=0.0, std=0.02) # Matches HF's implementation of llama3
def forward(self, vision_outputs: torch.Tensor):
if self.deep_extract:
x = torch.concat((
vision_outputs[-2],
vision_outputs[3],
vision_outputs[7],
vision_outputs[13],
vision_outputs[20],
), dim=-1)
assert len(x.shape) == 3, f"Expected 3, got {len(x.shape)}" # batch, tokens, features
assert x.shape[-1] == vision_outputs[-2].shape[-1] * 5, f"Expected {vision_outputs[-2].shape[-1] * 5}, got {x.shape[-1]}"
else:
x = vision_outputs[-2]
x = self.ln1(x)
if self.pos_emb is not None:
assert x.shape[-2:] == self.pos_emb.shape, f"Expected {self.pos_emb.shape}, got {x.shape[-2:]}"
x = x + self.pos_emb
x = self.linear1(x)
x = self.activation(x)
x = self.linear2(x)
# Mode token
#mode_token = self.mode_token(mode)
#assert mode_token.shape == (x.shape[0], mode_token.shape[1], x.shape[2]), f"Expected {(x.shape[0], 1, x.shape[2])}, got {mode_token.shape}"
#x = torch.cat((x, mode_token), dim=1)
# <|image_start|>, IMAGE, <|image_end|>
other_tokens = self.other_tokens(torch.tensor([0, 1], device=self.other_tokens.weight.device).expand(x.shape[0], -1))
assert other_tokens.shape == (x.shape[0], 2, x.shape[2]), f"Expected {(x.shape[0], 2, x.shape[2])}, got {other_tokens.shape}"
x = torch.cat((other_tokens[:, 0:1], x, other_tokens[:, 1:2]), dim=1)
return x
def get_eot_embedding(self):
return self.other_tokens(torch.tensor([2], device=self.other_tokens.weight.device)).squeeze(0)
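# Shape sketch (illustrative, assuming SigLIP so400m-patch14-384 with hidden
# size 1152 and 729 patch tokens, and a Llama-3.1-8B hidden size of 4096):
# vision_outputs[-2] is (B, 729, 1152); after linear1/linear2 it becomes
# (B, 729, 4096); with the <|image_start|>/<|image_end|> embeddings added the
# adapter returns (B, 731, 4096).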
def load_models():
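    """Load the vision tower, tokenizer, LLM, and image adapter.

    Honors the IS_NF4 / IS_LORA globals: with IS_NF4 the LLM is loaded 4-bit
    via bitsandbytes (the LoRA merge is skipped); otherwise it is loaded in
    bfloat16 and, if IS_LORA is set and LORA_PATH exists, the LoRA is merged
    into the model.
    """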
global MODEL_PATH, IS_NF4, IS_LORA
try:
if IS_NF4:
from transformers import BitsAndBytesConfig
nf4_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4",
bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.bfloat16)
print("Loading in NF4")
            print("Loading CLIP 📎")
clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)
clip_model = AutoModel.from_pretrained(CLIP_PATH).vision_model
if (CHECKPOINT_PATH / "clip_model.pt").exists():
                print("Loading VLM's custom vision model 📎")
checkpoint = torch.load(CHECKPOINT_PATH / "clip_model.pt", map_location='cpu', weights_only=False)
checkpoint = {k.replace("_orig_mod.module.", ""): v for k, v in checkpoint.items()}
clip_model.load_state_dict(checkpoint)
del checkpoint
clip_model.eval().requires_grad_(False).to(device)
            print("Loading tokenizer 🪙")
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, use_fast=False)
assert isinstance(tokenizer, (PreTrainedTokenizer, PreTrainedTokenizerFast)), f"Tokenizer is of type {type(tokenizer)}"
            print(f"Loading LLM: {MODEL_PATH} 🤖")
text_model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, quantization_config=nf4_config, device_map=device, torch_dtype=torch.bfloat16).eval()
            if False and IS_LORA and LORA_PATH.exists(): # LoRA merge intentionally disabled in NF4 mode
                print("Loading VLM's custom text model 🤖")
                text_model = PeftModel.from_pretrained(model=text_model, model_id=LORA_PATH, device_map=device, quantization_config=nf4_config)
                text_model = text_model.merge_and_unload(safe_merge=True) # to avoid PEFT bug https://github.com/huggingface/transformers/issues/28515
            else: print("VLM's custom text model isn't loaded 🤖")
            print("Loading image adapter 🖼️")
image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False).eval().to("cpu")
image_adapter.load_state_dict(torch.load(CHECKPOINT_PATH / "image_adapter.pt", map_location="cpu", weights_only=False))
image_adapter.eval().to(device)
else:
print("Loading in bfloat16")
            print("Loading CLIP 📎")
clip_processor = AutoProcessor.from_pretrained(CLIP_PATH)
clip_model = AutoModel.from_pretrained(CLIP_PATH).vision_model
if (CHECKPOINT_PATH / "clip_model.pt").exists():
                print("Loading VLM's custom vision model 📎")
checkpoint = torch.load(CHECKPOINT_PATH / "clip_model.pt", map_location='cpu', weights_only=False)
checkpoint = {k.replace("_orig_mod.module.", ""): v for k, v in checkpoint.items()}
clip_model.load_state_dict(checkpoint)
del checkpoint
clip_model.eval().requires_grad_(False).to(device)
            print("Loading tokenizer 🪙")
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, use_fast=False)
assert isinstance(tokenizer, (PreTrainedTokenizer, PreTrainedTokenizerFast)), f"Tokenizer is of type {type(tokenizer)}"
            print(f"Loading LLM: {MODEL_PATH} 🤖")
            text_model = AutoModelForCausalLM.from_pretrained(MODEL_PATH, device_map="auto", torch_dtype=torch.bfloat16).eval() # device_map="auto" may cause issues when applying the LoRA
            if IS_LORA and LORA_PATH.exists():
                print("Loading VLM's custom text model 🤖")
                text_model = PeftModel.from_pretrained(model=text_model, model_id=LORA_PATH, device_map=device)
                text_model = text_model.merge_and_unload(safe_merge=True) # to avoid PEFT bug https://github.com/huggingface/transformers/issues/28515
            else: print("VLM's custom text model isn't loaded 🤖")
            print("Loading image adapter 🖼️")
            image_adapter = ImageAdapter(clip_model.config.hidden_size, text_model.config.hidden_size, False, False, 38, False).eval().to("cpu")
            image_adapter.load_state_dict(torch.load(CHECKPOINT_PATH / "image_adapter.pt", map_location="cpu", weights_only=False))
            image_adapter.eval().to(device) # move to device here too; the NF4 branch already does this
except Exception as e:
print(f"Error loading models: {e}")
sys.exit(1)
finally:
torch.cuda.empty_cache()
gc.collect()
return clip_processor, clip_model, tokenizer, text_model, image_adapter
@torch.inference_mode()
def stream_chat(input_images: List[Image.Image], caption_type: str, caption_tone: str, caption_length: Union[str, int],
max_new_tokens: int, top_p: float, temperature: float, batch_size: int, pbar: tqdm, models: tuple) -> List[str]:
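    """Caption a list of PIL images and return one caption string per image.

    Each image is resized to 384x384, encoded by the vision tower, projected
    by the image adapter, and spliced between BOS and the text prompt before
    generation.
    """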
global MODEL_PATH
clip_processor, clip_model, tokenizer, text_model, image_adapter = models
torch.cuda.empty_cache()
all_captions = []
# 'any' means no length specified
length = None if caption_length == "any" else caption_length
if isinstance(length, str):
try:
length = int(length)
except ValueError:
pass
# 'rng-tags' and 'training_prompt' don't have formal/informal tones
    if caption_type in ("rng-tags", "training_prompt"):
caption_tone = "formal"
# Build prompt
prompt_key = (caption_type, caption_tone, isinstance(length, str), isinstance(length, int))
if prompt_key not in CAPTION_TYPE_MAP:
        raise ValueError(f"Invalid caption type/tone/length combination: {prompt_key}")
prompt_str = CAPTION_TYPE_MAP[prompt_key][0].format(length=length, word_count=length)
print(f"Prompt: {prompt_str}")
    for i in range(0, len(input_images), batch_size):
        batch = input_images[i:i+batch_size]
        # Preprocess each image in the current batch (iterate over the batch
        # slice, not the full input list, to avoid captioning images twice)
        for input_image in batch:
try:
image = input_image.resize((384, 384), Image.LANCZOS)
pixel_values = TVF.pil_to_tensor(image).unsqueeze(0) / 255.0
pixel_values = TVF.normalize(pixel_values, [0.5], [0.5])
pixel_values = pixel_values.to(device)
except ValueError as e:
print(f"Error processing image: {e}")
print("Skipping this image and continuing...")
continue
# Embed image
with torch.amp.autocast_mode.autocast(device, enabled=True):
vision_outputs = clip_model(pixel_values=pixel_values, output_hidden_states=True)
image_features = vision_outputs.hidden_states
embedded_images = image_adapter(image_features).to(device)
# Tokenize the prompt
prompt = tokenizer.encode(prompt_str, return_tensors='pt', padding=False, truncation=False, add_special_tokens=False)
# Embed prompt
prompt_embeds = text_model.model.embed_tokens(prompt.to(device))
assert prompt_embeds.shape == (1, prompt.shape[1], text_model.config.hidden_size), f"Prompt shape is {prompt_embeds.shape}, expected {(1, prompt.shape[1], text_model.config.hidden_size)}"
embedded_bos = text_model.model.embed_tokens(torch.tensor([[tokenizer.bos_token_id]], device=text_model.device, dtype=torch.int64))
eot_embed = image_adapter.get_eot_embedding().unsqueeze(0).to(dtype=text_model.dtype)
# Construct prompts
inputs_embeds = torch.cat([
embedded_bos.expand(embedded_images.shape[0], -1, -1),
embedded_images.to(dtype=embedded_bos.dtype),
prompt_embeds.expand(embedded_images.shape[0], -1, -1),
eot_embed.expand(embedded_images.shape[0], -1, -1),
], dim=1)
input_ids = torch.cat([
torch.tensor([[tokenizer.bos_token_id]], dtype=torch.long),
torch.zeros((1, embedded_images.shape[1]), dtype=torch.long),
prompt,
torch.tensor([[tokenizer.convert_tokens_to_ids("<|eot_id|>")]], dtype=torch.long),
], dim=1).to(device)
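            # Sequence layout: [BOS][zero placeholders for the image tokens][prompt][<|eot_id|>].
            # generate() conditions on inputs_embeds; input_ids mainly sizes the
            # attention mask and is echoed back so the prompt can be trimmed below.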
attention_mask = torch.ones_like(input_ids)
generate_ids = text_model.generate(input_ids=input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask, do_sample=True,
suppress_tokens=None, max_new_tokens=max_new_tokens, top_p=top_p, temperature=temperature)
# Trim off the prompt
generate_ids = generate_ids[:, input_ids.shape[1]:]
if generate_ids[0][-1] == tokenizer.eos_token_id or generate_ids[0][-1] == tokenizer.convert_tokens_to_ids("<|eot_id|>"):
generate_ids = generate_ids[:, :-1]
caption = tokenizer.batch_decode(generate_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)[0]
all_captions.append(caption.strip())
if pbar:
pbar.update(len(batch))
return all_captions
def process_directory(input_dir: Path, output_dir: Path, caption_type: str, caption_tone: str, caption_length: Union[str, int],
max_new_tokens: int, top_p: float, temperature: float, batch_size: int, models: tuple):
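    """Caption every image in input_dir, writing one .txt per image to output_dir.

    Images that already have a matching .txt in output_dir are skipped.
    """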
output_dir.mkdir(parents=True, exist_ok=True)
image_files = [f for f in input_dir.iterdir() if f.suffix.lower() in IMAGE_EXTENSIONS]
images_to_process = [f for f in image_files if not (output_dir / f"{f.stem}.txt").exists()]
if not images_to_process:
print("No new images to process.")
return
with tqdm(total=len(images_to_process), desc="Processing images", unit="image") as pbar:
for i in range(0, len(images_to_process), batch_size):
batch_files = images_to_process[i:i+batch_size]
batch_images = [Image.open(f).convert('RGB') for f in batch_files]
captions = stream_chat(batch_images, caption_type, caption_tone, caption_length,
max_new_tokens, top_p, temperature, batch_size, pbar, models)
for file, caption in zip(batch_files, captions):
with open(output_dir / f"{file.stem}.txt", 'w', encoding='utf-8') as f:
f.write(caption)
for img in batch_images:
img.close()
def parse_arguments():
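    """Parse the command-line arguments; see main() for usage examples."""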
parser = argparse.ArgumentParser(description="Process images and generate captions.")
parser.add_argument("input", nargs='+', help="Input image file or directory (or multiple directories)")
parser.add_argument("--output", help="Output directory (optional)")
parser.add_argument("--bs", type=int, default=4, help="Batch size (default: 4)")
parser.add_argument("--type", type=str, default="descriptive", choices=["descriptive", "training_prompt", "rng-tags"],
help='Caption Type (default: "descriptive")')
parser.add_argument("--tone", type=str, default="formal", choices=["formal", "informal"],
help='Caption Tone (default: "formal")')
parser.add_argument("--len", default="any",
choices=["any", "very short", "short", "medium-length", "long", "very long"] + [str(i) for i in range(20, 261, 10)],
help='Caption Length (default: "any")')
parser.add_argument("--model", type=str, default=DEFAULT_MODEL_PATH,
help='Huggingface LLM repo (default: "unsloth/Meta-Llama-3.1-8B-bnb-4bit")')
parser.add_argument("--bf16", action="store_true", default=False, help="Use bfloat16 (default: NF4)")
parser.add_argument("--nolora", action="store_true", default=False, help="Disable VLM's custom text model (default: Enable)")
parser.add_argument("--tokens", type=int, default=300, help="Max tokens (default: 300)")
parser.add_argument("--topp", type=float, default=0.9, help="Top-P (default: 0.9)")
parser.add_argument("--temp", type=float, default=0.6, help="Temperature (default: 0.6)")
return parser.parse_args()
def is_valid_repo(repo_id):
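    """Return True if repo_id looks like "user/repo" and exists on the Hub."""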
from huggingface_hub import HfApi
import re
try:
if not re.fullmatch(r'^[^/,\s\"\']+/[^/,\s\"\']+$', repo_id): return False
api = HfApi()
if api.repo_exists(repo_id=repo_id): return True
else: return False
except Exception as e:
        print(f"Failed to connect to {repo_id}: {e}")
return False
def main():
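    """Entry point: load the models once, then caption each input file or directory."""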
global MODEL_PATH, IS_NF4, IS_LORA
args = parse_arguments()
input_paths = [Path(input_path) for input_path in args.input]
batch_size = args.bs
caption_type = args.type
caption_tone = args.tone
caption_length = args.len
max_new_tokens = args.tokens
top_p = args.topp
temperature = args.temp
    IS_NF4 = not args.bf16
    IS_LORA = not args.nolora
    if is_valid_repo(args.model): MODEL_PATH = args.model
    else:
        print(f"Invalid Huggingface repo: {args.model}")
        sys.exit(1)
models = load_models()
for input_path in input_paths:
if input_path.is_file() and input_path.suffix.lower() in IMAGE_EXTENSIONS:
output_path = input_path.with_suffix('.txt')
print(f"Processing single image 🎞️: {input_path.name}")
with tqdm(total=1, desc="Processing image", unit="image") as pbar:
captions = stream_chat([Image.open(input_path).convert('RGB')], caption_type, caption_tone, caption_length,
max_new_tokens, top_p, temperature, 1, pbar, models)
with open(output_path, 'w', encoding='utf-8') as f:
f.write(captions[0])
print(f"Output saved to {output_path}")
elif input_path.is_dir():
output_path = Path(args.output) if args.output else input_path
            print(f"Processing directory 📁: {input_path}")
            print(f"Output directory 📦: {output_path}")
            print(f"Batch size 🗄️: {batch_size}")
process_directory(input_path, output_path, caption_type, caption_tone, caption_length,
max_new_tokens, top_p, temperature, batch_size, models)
else:
print(f"Invalid input: {input_path}")
print("Skipping...")
if not input_paths:
print("Usage:")
print("For single image: python app.py [image_file] [--bs batch_size]")
print("For directory (same input/output): python app.py [directory] [--bs batch_size]")
print("For directory (separate input/output): python app.py [directory] --output [output_directory] [--bs batch_size]")
print("For multiple directories: python app.py [directory1] [directory2] ... [--output output_directory] [--bs batch_size]")
sys.exit(1)
if __name__ == "__main__":
main()