---
license: apache-2.0
---
### Korean Otter
This model fine-tunes [Otter](https://huggingface.co/luodian/OTTER-9B-LA-InContext) on the 77k *Complex Reasoning* subset of [KoLLaVA-Instruct-150K](https://huggingface.co/datasets/tabtoyou/KoLLaVA-Instruct-150k). After observing in the Otter image [demo](https://github.com/Luodian/Otter) that the model understands Korean questions to some extent but answers in English, we took the checkpoint as-is and tested whether it could be trained further on a Korean dataset. Due to GPU memory limits, only the upper layers (>25) of Otter's LLM were trained, for a single epoch. The model's answer quality is not good, but training on more data for more epochs should give better results. We share the model because we think confirming this possibility is meaningful in itself.
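The training script is not included in this card, but the partial fine-tuning described above (freezing everything except the upper LLM layers) might look like the sketch below. The `layers.<idx>` parameter naming and the example `lang_encoder` prefix are assumptions based on LLaMA-style checkpoints, not confirmed details of the actual training code:

```python
import torch

def unfreeze_upper_layers(model: torch.nn.Module, first_trainable_layer: int = 26) -> None:
    """Freeze all parameters, then re-enable gradients only for upper decoder layers (>25)."""
    for param in model.parameters():
        param.requires_grad = False
    for name, param in model.named_parameters():
        # Hypothetical names like "lang_encoder.model.layers.30.self_attn.q_proj.weight";
        # adjust the matching below to the real module paths in the Otter checkpoint.
        parts = name.split(".")
        if "layers" in parts:
            pos = parts.index("layers")
            if pos + 1 < len(parts) and parts[pos + 1].isdigit() and int(parts[pos + 1]) >= first_trainable_layer:
                param.requires_grad = True
```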
```python
import mimetypes
from typing import Union

import requests
import transformers
from PIL import Image

from otter.modeling_otter import OtterForConditionalGeneration

# Disable warnings about unverified HTTPS requests (verify=False is used below)
requests.packages.urllib3.disable_warnings()
# ------------------- Utility Functions -------------------
def get_content_type(file_path):
    content_type, _ = mimetypes.guess_type(file_path)
    return content_type
# ------------------- Image and Video Handling Functions -------------------
def get_image(url: str) -> Union[Image.Image, list]:
    if "://" not in url:  # Local file
        content_type = get_content_type(url)
    else:  # Remote URL
        content_type = requests.head(url, stream=True, verify=False).headers.get("Content-Type")
    if "image" in content_type:
        if "://" not in url:  # Local file
            return Image.open(url)
        else:  # Remote URL
            return Image.open(requests.get(url, stream=True, verify=False).raw)
    else:
        raise ValueError("Invalid content type. Expected an image.")
# ------------------- OTTER Prompt and Response Functions -------------------
def get_formatted_prompt(prompt: str, in_context_prompts: list = []) -> str:
    # Otter's chat format: each in-context example is
    # "<image>User: <prompt> GPT:<answer> <answer text><|endofchunk|>",
    # and the final query leaves "<answer>" open for the model to complete.
    in_context_string = ""
    for in_context_prompt, in_context_answer in in_context_prompts:
        in_context_string += f"<image>User: {in_context_prompt} GPT:<answer> {in_context_answer}<|endofchunk|>"
    return f"{in_context_string}<image>User: {prompt} GPT:<answer>"
def get_response(image_list, prompt: str, model=None, image_processor=None, in_context_prompts: list = []) -> str:
    input_data = image_list
    if isinstance(input_data, Image.Image):
        # shape (batch=1, num_images=1, frames=1, C, H, W)
        vision_x = image_processor.preprocess([input_data], return_tensors="pt")["pixel_values"].unsqueeze(1).unsqueeze(0)
    elif isinstance(input_data, list):  # list of images / video frames
        vision_x = image_processor.preprocess(input_data, return_tensors="pt")["pixel_values"].unsqueeze(1).unsqueeze(0)
    else:
        raise ValueError("Invalid input data. Expected PIL Image or list of video frames.")
    lang_x = model.text_tokenizer(
        [
            get_formatted_prompt(prompt, in_context_prompts),
        ],
        return_tensors="pt",
    )
    # Discourage the model from emitting role tags (and common misspellings of them)
    bad_words_id = model.text_tokenizer(["User:", "GPT1:", "GFT:", "GPT:"], add_special_tokens=False).input_ids
    generated_text = model.generate(
        vision_x=vision_x.to(model.device),
        lang_x=lang_x["input_ids"].to(model.device),
        attention_mask=lang_x["attention_mask"].to(model.device),
        max_new_tokens=512,
        num_beams=3,
        no_repeat_ngram_size=3,
        bad_words_ids=bad_words_id,
    )
    # Keep only the text between "<answer>" and "<|endofchunk|>"
    parsed_output = (
        model.text_tokenizer.decode(generated_text[0])
        .split("<answer>")[-1]
        .split("<|endofchunk|>")[0]
        .strip()
        .strip('"')
    )
    return parsed_output
# ------------------- Main Function -------------------
if __name__ == "__main__":
    model = OtterForConditionalGeneration.from_pretrained("tabtoyou/Ko-Otter-9B-LACR-v0", device_map="auto")
    model.text_tokenizer.padding_side = "left"
    image_processor = transformers.CLIPImageProcessor()
    model.eval()

    while True:
        urls = [
            "https://images.cocodataset.org/train2017/000000339543.jpg",
            "https://images.cocodataset.org/train2017/000000140285.jpg",
        ]
        encoded_frames_list = []
        for url in urls:
            frames = get_image(url)
            encoded_frames_list.append(frames)

        # One in-context example in "prompt::answer" form:
        # "Please describe the image." :: "A family is taking a picture on a snowy mountain."
        in_context_prompts = []
        in_context_examples = [
            "이미지에 대해 묘사해주세요::한 가족이 설산 위에서 사진을 찍고 있습니다.",
        ]
        for in_context_input in in_context_examples:
            in_context_prompt, in_context_answer = in_context_input.split("::")
            in_context_prompts.append((in_context_prompt.strip(), in_context_answer.strip()))

        # prompts_input = input("Enter the prompts separated by commas (or type 'quit' to exit): ")
        prompts_input = "이미지에 대해 묘사해주세요"  # "Please describe the image."
        if prompts_input.lower() == "quit":
            break

        prompts = [prompt.strip() for prompt in prompts_input.split(",")]
        for prompt in prompts:
            print(f"\nPrompt: {prompt}")
            response = get_response(encoded_frames_list, prompt, model, image_processor, in_context_prompts)
            print(f"Response: {response}")

        break  # the prompt above is hardcoded; drop this line and restore input() for an interactive loop
```
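For reference, the exact prompt string that `get_formatted_prompt` builds from the one in-context example and the query above is the following (a single string, split across lines here only for readability):

```python
formatted_prompt = (
    "<image>User: 이미지에 대해 묘사해주세요 GPT:<answer> 한 가족이 설산 위에서 사진을 찍고 있습니다.<|endofchunk|>"
    "<image>User: 이미지에 대해 묘사해주세요 GPT:<answer>"
)
```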