import discord
import logging
import os
import asyncio
from transformers import PaliGemmaForConditionalGeneration, PaliGemmaProcessor
import torch
import re
import requests
from PIL import Image
import io
import gradio as gr
import threading
from huggingface_hub import InferenceClient

# Logging setup
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s:%(levelname)s:%(name)s:%(message)s',
    handlers=[logging.StreamHandler()]
)

# Discord intents setup
intents = discord.Intents.default()
intents.message_content = True
intents.messages = True
intents.guilds = True
intents.guild_messages = True

# Inference API client setup
hf_client = InferenceClient("CohereForAI/aya-23-35B", token=os.getenv("HF_TOKEN"))

# PaliGemma model setup (CPU mode)
model = PaliGemmaForConditionalGeneration.from_pretrained("gokaygokay/sd3-long-captioner").to("cpu").eval()
processor = PaliGemmaProcessor.from_pretrained("gokaygokay/sd3-long-captioner")

# Global list holding the running conversation history
conversation_history = []


def modify_caption(caption: str) -> str:
    """Strip boilerplate prefixes such as 'captured from ' from a generated caption."""
    prefix_substrings = [
        ('captured from ', ''),
        ('captured at ', '')
    ]
    pattern = '|'.join([re.escape(opening) for opening, _ in prefix_substrings])
    # Lowercase the keys so case-insensitive matches still find their replacement.
    replacers = {opening.lower(): replacer for opening, replacer in prefix_substrings}

    def replace_fn(match):
        return replacers[match.group(0).lower()]

    return re.sub(pattern, replace_fn, caption, count=1, flags=re.IGNORECASE)


async def create_captions_rich(image: Image.Image) -> str:
    """Generate an English caption for an image with PaliGemma without blocking the event loop."""
    prompt = "caption en"
    # Normalize the image once, then re-quantize to uint8 before the combined
    # text+image pass, matching the sd3-long-captioner preprocessing.
    image_tensor = processor(images=image, return_tensors="pt").pixel_values.to("cpu")
    image_tensor = (image_tensor * 255).type(torch.uint8)
    model_inputs = processor(text=prompt, images=image_tensor, return_tensors="pt").to("cpu")
    input_len = model_inputs["input_ids"].shape[-1]

    # model.generate() is blocking, so run it in a thread-pool executor.
    loop = asyncio.get_running_loop()
    generation = await loop.run_in_executor(
        None,
        lambda: model.generate(**model_inputs, max_new_tokens=256, do_sample=False)
    )
    generation = generation[0][input_len:]
    decoded = processor.decode(generation, skip_special_tokens=True)
    return modify_caption(decoded)


async def translate_to_korean(text: str) -> str:
    """Translate English text to Korean via the hosted chat-completion endpoint."""
    messages = [
        {"role": "system", "content": "Translate the following text from English to Korean."},
        {"role": "user", "content": text}
    ]
    loop = asyncio.get_running_loop()
    response = await loop.run_in_executor(
        None,
        lambda: hf_client.chat_completion(
            messages, max_tokens=1000, stream=True, temperature=0.7, top_p=0.85
        )
    )
    # Collect the streamed chunks into a single string.
    full_response = []
    for part in response:
        if part.choices and part.choices[0].delta and part.choices[0].delta.content:
            full_response.append(part.choices[0].delta.content)
    return ''.join(full_response).strip()


async def interact_with_model(user_input: str) -> str:
    """Send the user message plus prior history to the model and record the reply."""
    global conversation_history
    conversation_history.append({"role": "user", "content": user_input})
    messages = [
        {"role": "system", "content": "Translate the following text from English to Korean and respond as if you are an assistant who provides detailed answers in Korean."},
    ] + conversation_history

    loop = asyncio.get_running_loop()
    response = await loop.run_in_executor(
        None,
        lambda: hf_client.chat_completion(
            messages, max_tokens=1000, stream=True, temperature=0.7, top_p=0.85
        )
    )
    full_response = []
    for part in response:
        if part.choices and part.choices[0].delta and part.choices[0].delta.content:
            full_response.append(part.choices[0].delta.content)
    full_response_text = ''.join(full_response)
    conversation_history.append({"role": "assistant", "content": full_response_text})
    return full_response_text.strip()


# Gradio interface setup
def create_captions_rich_sync(image):
    """Synchronous wrapper so Gradio can call the async caption + translate pipeline."""
    caption = asyncio.run(create_captions_rich(image))
    translated_caption = asyncio.run(translate_to_korean(caption))
    return translated_caption


css = """
#mkd {
    height: 500px;
    overflow: auto;
    border: 1px solid #ccc;
}
"""

with gr.Blocks(css=css) as demo:
    # The source is truncated at gr.HTML(; this title string is a placeholder.
    gr.HTML("<h1>Image Captioning with PaliGemma</h1>")
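    # --- Reconstruction: the source is truncated inside the Blocks context, so the
    # component names and layout below are assumptions, not the original author's
    # markup. A minimal image-in / caption-out layout wired to the existing
    # create_captions_rich_sync wrapper:
    with gr.Row():
        input_image = gr.Image(label="Input Image", type="pil")
        output_caption = gr.Textbox(label="Korean Caption", elem_id="mkd")
    caption_button = gr.Button("Generate Caption")
    caption_button.click(
        fn=create_captions_rich_sync,
        inputs=input_image,
        outputs=output_caption
    )


def run_gradio():
    # queue() keeps long caption jobs from blocking one another.
    demo.queue().launch(server_name="0.0.0.0")


# Launch Gradio in a background thread (the `threading` import above suggests this
# pattern) so the Discord bot can own the main thread.
threading.Thread(target=run_gradio, daemon=True).start()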
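# --- Discord wiring (sketch) ---
# The Discord client itself is also missing from the truncated source. The intents
# configured above imply a message-driven bot, so this is one plausible wiring,
# assuming a DISCORD_TOKEN environment variable and that image attachments trigger
# captioning while plain text goes to interact_with_model.
class CaptionBot(discord.Client):
    async def on_ready(self):
        logging.info('Logged in as %s', self.user)

    async def on_message(self, message):
        if message.author == self.user:
            return  # ignore the bot's own messages
        if message.attachments:
            # Caption the first attached image and reply in Korean.
            image_bytes = await message.attachments[0].read()
            image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
            caption = await create_captions_rich(image)
            await message.channel.send(await translate_to_korean(caption))
        elif message.content:
            await message.channel.send(await interact_with_model(message.content))


discord_client = CaptionBot(intents=intents)
discord_client.run(os.getenv('DISCORD_TOKEN'))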