Spaces:
Runtime error
Runtime error
seawolf2357
committed on
Commit
โข
60c2701
1
Parent(s):
ddd7cdb
Update app.py
Browse files
app.py
CHANGED
@@ -1,11 +1,8 @@
|
|
1 |
import discord
|
2 |
import logging
|
3 |
import os
|
|
|
4 |
import uuid
|
5 |
-
import torch
|
6 |
-
import subprocess
|
7 |
-
from huggingface_hub import snapshot_download
|
8 |
-
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
|
9 |
from transformers import pipeline
|
10 |
|
11 |
# Logging setup
|
@@ -13,91 +10,68 @@ logging.basicConfig(level=logging.DEBUG, format='%(asctime)s:%(levelname)s:%(nam
|
|
13 |
|
14 |
# ์ธํ
ํธ ์ค์
|
15 |
intents = discord.Intents.default()
|
16 |
-
intents.
|
17 |
-
|
18 |
-
# Hugging Face ๋ชจ๋ธ ๋ค์ด๋ก๋
|
19 |
-
huggingface_token = os.getenv("HF_TOKEN")
|
20 |
-
model_local_dir = "Fluently-XL-Final"
|
21 |
-
|
22 |
-
# ๋ชจ๋ธ์ ๋ก์ปฌ ๋๋ ํ ๋ฆฌ์ ๋ค์ด๋ก๋
|
23 |
-
model_path = snapshot_download(
|
24 |
-
repo_id="fluently/Fluently-XL-Final",
|
25 |
-
repo_type="model",
|
26 |
-
local_dir=model_local_dir,
|
27 |
-
token=huggingface_token,
|
28 |
-
)
|
29 |
-
|
30 |
-
# ๋ชจ๋ธ ๋ก๋ ํจ์
|
31 |
-
def load_pipeline(pipeline_type):
|
32 |
-
logging.debug(f'Loading pipeline: {pipeline_type}')
|
33 |
-
if pipeline_type == "text2img":
|
34 |
-
return StableDiffusionPipeline.from_pretrained(model_local_dir, torch_dtype=torch.float32)
|
35 |
-
elif pipeline_type == "img2img":
|
36 |
-
return StableDiffusionImg2ImgPipeline.from_pretrained(model_local_dir, torch_dtype=torch.float32)
|
37 |
-
|
38 |
-
# ๋๋ฐ์ด์ค ์ค์ (CPU ์ ์ฉ)
|
39 |
-
device = torch.device("cpu")
|
40 |
|
41 |
# ๋ฒ์ญ ํ์ดํ๋ผ์ธ ์ค์
|
42 |
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
|
43 |
|
44 |
-
#
|
45 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
46 |
|
47 |
# ๋์ค์ฝ๋ ๋ด ํด๋์ค
|
48 |
class MyClient(discord.Client):
|
49 |
def __init__(self, *args, **kwargs):
|
50 |
super().__init__(*args, **kwargs)
|
51 |
-
self.is_processing = False
|
52 |
-
self.text2img_pipeline = load_pipeline("text2img").to(device)
|
53 |
-
self.text2img_pipeline.enable_attention_slicing() # ๋ฉ๋ชจ๋ฆฌ ์ต์ ํ
|
54 |
|
55 |
async def on_ready(self):
|
56 |
-
logging.info(f'{self.user} logged in!')
|
57 |
-
subprocess.Popen(["python", "web.py"])
|
58 |
-
logging.info("web.py server has started.")
|
59 |
|
60 |
async def on_message(self, message):
|
61 |
if message.author == self.user:
|
62 |
return
|
63 |
if message.content.startswith('!image '):
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
|
76 |
-
)
|
77 |
-
finally:
|
78 |
-
self.is_processing = False
|
79 |
-
|
80 |
-
async def generate_image(self, prompt, negative_prompt):
|
81 |
-
try:
|
82 |
-
if negative_prompt is None:
|
83 |
-
negative_prompt = ""
|
84 |
-
logging.debug(f"Calling text2img_pipeline with prompt: {prompt} and negative_prompt: {negative_prompt}")
|
85 |
-
|
86 |
-
result = self.text2img_pipeline(
|
87 |
-
prompt=prompt,
|
88 |
-
negative_prompt=negative_prompt,
|
89 |
-
num_inference_steps=50
|
90 |
)
|
91 |
-
images = result.images
|
92 |
|
|
|
|
|
|
|
|
|
93 |
image_path = f'/tmp/{uuid.uuid4()}.png'
|
94 |
-
|
95 |
-
|
96 |
-
|
97 |
return image_path
|
98 |
-
|
99 |
-
|
100 |
-
raise
|
101 |
|
102 |
# ํ๋กฌํํธ ๋ฒ์ญ ํจ์
|
103 |
def translate_prompt(prompt):
|
|
|
1 |
import discord
|
2 |
import logging
|
3 |
import os
|
4 |
+
import requests
|
5 |
import uuid
|
|
|
|
|
|
|
|
|
6 |
from transformers import pipeline
|
7 |
|
8 |
# Logging setup
|
|
|
10 |
|
11 |
# ์ธํ
ํธ ์ค์
|
12 |
intents = discord.Intents.default()
|
13 |
+
intents.messages = True
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
14 |
|
15 |
# ๋ฒ์ญ ํ์ดํ๋ผ์ธ ์ค์
|
16 |
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
|
17 |
|
18 |
+
# Hugging Face API token (read from the environment; None if unset).
hf_token = os.getenv("HF_TOKEN")

# Inference API endpoints keyed by the short model name users type in the
# "!image <model_name> <prompt>" command. URLs are derived from one base
# so the endpoint prefix is spelled out exactly once.
_HF_INFERENCE_BASE = "https://api-inference.huggingface.co/models"
_MODEL_REPOS = {
    "epiCPhotoGasm": "Yntec/epiCPhotoGasm",
    "AM-mix1": "digiplay/AM-mix1",
    "AbsoluteReality": "Yntec/AbsoluteReality",
    "DreamPhotoGASM": "Yntec/DreamPhotoGASM",
    "insaneRealistic_v1": "digiplay/insaneRealistic_v1",
    "photoMovieXFinal": "Yntec/photoMovieXFinal",
    "Memento": "Yntec/Memento",
    "photoMovieRealistic": "Yntec/photoMovieRealistic",
    "Timeless": "Yntec/Timeless",
    "Fabulous": "Yntec/Fabulous",
    "IncredibleLife": "Yntec/IncredibleLife",
    "RealLife": "Yntec/RealLife",
    "HyperRealism": "Yntec/HyperRealism",
}
model_urls = {name: f"{_HF_INFERENCE_BASE}/{repo}" for name, repo in _MODEL_REPOS.items()}
|
37 |
|
38 |
# Discord bot client: listens for "!image <model_name> <prompt>" commands and
# replies with an image generated through the Hugging Face Inference API.
class MyClient(discord.Client):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    async def on_ready(self):
        """Log once the gateway connection is established."""
        logging.info(f'{self.user} has logged in!')

    async def on_message(self, message):
        """Handle "!image <model_name> <prompt>" commands from other users."""
        # Ignore our own messages to avoid reply loops.
        if message.author == self.user:
            return
        if message.content.startswith('!image '):
            # Expected shape: "!image", model name, free-form prompt text.
            parts = message.content.split(' ', 2)
            if len(parts) < 3 or parts[1] not in model_urls:
                await message.channel.send("Invalid model name or usage.")
                return

            model_name = parts[1]
            prompt = parts[2]
            prompt_en = translate_prompt(prompt)  # translate the prompt to English
            try:
                image_path = await self.generate_image(model_urls[model_name], prompt_en)
            except Exception:
                # Surface the failure to the user instead of silently raising
                # out of the event handler (the original sent nothing).
                logging.exception("Image generation failed for model %s", model_name)
                await message.channel.send("Image generation failed. Please try again later.")
                return
            user_id = message.author.id
            await message.channel.send(
                f"<@{user_id}> Here is your requested image:",
                file=discord.File(image_path, 'generated_image.png')
            )

    async def generate_image(self, model_url, prompt):
        """POST *prompt* to *model_url* on the HF Inference API and save the
        returned image bytes to a temp file.

        Returns the saved file path; raises Exception when the API does not
        answer with HTTP 200.
        """
        import asyncio  # local import: the module's import block is outside this edit

        headers = {"Authorization": f"Bearer {hf_token}"}
        # requests.post is blocking; run it in a worker thread so the Discord
        # event loop stays responsive while the model generates. The timeout
        # prevents a hung request from stalling forever (the original had none).
        response = await asyncio.to_thread(
            requests.post,
            model_url,
            headers=headers,
            json={"inputs": prompt},
            timeout=120,
        )
        if response.status_code == 200:
            image_path = f'/tmp/{uuid.uuid4()}.png'
            with open(image_path, 'wb') as f:
                f.write(response.content)  # raw image bytes from the API
            return image_path
        raise Exception("Failed to generate image from the API")
|
|
|
75 |
|
76 |
# ํ๋กฌํํธ ๋ฒ์ญ ํจ์
|
77 |
def translate_prompt(prompt):
|