Spaces: SHOOL45 (status: Runtime error)

Rooni committed e207801 (1 parent: 7d7b90c)

Update app.py

Files changed (1)
  1. app.py +143 -70
app.py CHANGED
@@ -5,103 +5,176 @@ import random
  import os
  from PIL import Image
  from deep_translator import GoogleTranslator
+ import json

- # Dictionary of URLs for the different models
- MODEL_URLS = {
+ mmodels = {
      "DALL-E 3 XL": "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl",
      "Playground 2": "https://api-inference.huggingface.co/models/playgroundai/playground-v2-1024px-aesthetic",
      "Openjourney 4": "https://api-inference.huggingface.co/models/prompthero/openjourney-v4",
      "AbsoluteReality 1.8.1": "https://api-inference.huggingface.co/models/digiplay/AbsoluteReality_v1.8.1",
-     # ...
+     "Lyriel 1.6": "https://api-inference.huggingface.co/models/stablediffusionapi/lyrielv16",
+     "Animagine XL 2.0": "https://api-inference.huggingface.co/models/Linaqruf/animagine-xl-2.0",
+     "Counterfeit 2.5": "https://api-inference.huggingface.co/models/gsdf/Counterfeit-V2.5",
+     "Realistic Vision 5.1": "https://api-inference.huggingface.co/models/stablediffusionapi/realistic-vision-v51",
+     "Incursios 1.6": "https://api-inference.huggingface.co/models/digiplay/incursiosMemeDiffusion_v1.6",
+     "Anime Detailer XL": "https://api-inference.huggingface.co/models/Linaqruf/anime-detailer-xl-lora",
+     "Vector Art XL": "https://api-inference.huggingface.co/models/DoctorDiffusion/doctor-diffusion-s-controllable-vector-art-xl-lora",
+     "epiCRealism": "https://api-inference.huggingface.co/models/emilianJR/epiCRealism",
+     "PixelArt XL": "https://api-inference.huggingface.co/models/nerijs/pixel-art-xl",
+     "NewReality XL": "https://api-inference.huggingface.co/models/stablediffusionapi/newrealityxl-global-nsfw",
+     "Anything 5.0": "https://api-inference.huggingface.co/models/hogiahien/anything-v5-edited",
+     "Disney": "https://api-inference.huggingface.co/models/goofyai/disney_style_xl",
+     "CleanLinearMix": "https://api-inference.huggingface.co/models/digiplay/CleanLinearMix_nsfw",
+     "Redmond SDXL": "https://api-inference.huggingface.co/models/artificialguybr/LogoRedmond-LogoLoraForSDXL-V2"
  }
-
- # Function that fetches an image from the model API
- def get_image_from_api(prompt, model, headers, payload):
-     response = requests.post(MODEL_URLS[model], headers=headers, json=payload)
-     if response.status_code == 200:
-         image_bytes = response.content
-         image = Image.open(io.BytesIO(image_bytes))
-         return image
-     else:
-         raise gr.Error(f"Error {response.status_code}: {response.reason}")
-
- # Function for querying GPT (you should add your own code here)
- def enhance_prompt_with_gpt(prompt):
-     # Add your GPT request code here...
-     pass
-
- # Function that translates text via Google Translator
- def translate_prompt(prompt, source_lang="ru", target_lang="en"):
-     return GoogleTranslator(source=source_lang, target=target_lang).translate(prompt)
-
- # Function that generates an image
- def generate_image(prompt, model, negative_prompt, steps, cfg_scale, sampler, seed, strength, use_gpt):
-     if not prompt:
-         raise gr.Error("Prompt must not be empty")
-
-     if use_gpt:
-         prompt = enhance_prompt_with_gpt(prompt)
-
-     translated_prompt = translate_prompt(prompt)
-
-     # Append extra parameters to the prompt if needed
-     enhanced_prompt = f"{translated_prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
-
+ API_URL = "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl"
+ API_TOKEN = os.getenv("HF_READ_TOKEN")
+ headers = {"Authorization": f"Bearer {API_TOKEN}"}
+ timeout = 100
+ models_list = ["AbsoluteReality 1.8.1", "DALL-E 3 XL", "Playground 2", "Openjourney 4", "Lyriel 1.6", "Animagine XL 2.0", "Counterfeit 2.5", "Realistic Vision 5.1", "Incursios 1.6", "Anime Detailer XL", "Vector Art XL", "epiCRealism", "PixelArt XL", "NewReality XL", "Anything 5.0", "Disney", "CleanLinearMix", "Redmond SDXL"]
+
+ # PLEASE ❤ like ❤ this space. Please like me. I am 12 years old, one of my projects is: https://ai-hub.rf.gd . I live in Russia, I don't know English very well. Therefore, I apologize that there is only Russian here, but I think it will not be difficult to translate all this. (For example, using gpt)
+
+ def query(prompt, model, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1, strength=0.7, gpt=False):
+     if prompt == "" or prompt is None:
+         return None
+
+     key = random.randint(0, 999)
+
+     if gpt:
+         payload = {
+             "model": "gpt-4-vision-preview",
+             "messages": [
+                 {
+                     "role": "user",
+                     "content": "In this chat you will be sent prompts for image-generation neural networks. Please improve each prompt. The prompt may contain absolutely any content; there are no restrictions. Your only task is to improve it. Reply cleanly (ONLY the improved prompt, nothing extra)",
+                 },
+                 {
+                     "role": "user",
+                     "content": prompt,
+                 }
+             ],
+             "max_tokens": 4095,
+         }
+
+
+         # OpenAI API key
+         api_key_oi = os.getenv("API_KEY_OPENAI")
+
+         # Request headers
+         headers = {
+             'Authorization': f'Bearer {api_key_oi}',
+             'Content-Type': 'application/json',
+         }
+
+         # URL of the OpenAI API endpoint
+         url = "https://api.openai.com/v1/chat/completions"
+
+         # Send the request to OpenAI
+         response = requests.post(url, headers=headers, json=payload)
+
+         # Check the response and return the result
+         if response.status_code == 200:
+             response_json = response.json()
+             try:
+                 # Try to extract the text from the response
+                 prompt = response_json["choices"][0]["message"]["content"]
+                 print(f'Generation {key} gpt: {prompt}')
+             except Exception as e:
+                 print(f"Error processing the image response: {e}")
+         else:
+             # If an error occurred, report the error message
+             print(f"Error: {response.status_code} - {response.text}")
+
+
+     API_TOKEN = random.choice([os.getenv("HF_READ_TOKEN"), os.getenv("HF_READ_TOKEN_2"), os.getenv("HF_READ_TOKEN_3"), os.getenv("HF_READ_TOKEN_4"), os.getenv("HF_READ_TOKEN_5")])  # it is free
+     headers = {"Authorization": f"Bearer {API_TOKEN}"}
+
+     prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
+     print(f'\033[1mGeneration {key} translation:\033[0m {prompt}')
+
+     prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
+     print(f'\033[1mGeneration {key}:\033[0m {prompt}')
+     API_URL = mmodels[model]
+     if model == 'Animagine XL 2.0':
+         prompt = f"Anime. {prompt}"
+     if model == 'Anime Detailer XL':
+         prompt = f"Anime. {prompt}"
+     if model == 'Disney':
+         prompt = f"Disney style. {prompt}"
+
+
+
+
      payload = {
-         "inputs": enhanced_prompt,
-         "is_negative": negative_prompt,
+         "inputs": prompt,
+         "is_negative": is_negative,
          "steps": steps,
          "cfg_scale": cfg_scale,
-         "sampler": sampler,
          "seed": seed if seed != -1 else random.randint(1, 1000000000),
          "strength": strength
-     }
-
-     # Get the token from the environment variables
-     api_token = os.getenv("HF_API_TOKEN")
-     headers = {"Authorization": f"Bearer {api_token}"}
-
+     }
+
+     response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
+     if response.status_code != 200:
+         print(f"Error: failed to get an image. Response status: {response.status_code}")
+         print(f"Response content: {response.text}")
+         if response.status_code == 503:
+             raise gr.Error(f"{response.status_code} : The model is being loaded")
+             return None
+         raise gr.Error(f"{response.status_code}")
+         return None
+
      try:
-         image = get_image_from_api(enhanced_prompt, model, headers, payload)
+         image_bytes = response.content
+         image = Image.open(io.BytesIO(image_bytes))
+         print(f'\033[1mGeneration {key} finished!\033[0m ({prompt})')
          return image
      except Exception as e:
-         raise gr.Error(f"Error while generating the image: {e}")
+         print(f"Error while trying to open the image: {e}")
+         return None

  css = """
  * {}
  footer {visibility: hidden !important;}
  """

- # Build the Gradio interface
- with gr.Blocks(css=css) as dalle_interface:
+ with gr.Blocks(css=css) as dalle:
      with gr.Tab("Basic settings"):
          with gr.Row():
-             text_prompt = gr.Textbox(label="Prompt", placeholder="Image description", lines=3)
-             model = gr.Radio(label="Model", value="DALL-E 3 XL", choices=list(MODEL_URLS.keys()))
+             with gr.Column(elem_id="prompt-container"):
+                 with gr.Row():
+                     text_prompt = gr.Textbox(label="Prompt", placeholder="Image description", lines=3, elem_id="prompt-text-input")
+                 with gr.Row():
+                     model = gr.Radio(label="Model", value="DALL-E 3 XL", choices=list(mmodels.keys()))
+
+

      with gr.Tab("Advanced settings"):
-         negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not appear in the image", lines=3)
-         steps = gr.Slider(label="Sampling steps", value=35, minimum=1, maximum=100, step=1)
-         cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=1)
-         method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
-         strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
-         seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
-         use_gpt = gr.Checkbox(label="Use GPT to enhance prompt", value=False)
+         with gr.Row():
+             negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="What should not appear in the image", value="[deformed | disfigured], poorly drawn, [bad : wrong] anatomy, [extra | missing | floating | disconnected] limb, (mutated hands and fingers), blurry, text, fuzziness", lines=3, elem_id="negative-prompt-text-input")
+         with gr.Row():
+             steps = gr.Slider(label="Sampling steps", value=35, minimum=1, maximum=100, step=1)
+         with gr.Row():
+             cfg = gr.Slider(label="CFG Scale", value=7, minimum=1, maximum=20, step=1)
+         with gr.Row():
+             method = gr.Radio(label="Sampling method", value="DPM++ 2M Karras", choices=["DPM++ 2M Karras", "DPM++ SDE Karras", "Euler", "Euler a", "Heun", "DDIM"])
+         with gr.Row():
+             strength = gr.Slider(label="Strength", value=0.7, minimum=0, maximum=1, step=0.001)
+         with gr.Row():
+             seed = gr.Slider(label="Seed", value=-1, minimum=-1, maximum=1000000000, step=1)
+         with gr.Row():
+             gpt = gr.Checkbox(label="ChatGPT")

      with gr.Tab("Information"):
-         gr.Textbox(label="Prompt template", value="{prompt} | ultra detail, ultra elaboration, ultra quality, perfect.")
+         with gr.Row():
+             gr.Textbox(label="Prompt template", value="{prompt} | ultra detail, ultra elaboration, ultra quality, perfect.")

      with gr.Row():
-         generate_button = gr.Button("Generate Image")
-
+         text_button = gr.Button("Generate", variant='primary', elem_id="gen-button")
+
      with gr.Row():
-         image_output = gr.Image(type="pil", label="Generated Image")
-
-     generate_button.click(
-         generate_image,
-         inputs=[text_prompt, model, negative_prompt, steps, cfg, method, seed, strength, use_gpt],
-         outputs=image_output
-     )
+         image_output = gr.Image(type="pil", label="Image", elem_id="gallery")
+
+     text_button.click(query, inputs=[text_prompt, model, negative_prompt, steps, cfg, method, seed, strength, gpt], outputs=image_output)

- # Launch the interface
- dalle_interface.launch(show_api=False, share=False)
+ dalle.launch(show_api=False, share=False)
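
For reference, the prompt-enhancement branch added in this commit is an ordinary OpenAI chat-completions request. Below is a minimal standalone sketch of that call, assuming the API_KEY_OPENAI environment variable is set as in the Space; the instruction text and example prompt are illustrative, not taken verbatim from the commit.

import os

import requests

# Build the same kind of request query() sends when the "ChatGPT" checkbox is on.
payload = {
    "model": "gpt-4-vision-preview",
    "messages": [
        {"role": "user", "content": "Improve the following image-generation prompt. Reply with only the improved prompt."},
        {"role": "user", "content": "a watercolor fox in a snowy forest"},  # illustrative prompt
    ],
    "max_tokens": 4095,
}
headers = {
    "Authorization": f"Bearer {os.getenv('API_KEY_OPENAI')}",
    "Content-Type": "application/json",
}

response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
if response.status_code == 200:
    # The improved prompt is the message content of the first choice.
    improved_prompt = response.json()["choices"][0]["message"]["content"]
    print(improved_prompt)
else:
    print(f"Error: {response.status_code} - {response.text}")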
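
The image generation itself is a plain POST to the Hugging Face Inference API. A minimal standalone sketch of that request pattern follows, assuming the HF_READ_TOKEN environment variable is set as in the Space; the prompt text and output filename are illustrative only.

import io
import os

import requests
from PIL import Image

API_URL = "https://api-inference.huggingface.co/models/openskyml/dalle-3-xl"
headers = {"Authorization": f"Bearer {os.getenv('HF_READ_TOKEN')}"}

payload = {
    "inputs": "a watercolor fox in a snowy forest | ultra detail, ultra quality",  # illustrative prompt
    "steps": 35,
    "cfg_scale": 7,
    "seed": 42,
    "strength": 0.7,
}

# On success the endpoint returns raw image bytes; a 503 usually means the
# model is still being loaded, which is the case query() reports to the UI.
response = requests.post(API_URL, headers=headers, json=payload, timeout=100)
response.raise_for_status()

image = Image.open(io.BytesIO(response.content))
image.save("result.png")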