# NOTE(review): removed non-code paste residue that preceded the script
# (a "File size" header, stray commit hashes 1deaa99/422d177, and a column
# of file-viewer line numbers). It was not valid Python and broke parsing.
import gradio as gr
import numpy as np
import random
import multiprocessing
import subprocess
import sys
import time
import signal
import json
import os
import requests

from loguru import logger
from decouple import config

from pathlib import Path
from PIL import Image
import io

# Deployment settings read via python-decouple (environment variables or a
# .env file); config() raises UndefinedValueError when a key is missing.
URL = config('URL')            # ComfyUI prompt-queue endpoint POSTed to by start_queue()
OUTPUT_DIR = config('OUTPUT_DIR')  # folder polled for generated images (also receives input.png)
INPUT_DIR = config('INPUT_DIR')    # NOTE(review): declared but never used below — confirm intent
COMF_PATH = config('COMF_PATH')    # path to the ComfyUI entry script launched as a subprocess

import torch

#import spaces

# GPU diagnostics, printed once at import time.
# BUG FIX: the original called torch.cuda.get_device_name() unconditionally,
# which raises on CPU-only hosts even though availability was just printed.
cuda_available = torch.cuda.is_available()
print(f"Is CUDA available: {cuda_available}")
if cuda_available:
    print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
    print(torch.version.cuda)
    # Module-level name kept for any external reader; on CUDA hosts this is
    # byte-identical to the original value (the device's marketing name).
    device = torch.cuda.get_device_name(torch.cuda.current_device())
else:
    device = "cpu"
print(device)


def get_latest_image(folder):
    """Return the full path of the most recently modified image in *folder*.

    Only files ending in .png/.jpg/.jpeg (case-insensitive) are considered;
    returns None when the folder contains no image files.
    """
    image_names = [
        name for name in os.listdir(folder)
        if name.lower().endswith(('.png', '.jpg', '.jpeg'))
    ]
    if not image_names:
        return None
    # Stable sort by modification time; the last entry is the newest
    # (ties keep directory-listing order, matching the original behavior).
    image_names.sort(key=lambda name: os.path.getmtime(os.path.join(folder, name)))
    return os.path.join(folder, image_names[-1])


def start_queue(prompt_workflow):
    """POST *prompt_workflow* to the ComfyUI queue endpoint configured in URL."""
    payload = json.dumps({"prompt": prompt_workflow}).encode('utf-8')
    requests.post(URL, data=payload)


def check_server_ready():
    """Return True when the local ComfyUI server responds on its history endpoint.

    Any 200 response means the HTTP server is accepting requests; connection
    errors and timeouts are treated as "not ready".

    NOTE(review): this probes a hardcoded http://127.0.0.1:8188 while the rest
    of the module posts to the URL config value — confirm the two agree.
    """
    try:
        # FIX: dropped the pointless f-string prefix (no placeholders, ruff F541).
        response = requests.get("http://127.0.0.1:8188/history/123", timeout=5)
        return response.status_code == 200
    except requests.RequestException:
        return False


#@spaces.GPU
def generate_image(prompt, image):
    """Run the ComfyUI workflow *prompt* against *image*.

    Saves the input image, launches a ComfyUI server as a subprocess, queues
    the workflow, and polls OUTPUT_DIR for a newly generated image.

    Args:
        prompt: the workflow dict posted to the ComfyUI /prompt endpoint.
        image: a numpy array (as delivered by the gradio "image" input).

    Returns:
        Path of the newly generated image, or None on timeout/error.
    """
    # Save the input FIRST so it becomes part of the "previous" baseline —
    # the original captured the baseline before writing input.png into
    # OUTPUT_DIR, so the polling loop could mistake input.png for the result.
    # NOTE(review): the unused INPUT_DIR config suggests the input may belong
    # there instead — confirm against the workflow definition before moving it.
    Image.fromarray(image).save(OUTPUT_DIR + '/input.png', format='PNG')
    previous_image = get_latest_image(OUTPUT_DIR)

    # Launch ComfyUI as a subprocess.
    process = subprocess.Popen([sys.executable, COMF_PATH, "--listen", "127.0.0.1"])
    logger.debug(f'Subprocess started with PID: {process.pid}')

    try:
        # Wait for the server to come up (at most ~20 s; for-else fires the
        # TimeoutError only when the loop never broke).
        for _ in range(20):
            if check_server_ready():
                break
            time.sleep(1)
        else:
            raise TimeoutError("Server did not start in time")

        start_queue(prompt)

        # Poll for a new output image (at most 400 s).
        timeout = 400
        start_time = time.time()
        while time.time() - start_time < timeout:
            latest_image = get_latest_image(OUTPUT_DIR)
            if latest_image != previous_image:
                return latest_image
            time.sleep(1)

        raise TimeoutError("New image was not generated in time")

    except Exception as e:
        # BUG FIX: the original had `return None` inside `finally`, which both
        # swallowed the re-raised exception AND overrode the successful
        # `return latest_image`, so the function ALWAYS returned None.
        # Failures are now logged and reported as None here; the success path
        # returns the image path untouched.
        logger.error(f"Error in generate_image: {e}")
        logger.error("No new image was generated")
        return None
    finally:
        # Always shut the ComfyUI subprocess down, success or failure.
        if process.poll() is None:
            process.terminate()
            try:
                process.wait(timeout=5)
            except subprocess.TimeoutExpired:
                process.kill()


if __name__ == "__main__":
    # Gradio UI: text prompt + input image in, generated image path out.
    demo = gr.Interface(fn=generate_image, inputs=["text", "image"], outputs=["image"])
    # launch(debug=True) blocks until the server is stopped, so the log
    # lines below only run on shutdown.
    demo.launch(debug=True)
    logger.debug('demo.launch()')

    logger.info("Основной скрипт завершил работу.")