import random
from concurrent.futures import ThreadPoolExecutor

import gradio as gr

from MonsterAPIClient import MClient

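# MClient is assumed to read MonsterAPI credentials from its default
# configuration (e.g. an API-key environment variable); see the
# MonsterAPIClient module for the exact setup.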
client = MClient()

def generate_model_output(model: str, input_text: str, neg_prompt: str, samples: int, steps: int,
                          aspect_ratio: str, guidance_scale: float, random_seed: str):
    """
    Generate output from a specific MonsterAPI model.

    Parameters:
        model (str): The name of the model to call.
        input_text (str): The input text prompt.
        neg_prompt (str): The negative text prompt.
        samples (int): Number of images to generate.
        steps (int): Sampling steps per image.
        aspect_ratio (str): Aspect ratio of the generated images.
        guidance_scale (float): Prompt guidance scale.
        random_seed (str): Seed used to initialize image generation.

    Returns:
        The model's 'output' payload (typically a list of image URLs),
        or an error string if the request fails.
    """
    try:
        response = client.get_response(model, {
            "prompt": input_text,
            "negprompt": neg_prompt,
            "samples": samples,
            "steps": steps,
            "aspect_ratio": aspect_ratio,
            "guidance_scale": guidance_scale,
            "seed": random_seed,
        })
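        # get_response queues the job; wait_and_get_result blocks on the
        # returned process_id until the generation finishes.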
        output = client.wait_and_get_result(response['process_id'])
        return output.get('output', "No output available.")
    except Exception as e:
        return f"Error occurred: {str(e)}"
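
# A quick sanity check (hypothetical prompt values) can be run without the UI:
#   generate_model_output('txt2img', 'a red barn at dusk', 'blurry, low quality',
#                         1, 30, 'square', 7.5, '42')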

def generate_output(input_text: str, neg_prompt: str, samples: int, steps: int,
                    aspect_ratio: str, guidance_scale: float, random_seed: str):
    with ThreadPoolExecutor() as executor:
        # Schedule the function calls asynchronously
        future_sdxl_base = executor.submit(generate_model_output, 'sdxl-base', input_text, neg_prompt, samples, steps,
                                           aspect_ratio, guidance_scale, random_seed)
        future_txt2img = executor.submit(generate_model_output, 'txt2img', input_text, neg_prompt, samples, steps,
                                         aspect_ratio, guidance_scale, random_seed)
        
        # Get the results from the completed futures
        sdxl_base_output = future_sdxl_base.result()
        txt2img_output = future_txt2img.result()
    
    return [sdxl_base_output, txt2img_output]
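
# Design note: running both models via ThreadPoolExecutor keeps the two
# MonsterAPI jobs in flight concurrently, so a request takes roughly as long
# as the slower model rather than the sum of both.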

input_components = [
    gr.Textbox(label="Input Prompt"),
    gr.Textbox(label="Negative Prompt"),
    gr.Slider(label="No. of Images to Generate", minimum=1, maximum=3, step=1, value=1),
    gr.Slider(label="Sampling Steps per Image", minimum=30, maximum=40, step=1, value=30),
    gr.Dropdown(label="Aspect Ratio", choices=["square", "landscape", "portrait"], value="square"),
    gr.Slider(label="Prompt Guidance Scale", minimum=0.1, maximum=20.0, value=7.5),
    gr.Textbox(label="Random Seed", value=str(random.randint(0, 1000000))),
]
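
# Note that the seed Textbox default is drawn once, when the UI is built;
# users can edit it per request for reproducible generations.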

output_component_sdxl_base = gr.Gallery(label="Stable Diffusion V2.0 Output", container=True)
output_component_txt2img = gr.Gallery(label="Stable Diffusion V1.5 Output", container=True)
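# The two galleries are fed by the 'sdxl-base' and 'txt2img' MonsterAPI
# models, in that order; rename the labels if the deployed versions differ.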

interface = gr.Interface(
    fn=generate_output,
    inputs=input_components,
    outputs=[output_component_sdxl_base, output_component_txt2img],
    live=False,
    title="Stable Diffusion Evaluation powered by MonsterAPI",
    description="""This HuggingFace Space is designed to help you compare the outputs of Stable Diffusion V1.5 and V2.0. The models are hosted on [MonsterAPI](https://monsterapi.ai/?utm_source=llm-evaluation&utm_medium=referral) - an AI infrastructure platform built for easily accessing AI models via scalable APIs and [finetuning LLMs](https://docs.monsterapi.ai/fine-tune-a-large-language-model-llm) at very low cost with a no-code implementation. MonsterAPI is powered by our low-cost, highly scalable GPU computing platform - [Q Blocks](https://www.qblocks.cloud?utm_source=llm-evaluation&utm_medium=referral). The models are accessible via scalable REST APIs. Check out our [API documentation](https://documenter.getpostman.com/view/13759598/2s8ZDVZ3Yi) to integrate them into your AI-powered applications.""",
    css="body {background-color: black}"
)

# Launch the Gradio app
if __name__ == "__main__":
    interface.launch()
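# Note: launch(share=True) would additionally expose a temporary public URL,
# and interface.queue() can be enabled first if many users hit the app at once.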