#!/usr/bin/env python
# coding: utf-8

"""Gradio front-end for DALL·E mini.

Sends the user's text prompt to a remote inference backend (URL taken from
the ``BACKEND_SERVER`` environment variable), decodes the returned
base64-encoded images, and displays them as a captioned strip.
"""

import base64
import os
from io import BytesIO

import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import requests
from PIL import Image

from dalle_mini.helpers import captioned_strip

# Fail fast at startup if the backend endpoint is not configured.
backend_url = os.environ["BACKEND_SERVER"]

# Abort a hung backend call instead of blocking the UI worker forever.
# Generation is slow, so allow a generous window (seconds).
REQUEST_TIMEOUT = 600


class ServiceError(Exception):
    """Raised when the inference backend answers with a non-200 status.

    Attributes:
        status_code: the HTTP status code returned by the backend.
    """

    def __init__(self, status_code):
        # Pass a message to Exception so str(exc) / logs show the status.
        super().__init__(f"backend returned HTTP status {status_code}")
        self.status_code = status_code


def get_images_from_ngrok(prompt):
    """POST *prompt* to the backend and return the generated images.

    Args:
        prompt: free-form text prompt forwarded to the model server.

    Returns:
        list of ``PIL.Image.Image`` decoded from the backend's base64 payload.

    Raises:
        ServiceError: if the backend responds with a non-200 status.
        requests.exceptions.RequestException: on network failure or timeout.
    """
    r = requests.post(
        backend_url,
        json={"prompt": prompt},
        timeout=REQUEST_TIMEOUT,  # don't hang forever on a dead backend
    )
    if r.status_code != 200:
        raise ServiceError(r.status_code)
    encoded = r.json()["images"]
    return [Image.open(BytesIO(base64.b64decode(img))) for img in encoded]


def run_inference(prompt):
    """Run one generation round and build the three Gradio outputs.

    Returns:
        (title_html, image_strip_or_None, description_html) — on backend
        failure the image is ``None`` and the title carries an error message.
    """
    try:
        images = get_images_from_ngrok(prompt)
        predictions = captioned_strip(images)
        output_title = f"""
Best predictions
We asked our model to generate 128 candidates for your prompt:
{prompt}
We then used a pre-trained CLIP model to score them according to the similarity of the text and the image representations.
This is the result:
"""
        output_description = """Read our full report for more details on how this works.
Created with DALL·E mini
"""
    except ServiceError:
        # Degrade gracefully: show an apology instead of crashing the UI.
        output_title = """ Sorry, there was an error retrieving the images. Please, try again later or contact us here. """
        predictions = None
        output_description = ""
    return (output_title, predictions, output_description)


outputs = [
    gr.outputs.HTML(label=""),  # To be used as title
    gr.outputs.Image(label=""),
    gr.outputs.HTML(label=""),  # Additional text that appears in the screenshot
]

description = """
Welcome to DALL·E-mini, a text-to-image generation model.
"""

gr.Interface(
    run_inference,
    inputs=[gr.inputs.Textbox(label="Prompt")],
    outputs=outputs,
    title="DALL·E mini",
    description=description,
    article="DALLE·mini by Boris Dayma et al. | GitHub\n",
    layout="vertical",
    theme="huggingface",
    examples=[
        ["an armchair in the shape of an avocado"],
        ["snowy mountains by the sea"],
    ],
    allow_flagging=False,
    live=False,
    # server_name="0.0.0.0",  # Bind to all interfaces
).launch()