nevreal committed
Commit 6031dc7 · verified · 1 parent: 84b3fee

Update app.py

Files changed (1): app.py (+33, -60)
app.py CHANGED
@@ -1,71 +1,44 @@
 import gradio as gr
-import requests
-import os
-import time
-
-# Environment variables for API details
-API_TOKEN = os.getenv("HUGGINGFACE_API_TOKEN") # Fetching the API token from environment variable
-
-# Function to query Hugging Face API
-def query_huggingface_api(api_url, prompt):
-    headers = {"Authorization": f"Bearer {API_TOKEN}"}
-    data = {"inputs": prompt}
-    response = requests.post(api_url, headers=headers, json=data)
-
-    if response.status_code == 200:
-        return response.content, None # Return the image and no error
+from diffusers import StableDiffusionPipeline
+import torch
+
+# Function to automatically switch between GPU and CPU
+def load_model(model_id):
+    if torch.cuda.is_available():
+        device = "cuda"
+        info = "Running on GPU (CUDA)"
     else:
-        return None, f"Error {response.status_code}: {response.text}" # Return None and the error message
-
-# Gradio function for generating the image
-def generate_image(api_url, prompt):
-    # Attempt to query the API with retry logic for loading models
-    for attempt in range(5): # Try up to 5 times
-        result, error = query_huggingface_api(f"https://api-inference.huggingface.co/models/{api_url}", prompt)
-
-        if result:
-            return result, None
-        elif "Model is currently loading" in error:
-            estimated_time = float(error.split("estimated_time\":")[1].split("}")[0]) # Extract estimated time from error message
-            time.sleep(estimated_time + 5) # Wait for the model to load, with an additional buffer time
-        else:
-            return None, error # Return the error if it's not a loading issue
+        device = "cpu"
+        info = "Running on CPU"
+
+    # Load the model dynamically on the correct device
+    pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16 if device == "cuda" else torch.float32)
+    pipe = pipe.to(device)
+
+    return pipe, info
 
-    return None, "Model is still loading after multiple attempts. Please try again later." # Final error if all attempts fail
+# Function for text-to-image generation with dynamic model ID and device info
+def generate_image(model_id, prompt):
+    pipe, info = load_model(model_id)
+    image = pipe(prompt).images[0]
+    return image, info
 
-# Create Gradio Blocks Interface
-with gr.Blocks(theme="nevreal/blues") as demo:
-    gr.Markdown(
-        """
-        # Text to Image Generator
-        Enter a text prompt, and the custom model will generate an image.
-        """
-    )
+# Create the Gradio interface
+with gr.Blocks() as demo:
+    gr.Markdown("## Custom Text-to-Image Generator")
 
     with gr.Row():
         with gr.Column():
-            text_input = gr.Textbox(
-                label="Enter your prompt",
-                placeholder="Type something here...",
-                value=""
-            )
-            model_input = gr.Textbox(
-                label="Model URL",
-                placeholder="Enter the model URL...",
-                value=""
-            )
+            model_id = gr.Textbox(label="Enter Model ID (e.g., nevreal/vMurderDrones)", placeholder="Model ID")
+            prompt = gr.Textbox(label="Enter your prompt", placeholder="Describe the image you want to generate")
             generate_btn = gr.Button("Generate Image")
 
         with gr.Column():
-            image_output = gr.Image(label="Generated Image")
-            error_output = gr.Textbox(label="Error", interactive=False)
-
-    # Define the action for the button
-    generate_btn.click(
-        fn=generate_image,
-        inputs=[model_input, text_input], # Pass both model URL and prompt
-        outputs=[image_output, error_output]
-    )
+            output_image = gr.Image(label="Generated Image")
+            device_info = gr.Markdown() # To display if GPU or CPU is used
+
+    # Link the button to the image generation function
+    generate_btn.click(fn=generate_image, inputs=[model_id, prompt], outputs=[output_image, device_info])
 
-# Launch the Gradio Blocks WebUI
-demo.launch(share=True, debug=True)
+# Launch the app
+demo.launch()
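
As a quick way to verify the new behavior outside the Gradio UI, the sketch below mirrors the device switch from the committed load_model and runs a single generation directly. It is a minimal sketch, not part of the commit: the checkpoint ID, prompt, and output filename are placeholders (any diffusers-format Stable Diffusion checkpoint should work; the nevreal/vMurderDrones ID in the textbox label is only the in-app example), and it assumes torch and diffusers are installed.

# Sketch: exercise the committed logic directly, without launching Gradio.
# The model ID below is a placeholder; substitute any diffusers-format checkpoint.
import torch
from diffusers import StableDiffusionPipeline

def load_model(model_id):
    # Same GPU/CPU switch and dtype choice as the updated app.py
    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32
    pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=dtype)
    return pipe.to(device), f"Running on {device.upper()}"

if __name__ == "__main__":
    pipe, info = load_model("CompVis/stable-diffusion-v1-4")  # placeholder checkpoint
    print(info)
    image = pipe("a lighthouse at sunset, oil painting").images[0]
    image.save("smoke_test.png")  # placeholder output path

Note that the committed generate_image calls load_model on every button press, which keeps the app simple but re-instantiates the pipeline each time; caching the pipeline per model ID would avoid repeated loads for back-to-back generations.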