idlebg committed
Commit cd9f41f
1 Parent(s): 10fe60a

Upload 3 files

Files changed (3):
  1. README.md +7 -9
  2. app.py +107 -1
  3. requirements.txt +10 -0
README.md CHANGED
@@ -1,13 +1,11 @@
- ---
- title: FFusion.AI Beta Playground
- emoji: 👀
- colorFrom: pink
+ ---
+ title: FFusion.AI -beta- Playground
+ emoji: 😻
+ colorFrom: purple
  colorTo: pink
  sdk: gradio
- sdk_version: 3.35.2
+ sdk_version: 3.29.0
  app_file: app.py
- pinned: false
+ pinned: true
  license: creativeml-openrail-m
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
 
 
app.py CHANGED
@@ -1,3 +1,109 @@
  import gradio as gr
+ import numpy as np
+ from diffusers import StableDiffusionPipeline, DDPMScheduler, DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler
+ import torch
+ import PIL.Image
+ import datetime
 
- gr.Interface.load("models/FFusion/FFusion-BaSE").launch()
+ # Check environment
+ print(f"Is CUDA available: {torch.cuda.is_available()}")
+ if torch.cuda.is_available():
+     print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
+
+ # Fall back to CPU so the app can still start on machines without a GPU.
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ schedulers = {
+     "DDPMScheduler": DDPMScheduler,
+     "DDIMScheduler": DDIMScheduler,
+     "PNDMScheduler": PNDMScheduler,
+     "LMSDiscreteScheduler": LMSDiscreteScheduler,
+     "EulerDiscreteScheduler": EulerDiscreteScheduler,
+     "EulerAncestralDiscreteScheduler": EulerAncestralDiscreteScheduler,
+     "DPMSolverMultistepScheduler": DPMSolverMultistepScheduler
+ }
+
+ class Model:
+     def __init__(self, modelID, schedulerName):
+         self.modelID = modelID
+         # float16 is only supported on CUDA; use float32 on CPU.
+         dtype = torch.float16 if device == "cuda" else torch.float32
+         self.pipe = StableDiffusionPipeline.from_pretrained(modelID, torch_dtype=dtype)
+         self.pipe = self.pipe.to(device)
+         self.pipe.scheduler = schedulers[schedulerName].from_config(self.pipe.scheduler.config)
+         if device == "cuda":
+             self.pipe.enable_xformers_memory_efficient_attention()
+
+     def process(self,
+                 prompt: str,
+                 negative_prompt: str,
+                 guidance_scale: int = 6,
+                 num_images: int = 1,
+                 num_steps: int = 35):
+         # Draw a fresh random seed per request so repeated prompts give new images.
+         seed = np.random.randint(0, np.iinfo(np.int32).max)
+         generator = torch.Generator(device).manual_seed(seed)
+         print(datetime.datetime.now())
+         print(self.modelID)
+         print(prompt)
+         print(negative_prompt)
+         with torch.inference_mode():
+             images = self.pipe(prompt=prompt,
+                                negative_prompt=negative_prompt,
+                                guidance_scale=guidance_scale,
+                                num_images_per_prompt=num_images,
+                                num_inference_steps=num_steps,
+                                generator=generator,
+                                height=768,
+                                width=768).images
+         # The pipeline already returns PIL images; keep the output type explicit.
+         images = [PIL.Image.fromarray(np.array(img)) for img in images]
+         return images
+
+
+ def generateImage(prompt, n_prompt, modelName, schedulerName):
+     model = models[modelName]
+     # Apply the scheduler selected in the UI; previously the dropdown value was ignored.
+     model.pipe.scheduler = schedulers[schedulerName].from_config(model.pipe.scheduler.config)
+     images = model.process(prompt, n_prompt)
+     return np.array(images[0])  # Return the first image
+
+ def create_demo():
+     # Settings are defined here
+     prompt = gr.inputs.Textbox(label='Prompt', default='a sprinkled donut sitting on top of a table, blender donut tutorial, colorful hyperrealism, everything is made of candy, hyperrealistic digital painting, covered in sprinkles and crumbs, vibrant colors hyper realism, colorful smoke explosion background')
+     n_prompt = gr.inputs.Textbox(
+         label='Negative Prompt',
+         default='(disfigured), ((bad art)), ((deformed)), ((extra limbs)), (((duplicate))), ((morbid)), ((mutilated)), out of frame, extra fingers, mutated hands, poorly drawn eyes, ((poorly drawn hands)), ((poorly drawn face)), (((mutation))), ((ugly)), blurry, ((bad anatomy)), (((bad proportions))), cloned face, body out of frame, out of frame, bad anatomy, gross proportions, (malformed limbs), ((missing arms)), ((missing legs)), (((extra arms))), (((extra legs))), (fused fingers), (too many fingers), (((long neck))), Deformed, blurry'
+     )
+     modelName = gr.inputs.Dropdown(choices=list(models.keys()),
+                                    label="FFusion Test Model",
+                                    default=list(models.keys())[0])  # Set the default model
+     schedulerName = gr.inputs.Dropdown(choices=list(schedulers.keys()),
+                                        label="Scheduler",
+                                        default=list(schedulers.keys())[0])  # Set the default scheduler
+     inputs = [prompt, n_prompt, modelName, schedulerName]
+
+     # Images are displayed here
+     result = gr.outputs.Image(label='Output', type="numpy")
+
+     # Create the interface; gr.Interface has no `layout` argument in Gradio 3.x,
+     # so the heading goes in `title` instead.
+     iface = gr.Interface(
+         fn=generateImage,
+         inputs=inputs,
+         outputs=result,
+         title="FFusion.AI - beta Playground"
+     )
+
+     return iface
+
+ if __name__ == '__main__':
+     models = {
+         "FFUSION.ai-768-BaSE": Model("FFusion/FFusion-BaSE", list(schedulers.keys())[0]),
+         "FFUSION.ai-v2.1-768-BaSE-alpha-preview": Model("FFusion/di.FFUSION.ai-v2.1-768-BaSE-alpha", list(schedulers.keys())[0]),
+         "FFusion.ai.Beta-512": Model("FFusion/di.ffusion.ai.Beta512", list(schedulers.keys())[0])
+     }
+     demo = create_demo()
+     demo.launch()
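
For reference, a minimal standalone sketch of the scheduler-swap pattern the new app.py relies on. Assumptions: a CUDA machine with the pinned diffusers/torch versions installed; the model ID and sampler come from this commit, while the prompt and output filename are illustrative only.

import torch
from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler

# Load one of the checkpoints added in this commit and move it to the GPU.
pipe = StableDiffusionPipeline.from_pretrained(
    "FFusion/FFusion-BaSE", torch_dtype=torch.float16
).to("cuda")

# from_config copies the trained noise-schedule parameters from the current
# scheduler, so swapping only changes the sampling algorithm.
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

image = pipe(
    "a sprinkled donut sitting on top of a table",  # illustrative prompt
    num_inference_steps=35,
    guidance_scale=6,
    height=768,
    width=768,
).images[0]
image.save("donut.png")  # illustrative output path
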
requirements.txt ADDED
@@ -0,0 +1,10 @@
+ --extra-index-url https://download.pytorch.org/whl/cu113
+ torch
+ accelerate==0.18.0
+ diffusers==0.16.0
+ gradio==3.30.0
+ safetensors==0.3.0
+ torchvision==0.15.1
+ transformers==4.28.1
+ xformers==0.0.18
+ numpy
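
Before launching, a quick environment sanity check (a hypothetical snippet, not part of the commit) can confirm that the pinned torch and xformers builds are usable:

import torch
import xformers

print(torch.__version__)          # expect a CUDA build, per the cu113 extra index
print(torch.cuda.is_available())  # True on the intended GPU runtime
print(xformers.__version__)       # 0.0.18, per requirements.txt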