Eyalgut committed
Commit c2f35d3
1 Parent(s): a0a8de7

Upload 2 files

Files changed (2):
  1. app.py +110 -0
  2. requirements.txt +7 -0
app.py ADDED
@@ -0,0 +1,110 @@
+ import gradio as gr
+ import os
+ hf_token = os.environ.get("HF_TOKEN")
+ import spaces
+ from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
+ import torch
+ import time
+
+ # Placeholder used below to re-attach attributes that the compiled UNet
+ # no longer exposes but that the SDXL pipeline reads at inference time.
+ class Dummy():
+     pass
+
+ # Load pipeline
+ default_negative_prompt = "Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers"
+ model_id = "briaai/BRIA-2.2"
+ scheduler = EulerAncestralDiscreteScheduler(
+     beta_start=0.00085,
+     beta_end=0.012,
+     beta_schedule="scaled_linear",
+     num_train_timesteps=1000,
+     steps_offset=1
+ )
+ pipe = StableDiffusionXLPipeline.from_pretrained(model_id, torch_dtype=torch.float16, scheduler=scheduler).to("cuda")
+
+ print("Optimizing BRIA-2.2 - this could take a while")
+ t = time.time()
+ pipe.unet = torch.compile(
+     pipe.unet, mode="reduce-overhead", fullgraph=True  # ~600 secs compilation
+ )
+ # Warm-up generation to trigger compilation once at startup
+ with torch.no_grad():
+     outputs = pipe(
+         prompt="an apple",
+         num_inference_steps=30,
+     )
+
+ # This will avoid future compilations on different shapes
+ unet_compiled = torch._dynamo.run(pipe.unet)
+ unet_compiled.config = pipe.unet.config
+ unet_compiled.add_embedding = Dummy()
+ unet_compiled.add_embedding.linear_1 = Dummy()
+ unet_compiled.add_embedding.linear_1.in_features = pipe.unet.add_embedding.linear_1.in_features
+ pipe.unet = unet_compiled
+
+ print(f"Optimizing finished successfully after {time.time()-t} secs")
+
+ @spaces.GPU(enable_queue=True)
+ def infer(prompt):
+     # Log the incoming prompt
+     print(f"""
+     ---
+     {prompt}
+     """)
+
+     # generator = torch.Generator("cuda").manual_seed(555)
+     t = time.time()
+     image = pipe(prompt, num_inference_steps=30, negative_prompt=default_negative_prompt).images[0]
+     print(f'gen time is {time.time()-t} secs')
+
+     # Future:
+     # Add an option for the number of steps
+     # if nsfw:
+     #     raise gr.Error("Generated image is NSFW")
+
+     return image
+
+ css = """
+ #col-container{
+     margin: 0 auto;
+     max-width: 580px;
+ }
+ """
+ with gr.Blocks(css=css) as demo:
+     with gr.Column(elem_id="col-container"):
+         gr.HTML("""
+         <h2 style="text-align: center;">
+         BRIA-2.2
+         </h2>
+         """)
+         with gr.Group():
+             with gr.Column():
+                 prompt_in = gr.Textbox(label="Prompt", value="A red colored sports car")
+                 submit_btn = gr.Button("Generate")
+             result = gr.Image(label="BRIA-2.2 Result")
+
+         # gr.Examples(
+         #     examples = [
+         #         "Dragon, digital art, by Greg Rutkowski",
+         #         "Armored knight holding sword",
+         #         "A flat roof villa near a river with black walls and huge windows",
+         #         "A calm and peaceful office",
+         #         "Pirate guinea pig"
+         #     ],
+         #     fn = infer,
+         #     inputs = [prompt_in],
+         #     outputs = [result]
+         # )
+
+         submit_btn.click(
+             fn = infer,
+             inputs = [prompt_in],
+             outputs = [result]
+         )
+
+ demo.queue().launch(show_api=False)
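
The commented-out generator line inside infer() hints at seeded generation. A minimal sketch of how a fixed seed could be threaded through to the pipeline for reproducible outputs; the infer_seeded name and the seed parameter are illustrative assumptions, not part of this commit (diffusers pipelines do accept a generator argument):

    import torch

    # Hypothetical seeded variant of infer(); the committed app does not expose a seed.
    def infer_seeded(prompt, seed=555):
        generator = torch.Generator("cuda").manual_seed(seed)
        # Passing a generator makes the sampling deterministic for a given seed
        image = pipe(
            prompt,
            num_inference_steps=30,
            negative_prompt=default_negative_prompt,
            generator=generator,
        ).images[0]
        return image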
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ transformers
+ diffusers
+ torch
+ torchvision
+ accelerate
+ spaces
+ gradio
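
The requirements are unpinned, and app.py moves the pipeline to "cuda" at import time, so a quick sanity check that the installed torch build actually sees a GPU can save a failed Space start. A minimal sketch, assuming a CUDA-capable environment:

    import torch

    # pipe.to("cuda") in app.py will raise if no CUDA device is visible
    assert torch.cuda.is_available(), "No CUDA device visible"
    print(torch.cuda.get_device_name(0))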