yash committed on
Commit b7bfdd0 · 1 Parent(s): fa15aee

first commit

Files changed (2)
  1. requirements.txt +99 -0
  2. txt_to_img.py +206 -0
requirements.txt ADDED
@@ -0,0 +1,99 @@
+ accelerate==0.30.1
+ aiofiles==23.2.1
+ altair==5.3.0
+ annotated-types==0.6.0
+ anyio==4.3.0
+ attrs==23.2.0
+ certifi==2024.2.2
+ charset-normalizer==3.3.2
+ click==8.1.7
+ contourpy==1.2.1
+ cycler==0.12.1
+ diffusers==0.27.2
+ dnspython==2.6.1
+ email_validator==2.1.1
+ exceptiongroup==1.2.1
+ fastapi==0.111.0
+ fastapi-cli==0.0.3
+ ffmpy==0.3.2
+ filelock==3.14.0
+ fonttools==4.51.0
+ fsspec==2024.3.1
+ gradio==3.50.2
+ gradio_client==0.6.1
+ h11==0.14.0
+ httpcore==1.0.5
+ httptools==0.6.1
+ httpx==0.27.0
+ huggingface-hub==0.23.0
+ idna==3.7
+ importlib_metadata==7.1.0
+ importlib_resources==6.4.0
+ Jinja2==3.1.4
+ jsonschema==4.22.0
+ jsonschema-specifications==2023.12.1
+ kiwisolver==1.4.5
+ markdown-it-py==3.0.0
+ MarkupSafe==2.1.5
+ matplotlib==3.8.4
+ mdurl==0.1.2
+ mpmath==1.3.0
+ networkx==3.3
+ numpy==1.26.4
+ nvidia-cublas-cu12==12.1.3.1
+ nvidia-cuda-cupti-cu12==12.1.105
+ nvidia-cuda-nvrtc-cu12==12.1.105
+ nvidia-cuda-runtime-cu12==12.1.105
+ nvidia-cudnn-cu12==8.9.2.26
+ nvidia-cufft-cu12==11.0.2.54
+ nvidia-curand-cu12==10.3.2.106
+ nvidia-cusolver-cu12==11.4.5.107
+ nvidia-cusparse-cu12==12.1.0.106
+ nvidia-nccl-cu12==2.20.5
+ nvidia-nvjitlink-cu12==12.4.127
+ nvidia-nvtx-cu12==12.1.105
+ orjson==3.10.3
+ packaging==24.0
+ pandas==2.2.2
+ pillow==10.3.0
+ psutil==5.9.8
+ pydantic==2.7.1
+ pydantic_core==2.18.2
+ pydub==0.25.1
+ Pygments==2.18.0
+ pyparsing==3.1.2
+ python-dateutil==2.9.0.post0
+ python-dotenv==1.0.1
+ python-multipart==0.0.9
+ pytz==2024.1
+ PyYAML==6.0.1
+ referencing==0.35.1
+ regex==2024.5.10
+ requests==2.31.0
+ rich==13.7.1
+ rpds-py==0.18.1
+ safetensors==0.4.3
+ semantic-version==2.10.0
+ shellingham==1.5.4
+ six==1.16.0
+ sniffio==1.3.1
+ starlette==0.37.2
+ sympy==1.12
+ tokenizers==0.19.1
+ toolz==0.12.1
+ torch==2.3.0
+ torchvision==0.18.0
+ tqdm==4.66.4
+ transformers==4.40.2
+ triton==2.3.0
+ typer==0.12.3
+ typing_extensions==4.11.0
+ tzdata==2024.1
+ ujson==5.10.0
+ urllib3==2.2.1
+ uvicorn==0.29.0
+ uvloop==0.19.0
+ watchfiles==0.21.0
+ websockets==11.0.3
+ xformers==0.0.26.post1
+ zipp==3.18.1
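
The pins above can be sanity-checked against the active environment before loading the pipeline. A minimal sketch (not part of this commit; the package selection below is an illustrative subset of requirements.txt) using only the standard library:

# Hypothetical check, not in this commit: verify a few key pins from requirements.txt.
from importlib.metadata import version

expected = {
    "torch": "2.3.0",
    "diffusers": "0.27.2",
    "transformers": "4.40.2",
    "gradio": "3.50.2",
    "xformers": "0.0.26.post1",
}
for pkg, want in expected.items():
    got = version(pkg)
    assert got == want, f"{pkg}: expected {want}, found {got}"
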
txt_to_img.py ADDED
@@ -0,0 +1,206 @@
+ import random
+
+ import torch
+ import gradio as gr
+ from diffusers import StableDiffusionPipeline, ControlNetModel
+ from diffusers import (
+     DDIMScheduler,
+     EulerDiscreteScheduler,
+     EulerAncestralDiscreteScheduler,
+     UniPCMultistepScheduler,
+     KDPM2DiscreteScheduler,
+     KDPM2AncestralDiscreteScheduler,
+     PNDMScheduler,
+     DPMSolverMultistepScheduler,
+ )
+
+ # Earlier CPU-loading experiment, kept for reference:
+ # pipe = StableDiffusionPipeline.from_pretrained(
+ #     "SG161222/Realistic_Vision_V5.1_noVAE",
+ #     torch_dtype=torch.float16,
+ #     use_safetensors=True,
+ # ).to("cpu")
+
+
+ def set_pipeline(model_id_repo, scheduler):
+     """Build a StableDiffusionPipeline for the selected model and swap in the requested scheduler."""
+     # Alternative: load a local single-file checkpoint instead of a Hub repo.
+     # pipe = StableDiffusionPipeline.from_single_file(
+     #     "/home/ubuntu/stable-diffusion-webui/models/Stable-diffusion/realisticVisionV51_v51VAE.safetensors",
+     #     # torch_dtype=torch.float16,
+     #     use_safetensors=True,
+     # ).to("cpu")
+
+     # UI model names mapped to their Hugging Face Hub repositories.
+     model_ids_dict = {
+         "dreamshaper": "Lykon/DreamShaper",
+         "deliberate": "soren127/Deliberate",
+         "runwayml": "runwayml/stable-diffusion-v1-5",
+         "Realistic_Vision_V5_1_noVAE": "SG161222/Realistic_Vision_V5.1_noVAE",
+     }
+     model_id = model_id_repo
+     model_repo = model_ids_dict.get(model_id)
+     print("model_repo:", model_repo)
+
+     # CPU variant, kept for reference:
+     # pipe = StableDiffusionPipeline.from_pretrained(
+     #     model_repo,
+     #     # torch_dtype=torch.float16,
+     #     use_safetensors=True,
+     # ).to("cpu")
+
+     pipe = StableDiffusionPipeline.from_pretrained(
+         model_repo,
+         torch_dtype=torch.float16,  # half precision for GPU inference
+         use_safetensors=True,
+     ).to("cuda")
+
+     # UI sampler names mapped to diffusers scheduler classes.
+     scheduler_classes = {
+         "DDIM": DDIMScheduler,
+         "Euler": EulerDiscreteScheduler,
+         "Euler a": EulerAncestralDiscreteScheduler,
+         "UniPC": UniPCMultistepScheduler,
+         "DPM2 Karras": KDPM2DiscreteScheduler,
+         "DPM2 a Karras": KDPM2AncestralDiscreteScheduler,
+         "PNDM": PNDMScheduler,
+         "DPM++ 2M Karras": DPMSolverMultistepScheduler,
+         "DPM++ 2M SDE Karras": DPMSolverMultistepScheduler,
+     }
+
+     sampler_name = scheduler  # sampler name selected in the UI dropdown
+     scheduler_class = scheduler_classes.get(sampler_name)
+
+     if scheduler_class is not None:
+         print("sampler_name:", sampler_name)
+         pipe.scheduler = scheduler_class.from_config(pipe.scheduler.config)
+     else:
+         # Unknown sampler name: keep the pipeline's default scheduler.
+         pass
+
+     # # prompt = "a photo of an astronaut riding a horse on mars"
+     # # pipe.enable_attention_slicing()
+     # image = pipe(prompt).images[0]
+     # image.save("1.png")
+     return pipe
+
+
+ def img_args(
+     prompt,
+     negative_prompt,
+     model_id_repo="Realistic_Vision_V5_1_noVAE",
+     scheduler="Euler a",
+     height=896,
+     width=896,
+     num_inference_steps=30,
+     guidance_scale=7.5,
+     num_images_per_prompt=1,
+     seed=0,
+ ):
+     """Run text-to-image generation and return the list of generated PIL images."""
+     print(model_id_repo)
+     print(scheduler)
+     print(prompt, "&&&&&&&&&&&&&&&&")
+
+     pipe = set_pipeline(model_id_repo, scheduler)
+
+     # A seed of 0 means "random": draw a fresh seed, otherwise use the given one.
+     if seed == 0:
+         seed = random.randint(0, 25647981548564)
+         print(f"random seed :{seed}")
+         generator = torch.manual_seed(seed)
+     else:
+         generator = torch.manual_seed(seed)
+         print(f"manual seed :{seed}")
+
+     image = pipe(
+         prompt=prompt,
+         negative_prompt=negative_prompt,
+         height=height,
+         width=width,
+         num_inference_steps=num_inference_steps,
+         guidance_scale=guidance_scale,
+         num_images_per_prompt=num_images_per_prompt,  # default 1
+         generator=generator,
+     ).images
+     print(image, "#############")
+     # image.save("1.png")
+     return image
+
+
+ # Gradio UI: prompt inputs, advanced options, and an output gallery.
+ block = gr.Blocks().queue()
+ block.title = "Inpaint Anything"
+ with block as image_gen:
+     with gr.Column():
+         with gr.Row():
+             gr.Markdown("## Image Generation")
+         with gr.Row():
+             with gr.Column():
+                 # with gr.Row():
+                 prompt = gr.Textbox(placeholder="what you want to generate", label="Positive Prompt")
+                 negative_prompt = gr.Textbox(placeholder="what you don't want to generate", label="Negative prompt")
+                 run_btn = gr.Button("image generation", elem_id="select_btn", variant="primary")
+                 with gr.Accordion(label="Advanced Options", open=False):
+                     model_selection = gr.Dropdown(
+                         choices=["dreamshaper", "deliberate", "runwayml", "Realistic_Vision_V5_1_noVAE"],
+                         value="Realistic_Vision_V5_1_noVAE",
+                         label="Models",
+                     )
+                     scheduler_selection = gr.Dropdown(
+                         choices=["DDIM", "Euler", "Euler a", "UniPC", "DPM2 Karras", "DPM2 a Karras", "PNDM", "DPM++ 2M Karras", "DPM++ 2M SDE Karras"],
+                         value="Euler a",
+                         label="Scheduler",
+                     )
+                     guidance_scale_slider = gr.Slider(label="guidance_scale", minimum=0, maximum=15, value=7.5, step=0.5)
+                     num_images_per_prompt_slider = gr.Slider(label="num_images_per_prompt", minimum=0, maximum=5, value=1, step=1)
+                     height_slider = gr.Slider(label="height", minimum=0, maximum=2048, value=896, step=1)
+                     width_slider = gr.Slider(label="width", minimum=0, maximum=2048, value=896, step=1)
+                     num_inference_steps_slider = gr.Slider(label="num_inference_steps", minimum=0, maximum=150, value=30, step=1)
+                     seed_slider = gr.Slider(label="Seed Slider", minimum=0, maximum=256479815, value=0, step=1)
+             with gr.Column():
+                 # out_img = gr.Image(type="pil", label="Output", height=480)
+                 out_img = gr.Gallery(label="Output", show_label=False, elem_id="gallery", preview=True)
+
+     run_btn.click(
+         fn=img_args,
+         inputs=[
+             prompt,
+             negative_prompt,
+             model_selection,
+             scheduler_selection,
+             height_slider,
+             width_slider,
+             num_inference_steps_slider,
+             guidance_scale_slider,
+             num_images_per_prompt_slider,
+             seed_slider,
+         ],
+         outputs=[out_img],
+     )
+ image_gen.launch()
+
+ # Earlier inpainting UI (Segment Anything + multi-ControlNet), kept commented out:
+ # block = gr.Blocks().queue()
+ # block.title = "Inpaint Anything"
+ # with block as inpaint_anything_interface:
+ #     with gr.Column():
+ #         with gr.Row():
+ #             gr.Markdown("## Inpainting with Segment Anything (Multi Controlnet)")
+ #         with gr.Row():
+ #             with gr.Column():
+ #                 # with gr.Row():
+ #                 model_selection = gr.Dropdown(choices=["dreamshaper","deliberate","realisticVisionV51_v51VAE","revAnimated_v121Inp","runwayml","Realistic_Vision_V5_1_noVAE"], value="Realistic_Vision_V5_1_noVAE", label="Models")
+ #                 # scheduler = gr.Dropdown(choices=["DDIM","Euler","Euler a","UniPC","DPM2 Karras","DPM2 a Karras","PNDM","DPM++ 2M Karras","DPM++ 2M SDE Karras"], value="Euler a", label="Sampler")
+ #                 input_image = gr.Image(type="numpy", label="input", height=400)
+ #                 run_btn = gr.Button("Run Segment", elem_id="select_btn", variant="primary")
+ #
+ #                 prompt = gr.Textbox(placeholder="what you want to generate")
+ #                 guidance_scale_slider = gr.Slider(label="Guidance Scale", minimum=0, maximum=20.0, value=7.5, step=0.5)
+ #                 inference_slider = gr.Slider(label="Guidance Scale", minimum=0, maximum=150, value=50, step=1)
+ #                 with gr.Row():
+ #                     canny_slider = gr.Slider(label="Canny Slider", minimum=0, maximum=1.0, value=0.5, step=0.1)
+ #                     depth_slider = gr.Slider(label="Depth Slider", minimum=0, maximum=1.0, value=0.5, step=0.1)
+ #                     seg_slider = gr.Slider(label="Segment Slider", minimum=0, maximum=1.0, value=0.5, step=0.1)
+ #                 out_img = gr.Image(type="pil", label="output")
+ #                 seed_slider = gr.Slider(label="Seed Slider", elem_id="expand_mask_iteration_count", minimum=0, maximum=25647981548564, value=0, step=1)
+ #                 grn_btn = gr.Button("image generation", elem_id="select_btn", variant="primary")
+ #                 # bru_btn = gr.Button("Brush generation", elem_id="select_btn", variant="primary")
+ #             with gr.Column():
+ #                 scheduler = gr.Dropdown(choices=["DDIM","Euler","Euler a","UniPC","DPM2 Karras","DPM2 a Karras","PNDM","DPM++ 2M Karras","DPM++ 2M SDE Karras"], value="Euler a", label="Sampler")
+ #                 # lora_chk = gr.Checkbox(label="Use Lora", elem_id="invert_chk", show_label=True, value=False, interactive=True)
+ #                 # image_out = gr.Image(type="pil", label="Output")
+ #                 sam_image = gr.Image(label="Segment Anything image", elem_id="ia_sam_image", type="numpy", tool="sketch", brush_radius=8,
+ #                                      show_label=False, interactive=True, height=400)
+ #                 mask_btn = gr.Button("Create Mask", elem_id="select_btn", variant="primary")
+ #             with gr.Column():
+ #                 with gr.Row():
+ #                     invert_chk = gr.Checkbox(label="Invert mask", elem_id="invert_chk", show_label=True, value=True, interactive=True)
+ #                     ignore_black_chk = gr.Checkbox(label="Ignore black area", elem_id="ignore_black_chk", value=True, show_label=True, interactive=True)
+ #                     lora_chk = gr.Checkbox(label="Use Lora", elem_id="invert_chk", show_label=True, value=False, interactive=True)
+ #                 with gr.Column():
+ #                     sel_mask = gr.Image(label="Selected mask image", elem_id="ia_sel_mask", type="numpy", tool="sketch", brush_radius=12,
+ #                                         show_label=False, interactive=True, height=480)
+ #             with gr.Column():
+ #                 with gr.Row():
+ #                     expand_mask_btn = gr.Button("Expand mask region", elem_id="expand_mask_btn")
+ #                     # with gr.Column():
+ #                     expand_mask_iteration_count = gr.Slider(label="Expand Mask Iterations",
+ #                                                             elem_id="expand_mask_iteration_count", minimum=1, maximum=100, value=1, step=1)
+ #                 with gr.Row():
+ #                     add_mask_btn = gr.Button("Add mask by sketch", elem_id="add_mask_btn")
+ #                     apply_mask_btn = gr.Button("Trim mask by sketch", elem_id="apply_mask_btn")
+ #
+ #     run_btn.click(fn=run_seg, inputs=[input_image], outputs=[sam_image])
+ #     mask_btn.click(fn=select_mask, inputs=[input_image, sam_image, invert_chk, ignore_black_chk, sel_mask], outputs=[sel_mask])
+ #     expand_mask_btn.click(expand_mask, inputs=[input_image, sel_mask, expand_mask_iteration_count], outputs=[sel_mask])
+ #     apply_mask_btn.click(apply_mask, inputs=[input_image, sel_mask], outputs=[sel_mask])
+ #     add_mask_btn.click(add_mask, inputs=[input_image, sel_mask], outputs=[sel_mask])
+ #     grn_btn.click(fn=generate_image, inputs=[input_image, sam_image, prompt, seed_slider, canny_slider, depth_slider, seg_slider, model_selection, scheduler, guidance_scale_slider, inference_slider, lora_chk], outputs=[out_img])
+ #     bru_btn.click(fn=brush_geeration, inputs=[input_image, prompt], outputs=[out_img])
+ # inpaint_anything_interface.launch()
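
For quick testing without the Gradio UI, img_args can also be called directly, for example from a script that skips the module-level image_gen.launch() call. A minimal sketch (not part of this commit; the prompts, negative prompt, and seed below are placeholders) that saves the returned PIL images to disk:

# Hypothetical headless usage of img_args from txt_to_img.py (requires a CUDA GPU).
images = img_args(
    prompt="a photo of an astronaut riding a horse on mars",
    negative_prompt="blurry, low quality",  # placeholder negative prompt
    model_id_repo="Realistic_Vision_V5_1_noVAE",
    scheduler="Euler a",
    num_inference_steps=30,
    seed=42,
)
for i, img in enumerate(images):
    img.save(f"output_{i}.png")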