Spaces: adamelliotfields/diffusion · Running on Zero

Commit a46e137: Remove LoRA support
adamelliotfields committed · 1 parent: 79ce657

Files changed:
- .gitignore (+0 -1)
- DOCS.md (+1 -10)
- README.md (+2 -3)
- app.py (+0 -51)
- lib/__init__.py (+0 -2)
- lib/config.py (+0 -17)
- lib/inference.py (+0 -43)
- lib/utils.py (+0 -30)
- partials/intro.html (+2 -2)
.gitignore CHANGED
@@ -1,4 +1,3 @@
 __pycache__/
 .venv/
-loras/
 app.log
DOCS.md CHANGED
@@ -32,15 +32,6 @@ Each model checkpoint has a different aesthetic:
 * [SG161222/Realistic_Vision_V5](https://huggingface.co/SG161222/Realistic_Vision_V5.1_noVAE): realistic
 * [XpucT/Deliberate_v6](https://huggingface.co/XpucT/Deliberate): general purpose stylized
 
-### LoRA
-
-Apply up to 2 LoRA (low-rank adaptation) adapters with adjustable strength:
-
-* [Perfection Style](https://civitai.com/models/411088?modelVersionId=486099): attempts to improve aesthetics, use high strength
-* [Detailed Style](https://civitai.com/models/421162?modelVersionId=486110): attempts to improve details, use low strength
-
-> NB: The trigger words are automatically appended to the positive prompt for you.
-
 ### Styles
 
 [Styles](https://huggingface.co/spaces/adamelliotfields/diffusion/blob/main/data/styles.json) are prompt templates that wrap your positive and negative prompts. They were originally derived from the [twri/sdxl_prompt_styler](https://github.com/twri/sdxl_prompt_styler) Comfy node, but have since been entirely rewritten.
@@ -55,7 +46,7 @@ The `Anime: *` styles work the best with Dreamshaper. When using the anime-speci
 * Guidance: `10`
 * Steps: `50`
 
-You subject should be a few simple tokens like `girl, brunette, blue eyes, armor, nebula, celestial`. Experiment with `Clip Skip` and `Karras`.
+You subject should be a few simple tokens like `girl, brunette, blue eyes, armor, nebula, celestial`. Experiment with `Clip Skip` and `Karras`.
 
 ### Scale
 
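The Styles paragraph kept above describes prompt templates that wrap the user's prompts. As a rough illustration only (the real schema of data/styles.json is not shown in this diff, so the field names below are assumptions), a style conceptually pairs a positive and a negative template with a placeholder for the user's text:

# Hypothetical style entry and wrapper -- keys are illustrative, not copied from data/styles.json.
style = {
    "name": "Anime: General",
    "positive": "anime artwork of {prompt}, anime style, key visual, vibrant",
    "negative": "photo, photorealistic, {prompt}, lowres",
}

def apply_style(style, positive_prompt, negative_prompt):
    # Substitute the user's prompts into the style's templates.
    positive = style["positive"].format(prompt=positive_prompt)
    negative = style["negative"].format(prompt=negative_prompt)
    return positive, negative

print(apply_style(style, "girl, brunette, blue eyes, armor", "bad anatomy"))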
README.md CHANGED
@@ -1,7 +1,7 @@
 ---
 # https://huggingface.co/docs/hub/en/spaces-config-reference
-title: Diffusion
-short_description: Image generation studio
+title: Diffusion
+short_description: Image generation studio for SD 1.5
 emoji: 🧨
 colorFrom: purple
 colorTo: blue
@@ -62,7 +62,6 @@ preload_from_hub: # up to 10
 Gradio app for Stable Diffusion 1.5 featuring:
 * txt2img and img2img pipelines with ControlNet and IP-Adapter
 * Canny edge detection (more preprocessors coming soon)
-* Curated models, LoRAs, and TI embeddings
 * Compel prompt weighting
 * Hand-written style templates
 * Multiple samplers with Karras scheduling
app.py CHANGED
@@ -1,6 +1,5 @@
 import argparse
 import json
-import os
 import random
 
 import gradio as gr
@@ -9,7 +8,6 @@ from lib import (
     Config,
     async_call,
     disable_progress_bars,
-    download_civit_file,
     download_repo_files,
     generate,
     read_file,
@@ -215,41 +213,6 @@ with gr.Blocks(
                 label="Scheduler",
                 filterable=False,
             )
-            with gr.Row():
-                with gr.Group(elem_classes=["gap-0"]):
-                    lora_1 = gr.Dropdown(
-                        min_width=240,
-                        label="LoRA #1",
-                        value="none",
-                        choices=[("None", "none")]
-                        + [
-                            (lora["name"], lora_id) for lora_id, lora in Config.CIVIT_LORAS.items()
-                        ],
-                    )
-                    lora_1_weight = gr.Slider(
-                        value=0.0,
-                        minimum=0.0,
-                        maximum=1.0,
-                        step=0.1,
-                        show_label=False,
-                    )
-                with gr.Group(elem_classes=["gap-0"]):
-                    lora_2 = gr.Dropdown(
-                        min_width=240,
-                        label="LoRA #2",
-                        value="none",
-                        choices=[("None", "none")]
-                        + [
-                            (lora["name"], lora_id) for lora_id, lora in Config.CIVIT_LORAS.items()
-                        ],
-                    )
-                    lora_2_weight = gr.Slider(
-                        value=0.0,
-                        minimum=0.0,
-                        maximum=1.0,
-                        step=0.1,
-                        show_label=False,
-                    )
 
             # Generation settings
             gr.HTML("<h3>Generation</h3>")
@@ -479,10 +442,6 @@ with gr.Blocks(
             image_prompt,
             control_image_prompt,
             ip_image_prompt,
-            lora_1,
-            lora_1_weight,
-            lora_2,
-            lora_2_weight,
             style,
             seed,
             model,
@@ -518,16 +477,6 @@ if __name__ == "__main__":
     for repo_id, allow_patterns in Config.HF_MODELS.items():
         download_repo_files(repo_id, allow_patterns, token=Config.HF_TOKEN)
 
-    # download civit loras
-    for lora_id, lora in Config.CIVIT_LORAS.items():
-        file_path = os.path.join(os.path.dirname(__file__), "loras")
-        download_civit_file(
-            lora_id,
-            lora["model_version_id"],
-            file_path=file_path,
-            token=Config.CIVIT_TOKEN,
-        )
-
     # https://www.gradio.app/docs/gradio/interface#interface-queue
     demo.queue(default_concurrency_limit=1).launch(
         server_name=args.server,
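Note how the removal spans both the LoRA component definitions and the inputs list wired to the click handler: Gradio passes component values to the handler positionally, so this list and the signature of generate (see lib/inference.py below) have to change together. A minimal sketch of that coupling, independent of this app's actual layout:

import gradio as gr

def generate(prompt, style, seed):
    # Toy handler: parameters line up one-to-one with the inputs list below.
    return f"{prompt} [{style}] (seed={seed})"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    style = gr.Dropdown(label="Style", choices=["none", "anime"], value="none")
    seed = gr.Number(label="Seed", value=-1)
    output = gr.Textbox(label="Output")
    button = gr.Button("Generate")
    # Dropping a component here requires dropping the matching parameter above.
    button.click(generate, inputs=[prompt, style, seed], outputs=output)

if __name__ == "__main__":
    demo.launch()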
lib/__init__.py CHANGED
@@ -3,7 +3,6 @@ from .inference import generate
 from .utils import (
     async_call,
     disable_progress_bars,
-    download_civit_file,
     download_repo_files,
     read_file,
 )
@@ -12,7 +11,6 @@ __all__ = [
     "Config",
     "async_call",
     "disable_progress_bars",
-    "download_civit_file",
     "download_repo_files",
     "generate",
     "read_file",
lib/config.py CHANGED
@@ -57,7 +57,6 @@ sd_files = [
 # Using namespace instead of dataclass for simplicity
 Config = SimpleNamespace(
     HF_TOKEN=os.environ.get("HF_TOKEN", None),
-    CIVIT_TOKEN=os.environ.get("CIVIT_TOKEN", None),
     ZERO_GPU=import_module("spaces").config.Config.zero_gpu,
     # TODO: fix model config redundancy
     HF_MODELS={
@@ -74,22 +73,6 @@ Config = SimpleNamespace(
         "SG161222/Realistic_Vision_V5.1_noVAE": ["Realistic_Vision_V5.1_fp16-no-ema.safetensors"],
         "XpucT/Deliberate": ["Deliberate_v6.safetensors"],
     },
-    CIVIT_LORAS={
-        # https://civitai.com/models/411088?modelVersionId=486099
-        "perfection_style": {
-            "model_id": "411088",
-            "model_version_id": "486099",
-            "name": "Perfection Style",
-            "trigger": "perfection style",
-        },
-        # https://civitai.com/models/421162?modelVersionId=486110
-        "detailed_style": {
-            "model_id": "421162",
-            "model_version_id": "486110",
-            "name": "Detailed Style",
-            "trigger": "detailed style",
-        },
-    },
     MONO_FONTS=["monospace"],
     SANS_FONTS=[
         "sans-serif",
lib/inference.py CHANGED
@@ -66,10 +66,6 @@ def generate(
     image_prompt=None,
     control_image_prompt=None,
     ip_image_prompt=None,
-    lora_1=None,
-    lora_1_weight=0.0,
-    lora_2=None,
-    lora_2_weight=0.0,
     style=None,
     seed=None,
     model="Lykon/dreamshaper-8",
@@ -159,40 +155,6 @@ def generate(
     pipe = loader.pipe
     upscaler = loader.upscaler
 
-    # load loras
-    loras = []
-    weights = []
-    loras_and_weights = [(lora_1, lora_1_weight), (lora_2, lora_2_weight)]
-    loras_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "loras"))
-    total_loras = sum(1 for lora, _ in loras_and_weights if lora and lora.lower() != "none")
-    desc_loras = "Loading LoRAs"
-    if total_loras > 0:
-        with timer(f"Loading {total_loras} LoRA{'s' if total_loras > 1 else ''}"):
-            safe_progress(progress, 0, total_loras, desc_loras)
-            for i, (lora, weight) in enumerate(loras_and_weights):
-                if lora and lora.lower() != "none" and lora not in loras:
-                    config = Config.CIVIT_LORAS.get(lora)
-                    if config:
-                        try:
-                            pipe.load_lora_weights(
-                                loras_dir,
-                                adapter_name=lora,
-                                weight_name=f"{lora}.{config['model_version_id']}.safetensors",
-                            )
-                            weights.append(weight)
-                            loras.append(lora)
-                            safe_progress(progress, i + 1, total_loras, desc_loras)
-                        except Exception:
-                            raise Error(f"Error loading {config['name']} LoRA")
-
-    # unload after generating or if there was an error
-    try:
-        if loras:
-            pipe.set_adapters(loras, adapter_weights=weights)
-    except Exception:
-        pipe.unload_lora_weights()
-        raise Error("Error setting LoRA weights")
-
     # Load negative embedding if requested
     if negative_embedding:
         embeddings_dir = os.path.abspath(
@@ -234,9 +196,6 @@ def generate(
     if negative_embedding:
         negative_styled += f", <{Config.NEGATIVE_EMBEDDING}>"
 
-    for lora in loras:
-        positive_styled += f", {Config.CIVIT_LORAS[lora]['trigger']}"
-
     positive_embeds, negative_embeds = compel.pad_conditioning_tensors_to_same_length(
         [compel(positive_styled), compel(negative_styled)]
     )
@@ -278,8 +237,6 @@ def generate(
     finally:
         if negative_embedding:
             pipe.unload_textual_inversion()
-        if loras:
-            pipe.unload_lora_weights()
         CURRENT_STEP = 0
         CURRENT_IMAGE += 1
 
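For context, the block removed above followed the usual diffusers adapter lifecycle: load each .safetensors file as a named adapter, activate the set with per-adapter strengths, and unload in the finally block so the next request starts from a clean pipeline. A condensed sketch of that pattern (file names taken from the removed config; strength values are illustrative):

import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("Lykon/dreamshaper-8", torch_dtype=torch.float16)

# Load each adapter from the local loras/ directory under its own name.
pipe.load_lora_weights("loras", weight_name="perfection_style.486099.safetensors", adapter_name="perfection_style")
pipe.load_lora_weights("loras", weight_name="detailed_style.486110.safetensors", adapter_name="detailed_style")

# Activate both adapters with per-adapter strengths (values are illustrative).
pipe.set_adapters(["perfection_style", "detailed_style"], adapter_weights=[0.8, 0.3])

# ... run the pipeline ...

# Unload so later requests are unaffected.
pipe.unload_lora_weights()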
lib/utils.py CHANGED
@@ -7,7 +7,6 @@ from contextlib import contextmanager
 from typing import Callable, Tuple, TypeVar
 
 import anyio
-import httpx
 import numpy as np
 import torch
 from anyio import Semaphore
@@ -19,7 +18,6 @@ from transformers import logging as transformers_logging
 from typing_extensions import ParamSpec
 
 from .annotators import CannyAnnotator
-from .logger import Logger
 
 T = TypeVar("T")
 P = ParamSpec("P")
@@ -91,34 +89,6 @@ def download_repo_files(repo_id, allow_patterns, token=None):
     return snapshot_path
 
 
-def download_civit_file(lora_id, version_id, file_path=".", token=None):
-    base_url = "https://civitai.com/api/download/models"
-    file = f"{file_path}/{lora_id}.{version_id}.safetensors"
-    log = Logger("download_civit_file")
-
-    if os.path.exists(file):
-        return
-
-    try:
-        params = {"token": token}
-        response = httpx.get(
-            f"{base_url}/{version_id}",
-            timeout=None,
-            params=params,
-            follow_redirects=True,
-        )
-
-        response.raise_for_status()
-        os.makedirs(file_path, exist_ok=True)
-
-        with open(file, "wb") as f:
-            f.write(response.content)
-    except httpx.HTTPStatusError as e:
-        log.error(f"{e.response.status_code} {e.response.text}")
-    except httpx.RequestError as e:
-        log.error(f"RequestError: {e}")
-
-
 def image_to_pil(image: Image.Image):
     """Converts various image inputs to RGB PIL Image."""
     if isinstance(image, str) and os.path.isfile(image):
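With download_civit_file gone, the only remaining download path is download_repo_files, whose body is not shown in this diff. Judging from the `return snapshot_path` context line, it presumably wraps huggingface_hub's snapshot_download; a minimal equivalent, stated as an assumption rather than the actual implementation:

from huggingface_hub import snapshot_download

def download_repo_files(repo_id, allow_patterns, token=None):
    # Fetch only the files matching allow_patterns and return the local snapshot path.
    snapshot_path = snapshot_download(repo_id, allow_patterns=allow_patterns, token=token)
    return snapshot_path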
partials/intro.html CHANGED
@@ -1,13 +1,13 @@
 <div id="intro">
   <div>
     <h1>
-      Diffusion
+      Diffusion
     </h1>
     <svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 15 15">
       <path d="M7.48877 6.75C7.29015 6.75 7.09967 6.82902 6.95923 6.96967C6.81879 7.11032 6.73989 7.30109 6.73989 7.5C6.73989 7.69891 6.81879 7.88968 6.95923 8.03033C7.09967 8.17098 7.29015 8.25 7.48877 8.25C7.68738 8.25 7.87786 8.17098 8.0183 8.03033C8.15874 7.88968 8.23764 7.69891 8.23764 7.5C8.23764 7.30109 8.15874 7.11032 8.0183 6.96967C7.87786 6.82902 7.68738 6.75 7.48877 6.75ZM7.8632 0C11.2331 0 11.3155 2.6775 9.54818 3.5625C8.80679 3.93 8.47728 4.7175 8.335 5.415C8.69446 5.565 9.00899 5.7975 9.24863 6.0975C12.0195 4.5975 15 5.19 15 7.875C15 11.25 12.3265 11.325 11.4428 9.5475C11.0684 8.805 10.2746 8.475 9.57813 8.3325C9.42836 8.6925 9.19621 9 8.89665 9.255C10.3869 12.0225 9.79531 15 7.11433 15C3.74438 15 3.67698 12.315 5.44433 11.43C6.17823 11.0625 6.50774 10.2825 6.65751 9.5925C6.29056 9.4425 5.96855 9.2025 5.72891 8.9025C2.96555 10.3875 0 9.8025 0 7.125C0 3.75 2.666 3.6675 3.54967 5.445C3.92411 6.1875 4.71043 6.51 5.40689 6.6525C5.54918 6.2925 5.78882 5.9775 6.09586 5.7375C4.60559 2.97 5.1972 0 7.8632 0Z"></path>
     </svg>
   </div>
   <p>
-    Stable Diffusion
+    Image generation studio for Stable Diffusion.
   </p>
 </div>