jojosims4557 committed
Commit e57d493 · verified · 1 Parent(s): 917f044

Upload 5 files

Files changed (5):
  1. README.md +47 -13
  2. flux_lora.png +0 -0
  3. live_preview_helpers.py +166 -0
  4. loras.json +234 -0
  5. requirements.txt +6 -0
README.md CHANGED
@@ -1,13 +1,47 @@
- ---
- title: Nananie
- emoji: 🐠
- colorFrom: gray
- colorTo: gray
- sdk: gradio
- sdk_version: 5.1.0
- app_file: app.py
- pinned: false
- license: apache-2.0
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: FLUX LoRa the Explorer
+ emoji: 🏆
+ colorFrom: red
+ colorTo: pink
+ sdk: gradio
+ sdk_version: 5.1.0
+ app_file: app.py
+ pinned: true
+ license: mit
+ models:
+ - black-forest-labs/FLUX.1-dev
+ - renderartist/retrocomicflux
+ - glif/l0w-r3z
+ - Purz/vhs-box
+ - renderartist/simplevectorflux
+ - glif/anime-blockprint-style
+ - multimodalart/flux-tarot-v1
+ - alvdansen/pola-photo-flux
+ - dvyio/flux-lora-the-sims
+ - alvdansen/softpasty-flux-dev
+ - dvyio/flux-lora-film-noir
+ - AIWarper/RubberCore1920sCartoonStyle
+ - Norod78/JojosoStyle-flux-lora
+ - XLabs-AI/flux-RealismLora
+ - multimodalart/vintage-ads-flux
+ - glif/how2draw
+ - mgwr/Cine-Aesthetic
+ - sWizad/pokemon-trainer-sprites-pixelart-flux
+ - nerijs/animation2k-flux
+ - alvdansen/softserve_anime
+ - veryVANYA/ps1-style-flux
+ - alvdansen/flux-koda
+ - alvdansen/frosting_lane_flux
+ - davisbro/half_illustration
+ - fofr/flux-wrong
+ - linoyts/yarn_art_Flux_LoRA
+ - Norod78/Flux_1_Dev_LoRA_Paper-Cutout-Style
+ - SebastianBodza/Flux_Aquarell_Watercolor_v2
+ - dataautogpt3/FLUX-SyntheticAnime
+ - fofr/flux-80s-cyberpunk
+ - kudzueye/boreal-flux-dev-v2
+ - XLabs-AI/flux-lora-collection
+ - martintomov/retrofuturism-flux
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
flux_lora.png ADDED
live_preview_helpers.py ADDED
@@ -0,0 +1,166 @@
+ import torch
+ import numpy as np
+ from diffusers import FluxPipeline, AutoencoderTiny, FlowMatchEulerDiscreteScheduler
+ from typing import Any, Dict, List, Optional, Union
+
+ # Helper functions
+ def calculate_shift(
+     image_seq_len,
+     base_seq_len: int = 256,
+     max_seq_len: int = 4096,
+     base_shift: float = 0.5,
+     max_shift: float = 1.16,
+ ):
+     m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
+     b = base_shift - m * base_seq_len
+     mu = image_seq_len * m + b
+     return mu
+
+ def retrieve_timesteps(
+     scheduler,
+     num_inference_steps: Optional[int] = None,
+     device: Optional[Union[str, torch.device]] = None,
+     timesteps: Optional[List[int]] = None,
+     sigmas: Optional[List[float]] = None,
+     **kwargs,
+ ):
+     if timesteps is not None and sigmas is not None:
+         raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+     if timesteps is not None:
+         scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+         num_inference_steps = len(timesteps)
+     elif sigmas is not None:
+         scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+         num_inference_steps = len(timesteps)
+     else:
+         scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+     return timesteps, num_inference_steps
+
+ # FLUX pipeline function
+ @torch.inference_mode()
+ def flux_pipe_call_that_returns_an_iterable_of_images(
+     self,
+     prompt: Union[str, List[str]] = None,
+     prompt_2: Optional[Union[str, List[str]]] = None,
+     height: Optional[int] = None,
+     width: Optional[int] = None,
+     num_inference_steps: int = 28,
+     timesteps: List[int] = None,
+     guidance_scale: float = 3.5,
+     num_images_per_prompt: Optional[int] = 1,
+     generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+     latents: Optional[torch.FloatTensor] = None,
+     prompt_embeds: Optional[torch.FloatTensor] = None,
+     pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
+     output_type: Optional[str] = "pil",
+     return_dict: bool = True,
+     joint_attention_kwargs: Optional[Dict[str, Any]] = None,
+     max_sequence_length: int = 512,
+     good_vae: Optional[Any] = None,
+ ):
+     height = height or self.default_sample_size * self.vae_scale_factor
+     width = width or self.default_sample_size * self.vae_scale_factor
+
+     # 1. Check inputs
+     self.check_inputs(
+         prompt,
+         prompt_2,
+         height,
+         width,
+         prompt_embeds=prompt_embeds,
+         pooled_prompt_embeds=pooled_prompt_embeds,
+         max_sequence_length=max_sequence_length,
+     )
+
+     self._guidance_scale = guidance_scale
+     self._joint_attention_kwargs = joint_attention_kwargs
+     self._interrupt = False
+
+     # 2. Define call parameters
+     batch_size = 1 if isinstance(prompt, str) else len(prompt)
+     device = self._execution_device
+
+     # 3. Encode prompt
+     lora_scale = joint_attention_kwargs.get("scale", None) if joint_attention_kwargs is not None else None
+     prompt_embeds, pooled_prompt_embeds, text_ids = self.encode_prompt(
+         prompt=prompt,
+         prompt_2=prompt_2,
+         prompt_embeds=prompt_embeds,
+         pooled_prompt_embeds=pooled_prompt_embeds,
+         device=device,
+         num_images_per_prompt=num_images_per_prompt,
+         max_sequence_length=max_sequence_length,
+         lora_scale=lora_scale,
+     )
+     # 4. Prepare latent variables
+     num_channels_latents = self.transformer.config.in_channels // 4
+     latents, latent_image_ids = self.prepare_latents(
+         batch_size * num_images_per_prompt,
+         num_channels_latents,
+         height,
+         width,
+         prompt_embeds.dtype,
+         device,
+         generator,
+         latents,
+     )
+     # 5. Prepare timesteps
+     sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps)
+     image_seq_len = latents.shape[1]
+     mu = calculate_shift(
+         image_seq_len,
+         self.scheduler.config.base_image_seq_len,
+         self.scheduler.config.max_image_seq_len,
+         self.scheduler.config.base_shift,
+         self.scheduler.config.max_shift,
+     )
+     timesteps, num_inference_steps = retrieve_timesteps(
+         self.scheduler,
+         num_inference_steps,
+         device,
+         timesteps,
+         sigmas,
+         mu=mu,
+     )
+     self._num_timesteps = len(timesteps)
+
+     # Handle guidance
+     guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32).expand(latents.shape[0]) if self.transformer.config.guidance_embeds else None
+
+     # 6. Denoising loop
+     for i, t in enumerate(timesteps):
+         if self.interrupt:
+             continue
+
+         timestep = t.expand(latents.shape[0]).to(latents.dtype)
+
+         noise_pred = self.transformer(
+             hidden_states=latents,
+             timestep=timestep / 1000,
+             guidance=guidance,
+             pooled_projections=pooled_prompt_embeds,
+             encoder_hidden_states=prompt_embeds,
+             txt_ids=text_ids,
+             img_ids=latent_image_ids,
+             joint_attention_kwargs=self.joint_attention_kwargs,
+             return_dict=False,
+         )[0]
+         # Yield intermediate result
+         latents_for_image = self._unpack_latents(latents, height, width, self.vae_scale_factor)
+         latents_for_image = (latents_for_image / self.vae.config.scaling_factor) + self.vae.config.shift_factor
+         image = self.vae.decode(latents_for_image, return_dict=False)[0]
+         yield self.image_processor.postprocess(image, output_type=output_type)[0]
+         latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
+         torch.cuda.empty_cache()
+
+
+     # Final image using good_vae
+     latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
+     latents = (latents / good_vae.config.scaling_factor) + good_vae.config.shift_factor
+     image = good_vae.decode(latents, return_dict=False)[0]
+     self.maybe_free_model_hooks()
+     torch.cuda.empty_cache()
+     yield self.image_processor.postprocess(image, output_type=output_type)[0]
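Usage note: the generator above is meant to be bound to a FluxPipeline instance so that every denoising step yields a previewable image (a tiny VAE for intermediates, good_vae for the final frame). The app.py that consumes it is not included in this commit, so the following is only a minimal, hypothetical wiring sketch; the model id, the madebyollin/taef1 tiny VAE, and all parameter values are illustrative assumptions.

import torch
from diffusers import AutoencoderKL, AutoencoderTiny, FluxPipeline

from live_preview_helpers import flux_pipe_call_that_returns_an_iterable_of_images

dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

# Assumed setup: keep the full-quality VAE aside for the final decode, use a tiny VAE for fast previews.
good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device)
pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=dtype).to(device)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)

# Bind the helper as a method of this pipeline instance.
pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)

for step_image in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
    prompt="a lighthouse on a cliff at dusk",
    num_inference_steps=28,
    guidance_scale=3.5,
    width=1024,
    height=1024,
    generator=torch.Generator(device=device).manual_seed(0),
    output_type="pil",
    good_vae=good_vae,
):
    step_image.save("preview.png")  # overwritten each step; the last yield is the full-VAE final image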
loras.json ADDED
@@ -0,0 +1,234 @@
+ [
+     {
+         "repo": "Purz/choose-your-own-adventure",
+         "image": "https://huggingface.co/Purz/choose-your-own-adventure/resolve/main/34584570.jpeg",
+         "trigger_word": "cy04,",
+         "trigger_position": "prepend",
+         "title": "choose your own adventure",
+         "aspect": "portrait"
+     },
+     {
+         "image": "https://huggingface.co/renderartist/retrocomicflux/resolve/main/images/ComfyUI_temp_ipugi_00131_.png",
+         "repo": "renderartist/retrocomicflux",
+         "trigger_word": "c0m1c style vintage 1930s style comic strip panel of",
+         "title": "Retro Comic",
+         "trigger_position": "prepend"
+     },
+     {
+         "image": "https://huggingface.co/glif/l0w-r3z/resolve/main/images/a19d658b-5d4c-45bc-9df6-f2bec54462a5.png",
+         "repo": "glif/l0w-r3z",
+         "trigger_word": ", l0w-r3z",
+         "title": "Low Res 3D"
+     },
+     {
+         "repo": "Purz/vhs-box",
+         "image": "https://huggingface.co/Purz/vhs-box/resolve/main/33726559.jpeg",
+         "trigger_word": ", vhs_box",
+         "title": "VHS Box"
+     },
+     {
+         "image": "https://huggingface.co/renderartist/simplevectorflux/resolve/main/images/ComfyUI_09477_.jpeg",
+         "title": "Simple Vector",
+         "repo": "renderartist/simplevectorflux",
+         "trigger_word": "v3ct0r style, simple flat vector art, isolated on white bg,",
+         "trigger_position": "prepend"
+     },
+     {
+         "image": "https://huggingface.co/glif/anime-blockprint-style/resolve/main/images/glif-block-print-anime-flux-dev-araminta-k-lora-araminta-k-kbde06qyovrmvsv65ubfyhn1.jpg",
+         "repo": "glif/anime-blockprint-style",
+         "trigger_word": ", blockprint style",
+         "title": "Blockprint Style"
+     },
+     {
+         "image": "https://huggingface.co/Purz/face-projection/resolve/main/34031841.jpeg",
+         "repo": "Purz/face-projection",
+         "trigger_word": "f4c3_p40j3ct10n,",
+         "trigger_position": "prepend",
+         "title": "face projection"
+     },
+     {
+         "image": "https://huggingface.co/multimodalart/flux-tarot-v1/resolve/main/images/e5f2761e5d474e6ba492d20dca0fa26f_e78f1524074b42b6ac49643ffad50ac6.png",
+         "title": "Tarot v1",
+         "repo": "multimodalart/flux-tarot-v1",
+         "trigger_word": "in the style of TOK a trtcrd, tarot style",
+         "aspect": "portrait"
+     },
+     {
+         "repo": "alvdansen/pola-photo-flux",
+         "image": "https://huggingface.co/alvdansen/pola-photo-flux/resolve/main/images/out-2%20(83).webp",
+         "trigger_word": ", polaroid style",
+         "title": "Polaroid Style"
+     },
+     {
+         "image": "https://huggingface.co/dvyio/flux-lora-the-sims/resolve/main/images/dunBAVBsALOepaE_dsWFI_6b0fef6b0fc4472aa07d00edea7c75b3.jpg",
+         "repo": "dvyio/flux-lora-the-sims",
+         "trigger_word": ", video game screenshot in the style of THSMS",
+         "title": "The Sims style"
+     },
+     {
+         "image": "https://huggingface.co/alvdansen/softpasty-flux-dev/resolve/main/images/ComfyUI_00814_%20(2).png",
+         "title": "SoftPasty",
+         "repo": "alvdansen/softpasty-flux-dev",
+         "trigger_word": "araminta_illus illustration style"
+     },
+     {
+         "image": "https://huggingface.co/dvyio/flux-lora-film-noir/resolve/main/images/S8iWMa0GamEcFkanHHmI8_a232d8b83bb043808742d661dac257f7.jpg",
+         "title": "Film Noir",
+         "repo": "dvyio/flux-lora-film-noir",
+         "trigger_word": "in the style of FLMNR"
+     },
+     {
+         "image": "https://huggingface.co/AIWarper/RubberCore1920sCartoonStyle/resolve/main/images/Rub_00006_.png",
+         "title": "1920s cartoon",
+         "repo": "AIWarper/RubberCore1920sCartoonStyle",
+         "trigger_word": "RU883R style",
+         "trigger_position": "prepend"
+     },
+     {
+         "image": "https://huggingface.co/Norod78/JojosoStyle-flux-lora/resolve/main/samples/1725244218477__000004255_1.jpg",
+         "title": "JoJo Style",
+         "repo": "Norod78/JojosoStyle-flux-lora",
+         "trigger_word": "JojosoStyle",
+         "trigger_position": "prepend"
+     },
+     {
+         "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/picture-6-rev1.png?raw=true",
+         "title": "flux-Realism",
+         "repo": "XLabs-AI/flux-RealismLora",
+         "trigger_word": ""
+     },
+     {
+         "image": "https://huggingface.co/multimodalart/vintage-ads-flux/resolve/main/samples/j_XNU6Oe0mgttyvf9uPb3_dc244dd3d6c246b4aff8351444868d66.png",
+         "title": "Vintage Ads",
+         "repo": "multimodalart/vintage-ads-flux",
+         "trigger_word": "a vintage ad of",
+         "trigger_position": "prepend"
+     },
+     {
+         "image": "https://huggingface.co/glif/how2draw/resolve/main/images/glif-how2draw-araminta-k-vbnvy94npt8m338r2vm02m50.jpg",
+         "repo": "glif/how2draw",
+         "trigger_word": ", How2Draw",
+         "title": "How2Draw"
+     },
+     {
+         "image": "https://huggingface.co/mgwr/Cine-Aesthetic/resolve/main/images/00030-1333633802.png",
+         "title": "Cine Aesthetic",
+         "repo": "mgwr/Cine-Aesthetic",
+         "trigger_word": "mgwr/cine",
+         "trigger_position": "prepend"
+     },
+     {
+         "image": "https://huggingface.co/sWizad/pokemon-trainer-sprites-pixelart-flux/resolve/main/26578915.jpeg",
+         "repo": "sWizad/pokemon-trainer-sprites-pixelart-flux",
+         "title": "Pokemon Trainer Sprites",
+         "trigger_word": "white background, a pixel image of",
+         "trigger_position": "prepend"
+     },
+     {
+         "image": "https://huggingface.co/nerijs/animation2k-flux/resolve/main/images/Q8-oVxNnXvZ9HNrgbNpGw_02762aaaba3b47859ee5fe9403a371e3.png",
+         "title": "animation2k",
+         "repo": "nerijs/animation2k-flux",
+         "trigger_word": ""
+     },
+     {
+         "image": "https://huggingface.co/alvdansen/softserve_anime/resolve/main/images/ComfyUI_00062_.png",
+         "title": "SoftServe Anime",
+         "repo": "alvdansen/softserve_anime",
+         "trigger_word": ""
+     },
+     {
+         "image": "https://huggingface.co/veryVANYA/ps1-style-flux/resolve/main/24439220.jpeg",
+         "title": "PS1 style",
+         "repo": "veryVANYA/ps1-style-flux",
+         "trigger_word": "ps1 game screenshot,",
+         "trigger_position": "prepend"
+     },
+     {
+         "image": "https://huggingface.co/alvdansen/flux-koda/resolve/main/images/ComfyUI_00566_%20(2).png",
+         "title": "flux koda",
+         "repo": "alvdansen/flux-koda",
+         "trigger_word": "flmft style"
+     },
+     {
+         "image": "https://huggingface.co/alvdansen/frosting_lane_flux/resolve/main/images/content%20-%202024-08-11T005936.346.jpeg",
+         "title": "Frosting Lane Flux",
+         "repo": "alvdansen/frosting_lane_flux",
+         "trigger_word": ""
+     },
+     {
+         "image": "https://huggingface.co/davisbro/half_illustration/resolve/main/images/example3.webp",
+         "title": "Half Illustration",
+         "repo": "davisbro/half_illustration",
+         "trigger_word": "in the style of TOK"
+     },
+     {
+         "image": "https://pbs.twimg.com/media/GVRiSH7WgAAnI4P?format=jpg&name=medium",
+         "title": "wrong",
+         "repo": "fofr/flux-wrong",
+         "trigger_word": "WRNG"
+     },
+     {
+         "image": "https://huggingface.co/linoyts/yarn_art_Flux_LoRA/resolve/main/yarn_art_2.png",
+         "title": "Yarn Art",
+         "repo": "linoyts/yarn_art_Flux_LoRA",
+         "trigger_word": ", yarn art style"
+     },
+     {
+         "image": "https://huggingface.co/Norod78/Flux_1_Dev_LoRA_Paper-Cutout-Style/resolve/main/08a19840b6214b76b0607b2f9d5a7e28_63159b9d98124c008efb1d36446a615c.png",
+         "title": "Paper Cutout",
+         "repo": "Norod78/Flux_1_Dev_LoRA_Paper-Cutout-Style",
+         "trigger_word": ", Paper Cutout Style"
+     },
+     {
+         "image": "https://huggingface.co/SebastianBodza/flux_lora_aquarel_watercolor/resolve/main/images/ascend.webp",
+         "title": "Aquarell Watercolor",
+         "repo": "SebastianBodza/Flux_Aquarell_Watercolor_v2",
+         "trigger_word": "in a watercolor style, AQUACOLTOK. White background."
+     },
+     {
+         "image": "https://huggingface.co/dataautogpt3/FLUX-SyntheticAnime/resolve/main/assets/angel.png",
+         "title": "SyntheticAnime",
+         "repo": "dataautogpt3/FLUX-SyntheticAnime",
+         "trigger_word": "1980s anime screengrab, VHS quality"
+     },
+     {
+         "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_14.png?raw=true",
+         "title": "flux-anime",
+         "repo": "XLabs-AI/flux-lora-collection",
+         "weights": "anime_lora.safetensors",
+         "trigger_word": ", anime"
+     },
+     {
+         "image": "https://replicate.delivery/yhqm/QD8Ioy5NExqSCtBS8hG04XIRQZFaC9pxJemINT1bibyjZfSTA/out-0.webp",
+         "title": "80s Cyberpunk",
+         "repo": "fofr/flux-80s-cyberpunk",
+         "trigger_word": "style of 80s cyberpunk",
+         "trigger_position": "prepend"
+     },
+     {
+         "image": "https://huggingface.co/kudzueye/Boreal/resolve/main/images/ComfyUI_00845_.png",
+         "title": "Boreal",
+         "repo": "kudzueye/boreal-flux-dev-v2",
+         "trigger_word": "phone photo"
+     },
+     {
+         "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_18.png?raw=true",
+         "title": "flux-disney",
+         "repo": "XLabs-AI/flux-lora-collection",
+         "weights": "disney_lora.safetensors",
+         "trigger_word": ", disney style"
+     },
+     {
+         "image": "https://github.com/XLabs-AI/x-flux/blob/main/assets/readme/examples/result_23.png?raw=true",
+         "title": "flux-art",
+         "repo": "XLabs-AI/flux-lora-collection",
+         "weights": "art_lora.safetensors",
+         "trigger_word": ", art"
+     },
+     {
+         "image": "https://huggingface.co/martintomov/retrofuturism-flux/resolve/main/images/2e40deba-858e-454f-ae1c-d1ba2adb6a65.jpeg",
+         "title": "Retrofuturism Flux",
+         "repo": "martintomov/retrofuturism-flux",
+         "trigger_word": ", retrofuturism"
+     }
+ ]
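Usage note: each loras.json entry pairs a LoRA repo with a preview image, a trigger_word, and optional trigger_position / weights / aspect fields. The app code that consumes the file is not part of this commit; below is only a hedged sketch of how the fields could be combined with a user prompt and loaded into a pipeline (the helper name build_prompt and the commented diffusers call are assumptions, not code from this repository).

import json

def build_prompt(user_prompt: str, entry: dict) -> str:
    """Attach the entry's trigger word to the user prompt."""
    trigger = entry.get("trigger_word", "")
    if not trigger:
        return user_prompt
    if entry.get("trigger_position") == "prepend":
        return f"{trigger} {user_prompt}"
    # Appended triggers in this file carry their own leading ", ", so no extra separator is added.
    return f"{user_prompt}{trigger}"

with open("loras.json") as f:
    loras = json.load(f)

entry = loras[0]  # e.g. Purz/choose-your-own-adventure
print(build_prompt("a knight standing at a crossroads", entry))
# -> "cy04, a knight standing at a crossroads"

# Loading the adapter into a FluxPipeline (weight_name only matters for multi-file repos
# such as XLabs-AI/flux-lora-collection):
# pipe.load_lora_weights(entry["repo"], weight_name=entry.get("weights"))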
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ git+https://github.com/huggingface/diffusers.git
+ git+https://github.com/huggingface/transformers.git
+ git+https://github.com/huggingface/accelerate.git
+ safetensors
+ sentencepiece
+ git+https://github.com/huggingface/peft.git