Spaces:
Runtime error
Runtime error
Ritesh Khanna
committed on
Commit
·
d1ec534
1
Parent(s):
82905a4
touchups
Browse files
app.py
CHANGED
@@ -57,6 +57,8 @@ def generate_text(prompt, extra=False, top_k=100, top_p=0.95, temperature=0.85,
|
|
57 |
if model is None:
|
58 |
return ["Try Again"] * 4
|
59 |
|
|
|
|
|
60 |
if not prompt.startswith("BRF:"):
|
61 |
prompt = "BRF: " + prompt
|
62 |
|
@@ -75,6 +77,7 @@ def generate_text(prompt, extra=False, top_k=100, top_p=0.95, temperature=0.85,
|
|
75 |
for output in outputs:
|
76 |
sample = tokenizer.decode(output, skip_special_tokens=True)
|
77 |
sample = format_prompt(sample, enhancers, inpspiration, negative_prompt)
|
|
|
78 |
samples.append(sample)
|
79 |
except Exception as e:
|
80 |
print(e)
|
@@ -86,10 +89,12 @@ if __name__ == "__main__":
|
|
86 |
with gr.Row():
|
87 |
gr.Markdown("""# Midjourney / Dalle 2 / Stable Diffusion Prompt Generator
|
88 |
This is the 355M parameter model. There is also a 7B parameter model that is much better but far slower (access coming soon).
|
89 |
-
Just enter a basic prompt and the fungineering model will use its wildest imagination to expand the prompt in detail. You can then use this prompt to generate images with Midjourney, Dalle 2, Stable Diffusion, Bing Image Creator, or any other image generation model. Treat this model more like a text-to-text model (simple prompt > complex prompt) rather than a generative model (prefix + word generation). It is a generative model under the hood.
|
|
|
|
|
90 |
with gr.Row():
|
91 |
with gr.Column():
|
92 |
-
base_prompt = gr.Textbox(lines=
|
93 |
extra = gr.Checkbox(value=True, label="Extra Fungineer Imagination", info="If checked, the model will be allowed to go wild with its imagination.")
|
94 |
with gr.Accordion("Advanced Generation Settings", open=False):
|
95 |
top_k = gr.Slider( minimum=10, maximum=1000, value=100, label="Top K", info="Top K sampling")
|
|
|
57 |
if model is None:
|
58 |
return ["Try Again"] * 4
|
59 |
|
60 |
+
prompt = prompt.strip()
|
61 |
+
|
62 |
if not prompt.startswith("BRF:"):
|
63 |
prompt = "BRF: " + prompt
|
64 |
|
|
|
77 |
for output in outputs:
|
78 |
sample = tokenizer.decode(output, skip_special_tokens=True)
|
79 |
sample = format_prompt(sample, enhancers, inpspiration, negative_prompt)
|
80 |
+
print(f"Sample: {sample}")
|
81 |
samples.append(sample)
|
82 |
except Exception as e:
|
83 |
print(e)
|
|
|
89 |
with gr.Row():
|
90 |
gr.Markdown("""# Midjourney / Dalle 2 / Stable Diffusion Prompt Generator
|
91 |
This is the 355M parameter model. There is also a 7B parameter model that is much better but far slower (access coming soon).
|
92 |
+
Just enter a basic prompt and the fungineering model will use its wildest imagination to expand the prompt in detail. You can then use this prompt to generate images with Midjourney, Dalle 2, Stable Diffusion, Bing Image Creator, or any other image generation model. Treat this model more like a text-to-text model (simple prompt > complex prompt) rather than a generative model (prefix + word generation). It is a generative model under the hood.
|
93 |
+
## TIP: Keep the base prompt short and simple. The model will do the rest.
|
94 |
+
""")
|
95 |
with gr.Row():
|
96 |
with gr.Column():
|
97 |
+
base_prompt = gr.Textbox(lines=1, label="Base Prompt (Shorter is Better)", placeholder="An astronaut in space.", info="Enter a very simple prompt that will be fungineered into something exciting!")
|
98 |
extra = gr.Checkbox(value=True, label="Extra Fungineer Imagination", info="If checked, the model will be allowed to go wild with its imagination.")
|
99 |
with gr.Accordion("Advanced Generation Settings", open=False):
|
100 |
top_k = gr.Slider( minimum=10, maximum=1000, value=100, label="Top K", info="Top K sampling")
|