Spaces:
Running
Running
Duplicate from doevent/prompt-generator
Browse files
Co-authored-by: Max Skobeev <doevent@users.noreply.huggingface.co>
- .gitattributes +31 -0
- README.md +13 -0
- app.py +56 -0
- name.txt +0 -0
- requirements.txt +3 -0
.gitattributes
ADDED
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
23 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
26 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.zstandard filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: Midjourney Prompt Generator
|
3 |
+
emoji: 🌍
|
4 |
+
colorFrom: pink
|
5 |
+
colorTo: gray
|
6 |
+
sdk: gradio
|
7 |
+
sdk_version: 3.28.1
|
8 |
+
app_file: app.py
|
9 |
+
pinned: false
|
10 |
+
duplicated_from: doevent/prompt-generator
|
11 |
+
---
|
12 |
+
|
13 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
ADDED
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from transformers import pipeline, set_seed
import gradio as grad
import random
import re

# Text-generation pipeline backed by a GPT-2 model fine-tuned to expand short
# phrases into image-generation ("Midjourney-style") prompts.  NOTE: this
# downloads the model at import time, so module import can be slow/online.
gpt2_pipe = pipeline('text-generation', model='succinctly/text2image-prompt-generator')

# name.txt is expected to hold one candidate seed phrase per line; readlines()
# keeps the trailing "\n" on each entry (stripped later where it is consumed).
with open("name.txt", "r") as f:
    line = f.readlines()
|
10 |
+
|
11 |
+
|
12 |
+
def generate(starting_text):
    """Expand *starting_text* into Midjourney-style image prompts.

    Runs the GPT-2 prompt-generator pipeline with a fresh random seed and
    returns up to 8 filtered continuations joined by newlines.  If
    *starting_text* is empty, a random seed phrase is drawn from ``line``
    (the contents of name.txt).  Retries up to 6 times when all candidates
    are filtered out; on the last attempt the (possibly empty) result is
    returned anyway so the caller always gets a string.

    Args:
        starting_text: User-supplied prompt stub; may be "".

    Returns:
        Newline-joined generated prompts, possibly "" if every attempt
        produced only filtered-out candidates.
    """
    for count in range(6):
        # Re-seed every attempt so retries produce different generations.
        seed = random.randint(100, 1000000)
        set_seed(seed)

        # If the text field is empty, seed with a random line from name.txt.
        if starting_text == "":
            starting_text = line[random.randrange(0, len(line))].replace("\n", "").lower().capitalize()
            # Strip punctuation that tends to derail the generator.
            starting_text = re.sub(r"[,:\-–.!;?_]", '', starting_text)
            print(starting_text)

        response = gpt2_pipe(starting_text, max_length=random.randint(60, 90), num_return_sequences=8)
        response_list = []
        for x in response:
            resp = x['generated_text'].strip()
            # Keep only candidates that actually extend the input and do not
            # end on a dangling separator character.
            if resp != starting_text and len(resp) > (len(starting_text) + 4) and not resp.endswith((":", "-", "—")):
                response_list.append(resp)

        response_end = "\n".join(response_list)
        # Drop "word.word" tokens (URL/filename-looking fragments); raw string
        # avoids the invalid "\." escape warning of the original pattern.
        response_end = re.sub(r'[^ ]+\.[^ ]+', '', response_end)
        response_end = response_end.replace("<", "").replace(">", "")
        if response_end != "":
            return response_end
        # Last attempt: return whatever we have (may be "") instead of None.
        if count == 5:
            return response_end
|
37 |
+
|
38 |
+
|
39 |
+
# Gradio UI: single-line English text input, multi-line output for the
# generated prompts.
txt = grad.Textbox(lines=1, label="English", placeholder="English Text here")
out = grad.Textbox(lines=6, label="Generated Text")
# Clickable example inputs rendered below the interface (one-element lists
# because the interface has a single input component).
examples = [["mythology of the Slavs"], ["All-seeing eye monitors these world"], ["astronaut dog"],
            ["A monochrome forest of ebony trees"], ["sad view of worker in office,"],
            ["Headshot photo portrait of John Lennon"], ["wide field with thousands of blue nemophila,"]]
title = "Midjourney Prompt Generator"
description = "This is an unofficial demo for Midjourney Prompt Generator. To use it, simply send your text, or click one of the examples to load them. Read more at the links below.<br>Model: https://huggingface.co/succinctly/text2image-prompt-generator<br>Telegram bot: https://t.me/prompt_generator_bot<br>[![](https://img.shields.io/twitter/follow/DoEvent?label=@DoEvent&style=social)](https://twitter.com/DoEvent)"
article = "<div><center><img src='https://visitor-badge.glitch.me/badge?page_id=max_skobeev_prompt_generator_public' alt='visitor badge'></center></div>"

# Wire the UI to generate() and launch.  NOTE(review): queue(concurrency_count=...)
# is gradio 3.x API (matches sdk_version 3.28.1 in README); it was removed in
# gradio 4 — confirm before upgrading the SDK.  concurrency_count=1 serializes
# generations so only one model call runs at a time.
grad.Interface(fn=generate,
               inputs=txt,
               outputs=out,
               examples=examples,
               title=title,
               description=description,
               article=article,
               allow_flagging='never',
               cache_examples=False).queue(concurrency_count=1, api_open=False).launch(show_api=False, show_error=True)
|
name.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
transformers[sentencepiece]
|
2 |
+
torch
|