thefcraft commited on
Commit
6a82a26
1 Parent(s): e0d5752

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +94 -1
README.md CHANGED
@@ -4,4 +4,97 @@ datasets:
4
  language:
5
  - en
6
  pipeline_tag: text-generation
7
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  language:
5
  - en
6
  pipeline_tag: text-generation
7
+ ---
8
+
9
+ ### How to use
10
+ ```python
11
import gradio as gr
import pickle
import random
import numpy as np

# Load the pre-trained Markov-chain models from disk.
# SECURITY NOTE: pickle.load can execute arbitrary code embedded in the
# file -- only ship/load a models.pickle that comes from a trusted source.
with open('models.pickle', 'rb') as f:
    models = pickle.load(f)

# Sentinel tokens used by the training pipeline / generation code.
LORA_TOKEN = ''  # '<|>LORA_TOKEN<|>'  (currently disabled)
# WEIGHT_TOKEN = '<|>WEIGHT_TOKEN<|>'
NOT_SPLIT_TOKEN = '<|>NOT_SPLIT_TOKEN<|>'  # protects a literal ', ' inside a single tag
22
+
23
def sample_next(ctx: str, model, k):
    """Sample the next tag for *ctx* from a k-gram Markov *model*.

    The context is truncated to its last *k* comma-separated tags.  When the
    truncated context is unknown to the model, a single space is returned as
    a neutral fallback instead of raising.
    """
    key = ', '.join(ctx.split(', ')[-k:])
    transitions = model.get(key)
    if transitions is None:
        return " "
    candidates = list(transitions.keys())
    weights = list(transitions.values())
    # weights are the model's transition probabilities (assumed to sum to 1)
    return np.random.choice(candidates, p=weights)
35
+
36
def generateText(model, minLen=100, size=5):
    """Generate *size* prompt strings from a k-gram Markov *model*.

    model  -- dict mapping a comma-joined k-tag context to a
              {next_tag: probability} dict; all keys share the same order k.
    minLen -- minimum character length of each returned prompt; shorter
              samples are regenerated recursively.
    size   -- number of prompts to return.

    Returns a list of prompt strings (empty list when size <= 0).
    """
    keys = list(model.keys())
    starting_sent = random.choice(keys)
    # All model keys are k-grams of the same order, so derive k from the
    # chosen start instead of a second, unrelated random key (the original
    # drew k from a different key, which only worked by that assumption).
    k = len(starting_sent.split(', '))

    sentence = starting_sent
    ctx = ', '.join(starting_sent.split(', ')[-k:])

    # Walk the chain until a newline (end-of-prompt marker) appears.
    # NOTE(review): if the chain dead-ends (sample_next keeps returning " "),
    # no newline is ever produced and this loop does not terminate -- the
    # training data is assumed to lead every context to '\n' eventually.
    while True:
        sentence += f", {sample_next(ctx, model, k)}"
        ctx = ', '.join(sentence.split(', ')[-k:])
        if '\n' in sentence:
            break

    # Restore literal ', ' sequences that were protected during training.
    sentence = sentence.replace(NOT_SPLIT_TOKEN, ', ')

    prompt = sentence.split('\n')[0]
    if len(prompt) < minLen:
        # Too short: replace it with a fresh single prompt.
        prompt = generateText(model, minLen, size=1)[0]

    if size <= 0:
        return []          # matches the original's behavior for size <= 0
    if size == 1:
        return [prompt]
    # BUGFIX: the original discarded the prompt built above and generated
    # `size` fresh ones; keep it and only generate the remaining size - 1.
    output = [prompt]
    for _ in range(size - 1):
        output.append(generateText(model, minLen, size=1)[0])
    return output
68
+
69
def sentence_builder(quantity, minLen, Type, negative):
    """Build the UI output text: *quantity* generated prompts, each
    optionally followed by a negative prompt, separated by a dashed ruler.

    Type selects which model pair to use ("NSFW", "SFW", anything else = both).
    """
    # Map the radio-button choice onto an index into the loaded model list.
    idx = {"NSFW": 1, "SFW": 2}.get(Type, 0)
    model = models[idx]

    sections = []
    for _ in range(quantity):
        piece = f"PROMPT: {generateText(model[0], minLen=minLen, size=1)[0]}\n\n"
        if negative:
            piece += f"NEGATIVE PROMPT: {generateText(model[1], minLen=minLen, size=5)[0]}\n"
        piece += "----------------------------------------------------------------"
        sections.append(piece)

    # The original appended '\n\n\n' after every section and sliced off the
    # trailing one with [:-3]; joining produces the identical string.
    return "\n\n\n".join(sections)
85
+
86
+
87
# Gradio front-end: wires sentence_builder's four parameters to UI widgets.
ui = gr.Interface(
    sentence_builder,
    [
        gr.Slider(1, 10, value=4, label="Count", info="Choose between 1 and 10", step=1),
        gr.Slider(100, 1000, value=300, label="minLen", info="Choose between 100 and 1000", step=50),
        gr.Radio(["NSFW", "SFW", "BOTH"], label="TYPE", info="NSFW stands for NOT SAFE FOR WORK; choose the kind of prompts you want."),
        # BUGFIX: label was misspelled "negitive Prompt".
        gr.Checkbox(label="Negative prompt", info="Also generate a negative prompt alongside each prompt?"),
    ],
    "text"
)

if __name__ == "__main__":
    ui.launch()
100
+ ```