Update app.py
app.py CHANGED
@@ -44,23 +44,23 @@ def generate(your_prompt, task_prefix, max_new_tokens, repetition_penalty, tempe
     return better_prompt
 
 
-your_prompt = gr.Textbox(label="Your Prompt", info="Your Prompt that you wanna make better"
+your_prompt = gr.Textbox(label="Your Prompt", info="Your Prompt that you wanna make better")
 
-task_prefix = gr.Textbox(label="Task Prefix", info="The prompt prefix for how the AI should make yours better",value="Expand the following prompt to add more detail"
+task_prefix = gr.Textbox(label="Task Prefix", info="The prompt prefix for how the AI should make yours better",value="Expand the following prompt to add more detail")
 
-max_new_tokens = gr.Slider(value=512, minimum=250, maximum=512, step=1,
+max_new_tokens = gr.Slider(value=512, minimum=250, maximum=512, step=1, label="Max New Tokens", info="The maximum numbers of new tokens, controls how long is the output")
 
-repetition_penalty = gr.Slider(value=1.2, minimum=0, maximum=2, step=0.05,
+repetition_penalty = gr.Slider(value=1.2, minimum=0, maximum=2, step=0.05, label="Repetition Penalty", info="Penalize repeated tokens, making the AI repeat less itself")
 
-temperature = gr.Slider(value=0.5, minimum=0, maximum=1, step=0.05,
+temperature = gr.Slider(value=0.5, minimum=0, maximum=1, step=0.05, label="Temperature", info="Higher values produce more diverse outputs")
 
 model_precision_type = gr.Dropdown(["fp16", "fp32"], value="fp16", label="Model Precision Type", info="The precision type to load the model, like fp16 which is faster, or fp32 which is more precise but more resource consuming")
 
-top_p = gr.Slider(value=1, minimum=0, maximum=2, step=0.05,
+top_p = gr.Slider(value=1, minimum=0, maximum=2, step=0.05, label="Top P", info="Higher values sample more low-probability tokens")
 
-top_k = gr.Slider(value=50, minimum=1, maximum=100, step=1,
+top_k = gr.Slider(value=50, minimum=1, maximum=100, step=1, label="Top K", info="Higher k means more diverse outputs by considering a range of tokens")
 
-seed = gr.Slider(value=42, minimum=0, maximum=2**32-1,
+seed = gr.Slider(value=42, minimum=0, maximum=2**32-1, label="Seed", info="A starting point to initiate the generation process, put 0 for a random one")
 
 examples = [
     ["A storefront with 'Text to Image' written on it.", 512, 1.2, 0.5, "fp16", 1, 50, 42]
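
For context, a minimal wiring sketch (not part of this commit) of how the components added above could feed the app: the generate() signature is taken from the hunk header, the seed == 0 behaviour follows the Seed slider's own info text, and everything else here (the placeholder generate() body, the output Textbox, the gr.Interface wiring) is an assumption for illustration. Info texts are trimmed for brevity; the full versions are in the diff.

# Hypothetical sketch only: component definitions mirror the added lines above
# (info texts trimmed); the generate() body is a placeholder, not the app's code.
import random

import gradio as gr

your_prompt = gr.Textbox(label="Your Prompt")
task_prefix = gr.Textbox(label="Task Prefix", value="Expand the following prompt to add more detail")
max_new_tokens = gr.Slider(value=512, minimum=250, maximum=512, step=1, label="Max New Tokens")
repetition_penalty = gr.Slider(value=1.2, minimum=0, maximum=2, step=0.05, label="Repetition Penalty")
temperature = gr.Slider(value=0.5, minimum=0, maximum=1, step=0.05, label="Temperature")
model_precision_type = gr.Dropdown(["fp16", "fp32"], value="fp16", label="Model Precision Type")
top_p = gr.Slider(value=1, minimum=0, maximum=2, step=0.05, label="Top P")
top_k = gr.Slider(value=50, minimum=1, maximum=100, step=1, label="Top K")
seed = gr.Slider(value=42, minimum=0, maximum=2**32-1, label="Seed")


def generate(your_prompt, task_prefix, max_new_tokens, repetition_penalty,
             temperature, model_precision_type, top_p, top_k, seed):
    # Per the Seed slider's info text, 0 means "use a random seed".
    if seed == 0:
        seed = random.randint(1, 2**32 - 1)
    # Placeholder: the real app would call the model here with the sampling
    # settings (max_new_tokens, repetition_penalty, temperature, top_p, top_k)
    # in the chosen precision, then return the rewritten prompt.
    better_prompt = f"{task_prefix}: {your_prompt} (seed={seed})"
    return better_prompt


demo = gr.Interface(
    fn=generate,
    inputs=[your_prompt, task_prefix, max_new_tokens, repetition_penalty,
            temperature, model_precision_type, top_p, top_k, seed],
    outputs=gr.Textbox(label="Better Prompt"),
)

if __name__ == "__main__":
    demo.launch()

gr.Interface accepts pre-built component instances, so the definitions added in this diff can be passed as the inputs list unchanged.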