Nick088 committed on
Commit fb18417 · verified · 1 Parent(s): 9d83197

Update app.py

Files changed (1)
  1. app.py +4 -2
app.py CHANGED
@@ -27,7 +27,7 @@ def generate(your_prompt, max_new_tokens, repetition_penalty, temperature, model
 
     model.to(dtype)
 
-    input_text = f"Expand the following prompt to add more detail: {your_prompt}"
+    input_text = f"{task_prefix}: {your_prompt}"
     input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
 
     outputs = model.generate(
@@ -44,7 +44,9 @@ def generate(your_prompt, max_new_tokens, repetition_penalty, temperature, model
     return better_prompt
 
 
-your_prompt = gr.Textbox(label="Your Prompt", interactive=True)
+your_prompt = gr.Textbox(label="Your Prompt", info="Your Prompt that you wanna make better", interactive=True)
+
+task_prefix = gr.Textbox(label="Task Prefix", info="The prompt prefix for how the AI should make yours better",value="Expand the following prompt to add more detail", interactive=True)
 
 max_new_tokens = gr.Slider(value=512, minimum=250, maximum=512, step=1, interactive=True, label="Max New Tokens", info="The maximum numbers of new tokens, controls how long is the output")
 
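The diff only shows the two regions of app.py that this commit touches: the prompt template now uses a user-editable task_prefix instead of a hard-coded instruction, and a matching Textbox is added to the UI. As a rough sketch of how that new field would be wired into the rest of the app, the snippet below reconstructs a minimal version of the surrounding Gradio setup. The checkpoint name, the repetition_penalty and temperature sliders, the output textbox, and the gr.Interface call are all assumptions, since none of them appear in this diff, and the generate() signature is simplified (the real one also takes a model/precision argument, truncated in the hunk header, and calls model.to(dtype)).

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Placeholder checkpoint: the one actually used by this Space is not shown in the diff.
checkpoint = "google/flan-t5-base"
device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint).to(device)

def generate(your_prompt, max_new_tokens, repetition_penalty, temperature, task_prefix):
    # The commit replaces the hard-coded instruction with the user-supplied prefix.
    input_text = f"{task_prefix}: {your_prompt}"
    input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
    outputs = model.generate(
        input_ids,
        max_new_tokens=max_new_tokens,
        repetition_penalty=repetition_penalty,
        temperature=temperature,
        do_sample=True,
    )
    better_prompt = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return better_prompt

# Components from the diff, plus assumed sliders for the remaining parameters.
your_prompt = gr.Textbox(label="Your Prompt", info="Your Prompt that you wanna make better", interactive=True)
task_prefix = gr.Textbox(label="Task Prefix", info="The prompt prefix for how the AI should make yours better", value="Expand the following prompt to add more detail", interactive=True)
max_new_tokens = gr.Slider(value=512, minimum=250, maximum=512, step=1, interactive=True, label="Max New Tokens", info="The maximum numbers of new tokens, controls how long is the output")
repetition_penalty = gr.Slider(value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, label="Repetition Penalty")  # assumed default
temperature = gr.Slider(value=0.7, minimum=0.1, maximum=1.5, step=0.05, interactive=True, label="Temperature")  # assumed default

# The new task_prefix component also has to be appended to the interface inputs,
# in the same position it occupies in generate()'s parameter list.
gr.Interface(
    fn=generate,
    inputs=[your_prompt, max_new_tokens, repetition_penalty, temperature, task_prefix],
    outputs=gr.Textbox(label="Better Prompt"),
    title="Prompt Enhancer",
).launch()

Because Gradio passes interface inputs to the function positionally, the order of the inputs list must match generate()'s parameters; adding the task_prefix textbox without registering it there would leave the new field disconnected from the function.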