king007 committed
Commit
817d6d5
•
1 Parent(s): cce6693

Update app.py

Files changed (1)
  1. app.py +12 -10
app.py CHANGED
@@ -7,32 +7,34 @@ model = AutoModelForSeq2SeqLM.from_pretrained("merve/chatgpt-prompt-generator-v1
 tokenizer2 = AutoTokenizer.from_pretrained("Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum")
 model2 = AutoModelForSeq2SeqLM.from_pretrained("Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum", from_tf=True)
 
-def generate(prompt):
-
+def generate(prompt, max_new_tokens):
     batch = tokenizer(prompt, return_tensors="pt")
-    generated_ids = model.generate(batch["input_ids"], max_new_tokens=150)
+    generated_ids = model.generate(batch["input_ids"], max_new_tokens=max_new_tokens)
     output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
     return output[0]
 
 def generate2(prompt, max_new_tokens):
     batch = tokenizer2(prompt, return_tensors="pt")
-    generated_ids = model2.generate(batch["input_ids"], max_new_tokens=150)
+    generated_ids = model2.generate(batch["input_ids"], max_new_tokens=max_new_tokens)
     output = tokenizer2.batch_decode(generated_ids, skip_special_tokens=True)
     return output[0]
+
 def generate2_test(prompt):
     batch = tokenizer2(prompt, return_tensors="pt")
     generated_ids = model2.generate(batch["input_ids"], max_new_tokens=150)
     output = tokenizer2.batch_decode(generated_ids, skip_special_tokens=True)
     return output[0]
 
-def generate_prompt(type, prompt, max_new_tokens):
-    if type==1:
-        return generate(prompt)
-    elif type==2:
+def generate_prompt(aitype, prompt, max_new_tokens):
+    if aitype=='1':
+        return generate(prompt, max_new_tokens)
+    elif aitype=='2':
         return generate2(prompt, max_new_tokens)
 #
-input_component = gr.Textbox(label = "Input a persona, e.g. photographer", value = "photographer")
+input_aitype = gr.Textbox(label = "Input a persona, e.g. photographer", value = "2")
+input_prompt = gr.Textbox(label = "Input a persona, e.g. photographer", value = "photographer")
+input_maxtokens = gr.Textbox(label = "max tokens", value = "150")
 output_component = gr.Textbox(label = "Prompt")
 examples = [["photographer"], ["developer"]]
 description = ""
-gr.Interface(generate2_test, inputs = input_component, outputs=output_component, examples=examples, title = "👨🏻‍🎤 ChatGPT Prompt Generator v12 👨🏻‍🎤", description=description).launch()
+gr.Interface(generate_prompt, inputs = [input_aitype,input_prompt,input_maxtokens], outputs=output_component, examples=examples, title = "👨🏻‍🎤 ChatGPT Prompt Generator v12 👨🏻‍🎤", description=description).launch()
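
Note: the committed version still has a few rough edges: the aitype textbox reuses the persona label, each examples row supplies one value although the interface now takes three inputs, and max_new_tokens arrives from a Textbox as a string, which model.generate would not accept as a token count. Below is a minimal cleaned-up sketch of the same app, not part of this commit; the gr.Radio/gr.Slider components, the int() coercion, and the from_tf=True flag on the merve checkpoint (its load line is truncated in the hunk header above) are assumptions of the sketch.

# Sketch only: same two checkpoints, Gradio 3.x-style Interface API assumed.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import gradio as gr

# Load both prompt-generator checkpoints once at startup.
tokenizer = AutoTokenizer.from_pretrained("merve/chatgpt-prompt-generator-v1")
# from_tf=True mirrors the Kaludi load; drop it if the checkpoint ships PyTorch weights.
model = AutoModelForSeq2SeqLM.from_pretrained("merve/chatgpt-prompt-generator-v1", from_tf=True)
tokenizer2 = AutoTokenizer.from_pretrained("Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum")
model2 = AutoModelForSeq2SeqLM.from_pretrained("Kaludi/chatgpt-gpt4-prompts-bart-large-cnn-samsum", from_tf=True)

def generate_prompt(model_choice, persona, max_new_tokens):
    # Route to the selected checkpoint; both share the same seq2seq interface.
    tok, mdl = (tokenizer, model) if model_choice == "1" else (tokenizer2, model2)
    batch = tok(persona, return_tensors="pt")
    # Coerce defensively: a Textbox would deliver max_new_tokens as a string.
    generated_ids = mdl.generate(batch["input_ids"], max_new_tokens=int(max_new_tokens))
    return tok.batch_decode(generated_ids, skip_special_tokens=True)[0]

# A radio button makes the 1/2 choice explicit instead of a free-text field,
# and a slider keeps max_new_tokens numeric.
input_model = gr.Radio(choices=["1", "2"], value="2", label="Model (1 = merve, 2 = Kaludi)")
input_prompt = gr.Textbox(label="Input a persona, e.g. photographer", value="photographer")
input_maxtokens = gr.Slider(minimum=10, maximum=300, value=150, step=10, label="Max new tokens")
output_component = gr.Textbox(label="Prompt")

# Each example row must supply one value per input component.
examples = [["2", "photographer", 150], ["2", "developer", 150]]

gr.Interface(
    generate_prompt,
    inputs=[input_model, input_prompt, input_maxtokens],
    outputs=output_component,
    examples=examples,
    title="👨🏻‍🎤 ChatGPT Prompt Generator v12 👨🏻‍🎤",
).launch()

Running python app.py then serves the Gradio UI; the radio button picks which checkpoint handles the request, and the slider value flows straight into max_new_tokens.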