# NeuroPrompts Model Card
NeuroPrompts is an interface to Stable Diffusion which automatically optimizes a user’s prompt for improved image aesthetics while maintaining stylistic control according to the user’s preferences.
Preprint: [arxiv.org/abs/2311.12229](https://arxiv.org/abs/2311.12229)
NeuroPrompts was accepted to EACL 2024.
## The interface of NeuroPrompts in side-by-side comparison mode
![NeuroPrompts overview](images/comparisons_interface.png)
## Usage
```bash
pip install torch torchvision gradio==3.39.0 transformers diffusers flair==0.12.2 numpy tqdm webdataset pytorch_lightning datasets openai-clip scipy==1.10.1
```
```python
"""Minimal NeuroPrompts usage example: optimize a plain-text prompt with
constrained (NeuroLogic) decoding from a fine-tuned causal LM."""
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import sys
import os
# from categories import styles_list, artists_list, formats_list, perspective_list, booster_list, vibe_list

# Force deterministic kernels so repeated runs with the same seed reproduce
# the same prompt (CUBLAS_WORKSPACE_CONFIG is required by cuBLAS for this).
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)

# Make the local 'neurologic' package importable before importing from it.
neurologic_path = os.path.abspath('neurologic/')
os.environ['NEUROLOGIC_PATH'] = neurologic_path
sys.path.insert(0, neurologic_path)
from neurologic_pe import generate_neurologic

# Load the pre-trained prompt-optimization model and tokenizer.
# NOTE: replace this path with the location of your own checkpoint.
model_name = "/home/srosenma/src/test_45"
model_type = 'finetuned'
# model_type = 'ppo'
rand_seed = 1535471403
model = AutoModelForCausalLM.from_pretrained(model_name).to('cuda')
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Causal LMs (GPT-style) ship without a pad token; reuse EOS for padding.
tokenizer.pad_token = tokenizer.eos_token

# Inference parameters.
length_penalty = 1.0
max_length = 77  # CLIP/Stable Diffusion prompt limit in tokens
beam_size = 5
inference_steps = 25

# Stylistic constraints; None / "" means unconstrained for that category.
curr_input_artist = None
curr_input_style = None
curr_input_format = None
curr_input_perspective = None
curr_input_booster = None
curr_input_vibe = None
curr_input_negative = ""

# The plain-text prompt to optimize.
plain_text = "A boy and his dog"

# Build positive constraints: each selected category contributes a clause that
# may be satisfied in either lowercase or title-case form.
constraints = []
for clause in [curr_input_artist, curr_input_style, curr_input_format,
               curr_input_perspective, curr_input_booster, curr_input_vibe]:
    if clause is not None and len(clause) > 0:
        constraints.append([clause.lower(), clause.title()])
print(f"Positive constraints:{constraints}")

# Build negative constraints from the comma-separated negative input,
# again covering both lowercase and title-case surface forms.
neg_constraints = []
neg_inputs = [i.strip() for i in curr_input_negative.split(',')]
for clause in neg_inputs:
    if clause is not None and len(clause) > 0:
        neg_constraints += [clause.lower(), clause.title()]
print(f"Negative constraints:{neg_constraints}")

# Run constrained beam search to produce the optimized prompt.
# NOTE: replace clusters_file with your local copy of template_keywords.json.
res = generate_neurologic(plain_text,
                          model=model,
                          tokenizer=tokenizer,
                          model_type=model_type,
                          constraint_method='clusters',
                          clusters_file='/home/philliph/mcai/mm-counterfactuals/prompt_engineering/template_keywords.json',
                          user_constraints=constraints if len(constraints) > 0 else None,
                          negative_constraints=neg_constraints if len(neg_constraints) > 0 else None,
                          length_penalty=float(length_penalty),
                          max_tgt_length=int(max_length),
                          beam_size=int(beam_size),
                          num_return_sequences=int(beam_size),
                          ngram_size=2,
                          n_per_cluster=1,
                          # Bug fix: was seed=None, which silently ignored the
                          # rand_seed defined above and made runs non-reproducible
                          # despite the deterministic-algorithms setup.
                          seed=rand_seed)[0][0]

# Print the optimized prompt.
print(f"\nResult:\n{res}")
```
|