Upload folder using huggingface_hub
text_image_similarity_loss.py
ADDED
import torch
import torch.nn.functional as F
from torchvision import transforms
import open_clip

# Set device
torch_device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the OpenCLIP ViT-B-32 model, its preprocessing pipeline and tokenizer
clip_model, _, clip_preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k')
clip_model = clip_model.to(torch_device)
clip_model.eval()  # model is in train mode by default; matters for models with BatchNorm or stochastic depth active
clip_tokenizer = open_clip.get_tokenizer('ViT-B-32')


def get_text_embedding(text):
    """Return the L2-normalised CLIP embedding of a text prompt."""
    text_tokens = clip_tokenizer([text]).to(torch_device)
    with torch.no_grad(), torch.cuda.amp.autocast():
        text_features = clip_model.encode_text(text_tokens).float()
    text_features /= text_features.norm(dim=-1, keepdim=True)
    return text_features


def get_image_embedding(image):
    """Return the L2-normalised CLIP embedding of a PIL image."""
    image_input = clip_preprocess(image).unsqueeze(0).to(torch_device)
    with torch.no_grad(), torch.cuda.amp.autocast():
        image_features = clip_model.encode_image(image_input).float()
    image_features /= image_features.norm(dim=-1, keepdim=True)
    return image_features


def text_image_similarity_loss(generated_images, target_text="plain background"):
    """Loss that decreases as `generated_images` become more similar to `target_text` in CLIP space.

    `generated_images` should be a batch of RGB tensors in [0, 1] with shape (B, 3, H, W),
    on the same device as the CLIP model, and should already require grad
    (e.g. decoded latents being optimised).
    """
    # Get the (fixed) text embedding
    text_embedding = get_text_embedding(target_text)

    # Resize and normalise the images for the CLIP image encoder;
    # the Normalize statistics are the CLIP ones used by clip_preprocess
    transform = transforms.Compose([
        transforms.Resize((224, 224)),  # ViT-B-32 input resolution
        transforms.Normalize(mean=(0.48145466, 0.4578275, 0.40821073),
                             std=(0.26862954, 0.26130258, 0.27577711)),
    ])
    transformed_images = transform(generated_images)

    # Encode the images with the CLIP image encoder; no torch.no_grad() here,
    # so gradients flow back into generated_images
    with torch.cuda.amp.autocast():
        image_features = clip_model.encode_image(transformed_images).float()
    norm_image_features = image_features / image_features.norm(dim=-1, keepdim=True)

    # Cosine similarity between each image embedding and the text embedding
    cos_sim = F.cosine_similarity(norm_image_features, text_embedding, dim=-1)

    # Define the loss as 1 - mean cosine similarity, so minimising it maximises similarity
    loss = 1 - cos_sim.mean()

    return loss
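
For context, a minimal sketch of how this loss could be driven: gradient descent on a raw image tensor so that it drifts toward the target prompt. The tensor shape, step count, learning rate and the use of Adam are illustrative assumptions, not part of the uploaded file; in a real pipeline `generated_images` would instead come from the generator being guided.

# Hypothetical usage sketch (assumes the definitions from text_image_similarity_loss.py above)
image = torch.rand(1, 3, 64, 64, device=torch_device, requires_grad=True)
optimizer = torch.optim.Adam([image], lr=0.05)

for step in range(100):
    optimizer.zero_grad()
    # clamp keeps the optimised pixels in [0, 1], as the loss expects
    loss = text_image_similarity_loss(image.clamp(0, 1), target_text="plain background")
    loss.backward()
    optimizer.step()
    if step % 20 == 0:
        print(f"step {step}: loss = {loss.item():.4f}")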