|
import os
import sys

# Make the vendored ``artemis`` package importable.  The path is built from
# the current working directory, so the process is expected to be started
# from the project root.
sys.path.append(os.path.join(os.getcwd(), "imageprocessing", "artemis"))
|
|
|
import artemis |
|
from .color_detection import get_colors |
|
from .clip_object_recognition import get_objects_in_image |
|
from .emotion_detection import get_all_emotions_in_image |
|
|
|
|
|
import torch |
|
|
|
import clip |
|
|
|
# CLIP is pinned to CPU; clip.load returns the model and its matching
# image-preprocessing transform for the 'ViT-B/32' checkpoint.
clip_device = "cpu"
clip_model, clip_preprocess = clip.load('ViT-B/32', clip_device)

# Image-to-emotion model, also mapped onto CPU at load time.
# NOTE(review): torch.load unpickles the checkpoint file — only ever point
# this at a trusted "img2emo.pt".  The path is cwd-dependent, so this
# presumably assumes the process is started from the project root — TODO
# confirm against the deployment setup.
emo_device = "cpu"
img2emo_model = torch.load(
    os.path.join(
        os.getcwd(),
        "imageprocessing",
        "img2emo.pt"
    ),
    map_location=emo_device
)
|
|
|
|
|
def extract_all_information_from_image(image_filepath: os.PathLike) -> dict:
    """Extract objects (with probabilities), colors, and emotion from an image.

    Parameters
    ----------
    image_filepath : os.PathLike
        Path to the image

    Returns
    -------
    dict
        Dictionary with keys ``"colors_list"``, ``"objects_and_probs"``,
        and ``"emotion"`` describing the image.
    """
    # Dominant colors first, then CLIP object detection, then emotion —
    # each helper reads the image file independently.
    detected_colors = get_colors(image_filepath)

    detected_objects = get_objects_in_image(
        image_filepath=image_filepath,
        model=clip_model,
        preprocess=clip_preprocess,
        device=clip_device,
    )

    # get_all_emotions_in_image returns (top_emotion, extra); only the top
    # emotion is surfaced to callers.
    detected_emotion, _ = get_all_emotions_in_image(
        filepath=image_filepath,
        model=img2emo_model,
    )

    return {
        "colors_list": detected_colors,
        "objects_and_probs": detected_objects,
        "emotion": detected_emotion,
    }