# Copyright 2020 The HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Triplet Margin Loss metric.""" | |
import datasets | |
import evaluate | |
import numpy as np | |
_DESCRIPTION = """ | |
Triplet margin loss is a loss function that measures a relative similarity between the samples. | |
A triplet is comprised of reference input 'anchor (a)', matching input 'positive examples (p)' and non-matching input 'negative examples (n)'. | |
The loss function for each triplet is given by:\n | |
L(a, p, n) = max{d(a,p) - d(a,n) + margin, 0}\n | |
where d(x, y) is the 2nd order (Euclidean) pairwise distance between x and y. | |
""" | |
_KWARGS_DESCRIPTION = """
Args:
    anchor (`list` of `float`): Reference input.
    positive (`list` of `float`): Matching input.
    negative (`list` of `float`): Non-matching input.
    margin (`float`, *optional*): The margin of the loss. Defaults to `1.0`.

Returns:
    triplet_margin_loss (`float`): The total triplet margin loss.

Examples:

    Example 1-A simple example
        >>> triplet_margin_loss = evaluate.load("triplet_margin_loss")
        >>> results = triplet_margin_loss.compute(
        ...     anchor=[-0.4765, 1.7133, 1.3971, -1.0121, 0.0732],
        ...     positive=[0.9218, 0.6305, 0.3381, 0.1412, 0.2607],
        ...     negative=[0.1971, 0.7246, 0.6729, 0.0941, 0.1011])
        >>> print(round(results["triplet_margin_loss"], 2))
        1.59

    Example 2-The same as Example 1, except with `margin` set to `2.0`.
        >>> triplet_margin_loss = evaluate.load("triplet_margin_loss")
        >>> results = triplet_margin_loss.compute(
        ...     anchor=[-0.4765, 1.7133, 1.3971, -1.0121, 0.0732],
        ...     positive=[0.9218, 0.6305, 0.3381, 0.1412, 0.2607],
        ...     negative=[0.1971, 0.7246, 0.6729, 0.0941, 0.1011],
        ...     margin=2.0)
        >>> print(round(results["triplet_margin_loss"], 2))
        2.59
"""


_CITATION = """
@article{schultz2003learning,
  title={Learning a distance metric from relative comparisons},
  author={Schultz, Matthew and Joachims, Thorsten},
  journal={Advances in Neural Information Processing Systems},
  volume={16},
  year={2003}
}
"""


class TripletMarginLoss(evaluate.EvaluationModule):
    def _info(self):
        return evaluate.EvaluationModuleInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # Each input vector is passed as a flat list of floats, so each
                    # feature column holds one float component per row; `margin` is a
                    # keyword argument of `compute`, not a dataset column.
                    "anchor": datasets.Value("float"),
                    "positive": datasets.Value("float"),
                    "negative": datasets.Value("float"),
                }
            ),
            reference_urls=["https://proceedings.neurips.cc/paper/2003/hash/d3b1fb02964aa64e257f9f26a31f72cf-Abstract.html"],
        )

    def _compute(self, anchor, positive, negative, margin=1.0):
        if not (len(anchor) == len(positive) == len(negative)):
            raise ValueError("The anchor, positive and negative examples must all have the same length.")
        # Accumulate the squared component-wise differences for the two pairs.
        d_a_p_sum = 0.0
        d_a_n_sum = 0.0
        for a, p, n in zip(anchor, positive, negative):
            d_a_p_sum += (a - p) ** 2
            d_a_n_sum += (a - n) ** 2
        # Hinge on the difference of Euclidean distances: max(d(a, p) - d(a, n) + margin, 0).
        loss = max(np.sqrt(d_a_p_sum) - np.sqrt(d_a_n_sum) + margin, 0)
        return {"triplet_margin_loss": float(loss)}
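

# A minimal local sanity check (an illustrative sketch, not part of the metric
# API): it re-derives the loss for Example 1 of the docstring with plain NumPy
# and compares it to the module's output. Direct instantiation of
# `TripletMarginLoss` here is an assumption for quick local testing; in normal
# use the module is loaded with `evaluate.load("triplet_margin_loss")`.
if __name__ == "__main__":
    anchor = [-0.4765, 1.7133, 1.3971, -1.0121, 0.0732]
    positive = [0.9218, 0.6305, 0.3381, 0.1412, 0.2607]
    negative = [0.1971, 0.7246, 0.6729, 0.0941, 0.1011]

    # Closed-form check of L(a, p, n) = max(d(a, p) - d(a, n) + margin, 0).
    d_a_p = np.linalg.norm(np.asarray(anchor) - np.asarray(positive))
    d_a_n = np.linalg.norm(np.asarray(anchor) - np.asarray(negative))
    expected = max(d_a_p - d_a_n + 1.0, 0.0)  # ~1.59 for this triplet

    result = TripletMarginLoss().compute(anchor=anchor, positive=positive, negative=negative)
    print(result, "expected ~", round(float(expected), 2))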