
lipdp.losses module

DP_KCosineSimilarity

Bases: DP_Loss

Source code in lipdp/losses.py
class DP_KCosineSimilarity(DP_Loss):
    def __init__(
        self,
        K=1.0,
        axis=-1,
        reduction=tf.keras.losses.Reduction.AUTO,
        name="cosine_similarity",
    ):
        super().__init__(reduction=reduction, name=name)
        # as the epsilon is applied before the sqrt in tf.linalg.l2_normalize, we
        # apply the square to it
        self.K = K**2
        self.axis = axis

    @tf.function
    def call(self, y_true, y_pred):
        y_true = tf.linalg.l2_normalize(y_true, epsilon=self.K, axis=self.axis)
        y_pred = tf.linalg.l2_normalize(y_pred, epsilon=self.K, axis=self.axis)
        return -tf.reduce_sum(y_true * y_pred, axis=self.axis)

    def get_L(self):
        """returns the lipschitz constant of the loss"""
        return 1 / float(self.K)

get_L()

Returns the Lipschitz constant of the loss.

Source code in lipdp/losses.py
def get_L(self):
    """returns the lipschitz constant of the loss"""
    return 1 / float(self.K)
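
DP_KCosineSimilarity computes the negative cosine similarity between y_true and y_pred, with the norm used for normalization clamped from below by K (stored squared because tf.linalg.l2_normalize applies its epsilon before the square root), which keeps the loss Lipschitz even for near-zero inputs. A minimal usage sketch follows; the tensors are illustrative and the import path lipdp.losses is assumed from this page's title.

import tensorflow as tf
from lipdp.losses import DP_KCosineSimilarity  # import path assumed from this page

# Illustrative batch of 2-dimensional embeddings.
y_true = tf.constant([[1.0, 0.0], [0.0, 1.0]])
y_pred = tf.constant([[0.8, 0.2], [0.1, 0.9]])

loss_fn = DP_KCosineSimilarity(K=1.0)
value = loss_fn(y_true, y_pred)        # reduced negative cosine similarity
print(float(value), loss_fn.get_L())   # get_L() is 1 / K**2 with the K passed to __init__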

DP_Loss

Bases: Loss

Source code in lipdp/losses.py
class DP_Loss(Loss):
    def get_L(self):
        """returns the lipschitz constant of the loss"""
        raise NotImplementedError()

get_L()

Returns the Lipschitz constant of the loss.

Source code in lipdp/losses.py
def get_L(self):
    """returns the lipschitz constant of the loss"""
    raise NotImplementedError()
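
DP_Loss is the abstract base class: a concrete loss implements call and reports its Lipschitz constant through get_L, which downstream DP accounting can use to bound gradient sensitivity. A minimal sketch of a custom subclass (the DP_ScaledMAE name and its scale parameter are hypothetical; the constructor arguments assume DP_Loss inherits the reduction and name handling of tf.keras.losses.Loss):

import tensorflow as tf
from lipdp.losses import DP_Loss  # import path assumed from this page

class DP_ScaledMAE(DP_Loss):
    """Hypothetical example: mean absolute error scaled by a known factor."""

    def __init__(self, scale=0.5,
                 reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
                 name="ScaledMAE"):
        super().__init__(reduction=reduction, name=name)
        self.scale = scale

    def call(self, y_true, y_pred):
        return self.scale * tf.reduce_mean(tf.abs(y_true - y_pred), axis=-1)

    def get_L(self):
        # the gradient of the per-sample term w.r.t. y_pred has norm at most scale
        return self.scale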

DP_MeanAbsoluteError

Bases: tf.keras.losses.MeanAbsoluteError, DP_Loss

Source code in lipdp/losses.py
class DP_MeanAbsoluteError(tf.keras.losses.MeanAbsoluteError, DP_Loss):
    def __init__(
        self,
        reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
        name="MulticlassKR",
    ):
        r"""
        Mean Absolute Error
        """
        super(DP_MeanAbsoluteError, self).__init__(reduction=reduction, name=name)

    def get_L(self):
        """returns the lipschitz constant of the loss"""
        return 1.0

__init__(reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE, name='MulticlassKR')

Mean Absolute Error

Source code in lipdp/losses.py
def __init__(
    self,
    reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
    name="MulticlassKR",
):
    r"""
    Mean Absolute Error
    """
    super(DP_MeanAbsoluteError, self).__init__(reduction=reduction, name=name)

get_L()

Returns the Lipschitz constant of the loss.

Source code in lipdp/losses.py
def get_L(self):
    """returns the lipschitz constant of the loss"""
    return 1.0
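
A quick numeric check with illustrative values: the default SUM_OVER_BATCH_SIZE reduction averages the per-sample absolute errors, and get_L() reports 1.0 because the gradient of |y_true - y_pred| with respect to y_pred never exceeds norm 1. The import path lipdp.losses is assumed from this page.

import tensorflow as tf
from lipdp.losses import DP_MeanAbsoluteError

loss_fn = DP_MeanAbsoluteError()
y_true = tf.constant([[0.0], [1.0]])
y_pred = tf.constant([[0.5], [0.25]])

print(float(loss_fn(y_true, y_pred)))  # (0.5 + 0.75) / 2 = 0.625
print(loss_fn.get_L())                 # 1.0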

DP_MulticlassHKR

Bases: losses.MulticlassHKR, DP_Loss

Source code in lipdp/losses.py
class DP_MulticlassHKR(losses.MulticlassHKR, DP_Loss):
    def __init__(
            self,
            alpha=10.0,
            min_margin=1.0,
            reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
            name="MulticlassHKR",
    ):
        """
        The multiclass version of HKR. This is done by computing the HKR term over each
        class and averaging the results.

        Note that `y_true` should be one-hot encoded or pre-processed with the
        `deel.lip.utils.process_labels_for_multi_gpu()` function.

        Using a multi-GPU/TPU strategy requires to set `multi_gpu` to True and to
        pre-process the labels `y_true` with the
        `deel.lip.utils.process_labels_for_multi_gpu()` function.

        Args:
            alpha (float): regularization factor
            min_margin (float): margin to enforce.
            multi_gpu (bool): set to True when running on multi-GPU/TPU
            reduction: passed to tf.keras.Loss constructor
            name (str): passed to tf.keras.Loss constructor

        """
        super(DP_MulticlassHKR, self).__init__(
            alpha=alpha,
            min_margin=min_margin,
            multi_gpu=False,
            reduction=reduction,
            name=name,
        )

    def get_L(self):
        """returns the lipschitz constant of the loss"""
        return self.alpha + 1.0

__init__(alpha=10.0, min_margin=1.0, reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE, name='MulticlassHKR')

The multiclass version of HKR. This is done by computing the HKR term over each class and averaging the results.

Note that y_true should be one-hot encoded or pre-processed with the deel.lip.utils.process_labels_for_multi_gpu() function.

Using a multi-GPU/TPU strategy requires setting multi_gpu to True and pre-processing the labels y_true with the deel.lip.utils.process_labels_for_multi_gpu() function.

Parameters:

    alpha (float): regularization factor. Default: 10.0
    min_margin (float): margin to enforce. Default: 1.0
    multi_gpu (bool): set to True when running on multi-GPU/TPU. Not exposed by this constructor: DP_MulticlassHKR always passes multi_gpu=False to the parent class.
    reduction: passed to the tf.keras.Loss constructor. Default: tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE
    name (str): passed to the tf.keras.Loss constructor. Default: 'MulticlassHKR'

Source code in lipdp/losses.py
def __init__(
        self,
        alpha=10.0,
        min_margin=1.0,
        reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
        name="MulticlassHKR",
):
    """
    The multiclass version of HKR. This is done by computing the HKR term over each
    class and averaging the results.

    Note that `y_true` should be one-hot encoded or pre-processed with the
    `deel.lip.utils.process_labels_for_multi_gpu()` function.

    Using a multi-GPU/TPU strategy requires to set `multi_gpu` to True and to
    pre-process the labels `y_true` with the
    `deel.lip.utils.process_labels_for_multi_gpu()` function.

    Args:
        alpha (float): regularization factor
        min_margin (float): margin to enforce.
        multi_gpu (bool): set to True when running on multi-GPU/TPU
        reduction: passed to tf.keras.Loss constructor
        name (str): passed to tf.keras.Loss constructor

    """
    super(DP_MulticlassHKR, self).__init__(
        alpha=alpha,
        min_margin=min_margin,
        multi_gpu=False,
        reduction=reduction,
        name=name,
    )

get_L()

Returns the Lipschitz constant of the loss.

Source code in lipdp/losses.py
def get_L(self):
    """returns the lipschitz constant of the loss"""
    return self.alpha + 1.0
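
A usage sketch with one-hot labels (the logits are illustrative; the import path lipdp.losses is assumed from this page). get_L() returns alpha + 1.0, as shown in the source above.

import tensorflow as tf
from lipdp.losses import DP_MulticlassHKR

loss_fn = DP_MulticlassHKR(alpha=10.0, min_margin=1.0)

# y_true must be one-hot encoded (or pre-processed with
# deel.lip.utils.process_labels_for_multi_gpu).
y_true = tf.one_hot([0, 2], depth=3)
y_pred = tf.constant([[2.0, -1.0, -1.0],
                      [-1.0, -1.0, 2.0]])

print(float(loss_fn(y_true, y_pred)))
print(loss_fn.get_L())  # alpha + 1.0 = 11.0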

DP_MulticlassHinge

Bases: losses.MulticlassHinge, DP_Loss

Source code in lipdp/losses.py
class DP_MulticlassHinge(losses.MulticlassHinge, DP_Loss):
    def __init__(
            self,
            min_margin=1.0,
            reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
            name="MulticlassHinge",
    ):
        """
        Loss to estimate the Hinge loss in a multiclass setup. It computes the
        element-wise Hinge term. Note that this formulation differs from the one
        commonly found in tensorflow/pytorch (which maximises the difference between
        the two largest logits). This formulation is consistent with the binary
        classification loss used in a multiclass fashion.

        Note that `y_true` should be one-hot encoded or pre-processed with the
        `deel.lip.utils.process_labels_for_multi_gpu()` function.

        Args:
            min_margin (float): margin to enforce.
            reduction: passed to tf.keras.Loss constructor
            name (str): passed to tf.keras.Loss constructor

        """
        super(DP_MulticlassHinge, self).__init__(
            min_margin=min_margin, reduction=reduction, name=name
        )

    def get_L(self):
        """returns the lipschitz constant of the loss"""
        return 1.0

__init__(min_margin=1.0, reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE, name='MulticlassHinge')

Loss to estimate the Hinge loss in a multiclass setup. It computes the element-wise Hinge term. Note that this formulation differs from the one commonly found in tensorflow/pytorch (which maximises the difference between the two largest logits). This formulation is consistent with the binary classification loss used in a multiclass fashion.

Note that y_true should be one-hot encoded or pre-processed with the deel.lip.utils.process_labels_for_multi_gpu() function.

Parameters:

    min_margin (float): margin to enforce. Default: 1.0
    reduction: passed to the tf.keras.Loss constructor. Default: tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE
    name (str): passed to the tf.keras.Loss constructor. Default: 'MulticlassHinge'

Source code in lipdp/losses.py
def __init__(
        self,
        min_margin=1.0,
        reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
        name="MulticlassHinge",
):
    """
    Loss to estimate the Hinge loss in a multiclass setup. It computes the
    element-wise Hinge term. Note that this formulation differs from the one
    commonly found in tensorflow/pytorch (which maximises the difference between
    the two largest logits). This formulation is consistent with the binary
    classification loss used in a multiclass fashion.

    Note that `y_true` should be one-hot encoded or pre-processed with the
    `deel.lip.utils.process_labels_for_multi_gpu()` function.

    Args:
        min_margin (float): margin to enforce.
        reduction: passed to tf.keras.Loss constructor
        name (str): passed to tf.keras.Loss constructor

    """
    super(DP_MulticlassHinge, self).__init__(
        min_margin=min_margin, reduction=reduction, name=name
    )

get_L()

Returns the Lipschitz constant of the loss.

Source code in lipdp/losses.py
def get_L(self):
    """returns the lipschitz constant of the loss"""
    return 1.0
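
A usage sketch with one-hot labels (illustrative logits; the import path lipdp.losses is assumed from this page):

import tensorflow as tf
from lipdp.losses import DP_MulticlassHinge

loss_fn = DP_MulticlassHinge(min_margin=1.0)

y_true = tf.one_hot([0, 1], depth=3)
y_pred = tf.constant([[1.5, -0.5, -1.0],
                      [0.2, 0.3, -0.1]])

print(float(loss_fn(y_true, y_pred)))  # element-wise hinge terms, averaged by the reduction
print(loss_fn.get_L())                 # 1.0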

DP_MulticlassKR

Bases: losses.MulticlassKR, DP_Loss

Source code in lipdp/losses.py
class DP_MulticlassKR(losses.MulticlassKR, DP_Loss):
    def __init__(
            self,
            reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
            name="MulticlassKR",
    ):
        r"""
        Loss to estimate average of Wasserstein-1 distance using Kantorovich-Rubinstein
        duality over outputs. In this multiclass setup, the KR term is computed for each
        class and then averaged.

        Note that `y_true` should be one-hot encoded or pre-processed with the
        `deel.lip.utils.process_labels_for_multi_gpu()` function.

        Using a multi-GPU/TPU strategy requires to set `multi_gpu` to True and to
        pre-process the labels `y_true` with the
        `deel.lip.utils.process_labels_for_multi_gpu()` function.

        Args:
            multi_gpu (bool): set to True when running on multi-GPU/TPU
            reduction: passed to tf.keras.Loss constructor
            name (str): passed to tf.keras.Loss constructor

        """
        super(DP_MulticlassKR, self).__init__(reduction=reduction, name=name)

    def get_L(self):
        """returns the lipschitz constant of the loss"""
        return 1.0

__init__(reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE, name='MulticlassKR')

Loss to estimate the average Wasserstein-1 distance using Kantorovich-Rubinstein duality over the outputs. In this multiclass setup, the KR term is computed for each class and then averaged.

Note that y_true should be one-hot encoded or pre-processed with the deel.lip.utils.process_labels_for_multi_gpu() function.

Using a multi-GPU/TPU strategy requires setting multi_gpu to True and pre-processing the labels y_true with the deel.lip.utils.process_labels_for_multi_gpu() function.

Parameters:

    multi_gpu (bool): set to True when running on multi-GPU/TPU. Not exposed by this constructor: DP_MulticlassKR only forwards reduction and name to the parent class.
    reduction: passed to the tf.keras.Loss constructor. Default: tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE
    name (str): passed to the tf.keras.Loss constructor. Default: 'MulticlassKR'

Source code in lipdp/losses.py
def __init__(
        self,
        reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
        name="MulticlassKR",
):
    r"""
    Loss to estimate average of Wasserstein-1 distance using Kantorovich-Rubinstein
    duality over outputs. In this multiclass setup, the KR term is computed for each
    class and then averaged.

    Note that `y_true` should be one-hot encoded or pre-processed with the
    `deel.lip.utils.process_labels_for_multi_gpu()` function.

    Using a multi-GPU/TPU strategy requires to set `multi_gpu` to True and to
    pre-process the labels `y_true` with the
    `deel.lip.utils.process_labels_for_multi_gpu()` function.

    Args:
        multi_gpu (bool): set to True when running on multi-GPU/TPU
        reduction: passed to tf.keras.Loss constructor
        name (str): passed to tf.keras.Loss constructor

    """
    super(DP_MulticlassKR, self).__init__(reduction=reduction, name=name)

get_L()

Returns the Lipschitz constant of the loss.

Source code in lipdp/losses.py
def get_L(self):
    """returns the lipschitz constant of the loss"""
    return 1.0
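
A usage sketch (illustrative logits; the import path lipdp.losses is assumed from this page):

import tensorflow as tf
from lipdp.losses import DP_MulticlassKR

loss_fn = DP_MulticlassKR()

y_true = tf.one_hot([1, 0], depth=2)
y_pred = tf.constant([[0.2, 0.7],
                      [0.9, -0.3]])

print(float(loss_fn(y_true, y_pred)))
print(loss_fn.get_L())  # 1.0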

DP_TauCategoricalCrossentropy

Bases: losses.TauCategoricalCrossentropy, DP_Loss

Source code in lipdp/losses.py
class DP_TauCategoricalCrossentropy(losses.TauCategoricalCrossentropy, DP_Loss):
    def __init__(
            self,
            tau,
            reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
            name="TauCategoricalCrossentropy",
    ):
        """
        Similar to original categorical crossentropy, but with a settable temperature
        parameter.

        Args:
            tau (float): temperature parameter.
            reduction: reduction of the loss, must be SUM_OVER_BATCH_SIZE in order have a correct accounting.
            name (str): name of the loss
        """
        super(DP_TauCategoricalCrossentropy, self).__init__(
            tau=tau, reduction=reduction, name=name
        )

    def get_L(self):
        """returns the lipschitz constant of the loss"""
        # as the implementation divides the loss by self.tau (and is used with from_logits=True),
        # the temperature cancels and the constant sqrt(2) does not depend on tau
        return math.sqrt(2)

__init__(tau, reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE, name='TauCategoricalCrossentropy')

Similar to the original categorical crossentropy, but with a settable temperature parameter.

Parameters:

    tau (float): temperature parameter. Required.
    reduction: reduction of the loss; must be SUM_OVER_BATCH_SIZE in order to have correct accounting. Default: tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE
    name (str): name of the loss. Default: 'TauCategoricalCrossentropy'

Source code in lipdp/losses.py
def __init__(
        self,
        tau,
        reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
        name="TauCategoricalCrossentropy",
):
    """
    Similar to original categorical crossentropy, but with a settable temperature
    parameter.

    Args:
        tau (float): temperature parameter.
        reduction: reduction of the loss, must be SUM_OVER_BATCH_SIZE in order have a correct accounting.
        name (str): name of the loss
    """
    super(DP_TauCategoricalCrossentropy, self).__init__(
        tau=tau, reduction=reduction, name=name
    )

get_L()

Returns the Lipschitz constant of the loss.

Source code in lipdp/losses.py
def get_L(self):
    """returns the lipschitz constant of the loss"""
    # as the implementation divides the loss by self.tau (and is used with from_logits=True),
    # the temperature cancels and the constant sqrt(2) does not depend on tau
    return math.sqrt(2)
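
A usage sketch (illustrative logits; the import path lipdp.losses is assumed from this page). Because the implementation divides the loss by tau, the constant reported by get_L() is sqrt(2) regardless of the chosen temperature.

import math

import tensorflow as tf
from lipdp.losses import DP_TauCategoricalCrossentropy

loss_fn = DP_TauCategoricalCrossentropy(tau=8.0)

y_true = tf.one_hot([0, 1], depth=3)
y_pred = tf.constant([[3.0, 0.0, 0.0],
                      [0.0, 2.0, 1.0]])  # raw logits

print(float(loss_fn(y_true, y_pred)))
print(loss_fn.get_L() == math.sqrt(2))  # True, independent of tau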