Module library.classes.losses

Source code
import tensorflow as tf
from library.classes.generators import BOX_SCALE_FACTOR, PADDING_X, PADDING_Y, print_matrix, ABSOLUT_POSITION_SCALE


class BackmappingRelativeVectorLoss(tf.keras.losses.Loss):
    def __init__(self, name="backmapping_rel_loss"):
        super().__init__(name=name)

    def call(self, y_true, y_pred):
        # Remove padding as we don't want to calculate the loss for the padding
        y_true = y_true[:, PADDING_X:-PADDING_X, PADDING_Y:-PADDING_Y, 0]
        y_pred = y_pred[:, PADDING_X:-PADDING_X, PADDING_Y:-PADDING_Y, 0]

        # Calculate the total bond direction loss
        y_true_n = tf.nn.l2_normalize(y_true, axis=-1) # Normalize the vectors to unit length
        y_pred_n = tf.nn.l2_normalize(y_pred, axis=-1) # Normalize the vectors to unit length
        bond_direction_loss = tf.reduce_mean(tf.square(y_true_n - y_pred_n))

        # Calculate the total bond length loss
        bond_length_loss = []
        for vec_true, vec_pred in zip(y_true, y_pred):  # Python loop over the batch axis
            l_true = tf.norm(vec_true) # Length in angstrom, so the loss magnitude stays physically interpretable
            l_pred = tf.norm(vec_pred) # Length in angstrom, so the loss magnitude stays physically interpretable
            bond_length_loss.append(tf.abs(l_true - l_pred))
        bond_length_loss = tf.reduce_mean(bond_length_loss)

        # Calculate the atom positions
        positions_pred = [tf.constant([0,0,0], dtype=tf.float32)]
        positions_true = [tf.constant([0,0,0], dtype=tf.float32)]
        for i, bond in enumerate(y_pred):
            positions_pred.append(tf.add(positions_pred[i], bond))
        for i, bond in enumerate(y_true):
            positions_true.append(tf.add(positions_true[i], bond))

        # Calculate the total position loss
        position_loss = []
        for pos_true, pos_pred in zip(positions_true, positions_pred):
            position_loss.append(tf.norm(pos_true - pos_pred))
        position_loss = tf.reduce_mean(position_loss)

        # Weighted sum of the direction, length and position terms
        return 0.3 * bond_direction_loss + 0.1 * bond_length_loss + 0.6 * position_loss


class BackmappingAbsolutePositionLoss(tf.keras.losses.Loss):
    def __init__(self, name="backmapping_abs_loss"):
        super().__init__(name=name)

    def call(self, y_true, y_pred):
        # We ignore the padding
        pos_true = y_true[:, PADDING_X:-PADDING_X, PADDING_Y:-PADDING_Y, 0]
        pos_pred = y_pred[:, PADDING_X:-PADDING_X, PADDING_Y:-PADDING_Y, 0]
        
        # Positional loss: norm of the difference between true and predicted positions
        positional_loss = tf.reduce_mean(tf.norm(pos_true - pos_pred))

        # Calculate the total position loss
        return positional_loss
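
Both losses can be plugged into a model like any other tf.keras loss. The following is a minimal usage sketch; the model architecture, grid size, and optimizer are placeholders and not part of the library:

import tensorflow as tf
from library.classes.losses import BackmappingAbsolutePositionLoss

# Hypothetical grid size; the real input/output shapes come from the data generators.
GRID_H, GRID_W = 16, 16

model = tf.keras.Sequential([
    tf.keras.Input(shape=(GRID_H, GRID_W, 1)),
    tf.keras.layers.Conv2D(1, 3, padding="same"),
])
model.compile(optimizer="adam", loss=BackmappingAbsolutePositionLoss())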

Classes

class BackmappingAbsolutePositionLoss (name='backmapping_abs_loss')

Penalizes the difference between true and predicted absolute atom positions, ignoring the padding region.

Loss base class.

To be implemented by subclasses:

  • call(): Contains the logic for loss calculation using y_true, y_pred.

Example subclass implementation:

class MeanSquaredError(Loss):

  def call(self, y_true, y_pred):
    return tf.reduce_mean(tf.math.square(y_pred - y_true), axis=-1)

When used with tf.distribute.Strategy, outside of built-in training loops such as tf.keras compile and fit, please use 'SUM' or 'NONE' reduction types, and reduce losses explicitly in your training loop. Using 'AUTO' or 'SUM_OVER_BATCH_SIZE' will raise an error.

Please see this custom training tutorial for more details on this.

You can implement 'SUM_OVER_BATCH_SIZE' using global batch size like:

with strategy.scope():
  loss_obj = tf.keras.losses.CategoricalCrossentropy(
      reduction=tf.keras.losses.Reduction.NONE)
  ....
  loss = (tf.reduce_sum(loss_obj(labels, predictions)) *
          (1. / global_batch_size))

Initializes Loss class.

Args

reduction
Type of tf.keras.losses.Reduction to apply to loss. Default value is AUTO. AUTO indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to SUM_OVER_BATCH_SIZE. When used with tf.distribute.Strategy, outside of built-in training loops such as tf.keras compile and fit, using AUTO or SUM_OVER_BATCH_SIZE will raise an error. Please see this custom training tutorial for more details.
name
Optional name for the instance.
Source code
class BackmappingAbsolutePositionLoss(tf.keras.losses.Loss):
    def __init__(self, name="backmapping_abs_loss"):
        super().__init__(name=name)

    def call(self, y_true, y_pred):
        # We ignore the padding
        pos_true = y_true[:, PADDING_X:-PADDING_X, PADDING_Y:-PADDING_Y, 0]
        pos_pred = y_pred[:, PADDING_X:-PADDING_X, PADDING_Y:-PADDING_Y, 0]
        
        # Positional loss: norm of the difference between true and predicted positions
        positional_loss = tf.reduce_mean(tf.norm(pos_true - pos_pred))

        # Calculate the total position loss
        return positional_loss

Ancestors

  • keras.losses.Loss

Methods

def call(self, y_true, y_pred)

Invokes the Loss instance.

Args

y_true
Ground truth values. shape = [batch_size, d0, .. dN], except sparse loss functions such as sparse categorical crossentropy where shape = [batch_size, d0, .. dN-1]
y_pred
The predicted values. shape = [batch_size, d0, .. dN]

Returns

Loss values with the shape [batch_size, d0, .. dN-1]. Note that this particular implementation reduces over all dimensions and returns a single scalar value.

Source code
def call(self, y_true, y_pred):
    # We ignore the padding
    pos_true = y_true[:, PADDING_X:-PADDING_X, PADDING_Y:-PADDING_Y, 0]
    pos_pred = y_pred[:, PADDING_X:-PADDING_X, PADDING_Y:-PADDING_Y, 0]
    
    # Positional loss: norm of the difference between true and predicted positions
    positional_loss = tf.reduce_mean(tf.norm(pos_true - pos_pred))

    # Calculate the total position loss
    return positional_loss
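
Because tf.norm is called without an axis argument, the cropped position tensors are reduced with a single Frobenius norm rather than per-atom norms. A small eager-mode sketch with toy values (the 2x2 grid below is a stand-in for real, already-cropped positions):

import tensorflow as tf

pos_true = tf.constant([[[1.0, 2.0], [3.0, 4.0]]])  # shape (batch=1, 2, 2), padding already cropped
pos_pred = tf.constant([[[1.5, 2.0], [3.0, 3.0]]])
loss = tf.reduce_mean(tf.norm(pos_true - pos_pred))  # single Frobenius norm of the difference
print(float(loss))  # sqrt(0.5**2 + 1.0**2) ≈ 1.118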
class BackmappingRelativeVectorLoss (name='backmapping_rel_loss')

Penalizes the difference between true and predicted relative (bond) vectors by combining a direction term, a length term, and a reconstructed-position term.

Loss base class.

To be implemented by subclasses:

  • call(): Contains the logic for loss calculation using y_true, y_pred.

Example subclass implementation:

class MeanSquaredError(Loss):

  def call(self, y_true, y_pred):
    return tf.reduce_mean(tf.math.square(y_pred - y_true), axis=-1)

When used with tf.distribute.Strategy, outside of built-in training loops such as tf.keras compile and fit, please use 'SUM' or 'NONE' reduction types, and reduce losses explicitly in your training loop. Using 'AUTO' or 'SUM_OVER_BATCH_SIZE' will raise an error.

Please see this custom training tutorial for more details on this.

You can implement 'SUM_OVER_BATCH_SIZE' using global batch size like:

with strategy.scope():
  loss_obj = tf.keras.losses.CategoricalCrossentropy(
      reduction=tf.keras.losses.Reduction.NONE)
  ....
  loss = (tf.reduce_sum(loss_obj(labels, predictions)) *
          (1. / global_batch_size))

Initializes Loss class.

Args

reduction
Type of tf.keras.losses.Reduction to apply to loss. Default value is AUTO. AUTO indicates that the reduction option will be determined by the usage context. For almost all cases this defaults to SUM_OVER_BATCH_SIZE. When used with tf.distribute.Strategy, outside of built-in training loops such as tf.keras compile and fit, using AUTO or SUM_OVER_BATCH_SIZE will raise an error. Please see this custom training tutorial for more details.
name
Optional name for the instance.
Source code
class BackmappingRelativeVectorLoss(tf.keras.losses.Loss):
    def __init__(self, name="backmapping_rel_loss"):
        super().__init__(name=name)

    def call(self, y_true, y_pred):
        # Remove padding as we don't want to calculate the loss for the padding
        y_true = y_true[:, PADDING_X:-PADDING_X, PADDING_Y:-PADDING_Y, 0]
        y_pred = y_pred[:, PADDING_X:-PADDING_X, PADDING_Y:-PADDING_Y, 0]

        # Calculate the total bond direction loss
        y_true_n = tf.nn.l2_normalize(y_true, axis=-1) # Normalize the vectors to unit length
        y_pred_n = tf.nn.l2_normalize(y_pred, axis=-1) # Normalize the vectors to unit length
        bond_direction_loss = tf.reduce_mean(tf.square(y_true_n - y_pred_n))

        # Calculate the total bond length loss
        bond_length_loss = []
        for vec_true, vec_pred in zip(y_true, y_pred):  # Python loop over the batch axis
            l_true = tf.norm(vec_true) # Length in angstrom, so the loss magnitude stays physically interpretable
            l_pred = tf.norm(vec_pred) # Length in angstrom, so the loss magnitude stays physically interpretable
            bond_length_loss.append(tf.abs(l_true - l_pred))
        bond_length_loss = tf.reduce_mean(bond_length_loss)

        # Calculate the atom positions
        positions_pred = [tf.constant([0,0,0], dtype=tf.float32)]
        positions_true = [tf.constant([0,0,0], dtype=tf.float32)]
        for i, bond in enumerate(y_pred):
            positions_pred.append(tf.add(positions_pred[i], bond))
        for i, bond in enumerate(y_true):
            positions_true.append(tf.add(positions_true[i], bond))

        # Calculate the total position loss
        position_loss = []
        for pos_true, pos_pred in zip(positions_true, positions_pred):
            position_loss.append(tf.norm(pos_true - pos_pred))
        position_loss = tf.reduce_mean(position_loss)

        # Weighted sum of the direction, length and position terms
        return 0.3 * bond_direction_loss + 0.1 * bond_length_loss + 0.6 * position_loss

Ancestors

  • keras.losses.Loss

Methods

def call(self, y_true, y_pred)

Invokes the Loss instance.

Args

y_true
Ground truth values. shape = [batch_size, d0, .. dN], except sparse loss functions such as sparse categorical crossentropy where shape = [batch_size, d0, .. dN-1]
y_pred
The predicted values. shape = [batch_size, d0, .. dN]

Returns

Loss values with the shape [batch_size, d0, .. dN-1]. Note that this particular implementation reduces over all dimensions and returns a single scalar value.

Source code
def call(self, y_true, y_pred):
    # Remove padding as we don't want to calculate the loss for the padding
    y_true = y_true[:, PADDING_X:-PADDING_X, PADDING_Y:-PADDING_Y, 0]
    y_pred = y_pred[:, PADDING_X:-PADDING_X, PADDING_Y:-PADDING_Y, 0]

    # Calculate the total bond direction loss
    y_true_n = tf.nn.l2_normalize(y_true, axis=-1) # Normalize the vectors to unit length
    y_pred_n = tf.nn.l2_normalize(y_pred, axis=-1) # Normalize the vectors to unit length
    bond_direction_loss = tf.reduce_mean(tf.square(y_true_n - y_pred_n))

    # Calculate the total bond length loss
    bond_length_loss = []
    for vec_true, vec_pred in zip(y_true, y_pred):  # Python loop over the batch axis
        l_true = tf.norm(vec_true) # Length in angstrom, so the loss magnitude stays physically interpretable
        l_pred = tf.norm(vec_pred) # Length in angstrom, so the loss magnitude stays physically interpretable
        bond_length_loss.append(tf.abs(l_true - l_pred))
    bond_length_loss = tf.reduce_mean(bond_length_loss)

    # Calculate the atom positions
    positions_pred = [tf.constant([0,0,0], dtype=tf.float32)]
    positions_true = [tf.constant([0,0,0], dtype=tf.float32)]
    for i, bond in enumerate(y_pred):
        positions_pred.append(tf.add(positions_pred[i], bond))
    for i, bond in enumerate(y_true):
        positions_true.append(tf.add(positions_true[i], bond))

    # Calculate the total position loss
    position_loss = []
    for pos_true, pos_pred in zip(positions_true, positions_pred):
        position_loss.append(tf.norm(pos_true - pos_pred))
    position_loss = tf.reduce_mean(position_loss)

    # Weighted sum of the direction, length and position terms
    return 0.3 * bond_direction_loss + 0.1 * bond_length_loss + 0.6 * position_loss
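
The Python loops above iterate over the batch axis and rely on eager execution. Below is a vectorized sketch of the same three terms; it is not the library's implementation, and it assumes that axis 1 indexes the bonds of one molecule and that the last axis holds the vector components. The explicit origin position used by the loops above (which always contributes a zero difference) is omitted here, so the result is not numerically identical:

import tensorflow as tf

def relative_vector_loss_sketch(y_true, y_pred):
    # Bond direction term: squared difference of unit vectors
    direction = tf.reduce_mean(tf.square(
        tf.nn.l2_normalize(y_true, axis=-1) - tf.nn.l2_normalize(y_pred, axis=-1)))
    # Bond length term: absolute difference of per-bond lengths
    length = tf.reduce_mean(tf.abs(tf.norm(y_true, axis=-1) - tf.norm(y_pred, axis=-1)))
    # Position term: positions reconstructed as cumulative sums of bond vectors along the bond axis
    pos_true = tf.cumsum(y_true, axis=1)
    pos_pred = tf.cumsum(y_pred, axis=1)
    position = tf.reduce_mean(tf.norm(pos_true - pos_pred, axis=-1))
    # Same weighting as the class above (0.3 / 0.1 / 0.6)
    return 0.3 * direction + 0.1 * length + 0.6 * position

# Toy call: one sample with two bond vectors
y_true = tf.constant([[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]])
y_pred = tf.constant([[[0.9, 0.1, 0.0], [0.0, 1.1, 0.0]]])
print(float(relative_vector_loss_sketch(y_true, y_pred)))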