Converting tensor to numpy array in custom loss function

Question:

Here is my custom loss function:

import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from cmath import log

epsylon = np.finfo(float).eps

def to_array(tensor):
    return tf.make_ndarray(tensor)


def addError(test,range_min,range_max,result):
    # base error: log-space width of the predicted range
    err = abs(log(range_max/max(range_min,epsylon)))
    if range_min <= test <= range_max:
        result.append(err)
    else:
        # outside the range: add a penalty scaled by the log-distance to the nearest bound
        e1=abs(log(test/max(range_min,epsylon)))
        e2=abs(log(test/max(range_max,epsylon)))
        result.append( min(e1,e2) / max(err,epsylon) *100 + err)


def rangeLoss(yTrue,yPred):
    #print(type(yPred))
    a_pred=to_array(yPred)   # this conversion is where training fails
    a_true=to_array(yTrue)

    result=[]

    for i in range(a_true.size):
        range_min=abs(a_pred[i*2])
        range_max=abs(a_pred[i*2+1])
        test= abs(a_true[i])

        addError(test,range_min,range_max,result)


    return tf.constant(result)

When I run training, it fails with:

/home/ubuntu/.local/lib/python3.6/site-packages/tensorflow/python/framework/tensor_util.py:591 MakeNdarray
        shape = [d.size for d in tensor.tensor_shape.dim]

    AttributeError: 'Tensor' object has no attribute 'tensor_shape'

When I modify to_array to use a proto tensor,

def to_array(tensor):
    proto_tensor = tf.make_tensor_proto(tensor)
    return tf.make_ndarray(proto_tensor)

I get the following error:

    /home/ubuntu/.local/lib/python3.6/site-packages/tensorflow/python/framework/tensor_util.py:451 make_tensor_proto
        _AssertCompatible(values, dtype)
    /home/ubuntu/.local/lib/python3.6/site-packages/tensorflow/python/framework/tensor_util.py:328 _AssertCompatible
        raise TypeError("Expected any non-tensor type, got a tensor instead.")

    TypeError: Expected any non-tensor type, got a tensor instead.

Another option I tried was tensor.numpy(), which resulted in the following error:

    <ipython-input-20-0a8051a4a034>:8 to_array
        return tensor.numpy()

    AttributeError: 'Tensor' object has no attribute 'numpy'

And of course there is tensor.eval(session=tf.compat.v1.Session()), which fails too.

How do I do this?

Asked By: Arsen Zahray


Answers:

I solved the issue by skipping the numpy conversion entirely: instead, I slice yPred into its range_min and range_max columns and express the whole loss with TensorFlow ops. Here is the code:

import numpy as np
import tensorflow as tf

def above_zero(value):
    # clamp to machine epsilon so the logs and divisions below never see zero
    return tf.math.maximum(value, np.finfo(float).eps)

def range_loss(yTrue, yPred):
    # shapes: yTrue: (None,1)
    #         yPred: (None,2) -- columns are [range_min, range_max]
    yTrueSize = yTrue.shape[1]

    # slice yPred into its two columns instead of converting anything to numpy
    range_min = above_zero(tf.math.abs(yPred[:, :yTrueSize]))
    range_max = above_zero(tf.math.abs(yPred[:, yTrueSize:]))

    # following step should not be needed, but let's do it just in case
    yTrue = above_zero(tf.math.abs(yTrue))

    baseError    = tf.math.abs(tf.math.log(range_max / range_min))
    baseErrorDiv = above_zero(baseError)

    topRange    = tf.math.maximum(range_min, range_max)
    bottomRange = tf.math.minimum(range_min, range_max)

    # extra penalty is non-zero only when yTrue falls outside [bottomRange, topRange]
    extraError = (tf.math.maximum(0.0, tf.math.log(yTrue / topRange))
                  + tf.math.maximum(0.0, tf.math.log(bottomRange / yTrue)))
    extraError /= baseErrorDiv

    totalError = tf.math.pow(extraError, 2.0) * 100 + tf.math.pow(baseError, 2.0)

    return tf.math.reduce_sum(totalError)
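
For reference, here is a minimal sketch of how this loss might be plugged into a Keras model. The architecture and input size below are hypothetical; the only real assumptions are a 2-unit output layer (interpreted as range_min, range_max) and targets of shape (None, 1):

import tensorflow as tf

# Hypothetical model: 8 input features, 2 outputs interpreted as (range_min, range_max)
model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation="relu", input_shape=(8,)),
    tf.keras.layers.Dense(2),
])

# Keras calls the loss with batched (yTrue, yPred) tensors, so no numpy conversion is needed
model.compile(optimizer="adam", loss=range_loss)
# model.fit(x_train, y_train, epochs=10)   # y_train has shape (num_samples, 1)
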
Answered By: Arsen Zahray

Thanks for the question @Arsen, I am having similar trouble with my custom loss function. What do you mean by slicing, btw?

Answered By: smukherjee