keras: how to get initial loss function value before training



In Keras, I looked into the callbacks mechanism. However, it does not provide any information before training starts: the earliest output always comes after epoch 1. I would like to check the value of the loss function for the very first feed-forward pass, before any weight update. How can I achieve this?
Thanks.



The answer to How to perform feed forward propagation in CNN using Keras? ("set model.trainable = False and then train the model") does not work for me.



I set model.trainable = False before compiling the model, but the model still outputs different loss values at each epoch. This is weird: it should report a constant loss, namely the loss of a single forward pass.
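One quick way to check whether the freeze actually took effect (a sanity check I would add, not part of the original post) is to compare model.get_weights() before and after fit:

w_before = model.get_weights()
model.fit(x=a, y=y, batch_size=len(a), epochs=3, verbose=2)
w_after = model.get_weights()
# If trainable = False took effect, every weight array should be unchanged.
print(all(np.array_equal(u, v) for u, v in zip(w_before, w_after)))

If the weights are unchanged but the reported loss still varies, the variation comes from something other than weight updates.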


The code is as follows:

from keras import backend as K
from keras.models import Sequential, Model
from keras.layers import Input, Dense
from keras.layers.core import Dropout, Activation, Flatten, Lambda
from keras.layers.normalization import BatchNormalization
import keras
import numpy as np
import random
import time
from sklearn.preprocessing import StandardScaler
import tensorflow as tf

def gen_x(n, p, rho):
    # Design matrix: p features with pairwise correlation rho.
    # z is defined up front so the abs(rho) == 1 branch can use it too.
    z = np.random.normal(size=(n, 1))
    if abs(rho) < 1:
        beta = np.sqrt(rho / (1 - rho))
        x0 = np.random.normal(size=(n, p))
        x = beta * np.repeat(z, repeats=p, axis=1) + x0
    if abs(rho) == 1:
        x = np.repeat(z, repeats=p, axis=1)
    return x


## Coefficients for the linear predictor; the commented line is the
## alternating decaying pattern from the paper, replaced here by random draws.
def genecoef(p):
    #return list(map(lambda x: np.power(-1, x) * np.exp(-0.1 * (x - 1)), np.arange(1, p + 1, 1)))
    return list(np.random.rand(p))

## This function creates true survival times as described in section 3 of
## the paper. In all simulations we set snr (signal-to-noise ratio) to 3.
def gen_times(x, snr):
    n, p = x.shape
    coef = genecoef(p)
    f = np.matmul(np.matrix(x), np.matrix(coef).T)
    e = np.random.normal(size=(n, 1))
    # Scale the noise so that var(f) / var(k * e) equals snr.
    k = np.sqrt(np.var(f) / (snr * np.var(e)))
    y = np.exp(f + k * e)
    return y


## Censoring times, generated the same way but without the signal term.
def gen_times_censor(x, snr):
    n, p = x.shape
    coef = genecoef(p)
    f = np.matmul(np.matrix(x), np.matrix(coef).T)
    e = np.random.normal(size=(n, 1))
    k = np.sqrt(np.var(f) / (snr * np.var(e)))
    y = np.exp(k * e)
    return y
def nltr(x):
    # Non-linear transformation of the ten raw features into six derived ones.
    y1 = x[:, 0] * x[:, 1]
    y2 = x[:, 2] * x[:, 3]
    y3 = x[:, 4] ** 2
    y4 = x[:, 5] * (x[:, 6] ** 2)
    y5 = x[:, 7] * x[:, 8] * x[:, 9]
    y6 = 0.5 * np.exp(x[:, 8] * x[:, 9])
    return np.column_stack((y1, y2, y3, y4, y5, y6))


def survdata(n, p, snr, rho):
    x = gen_x(n, p, rho)
    time = gen_times(x, snr)
    censortime = gen_times_censor(x, snr)
    # Observed time is the earlier of the event time and the censoring time.
    y = np.apply_along_axis(np.min, 1, np.column_stack((time, censortime)))
    y = np.array(y)
    # b == 0 censored, b == 1 uncensored
    b = np.apply_along_axis(np.argmax, 1, np.column_stack((time, censortime)))
    b = np.array(b)
    a = x
    # Sort samples by observed survival time, ascending.
    ordery = np.argsort(y)
    a = a[ordery]
    y = y[ordery]
    b = b[ordery]
    event_index = np.argwhere(b == 1).ravel().astype(np.int32)
    nsample = len(b)
    nevent = sum(b)
    # Risk set for each event: all samples still at risk at that event time.
    Rlist = []
    for j in range(nevent):
        Rlist += [list(range(np.argwhere(b == 1).ravel()[j], nsample))]
    bmask = b.astype(bool)
    cumlist = list(reversed(np.append(event_index, n)))
    slarr = np.vectorize(lambda x: (len(x) - 1))
    # nctrue: total number of comparable pairs (the c-index denominator).
    nctrue = np.sum(slarr(Rlist))
    # a: n (#samples) * p (#features) matrix, sorted by survival time ascending
    # y: survival time
    # b: censored (0) or not (1)
    # bmask: bool(b)
    # nevent: #uncensored
    return a, y, b, bmask, nsample, nevent, event_index, Rlist, cumlist, nctrue


n = 50
p = 10
snr = 1
rho = 0.1
a, y, b, bmask, nsample, nevent, event_index, Rlist, cumlist, nctrue = survdata(n, p, snr, rho)

sc = StandardScaler()

a = nltr(a)
a = sc.fit_transform(a)




def ploss(y_true, y_pred):
    # Negative log Cox partial likelihood.
    # y_pred for sample x_i is the value of np.dot(x_i, beta) in the linear Cox case.
    # Sum of predicted scores over the uncensored (event) samples.
    z = K.sum(K.gather(y_pred, event_index))
    currentsum = 0
    # Events are processed from last to first so the risk-set sum over
    # exp(score) can be accumulated incrementally.
    for j in range(nevent):
        currentsum = currentsum + K.sum(K.exp(K.gather(
            y_pred, np.array(range(cumlist[j + 1], cumlist[j])))))
        z = z - K.log(currentsum)
    z = -z
    return z





def c_index_func(y_true, y_pred):
    # Concordance index over comparable pairs (ties are ignored).
    c_hat = 0
    for i in range(nevent - 1):
        # Count samples at risk after event i with a lower predicted score.
        c_hat = c_hat + K.sum(K.cast(
            y_pred[event_index[i] + 1:, 0] < y_pred[event_index[i], 0],
            tf.float32))
    return c_hat / nctrue


model = Sequential()
model.add(Dense(1, activation='linear', kernel_initializer='one',
                batch_input_shape=(a.shape[0], a.shape[1])))
#model.add(Dropout(0.2))

# trainable is set before compile so the freeze is baked into training.
model.trainable = False
model.compile(loss=ploss,
              optimizer=keras.optimizers.SGD(lr=0.001, momentum=0.0,
                                             decay=0.0, nesterov=False),
              metrics=[c_index_func])

model.fit(x=a, y=y, batch_size=len(a), epochs=3, verbose=2)
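For reference, ploss as written computes the negative log Cox partial likelihood (my reading of the code; the post does not name it): each uncensored sample contributes its predicted score minus the log of the summed exponentiated scores over its risk set. With b_i the event indicator and R_i the risk set from Rlist:

\ell(\hat{y}) = -\sum_{i \,:\, b_i = 1} \Big( \hat{y}_i - \log \sum_{j \in R_i} e^{\hat{y}_j} \Big)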





How to output the loss function at epoch = 0? – Ferret Zhang, Aug 29 at 22:26




1 Answer



For this you can just use model.evaluate(x, y): it runs a forward pass without updating any weights and returns the loss followed by the metric values, so the first element is the loss on the given data. Call it before training and it will give you the initial loss.


model.evaluate(x, y)
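Applied to the model in the question, a minimal sketch might look like this (assuming the same a and y; since the model was built with a fixed batch_input_shape, evaluate is called with the full-batch size):

initial = model.evaluate(a, y, batch_size=len(a), verbose=0)
print('initial loss:', initial[0])      # loss comes first
print('initial c-index:', initial[1])   # then the compiled metrics
model.fit(x=a, y=y, batch_size=len(a), epochs=3, verbose=2)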





That is really helpful. – Ferret Zhang, Aug 30 at 18:15



