why does tensorboard give this error: InvalidArgumentError: Nan in summary histogram for
why does tensorboard give this error: InvalidArgumentError: Nan in summary histogram for
我使用 tf.Keras 的 1D 卷积层构建了一个分类模型。如果我删除 TensorBoard 回调,模型就能正常训练。我是初学者,无法找出问题所在。
请帮忙。
%reload_ext tensorboard
import tensorflow as tf
from tensorflow.keras.layers import Dense, Activation,Conv1D,MaxPool1D,GlobalAveragePooling1D,Dropout,Flatten,concatenate,Input
from tensorflow.keras.models import Model
inp = Input(shape=(1000,21))
a = Conv1D(filters=250,padding='valid', kernel_size=(8),strides=1)(inp)
a=MaxPool1D(1000-8+1,strides=1,padding='valid')(a)
a=Flatten()(a)
b = Conv1D(filters=250,padding='valid', kernel_size=(12))(inp)
b=MaxPool1D(1000-12+1,strides=1,padding='valid')(b)
b=Flatten()(b)
c = Conv1D(filters=250,padding='valid', kernel_size=(16))(inp)
c=MaxPool1D(1000-16+1,strides=1,padding='valid')(c)
c=Flatten()(c)
d = Conv1D(filters=250,padding='valid', kernel_size=(20))(inp)
d=MaxPool1D(1000-20+1,strides=1,padding='valid')(d)
d=Flatten()(d)
e = Conv1D(filters=250,padding='valid', kernel_size=(24))(inp)
e=MaxPool1D(1000-24+1,strides=1,padding='valid')(e)
e=Flatten()(e)
f = Conv1D(filters=250,padding='valid', kernel_size=(28))(inp)
f=MaxPool1D(1000-28+1,strides=1,padding='valid')(f)
f=Flatten()(f)
g = Conv1D(filters=250,padding='valid', kernel_size=(32))(inp)
g=MaxPool1D(1000-32+1,strides=1,padding='valid')(g)
g=Flatten()(g)
h= Conv1D(filters=250,padding='valid', kernel_size=(36))(inp)
h=MaxPool1D(1000-36+1,strides=1,padding='valid')(h)
h=Flatten()(h)
model=concatenate([a,b,c,d,e,f,g,h],1)
model = Dropout(0.3)(model)
#model = Dense(2000,activation='relu')(model)
model = Dense(2892,activation='sigmoid')(model)
model = Model(inp, model)
print(model.summary())
并使用序列class批量生成器输入批量
class MyGenerator(tf.keras.utils.Sequence):
    """Keras Sequence yielding (data, labels) batches for training.

    Parameters
    ----------
    ids : sequence
        Labels, one per sample, encoded via ``encoding_label_np``.
    train_dir : sequence
        Raw sequences, one per sample, encoded via ``encoding_seq_np``.
    batch_size : int, optional
        Samples per batch. Defaults to 100, matching the original
        hard-coded value, so existing callers are unaffected.
    """

    def __init__(self, ids, train_dir, batch_size=100):
        self.ids = ids
        self.train_dir = train_dir
        # Originally 100 was hard-coded separately in __len__ and
        # __getitem__; keeping it in one attribute prevents the two
        # from silently diverging.
        self.batch_size = batch_size

    def __len__(self):
        """Number of batches per epoch (last batch may be short)."""
        return math.ceil(len(self.train_dir) / self.batch_size)

    def __getitem__(self, index):
        """Build batch ``index`` and return (data, labels) arrays."""
        start = index * self.batch_size
        end = min(start + self.batch_size, len(self.train_dir))
        batch_labels = self.ids[start:end]
        batch_seqs = self.train_dir[start:end]
        size = len(batch_labels)
        # Encodings are filled in place by the project helper functions;
        # shapes match the model input (1000, 21) and output (2892).
        data = np.zeros((size, 1000, 21), dtype=np.float32)
        labels = np.zeros((size, 2892), dtype=np.uint8)
        for i, label in enumerate(batch_labels):
            encoding_label_np(label, labels[i])
        for i, seq in enumerate(batch_seqs):
            encoding_seq_np(seq, data[i])
        return data, labels
添加张量板后,它仅运行第一个时期。
Epoch 1/20
10431/10431 [==============================] - ETA: 0s - loss: 2.7652 - accuracy: 0.3496
---------------------------------------------------------------------------
_FallbackException Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gen_summary_ops.py in write_histogram_summary(writer, step, tag, values, name)
463 _ctx._context_handle, tld.device_name, "WriteHistogramSummary", name,
--> 464 tld.op_callbacks, writer, step, tag, values)
465 return _result
_FallbackException: This function does not handle the case of the path where all inputs are not already EagerTensors.
During handling of the above exception, another exception occurred:
InvalidArgumentError Traceback (most recent call last)
13 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
58 ctx.ensure_initialized()
59 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 60 inputs, attrs, num_outputs)
61 except core._NotOkStatusException as e:
62 if name is not None:
InvalidArgumentError: Nan in summary histogram for: conv1d_16/kernel_0 [Op:WriteHistogramSummary]
为了社区的利益,即使该解决方案最初出现在评论区,也在此处(回答部分)给出。
InvalidArgumentError: Nan in summary histogram for: conv1d_16/kernel_0 [Op:WriteHistogramSummary]
在将模型密集层(Dense)的激活函数从 sigmoid 修改为 softmax 后,此问题得到解决。
最终模型结构如下
import tensorflow as tf
from tensorflow.keras.layers import Dense, Activation,Conv1D,MaxPool1D,GlobalAveragePooling1D,Dropout,Flatten,concatenate,Input
from tensorflow.keras.models import Model

# Fixed model: identical to the question's architecture except the
# output activation is softmax (multi-class) instead of sigmoid.
# The eight copy-pasted branches are expressed as a loop over kernel
# sizes; the resulting layer graph is unchanged.
SEQ_LEN = 1000        # input sequence length
NUM_CHANNELS = 21     # features per position
KERNEL_SIZES = (8, 12, 16, 20, 24, 28, 32, 36)

inp = Input(shape=(SEQ_LEN, NUM_CHANNELS))
branches = []
for k in KERNEL_SIZES:
    x = Conv1D(filters=250, padding='valid', kernel_size=k, strides=1)(inp)
    # Pool window spans the full valid-conv output (SEQ_LEN - k + 1),
    # i.e. a global max pool over the time axis.
    x = MaxPool1D(SEQ_LEN - k + 1, strides=1, padding='valid')(x)
    branches.append(Flatten()(x))

model = concatenate(branches, 1)
model = Dropout(0.3)(model)
# softmax (not sigmoid) resolved the NaN-in-histogram error reported
# by the TensorBoard callback.
model = Dense(2892, activation='softmax')(model)
model = Model(inp, model)
print(model.summary())
输出:
Model: "model"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 1000, 21)] 0
__________________________________________________________________________________________________
conv1d (Conv1D) (None, 993, 250) 42250 input_1[0][0]
__________________________________________________________________________________________________
conv1d_1 (Conv1D) (None, 989, 250) 63250 input_1[0][0]
__________________________________________________________________________________________________
conv1d_2 (Conv1D) (None, 985, 250) 84250 input_1[0][0]
__________________________________________________________________________________________________
conv1d_3 (Conv1D) (None, 981, 250) 105250 input_1[0][0]
__________________________________________________________________________________________________
conv1d_4 (Conv1D) (None, 977, 250) 126250 input_1[0][0]
__________________________________________________________________________________________________
conv1d_5 (Conv1D) (None, 973, 250) 147250 input_1[0][0]
__________________________________________________________________________________________________
conv1d_6 (Conv1D) (None, 969, 250) 168250 input_1[0][0]
__________________________________________________________________________________________________
conv1d_7 (Conv1D) (None, 965, 250) 189250 input_1[0][0]
__________________________________________________________________________________________________
max_pooling1d (MaxPooling1D) (None, 1, 250) 0 conv1d[0][0]
__________________________________________________________________________________________________
max_pooling1d_1 (MaxPooling1D) (None, 1, 250) 0 conv1d_1[0][0]
__________________________________________________________________________________________________
max_pooling1d_2 (MaxPooling1D) (None, 1, 250) 0 conv1d_2[0][0]
__________________________________________________________________________________________________
max_pooling1d_3 (MaxPooling1D) (None, 1, 250) 0 conv1d_3[0][0]
__________________________________________________________________________________________________
max_pooling1d_4 (MaxPooling1D) (None, 1, 250) 0 conv1d_4[0][0]
__________________________________________________________________________________________________
max_pooling1d_5 (MaxPooling1D) (None, 1, 250) 0 conv1d_5[0][0]
__________________________________________________________________________________________________
max_pooling1d_6 (MaxPooling1D) (None, 1, 250) 0 conv1d_6[0][0]
__________________________________________________________________________________________________
max_pooling1d_7 (MaxPooling1D) (None, 1, 250) 0 conv1d_7[0][0]
__________________________________________________________________________________________________
flatten (Flatten) (None, 250) 0 max_pooling1d[0][0]
__________________________________________________________________________________________________
flatten_1 (Flatten) (None, 250) 0 max_pooling1d_1[0][0]
__________________________________________________________________________________________________
flatten_2 (Flatten) (None, 250) 0 max_pooling1d_2[0][0]
__________________________________________________________________________________________________
flatten_3 (Flatten) (None, 250) 0 max_pooling1d_3[0][0]
__________________________________________________________________________________________________
flatten_4 (Flatten) (None, 250) 0 max_pooling1d_4[0][0]
__________________________________________________________________________________________________
flatten_5 (Flatten) (None, 250) 0 max_pooling1d_5[0][0]
__________________________________________________________________________________________________
flatten_6 (Flatten) (None, 250) 0 max_pooling1d_6[0][0]
__________________________________________________________________________________________________
flatten_7 (Flatten) (None, 250) 0 max_pooling1d_7[0][0]
__________________________________________________________________________________________________
concatenate (Concatenate) (None, 2000) 0 flatten[0][0]
flatten_1[0][0]
flatten_2[0][0]
flatten_3[0][0]
flatten_4[0][0]
flatten_5[0][0]
flatten_6[0][0]
flatten_7[0][0]
__________________________________________________________________________________________________
dropout (Dropout) (None, 2000) 0 concatenate[0][0]
__________________________________________________________________________________________________
dense (Dense) (None, 2892) 5786892 dropout[0][0]
==================================================================================================
Total params: 6,712,892
Trainable params: 6,712,892
Non-trainable params: 0
______________________________________
我使用 tf.Keras 的 1D 卷积层构建了一个分类模型。如果我删除 TensorBoard 回调,模型就能正常训练。我是初学者,无法找出问题所在,请帮忙。
%reload_ext tensorboard
import tensorflow as tf
from tensorflow.keras.layers import Dense, Activation,Conv1D,MaxPool1D,GlobalAveragePooling1D,Dropout,Flatten,concatenate,Input
from tensorflow.keras.models import Model
inp = Input(shape=(1000,21))
a = Conv1D(filters=250,padding='valid', kernel_size=(8),strides=1)(inp)
a=MaxPool1D(1000-8+1,strides=1,padding='valid')(a)
a=Flatten()(a)
b = Conv1D(filters=250,padding='valid', kernel_size=(12))(inp)
b=MaxPool1D(1000-12+1,strides=1,padding='valid')(b)
b=Flatten()(b)
c = Conv1D(filters=250,padding='valid', kernel_size=(16))(inp)
c=MaxPool1D(1000-16+1,strides=1,padding='valid')(c)
c=Flatten()(c)
d = Conv1D(filters=250,padding='valid', kernel_size=(20))(inp)
d=MaxPool1D(1000-20+1,strides=1,padding='valid')(d)
d=Flatten()(d)
e = Conv1D(filters=250,padding='valid', kernel_size=(24))(inp)
e=MaxPool1D(1000-24+1,strides=1,padding='valid')(e)
e=Flatten()(e)
f = Conv1D(filters=250,padding='valid', kernel_size=(28))(inp)
f=MaxPool1D(1000-28+1,strides=1,padding='valid')(f)
f=Flatten()(f)
g = Conv1D(filters=250,padding='valid', kernel_size=(32))(inp)
g=MaxPool1D(1000-32+1,strides=1,padding='valid')(g)
g=Flatten()(g)
h= Conv1D(filters=250,padding='valid', kernel_size=(36))(inp)
h=MaxPool1D(1000-36+1,strides=1,padding='valid')(h)
h=Flatten()(h)
model=concatenate([a,b,c,d,e,f,g,h],1)
model = Dropout(0.3)(model)
#model = Dense(2000,activation='relu')(model)
model = Dense(2892,activation='sigmoid')(model)
model = Model(inp, model)
print(model.summary())
并使用序列class批量生成器输入批量
class MyGenerator(tf.keras.utils.Sequence):
    """Keras Sequence yielding (data, labels) batches for training.

    Parameters
    ----------
    ids : sequence
        Labels, one per sample, encoded via ``encoding_label_np``.
    train_dir : sequence
        Raw sequences, one per sample, encoded via ``encoding_seq_np``.
    batch_size : int, optional
        Samples per batch. Defaults to 100, matching the original
        hard-coded value, so existing callers are unaffected.
    """

    def __init__(self, ids, train_dir, batch_size=100):
        self.ids = ids
        self.train_dir = train_dir
        # Originally 100 was hard-coded separately in __len__ and
        # __getitem__; keeping it in one attribute prevents the two
        # from silently diverging.
        self.batch_size = batch_size

    def __len__(self):
        """Number of batches per epoch (last batch may be short)."""
        return math.ceil(len(self.train_dir) / self.batch_size)

    def __getitem__(self, index):
        """Build batch ``index`` and return (data, labels) arrays."""
        start = index * self.batch_size
        end = min(start + self.batch_size, len(self.train_dir))
        batch_labels = self.ids[start:end]
        batch_seqs = self.train_dir[start:end]
        size = len(batch_labels)
        # Encodings are filled in place by the project helper functions;
        # shapes match the model input (1000, 21) and output (2892).
        data = np.zeros((size, 1000, 21), dtype=np.float32)
        labels = np.zeros((size, 2892), dtype=np.uint8)
        for i, label in enumerate(batch_labels):
            encoding_label_np(label, labels[i])
        for i, seq in enumerate(batch_seqs):
            encoding_seq_np(seq, data[i])
        return data, labels
添加张量板后,它仅运行第一个时期。
Epoch 1/20
10431/10431 [==============================] - ETA: 0s - loss: 2.7652 - accuracy: 0.3496
---------------------------------------------------------------------------
_FallbackException Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gen_summary_ops.py in write_histogram_summary(writer, step, tag, values, name)
463 _ctx._context_handle, tld.device_name, "WriteHistogramSummary", name,
--> 464 tld.op_callbacks, writer, step, tag, values)
465 return _result
_FallbackException: This function does not handle the case of the path where all inputs are not already EagerTensors.
During handling of the above exception, another exception occurred:
InvalidArgumentError Traceback (most recent call last)
13 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
58 ctx.ensure_initialized()
59 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 60 inputs, attrs, num_outputs)
61 except core._NotOkStatusException as e:
62 if name is not None:
InvalidArgumentError: Nan in summary histogram for: conv1d_16/kernel_0 [Op:WriteHistogramSummary]
为了社区的利益,即使该解决方案最初出现在评论区,也在此处(回答部分)给出。
InvalidArgumentError: Nan in summary histogram for: conv1d_16/kernel_0 [Op:WriteHistogramSummary]
在将模型密集层(Dense)的激活函数从 sigmoid 修改为 softmax 后,此问题得到解决。
最终模型结构如下
import tensorflow as tf
from tensorflow.keras.layers import Dense, Activation,Conv1D,MaxPool1D,GlobalAveragePooling1D,Dropout,Flatten,concatenate,Input
from tensorflow.keras.models import Model

# Fixed model: identical to the question's architecture except the
# output activation is softmax (multi-class) instead of sigmoid.
# The eight copy-pasted branches are expressed as a loop over kernel
# sizes; the resulting layer graph is unchanged.
SEQ_LEN = 1000        # input sequence length
NUM_CHANNELS = 21     # features per position
KERNEL_SIZES = (8, 12, 16, 20, 24, 28, 32, 36)

inp = Input(shape=(SEQ_LEN, NUM_CHANNELS))
branches = []
for k in KERNEL_SIZES:
    x = Conv1D(filters=250, padding='valid', kernel_size=k, strides=1)(inp)
    # Pool window spans the full valid-conv output (SEQ_LEN - k + 1),
    # i.e. a global max pool over the time axis.
    x = MaxPool1D(SEQ_LEN - k + 1, strides=1, padding='valid')(x)
    branches.append(Flatten()(x))

model = concatenate(branches, 1)
model = Dropout(0.3)(model)
# softmax (not sigmoid) resolved the NaN-in-histogram error reported
# by the TensorBoard callback.
model = Dense(2892, activation='softmax')(model)
model = Model(inp, model)
print(model.summary())
输出:
Model: "model"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 1000, 21)] 0
__________________________________________________________________________________________________
conv1d (Conv1D) (None, 993, 250) 42250 input_1[0][0]
__________________________________________________________________________________________________
conv1d_1 (Conv1D) (None, 989, 250) 63250 input_1[0][0]
__________________________________________________________________________________________________
conv1d_2 (Conv1D) (None, 985, 250) 84250 input_1[0][0]
__________________________________________________________________________________________________
conv1d_3 (Conv1D) (None, 981, 250) 105250 input_1[0][0]
__________________________________________________________________________________________________
conv1d_4 (Conv1D) (None, 977, 250) 126250 input_1[0][0]
__________________________________________________________________________________________________
conv1d_5 (Conv1D) (None, 973, 250) 147250 input_1[0][0]
__________________________________________________________________________________________________
conv1d_6 (Conv1D) (None, 969, 250) 168250 input_1[0][0]
__________________________________________________________________________________________________
conv1d_7 (Conv1D) (None, 965, 250) 189250 input_1[0][0]
__________________________________________________________________________________________________
max_pooling1d (MaxPooling1D) (None, 1, 250) 0 conv1d[0][0]
__________________________________________________________________________________________________
max_pooling1d_1 (MaxPooling1D) (None, 1, 250) 0 conv1d_1[0][0]
__________________________________________________________________________________________________
max_pooling1d_2 (MaxPooling1D) (None, 1, 250) 0 conv1d_2[0][0]
__________________________________________________________________________________________________
max_pooling1d_3 (MaxPooling1D) (None, 1, 250) 0 conv1d_3[0][0]
__________________________________________________________________________________________________
max_pooling1d_4 (MaxPooling1D) (None, 1, 250) 0 conv1d_4[0][0]
__________________________________________________________________________________________________
max_pooling1d_5 (MaxPooling1D) (None, 1, 250) 0 conv1d_5[0][0]
__________________________________________________________________________________________________
max_pooling1d_6 (MaxPooling1D) (None, 1, 250) 0 conv1d_6[0][0]
__________________________________________________________________________________________________
max_pooling1d_7 (MaxPooling1D) (None, 1, 250) 0 conv1d_7[0][0]
__________________________________________________________________________________________________
flatten (Flatten) (None, 250) 0 max_pooling1d[0][0]
__________________________________________________________________________________________________
flatten_1 (Flatten) (None, 250) 0 max_pooling1d_1[0][0]
__________________________________________________________________________________________________
flatten_2 (Flatten) (None, 250) 0 max_pooling1d_2[0][0]
__________________________________________________________________________________________________
flatten_3 (Flatten) (None, 250) 0 max_pooling1d_3[0][0]
__________________________________________________________________________________________________
flatten_4 (Flatten) (None, 250) 0 max_pooling1d_4[0][0]
__________________________________________________________________________________________________
flatten_5 (Flatten) (None, 250) 0 max_pooling1d_5[0][0]
__________________________________________________________________________________________________
flatten_6 (Flatten) (None, 250) 0 max_pooling1d_6[0][0]
__________________________________________________________________________________________________
flatten_7 (Flatten) (None, 250) 0 max_pooling1d_7[0][0]
__________________________________________________________________________________________________
concatenate (Concatenate) (None, 2000) 0 flatten[0][0]
flatten_1[0][0]
flatten_2[0][0]
flatten_3[0][0]
flatten_4[0][0]
flatten_5[0][0]
flatten_6[0][0]
flatten_7[0][0]
__________________________________________________________________________________________________
dropout (Dropout) (None, 2000) 0 concatenate[0][0]
__________________________________________________________________________________________________
dense (Dense) (None, 2892) 5786892 dropout[0][0]
==================================================================================================
Total params: 6,712,892
Trainable params: 6,712,892
Non-trainable params: 0
______________________________________