Tensorflow predictor: Specifying the serving_input_receiver_fn

I want to build a predictor from a tf.estimator.Estimator model. To do that, I need to specify a serving_input_receiver_fn that defines the preprocessing graph from the receiver tensors to the features that the predictor will pass to the model_fn.

Here is the estimator's eval_input_fn, for reference:

def eval_input_fn(params):
    # dataset of (features, labels) pairs produced by a custom generator
    ds = tf.data.Dataset.from_generator(
        generator=Eval_Generator(params),
        output_types=(tf.uint16, tf.uint16),
        output_shapes=([3] + params['crop_size'], [2] + params['crop_size']))
    # each augmentation takes and returns a (features, labels) pair
    augmentations = [Convert, Downsample, Clip]
    ds = ds.repeat()
    for augmentation in augmentations:
        ds = ds.map(augmentation, num_parallel_calls=params['threads'])
    ds = ds.batch(1).prefetch(None)
    return ds

I changed the augmentation functions so that they take only one argument (features: tf.Tensor) instead of two (features: tf.Tensor, labels: tf.Tensor), and wrote a corresponding serving_input_receiver_fn that looks like this:

def serving_input_receiver_fn():
    # placeholder that receives the raw input volume at serving time
    rec_raw = tf.placeholder(tf.float32, [3, 256, 256, 256], name='raw')
    # same preprocessing as in eval_input_fn, now applied to a single tensor
    raw = Convert(rec_raw)
    raw = Downsample(raw)
    raw = Clip(raw)
    raw = tf.expand_dims(raw, 0)
    return tf.estimator.export.TensorServingInputReceiver(features=raw, receiver_tensors=rec_raw)
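For illustration only (the actual Convert/Downsample/Clip are not included in this post and presumably do more than a cast), a single-argument augmentation follows this kind of pattern:

import tensorflow as tf

# Hypothetical sketch -- not the real Convert used above.
def Convert(features):
    # single-argument form: only the features tensor is available at serving time
    return tf.cast(features, tf.float32)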

serving_input_receiver_fn returns the following object:

TensorServingInputReceiver(features=<tf.Tensor 'ExpandDims_1:0' shape=(1, 3, 128, 128, 128) dtype=float32>, receiver_tensors={'input': <tf.Tensor 'raw:0' shape=(3, 256, 256, 256) dtype=float32>}, receiver_tensors_alternatives=None)

This seems correct to me. But when I try to instantiate the predictor via:

config = tf.estimator.RunConfig(model_dir=params['model_dir'])
estimator = tf.estimator.Estimator(model_fn=model_fn, params=params, config=config)
predict_fn = tf.contrib.predictor.from_estimator(estimator, serving_input_receiver_fn)

I get the following error message:

INFO:tensorflow:Calling model_fn.
Traceback (most recent call last):
  File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 510, in _apply_op_helper
    preferred_dtype=default_dtype)
  File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 1146, in internal_convert_to_tensor
    ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
  File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py", line 229, in _constant_tensor_conversion_function
    return constant(v, dtype=dtype, name=name)
  File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/python/framework/constant_op.py", line 208, in constant
    value, dtype=dtype, shape=shape, verify_shape=verify_shape))
  File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/python/framework/tensor_util.py", line 430, in make_tensor_proto
    raise ValueError("None values not supported.")
ValueError: None values not supported.

  File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/contrib/predictor/predictor_factories.py", line 105, in from_estimator
    config=config)
  File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/contrib/predictor/core_estimator_predictor.py", line 72, in __init__
    serving_input_receiver, estimator, output_key)
  File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/contrib/predictor/core_estimator_predictor.py", line 37, in _get_signature_def
    estimator.config)
  File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/python/estimator/estimator.py", line 235, in public_model_fn
    return self._call_model_fn(features, labels, mode, config)
  File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/python/estimator/estimator.py", line 1195, in _call_model_fn
    model_fn_results = self._model_fn(features=features, **kwargs)
  File "/fast/AG_Kainmueller/jrumber/flylight_01/train_tf.py", line 227, in model_fn
    gt,fg = tf.unstack(labels,num=2,axis=1)
  File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/python/ops/array_ops.py", line 1027, in unstack
    return gen_array_ops.unpack(value, num=num, axis=axis, name=name)
  File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/python/ops/gen_array_ops.py", line 9429, in unpack
    "Unpack", value=value, num=num, axis=axis, name=name)
  File "/home/jrumber/anaconda3/envs/tf1.12_gpuenv/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py", line 528, in _apply_op_helper
    (input_name, err))
ValueError: Tried to convert 'value' to a tensor and failed. Error: None values not supported.

Since the problem might be with my model_fn, I'll post that as well:

def model_fn(features,labels,mode,params):
    gt,fg = tf.unstack(labels,num=2,axis=1)
    gt.set_shape([1]+params['input_size'])
    fg.set_shape([1]+params['input_size'])
    features.set_shape([1,3]+params['input_size'])
    # first layer to set input_shape
    features = tf.keras.layers.Conv3D(
        input_shape = tuple([3]+params['input_size']),
        data_format = 'channels_first',
        filters  = params['chan'],
        kernel_size = [3,3,3],
        strides=(1, 1, 1),
        padding='same',
        activation='relu',
        kernel_regularizer=tf.keras.regularizers.l2(l=0.01))(features)
    # U-Net 
    out = unet(features, params['unet_initial_filters'], params['width_factor'], params['architecture'])
    # Embedding conv pass
    output_batched = conv_pass(
        out,
        kernel_size=1,
        num_fmaps=params['chan'],
        num_repetitions=1,
        activation=None,
        name='conv_embedding')
    output = tf.squeeze(output_batched)
    # Fg/Bg segmentation conv pass
    mask_batched = conv_pass(
        out,
        kernel_size=1,
        num_fmaps=1,
        num_repetitions=1,
        activation='sigmoid',
        name='conv_mask')
    prob_mask = tf.squeeze(mask_batched)
    logits_mask = logit(prob_mask)
    # store predictions in dict
    predictions = {
        'prob_mask': tf.expand_dims(prob_mask,0),
        'embedding': output,
        'gt': tf.squeeze(gt,0)}
    # TRAIN mode
    if mode == tf.contrib.learn.ModeKeys.TRAIN:
        loss , l_var, l_dist, l_reg = discriminative_loss_single(prediction=output,
                                                                correct_label=tf.squeeze(gt),
                                                                feature_dim=params['chan'],
                                                                delta_v= params['delta_v'],
                                                                delta_d= params['delta_d'],
                                                                param_var= params['param_var'],
                                                                param_dist= params['param_dist'],
                                                                param_reg= params['param_reg']
                                                                )
        mask_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                    labels=tf.squeeze(fg),
                    logits=logits_mask))
        reg_loss =  tf.losses.get_regularization_loss() * 1e-6
        loss += mask_loss + reg_loss
        opt = tf.train.AdamOptimizer(
                learning_rate=0.5e-4,
                beta1=0.95,
                beta2=0.999,
                epsilon=1e-8)
        optimizer = opt.minimize(loss, global_step=tf.train.get_global_step())
        global_step = tf.Variable(1, name='global_step', trainable=False, dtype=tf.int32)
        increment_global_step_op = tf.assign(global_step, global_step+1)
        logging_hook = tf.train.LoggingTensorHook({"loss" : loss,'global_step':increment_global_step_op}, every_n_iter=1)
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions, loss=loss, train_op=optimizer, training_hooks=[logging_hook])
    # PREDICT mode
    if mode == tf.estimator.ModeKeys.PREDICT:
        export_outputs = {
            'predict_output': tf.estimator.export.PredictOutput(predictions)
        }
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions, export_outputs=export_outputs)
    # EVAL mode
    if mode == tf.estimator.ModeKeys.EVAL:
        export_outputs = {
            'eval_output': tf.estimator.export.EvalOutput(predictions)
        }
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions, export_outputs=export_outputs)

Does anyone spot my mistake here? Best :)

The error was in the model_fn. The following lines have to be moved down into the # TRAIN mode section of the function:

    gt,fg = tf.unstack(labels,num=2,axis=1)
    gt.set_shape([1]+params['input_size'])
    fg.set_shape([1]+params['input_size'])

Estimator.predict supplies only the features and passes None for the labels, so tf.unstack throws the exception shown above. Every operation that works on the labels therefore has to be moved into the # TRAIN mode section of the model_fn.
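A minimal, self-contained sketch of that pattern (with a trivial stand-in network instead of the question's U-Net and losses): everything that touches labels sits behind the TRAIN/EVAL branches, so PREDICT mode works even though labels is None.

import tensorflow as tf

def model_fn(features, labels, mode, params):
    # Trivial stand-in for the question's U-Net: a single trainable scalar,
    # just so the sketch is runnable. The real network definition goes here.
    scale = tf.get_variable('scale', shape=[], initializer=tf.ones_initializer())
    net = scale * tf.reduce_mean(tf.cast(features, tf.float32), axis=1, keepdims=True)
    predictions = {'embedding': tf.squeeze(net, 0)}

    # Handle PREDICT first: labels is None here, so this branch must not touch it.
    if mode == tf.estimator.ModeKeys.PREDICT:
        export_outputs = {
            'predict_output': tf.estimator.export.PredictOutput(predictions)}
        return tf.estimator.EstimatorSpec(
            mode=mode, predictions=predictions, export_outputs=export_outputs)

    # TRAIN/EVAL only: labels is a real tensor, so it is safe to unstack it now.
    gt, fg = tf.unstack(labels, num=2, axis=1)
    gt.set_shape([1] + params['input_size'])
    fg.set_shape([1] + params['input_size'])
    # fg would feed the mask loss in the real model; a plain MSE stands in here.
    loss = tf.losses.mean_squared_error(
        labels=tf.cast(gt, tf.float32), predictions=tf.squeeze(net, 1))

    if mode == tf.estimator.ModeKeys.TRAIN:
        train_op = tf.train.AdamOptimizer(1e-4).minimize(
            loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, predictions=predictions)

Note that in the question's model_fn the predictions dict also contains a 'gt' entry derived from the labels, so that entry likewise cannot be part of the PREDICT-mode output.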