Tensorflow serving error when connecting with client "input size does not match signature"

I have been exporting the textsum model using the export_textsum.py file shown below, and when I connect with the textsumclient.py file shown below I receive the following error:

Traceback (most recent call last):
  File "textsum_client.py", line 90, in <module>
    tf.app.run()
  File "/usr/local/lib/python2.7/site-packages/tensorflow/python/platform/app.py", line 48, in run
    _sys.exit(main(_sys.argv[:1] + flags_passthrough))
  File "textsum_client.py", line 83, in main
    FLAGS.concurrency, FLAGS.num_tests)
  File "textsum_client.py", line 72, in do_singleDecode
    result = stub.Predict(request, 5.0)  # 5 seconds
  File "/usr/local/lib/python2.7/site-packages/grpc/beta/_client_adaptations.py", line 324, in __call__
    self._request_serializer, self._response_deserializer)
  File "/usr/local/lib/python2.7/site-packages/grpc/beta/_client_adaptations.py", line 210, in _blocking_unary_unary
    raise _abortion_error(rpc_error_call)
grpc.framework.interfaces.face.face.AbortionError: AbortionError(code=StatusCode.INVALID_ARGUMENT, details="input size does not match signature")

I think this may be related to how tf_example is built in my export_textsum file, but honestly I haven't had any luck figuring it out. Does anyone with more experience see what I'm doing wrong here? I'm open to any suggestions that could help me narrow it down. Thanks.

textsumclient.py

from __future__ import print_function

import sys
import threading

# This is a placeholder for a Google-internal import.

from grpc.beta import implementations
import numpy
import tensorflow as tf
from datetime import datetime 

from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
#from tensorflow_serving.example import mnist_input_data


tf.app.flags.DEFINE_integer('concurrency', 1,
                            'maximum number of concurrent inference requests')
tf.app.flags.DEFINE_integer('num_tests', 10, 'Number of test images')
tf.app.flags.DEFINE_string('server', '172.17.0.2:9000', 'PredictionService host:port')
tf.app.flags.DEFINE_string('work_dir', '/tmp', 'Working directory. ')
FLAGS = tf.app.flags.FLAGS


def do_singleDecode(hostport, work_dir, concurrency, num_tests):
  #Connect to server
  host, port = hostport.split(':')
  channel = implementations.insecure_channel(host, int(port))
  stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

  #Prepare our request object
  request = predict_pb2.PredictRequest()
  request.model_spec.name = 'textsum_model'
  request.model_spec.signature_name = 'predict'  

  #Make some test data
  test_data_set = ['This is a test','This is a sample']

  #Lets test her out
  now = datetime.now()
  article, abstract = test_data_set

  #***** POPULATE REQUEST INPUTS *****

  request.inputs['article'].CopyFrom(
      tf.contrib.util.make_tensor_proto(test_data_set[0], shape=[len(test_data_set[0])]))
  request.inputs['abstract'].CopyFrom(
      tf.contrib.util.make_tensor_proto(test_data_set[1], shape=[len(test_data_set[1])]))


  result = stub.Predict(request, 5.0)  # 5 seconds
  waiting = datetime.now() - now
  return result, waiting.microseconds


def main(_):
  if not FLAGS.server:
      print('please specify server host:port')
      return

  result, waiting = do_singleDecode(FLAGS.server, FLAGS.work_dir,
                            FLAGS.concurrency, FLAGS.num_tests)
  print('\nTextsum result: %s%%' % result)
  print('Waiting time is: ', waiting, 'microseconds.')



if __name__ == '__main__':
    tf.app.run()

export_textsum.py

            decode_mdl_hps = hps
            # Only need to restore the 1st step and reuse it since
            # we keep and feed in state for each step's output.
            decode_mdl_hps = hps._replace(dec_timesteps=1)
            model = seq2seq_attention_model.Seq2SeqAttentionModel(
                decode_mdl_hps, vocab, num_gpus=FLAGS.num_gpus)
            decoder = seq2seq_attention_decode.BSDecoder(model, batcher, hps, vocab)
            serialized_output = tf.placeholder(tf.string, name='tf_output')


            serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
            feature_configs = {
                'article': tf.FixedLenFeature(shape=[1], dtype=tf.string),
                'abstract': tf.FixedLenFeature(shape=[1], dtype=tf.string),
            }
            tf_example = tf.parse_example(serialized_tf_example, feature_configs)

            saver = tf.train.Saver()
            config = tf.ConfigProto(allow_soft_placement = True)

            with tf.Session(config = config) as sess:

                # Restore variables from training checkpoints.
                ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    print('Successfully loaded model from %s at step=%s.' %
                        (ckpt.model_checkpoint_path, global_step))
                else:
                    print('No checkpoint file found at %s' % FLAGS.checkpoint_dir)
                    return

                # ************** EXPORT MODEL ***************
                export_path = os.path.join(FLAGS.export_dir,str(FLAGS.export_version))
                print('Exporting trained model to %s' % export_path)


                #-------------------------------------------

                tensor_info_inputs = tf.saved_model.utils.build_tensor_info(serialized_tf_example)
                tensor_info_outputs = tf.saved_model.utils.build_tensor_info(serialized_output)


                prediction_signature = (
                    tf.saved_model.signature_def_utils.build_signature_def(
                        inputs={ tf.saved_model.signature_constants.PREDICT_INPUTS: tensor_info_inputs},
                        outputs={tf.saved_model.signature_constants.PREDICT_OUTPUTS:tensor_info_outputs},
                        method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME
                        ))

                #----------------------------------

                legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')
                builder = saved_model_builder.SavedModelBuilder(export_path)

                builder.add_meta_graph_and_variables(
                    sess=sess, 
                    tags=[tf.saved_model.tag_constants.SERVING],
                    signature_def_map={
                        'predict':prediction_signature,
                    },
                    legacy_init_op=legacy_init_op)
                builder.save()

                print('Successfully exported model to %s' % export_path)
    except:
        traceback.print_exc()
        pass


def main(_):
    Export()

if __name__ == "__main__":
    tf.app.run()

It looks like you should specify a shape of [1] in both the client and the graph definition.

export_textsum.py

feature_configs = {
    'article': tf.FixedLenFeature(shape=[1], dtype=tf.string),
    'abstract': tf.FixedLenFeature(shape=[1], dtype=tf.string),
}

textsumclient.py

  request.inputs['article'].CopyFrom(
      tf.contrib.util.make_tensor_proto([test_data_set[0]], shape=[1]))
  request.inputs['abstract'].CopyFrom(
      tf.contrib.util.make_tensor_proto([test_data_set[1]], shape=[1]))

Or it may be more appropriate to use shape=[len(test_data_set[0])].
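
One way to confirm what the exported SavedModel actually expects (input keys, dtypes, and shapes) is to load it back and print the signature. A minimal sketch, assuming the export path written by export_textsum.py and the TF 1.x tf.saved_model.loader API:

import tensorflow as tf

# Hypothetical path: wherever export_textsum.py wrote the SavedModel
# (FLAGS.export_dir/FLAGS.export_version).
export_path = '/tmp/textsum_export/1'

with tf.Session(graph=tf.Graph()) as sess:
    # Load the SavedModel and print the 'predict' signature so the
    # expected input/output keys, dtypes and shapes are visible.
    meta_graph_def = tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], export_path)
    print(meta_graph_def.signature_def['predict'])

The printed SignatureDef will show that inputs built with signature_constants.PREDICT_INPUTS are keyed 'inputs' rather than 'article' or 'abstract', and the client request keys have to match those signature keys.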

QuantumLicht, thanks again for your help here, as it was part of my problem. It turned out to be related to the keys used in the feature config. I'm still on TF 1.2, and I remember reading about fixes so that the proper key names can be used in newer versions. That said, while debugging I noticed that it expected a single input named "inputs". So I removed "abstract" and set the article as the input. I then had to modify the decode output, and the last issue was that I was only loading the model but never running the function against it to get back the output I needed to feed into tensor_info_outputs.
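
For reference, a minimal client-side sketch of what that change might look like: a single serialized tf.train.Example containing the article, sent under the 'inputs' key with shape [1]. The model/signature names and the assumption that the signature still feeds the serialized_tf_example placeholder from export_textsum.py are carried over from the code above, not confirmed.

from grpc.beta import implementations
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2

channel = implementations.insecure_channel('172.17.0.2', 9000)
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)

request = predict_pb2.PredictRequest()
request.model_spec.name = 'textsum_model'      # assumed model name
request.model_spec.signature_name = 'predict'

# Wrap the article in a tf.train.Example so tf.parse_example in the
# exported graph can read the 'article' feature from it.
article = 'This is a test'
example = tf.train.Example(features=tf.train.Features(feature={
    'article': tf.train.Feature(bytes_list=tf.train.BytesList(value=[article])),
}))

# The signature built with PREDICT_INPUTS expects the key 'inputs',
# and shape [1] matches FixedLenFeature(shape=[1]) in the graph.
request.inputs['inputs'].CopyFrom(
    tf.contrib.util.make_tensor_proto([example.SerializeToString()], shape=[1]))

result = stub.Predict(request, 5.0)  # 5 second timeout
print(result)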