Why am I getting "TypeError: Failed to convert object of type <type 'dict'> to Tensor"?

I'm new to TF and ML.

Details about the data: the feature (x) for each sample is a (70 x 70 x 70) tensor, and y is a single float per sample.
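For concreteness, one training sample then looks roughly like this (purely illustrative values; the real descriptors come from the database used below):

import numpy as np

# One hypothetical sample: a 70x70x70 float32 voxel grid plus a scalar target.
x = np.zeros((70, 70, 70), dtype=np.float32)  # feature (voxel grid)
y = -1.23                                     # label (binding energy)
print(x.shape, x.dtype, y)                    # (70, 70, 70) float32 -1.23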

The TFRecords are created with the following code:

import json

import numpy as np
import tensorflow as tf
from ase.db import connect  # assumption: connect()/select() below come from ASE's database module

def convert_to_tf_records():

    def _bytes_feature(value):
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

    def _float64_feature(value):
        return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))

    tfrecords_filename = 'A-100-h2-h2o.tfrecords'

    writer = tf.python_io.TFRecordWriter(tfrecords_filename)

    # Get data from db for now.
    db = connect('results-60-70.db')
    data = db.select(selection='Ti')

    i = 0
    for row in data:

        desc = np.array(json.loads(row.descriptor), dtype=np.float32)
        print(desc.shape)
        be = float(row.binding_energy) * 23  # Convert to Kcal/mol ?

        # Serialize the voxel grid as raw float32 bytes for the bytes feature.
        desc = desc.flatten()
        desc = desc.tostring()

        example = tf.train.Example(features=tf.train.Features(feature={'voxel_grid': _bytes_feature(desc), 'binding_energy': _float64_feature(be)}))

        writer.write(example.SerializeToString())
        i += 1
        if i >= 10:
            break

    writer.close()
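A quick way to check that the records were written as intended (this reader snippet is mine, not part of the original script) is to read one back and verify the decoded shape:

import numpy as np
import tensorflow as tf

# Decode the first serialized example and rebuild the voxel grid from raw bytes.
for record in tf.python_io.tf_record_iterator('A-100-h2-h2o.tfrecords'):
    example = tf.train.Example.FromString(record)
    raw = example.features.feature['voxel_grid'].bytes_list.value[0]
    be = example.features.feature['binding_energy'].float_list.value[0]
    grid = np.frombuffer(raw, dtype=np.float32).reshape(70, 70, 70)
    print(grid.shape, be)  # expect (70, 70, 70) and the stored energy
    break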

The input function:

def my_input_function(fname, perform_shuffle=False, repeat_count=None):

    def _parse_elements(example):
        features = tf.parse_single_example(example, features={'voxel_grid': tf.FixedLenFeature([], tf.string), 'binding_energy': tf.FixedLenFeature([], tf.float32)})
        vg = tf.decode_raw(features['voxel_grid'], tf.float32)
        vg = tf.reshape(vg, [70, 70, 70])
        vg = tf.convert_to_tensor(vg, dtype=tf.float32)
        vg = {'voxel_grid': vg}
        e = tf.cast(features['binding_energy'], tf.float32)

        return vg, e

    def input_function():
        dataset = tf.data.TFRecordDataset(fname).map(_parse_elements)
        dataset = dataset.repeat(repeat_count)
        dataset = dataset.batch(5)
        dataset = dataset.prefetch(1)

        if perform_shuffle:
            dataset = dataset.shuffle(20)  # shuffle() returns a new dataset; reassign it

        iterator = dataset.make_one_shot_iterator()
        batch_features, batch_labels = iterator.get_next()

        return batch_features, batch_labels

    return input_function
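To see what this actually feeds the model (and why features arrives in the model function as a dict), you can pull one batch through a session; this is just a debugging sketch I added, reusing the file name from above:

import tensorflow as tf

features, labels = my_input_function('A-100-h2-h2o.tfrecords', repeat_count=1)()
with tf.Session() as sess:
    f, l = sess.run([features, labels])
    print(type(f), f['voxel_grid'].shape)  # <class 'dict'> (5, 70, 70, 70)
    print(l.shape)                         # (5,)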

The model function:

def my_model_function(features, labels, mode):
    if mode == tf.estimator.ModeKeys.PREDICT:
        tf.logging.info("my_model_fn: PREDICT, {}".format(mode))
    elif mode == tf.estimator.ModeKeys.EVAL:
        tf.logging.info("my_model_fn: EVAL, {}".format(mode))
    elif mode == tf.estimator.ModeKeys.TRAIN:
        tf.logging.info("my_model_fn: TRAIN, {}".format(mode))

    feature_columns = [tf.feature_column.numeric_column('voxel_grid', shape=(70, 70, 70), dtype=tf.float32)]

    # input_layer() looks up 'voxel_grid' in the features dict and returns a flat
    # [batch, 70*70*70] tensor; reshape it into a 5-D volume with one channel for conv3d.
    input_layer = tf.feature_column.input_layer(features, feature_columns)
    input_layer = tf.reshape(input_layer, [-1, 70, 70, 70, 1])

    # Convolution layers
    conv1 = tf.layers.conv3d(inputs=input_layer, strides=(2, 2, 2), filters=32, kernel_size=(7, 7, 7))
    conv2 = tf.layers.conv3d(inputs=conv1, strides=(2, 2, 2), filters=32, kernel_size=(7, 7, 7))
    pool3 = tf.layers.max_pooling3d(inputs=conv2, pool_size=[2, 2, 2], strides=2)
    flat = tf.layers.flatten(pool3)

    dense1 = tf.layers.dense(inputs=flat, units=10, activation=tf.nn.relu)
    dense2 = tf.layers.dense(inputs=dense1, units=10, activation=tf.nn.relu)

    output = tf.layers.dense(inputs=dense2, units=1)

    predictions = {'binding_energy': output}

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate loss
    loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions)

    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    # Add evaluation metrics
    eval_metric_ops = {"mse": tf.metrics.mean_squared_error(labels=labels, predictions=predictions['binding_energy'])}

    if mode == tf.estimator.ModeKeys.EVAL:
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)

When calling model.train using

model = tf.estimator.Estimator(model_fn=my_model_function, model_dir='./model_dir')
model.train(input_fn=my_input_function('A-100-h2-h2o.tfrecords'), steps=100)

I get the following error:

TypeError: Failed to convert object of type <type 'dict'> to Tensor.

Found it!

Changing

# Calculate loss
loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions)

to

# Calculate loss
loss = tf.losses.mean_squared_error(labels=labels, predictions=predictions['binding_energy'])

solved the problem. The model function was passing the whole predictions dict to tf.losses.mean_squared_error, which expects a Tensor and cannot convert a dict, hence the TypeError. Passing the actual output tensor (predictions['binding_energy']) fixes it.
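For completeness, a minimal way to sanity-check the trained model (just a sketch, reusing the model_dir, file name, and model/input functions from above; predict() ignores the labels returned by the input function) is:

from itertools import islice

model = tf.estimator.Estimator(model_fn=my_model_function, model_dir='./model_dir')
# predict() yields one dict per example with the 'binding_energy' key from the model_fn;
# islice stops the generator, since the dataset repeats indefinitely by default.
for pred in islice(model.predict(input_fn=my_input_function('A-100-h2-h2o.tfrecords')), 5):
    print(pred['binding_energy'])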