Tensorflow Object Detection - convert detected object into an Image

I trained an ssd_mobilenet_v1 model to detect small objects in static grayscale images.

Now I want to determine things like the horizontal angle of the detected object. How can I "extract" the detected object as an image or image array for further geometric analysis?

This is my modified version of the object_detection_tutorial.ipynb file from the Tensorflow Object Detection API on Github (the original can be found here: https://github.com/tensorflow/models/tree/master/research/object_detection).

Code:

Imports

import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile

from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image

# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
from object_detection.utils import ops as utils_ops

Object detection imports

from utils import label_map_util

from utils import visualization_utils as vis_util

Variables

# Name of the directory that contains the exported (frozen) model.
MODEL_NAME = 'shard_graph_ssd'

# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph.pb'

# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'label_map.pbtxt')

NUM_CLASSES = 1

Load the (frozen) Tensorflow model into memory.

detection_graph = tf.Graph()
with detection_graph.as_default():
  od_graph_def = tf.GraphDef()
  with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
    serialized_graph = fid.read()
    od_graph_def.ParseFromString(serialized_graph)
    tf.import_graph_def(od_graph_def, name='')

Loading the label map

label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)

Helper code

def load_image_into_numpy_array(image):
    # The function supports only grayscale images
    last_axis = -1
    dim_to_repeat = 2
    repeats = 3
    grscale_img_3dims = np.expand_dims(image, last_axis)
    training_image = np.repeat(grscale_img_3dims, repeats, dim_to_repeat).astype('uint8')
    assert len(training_image.shape) == 3
    assert training_image.shape[-1] == 3
    return training_image
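
For context, the helper just repeats the single grayscale channel three times so the model gets the (height, width, 3) input it expects. A quick sanity check (a minimal sketch; the file name is hypothetical):

import numpy as np
from PIL import Image

gray = Image.open('example_grayscale.png').convert('L')   # hypothetical test image
arr = load_image_into_numpy_array(gray)
print(arr.shape)                                 # (height, width, 3)
print(np.array_equal(arr[..., 0], arr[..., 1]))  # True - all three channels are identical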

Detection

PATH_TO_TEST_IMAGES_DIR = '/home/usr/test_images'
L = []
for n in os.listdir(PATH_TO_TEST_IMAGES_DIR):
    if n.endswith('png'):
        L.append(n)
L.sort()
TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, i) for i in L ]

# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)

def run_inference_for_single_image(image, graph):
  with graph.as_default():
    with tf.Session() as sess:
      # Get handles to input and output tensors
      ops = tf.get_default_graph().get_operations()
      all_tensor_names = {output.name for op in ops for output in op.outputs}
      tensor_dict = {}
      for key in [
          'num_detections', 'detection_boxes', 'detection_scores',
          'detection_classes', 'detection_masks'
      ]:
        tensor_name = key + ':0'
        if tensor_name in all_tensor_names:
          tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
              tensor_name)
      if 'detection_masks' in tensor_dict:
        # The following processing is only for single image
        detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
        detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
        # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.
        real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
        detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
        detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
        detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
            detection_masks, detection_boxes, image.shape[0], image.shape[1])
        detection_masks_reframed = tf.cast(
            tf.greater(detection_masks_reframed, 0.5), tf.uint8)
        # Follow the convention by adding back the batch dimension
        tensor_dict['detection_masks'] = tf.expand_dims(
            detection_masks_reframed, 0)
      image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')

      # Run inference
      output_dict = sess.run(tensor_dict,
                             feed_dict={image_tensor: np.expand_dims(image, 0)})

      # all outputs are float32 numpy arrays, so convert types as appropriate
      output_dict['num_detections'] = int(output_dict['num_detections'][0])
      output_dict['detection_classes'] = output_dict[
          'detection_classes'][0].astype(np.uint8)
      output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
      output_dict['detection_scores'] = output_dict['detection_scores'][0]
      if 'detection_masks' in output_dict:
        output_dict['detection_masks'] = output_dict['detection_masks'][0]
  return output_dict

i = 0
for image_path in TEST_IMAGE_PATHS:
  image = Image.open(image_path)
  # the array based representation of the image will be used later in order to prepare the
  # result image with boxes and labels on it.
  image_np = load_image_into_numpy_array(image)
  # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
  image_np_expanded = np.expand_dims(image_np, axis=0)
  # Actual detection.
  output_dict = run_inference_for_single_image(image_np, detection_graph)
  # Visualization of the results of a detection.
  vis_util.visualize_boxes_and_labels_on_image_array(
      image_np,
      output_dict['detection_boxes'],
      output_dict['detection_classes'],
      output_dict['detection_scores'],
      category_index,
      instance_masks=output_dict.get('detection_masks'),
      use_normalized_coordinates=True,
      line_thickness=2,
      skip_labels=True,
      max_boxes_to_draw=1,
      min_score_thresh=0.5)
  plt.figure(figsize=IMAGE_SIZE)
  i += 1
  plt.imsave('/home/usr/Images_after_inference/' + str(i) + '.png', image_np, cmap='gray')
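
Note that output_dict['detection_boxes'] contains normalized coordinates in [ymin, xmin, ymax, xmax] order, sorted by descending score, so they have to be scaled to pixel values before cropping. A minimal sketch of reading the best detection (inside the loop above):

box = output_dict['detection_boxes'][0]       # highest-scoring box, normalized [ymin, xmin, ymax, xmax]
score = output_dict['detection_scores'][0]
height, width = image_np.shape[:2]
print(box * [height, width, height, width], score)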

I solved this problem with the following function:

i is a variable used in the loop; it is basically just the index of the current image.

import cv2

def crop_objects(image, image_np, output_dict, i):
    global ymin, ymax, xmin, xmax
    width, height = image.size

    #Coordinates of detected objects
    ymin = int(output_dict['detection_boxes'][0][0]*height)
    xmin = int(output_dict['detection_boxes'][0][1]*width)
    ymax = int(output_dict['detection_boxes'][0][2]*height)
    xmax = int(output_dict['detection_boxes'][0][3]*width)
    crop_img = image_np[ymin:ymax, xmin:xmax]

    # 1. Only crop objects that are detected with a score above 50%;
    #    images with objects below 50% are filled with zeros (black image).
    #    This is something I need in my program.
    # 2. Only crop the object with the highest score (object zero).
    if output_dict['detection_scores'][0] < 0.5:
        crop_img.fill(0)

    #Save cropped object into image
    cv2.imwrite('Images/Step_2/' + str(i) + '.png', crop_img)
    return ymin, ymax, xmin, xmax

It needs these to work:

image = Image.open(image_path)
image_np = load_image_into_numpy_array(image)

def load_image_into_numpy_array(image):
    # Function needed for the image recognition
    last_axis = -1
    dim_to_repeat = 2
    repeats = 3
    grscale_img_3dims = np.expand_dims(image, last_axis)
    training_image = np.repeat(grscale_img_3dims, repeats, dim_to_repeat).astype('uint8')
    assert len(training_image.shape) == 3
    assert training_image.shape[-1] == 3
    return training_image
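
Called inside the detection loop from above, the whole thing looks roughly like this (a sketch; it assumes the output directory Images/Step_2/ already exists):

i = 0
for image_path in TEST_IMAGE_PATHS:
    image = Image.open(image_path)
    image_np = load_image_into_numpy_array(image)
    output_dict = run_inference_for_single_image(image_np, detection_graph)
    ymin, ymax, xmin, xmax = crop_objects(image, image_np, output_dict, i)
    i += 1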

This is probably more code than is needed just to crop the object.
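
A more compact variant, if only the array crop is needed and nothing has to be written to disk, would be something like this (a minimal sketch, not the exact code from my pipeline):

def crop_top_detection(image_np, output_dict, min_score=0.5):
    # Return the highest-scoring detection as an image array;
    # below the score threshold a black patch of the same size is returned instead.
    height, width = image_np.shape[:2]
    ymin, xmin, ymax, xmax = output_dict['detection_boxes'][0]
    crop = image_np[int(ymin * height):int(ymax * height),
                    int(xmin * width):int(xmax * width)].copy()
    if output_dict['detection_scores'][0] < min_score:
        crop.fill(0)
    return crop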