Problem with FULLY_CONNECTED op in TF Lite

I want to run a simple neural network model built with Keras on a Raspberry microcontroller. I'm running into a problem with the layers. The code is defined as follows:

#include "main.h"
#include <string.h>
#include "tensorflow/lite/micro/kernels/micro_ops.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/version.h"
#include "my_model.h"

/* Private variables ---------------------------------------------------------*/
TIM_HandleTypeDef htim16;

UART_HandleTypeDef huart2;

/* USER CODE BEGIN PV */

// TFLite globals
namespace {
tflite::ErrorReporter* error_reporter = nullptr;
const tflite::Model* model = nullptr;
tflite::MicroInterpreter* interpreter = nullptr;
TfLiteTensor* model_input = nullptr;
TfLiteTensor* model_output = nullptr;

// Create an area of memory to use for input, output, and other TensorFlow
// arrays. You'll need to adjust this by compiling, running, and looking
// for errors.
constexpr int kTensorArenaSize = 2 * 1024;
__attribute__((aligned(16))) uint8_t tensor_arena[kTensorArenaSize];
} // namespace


void SystemClock_Config(void);
static void MX_GPIO_Init(void);
static void MX_USART2_UART_Init(void);
static void MX_TIM16_Init(void);
/* USER CODE BEGIN PFP */

/* USER CODE END PFP */

/* Private user code ---------------------------------------------------------*/
/* USER CODE BEGIN 0 */

int main(void)
{
/* USER CODE BEGIN 1 */
char buf[50];
int buf_len = 0;
TfLiteStatus tflite_status;
uint32_t num_elements;
uint32_t timestamp;
float y_val;

/* USER CODE END 1 */

/* MCU Configuration--------------------------------------------------------*/

/* Reset of all peripherals, Initializes the Flash interface and the Systick. */
HAL_Init();

/* USER CODE BEGIN Init */

/* USER CODE END Init */

/* Configure the system clock */
SystemClock_Config();

/* USER CODE BEGIN SysInit */

/* USER CODE END SysInit */

/* Initialize all configured peripherals */
MX_GPIO_Init();
MX_USART2_UART_Init();
MX_TIM16_Init();
/* USER CODE BEGIN 2 */

// Start timer/counter
HAL_TIM_Base_Start(&htim16);

// Set up logging (modify tensorflow/lite/micro/debug_log.cc)
static tflite::MicroErrorReporter micro_error_reporter;
error_reporter = &micro_error_reporter;

// Say something to test error reporter
error_reporter->Report("STM32 TensorFlow Lite test");

// Map the model into a usable data structure
model = tflite::GetModel(my_model);
if (model->version() != TFLITE_SCHEMA_VERSION)
{
  error_reporter->Report("Model version does not match Schema");
  while(1);
}

// Pull in only needed operations (should match NN layers). Template parameter
// <n> is number of ops to be added. Available ops:
// tensorflow/lite/micro/kernels/micro_ops.h
static tflite::MicroMutableOpResolver<1> micro_op_resolver;

// Add dense neural network layer operation
tflite_status = micro_op_resolver.AddBuiltin(
    tflite::BuiltinOperator_FULLY_CONNECTED,
    tflite::ops::micro::Register_FULLY_CONNECTED());
if (tflite_status != kTfLiteOk)
{
  error_reporter->Report("Could not add FULLY CONNECTED op");
  while(1);
}

// Build an interpreter to run the model with.
static tflite::MicroInterpreter static_interpreter(
    model, micro_op_resolver, tensor_arena, kTensorArenaSize, error_reporter);
interpreter = &static_interpreter;

// Allocate memory from the tensor_arena for the model's tensors.
tflite_status = interpreter->AllocateTensors();
if (tflite_status != kTfLiteOk)
{
  error_reporter->Report("AllocateTensors() failed");
  while(1);
}

// Assign model input and output buffers (tensors) to pointers
model_input = interpreter->input(0);
model_output = interpreter->output(0);

// Get number of elements in input tensor
num_elements = model_input->bytes / sizeof(float);
buf_len = sprintf(buf, "Number of input elements: %lu\r\n", num_elements);
HAL_UART_Transmit(&huart2, (uint8_t *)buf, buf_len, 100);

/* USER CODE END 2 */

/* Infinite loop */
/* USER CODE BEGIN WHILE */
while (1)
{
  // Fill input buffer (use test value)
  for (uint32_t i = 0; i < num_elements; i++)
  {
    model_input->data.f[i] = 2.0f;
  }

  // Get current timestamp
  timestamp = htim16.Instance->CNT;

  // Run inference
  tflite_status = interpreter->Invoke();
  if (tflite_status != kTfLiteOk)
  {
    error_reporter->Report("Invoke failed");
  }

  // Read output (predicted y) of neural network
  y_val = model_output->data.f[0];

  // Print output of neural network along with inference time (microseconds)
  buf_len = sprintf(buf,
                    "Output: %f | Duration: %lu\r\n",
                    y_val,
                    htim16.Instance->CNT - timestamp);
  HAL_UART_Transmit(&huart2, (uint8_t *)buf, buf_len, 100);

  // Wait before doing it again
  HAL_Delay(500);

  /* USER CODE END WHILE */

  /* USER CODE BEGIN 3 */
}
/* USER CODE END 3 */
}

When I build the program, I get this compile error:

../Core/Src/main.cpp: In function 'int main()':
../Core/Src/main.cpp:181:57: error: no matching function for call to 'tflite::MicroMutableOpResolver<1>::AddBuiltin(tflite::BuiltinOperator, TfLiteRegistration*)'
   tflite::ops::micro::Register_FULLY_CONNECTED());
                                                 ^

In file included from ../Core/Src/main.cpp:31:0:
STM32CubeIDE/workspace_1.3.0/sine/tensorflow_lite/tensorflow/lite/micro/micro_mutable_op_resolver.h:470:16: note: candidate: TfLiteStatus tflite::MicroMutableOpResolver<tOpCount>::AddBuiltin(tflite::BuiltinOperator, const TfLiteRegistration&, tflite::MicroOpResolver::BuiltinParseFunction) [with unsigned int tOpCount = 1; TfLiteStatus = TfLiteStatus; TfLiteRegistration = TfLiteRegistration; tflite::MicroOpResolver::BuiltinParseFunction = TfLiteStatus (*)(const tflite::Operator*, tflite::BuiltinOperator, tflite::ErrorReporter*, tflite::BuiltinDataAllocator*, void**)]
 TfLiteStatus AddBuiltin(tflite::BuiltinOperator op,
              ^~~~~~~~~~
STM32CubeIDE/workspace_1.3.0/sine/tensorflow_lite/tensorflow/lite/micro/micro_mutable_op_resolver.h:470:16: note:   candidate expects 3 arguments, 2 provided
make: *** [Core/Src/subdir.mk:37: Core/Src/main.o] Error 1
"make -j2 all" terminated with exit code 2. Build might be incomplete.

I'm not sure which third argument I need to pass for FULLY_CONNECTED(), or whether the problem is something else.

I ran into the same problem. I wanted to port tflite to a CEVA development board. It compiled without issues, but during execution AddBuiltin(FULLY_CONNECTED) also reported an error. At the moment my only guess is that some devices don't support tflite.

I think I found a version mismatch between the TFLite for Microcontrollers Python library used to generate the model and the TFLite for Microcontrollers C/C++ library used to run inference. I solved the problem with this:

static tflite::MicroMutableOpResolver<1> micro_op_resolver;
tflite_status = micro_op_resolver.AddFullyConnected();
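
For completeness, here is how that fix slots into the original main(), as a minimal sketch. It assumes a recent TFLite Micro revision in which MicroMutableOpResolver exposes per-op methods such as AddFullyConnected() that bundle the kernel registration and the parse function internally:

// Pull in only the needed operations (should match NN layers).
// Newer TFLite Micro versions replace the two-argument AddBuiltin() call
// with one Add method per operator, so no third argument is needed.
static tflite::MicroMutableOpResolver<1> micro_op_resolver;

tflite_status = micro_op_resolver.AddFullyConnected();
if (tflite_status != kTfLiteOk)
{
  error_reporter->Report("Could not add FULLY CONNECTED op");
  while(1);
}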

Have you checked whether the FULLY_CONNECTED operator is listed in the micro_ops.h header file? That header contains the list of registration methods users can choose from when their model needs an operator. I ran into the same problem during compilation and found that I was missing the method TfLiteRegistration Register_FULLY_CONNECTED().
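
For reference, in the TFLite Micro releases that still use the ops::micro namespace, the declaration to look for in micro_ops.h is roughly the following (an approximate excerpt; the exact return type, TfLiteRegistration vs. TfLiteRegistration*, varies between releases):

// tensorflow/lite/micro/kernels/micro_ops.h (approximate excerpt)
namespace tflite {
namespace ops {
namespace micro {

TfLiteRegistration Register_FULLY_CONNECTED();

}  // namespace micro
}  // namespace ops
}  // namespace tflite

If that declaration is missing from your copy of the headers, the FULLY_CONNECTED kernel was never pulled into the build, and the registration call in main() has nothing to resolve against.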