Arduino Implementation - ofithcheallaigh/masters_project GitHub Wiki

Introduction

This section of the wiki details the implementation of the model on the Arduino Nano 33 BLE Sense microcontroller.

Discussion and Code

During the implementation of the Arduino code, the system did not carry out any correct inferences. To try to understand the issue, the Arduino code was stripped back to the basics required to carry out inference. The input from the sensors was removed, and data taken from the training set was placed in .h files to simulate input data from the sensors.

The Stripped Back Arduino Code

This stripped-back code can be seen below in full, and will be explained underneath:

#include <stdlib.h>
#include <time.h>
#include <stdio.h>

#include <TensorFlowLite.h>
#include <tensorflow/lite/micro/all_ops_resolver.h>
#include <tensorflow/lite/micro/tflite_bridge/micro_error_reporter.h> // Had to add "tflite_bridge"
#include <tensorflow/lite/micro/micro_interpreter.h>
#include <tensorflow/lite/schema/schema_generated.h>
#include <tensorflow/lite/version.h>
#include <tensorflow/lite/c/common.h>

#include "channel1.h" //input_array_1
#include "channel2.h" //input_array_2

#include "model.h"
// #include "object_detect.h"

// Declare the 3D array of data.
float reshaped_data[1][5000][2];

// Declare the 2D array of data.
float output_data[5000][2];

// Global variable used for TF Lite Micro
tflite::MicroErrorReporter tflErrorReporter;

// Import all the TF Lite Micro ops. I could pull in just the ones I need.
// For the minute I will take everything in. But this may change.
tflite::AllOpsResolver tflOpsResolver;

// Used to declare a variable that will be used to store a reference to a TF Lite model
const tflite::Model* tflModel = nullptr;
tflite::MicroInterpreter* tflInterpreter = nullptr;
TfLiteTensor* tflInputTensor = nullptr;
TfLiteTensor* tflOutputTensor = nullptr;

constexpr int tensorArenaSize = 60 * 1024;
byte tensorArena[tensorArenaSize] __attribute__((aligned(16)));

// Array to map grids
const char* GRIDS[] = {
  "Zero",
  "One",
  "Two",
  "Three",
  "Four",
  "Five",
  "Six",
  "Seven",
  "Eight",
  "Nine",       
};

#define NUM_GRIDS (sizeof(GRIDS) / sizeof(GRIDS[0]))
// #define NUM_GRIDS 10

void setup() {
  delay(1000);
  Serial.begin(9600);
  while(!Serial);
  Serial.print("Starting..."); // Text to note start of data collection

  // Get the TFL representation of the model byte array
  tflModel = tflite::GetModel(model);
  if (tflModel->version() != TFLITE_SCHEMA_VERSION)
  {
    Serial.println("Model schema mismatch!");
    while (1);
  }
  
  // Generate an interpreter to run the model
  tflInterpreter = new tflite::MicroInterpreter(tflModel, tflOpsResolver, tensorArena, tensorArenaSize);  

  // Assign memory for the model's input and output tensors
  tflInterpreter->AllocateTensors();

  // Pointers for the model input and output tensors
  tflInputTensor = tflInterpreter->input(0);
  tflOutputTensor = tflInterpreter->output(0);


  int output_shape[] = {1, 10};
  // TfLiteTensor* output_tensor = Interpreter->output(0);
  tflOutputTensor->dims->data[0] = output_shape[0];
  tflOutputTensor->dims->data[1] = output_shape[1];

  // Get a pointer to the output tensor data
  // float* output_data = tflOutputTensor->data.f;
}

void loop() 
{
  // Copy the data from `input_array_1` to the first column of `output_data`.
  Serial.println("Building array from input_array_1");
  for (int i = 0; i < 5000; i++) 
  {
    output_data[i][0] = input_array_1[i];
    // Serial.println(i);
  }

  // Copy the data from `input_array_2` to the second column of `output_data`.
  Serial.println("Building array from input_array_2");
  for (int i = 0; i < 5000; i++) 
  {
    output_data[i][1] = input_array_2[i];
  }


  // Reshape the 2D array to a 3D array.

  // Copy the data to the reshaped array 
  for (int i = 0; i < 5000; i++) 
  { 
    reshaped_data[0][i][0] = output_data[i][0]; 
    reshaped_data[0][i][1] = output_data[i][1]; 
  }

  // Serial.println(",");
  // Serial.print("Finished reshaping ...");
  
  for (int i = 0; i < 5000; i++) 
  {
    for (int j = 0; j < 2; j++) 
    {
        float value = reshaped_data[0][i][j];
        // float value = reshaped_data[0][i][j][0];
        int input_index = j + i * 2;
        tflInputTensor->data.f[input_index] = value;
    }
  }

  Serial.println("Blah"); // Put here to see where I am in the code
  
  delay(5000);

  TfLiteStatus invokeStatus = tflInterpreter->Invoke();
  Serial.println("Invoke");
  if (invokeStatus != kTfLiteOk)
  {
    Serial.println("Invoke failed!");
    while (1);
    return;
  }

  // Check the output tensor shape
  /*
  int output_num_dims = tflOutputTensor->dims->size;
  Serial.print("Output tensor shape: [");
  for (int i = 0; i < output_num_dims; i++) {
    int dim_size = tflOutputTensor->dims->data[i];
    Serial.print(dim_size);
    if (i < output_num_dims - 1) {
      Serial.print(", ");
    }
  }
  Serial.println("]");
  */

  // Loop through the output tensor values from the model
  for (int i = 0; i < NUM_GRIDS; i++)
  {
    Serial.print(GRIDS[i]);
    Serial.print(": ");
    Serial.println(tflOutputTensor->data.f[i], 2); // The second argument sets the number of decimal places
  }
  Serial.println();
  delay(2000);
}

The Code Explained

At the start of the code are the include statements, which pull in all the supporting files the code requires to do its job. The final include statements are:

#include "channel1.h" //input_array_1
#include "channel2.h" //input_array_2
#include "model.h"

These are the .h files containing the data taken from the training set, along with the model file generated at the end of the neural network analysis. A screenshot of the channel1.h file is shown below (the channel2.h file is very similar):
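
For reference, a minimal sketch of the structure such a data header would have is shown here; the array name matches the one used in the code, but the values are placeholders rather than the real training data:

// channel1.h -- structural sketch; the values below are placeholders only
const float input_array_1[5000] = {
  0.0213f, 0.0198f, 0.0251f, /* ... remaining 4997 samples ... */
};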

A screenshot of the model.h file is shown below:
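
For reference, model.h would typically hold the trained TFLite model exported as a C byte array (for example, via xxd -i on the .tflite file); a structural sketch, with placeholder byte values, might look like this:

// model.h -- structural sketch; byte values are placeholders
// (bytes 4-7 of a real TFLite flatbuffer spell the "TFL3" identifier)
const unsigned char model[] = {
  0x1c, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, 0x33, /* ... */
};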

Next, the required arrays are declared and the TensorFlow Lite Micro objects are initialised:

// Declare the 3D array of data.
float reshaped_data[1][5000][2];

// Declare the 2D array of data.
float output_data[5000][2];

// Global variable used for TF Lite Micro
tflite::MicroErrorReporter tflErrorReporter;

// Import all the TF Lite Micro ops. I could pull in just the ones I need.
// For the minute I will take everything in. But this may change.
tflite::AllOpsResolver tflOpsResolver;

// Used to declare a variable that will be used to store a reference to a TF Lite model
const tflite::Model* tflModel = nullptr;
tflite::MicroInterpreter* tflInterpreter = nullptr;
TfLiteTensor* tflInputTensor = nullptr;
TfLiteTensor* tflOutputTensor = nullptr;

This section sets aside the required working memory, known as the tensor arena:

constexpr int tensorArenaSize = 60 * 1024;
byte tensorArena[tensorArenaSize] __attribute__((aligned(16)));
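
If the arena is too small, AllocateTensors() will fail. As an aside, recent versions of TensorFlow Lite Micro provide arena_used_bytes() on the interpreter, which can be called after allocation to see how much of the arena the model actually needs; a sketch, assuming the library version in use supports it:

// Report how much of the tensor arena the model actually used.
Serial.print("Arena used (bytes): ");
Serial.println((unsigned int)tflInterpreter->arena_used_bytes());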

This section of the code sets up the grids against which the inference will be done:

// Array to map grids
const char* GRIDS[] = {
  "Zero",
  "One",
  "Two",
  "Three",
  "Four",
  "Five",
  "Six",
  "Seven",
  "Eight",
  "Nine",       
};
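
The companion NUM_GRIDS macro from the full listing derives the number of grids from this array, so the label list and the count cannot drift apart:

#define NUM_GRIDS (sizeof(GRIDS) / sizeof(GRIDS[0])) // Evaluates to 10 for the array above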

Next, we have the setup() function. Here, the serial connection is established and the TensorFlow Lite model is loaded:

void setup() {
  delay(1000);
  Serial.begin(9600);
  while(!Serial);
  Serial.print("Starting..."); // Text to note start of data collection

  // Get the TFL representation of the model byte array
  tflModel = tflite::GetModel(model);
  if (tflModel->version() != TFLITE_SCHEMA_VERSION)
  {
    Serial.println("Model schema mismatch!");
    while (1);
  }
  
  // Generate an interpreter to run the model
  tflInterpreter = new tflite::MicroInterpreter(tflModel, tflOpsResolver, tensorArena, tensorArenaSize);  

  // Assign memory for the model's input and output tensors
  tflInterpreter->AllocateTensors();

  // Pointers for the model input and output tensors
  tflInputTensor = tflInterpreter->input(0);
  tflOutputTensor = tflInterpreter->output(0);


  int output_shape[] = {1, 10};
  // TfLiteTensor* output_tensor = Interpreter->output(0);
  tflOutputTensor->dims->data[0] = output_shape[0];
  tflOutputTensor->dims->data[1] = output_shape[1];

  // Get a pointer to the output tensor data
  // float* output_data = tflOutputTensor->data.f;
}
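
One detail worth flagging: AllocateTensors() returns a TfLiteStatus, and the call above ignores it. A small sketch of a checked version that could be used instead:

// Check that tensor allocation succeeded before using the tensors.
TfLiteStatus allocateStatus = tflInterpreter->AllocateTensors();
if (allocateStatus != kTfLiteOk)
{
  Serial.println("AllocateTensors() failed!");
  while (1);
}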

The line int output_shape[] = {1, 10}; describes the intended output shape: a single batch of 10 values, one per grid. Note that writing these values into tflOutputTensor->dims->data only changes the shape metadata the tensor reports; it does not resize the underlying buffer, which was fixed when AllocateTensors() ran.

In the loop() function, we start to read the data in from our .h files and build it into the output_data array:

void loop() 
{
  // Copy the data from `input_array_1` to the first column of `output_data`.
  Serial.println("Building array from input_array_1");
  for (int i = 0; i < 5000; i++) 
  {
    output_data[i][0] = input_array_1[i];
    // Serial.println(i);
  }

  // Copy the data from `input_array_2` to the second column of `output_data`.
  Serial.println("Building array from input_array_2");
  for (int i = 0; i < 5000; i++) 
  {
    output_data[i][1] = input_array_2[i];
  }
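
The two copy loops above could equally be fused into a single pass over the data; a sketch:

// Equivalent single pass filling both columns at once.
for (int i = 0; i < 5000; i++)
{
  output_data[i][0] = input_array_1[i];
  output_data[i][1] = input_array_2[i];
}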

Next, this data needs to be reshaped into the form required by TensorFlow Lite. The target array is called reshaped_data:

for (int i = 0; i < 5000; i++) 
{ 
  reshaped_data[0][i][0] = output_data[i][0]; 
  reshaped_data[0][i][1] = output_data[i][1]; 
}
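
In practice this "reshape" does not move anything around: float[5000][2] and float[1][5000][2] have identical row-major memory layouts, so the same result could be achieved with a single memcpy; a sketch, assuming the array dimensions above:

// The flat layouts are identical, so one memcpy performs the "reshape".
memcpy(reshaped_data, output_data, sizeof(output_data));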

The data is now copied into the input tensor, and it is this tensor on which the inference is done. Because the tensor's backing store is a flat buffer, element [0][i][j] maps to the flat index i * 2 + j:

for (int i = 0; i < 5000; i++) 
{
  for (int j = 0; j < 2; j++) 
  {
      float value = reshaped_data[0][i][j];
      // float value = reshaped_data[0][i][j][0];
      int input_index = j + i * 2;
      tflInputTensor->data.f[input_index] = value;
  }
}
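
Since the input tensor's backing store is itself just a flat float buffer, the intermediate arrays could in principle be skipped and the data written straight from the source arrays; a sketch under the same 5000-sample, two-channel assumption:

// Write both channels directly into the flat input tensor buffer.
for (int i = 0; i < 5000; i++)
{
  tflInputTensor->data.f[i * 2 + 0] = input_array_1[i];
  tflInputTensor->data.f[i * 2 + 1] = input_array_2[i];
}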

With the data in place, the inference can be carried out:

TfLiteStatus invokeStatus = tflInterpreter->Invoke();
Serial.println("Invoke");      
if (invokeStatus != kTfLiteOk) 
{
  Serial.println("Invoke failed!");
  while (1);
  return;
}
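
When inference misbehaves like this, it is also worth confirming that the input tensor's shape matches what the model was trained on; a sketch of such a check, assuming the expected shape is {1, 5000, 2}:

// Sanity check: compare the input tensor's dims against the expected shape.
const int expected[] = {1, 5000, 2};
bool shape_ok = (tflInputTensor->dims->size == 3);
for (int d = 0; shape_ok && d < 3; d++)
{
  shape_ok = (tflInputTensor->dims->data[d] == expected[d]);
}
if (!shape_ok)
{
  Serial.println("Unexpected input tensor shape!");
}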

The results are then output on the serial monitor:

for (int i = 0; i < NUM_GRIDS; i++) 
{
  Serial.print(GRIDS[i]);
  Serial.print(": ");
  Serial.println(tflOutputTensor->data.f[i], 2); // The second argument sets the number of decimal places
}
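
If a single predicted grid is wanted rather than the full list of scores, an argmax over the output tensor gives it; a sketch:

// Find the grid with the highest output score.
int best = 0;
for (int i = 1; i < NUM_GRIDS; i++)
{
  if (tflOutputTensor->data.f[i] > tflOutputTensor->data.f[best])
  {
    best = i;
  }
}
Serial.print("Predicted grid: ");
Serial.println(GRIDS[best]);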

When this code is run, the following is obtained:

The same output is obtained no matter which grid the input data was gathered from.
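
One simple sanity check in this situation is to echo back the first few values actually written into the input tensor, confirming that the data at least reached the model; a sketch:

// Print the first few input tensor values as a sanity check.
for (int i = 0; i < 6; i++)
{
  Serial.print(tflInputTensor->data.f[i], 4);
  Serial.print(i < 5 ? ", " : "\n");
}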
