diff --git a/FruitToEmoji/FruitToEmoji.ipynb b/FruitToEmoji/FruitToEmoji.ipynb
index f455ff2..e09b70b 100644
--- a/FruitToEmoji/FruitToEmoji.ipynb
+++ b/FruitToEmoji/FruitToEmoji.ipynb
@@ -1,351 +1,364 @@
{
- "nbformat": 4,
- "nbformat_minor": 0,
- "metadata": {
- "colab": {
- "name": "FruitToEmoji-GIT.ipynb",
- "provenance": [],
- "collapsed_sections": [],
- "toc_visible": true
- },
- "kernelspec": {
- "name": "python3",
- "display_name": "Python 3"
- }
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "f92-4Hjy7kA8"
+ },
+ "source": [
+ "
\n",
+ "# Tiny ML on Arduino\n",
+ "## Classify objects by color tutorial\n",
+ "\n",
+ " \n",
+ "https://github.com/arduino/ArduinoTensorFlowLiteTutorials/"
+ ]
},
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "f92-4Hjy7kA8",
- "colab_type": "text"
- },
- "source": [
- "
\n",
- "# Tiny ML on Arduino\n",
- "## Classify objects by color tutorial\n",
- "\n",
- " \n",
- "https://github.com/arduino/ArduinoTensorFlowLiteTutorials/"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "uvDA8AK7QOq-",
- "colab_type": "text"
- },
- "source": [
- "## Setup Python Environment \n",
- "\n",
- "The next cell sets up the dependencies in required for the notebook, run it."
- ]
- },
- {
- "cell_type": "code",
- "metadata": {
- "id": "Y2gs-PL4xDkZ",
- "colab_type": "code",
- "colab": {}
- },
- "source": [
- "# Setup environment\n",
- "!apt-get -qq install xxd\n",
- "!pip install pandas numpy matplotlib\n",
- "%tensorflow_version 2.x\n",
- "!pip install tensorflow"
- ],
- "execution_count": 0,
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "9lwkeshJk7dg",
- "colab_type": "text"
- },
- "source": [
- "# Upload Data\n",
- "\n",
- "1. Open the panel on the left side of Colab by clicking on the __>__\n",
- "1. Select the Files tab\n",
- "1. Drag `csv` files from your computer to the tab to upload them into colab."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "kSxUeYPNQbOg",
- "colab_type": "text"
- },
- "source": [
- "# Train Neural Network\n",
- "\n",
- "\n",
- "\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "Gxk414PU3oy3",
- "colab_type": "text"
- },
- "source": [
- "## Parse and prepare the data\n",
- "\n",
- "The next cell parses the csv files and transforms them to a format that will be used to train the full connected neural network.\n",
- "\n"
- ]
- },
- {
- "cell_type": "code",
- "metadata": {
- "id": "AGChd1FAk5_j",
- "colab_type": "code",
- "colab": {}
- },
- "source": [
- "import matplotlib.pyplot as plt\n",
- "import numpy as np\n",
- "import pandas as pd\n",
- "import tensorflow as tf\n",
- "import os\n",
- "import fileinput\n",
- "\n",
- "print(f\"TensorFlow version = {tf.__version__}\\n\")\n",
- "\n",
- "# Set a fixed random seed value, for reproducibility, this will allow us to get\n",
- "# the same random numbers each time the notebook is run\n",
- "SEED = 1337\n",
- "np.random.seed(SEED)\n",
- "tf.random.set_seed(SEED)\n",
- "\n",
- "CLASSES = [];\n",
- "\n",
- "for file in os.listdir(\"/content/\"):\n",
- " if file.endswith(\".csv\"):\n",
- " CLASSES.append(os.path.splitext(file)[0])\n",
- "\n",
- "CLASSES.sort()\n",
- "\n",
- "SAMPLES_WINDOW_LEN = 1\n",
- "NUM_CLASSES = len(CLASSES)\n",
- "\n",
- "# create a one-hot encoded matrix that is used in the output\n",
- "ONE_HOT_ENCODED_CLASSES = np.eye(NUM_CLASSES)\n",
- "\n",
- "inputs = []\n",
- "outputs = []\n",
- "\n",
- "# read each csv file and push an input and output\n",
- "for class_index in range(NUM_CLASSES):\n",
- " objectClass = CLASSES[class_index]\n",
- " df = pd.read_csv(\"/content/\" + objectClass + \".csv\")\n",
- " columns = list(df)\n",
- " # get rid of pesky empty value lines of csv which cause NaN inputs to TensorFlow\n",
- " df = df.dropna()\n",
- " df = df.reset_index(drop=True)\n",
- " \n",
- " # calculate the number of objectClass recordings in the file\n",
- " num_recordings = int(df.shape[0] / SAMPLES_WINDOW_LEN)\n",
- " print(f\"\\u001b[32;4m{objectClass}\\u001b[0m class will be output \\u001b[32m{class_index}\\u001b[0m of the classifier\")\n",
- " print(f\"{num_recordings} samples captured for training with inputs {list(df)} \\n\")\n",
- "\n",
- " # graphing\n",
- " plt.rcParams[\"figure.figsize\"] = (10,1)\n",
- " pixels = np.array([df['Red'],df['Green'],df['Blue']],float)\n",
- " pixels = np.transpose(pixels)\n",
- " for i in range(num_recordings):\n",
- " plt.axvline(x=i, linewidth=8, color=tuple(pixels[i]/np.max(pixels[i], axis=0)))\n",
- " plt.show()\n",
- " \n",
- " #tensors\n",
- " output = ONE_HOT_ENCODED_CLASSES[class_index]\n",
- " for i in range(num_recordings):\n",
- " tensor = []\n",
- " row = []\n",
- " for c in columns:\n",
- " row.append(df[c][i])\n",
- " tensor += row\n",
- " inputs.append(tensor)\n",
- " outputs.append(output)\n",
- "\n",
- "# convert the list to numpy array\n",
- "inputs = np.array(inputs)\n",
- "outputs = np.array(outputs)\n",
- "\n",
- "print(\"Data set parsing and preparation complete.\")\n",
- "\n",
- "# Randomize the order of the inputs, so they can be evenly distributed for training, testing, and validation\n",
- "# https://stackoverflow.com/a/37710486/2020087\n",
- "num_inputs = len(inputs)\n",
- "randomize = np.arange(num_inputs)\n",
- "np.random.shuffle(randomize)\n",
- "\n",
- "# Swap the consecutive indexes (0, 1, 2, etc) with the randomized indexes\n",
- "inputs = inputs[randomize]\n",
- "outputs = outputs[randomize]\n",
- "\n",
- "# Split the recordings (group of samples) into three sets: training, testing and validation\n",
- "TRAIN_SPLIT = int(0.6 * num_inputs)\n",
- "TEST_SPLIT = int(0.2 * num_inputs + TRAIN_SPLIT)\n",
- "\n",
- "inputs_train, inputs_test, inputs_validate = np.split(inputs, [TRAIN_SPLIT, TEST_SPLIT])\n",
- "outputs_train, outputs_test, outputs_validate = np.split(outputs, [TRAIN_SPLIT, TEST_SPLIT])\n",
- "\n",
- "print(\"Data set randomization and splitting complete.\")\n"
- ],
- "execution_count": 0,
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "v8qlSAX1b6Yv"
- },
- "source": [
- "## Build & Train the Model\n",
- "\n",
- "Build and train a [TensorFlow](https://www.tensorflow.org) model using the high-level [Keras](https://www.tensorflow.org/guide/keras) API."
- ]
- },
- {
- "cell_type": "code",
- "metadata": {
- "id": "kGNFa-lX24Qo",
- "colab_type": "code",
- "colab": {}
- },
- "source": [
- "# build the model and train it\n",
- "model = tf.keras.Sequential()\n",
- "model.add(tf.keras.layers.Dense(8, activation='relu')) # relu is used for performance\n",
- "model.add(tf.keras.layers.Dense(5, activation='relu'))\n",
- "model.add(tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')) # softmax is used, because we only expect one class to occur per input\n",
- "model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])\n",
- "history = model.fit(inputs_train, outputs_train, epochs=400, batch_size=4, validation_data=(inputs_validate, outputs_validate))\n",
- "\n"
- ],
- "execution_count": 0,
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "guMjtfa42ahM",
- "colab_type": "text"
- },
- "source": [
- "### Run with Test Data\n",
- "Put our test data into the model and plot the predictions\n"
- ]
- },
- {
- "cell_type": "code",
- "metadata": {
- "id": "V3Y0CCWJz2EK",
- "colab_type": "code",
- "colab": {}
- },
- "source": [
- "# use the model to predict the test inputs\n",
- "predictions = model.predict(inputs_test)\n",
- "\n",
- "# print the predictions and the expected ouputs\n",
- "print(\"predictions =\\n\", np.round(predictions, decimals=3))\n",
- "print(\"actual =\\n\", outputs_test)\n",
- "\n",
- "# Plot the predictions along with to the test data\n",
- "plt.clf()\n",
- "plt.title('Training data predicted vs actual values')\n",
- "plt.plot(inputs_test, outputs_test, 'b.', label='Actual')\n",
- "plt.plot(inputs_test, predictions, 'r.', label='Predicted')\n",
- "plt.show()"
- ],
- "execution_count": 0,
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "j7DO6xxXVCym",
- "colab_type": "text"
- },
- "source": [
- "# Convert the Trained Model to Tensor Flow Lite\n",
- "\n",
- "The next cell converts the model to TFlite format. The size in bytes of the model is also printed out."
- ]
- },
- {
- "cell_type": "code",
- "metadata": {
- "id": "0Xn1-Rn9Cp_8",
- "colab_type": "code",
- "colab": {}
- },
- "source": [
- "# Convert the model to the TensorFlow Lite format without quantization\n",
- "converter = tf.lite.TFLiteConverter.from_keras_model(model)\n",
- "tflite_model = converter.convert()\n",
- "\n",
- "# Save the model to disk\n",
- "open(\"gesture_model.tflite\", \"wb\").write(tflite_model)\n",
- " \n",
- "import os\n",
- "basic_model_size = os.path.getsize(\"gesture_model.tflite\")\n",
- "print(\"Model is %d bytes\" % basic_model_size)\n",
- " \n",
- " "
- ],
- "execution_count": 0,
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "ykccQn7SXrUX",
- "colab_type": "text"
- },
- "source": [
- "## Encode the Model in an Arduino Header File \n",
- "\n",
- "The next cell creates a constant byte array that contains the TFlite model. Import it as a tab with the sketch below."
- ]
- },
- {
- "cell_type": "code",
- "metadata": {
- "id": "9J33uwpNtAku",
- "colab_type": "code",
- "colab": {}
- },
- "source": [
- "!echo \"const unsigned char model[] = {\" > /content/model.h\n",
- "!cat gesture_model.tflite | xxd -i >> /content/model.h\n",
- "!echo \"};\" >> /content/model.h\n",
- "\n",
- "import os\n",
- "model_h_size = os.path.getsize(\"model.h\")\n",
- "print(f\"Header file, model.h, is {model_h_size:,} bytes.\")\n",
- "print(\"\\nOpen the side panel (refresh if needed). Double click model.h to download the file.\")"
- ],
- "execution_count": 0,
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "1eSkHZaLzMId",
- "colab_type": "text"
- },
- "source": [
- "# Realtime Classification of Sensor Data on Arduino\n",
- "\n",
- "Now it's time to switch back to the tutorial instructions and run our new model on the [Arduino Nano 33 BLE Sense](https://www.arduino.cc/en/Guide/NANO33BLE)"
- ]
- }
- ]
-}
\ No newline at end of file
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "uvDA8AK7QOq-"
+ },
+ "source": [
+ "## Setup Python Environment \n",
+ "\n",
+ "The next cell sets up the dependencies in required for the notebook, run it."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "Y2gs-PL4xDkZ"
+ },
+ "outputs": [],
+ "source": [
+ "# Setup environment\n",
+ "!apt-get -qq install xxd\n",
+ "!pip install pandas numpy matplotlib\n",
+ "%tensorflow_version 2.x\n",
+ "!pip install tensorflow"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "9lwkeshJk7dg"
+ },
+ "source": [
+ "# Upload Data\n",
+ "\n",
+ "1. Open the panel on the left side of Colab by clicking on the __>__\n",
+ "1. Select the Files tab\n",
+ "1. Drag `csv` files from your computer to the tab to upload them into colab."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "kSxUeYPNQbOg"
+ },
+ "source": [
+ "# Train Neural Network\n",
+ "\n",
+ "\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "Gxk414PU3oy3"
+ },
+ "source": [
+ "## Parse and prepare the data\n",
+ "\n",
+ "The next cell parses the csv files and transforms them to a format that will be used to train the full connected neural network.\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "AGChd1FAk5_j"
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import tensorflow as tf\n",
+ "import os\n",
+ "import fileinput\n",
+ "\n",
+ "print(f\"TensorFlow version = {tf.__version__}\\n\")\n",
+ "\n",
+ "# Set a fixed random seed value, for reproducibility, this will allow us to get\n",
+ "# the same random numbers each time the notebook is run\n",
+ "SEED = 1337\n",
+ "np.random.seed(SEED)\n",
+ "tf.random.set_seed(SEED)\n",
+ "\n",
+ "CLASSES = [];\n",
+ "\n",
+ "for file in os.listdir(\"/content/\"):\n",
+ " if file.endswith(\".csv\"):\n",
+ " CLASSES.append(os.path.splitext(file)[0])\n",
+ "\n",
+ "CLASSES.sort()\n",
+ "\n",
+ "SAMPLES_WINDOW_LEN = 1\n",
+ "NUM_CLASSES = len(CLASSES)\n",
+ "\n",
+ "# create a one-hot encoded matrix that is used in the output\n",
+ "ONE_HOT_ENCODED_CLASSES = np.eye(NUM_CLASSES)\n",
+ "\n",
+ "inputs = []\n",
+ "outputs = []\n",
+ "\n",
+ "# read each csv file and push an input and output\n",
+ "for class_index in range(NUM_CLASSES):\n",
+ " objectClass = CLASSES[class_index]\n",
+ " df = pd.read_csv(\"/content/\" + objectClass + \".csv\")\n",
+ " columns = list(df)\n",
+ " # get rid of pesky empty value lines of csv which cause NaN inputs to TensorFlow\n",
+ " df = df.dropna()\n",
+ " df = df.reset_index(drop=True)\n",
+ " \n",
+ " # calculate the number of objectClass recordings in the file\n",
+ " num_recordings = int(df.shape[0] / SAMPLES_WINDOW_LEN)\n",
+ " print(f\"\\u001b[32;4m{objectClass}\\u001b[0m class will be output \\u001b[32m{class_index}\\u001b[0m of the classifier\")\n",
+ " print(f\"{num_recordings} samples captured for training with inputs {list(df)} \\n\")\n",
+ "\n",
+ " # graphing\n",
+ " plt.rcParams[\"figure.figsize\"] = (10,1)\n",
+ " pixels = np.array([df['Red'],df['Green'],df['Blue']],float)\n",
+ " pixels = np.transpose(pixels)\n",
+ " for i in range(num_recordings):\n",
+ " plt.axvline(x=i, linewidth=8, color=tuple(pixels[i]/np.max(pixels[i], axis=0)))\n",
+ " plt.show()\n",
+ " \n",
+ " #tensors\n",
+ " output = ONE_HOT_ENCODED_CLASSES[class_index]\n",
+ " for i in range(num_recordings):\n",
+ " tensor = []\n",
+ " row = []\n",
+ " for c in columns:\n",
+ " row.append(df[c][i])\n",
+ " tensor += row\n",
+ " inputs.append(tensor)\n",
+ " outputs.append(output)\n",
+ "\n",
+ "# convert the list to numpy array\n",
+ "inputs = np.array(inputs)\n",
+ "outputs = np.array(outputs)\n",
+ "\n",
+ "print(\"Data set parsing and preparation complete.\")\n",
+ "\n",
+ "# Randomize the order of the inputs, so they can be evenly distributed for training, testing, and validation\n",
+ "# https://stackoverflow.com/a/37710486/2020087\n",
+ "num_inputs = len(inputs)\n",
+ "randomize = np.arange(num_inputs)\n",
+ "np.random.shuffle(randomize)\n",
+ "\n",
+ "# Swap the consecutive indexes (0, 1, 2, etc) with the randomized indexes\n",
+ "inputs = inputs[randomize]\n",
+ "outputs = outputs[randomize]\n",
+ "\n",
+ "# Split the recordings (group of samples) into three sets: training, testing and validation\n",
+ "TRAIN_SPLIT = int(0.6 * num_inputs)\n",
+ "TEST_SPLIT = int(0.2 * num_inputs + TRAIN_SPLIT)\n",
+ "\n",
+ "inputs_train, inputs_test, inputs_validate = np.split(inputs, [TRAIN_SPLIT, TEST_SPLIT])\n",
+ "outputs_train, outputs_test, outputs_validate = np.split(outputs, [TRAIN_SPLIT, TEST_SPLIT])\n",
+ "\n",
+ "print(\"Data set randomization and splitting complete.\")\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "v8qlSAX1b6Yv"
+ },
+ "source": [
+ "## Build & Train the Model\n",
+ "\n",
+ "Build and train a [TensorFlow](https://www.tensorflow.org) model using the high-level [Keras](https://www.tensorflow.org/guide/keras) API."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "kGNFa-lX24Qo"
+ },
+ "outputs": [],
+ "source": [
+ "# build the model and train it\n",
+ "model = tf.keras.Sequential()\n",
+ "model.add(tf.keras.layers.Dense(8, activation='relu')) # relu is used for performance\n",
+ "model.add(tf.keras.layers.Dense(5, activation='relu'))\n",
+ "model.add(tf.keras.layers.Dense(NUM_CLASSES, activation='softmax')) # softmax is used, because we only expect one class to occur per input\n",
+ "model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])\n",
+ "history = model.fit(inputs_train, outputs_train, epochs=400, batch_size=4, validation_data=(inputs_validate, outputs_validate))\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "guMjtfa42ahM"
+ },
+ "source": [
+ "### Run with Test Data\n",
+ "Put our test data into the model and plot the predictions\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "V3Y0CCWJz2EK"
+ },
+ "outputs": [],
+ "source": [
+ "# use the model to predict the test inputs\n",
+ "predictions = model.predict(inputs_test)\n",
+ "\n",
+ "# print the predictions and the expected ouputs\n",
+ "print(\"predictions =\\n\", np.round(predictions, decimals=3))\n",
+ "print(\"actual =\\n\", outputs_test)\n",
+ "\n",
+ "# Plot the predictions along with to the test data\n",
+ "plt.clf()\n",
+ "plt.title('Training data predicted vs actual values')\n",
+ "plt.plot(inputs_test, outputs_test, 'b.', label='Actual')\n",
+ "plt.plot(inputs_test, predictions, 'r.', label='Predicted')\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "j7DO6xxXVCym"
+ },
+ "source": [
+ "# Convert the Trained Model to Tensor Flow Lite\n",
+ "\n",
+ "The next cell converts the model to TFlite format. The size in bytes of the model is also printed out."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "0Xn1-Rn9Cp_8"
+ },
+ "outputs": [],
+ "source": [
+ "# Convert the model to the TensorFlow Lite format without quantization\n",
+ "converter = tf.lite.TFLiteConverter.from_keras_model(model)\n",
+ "tflite_model = converter.convert()\n",
+ "\n",
+ "# Save the model to disk\n",
+ "open(\"gesture_model.tflite\", \"wb\").write(tflite_model)\n",
+ " \n",
+ "import os\n",
+ "basic_model_size = os.path.getsize(\"gesture_model.tflite\")\n",
+ "print(\"Model is %d bytes\" % basic_model_size)\n",
+ " \n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "ykccQn7SXrUX"
+ },
+ "source": [
+ "## Encode the Model in an Arduino Header File \n",
+ "\n",
+ "The next cell creates a constant byte array that contains the TFlite model. Import it as a tab with the sketch below."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "9J33uwpNtAku"
+ },
+ "outputs": [],
+ "source": [
+ "!printf \"// We need to keep the data array aligned on some architectures.\\n#ifdef __has_attribute\\n#define HAVE_ATTRIBUTE(x) __has_attribute(x)\\n#else\\n#define HAVE_ATTRIBUTE(x) 0\\n#endif\\n#if HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__))\\n#define DATA_ALIGN_ATTRIBUTE __attribute__((aligned(4)))\\n#else\\n#define DATA_ALIGN_ATTRIBUTE\\n#endif\\n\\n const unsigned char model[] DATA_ALIGN_ATTRIBUTE = {\\n\" > /content/model.h\n",
+ "!cat gesture_model.tflite | xxd -i >> /content/model.h\n",
+ "!echo \"};\" >> /content/model.h\n",
+ "\n",
+ "import os\n",
+ "model_h_size = os.path.getsize(\"model.h\")\n",
+ "print(f\"Header file, model.h, is {model_h_size:,} bytes.\")\n",
+ "print(\"\\nOpen the side panel (refresh if needed). Double click model.h to download the file.\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "1eSkHZaLzMId"
+ },
+ "source": [
+ "# Realtime Classification of Sensor Data on Arduino\n",
+ "\n",
+ "Now it's time to switch back to the tutorial instructions and run our new model on the [Arduino Nano 33 BLE Sense](https://www.arduino.cc/en/Guide/NANO33BLE)"
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "collapsed_sections": [],
+ "name": "FruitToEmoji-GIT.ipynb",
+ "provenance": [],
+ "toc_visible": true
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.5"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
diff --git a/GestureToEmoji/arduino_tinyml_workshop.ipynb b/GestureToEmoji/arduino_tinyml_workshop.ipynb
index fd7d033..0a736c6 100755
--- a/GestureToEmoji/arduino_tinyml_workshop.ipynb
+++ b/GestureToEmoji/arduino_tinyml_workshop.ipynb
@@ -1,550 +1,563 @@
{
- "nbformat": 4,
- "nbformat_minor": 0,
- "metadata": {
- "colab": {
- "name": "arduino_tinyml_workshop.ipynb",
- "provenance": [],
- "collapsed_sections": [],
- "toc_visible": true
- },
- "kernelspec": {
- "name": "python3",
- "display_name": "Python 3"
- }
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "f92-4Hjy7kA8"
+ },
+ "source": [
+ "
\n",
+ "# Tiny ML on Arduino\n",
+ "## Gesture recognition tutorial\n",
+ " * Sandeep Mistry - Arduino\n",
+ " * Don Coleman - Chariot Solutions\n",
+ "\n",
+ " \n",
+ "https://github.com/arduino/ArduinoTensorFlowLiteTutorials/"
+ ]
},
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "f92-4Hjy7kA8",
- "colab_type": "text"
- },
- "source": [
- "
\n",
- "# Tiny ML on Arduino\n",
- "## Gesture recognition tutorial\n",
- " * Sandeep Mistry - Arduino\n",
- " * Don Coleman - Chariot Solutions\n",
- "\n",
- " \n",
- "https://github.com/arduino/ArduinoTensorFlowLiteTutorials/"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "uvDA8AK7QOq-",
- "colab_type": "text"
- },
- "source": [
- "## Setup Python Environment \n",
- "\n",
- "The next cell sets up the dependencies in required for the notebook, run it."
- ]
- },
- {
- "cell_type": "code",
- "metadata": {
- "id": "Y2gs-PL4xDkZ",
- "colab_type": "code",
- "colab": {}
- },
- "source": [
- "# Setup environment\n",
- "!apt-get -qq install xxd\n",
- "!pip install pandas numpy matplotlib\n",
- "!pip install tensorflow==2.0.0-rc1"
- ],
- "execution_count": 0,
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "9lwkeshJk7dg",
- "colab_type": "text"
- },
- "source": [
- "# Upload Data\n",
- "\n",
- "1. Open the panel on the left side of Colab by clicking on the __>__\n",
- "1. Select the files tab\n",
- "1. Drag `punch.csv` and `flex.csv` files from your computer to the tab to upload them into colab."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "Eh9yve14gUyD",
- "colab_type": "text"
- },
- "source": [
- "# Graph Data (optional)\n",
- "\n",
- "We'll graph the input files on two separate graphs, acceleration and gyroscope, as each data set has different units and scale."
- ]
- },
- {
- "cell_type": "code",
- "metadata": {
- "id": "I65ukChEgyNp",
- "colab_type": "code",
- "colab": {}
- },
- "source": [
- "import matplotlib.pyplot as plt\n",
- "import numpy as np\n",
- "import pandas as pd\n",
- "\n",
- "filename = \"punch.csv\"\n",
- "\n",
- "df = pd.read_csv(\"/content/\" + filename)\n",
- "\n",
- "index = range(1, len(df['aX']) + 1)\n",
- "\n",
- "plt.rcParams[\"figure.figsize\"] = (20,10)\n",
- "\n",
- "plt.plot(index, df['aX'], 'g.', label='x', linestyle='solid', marker=',')\n",
- "plt.plot(index, df['aY'], 'b.', label='y', linestyle='solid', marker=',')\n",
- "plt.plot(index, df['aZ'], 'r.', label='z', linestyle='solid', marker=',')\n",
- "plt.title(\"Acceleration\")\n",
- "plt.xlabel(\"Sample #\")\n",
- "plt.ylabel(\"Acceleration (G)\")\n",
- "plt.legend()\n",
- "plt.show()\n",
- "\n",
- "plt.plot(index, df['gX'], 'g.', label='x', linestyle='solid', marker=',')\n",
- "plt.plot(index, df['gY'], 'b.', label='y', linestyle='solid', marker=',')\n",
- "plt.plot(index, df['gZ'], 'r.', label='z', linestyle='solid', marker=',')\n",
- "plt.title(\"Gyroscope\")\n",
- "plt.xlabel(\"Sample #\")\n",
- "plt.ylabel(\"Gyroscope (deg/sec)\")\n",
- "plt.legend()\n",
- "plt.show()\n"
- ],
- "execution_count": 0,
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "kSxUeYPNQbOg",
- "colab_type": "text"
- },
- "source": [
- "# Train Neural Network\n",
- "\n",
- "\n",
- "\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "Gxk414PU3oy3",
- "colab_type": "text"
- },
- "source": [
- "## Parse and prepare the data\n",
- "\n",
- "The next cell parses the csv files and transforms them to a format that will be used to train the fully connected neural network.\n",
- "\n",
- "Update the `GESTURES` list with the gesture data you've collected in `.csv` format.\n"
- ]
- },
- {
- "cell_type": "code",
- "metadata": {
- "id": "AGChd1FAk5_j",
- "colab_type": "code",
- "colab": {}
- },
- "source": [
- "import matplotlib.pyplot as plt\n",
- "import numpy as np\n",
- "import pandas as pd\n",
- "import tensorflow as tf\n",
- "\n",
- "print(f\"TensorFlow version = {tf.__version__}\\n\")\n",
- "\n",
- "# Set a fixed random seed value, for reproducibility, this will allow us to get\n",
- "# the same random numbers each time the notebook is run\n",
- "SEED = 1337\n",
- "np.random.seed(SEED)\n",
- "tf.random.set_seed(SEED)\n",
- "\n",
- "# the list of gestures that data is available for\n",
- "GESTURES = [\n",
- " \"punch\",\n",
- " \"flex\",\n",
- "]\n",
- "\n",
- "SAMPLES_PER_GESTURE = 119\n",
- "\n",
- "NUM_GESTURES = len(GESTURES)\n",
- "\n",
- "# create a one-hot encoded matrix that is used in the output\n",
- "ONE_HOT_ENCODED_GESTURES = np.eye(NUM_GESTURES)\n",
- "\n",
- "inputs = []\n",
- "outputs = []\n",
- "\n",
- "# read each csv file and push an input and output\n",
- "for gesture_index in range(NUM_GESTURES):\n",
- " gesture = GESTURES[gesture_index]\n",
- " print(f\"Processing index {gesture_index} for gesture '{gesture}'.\")\n",
- " \n",
- " output = ONE_HOT_ENCODED_GESTURES[gesture_index]\n",
- " \n",
- " df = pd.read_csv(\"/content/\" + gesture + \".csv\")\n",
- " \n",
- " # calculate the number of gesture recordings in the file\n",
- " num_recordings = int(df.shape[0] / SAMPLES_PER_GESTURE)\n",
- " \n",
- " print(f\"\\tThere are {num_recordings} recordings of the {gesture} gesture.\")\n",
- " \n",
- " for i in range(num_recordings):\n",
- " tensor = []\n",
- " for j in range(SAMPLES_PER_GESTURE):\n",
- " index = i * SAMPLES_PER_GESTURE + j\n",
- " # normalize the input data, between 0 to 1:\n",
- " # - acceleration is between: -4 to +4\n",
- " # - gyroscope is between: -2000 to +2000\n",
- " tensor += [\n",
- " (df['aX'][index] + 4) / 8,\n",
- " (df['aY'][index] + 4) / 8,\n",
- " (df['aZ'][index] + 4) / 8,\n",
- " (df['gX'][index] + 2000) / 4000,\n",
- " (df['gY'][index] + 2000) / 4000,\n",
- " (df['gZ'][index] + 2000) / 4000\n",
- " ]\n",
- "\n",
- " inputs.append(tensor)\n",
- " outputs.append(output)\n",
- "\n",
- "# convert the list to numpy array\n",
- "inputs = np.array(inputs)\n",
- "outputs = np.array(outputs)\n",
- "\n",
- "print(\"Data set parsing and preparation complete.\")"
- ],
- "execution_count": 0,
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "d5_61831d5AM",
- "colab_type": "text"
- },
- "source": [
- "## Randomize and split the input and output pairs for training\n",
- "\n",
- "Randomly split input and output pairs into sets of data: 60% for training, 20% for validation, and 20% for testing.\n",
- "\n",
- " - the training set is used to train the model\n",
- " - the validation set is used to measure how well the model is performing during training\n",
- " - the testing set is used to test the model after training"
- ]
- },
- {
- "cell_type": "code",
- "metadata": {
- "id": "QfNEmUZMeIEx",
- "colab_type": "code",
- "colab": {}
- },
- "source": [
- "# Randomize the order of the inputs, so they can be evenly distributed for training, testing, and validation\n",
- "# https://stackoverflow.com/a/37710486/2020087\n",
- "num_inputs = len(inputs)\n",
- "randomize = np.arange(num_inputs)\n",
- "np.random.shuffle(randomize)\n",
- "\n",
- "# Swap the consecutive indexes (0, 1, 2, etc) with the randomized indexes\n",
- "inputs = inputs[randomize]\n",
- "outputs = outputs[randomize]\n",
- "\n",
- "# Split the recordings (group of samples) into three sets: training, testing and validation\n",
- "TRAIN_SPLIT = int(0.6 * num_inputs)\n",
- "TEST_SPLIT = int(0.2 * num_inputs + TRAIN_SPLIT)\n",
- "\n",
- "inputs_train, inputs_test, inputs_validate = np.split(inputs, [TRAIN_SPLIT, TEST_SPLIT])\n",
- "outputs_train, outputs_test, outputs_validate = np.split(outputs, [TRAIN_SPLIT, TEST_SPLIT])\n",
- "\n",
- "print(\"Data set randomization and splitting complete.\")"
- ],
- "execution_count": 0,
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "a9g2n41p24nR",
- "colab_type": "text"
- },
- "source": [
- "## Build & Train the Model\n",
- "\n",
- "Build and train a [TensorFlow](https://www.tensorflow.org) model using the high-level [Keras](https://www.tensorflow.org/guide/keras) API."
- ]
- },
- {
- "cell_type": "code",
- "metadata": {
- "id": "kGNFa-lX24Qo",
- "colab_type": "code",
- "colab": {}
- },
- "source": [
- "# build the model and train it\n",
- "model = tf.keras.Sequential()\n",
- "model.add(tf.keras.layers.Dense(50, activation='relu')) # relu is used for performance\n",
- "model.add(tf.keras.layers.Dense(15, activation='relu'))\n",
- "model.add(tf.keras.layers.Dense(NUM_GESTURES, activation='softmax')) # softmax is used, because we only expect one gesture to occur per input\n",
- "model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])\n",
- "history = model.fit(inputs_train, outputs_train, epochs=600, batch_size=1, validation_data=(inputs_validate, outputs_validate))\n",
- "\n"
- ],
- "execution_count": 0,
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "NUDPvaJE1wRE",
- "colab_type": "text"
- },
- "source": [
- "## Verify \n",
- "\n",
- "Graph the models performance vs validation.\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "kxA0zCOaS35v",
- "colab_type": "text"
- },
- "source": [
- "### Graph the loss\n",
- "\n",
- "Graph the loss to see when the model stops improving."
- ]
- },
- {
- "cell_type": "code",
- "metadata": {
- "id": "bvFNHXoQzmcM",
- "colab_type": "code",
- "colab": {}
- },
- "source": [
- "# increase the size of the graphs. The default size is (6,4).\n",
- "plt.rcParams[\"figure.figsize\"] = (20,10)\n",
- "\n",
- "# graph the loss, the model above is configure to use \"mean squared error\" as the loss function\n",
- "loss = history.history['loss']\n",
- "val_loss = history.history['val_loss']\n",
- "epochs = range(1, len(loss) + 1)\n",
- "plt.plot(epochs, loss, 'g.', label='Training loss')\n",
- "plt.plot(epochs, val_loss, 'b', label='Validation loss')\n",
- "plt.title('Training and validation loss')\n",
- "plt.xlabel('Epochs')\n",
- "plt.ylabel('Loss')\n",
- "plt.legend()\n",
- "plt.show()\n",
- "\n",
- "print(plt.rcParams[\"figure.figsize\"])"
- ],
- "execution_count": 0,
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "DG3m-VpE1zOd",
- "colab_type": "text"
- },
- "source": [
- "### Graph the loss again, skipping a bit of the start\n",
- "\n",
- "We'll graph the same data as the previous code cell, but start at index 100 so we can further zoom in once the model starts to converge."
- ]
- },
- {
- "cell_type": "code",
- "metadata": {
- "id": "c3xT7ue2zovd",
- "colab_type": "code",
- "colab": {}
- },
- "source": [
- "# graph the loss again skipping a bit of the start\n",
- "SKIP = 100\n",
- "plt.plot(epochs[SKIP:], loss[SKIP:], 'g.', label='Training loss')\n",
- "plt.plot(epochs[SKIP:], val_loss[SKIP:], 'b.', label='Validation loss')\n",
- "plt.title('Training and validation loss')\n",
- "plt.xlabel('Epochs')\n",
- "plt.ylabel('Loss')\n",
- "plt.legend()\n",
- "plt.show()"
- ],
- "execution_count": 0,
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "CRjvkFQy2RgS",
- "colab_type": "text"
- },
- "source": [
- "### Graph the mean absolute error\n",
- "\n",
- "[Mean absolute error](https://en.wikipedia.org/wiki/Mean_absolute_error) is another metric to judge the performance of the model.\n",
- "\n"
- ]
- },
- {
- "cell_type": "code",
- "metadata": {
- "id": "mBjCf1-2zx9C",
- "colab_type": "code",
- "colab": {}
- },
- "source": [
- "# graph of mean absolute error\n",
- "mae = history.history['mae']\n",
- "val_mae = history.history['val_mae']\n",
- "plt.plot(epochs[SKIP:], mae[SKIP:], 'g.', label='Training MAE')\n",
- "plt.plot(epochs[SKIP:], val_mae[SKIP:], 'b.', label='Validation MAE')\n",
- "plt.title('Training and validation mean absolute error')\n",
- "plt.xlabel('Epochs')\n",
- "plt.ylabel('MAE')\n",
- "plt.legend()\n",
- "plt.show()\n"
- ],
- "execution_count": 0,
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "guMjtfa42ahM",
- "colab_type": "text"
- },
- "source": [
- "### Run with Test Data\n",
- "Put our test data into the model and plot the predictions\n"
- ]
- },
- {
- "cell_type": "code",
- "metadata": {
- "id": "V3Y0CCWJz2EK",
- "colab_type": "code",
- "colab": {}
- },
- "source": [
- "# use the model to predict the test inputs\n",
- "predictions = model.predict(inputs_test)\n",
- "\n",
- "# print the predictions and the expected ouputs\n",
- "print(\"predictions =\\n\", np.round(predictions, decimals=3))\n",
- "print(\"actual =\\n\", outputs_test)\n",
- "\n",
- "# Plot the predictions along with to the test data\n",
- "plt.clf()\n",
- "plt.title('Training data predicted vs actual values')\n",
- "plt.plot(inputs_test, outputs_test, 'b.', label='Actual')\n",
- "plt.plot(inputs_test, predictions, 'r.', label='Predicted')\n",
- "plt.show()"
- ],
- "execution_count": 0,
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "j7DO6xxXVCym",
- "colab_type": "text"
- },
- "source": [
- "# Convert the Trained Model to Tensor Flow Lite\n",
- "\n",
- "The next cell converts the model to TFlite format. The size in bytes of the model is also printed out."
- ]
- },
- {
- "cell_type": "code",
- "metadata": {
- "id": "0Xn1-Rn9Cp_8",
- "colab_type": "code",
- "colab": {}
- },
- "source": [
- "# Convert the model to the TensorFlow Lite format without quantization\n",
- "converter = tf.lite.TFLiteConverter.from_keras_model(model)\n",
- "tflite_model = converter.convert()\n",
- "\n",
- "# Save the model to disk\n",
- "open(\"gesture_model.tflite\", \"wb\").write(tflite_model)\n",
- " \n",
- "import os\n",
- "basic_model_size = os.path.getsize(\"gesture_model.tflite\")\n",
- "print(\"Model is %d bytes\" % basic_model_size)\n",
- " \n",
- " "
- ],
- "execution_count": 0,
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "ykccQn7SXrUX",
- "colab_type": "text"
- },
- "source": [
- "## Encode the Model in an Arduino Header File \n",
- "\n",
- "The next cell creates a constant byte array that contains the TFlite model. Import it as a tab with the sketch below."
- ]
- },
- {
- "cell_type": "code",
- "metadata": {
- "id": "9J33uwpNtAku",
- "colab_type": "code",
- "colab": {}
- },
- "source": [
- "!echo \"const unsigned char model[] = {\" > /content/model.h\n",
- "!cat gesture_model.tflite | xxd -i >> /content/model.h\n",
- "!echo \"};\" >> /content/model.h\n",
- "\n",
- "import os\n",
- "model_h_size = os.path.getsize(\"model.h\")\n",
- "print(f\"Header file, model.h, is {model_h_size:,} bytes.\")\n",
- "print(\"\\nOpen the side panel (refresh if needed). Double click model.h to download the file.\")"
- ],
- "execution_count": 0,
- "outputs": []
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "1eSkHZaLzMId",
- "colab_type": "text"
- },
- "source": [
- "# Classifying IMU Data\n",
- "\n",
- "Now it's time to switch back to the tutorial instructions and run our new model on the Arduino Nano 33 BLE Sense to classify the accelerometer and gyroscope data.\n"
- ]
- }
- ]
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "uvDA8AK7QOq-"
+ },
+ "source": [
+ "## Setup Python Environment \n",
+ "\n",
+ "The next cell sets up the dependencies required for the notebook, run it."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "Y2gs-PL4xDkZ"
+ },
+ "outputs": [],
+ "source": [
+ "# Setup environment\n",
+ "!apt-get -qq install xxd\n",
+ "!pip install pandas numpy matplotlib\n",
+ "!pip install tensorflow==2.0.0-rc1"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "9lwkeshJk7dg"
+ },
+ "source": [
+ "# Upload Data\n",
+ "\n",
+ "1. Open the panel on the left side of Colab by clicking on the __>__\n",
+ "1. Select the Files tab\n",
+ "1. Drag `punch.csv` and `flex.csv` files from your computer to the tab to upload them into colab."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "Eh9yve14gUyD"
+ },
+ "source": [
+ "# Graph Data (optional)\n",
+ "\n",
+ "We'll graph the input files on two separate graphs, acceleration and gyroscope, as each data set has different units and scale."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "I65ukChEgyNp"
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "\n",
+ "filename = \"punch.csv\"\n",
+ "\n",
+ "df = pd.read_csv(\"/content/\" + filename)\n",
+ "\n",
+ "index = range(1, len(df['aX']) + 1)\n",
+ "\n",
+ "plt.rcParams[\"figure.figsize\"] = (20,10)\n",
+ "\n",
+ "plt.plot(index, df['aX'], 'g.', label='x', linestyle='solid', marker=',')\n",
+ "plt.plot(index, df['aY'], 'b.', label='y', linestyle='solid', marker=',')\n",
+ "plt.plot(index, df['aZ'], 'r.', label='z', linestyle='solid', marker=',')\n",
+ "plt.title(\"Acceleration\")\n",
+ "plt.xlabel(\"Sample #\")\n",
+ "plt.ylabel(\"Acceleration (G)\")\n",
+ "plt.legend()\n",
+ "plt.show()\n",
+ "\n",
+ "plt.plot(index, df['gX'], 'g.', label='x', linestyle='solid', marker=',')\n",
+ "plt.plot(index, df['gY'], 'b.', label='y', linestyle='solid', marker=',')\n",
+ "plt.plot(index, df['gZ'], 'r.', label='z', linestyle='solid', marker=',')\n",
+ "plt.title(\"Gyroscope\")\n",
+ "plt.xlabel(\"Sample #\")\n",
+ "plt.ylabel(\"Gyroscope (deg/sec)\")\n",
+ "plt.legend()\n",
+ "plt.show()\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "kSxUeYPNQbOg"
+ },
+ "source": [
+ "# Train Neural Network\n",
+ "\n",
+ "\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "Gxk414PU3oy3"
+ },
+ "source": [
+ "## Parse and prepare the data\n",
+ "\n",
+ "The next cell parses the csv files and transforms them to a format that will be used to train the fully connected neural network.\n",
+ "\n",
+ "Update the `GESTURES` list with the gesture data you've collected in `.csv` format.\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "AGChd1FAk5_j"
+ },
+ "outputs": [],
+ "source": [
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "import pandas as pd\n",
+ "import tensorflow as tf\n",
+ "\n",
+ "print(f\"TensorFlow version = {tf.__version__}\\n\")\n",
+ "\n",
+ "# Set a fixed random seed value, for reproducibility, this will allow us to get\n",
+ "# the same random numbers each time the notebook is run\n",
+ "SEED = 1337\n",
+ "np.random.seed(SEED)\n",
+ "tf.random.set_seed(SEED)\n",
+ "\n",
+ "# the list of gestures that data is available for\n",
+ "GESTURES = [\n",
+ " \"punch\",\n",
+ " \"flex\",\n",
+ "]\n",
+ "\n",
+ "SAMPLES_PER_GESTURE = 119\n",
+ "\n",
+ "NUM_GESTURES = len(GESTURES)\n",
+ "\n",
+ "# create a one-hot encoded matrix that is used in the output\n",
+ "ONE_HOT_ENCODED_GESTURES = np.eye(NUM_GESTURES)\n",
+ "\n",
+ "inputs = []\n",
+ "outputs = []\n",
+ "\n",
+ "# read each csv file and push an input and output\n",
+ "for gesture_index in range(NUM_GESTURES):\n",
+ " gesture = GESTURES[gesture_index]\n",
+ " print(f\"Processing index {gesture_index} for gesture '{gesture}'.\")\n",
+ " \n",
+ " output = ONE_HOT_ENCODED_GESTURES[gesture_index]\n",
+ " \n",
+ " df = pd.read_csv(\"/content/\" + gesture + \".csv\")\n",
+ " \n",
+ " # calculate the number of gesture recordings in the file\n",
+ " num_recordings = int(df.shape[0] / SAMPLES_PER_GESTURE)\n",
+ " \n",
+ " print(f\"\\tThere are {num_recordings} recordings of the {gesture} gesture.\")\n",
+ " \n",
+ " for i in range(num_recordings):\n",
+ " tensor = []\n",
+ " for j in range(SAMPLES_PER_GESTURE):\n",
+ " index = i * SAMPLES_PER_GESTURE + j\n",
+ " # normalize the input data, between 0 to 1:\n",
+ " # - acceleration is between: -4 to +4\n",
+ " # - gyroscope is between: -2000 to +2000\n",
+ " tensor += [\n",
+ " (df['aX'][index] + 4) / 8,\n",
+ " (df['aY'][index] + 4) / 8,\n",
+ " (df['aZ'][index] + 4) / 8,\n",
+ " (df['gX'][index] + 2000) / 4000,\n",
+ " (df['gY'][index] + 2000) / 4000,\n",
+ " (df['gZ'][index] + 2000) / 4000\n",
+ " ]\n",
+ "\n",
+ " inputs.append(tensor)\n",
+ " outputs.append(output)\n",
+ "\n",
+ "# convert the list to numpy array\n",
+ "inputs = np.array(inputs)\n",
+ "outputs = np.array(outputs)\n",
+ "\n",
+ "print(\"Data set parsing and preparation complete.\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "d5_61831d5AM"
+ },
+ "source": [
+ "## Randomize and split the input and output pairs for training\n",
+ "\n",
+ "Randomly split input and output pairs into sets of data: 60% for training, 20% for validation, and 20% for testing.\n",
+ "\n",
+ " - the training set is used to train the model\n",
+ " - the validation set is used to measure how well the model is performing during training\n",
+ " - the testing set is used to test the model after training"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "QfNEmUZMeIEx"
+ },
+ "outputs": [],
+ "source": [
+ "# Randomize the order of the inputs, so they can be evenly distributed for training, testing, and validation\n",
+ "# https://stackoverflow.com/a/37710486/2020087\n",
+ "num_inputs = len(inputs)\n",
+ "randomize = np.arange(num_inputs)\n",
+ "np.random.shuffle(randomize)\n",
+ "\n",
+ "# Swap the consecutive indexes (0, 1, 2, etc) with the randomized indexes\n",
+ "inputs = inputs[randomize]\n",
+ "outputs = outputs[randomize]\n",
+ "\n",
+ "# Split the recordings (group of samples) into three sets: training, testing and validation\n",
+ "TRAIN_SPLIT = int(0.6 * num_inputs)\n",
+ "TEST_SPLIT = int(0.2 * num_inputs + TRAIN_SPLIT)\n",
+ "\n",
+ "inputs_train, inputs_test, inputs_validate = np.split(inputs, [TRAIN_SPLIT, TEST_SPLIT])\n",
+ "outputs_train, outputs_test, outputs_validate = np.split(outputs, [TRAIN_SPLIT, TEST_SPLIT])\n",
+ "\n",
+ "print(\"Data set randomization and splitting complete.\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "a9g2n41p24nR"
+ },
+ "source": [
+ "## Build & Train the Model\n",
+ "\n",
+ "Build and train a [TensorFlow](https://www.tensorflow.org) model using the high-level [Keras](https://www.tensorflow.org/guide/keras) API."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "kGNFa-lX24Qo"
+ },
+ "outputs": [],
+ "source": [
+ "# build the model and train it\n",
+ "model = tf.keras.Sequential()\n",
+ "model.add(tf.keras.layers.Dense(50, activation='relu')) # relu is used for performance\n",
+ "model.add(tf.keras.layers.Dense(15, activation='relu'))\n",
+ "model.add(tf.keras.layers.Dense(NUM_GESTURES, activation='softmax')) # softmax is used, because we only expect one gesture to occur per input\n",
+ "model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])\n",
+ "history = model.fit(inputs_train, outputs_train, epochs=600, batch_size=1, validation_data=(inputs_validate, outputs_validate))\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "NUDPvaJE1wRE"
+ },
+ "source": [
+ "## Verify \n",
+ "\n",
+ "Graph the model's performance vs validation.\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "kxA0zCOaS35v"
+ },
+ "source": [
+ "### Graph the loss\n",
+ "\n",
+ "Graph the loss to see when the model stops improving."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "bvFNHXoQzmcM"
+ },
+ "outputs": [],
+ "source": [
+ "# increase the size of the graphs. The default size is (6,4).\n",
+ "plt.rcParams[\"figure.figsize\"] = (20,10)\n",
+ "\n",
+ "# graph the loss, the model above is configured to use \"mean squared error\" as the loss function\n",
+ "loss = history.history['loss']\n",
+ "val_loss = history.history['val_loss']\n",
+ "epochs = range(1, len(loss) + 1)\n",
+ "plt.plot(epochs, loss, 'g.', label='Training loss')\n",
+ "plt.plot(epochs, val_loss, 'b', label='Validation loss')\n",
+ "plt.title('Training and validation loss')\n",
+ "plt.xlabel('Epochs')\n",
+ "plt.ylabel('Loss')\n",
+ "plt.legend()\n",
+ "plt.show()\n",
+ "\n",
+ "print(plt.rcParams[\"figure.figsize\"])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "DG3m-VpE1zOd"
+ },
+ "source": [
+ "### Graph the loss again, skipping a bit of the start\n",
+ "\n",
+ "We'll graph the same data as the previous code cell, but start at index 100 so we can further zoom in once the model starts to converge."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "c3xT7ue2zovd"
+ },
+ "outputs": [],
+ "source": [
+ "# graph the loss again skipping a bit of the start\n",
+ "SKIP = 100\n",
+ "plt.plot(epochs[SKIP:], loss[SKIP:], 'g.', label='Training loss')\n",
+ "plt.plot(epochs[SKIP:], val_loss[SKIP:], 'b.', label='Validation loss')\n",
+ "plt.title('Training and validation loss')\n",
+ "plt.xlabel('Epochs')\n",
+ "plt.ylabel('Loss')\n",
+ "plt.legend()\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "CRjvkFQy2RgS"
+ },
+ "source": [
+ "### Graph the mean absolute error\n",
+ "\n",
+ "[Mean absolute error](https://en.wikipedia.org/wiki/Mean_absolute_error) is another metric to judge the performance of the model.\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "mBjCf1-2zx9C"
+ },
+ "outputs": [],
+ "source": [
+ "# graph of mean absolute error\n",
+ "mae = history.history['mae']\n",
+ "val_mae = history.history['val_mae']\n",
+ "plt.plot(epochs[SKIP:], mae[SKIP:], 'g.', label='Training MAE')\n",
+ "plt.plot(epochs[SKIP:], val_mae[SKIP:], 'b.', label='Validation MAE')\n",
+ "plt.title('Training and validation mean absolute error')\n",
+ "plt.xlabel('Epochs')\n",
+ "plt.ylabel('MAE')\n",
+ "plt.legend()\n",
+ "plt.show()\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "guMjtfa42ahM"
+ },
+ "source": [
+ "### Run with Test Data\n",
+ "Put our test data into the model and plot the predictions\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "V3Y0CCWJz2EK"
+ },
+ "outputs": [],
+ "source": [
+ "# use the model to predict the test inputs\n",
+ "predictions = model.predict(inputs_test)\n",
+ "\n",
+ "# print the predictions and the expected outputs\n",
+ "print(\"predictions =\\n\", np.round(predictions, decimals=3))\n",
+ "print(\"actual =\\n\", outputs_test)\n",
+ "\n",
+ "# Plot the predictions along with the test data\n",
+ "plt.clf()\n",
+ "plt.title('Training data predicted vs actual values')\n",
+ "plt.plot(inputs_test, outputs_test, 'b.', label='Actual')\n",
+ "plt.plot(inputs_test, predictions, 'r.', label='Predicted')\n",
+ "plt.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "j7DO6xxXVCym"
+ },
+ "source": [
+ "# Convert the Trained Model to TensorFlow Lite\n",
+ "\n",
+ "The next cell converts the model to TFLite format. The size in bytes of the model is also printed out."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "0Xn1-Rn9Cp_8"
+ },
+ "outputs": [],
+ "source": [
+ "# Convert the model to the TensorFlow Lite format without quantization\n",
+ "converter = tf.lite.TFLiteConverter.from_keras_model(model)\n",
+ "tflite_model = converter.convert()\n",
+ "\n",
+ "# Save the model to disk\n",
+ "open(\"gesture_model.tflite\", \"wb\").write(tflite_model)\n",
+ " \n",
+ "import os\n",
+ "basic_model_size = os.path.getsize(\"gesture_model.tflite\")\n",
+ "print(\"Model is %d bytes\" % basic_model_size)\n",
+ " \n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "ykccQn7SXrUX"
+ },
+ "source": [
+ "## Encode the Model in an Arduino Header File \n",
+ "\n",
+ "The next cell creates a constant byte array that contains the TFLite model. Import it as a tab with the sketch below."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 0,
+ "metadata": {
+ "colab": {},
+ "colab_type": "code",
+ "id": "9J33uwpNtAku"
+ },
+ "outputs": [],
+ "source": [
+ "!printf \"// We need to keep the data array aligned on some architectures.\\n#ifdef __has_attribute\\n#define HAVE_ATTRIBUTE(x) __has_attribute(x)\\n#else\\n#define HAVE_ATTRIBUTE(x) 0\\n#endif\\n#if HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__))\\n#define DATA_ALIGN_ATTRIBUTE __attribute__((aligned(4)))\\n#else\\n#define DATA_ALIGN_ATTRIBUTE\\n#endif\\n\\n const unsigned char model[] DATA_ALIGN_ATTRIBUTE = {\\n\" > /content/model.h\n",
+ "!cat gesture_model.tflite | xxd -i >> /content/model.h\n",
+ "!echo \"};\" >> /content/model.h\n",
+ "\n",
+ "import os\n",
+ "model_h_size = os.path.getsize(\"model.h\")\n",
+ "print(f\"Header file, model.h, is {model_h_size:,} bytes.\")\n",
+ "print(\"\\nOpen the side panel (refresh if needed). Double click model.h to download the file.\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "1eSkHZaLzMId"
+ },
+ "source": [
+ "# Classifying IMU Data\n",
+ "\n",
+ "Now it's time to switch back to the tutorial instructions and run our new model on the Arduino Nano 33 BLE Sense to classify the accelerometer and gyroscope data.\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "collapsed_sections": [],
+ "name": "arduino_tinyml_workshop.ipynb",
+ "provenance": [],
+ "toc_visible": true
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.7.5"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
}