Commit f29db4e

Fixing alignment header issue for Gesture model file generated.
1 parent 9183a15 commit f29db4e

1 file changed, +37 -23 lines

GestureToEmoji/arduino_tinyml_workshop.ipynb

Mode changed from 100755 to 100644
@@ -26,6 +26,7 @@
 "## Gesture recognition tutorial\n",
 " * Sandeep Mistry - Arduino\n",
 " * Don Coleman - Chariot Solutions\n",
+" * [Kartik Thakore](https://www.linkedin.com/in/kartikthakore/) (simple alignment fix)\n",
 "\n",
 " \n",
 "https://github.com/arduino/ArduinoTensorFlowLiteTutorials/"
@@ -40,7 +41,7 @@
 "source": [
 "## Setup Python Environment \n",
 "\n",
-"The next cell sets up the dependencies in required for the notebook, run it."
+"Install up the Python libraries and Linux tools for the code in the notebook."
 ]
 },
 {
@@ -54,7 +55,8 @@
 "# Setup environment\n",
 "!apt-get -qq install xxd\n",
 "!pip install pandas numpy matplotlib\n",
-"!pip install tensorflow==2.0.0-rc1"
+"%tensorflow_version 2.x\n",
+"!pip install tensorflow"
 ],
 "execution_count": 0,
 "outputs": []
@@ -68,9 +70,9 @@
 "source": [
 "# Upload Data\n",
 "\n",
-"1. Open the panel on the left side of Colab by clicking on the __>__\n",
-"1. Select the files tab\n",
-"1. Drag `punch.csv` and `flex.csv` files from your computer to the tab to upload them into colab."
+"1. If necessary, open the panel on the left side of Colab by clicking on the __>__\n",
+"1. Select the files tab in the left panel\n",
+"1. Drag the `punch.csv` and `flex.csv` files from your computer to the tab to upload them into colab."
 ]
 },
 {
{
@@ -82,7 +84,7 @@
8284
"source": [
8385
"# Graph Data (optional)\n",
8486
"\n",
85-
"We'll graph the input files on two separate graphs, acceleration and gyroscope, as each data set has different units and scale."
87+
"Plot the CSV data on two separate graphs, acceleration and gyroscope, because each data set has different units and scale."
8688
]
8789
},
8890
{
@@ -97,7 +99,7 @@
 "import numpy as np\n",
 "import pandas as pd\n",
 "\n",
-"filename = \"punch.csv\"\n",
+"filename = \"flex.csv\"\n",
 "\n",
 "df = pd.read_csv(\"/content/\" + filename)\n",
 "\n",
@@ -148,9 +150,9 @@
 "source": [
 "## Parse and prepare the data\n",
 "\n",
-"The next cell parses the csv files and transforms them to a format that will be used to train the fully connected neural network.\n",
+"Parse the CSV files and transforms them to a format that can be used to train the fully connected neural network.\n",
 "\n",
-"Update the `GESTURES` list with the gesture data you've collected in `.csv` format.\n"
+"If you've recorded additional gestures, update the `GESTURES` list with the names of the additional CSV files.\n"
 ]
 },
 {
@@ -174,10 +176,10 @@
 "np.random.seed(SEED)\n",
 "tf.random.set_seed(SEED)\n",
 "\n",
-"# the list of gestures that data is available for\n",
+"# the list of gestures \n",
 "GESTURES = [\n",
 " \"punch\",\n",
-" \"flex\",\n",
+" \"flex\"\n",
 "]\n",
 "\n",
 "SAMPLES_PER_GESTURE = 119\n",
@@ -198,6 +200,10 @@
 " output = ONE_HOT_ENCODED_GESTURES[gesture_index]\n",
 " \n",
 " df = pd.read_csv(\"/content/\" + gesture + \".csv\")\n",
+"\n",
+" # get rid of pesky empty value lines of csv which cause NaN inputs to TensorFlow\n",
+" df = df.dropna()\n",
+" df = df.reset_index(drop=True)\n",
 " \n",
 " # calculate the number of gesture recordings in the file\n",
 " num_recordings = int(df.shape[0] / SAMPLES_PER_GESTURE)\n",
@@ -227,6 +233,7 @@
 "inputs = np.array(inputs)\n",
 "outputs = np.array(outputs)\n",
 "\n",
+"print(inputs.size)\n",
 "print(\"Data set parsing and preparation complete.\")"
 ],
 "execution_count": 0,
@@ -302,7 +309,8 @@
 "model = tf.keras.Sequential()\n",
 "model.add(tf.keras.layers.Dense(50, activation='relu')) # relu is used for performance\n",
 "model.add(tf.keras.layers.Dense(15, activation='relu'))\n",
-"model.add(tf.keras.layers.Dense(NUM_GESTURES, activation='softmax')) # softmax is used, because we only expect one gesture to occur per input\n",
+"# the final layer is softmax because we only expect one gesture to occur per input\n",
+"model.add(tf.keras.layers.Dense(NUM_GESTURES, activation='softmax'))\n",
 "model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])\n",
 "history = model.fit(inputs_train, outputs_train, epochs=600, batch_size=1, validation_data=(inputs_validate, outputs_validate))\n",
 "\n"
@@ -438,7 +446,7 @@
 },
 "source": [
 "### Run with Test Data\n",
-"Put our test data into the model and plot the predictions\n"
+"Put our test data into the model and compare the predictions vs actual output\n"
 ]
 },
 {
{
@@ -454,14 +462,7 @@
454462
"\n",
455463
"# print the predictions and the expected ouputs\n",
456464
"print(\"predictions =\\n\", np.round(predictions, decimals=3))\n",
457-
"print(\"actual =\\n\", outputs_test)\n",
458-
"\n",
459-
"# Plot the predictions along with to the test data\n",
460-
"plt.clf()\n",
461-
"plt.title('Training data predicted vs actual values')\n",
462-
"plt.plot(inputs_test, outputs_test, 'b.', label='Actual')\n",
463-
"plt.plot(inputs_test, predictions, 'r.', label='Predicted')\n",
464-
"plt.show()"
465+
"print(\"actual =\\n\", outputs_test)"
465466
],
466467
"execution_count": 0,
467468
"outputs": []
@@ -522,7 +523,7 @@
 "colab": {}
 },
 "source": [
-"!echo \"const unsigned char model[] = {\" > /content/model.h\n",
+"!printf \"// We need to keep the data array aligned on some architectures.\\n#ifdef __has_attribute\\n#define HAVE_ATTRIBUTE(x) __has_attribute(x)\\n#else\\n#define HAVE_ATTRIBUTE(x) 0\\n#endif\\n#if HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__))\\n#define DATA_ALIGN_ATTRIBUTE __attribute__((aligned(4)))\\n#else\\n#define DATA_ALIGN_ATTRIBUTE\\n#endif\\n\\n const unsigned char model[] DATA_ALIGN_ATTRIBUTE = {\" > /content/model.h\n",
 "!cat gesture_model.tflite | xxd -i >> /content/model.h\n",
 "!echo \"};\" >> /content/model.h\n",
 "\n",
@@ -545,6 +546,19 @@
 "\n",
 "Now it's time to switch back to the tutorial instructions and run our new model on the Arduino Nano 33 BLE Sense to classify the accelerometer and gyroscope data.\n"
 ]
+},
+{
+"cell_type": "code",
+"metadata": {
+"id": "fsg9_6rkeGvG",
+"colab_type": "code",
+"colab": {}
+},
+"source": [
+""
+],
+"execution_count": 0,
+"outputs": []
 }
 ]
-}
+}
