"## Gesture recognition tutorial\n",
"* Sandeep Mistry - Arduino\n",
"* Don Coleman - Chariot Solutions\n",
+ "* [Kartik Thakore](https://www.linkedin.com/in/kartikthakore/) (simple alignment fix)\n",
"\n",
"\n",
"https://github.com/arduino/ArduinoTensorFlowLiteTutorials/"
"source": [
"## Setup Python Environment \n",
"\n",
- "The next cell sets up the dependencies in required for the notebook, run it."
+ "Install the Python libraries and Linux tools for the code in the notebook."
]
},
{
"# Setup environment\n",
"!apt-get -qq install xxd\n",
"!pip install pandas numpy matplotlib\n",
- "!pip install tensorflow==2.0.0-rc1"
+ "%tensorflow_version 2.x\n",
+ "!pip install tensorflow"
],
"execution_count": 0,
"outputs": []
"source": [
"# Upload Data\n",
"\n",
- "1. Open the panel on the left side of Colab by clicking on the __>__\n",
- "1. Select the files tab\n",
- "1. Drag `punch.csv` and `flex.csv` files from your computer to the tab to upload them into colab."
+ "1. If necessary, open the panel on the left side of Colab by clicking on the __>__\n",
+ "1. Select the files tab in the left panel\n",
+ "1. Drag the `punch.csv` and `flex.csv` files from your computer to the tab to upload them into Colab."
]
},
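
As an alternative to drag-and-drop, Colab can prompt for the files programmatically; a minimal sketch using the standard `google.colab.files` helper (an editorial aside, not part of this commit):

```python
# prompt for punch.csv and flex.csv; uploaded files land in /content
from google.colab import files

uploaded = files.upload()
print("uploaded:", list(uploaded.keys()))
```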
{
"source": [
"# Graph Data (optional)\n",
"\n",
- "We'll graph the input files on two separate graphs, acceleration and gyroscope, as each data set has different units and scale."
+ "Plot the CSV data on two separate graphs, acceleration and gyroscope, because each data set has different units and scale."
]
},
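
The plotting cell is only partially visible in this diff; a minimal sketch of the two-graph plot the text describes, assuming the tutorial's `aX..aZ`/`gX..gZ` column names:

```python
import matplotlib.pyplot as plt
import pandas as pd

df = pd.read_csv("/content/flex.csv")

# acceleration and gyroscope have different units and scale, so plot them separately
plt.figure()
plt.title("Acceleration")
for axis in ["aX", "aY", "aZ"]:
    plt.plot(df[axis], label=axis)
plt.legend()

plt.figure()
plt.title("Gyroscope")
for axis in ["gX", "gY", "gZ"]:
    plt.plot(df[axis], label=axis)
plt.legend()
plt.show()
```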
{
"import numpy as np\n",
"import pandas as pd\n",
"\n",
- "filename = \"punch.csv\"\n",
+ "filename = \"flex.csv\"\n",
"\n",
"df = pd.read_csv(\"/content/\" + filename)\n",
"\n",
"source": [
"## Parse and prepare the data\n",
"\n",
- "The next cell parses the csv files and transforms them to a format that will be used to train the fully connected neural network.\n",
+ "Parse the CSV files and transform them to a format that can be used to train the fully connected neural network.\n",
"\n",
- "Update the `GESTURES` list with the gesture data you've collected in `.csv` format.\n"
+ "If you've recorded additional gestures, update the `GESTURES` list with the names of the additional CSV files.\n"
]
},
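
The "format that can be used to train" is a one-hot target vector per gesture; a minimal sketch of how the `ONE_HOT_ENCODED_GESTURES` matrix used further down can be built:

```python
import numpy as np

GESTURES = ["punch", "flex"]
NUM_GESTURES = len(GESTURES)

# row i is the one-hot training target for gesture i
ONE_HOT_ENCODED_GESTURES = np.eye(NUM_GESTURES)
print(ONE_HOT_ENCODED_GESTURES)  # [[1. 0.] [0. 1.]]
```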
{
"np.random.seed(SEED)\n",
"tf.random.set_seed(SEED)\n",
"\n",
- "# the list of gestures that data is available for\n",
+ "# the list of gestures\n",
"GESTURES = [\n",
"    \"punch\",\n",
- "    \"flex\",\n",
+ "    \"flex\"\n",
"]\n",
"\n",
"SAMPLES_PER_GESTURE = 119\n",
"output = ONE_HOT_ENCODED_GESTURES[gesture_index]\n",
"\n",
"df = pd.read_csv(\"/content/\" + gesture + \".csv\")\n",
+ "\n",
+ "# get rid of pesky empty value lines of csv which cause NaN inputs to TensorFlow\n",
+ "df = df.dropna()\n",
+ "df = df.reset_index(drop=True)\n",
"\n",
"# calculate the number of gesture recordings in the file\n",
"num_recordings = int(df.shape[0] / SAMPLES_PER_GESTURE)\n",
"inputs = np.array(inputs)\n",
"outputs = np.array(outputs)\n",
"\n",
+ "print(inputs.size)\n",
"print(\"Data set parsing and preparation complete.\")"
],
"execution_count": 0,
"model = tf.keras.Sequential()\n",
"model.add(tf.keras.layers.Dense(50, activation='relu')) # relu is used for performance\n",
"model.add(tf.keras.layers.Dense(15, activation='relu'))\n",
- "model.add(tf.keras.layers.Dense(NUM_GESTURES, activation='softmax')) # softmax is used, because we only expect one gesture to occur per input\n",
+ "# the final layer is softmax because we only expect one gesture to occur per input\n",
+ "model.add(tf.keras.layers.Dense(NUM_GESTURES, activation='softmax'))\n",
"model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])\n",
"history = model.fit(inputs_train, outputs_train, epochs=600, batch_size=1, validation_data=(inputs_validate, outputs_validate))\n",
"\n"
},
"source": [
"### Run with Test Data\n",
- "Put our test data into the model and plot the predictions\n"
+ "Put our test data into the model and compare the predictions to the actual outputs\n"
]
},
{
"\n",
"# print the predictions and the expected outputs\n",
"print(\"predictions =\\n\", np.round(predictions, decimals=3))\n",
- "print(\"actual =\\n\", outputs_test)\n",
- "\n",
- "# Plot the predictions along with to the test data\n",
- "plt.clf()\n",
- "plt.title('Training data predicted vs actual values')\n",
- "plt.plot(inputs_test, outputs_test, 'b.', label='Actual')\n",
- "plt.plot(inputs_test, predictions, 'r.', label='Predicted')\n",
- "plt.show()"
+ "print(\"actual =\\n\", outputs_test)"
],
"execution_count": 0,
"outputs": []
"colab": {}
},
"source": [
- "!echo \"const unsigned char model[] = {\" > /content/model.h\n",
+ "!printf \"// We need to keep the data array aligned on some architectures.\\n#ifdef __has_attribute\\n#define HAVE_ATTRIBUTE(x) __has_attribute(x)\\n#else\\n#define HAVE_ATTRIBUTE(x) 0\\n#endif\\n#if HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__))\\n#define DATA_ALIGN_ATTRIBUTE __attribute__((aligned(4)))\\n#else\\n#define DATA_ALIGN_ATTRIBUTE\\n#endif\\n\\nconst unsigned char model[] DATA_ALIGN_ATTRIBUTE = {\" > /content/model.h\n",
"!cat gesture_model.tflite | xxd -i >> /content/model.h\n",
"!echo \"};\" >> /content/model.h\n",
"\n",
"\n",
"Now it's time to switch back to the tutorial instructions and run our new model on the Arduino Nano 33 BLE Sense to classify the accelerometer and gyroscope data.\n"
]
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "fsg9_6rkeGvG",
+ "colab_type": "code",
+ "colab": {}
+ },
+ "source": [
+ " "
+ ],
+ "execution_count": 0,
+ "outputs": []
}
]
- }
+ }