Skip to content

Commit 1da0ca8

Browse files
Add TFLite Micro examples (#8717)
* create TFLite library * add TFLite hello_world example * add TFLite micro_speech example --------- Co-authored-by: Sanket Wadekar <[email protected]>
1 parent ab6a25e commit 1da0ca8

29 files changed

+3711
-0
lines changed

Diff for: libraries/TFLiteMicro/examples/hello_world/README.md

+18
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,18 @@
1+
# Hello World Example
2+
3+
This example is designed to demonstrate the absolute basics of using [TensorFlow
4+
Lite for Microcontrollers](https://www.tensorflow.org/lite/microcontrollers).
5+
It includes the full end-to-end workflow of training a model, converting it for
use with TensorFlow Lite for Microcontrollers, and running inference on a
microcontroller.
8+
9+
The model is trained to replicate a `sine` function and generates a pattern of
10+
data to either blink LEDs or control an animation, depending on the capabilities
11+
of the device.
12+
13+
## Deploy to ESP32
14+
15+
The sample has been tested on ESP-IDF versions `release/v4.2` and `release/v4.4` with the following devices:
16+
- [ESP32-DevKitC](http://esp-idf.readthedocs.io/en/latest/get-started/get-started-devkitc.html)
17+
- [ESP32-S3-DevKitC](https://docs.espressif.com/projects/esp-idf/en/latest/esp32s3/hw-reference/esp32s3/user-guide-devkitc-1.html)
18+
- [ESP-EYE](https://github.com/espressif/esp-who/blob/master/docs/en/get-started/ESP-EYE_Getting_Started_Guide.md)
+19
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2+
3+
Licensed under the Apache License, Version 2.0 (the "License");
4+
you may not use this file except in compliance with the License.
5+
You may obtain a copy of the License at
6+
7+
http://www.apache.org/licenses/LICENSE-2.0
8+
9+
Unless required by applicable law or agreed to in writing, software
10+
distributed under the License is distributed on an "AS IS" BASIS,
11+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
See the License for the specific language governing permissions and
13+
limitations under the License.
14+
==============================================================================*/
15+
16+
#include "constants.h"
17+
18+
// Number of inferences that make up one full cycle across the model's
// input range. Deliberately small so the per-inference log output stays
// easy to read while testing.
const int kInferencesPerCycle = 20;
+32
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2+
3+
Licensed under the Apache License, Version 2.0 (the "License");
4+
you may not use this file except in compliance with the License.
5+
You may obtain a copy of the License at
6+
7+
http://www.apache.org/licenses/LICENSE-2.0
8+
9+
Unless required by applicable law or agreed to in writing, software
10+
distributed under the License is distributed on an "AS IS" BASIS,
11+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
See the License for the specific language governing permissions and
13+
limitations under the License.
14+
==============================================================================*/
15+
16+
#ifndef TENSORFLOW_LITE_MICRO_EXAMPLES_HELLO_WORLD_CONSTANTS_H_
#define TENSORFLOW_LITE_MICRO_EXAMPLES_HELLO_WORLD_CONSTANTS_H_

// Upper bound of the x range the model was trained on: [0, 2*Pi].
// Pi is written out inline so no math library is required.
const float kXrange = 2.f * 3.14159265359f;

// Number of inferences performed across the x range above. Each
// inference takes time, so a larger value stretches one full cycle out
// over more wall-clock time. Devices differ in inference speed, so the
// actual value is defined per-device (see constants.cpp).
extern const int kInferencesPerCycle;

#endif  // TENSORFLOW_LITE_MICRO_EXAMPLES_HELLO_WORLD_CONSTANTS_H_
+111
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,111 @@
1+
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2+
3+
Licensed under the Apache License, Version 2.0 (the "License");
4+
you may not use this file except in compliance with the License.
5+
You may obtain a copy of the License at
6+
7+
http://www.apache.org/licenses/LICENSE-2.0
8+
9+
Unless required by applicable law or agreed to in writing, software
10+
distributed under the License is distributed on an "AS IS" BASIS,
11+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12+
See the License for the specific language governing permissions and
13+
limitations under the License.
14+
==============================================================================*/
15+
16+
17+
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/micro_log.h"
#include "tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "tensorflow/lite/micro/system_setup.h"
#include "tensorflow/lite/schema/schema_generated.h"

#include "model.h"
#include "constants.h"
#include "output_handler.h"
25+
26+
// Globals, used for compatibility with Arduino-style sketches.
27+
namespace {
28+
const tflite::Model* model = nullptr;
29+
tflite::MicroInterpreter* interpreter = nullptr;
30+
TfLiteTensor* input = nullptr;
31+
TfLiteTensor* output = nullptr;
32+
int inference_count = 0;
33+
34+
constexpr int kTensorArenaSize = 2000;
35+
uint8_t tensor_arena[kTensorArenaSize];
36+
} // namespace
37+
38+
// The name of this function is important for Arduino compatibility.
39+
void setup() {
40+
// Map the model into a usable data structure. This doesn't involve any
41+
// copying or parsing, it's a very lightweight operation.
42+
model = tflite::GetModel(g_model);
43+
if (model->version() != TFLITE_SCHEMA_VERSION) {
44+
MicroPrintf("Model provided is schema version %d not equal to supported "
45+
"version %d.", model->version(), TFLITE_SCHEMA_VERSION);
46+
return;
47+
}
48+
49+
// Pull in only the operation implementations we need.
50+
static tflite::MicroMutableOpResolver<1> resolver;
51+
if (resolver.AddFullyConnected() != kTfLiteOk) {
52+
return;
53+
}
54+
55+
// Build an interpreter to run the model with.
56+
static tflite::MicroInterpreter static_interpreter(
57+
model, resolver, tensor_arena, kTensorArenaSize);
58+
interpreter = &static_interpreter;
59+
60+
// Allocate memory from the tensor_arena for the model's tensors.
61+
TfLiteStatus allocate_status = interpreter->AllocateTensors();
62+
if (allocate_status != kTfLiteOk) {
63+
MicroPrintf("AllocateTensors() failed");
64+
return;
65+
}
66+
67+
// Obtain pointers to the model's input and output tensors.
68+
input = interpreter->input(0);
69+
output = interpreter->output(0);
70+
71+
// Keep track of how many inferences we have performed.
72+
inference_count = 0;
73+
}
74+
75+
// The name of this function is important for Arduino compatibility.
76+
void loop() {
77+
// Calculate an x value to feed into the model. We compare the current
78+
// inference_count to the number of inferences per cycle to determine
79+
// our position within the range of possible x values the model was
80+
// trained on, and use this to calculate a value.
81+
float position = static_cast<float>(inference_count) /
82+
static_cast<float>(kInferencesPerCycle);
83+
float x = position * kXrange;
84+
85+
// Quantize the input from floating-point to integer
86+
int8_t x_quantized = x / input->params.scale + input->params.zero_point;
87+
// Place the quantized input in the model's input tensor
88+
input->data.int8[0] = x_quantized;
89+
90+
// Run inference, and report any error
91+
TfLiteStatus invoke_status = interpreter->Invoke();
92+
if (invoke_status != kTfLiteOk) {
93+
MicroPrintf("Invoke failed on x: %f\n",
94+
static_cast<double>(x));
95+
return;
96+
}
97+
98+
// Obtain the quantized output from model's output tensor
99+
int8_t y_quantized = output->data.int8[0];
100+
// Dequantize the output from integer to floating-point
101+
float y = (y_quantized - output->params.zero_point) * output->params.scale;
102+
103+
// Output the results. A custom HandleOutput function can be implemented
104+
// for each supported hardware target.
105+
HandleOutput(x, y);
106+
107+
// Increment the inference_counter, and reset it if we have reached
108+
// the total number per cycle
109+
inference_count += 1;
110+
if (inference_count >= kInferencesPerCycle) inference_count = 0;
111+
}

0 commit comments

Comments
 (0)