
Commit 5fb8f3f

support Cortex-M + ESP, support tf@2.4.0 on Cortex-M, support tf@2.1.1 on ESP32
1 parent 1cfd8b6 commit 5fb8f3f

File tree: 509 files changed (+102741, −4338 lines)

.DS_Store (0 Bytes): binary file not shown.

README.md
Lines changed: 20 additions & 3 deletions

@@ -14,6 +14,14 @@ Clone this repo in you Arduino libraries folder.
 git clone https://github.com/eloquentarduino/EloquentTinyML.git
 ```
 
+## Export TensorFlow Lite model
+
+To run a model on your microcontroller, you should first have a model.
+
+I suggest you use [`tinymlgen`](https://github.com/eloquentarduino/tinymlgen) to complete this step:
+it will export your TensorFlow Lite model to a C array ready to be loaded
+by this library.
+
 
 ## Use
 
@@ -25,15 +33,15 @@ git clone https://github.com/eloquentarduino/EloquentTinyML.git
 #define NUMBER_OF_OUTPUTS 1
 #define TENSOR_ARENA_SIZE 2*1024
 
-Eloquent::TinyML::TinyML<
+Eloquent::TinyML::TfLite<
     NUMBER_OF_INPUTS,
     NUMBER_OF_OUTPUTS,
     TENSOR_ARENA_SIZE> ml;
 
 
 void setup() {
     Serial.begin(115200);
-    ml.begin(sine_model_quantized_tflite);
+    ml.begin(sine_model);
 }
 
 void loop() {
@@ -50,4 +58,13 @@ void loop() {
     Serial.println(predicted);
     delay(1000);
 }
-```
+```
+
+## Compatibility
+
+Latest version of this library (2.4.0) is compatible with Cortex-M and ESP32 chips and is built starting from:
+
+- [Arduino_TensorFlowLite library version 2.4.0-ALPHA](https://www.tensorflow.org/lite/microcontrollers/overview)
+- [TensorFlowLite_ESP32 version 0.9.0](https://github.com/tanakamasayuki/Arduino_TensorFlowLite_ESP32)
+
+ESP32 support is stuck at TensorFlow 2.1.1 at the moment.
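For readers following along: the array that `tinymlgen` exports (and that `ml.begin(sine_model)` consumes above) is a plain C dump of the .tflite flatbuffer. A rough sketch of its shape, with placeholder bytes and a hypothetical `sine_model.h` file name, neither of which is part of this commit:

```
// sine_model.h -- illustrative shape of a tinymlgen export (placeholder bytes)
const unsigned char sine_model[] = {
    0x1c, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, 0x33, // "TFL3" flatbuffer identifier
    // ... remainder of the serialized TensorFlow Lite model ...
};
const unsigned int sine_model_len = 2488; // placeholder length
```

Including such a header in the sketch is the whole deployment step; the interpreter reads the model directly from this array.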

examples/SineExample/SineExample.ino
Lines changed: 15 additions & 13 deletions

@@ -4,7 +4,7 @@
 
 #define NUMBER_OF_INPUTS 1
 #define NUMBER_OF_OUTPUTS 1
-// in future projects you may need to tweek this value: it's a trial and error process
+// in future projects you may need to tweak this value: it's a trial and error process
 #define TENSOR_ARENA_SIZE 2*1024
 
 Eloquent::TinyML::TfLite<NUMBER_OF_INPUTS, NUMBER_OF_OUTPUTS, TENSOR_ARENA_SIZE> ml;
@@ -16,17 +16,19 @@ void setup() {
 }
 
 void loop() {
-    // pick up a random x and predict its sine
-    float x = 3.14 * random(100) / 100;
-    float y = sin(x);
-    float input[1] = { x };
-    float predicted = ml.predict(input);
+    for (float i = 0; i < 10; i++) {
+        // pick x from 0 to PI
+        float x = 3.14 * i / 10;
+        float y = sin(x);
+        float input[1] = { x };
+        float predicted = ml.predict(input);
 
-    Serial.print("sin(");
-    Serial.print(x);
-    Serial.print(") = ");
-    Serial.print(y);
-    Serial.print("\t predicted: ");
-    Serial.println(predicted);
-    delay(1000);
+        Serial.print("sin(");
+        Serial.print(x);
+        Serial.print(") = ");
+        Serial.print(y);
+        Serial.print("\t predicted: ");
+        Serial.println(predicted);
+        delay(1000);
+    }
 }
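The tweak-this-value comment refers to sizing `TENSOR_ARENA_SIZE` by trial and error. A minimal sketch of that loop, assuming the error API from the old header (`initialized()`, `errorMessage()`) is still exposed by the 2.4.0 `TfLite` class and that `sine_model.h` is a hypothetical tinymlgen export:

```
#include <EloquentTinyML.h>
#include "sine_model.h" // hypothetical tinymlgen export

// start small; if begin() fails to allocate tensors,
// double this value and re-flash until initialization succeeds
#define TENSOR_ARENA_SIZE 2*1024

Eloquent::TinyML::TfLite<1, 1, TENSOR_ARENA_SIZE> ml;

void setup() {
    Serial.begin(115200);
    ml.begin(sine_model);

    if (!ml.initialized()) {
        // e.g. "Cannot allocate tensors" -> arena too small
        Serial.print("Setup failed: ");
        Serial.println(ml.errorMessage());
    }
}

void loop() {
}
```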

library.json
Lines changed: 1 addition & 1 deletion

@@ -6,7 +6,7 @@
     "type": "git",
     "url": "https://github.com/eloquentarduino/EloquentTinyML"
   },
-  "version": "0.0.10",
+  "version": "2.4.0",
   "authors": {
     "name": "Simone Salerno",
     "url": "https://github.com/eloquentarduino"

library.properties
Lines changed: 1 addition & 1 deletion

@@ -1,5 +1,5 @@
 name=EloquentTinyML
-version=0.0.10
+version=2.4.0
 author=Simone Salerno,[email protected]
 maintainer=Simone Salerno,[email protected]
 sentence=An eloquent interface to Tensorflow Lite for Microcontrollers

src/.DS_Store (−2 KB): binary file not shown.

src/EloquentTinyML.h
Lines changed: 7 additions & 222 deletions

@@ -1,237 +1,22 @@
 #pragma once
 
 #include <Arduino.h>
+#include <math.h>
 
 #ifdef max
 #define REDEFINE_MAX
 #undef max
 #undef min
 #endif
 
-#include <math.h>
-#include "tensorflow/lite/version.h"
-#include "tensorflow/lite/micro/kernels/all_ops_resolver.h"
-#include "tensorflow/lite/micro/micro_error_reporter.h"
-#include "tensorflow/lite/micro/micro_interpreter.h"
+
+#if defined(ESP32)
+#include "TfLiteESP32.h"
+#else
+#include "TfLiteARM.h"
+#endif
 
 #ifdef REDEFINE_MAX
 #define max(a,b) ((a)>(b)?(a):(b))
 #define min(a,b) ((a)<(b)?(a):(b))
 #endif
-
-
-namespace Eloquent {
-    namespace TinyML {
-
-        enum TfLiteError {
-            OK,
-            VERSION_MISMATCH,
-            CANNOT_ALLOCATE_TENSORS,
-            NOT_INITIALIZED,
-            INVOKE_ERROR
-        };
-
-        /**
-         * Eloquent interface to Tensorflow Lite for Microcontrollers
-         *
-         * @tparam inputSize
-         * @tparam outputSize
-         * @tparam tensorArenaSize how much memory to allocate to the tensors
-         */
-        template<size_t inputSize, size_t outputSize, size_t tensorArenaSize>
-        class TfLite {
-        public:
-            /**
-             * Contructor
-             * @param modelData a model as exported by tinymlgen
-             */
-            TfLite() :
-                failed(false) {
-            }
-
-            ~TfLite() {
-                delete reporter;
-                delete interpreter;
-            }
-
-            /**
-             * Inizialize NN
-             *
-             * @param modelData
-             * @return
-             */
-            bool begin(const unsigned char *modelData) {
-                tflite::ops::micro::AllOpsResolver resolver;
-                reporter = new tflite::MicroErrorReporter();
-
-                model = tflite::GetModel(modelData);
-
-                // assert model version and runtime version match
-                if (model->version() != TFLITE_SCHEMA_VERSION) {
-                    failed = true;
-                    error = VERSION_MISMATCH;
-
-                    reporter->Report(
-                        "Model provided is schema version %d not equal "
-                        "to supported version %d.",
-                        model->version(), TFLITE_SCHEMA_VERSION);
-
-                    return false;
-                }
-
-                interpreter = new tflite::MicroInterpreter(model, resolver, tensorArena, tensorArenaSize, reporter);
-
-                if (interpreter->AllocateTensors() != kTfLiteOk) {
-                    failed = true;
-                    error = CANNOT_ALLOCATE_TENSORS;
-
-                    return false;
-                }
-
-                input = interpreter->input(0);
-                output = interpreter->output(0);
-                error = OK;
-
-                return true;
-            }
-
-            /**
-             * Test if the initialization completed fine
-             */
-            bool initialized() {
-                return !failed;
-            }
-
-            /**
-             *
-             * @param input
-             * @param output
-             * @return
-             */
-            uint8_t predict(uint8_t *input, uint8_t *output = NULL) {
-                // abort if initialization failed
-                if (!initialized())
-                    return sqrt(-1);
-
-                memcpy(this->input->data.uint8, input, sizeof(uint8_t) * inputSize);
-
-                if (interpreter->Invoke() != kTfLiteOk) {
-                    reporter->Report("Inference failed");
-
-                    return sqrt(-1);
-                }
-
-                // copy output
-                if (output != NULL) {
-                    for (uint16_t i = 0; i < outputSize; i++)
-                        output[i] = this->output->data.uint8[i];
-                }
-
-                return this->output->data.uint8[0];
-            }
-
-            /**
-             * Run inference
-             * @return output[0], so you can use it directly if it's the only output
-             */
-            float predict(float *input, float *output = NULL) {
-                // abort if initialization failed
-                if (!initialized()) {
-                    error = NOT_INITIALIZED;
-
-                    return sqrt(-1);
-                }
-
-                // copy input
-                for (size_t i = 0; i < inputSize; i++)
-                    this->input->data.f[i] = input[i];
-
-                if (interpreter->Invoke() != kTfLiteOk) {
-                    error = INVOKE_ERROR;
-                    reporter->Report("Inference failed");
-
-                    return sqrt(-1);
-                }
-
-                // copy output
-                if (output != NULL) {
-                    for (uint16_t i = 0; i < outputSize; i++)
-                        output[i] = this->output->data.f[i];
-                }
-
-                return this->output->data.f[0];
-            }
-
-            /**
-             * Predict class
-             * @param input
-             * @return
-             */
-            uint8_t predictClass(float *input) {
-                float output[outputSize];
-
-                predict(input, output);
-
-                return probaToClass(output);
-            }
-
-            /**
-             * Get class with highest probability
-             * @param output
-             * @return
-             */
-            uint8_t probaToClass(float *output) {
-                uint8_t classIdx = 0;
-                float maxProba = output[0];
-
-                for (uint8_t i = 1; i < outputSize; i++) {
-                    if (output[i] > maxProba) {
-                        classIdx = i;
-                        maxProba = output[i];
-                    }
-                }
-
-                return classIdx;
-            }
-
-            /**
-             * Get error
-             * @return
-             */
-            TfLiteError getError() {
-                return error;
-            }
-
-            /**
-             * Get error message
-             * @return
-             */
-            const char* errorMessage() {
-                switch (error) {
-                    case OK:
-                        return "No error";
-                    case VERSION_MISMATCH:
-                        return "Version mismatch";
-                    case CANNOT_ALLOCATE_TENSORS:
-                        return "Cannot allocate tensors";
-                    case NOT_INITIALIZED:
-                        return "Interpreter has not been initialized";
-                    case INVOKE_ERROR:
-                        return "Interpreter invoke() returned an error";
-                    default:
-                        return "Unknown error";
-                }
-            }
-
-        protected:
-            bool failed;
-            TfLiteError error;
-            uint8_t tensorArena[tensorArenaSize];
-            tflite::ErrorReporter *reporter;
-            tflite::MicroInterpreter *interpreter;
-            TfLiteTensor *input;
-            TfLiteTensor *output;
-            const tflite::Model *model;
-        };
-    }
-}
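After this change, EloquentTinyML.h is only a platform dispatcher: the preprocessor selects TfLiteESP32.h or TfLiteARM.h, and user sketches keep a single include on both chip families. The interface shown in the deleted code (including the `predictClass()`/`probaToClass()` helpers) presumably moves into the shared backend; assuming it does, a classifier sketch would look the same on ESP32 and Cortex-M (`iris_model.h` is a hypothetical 4-input/3-output tinymlgen export):

```
#include <EloquentTinyML.h> // resolves to TfLiteESP32.h on ESP32, TfLiteARM.h elsewhere
#include "iris_model.h"     // hypothetical tinymlgen export

Eloquent::TinyML::TfLite<4, 3, 8*1024> ml;

void setup() {
    Serial.begin(115200);
    ml.begin(iris_model);
}

void loop() {
    float features[4] = {5.1, 3.5, 1.4, 0.2};

    // predictClass() runs inference, then returns the index
    // of the highest-probability output (argmax)
    Serial.print("predicted class: ");
    Serial.println(ml.predictClass(features));
    delay(1000);
}
```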

src/TfLiteARM.h
Lines changed: 28 additions & 0 deletions

@@ -0,0 +1,28 @@
+//
+// Created by Simone on 28/10/2021.
+//
+
+#ifndef ELOQUENTTINYML_TFLITEARM_H
+#define ELOQUENTTINYML_TFLITEARM_H
+
+#include "tensorflow_arm/tensorflow/lite/version.h"
+#include "tensorflow_arm/tensorflow/lite/schema/schema_generated.h"
+#include "tensorflow_arm/tensorflow/lite/micro/all_ops_resolver.h"
+#include "tensorflow_arm/tensorflow/lite/micro/micro_error_reporter.h"
+#include "tensorflow_arm/tensorflow/lite/micro/micro_interpreter.h"
+#include "TfLiteAbstract.h"
+
+
+namespace Eloquent {
+    namespace TinyML {
+
+        /**
+         * Run TensorFlow Lite models on ARM
+         */
+        template<size_t inputSize, size_t outputSize, size_t tensorArenaSize>
+        class TfLite : public TfLiteAbstract<tflite::AllOpsResolver, inputSize, outputSize, tensorArenaSize> {
+        };
+    }
+}
+
+#endif //ELOQUENTTINYML_TFLITEESP32_H
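TfLiteAbstract.h is not part of this excerpt, but the inheritance above pins down its template signature: the ops resolver is hoisted into a template parameter so the ARM and ESP32 wrappers can share one implementation. A speculative skeleton, inferred only from this diff and from the class deleted from EloquentTinyML.h:

```
// Hypothetical sketch of TfLiteAbstract.h -- inferred, not the actual file
namespace Eloquent {
    namespace TinyML {

        template<class OpsResolver, size_t inputSize, size_t outputSize, size_t tensorArenaSize>
        class TfLiteAbstract {
        public:
            bool begin(const unsigned char *modelData);         // as in the deleted class
            float predict(float *input, float *output = NULL);  // as in the deleted class

        protected:
            OpsResolver resolver;                  // tflite::AllOpsResolver on ARM
            uint8_t tensorArena[tensorArenaSize];  // working memory for the interpreter
        };
    }
}
```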
