@@ -36,163 +36,30 @@
 #define _EI_CLASSIFIER_INFERENCING_ENGINE_TENSORRT_H_
 
 #if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSORRT)
-
-#if (EI_CLASSIFIER_HAS_ANOMALY == EI_ANOMALY_TYPE_VISUAL_GMM)
-
-#include <thread>
-#include "tensorflow-lite/tensorflow/lite/c/common.h"
-#include "tensorflow-lite/tensorflow/lite/interpreter.h"
-#include "tensorflow-lite/tensorflow/lite/kernels/register.h"
-#include "tensorflow-lite/tensorflow/lite/model.h"
-#include "tensorflow-lite/tensorflow/lite/optional_debug_tools.h"
-#include "edge-impulse-sdk/tensorflow/lite/kernels/custom/tree_ensemble_classifier.h"
-#include "edge-impulse-sdk/classifier/ei_model_types.h"
-#include "edge-impulse-sdk/classifier/inferencing_engines/tflite_helper.h"
-
-typedef struct {
-    std::unique_ptr<tflite::FlatBufferModel> model;
-    std::unique_ptr<tflite::Interpreter> interpreter;
-} ei_tflite_state_t;
-
-std::map<uint32_t, ei_tflite_state_t*> ei_tflite_instances;
-
-/**
- * Construct a tflite interpreter (creates it if needed)
- */
-static EI_IMPULSE_ERROR get_interpreter(ei_learning_block_config_tflite_graph_t *block_config, tflite::Interpreter **interpreter) {
-    // not in the map yet...
-    if (!ei_tflite_instances.count(block_config->block_id)) {
-        ei_config_tflite_graph_t *graph_config = (ei_config_tflite_graph_t*)block_config->graph_config;
-        ei_tflite_state_t *new_state = new ei_tflite_state_t();
-
-        auto new_model = tflite::FlatBufferModel::BuildFromBuffer((const char*)graph_config->model, graph_config->model_size);
-        new_state->model = std::move(new_model);
-        if (!new_state->model) {
-            ei_printf("Failed to build TFLite model from buffer\n");
-            return EI_IMPULSE_TFLITE_ERROR;
-        }
-
-        tflite::ops::builtin::BuiltinOpResolver resolver;
-#if EI_CLASSIFIER_HAS_TREE_ENSEMBLE_CLASSIFIER
-        resolver.AddCustom("TreeEnsembleClassifier",
-            tflite::ops::custom::Register_TREE_ENSEMBLE_CLASSIFIER());
-#endif
-        tflite::InterpreterBuilder builder(*new_state->model, resolver);
-        builder(&new_state->interpreter);
-
-        if (!new_state->interpreter) {
-            ei_printf("Failed to construct interpreter\n");
-            return EI_IMPULSE_TFLITE_ERROR;
-        }
-
-        if (new_state->interpreter->AllocateTensors() != kTfLiteOk) {
-            ei_printf("AllocateTensors failed\n");
-            return EI_IMPULSE_TFLITE_ERROR;
-        }
-
-        int hw_thread_count = (int)std::thread::hardware_concurrency();
-        hw_thread_count -= 1; // leave one thread free for the other application
-        if (hw_thread_count < 1) {
-            hw_thread_count = 1;
-        }
-
-        if (new_state->interpreter->SetNumThreads(hw_thread_count) != kTfLiteOk) {
-            ei_printf("SetNumThreads failed\n");
-            return EI_IMPULSE_TFLITE_ERROR;
-        }
-
-        ei_tflite_instances.insert(std::make_pair(block_config->block_id, new_state));
-    }
-
-    auto tflite_state = ei_tflite_instances[block_config->block_id];
-    *interpreter = tflite_state->interpreter.get();
-    return EI_IMPULSE_OK;
-}
-
-EI_IMPULSE_ERROR run_nn_inference_tflite_full(
-    const ei_impulse_t *impulse,
-    ei_feature_t *fmatrix,
-    uint32_t learn_block_index,
-    uint32_t *input_block_ids,
-    uint32_t input_block_ids_size,
-    ei_impulse_result_t *result,
-    void *config_ptr,
-    bool debug = false)
-{
-    ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)config_ptr;
-
-    tflite::Interpreter *interpreter;
-    auto interpreter_ret = get_interpreter(block_config, &interpreter);
-    if (interpreter_ret != EI_IMPULSE_OK) {
-        return interpreter_ret;
-    }
-
-    TfLiteTensor *input = interpreter->input_tensor(0);
-    TfLiteTensor *output = interpreter->output_tensor(block_config->output_data_tensor);
-
-    if (!input) {
-        return EI_IMPULSE_INPUT_TENSOR_WAS_NULL;
-    }
-    if (!output) {
-        return EI_IMPULSE_OUTPUT_TENSOR_WAS_NULL;
-    }
-
-    size_t mtx_size = impulse->dsp_blocks_size + impulse->learning_blocks_size;
-    auto input_res = fill_input_tensor_from_matrix(fmatrix, input, input_block_ids, input_block_ids_size, mtx_size);
-    if (input_res != EI_IMPULSE_OK) {
-        return input_res;
-    }
-
-    uint64_t ctx_start_us = ei_read_timer_us();
-
-    TfLiteStatus status = interpreter->Invoke();
-    if (status != kTfLiteOk) {
-        ei_printf("ERR: interpreter->Invoke() failed with %d\n", status);
-        return EI_IMPULSE_TFLITE_ERROR;
-    }
-
-    uint64_t ctx_end_us = ei_read_timer_us();
-
-    result->timing.classification_us = ctx_end_us - ctx_start_us;
-    result->timing.classification = (int)(result->timing.classification_us / 1000);
-
-    if (result->copy_output) {
-        auto output_res = fill_output_matrix_from_tensor(output, fmatrix[impulse->dsp_blocks_size + learn_block_index].matrix);
-        if (output_res != EI_IMPULSE_OK) {
-            return output_res;
-        }
-    }
-
-    if (debug) {
-        ei_printf("Predictions (time: %d ms.):\n", result->timing.classification);
-    }
-
-    TfLiteTensor *scores_tensor = interpreter->output_tensor(block_config->output_score_tensor);
-    TfLiteTensor *labels_tensor = interpreter->output_tensor(block_config->output_labels_tensor);
-
-    EI_IMPULSE_ERROR fill_res = fill_result_struct_from_output_tensor_tflite(
-        impulse, block_config, output, labels_tensor, scores_tensor, result, debug);
-
-    if (fill_res != EI_IMPULSE_OK) {
-        return fill_res;
-    }
-
-    // on Linux we're not worried about free'ing (for now)
-
-    return EI_IMPULSE_OK;
-}
-#endif // (EI_CLASSIFIER_HAS_ANOMALY == EI_ANOMALY_TYPE_VISUAL_GMM)
-
 #include "model-parameters/model_metadata.h"
 
 #include "edge-impulse-sdk/porting/ei_classifier_porting.h"
 #include "edge-impulse-sdk/classifier/ei_fill_result_struct.h"
 
+#if defined(__GNUC__)
+#if (__GNUC__ > 8)
+// GCC 9 and later ship std::filesystem in <filesystem>
+#include <filesystem>
+namespace fs = std::filesystem;
+#else
+// GCC 8 and earlier: fall back to the experimental Filesystem TS
+#include <experimental/filesystem>
+namespace fs = std::experimental::filesystem;
+#endif
+#else
+#error "This code requires GCC."
+#endif
+
 #include <stdio.h>
 #include <string.h>
 #include <unistd.h>
 #include <string>
-#include <filesystem>
 #include <stdlib.h>
 #include <map>
 #include "tflite/linux-jetson-nano/libeitrt.h"
@@ -260,7 +127,7 @@ EI_IMPULSE_ERROR write_model_to_file(
             impulse->learning_blocks[learn_block_index].blockId);
     }
     else {
-        std::filesystem::path p(current_exe_path);
+        fs::path p(current_exe_path);
         snprintf(
             model_file_name,
             PATH_MAX,
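write_model_to_file() derives the TensorRT engine file name from the running executable's location; the change above only swaps the fully qualified std::filesystem::path for the new fs alias. A hedged sketch of the surrounding pattern, where the helper name, the format string, the ".engine" suffix, and the block_id parameter are illustrative assumptions rather than the SDK's actual code:

#include <climits>   // PATH_MAX
#include <cstdio>
#include <filesystem>

namespace fs = std::filesystem;  // stand-in for the GCC-version shim above

// Hypothetical: write the engine next to the binary as "<dir>/<name>-<id>.engine".
static void build_engine_path(const char *current_exe_path, unsigned block_id,
                              char model_file_name[PATH_MAX]) {
    fs::path p(current_exe_path);
    snprintf(model_file_name, PATH_MAX, "%s/%s-%u.engine",
             p.parent_path().c_str(),  // directory holding the executable
             p.stem().c_str(),         // executable name without extension
             block_id);
}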
@@ -317,13 +184,6 @@ EI_IMPULSE_ERROR run_nn_inference(
     ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)config_ptr;
     ei_config_tflite_graph_t *graph_config = (ei_config_tflite_graph_t*)block_config->graph_config;
 
-#if (EI_CLASSIFIER_HAS_ANOMALY == EI_ANOMALY_TYPE_VISUAL_GMM)
-    if (block_config->classification_mode == EI_CLASSIFIER_CLASSIFICATION_MODE_VISUAL_ANOMALY
-        && !result->copy_output) {
-        return run_nn_inference_tflite_full(impulse, fmatrix, learn_block_index, input_block_ids, input_block_ids_size, result, config_ptr);
-    }
-#endif
-
 #if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1
 #error "TensorRT requires an unquantized network"
 #endif
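The quantization check above turns an unsupported build configuration into an immediate compile-time failure instead of silent runtime misbehavior. The same guard pattern in isolation, with hypothetical macro names:

#define MY_MODEL_QUANTIZED 0   // flip to 1 to watch the guard fire at compile time

#if MY_MODEL_QUANTIZED == 1
#error "TensorRT requires an unquantized (float32) network"
#endif

int main() { return 0; }       // only compiles while the guard is satisfied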
@@ -338,8 +198,8 @@ EI_IMPULSE_ERROR run_nn_inference(
         libeitrt::setMaxWorkspaceSize(ei_trt_handle, 1 << 29); // 512 MB
 
         if (debug) {
-            ei_printf("Using EI TensorRT lib v%d.%d.%d\r\n", libeitrt::getMajorVersion(ei_trt_handle),
-                libeitrt::getMinorVersion(ei_trt_handle), libeitrt::getPatchVersion(ei_trt_handle));
+            ei_printf("Using EI TensorRT lib v%d.%d.%d\r\n", libeitrt::getMajorVersion(ei_trt_handle),
+                      libeitrt::getMinorVersion(ei_trt_handle), libeitrt::getPatchVersion(ei_trt_handle));
         }
     }
 
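As a sanity check on the workspace comment: 1 << 29 = 2^29 = 536,870,912 bytes = 512 MiB. A one-line compile-time verification, offered purely as an illustrative sketch:

static_assert((1u << 29) == 512u * 1024u * 1024u, "1 << 29 is exactly 512 MiB");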