
Commit 5363795

SDK version: v1.71.36
1 parent 15fd14e · commit 5363795

6 files changed (+39 −181 lines)


classifier/ei_run_classifier.h

Lines changed: 8 additions & 8 deletions
@@ -277,12 +277,12 @@ extern "C" EI_IMPULSE_ERROR process_impulse(ei_impulse_handle_t *handle,
     // smart pointer to features array
     std::unique_ptr<ei_feature_t[]> features_ptr(new ei_feature_t[block_num]);
     ei_feature_t* features = features_ptr.get();
-
+
     if (features == nullptr) {
         ei_printf("ERR: Out of memory, can't allocate features\n");
         return EI_IMPULSE_ALLOC_FAILED;
     }
-
+
     memset(features, 0, sizeof(ei_feature_t) * block_num);

     // have it outside of the loop to avoid going out of scope
@@ -304,12 +304,12 @@ extern "C" EI_IMPULSE_ERROR process_impulse(ei_impulse_handle_t *handle,

         matrix_ptrs[ix] = std::unique_ptr<ei::matrix_t>(new ei::matrix_t(1, block.n_output_features));
         if (matrix_ptrs[ix] == nullptr) {
-            ei_printf("ERR: Out of memory, can't allocate matrix_ptrs[%lu]\n", ix);
+            ei_printf("ERR: Out of memory, can't allocate matrix_ptrs[%lu]\n", (unsigned long)ix);
             return EI_IMPULSE_ALLOC_FAILED;
         }

         if (matrix_ptrs[ix]->buffer == nullptr) {
-            ei_printf("ERR: Out of memory, can't allocate matrix_ptrs[%lu]\n", ix);
+            ei_printf("ERR: Out of memory, can't allocate matrix_ptrs[%lu]\n", (unsigned long)ix);
             delete[] matrix_ptrs;
             return EI_IMPULSE_ALLOC_FAILED;
         }
@@ -553,18 +553,18 @@ extern "C" EI_IMPULSE_ERROR process_impulse_continuous(ei_impulse_handle_t *handle,
     for (size_t ix = 0; ix < impulse->dsp_blocks_size; ix++) {
         ei_model_dsp_t block = impulse->dsp_blocks[ix];
         matrix_ptrs[ix] = std::unique_ptr<ei::matrix_t>(new ei::matrix_t(1, block.n_output_features));
-
+
         if (matrix_ptrs[ix] == nullptr) {
-            ei_printf("ERR: Out of memory, can't allocate matrix_ptrs[%lu]\n", ix);
+            ei_printf("ERR: Out of memory, can't allocate matrix_ptrs[%lu]\n", (unsigned long)ix);
             return EI_IMPULSE_ALLOC_FAILED;
         }

         if (matrix_ptrs[ix]->buffer == nullptr) {
-            ei_printf("ERR: Out of memory, can't allocate matrix_ptrs[%lu]\n", ix);
+            ei_printf("ERR: Out of memory, can't allocate matrix_ptrs[%lu]\n", (unsigned long)ix);
             delete[] matrix_ptrs;
             return EI_IMPULSE_ALLOC_FAILED;
         }
-
+
         features[ix].matrix = matrix_ptrs[ix].get();
         features[ix].blockId = block.blockId;
classifier/inferencing_engines/tensorrt.h

Lines changed: 18 additions & 158 deletions
@@ -36,163 +36,30 @@
 #define _EI_CLASSIFIER_INFERENCING_ENGINE_TENSORRT_H_

 #if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSORRT)
-
-#if (EI_CLASSIFIER_HAS_ANOMALY == EI_ANOMALY_TYPE_VISUAL_GMM)
-
-#include <thread>
-#include "tensorflow-lite/tensorflow/lite/c/common.h"
-#include "tensorflow-lite/tensorflow/lite/interpreter.h"
-#include "tensorflow-lite/tensorflow/lite/kernels/register.h"
-#include "tensorflow-lite/tensorflow/lite/model.h"
-#include "tensorflow-lite/tensorflow/lite/optional_debug_tools.h"
-#include "edge-impulse-sdk/tensorflow/lite/kernels/custom/tree_ensemble_classifier.h"
-#include "edge-impulse-sdk/classifier/ei_model_types.h"
-#include "edge-impulse-sdk/classifier/inferencing_engines/tflite_helper.h"
-
-typedef struct {
-    std::unique_ptr<tflite::FlatBufferModel> model;
-    std::unique_ptr<tflite::Interpreter> interpreter;
-} ei_tflite_state_t;
-
-std::map<uint32_t, ei_tflite_state_t*> ei_tflite_instances;
-
-/**
- * Construct a tflite interpreter (creates it if needed)
- */
-static EI_IMPULSE_ERROR get_interpreter(ei_learning_block_config_tflite_graph_t *block_config, tflite::Interpreter **interpreter) {
-    // not in the map yet...
-    if (!ei_tflite_instances.count(block_config->block_id)) {
-        ei_config_tflite_graph_t *graph_config = (ei_config_tflite_graph_t*)block_config->graph_config;
-        ei_tflite_state_t *new_state = new ei_tflite_state_t();
-
-        auto new_model = tflite::FlatBufferModel::BuildFromBuffer((const char*)graph_config->model, graph_config->model_size);
-        new_state->model = std::move(new_model);
-        if (!new_state->model) {
-            ei_printf("Failed to build TFLite model from buffer\n");
-            return EI_IMPULSE_TFLITE_ERROR;
-        }
-
-        tflite::ops::builtin::BuiltinOpResolver resolver;
-#if EI_CLASSIFIER_HAS_TREE_ENSEMBLE_CLASSIFIER
-        resolver.AddCustom("TreeEnsembleClassifier",
-            tflite::ops::custom::Register_TREE_ENSEMBLE_CLASSIFIER());
-#endif
-        tflite::InterpreterBuilder builder(*new_state->model, resolver);
-        builder(&new_state->interpreter);
-
-        if (!new_state->interpreter) {
-            ei_printf("Failed to construct interpreter\n");
-            return EI_IMPULSE_TFLITE_ERROR;
-        }
-
-        if (new_state->interpreter->AllocateTensors() != kTfLiteOk) {
-            ei_printf("AllocateTensors failed\n");
-            return EI_IMPULSE_TFLITE_ERROR;
-        }
-
-        int hw_thread_count = (int)std::thread::hardware_concurrency();
-        hw_thread_count -= 1; // leave one thread free for the other application
-        if (hw_thread_count < 1) {
-            hw_thread_count = 1;
-        }
-
-        if (new_state->interpreter->SetNumThreads(hw_thread_count) != kTfLiteOk) {
-            ei_printf("SetNumThreads failed\n");
-            return EI_IMPULSE_TFLITE_ERROR;
-        }
-
-        ei_tflite_instances.insert(std::make_pair(block_config->block_id, new_state));
-    }
-
-    auto tflite_state = ei_tflite_instances[block_config->block_id];
-    *interpreter = tflite_state->interpreter.get();
-    return EI_IMPULSE_OK;
-}
-
-EI_IMPULSE_ERROR run_nn_inference_tflite_full(
-    const ei_impulse_t *impulse,
-    ei_feature_t *fmatrix,
-    uint32_t learn_block_index,
-    uint32_t* input_block_ids,
-    uint32_t input_block_ids_size,
-    ei_impulse_result_t *result,
-    void *config_ptr,
-    bool debug = false)
-{
-    ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)config_ptr;
-
-    tflite::Interpreter *interpreter;
-    auto interpreter_ret = get_interpreter(block_config, &interpreter);
-    if (interpreter_ret != EI_IMPULSE_OK) {
-        return interpreter_ret;
-    }
-
-    TfLiteTensor *input = interpreter->input_tensor(0);
-    TfLiteTensor *output = interpreter->output_tensor(block_config->output_data_tensor);
-
-    if (!input) {
-        return EI_IMPULSE_INPUT_TENSOR_WAS_NULL;
-    }
-    if (!output) {
-        return EI_IMPULSE_OUTPUT_TENSOR_WAS_NULL;
-    }
-
-    size_t mtx_size = impulse->dsp_blocks_size + impulse->learning_blocks_size;
-    auto input_res = fill_input_tensor_from_matrix(fmatrix, input, input_block_ids, input_block_ids_size, mtx_size);
-    if (input_res != EI_IMPULSE_OK) {
-        return input_res;
-    }
-
-    uint64_t ctx_start_us = ei_read_timer_us();
-
-    TfLiteStatus status = interpreter->Invoke();
-    if (status != kTfLiteOk) {
-        ei_printf("ERR: interpreter->Invoke() failed with %d\n", status);
-        return EI_IMPULSE_TFLITE_ERROR;
-    }
-
-    uint64_t ctx_end_us = ei_read_timer_us();
-
-    result->timing.classification_us = ctx_end_us - ctx_start_us;
-    result->timing.classification = (int)(result->timing.classification_us / 1000);
-
-    if (result->copy_output) {
-        auto output_res = fill_output_matrix_from_tensor(output, fmatrix[impulse->dsp_blocks_size + learn_block_index].matrix);
-        if (output_res != EI_IMPULSE_OK) {
-            return output_res;
-        }
-    }
-
-    if (debug) {
-        ei_printf("Predictions (time: %d ms.):\n", result->timing.classification);
-    }
-
-    TfLiteTensor *scores_tensor = interpreter->output_tensor(block_config->output_score_tensor);
-    TfLiteTensor *labels_tensor = interpreter->output_tensor(block_config->output_labels_tensor);
-
-    EI_IMPULSE_ERROR fill_res = fill_result_struct_from_output_tensor_tflite(
-        impulse, block_config, output, labels_tensor, scores_tensor, result, debug);
-
-    if (fill_res != EI_IMPULSE_OK) {
-        return fill_res;
-    }
-
-    // on Linux we're not worried about free'ing (for now)
-
-    return EI_IMPULSE_OK;
-}
-#endif // (EI_CLASSIFIER_HAS_ANOMALY == EI_ANOMALY_TYPE_VISUAL_GMM)
-
 #include "model-parameters/model_metadata.h"

 #include "edge-impulse-sdk/porting/ei_classifier_porting.h"
 #include "edge-impulse-sdk/classifier/ei_fill_result_struct.h"

+#if defined(__GNUC__)
+#if (__GNUC__ > 8)
+// GCC newer than 8
+// has std::filesystem
+#include <filesystem>
+namespace fs = std::filesystem;
+#else
+// GCC 8 or lower only ships <experimental/filesystem>
+#include <experimental/filesystem>
+namespace fs = std::experimental::filesystem;
+#endif
+#else
+#error "This code requires GCC."
+#endif
+
 #include <stdio.h>
 #include <string.h>
 #include <unistd.h>
 #include <string>
-#include <filesystem>
 #include <stdlib.h>
 #include <map>
 #include "tflite/linux-jetson-nano/libeitrt.h"
@@ -260,7 +127,7 @@ EI_IMPULSE_ERROR write_model_to_file(
             impulse->learning_blocks[learn_block_index].blockId);
     }
     else {
-        std::filesystem::path p(current_exe_path);
+        fs::path p(current_exe_path);
         snprintf(
             model_file_name,
             PATH_MAX,
@@ -317,13 +184,6 @@ EI_IMPULSE_ERROR run_nn_inference(
     ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)config_ptr;
     ei_config_tflite_graph_t *graph_config = (ei_config_tflite_graph_t*)block_config->graph_config;

-#if (EI_CLASSIFIER_HAS_ANOMALY == EI_ANOMALY_TYPE_VISUAL_GMM)
-    if (block_config->classification_mode == EI_CLASSIFIER_CLASSIFICATION_MODE_VISUAL_ANOMALY
-        && !result->copy_output) {
-        return run_nn_inference_tflite_full(impulse, fmatrix, learn_block_index, input_block_ids, input_block_ids_size, result, config_ptr);
-    }
-#endif
-
 #if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1
 #error "TensorRT requires an unquantized network"
 #endif
@@ -338,8 +198,8 @@ EI_IMPULSE_ERROR run_nn_inference(
     libeitrt::setMaxWorkspaceSize(ei_trt_handle, 1<<29); // 512 MB

     if (debug) {
-        ei_printf("Using EI TensorRT lib v%d.%d.%d\r\n", libeitrt::getMajorVersion(ei_trt_handle),
-            libeitrt::getMinorVersion(ei_trt_handle), libeitrt::getPatchVersion(ei_trt_handle));
+        ei_printf("Using EI TensorRT lib v%d.%d.%d\r\n", libeitrt::getMajorVersion(ei_trt_handle),
+                  libeitrt::getMinorVersion(ei_trt_handle), libeitrt::getPatchVersion(ei_trt_handle));
     }
 }
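
Note on the filesystem change above: the previous bare #include <filesystem> fails to compile on pre-9 GCC toolchains (e.g. the GCC 7 shipped on older Jetson Nano images), where the library is only available as <experimental/filesystem>. A standalone sketch of the same version gate (hypothetical path; pre-GCC-9 builds also need -lstdc++fs at link time):

    #include <cstdio>

    #if defined(__GNUC__) && (__GNUC__ > 8)
    #include <filesystem>
    namespace fs = std::filesystem;
    #else
    // Older GCC: the Filesystem TS header and namespace
    #include <experimental/filesystem>
    namespace fs = std::experimental::filesystem;
    #endif

    int main() {
        fs::path p("/opt/models/impulse.engine"); // hypothetical path
        std::printf("dir=%s file=%s\n",
                    p.parent_path().c_str(), p.filename().c_str());
        return 0;
    }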

classifier/postprocessing/alignment/ei_alignment.hpp

Lines changed: 10 additions & 6 deletions
@@ -85,10 +85,12 @@ class JonkerVolgenantAlignment {
         }

         std::vector<std::tuple<int, int, float>> matches;
+        size_t num_iterations = traces.size() > detections.size() ? detections.size() : traces.size();
+
+        for (size_t i = 0; i < num_iterations; i++) {
+            size_t trace_idx = alignments_a[i];
+            size_t detection_idx = alignments_b[i];

-        for (size_t i = 0; i < traces.size(); i++) {
-            size_t trace_idx = i;
-            size_t detection_idx = alignments_b[alignments_a[i]];
             if (use_iou) {
                 float iou = 1 - cost_mtx[trace_idx * detections.size() + detection_idx];
                 if (iou > threshold) {
@@ -124,13 +126,15 @@ class GreedyAlignment {
         std::vector<std::tuple<int, int, float>> alignments;
         for (size_t trace_idx = 0; trace_idx < traces.size(); ++trace_idx) {
             for (size_t detection_idx = 0; detection_idx < detections.size(); ++detection_idx) {
+                float cost = 0.0;
                 if (use_iou) {
                     float iou = intersection_over_union(traces[trace_idx], detections[detection_idx]);
+                    cost = 1 - iou;
                     if (iou > threshold) {
-                        alignments.emplace_back(trace_idx, detection_idx, 1 - iou);
+                        alignments.emplace_back(trace_idx, detection_idx, cost);
                     }
                 } else {
-                    float cost = centroid_euclidean_distance(traces[trace_idx], detections[detection_idx]);
+                    cost = centroid_euclidean_distance(traces[trace_idx], detections[detection_idx]);
                     if (cost < threshold) {
                         alignments.emplace_back(trace_idx, detection_idx, cost);
                     }
@@ -150,7 +154,7 @@ class GreedyAlignment {
             float cost = std::get<2>(alignments[i]);

             if (trace_idxs_matched.find(trace_idx) == trace_idxs_matched.end() && detection_idxs_matched.find(detection_idx) == detection_idxs_matched.end()) {
-                // (1 - cost) to get iou
+                // calculate iou or simply use the distance
                 matches.emplace_back(trace_idx, detection_idx, use_iou ? 1 - cost : cost);
                 trace_idxs_matched.insert(trace_idx);
                 if (trace_idxs_matched.size() == traces.size()) return matches;

classifier/postprocessing/ei_object_tracking.h

Lines changed: 1 addition & 1 deletion
@@ -384,7 +384,7 @@ class Tracker {
 private:
     uint32_t trace_seq_id;
     uint32_t t;
-    GreedyAlignment alignment;
+    JonkerVolgenantAlignment alignment;
 };

 EI_IMPULSE_ERROR init_object_tracking(ei_impulse_handle_t *handle, void** state, void *config)
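
Note on the switch above: greedy matching commits to the cheapest pair first and can force an expensive leftover pair, while Jonker-Volgenant minimizes the total assignment cost globally. A toy 2x2 cost matrix (illustrative values only, not from the SDK) where the two strategies diverge:

    #include <cstdio>

    int main() {
        // cost[trace][detection]
        float cost[2][2] = { {1.0f, 2.0f},
                             {1.1f, 9.0f} };
        // Greedy takes (0,0)=1.0 first, leaving (1,1)=9.0: total 10.0.
        // The optimal assignment is (0,1)=2.0 plus (1,0)=1.1: total 3.1,
        // which is what a global solver like Jonker-Volgenant finds.
        std::printf("greedy: %.1f, optimal: %.1f\n",
                    cost[0][0] + cost[1][1], cost[0][1] + cost[1][0]);
        return 0;
    }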

porting/ei_classifier_porting.h

Lines changed: 2 additions & 8 deletions
@@ -363,16 +363,10 @@ void ei_free(void *ptr);
 #endif
 // End load porting layer depending on target

-// Additional configuration for specific architecture
-#if defined(__CORTEX_M)
+// Additional configuration for the Armv8.1-M architecture, i.e. CM55 and CM85
+#if defined(__ARM_ARCH) && (__ARM_ARCH >= 8)

-#if (__CORTEX_M == 55U)
-#define EI_MAX_OVERFLOW_BUFFER_COUNT 15
-#endif
-
-#if (__CORTEX_M == 85U)
 #define EI_MAX_OVERFLOW_BUFFER_COUNT 50
-#endif

 #endif
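
Note on the gate above: the overflow-buffer count is now keyed on __ARM_ARCH instead of individual Cortex-M part numbers, so both CM55 and CM85 get the same value of 50 (previously 15 and 50 respectively). A hypothetical consumer of the macro, with an assumed fallback for targets the gate leaves undefined:

    #include <cstdio>

    // Mirror of the gate added above (illustrative only).
    #if defined(__ARM_ARCH) && (__ARM_ARCH >= 8)
    #define EI_MAX_OVERFLOW_BUFFER_COUNT 50
    #endif

    // Assumed fallback; the real SDK default may differ.
    #ifndef EI_MAX_OVERFLOW_BUFFER_COUNT
    #define EI_MAX_OVERFLOW_BUFFER_COUNT 10
    #endif

    int main() {
        std::printf("overflow buffer slots: %d\n", EI_MAX_OVERFLOW_BUFFER_COUNT);
        return 0;
    }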
