-
Notifications
You must be signed in to change notification settings - Fork 27
/
Copy path hello_world.cc
71 lines (60 loc) · 2.48 KB
/
hello_world.cc
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
#include <iostream> // for check output
#include "tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"
#include "tensorflow/lite/micro/simple_memory_allocator.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/version.h"
// Create an area of memory to use for input, output, and intermediate arrays.
// The size of this will depend on the model you're using, and may need to be
// determined by experimentation.
static const int tensor_arena_size = 6 * 1024;
static uint8_t tensor_arena[tensor_arena_size];
// The model flatbuffer bytes, defined in a separately-compiled source file.
extern const unsigned char g_model[];
// extern const int g_model_len;
// Set up logging.
static tflite::ErrorReporter* error_reporter = nullptr;
// This pulls in all the operation implementations we need.
static tflite::AllOpsResolver* resolver = nullptr;
// Mapped view of g_model; set by init(). NOTE(review): left nullptr if init()
// has not run or bailed out early — callers must not assume it is valid.
static const tflite::Model* model = nullptr;
// Interpreter built by init(); remains nullptr when init() fails (schema
// mismatch or AllocateTensors failure) — see the early returns in init().
static tflite::MicroInterpreter* interpreter = nullptr;
void init(void) {
static tflite::MicroErrorReporter micro_error_reporter;
error_reporter = µ_error_reporter;
// Map the model into a usable data structure. This doesn't involve any
// copying or parsing, it's a very lightweight operation.
model = ::tflite::GetModel(g_model);
if (model->version() != TFLITE_SCHEMA_VERSION) {
TF_LITE_REPORT_ERROR(error_reporter,
"Model provided is schema version %d not equal "
"to supported version %d.\n",
model->version(), TFLITE_SCHEMA_VERSION);
return;
}
static tflite::AllOpsResolver local_resolver;
resolver = &local_resolver;
// Build an interpreter to run the model with.
static tflite::MicroInterpreter static_interpreter(
model, *resolver, tensor_arena, tensor_arena_size, error_reporter);
interpreter = &static_interpreter;
TfLiteStatus allocate_status = interpreter->AllocateTensors();
if (allocate_status != kTfLiteOk) {
TF_LITE_REPORT_ERROR(error_reporter, "AllocateTensors() failed");
return;
}
}
// Run one inference: feed a single float input (~PI/2), invoke the model,
// and print the first output value to stderr.
//
// Fixes over the original:
//  - init() can return early (schema mismatch, AllocateTensors failure),
//    leaving `interpreter` == nullptr; the original dereferenced it
//    unconditionally and would crash. Guard and bail out instead.
//  - The original printed the output tensor even when Invoke() failed;
//    now we return after reporting, since the output is not meaningful.
void run() {
  if (interpreter == nullptr) {
    // init() failed (or was never called) — nothing to run.
    if (error_reporter != nullptr) {
      TF_LITE_REPORT_ERROR(error_reporter, "Interpreter not initialized");
    }
    return;
  }
  TfLiteTensor* model_input = interpreter->input(0);
  model_input->data.f[0] = 1.57f;  // roughly PI/2
  TfLiteStatus invoke_status = interpreter->Invoke();
  if (invoke_status != kTfLiteOk) {
    TF_LITE_REPORT_ERROR(error_reporter, "Invoke failed");
    return;  // output tensor contents are undefined after a failed Invoke
  }
  TfLiteTensor* model_output = interpreter->output(0);
  std::cerr << "result " << model_output->data.f[0] << std::endl;
}
// Entry point: set up the TFLite Micro runtime once, then perform a single
// inference. Command-line arguments are accepted but deliberately ignored.
int main(int argc, char** argv) {
  (void)argc;
  (void)argv;
  init();
  run();
  return 0;
}