raspberry pi 1でtensorflow lite その12
4490 ワード
概要
raspberry pi 1でtensorflow liteやってみた。
kerasモデルからtfliteファイルを作ってラズパイで、実行。
データセットは、九九.
Makefileを書く。
# Include paths: the TensorFlow source tree plus the flatbuffers headers
# fetched by the TF Lite Makefile build ("download_dependencies.sh").
CXXFLAGS ?= -I../tensorflow -I../tensorflow/tensorflow/lite/tools/make/downloads/flatbuffers/include
# Linker search path for the static TF Lite library built for the
# Raspberry Pi 1 (ARMv6) target.
LDFLAGS ?= -L../tensorflow/tensorflow/lite/tools/make/gen/rpi_armv6l/lib
.PHONY: all clean
all: lite2
# NOTE(review): recipe lines below lost their leading TAB when pasted into
# the article — restore a real TAB before each command for make to accept it.
lite2: main.cpp
g++ --std=c++11 main.cpp -O2 $(CXXFLAGS) $(LDFLAGS) -ltensorflow-lite -lstdc++ -lpthread -ldl -lm -o lite2
clean:
rm -f lite2
Makeして実行。
#include <vector>
#include <chrono>
#include <iostream>
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include <iostream>
#include <fstream>
#include <stdlib.h>
using namespace std;
// True when a TF Lite call finished with anything other than kTfLiteOk.
bool is_error(TfLiteStatus const & status)
{
    const bool succeeded = (status == kTfLiteOk);
    return !succeeded;
}
// Entry point: loads the "kuku" (9x9 multiplication table) TF Lite model
// and prints the table as predicted by the network.
//
// Input encoding : one byte k = j * 16 + i (row j in the high nibble,
//                  column i in the low nibble), fed to the model as
//                  eight 0.0/1.0 floats, least-significant bit first.
// Output decoding: seven floats thresholded at 0.5, reassembled LSB-first
//                  into the predicted product.
// Returns 0 on success, -1 on any load/build/allocate/invoke failure.
int main(int argc, char const * argv[])
{
    std::string a = "kuku.tflite";
    std::cout << "0: Loading model: " << a << std::endl;
    std::unique_ptr<tflite::FlatBufferModel> model =
        tflite::FlatBufferModel::BuildFromFile(a.c_str());
    if (!model)
    {
        std::cerr << "0: Failed to load the model." << std::endl;
        return -1;
    }
    std::cout << "1: The model was loaded successful." << std::endl;
    tflite::ops::builtin::BuiltinOpResolver resolver;
    std::unique_ptr<tflite::Interpreter> interpreter;
    tflite::InterpreterBuilder(* model, resolver)(& interpreter);
    // Bug fix: InterpreterBuilder leaves `interpreter` null on failure; the
    // original dereferenced it unconditionally.
    if (!interpreter)
    {
        std::cerr << "1: Failed to build the interpreter." << std::endl;
        return -1;
    }
    std::cout << "2: interpreter was build successful." << std::endl;
    TfLiteStatus status = interpreter->AllocateTensors();
    if (is_error(status))
    {
        std::cerr << "2: Failed to allocate the memory for tensors." << std::endl;
        return -1;
    }
    std::cout << "3: The model was allocated successful." << std::endl;
    float * in = interpreter->typed_input_tensor<float>(0);
    float * out = interpreter->typed_output_tensor<float>(0);
    // Column header row: 1..9.
    std::printf (" ");
    for (int i = 1; i < 10; i++)
    {
        std::printf ("%2d ", i);
    }
    std::printf ("\n");
    for (int j = 1; j < 10; j++)
    {
        std::printf ("%2d ", j);
        for (int i = 1; i < 10; i++)
        {
            // Pack (j, i) into one byte: high nibble = j, low nibble = i.
            int k = j * 16 + i;
            // Feed each of the 8 bits of k as a 0/1 float, LSB first.
            for (int b = 0; b < 8; b++)
            {
                in[b] = (k >> b) & 0x1 ? 1.0f : 0.0f;
            }
            status = interpreter->Invoke();
            if (is_error(status))
            {
                std::cerr << "3: Failed to invoke the interpreter." << std::endl;
                return -1;
            }
            // Decode the 7 output bits back into the predicted product.
            k = 0;
            for (int b = 0; b < 7; b++)
            {
                if (out[b] > 0.5f) k += 1 << b;
            }
            std::printf ("%2d ", k);
        }
        std::printf ("\n");
    }
    std::cout << "ok" << std::endl;
    return 0;
}
結果
0: Loading model: kuku.tflite
1: The model was loaded successful.
2: interpreter was build successful.
3: The model was allocated successful.
1 2 3 4 5 6 7 8 9
1 1 2 3 4 5 6 7 8 9
2 2 4 6 8 10 12 14 16 18
3 3 6 1 12 15 2 21 24 27
4 4 8 12 16 20 24 28 32 36
5 5 10 15 20 17 30 35 40 45
6 6 12 22 24 30 44 58 48 54
7 7 14 21 28 35 42 49 56 63
8 8 16 24 32 40 48 56 64 72
9 9 18 27 36 45 54 63 72 81
ok
# Include paths: the TensorFlow source tree plus the flatbuffers headers
# fetched by the TF Lite Makefile build ("download_dependencies.sh").
CXXFLAGS ?= -I../tensorflow -I../tensorflow/tensorflow/lite/tools/make/downloads/flatbuffers/include
# Linker search path for the static TF Lite library built for the
# Raspberry Pi 1 (ARMv6) target.
LDFLAGS ?= -L../tensorflow/tensorflow/lite/tools/make/gen/rpi_armv6l/lib
.PHONY: all clean
all: lite2
# NOTE(review): recipe lines below lost their leading TAB when pasted into
# the article — restore a real TAB before each command for make to accept it.
lite2: main.cpp
g++ --std=c++11 main.cpp -O2 $(CXXFLAGS) $(LDFLAGS) -ltensorflow-lite -lstdc++ -lpthread -ldl -lm -o lite2
clean:
rm -f lite2
#include <vector>
#include <chrono>
#include <iostream>
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include <iostream>
#include <fstream>
#include <stdlib.h>
using namespace std;
// True when a TF Lite call finished with anything other than kTfLiteOk.
bool is_error(TfLiteStatus const & status)
{
    const bool succeeded = (status == kTfLiteOk);
    return !succeeded;
}
// Entry point: loads the "kuku" (9x9 multiplication table) TF Lite model
// and prints the table as predicted by the network.
//
// Input encoding : one byte k = j * 16 + i (row j in the high nibble,
//                  column i in the low nibble), fed to the model as
//                  eight 0.0/1.0 floats, least-significant bit first.
// Output decoding: seven floats thresholded at 0.5, reassembled LSB-first
//                  into the predicted product.
// Returns 0 on success, -1 on any load/build/allocate/invoke failure.
int main(int argc, char const * argv[])
{
    std::string a = "kuku.tflite";
    std::cout << "0: Loading model: " << a << std::endl;
    std::unique_ptr<tflite::FlatBufferModel> model =
        tflite::FlatBufferModel::BuildFromFile(a.c_str());
    if (!model)
    {
        std::cerr << "0: Failed to load the model." << std::endl;
        return -1;
    }
    std::cout << "1: The model was loaded successful." << std::endl;
    tflite::ops::builtin::BuiltinOpResolver resolver;
    std::unique_ptr<tflite::Interpreter> interpreter;
    tflite::InterpreterBuilder(* model, resolver)(& interpreter);
    // Bug fix: InterpreterBuilder leaves `interpreter` null on failure; the
    // original dereferenced it unconditionally.
    if (!interpreter)
    {
        std::cerr << "1: Failed to build the interpreter." << std::endl;
        return -1;
    }
    std::cout << "2: interpreter was build successful." << std::endl;
    TfLiteStatus status = interpreter->AllocateTensors();
    if (is_error(status))
    {
        std::cerr << "2: Failed to allocate the memory for tensors." << std::endl;
        return -1;
    }
    std::cout << "3: The model was allocated successful." << std::endl;
    float * in = interpreter->typed_input_tensor<float>(0);
    float * out = interpreter->typed_output_tensor<float>(0);
    // Column header row: 1..9.
    std::printf (" ");
    for (int i = 1; i < 10; i++)
    {
        std::printf ("%2d ", i);
    }
    std::printf ("\n");
    for (int j = 1; j < 10; j++)
    {
        std::printf ("%2d ", j);
        for (int i = 1; i < 10; i++)
        {
            // Pack (j, i) into one byte: high nibble = j, low nibble = i.
            int k = j * 16 + i;
            // Feed each of the 8 bits of k as a 0/1 float, LSB first.
            for (int b = 0; b < 8; b++)
            {
                in[b] = (k >> b) & 0x1 ? 1.0f : 0.0f;
            }
            status = interpreter->Invoke();
            if (is_error(status))
            {
                std::cerr << "3: Failed to invoke the interpreter." << std::endl;
                return -1;
            }
            // Decode the 7 output bits back into the predicted product.
            k = 0;
            for (int b = 0; b < 7; b++)
            {
                if (out[b] > 0.5f) k += 1 << b;
            }
            std::printf ("%2d ", k);
        }
        std::printf ("\n");
    }
    std::cout << "ok" << std::endl;
    return 0;
}
結果
0: Loading model: kuku.tflite
1: The model was loaded successful.
2: interpreter was build successful.
3: The model was allocated successful.
1 2 3 4 5 6 7 8 9
1 1 2 3 4 5 6 7 8 9
2 2 4 6 8 10 12 14 16 18
3 3 6 1 12 15 2 21 24 27
4 4 8 12 16 20 24 28 32 36
5 5 10 15 20 17 30 35 40 45
6 6 12 22 24 30 44 58 48 54
7 7 14 21 28 35 42 49 56 63
8 8 16 24 32 40 48 56 64 72
9 9 18 27 36 45 54 63 72 81
ok
0: Loading model: kuku.tflite
1: The model was loaded successful.
2: interpreter was build successful.
3: The model was allocated successful.
1 2 3 4 5 6 7 8 9
1 1 2 3 4 5 6 7 8 9
2 2 4 6 8 10 12 14 16 18
3 3 6 1 12 15 2 21 24 27
4 4 8 12 16 20 24 28 32 36
5 5 10 15 20 17 30 35 40 45
6 6 12 22 24 30 44 58 48 54
7 7 14 21 28 35 42 49 56 63
8 8 16 24 32 40 48 56 64 72
9 9 18 27 36 45 54 63 72 81
ok
以上。
Author And Source
この問題について(raspberry pi 1でtensorflow lite その12), 我々は、より多くの情報をここで見つけました https://qiita.com/ohisama@github/items/4db699f21358f07b982b 著者帰属:元の著者の情報は、元のURLに含まれています。著作権は原作者に属する。
Content is automatically searched and collected through network algorithms . If there is a violation . Please contact us . We will adjust (correct author information ,or delete content ) as soon as possible .