Details of the TensorRT Inference Process
TensorRT can speed up deep learning networks several-fold, especially on the embedded TX2 device. For an overview of the TensorRT engine and accelerator, see https://blog.csdn.net/xh_hit/article/details/79769599. This article uses a Caffe model as an example to walk through the inference process.
The overall inference process can be divided into four stages: build, serialize, deserialize, and run. The code for the build and serialization stages is shown first:
void caffeToGIEModel(const char* deployFile,                  // name for caffe prototxt
                     const char* modelFile,                   // name for model
                     const std::vector<std::string>& outputs, // network outputs
                     unsigned int maxBatchSize,               // batch size - NB must be at least as large as the batch we want to run with
                     IHostMemory*& gieModelStream)            // output buffer for the GIE model
{
    // create the builder
    IBuilder* builder = createInferBuilder(gLogger);

    // parse the caffe model to populate the network, then set the outputs
    INetworkDefinition* network = builder->createNetwork();
    ICaffeParser* parser = createCaffeParser();
    const IBlobNameToTensor* blobNameToTensor = parser->parse(deployFile, modelFile, *network, DataType::kFLOAT);

    // specify which tensors are outputs
    for (auto& s : outputs)
        network->markOutput(*blobNameToTensor->find(s.c_str()));

    // Build the engine
    builder->setMaxBatchSize(maxBatchSize);
    builder->setMaxWorkspaceSize(1 << 20);
    ICudaEngine* engine = builder->buildCudaEngine(*network);
    assert(engine);

    // we don't need the network any more, and we can destroy the parser
    network->destroy();
    parser->destroy();

    // serialize the engine, then close everything down
    gieModelStream = engine->serialize();
    engine->destroy();
    builder->destroy();
    shutdownProtobufLibrary();
}
The parameters of caffeToGIEModel() include the network's prototxt file, the trained caffemodel, the output layers of the network, the maximum batch size, and a buffer that receives the serialized engine.
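As a minimal sketch of how the function might be called (the file names and the output blob name "prob" are placeholders, not taken from the original sample):
// hypothetical file and blob names, shown only to illustrate the call
IHostMemory* gieModelStream{nullptr};
caffeToGIEModel("net.prototxt", "net.caffemodel",
                std::vector<std::string>{"prob"},  // output blob(s) of the network
                1,                                 // max batch size
                gieModelStream);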
Step by step, the key calls inside caffeToGIEModel are:
IBuilder* builder = createInferBuilder(gLogger);         // create the TensorRT builder
INetworkDefinition* network = builder->createNetwork();  // create an empty network definition
ICaffeParser* parser = createCaffeParser();              // create the Caffe parser
// parse the prototxt and caffemodel to populate the network definition
const IBlobNameToTensor* blobNameToTensor = parser->parse(deployFile, modelFile, *network, DataType::kFLOAT);
// mark the requested blobs as network outputs
for (auto& s : outputs)
    network->markOutput(*blobNameToTensor->find(s.c_str()));
ICudaEngine* engine = builder->buildCudaEngine(*network); // build the optimized engine
gieModelStream = engine->serialize();                     // serialize the engine into a memory buffer
The serialization result gieModelStream can be thought of as a byte stream that can be saved to disk, so that the engine does not have to be rebuilt and serialized on every run. How to save it will be covered in the next post.
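In the meantime, a rough sketch of one way to write the stream out, assuming a hypothetical file name engine.plan:
#include <fstream>

// write the serialized engine to disk (hypothetical file name)
std::ofstream planFile("engine.plan", std::ios::binary);
planFile.write(static_cast<const char*>(gieModelStream->data()), gieModelStream->size());
planFile.close();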
After serialization, the engine also needs to be deserialized before use:
// deserialize the engine
IRuntime* runtime = createInferRuntime(gLogger);
// rebuild the engine from the serialized gieModelStream
ICudaEngine* engine = runtime->deserializeCudaEngine(gieModelStream->data(), gieModelStream->size(), nullptr);
if (gieModelStream) gieModelStream->destroy();
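If the stream was previously saved to a file as sketched above, it can be read back into memory and passed to deserializeCudaEngine in the same way; a rough sketch (engine.plan again being a hypothetical file name):
#include <fstream>
#include <vector>

// read a previously saved plan file back into memory (hypothetical file name)
std::ifstream planFile("engine.plan", std::ios::binary | std::ios::ate);
std::size_t planSize = planFile.tellg();
planFile.seekg(0, std::ios::beg);
std::vector<char> plan(planSize);
planFile.read(plan.data(), planSize);

IRuntime* runtime = createInferRuntime(gLogger);
ICudaEngine* engine = runtime->deserializeCudaEngine(plan.data(), planSize, nullptr);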
After deserialization, create an execution context, and inference can then be run:
IExecutionContext *context = engine->createExecutionContext();
The doInference function used here is as follows:
void doInference(IExecutionContext& context, float* input, float* output, int batchSize)
{
    const ICudaEngine& engine = context.getEngine();
    // input and output buffer pointers that we pass to the engine - the engine requires exactly IEngine::getNbBindings()
    // of these, but in this case we know that there is exactly one input and one output.
    assert(engine.getNbBindings() == 2);
    void* buffers[2];

    // In order to bind the buffers, we need to know the names of the input and output tensors.
    // note that indices are guaranteed to be less than IEngine::getNbBindings()
    int inputIndex = engine.getBindingIndex(INPUT_BLOB_NAME),
        outputIndex = engine.getBindingIndex(OUTPUT_BLOB_NAME);

    // create GPU buffers and a stream
    CHECK(cudaMalloc(&buffers[inputIndex], batchSize * INPUT_H * INPUT_W * sizeof(float)));
    CHECK(cudaMalloc(&buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float)));
    cudaStream_t stream;
    CHECK(cudaStreamCreate(&stream));

    // DMA the input to the GPU, execute the batch asynchronously, and DMA it back:
    CHECK(cudaMemcpyAsync(buffers[inputIndex], input, batchSize * INPUT_H * INPUT_W * sizeof(float), cudaMemcpyHostToDevice, stream));
    context.enqueue(batchSize, buffers, stream, nullptr);
    CHECK(cudaMemcpyAsync(output, buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost, stream));
    cudaStreamSynchronize(stream);

    // release the stream and the device buffers
    cudaStreamDestroy(stream);
    CHECK(cudaFree(buffers[inputIndex]));
    CHECK(cudaFree(buffers[outputIndex]));
}
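A minimal sketch of the run stage (the host buffers here are placeholders; INPUT_H, INPUT_W, and OUTPUT_SIZE are assumed to be defined as in the listing above, and context is the execution context created earlier):
// hypothetical host-side buffers for one image; input is assumed to hold the preprocessed data
float input[INPUT_H * INPUT_W];
float output[OUTPUT_SIZE];
doInference(*context, input, output, 1);   // run inference with batch size 1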
Inside doInference, the binding indices of the input and output tensors are looked up first:
// look up the binding indices of the input and output blobs by name
int inputIndex = engine.getBindingIndex(INPUT_BLOB_NAME), outputIndex = engine.getBindingIndex(OUTPUT_BLOB_NAME);
Next, GPU memory is allocated, the input is copied from the host to the GPU, and inference is executed:
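The relevant lines from the listing above are the buffer allocation and the asynchronous host-to-device copy, after which the batch is enqueued:
// allocate device buffers for the input and output bindings
CHECK(cudaMalloc(&buffers[inputIndex], batchSize * INPUT_H * INPUT_W * sizeof(float)));
CHECK(cudaMalloc(&buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float)));
// copy the input from the host to the GPU on the stream
CHECK(cudaMemcpyAsync(buffers[inputIndex], input, batchSize * INPUT_H * INPUT_W * sizeof(float), cudaMemcpyHostToDevice, stream));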
context.enqueue(batchSize, buffers, stream, nullptr);
After the inference call, the output is copied from the GPU back to the host, which completes the inference:
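The corresponding lines in doInference are the device-to-host copy and the stream synchronization; once the context is no longer needed, the remaining TensorRT objects created earlier are typically destroyed as well (this teardown is an assumption based on the usual sample code, not shown in the original):
// inside doInference: copy the output back to the host and wait for the stream
CHECK(cudaMemcpyAsync(output, buffers[outputIndex], batchSize * OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost, stream));
cudaStreamSynchronize(stream);

// after doInference returns: typical teardown of the TensorRT objects (assumption)
context->destroy();
engine->destroy();
runtime->destroy();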