Adapted CNNPredictor template for the Caffe2 backend.

parent c455ab90
#ifndef ${tc.fileNameWithoutEnding?upper_case}
#define ${tc.fileNameWithoutEnding?upper_case}
#include "caffe2/core/common.h"
#include "caffe2/utils/proto_utils.h"
#include "caffe2/core/workspace.h"
#include "caffe2/core/tensor.h"
#include "caffe2/core/init.h"
// Define USE_GPU if you want to use the GPU
//#define USE_GPU
#ifdef USE_GPU
#include "caffe2/core/context_gpu.h"
#endif
#include <cassert>
#include <fstream>
#include <string>
#include <vector>
#include <iostream>
#include <map>
CAFFE2_DEFINE_string(init_net, "./model/${tc.fullArchitectureName}/init_net.pb", "The given path to the init protobuffer.");
CAFFE2_DEFINE_string(predict_net, "./model/${tc.fullArchitectureName}/predict_net.pb", "The given path to the predict protobuffer.");
using namespace caffe2;
class ${tc.fileNameWithoutEnding}{
private:
TensorCPU input;
Workspace workSpace;
NetDef initNet, predictNet;
public:
const std::vector<TIndex> input_shapes = {<#list tc.architecture.inputs as input>{1,${tc.join(input.definition.type.dimensions, ",")}}<#if input?has_next>,</#if></#list>};
const bool use_gpu = false;
explicit ${tc.fileNameWithoutEnding}(){
init(input_shapes);
}
//~${tc.fileNameWithoutEnding}(){};
void init(const std::vector<TIndex> &input_shapes){
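// Initialize the Caffe2 runtime; no command line arguments are forwarded, so the flags defined above keep their default values.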
int n = 0;
char **a[1];
caffe2::GlobalInit(&n, a);
if (!std::ifstream(FLAGS_init_net).good()) {
std::cerr << "Network loading failure, init_net file '" << FLAGS_init_net << "' does not exist." << std::endl;
return;
}
if (!std::ifstream(FLAGS_predict_net).good()) {
std::cerr << "Network loading failure, predict_net file '" << FLAGS_predict_net << "' does not exist." << std::endl;
return;
}
std::cout << "****************************************************************" << std::endl;
std::cout << "Loading network..." << std::endl;
// Read protobuf
CAFFE_ENFORCE(ReadProtoFromFile(FLAGS_init_net, &initNet));
CAFFE_ENFORCE(ReadProtoFromFile(FLAGS_predict_net, &predictNet));
// Set device type
#ifdef USE_GPU
predictNet.mutable_device_option()->set_device_type(CUDA);
initNet.mutable_device_option()->set_device_type(CUDA);
std::cout << "== GPU mode selected " << " ==" << std::endl;
#else
predictNet.mutable_device_option()->set_device_type(CPU);
initNet.mutable_device_option()->set_device_type(CPU);
for(int i = 0; i < predictNet.op_size(); ++i){
predictNet.mutable_op(i)->mutable_device_option()->set_device_type(CPU);
}
for(int i = 0; i < initNet.op_size(); ++i){
initNet.mutable_op(i)->mutable_device_option()->set_device_type(CPU);
}
std::cout << "== CPU mode selected " << " ==" << std::endl;
#endif
// Load network
CAFFE_ENFORCE(workSpace.RunNetOnce(initNet));
CAFFE_ENFORCE(workSpace.CreateNet(predictNet));
std::cout << "== Network loaded " << " ==" << std::endl;
input.Resize(input_shapes);
}
void predict(${tc.join(tc.architectureInputs, ", ", "const std::vector<float> &", "")}, ${tc.join(tc.architectureOutputs, ", ", "std::vector<float> &", "")}){
//Note: ShareExternalPointer requires a float pointer.
input.ShareExternalPointer((float *) ${tc.join(tc.architectureInputs, ",", "","")}.data());
// Get input blob
<#--<#list tc.architectureInputs as inputName>-->
#ifdef USE_GPU
<#--auto ${inputName + "Blob"} = workSpace.GetBlob("${inputName}")->GetMutable<TensorCUDA>();-->
auto dataBlob = workSpace.GetBlob("data")->GetMutable<TensorCUDA>();
#else
<#--auto ${inputName + "Blob"} = workSpace.GetBlob("${inputName}")->GetMutable<TensorCPU>();-->
auto dataBlob = workSpace.GetBlob("data")->GetMutable<TensorCPU>();
#endif
<#--</#list>-->
// Copy from input data
dataBlob->CopyFrom(input);
// Forward
workSpace.RunNet(predictNet.name());
// Get output blob
<#list tc.architectureOutputs as outputName>
#ifdef USE_GPU
auto ${outputName + "Blob"} = TensorCPU(workSpace.GetBlob("${outputName}")->Get<TensorCUDA>());
#else
auto ${outputName + "Blob"} = workSpace.GetBlob("${outputName}")->Get<TensorCPU>();
#endif
${outputName}.assign(${outputName + "Blob"}.data<float>(),${outputName + "Blob"}.data<float>() + ${outputName + "Blob"}.size());
</#list>
google::protobuf::ShutdownProtobufLibrary();
}
};
#endif // ${tc.fileNameWithoutEnding?upper_case}
#ifndef CNNPREDICTOR_ALEXNET
#define CNNPREDICTOR_ALEXNET
#include "caffe2/core/common.h"
#include "caffe2/utils/proto_utils.h"
#include "caffe2/core/workspace.h"
#include "caffe2/core/tensor.h"
#include "caffe2/core/init.h"
// Define USE_GPU if you want to use the GPU
//#define USE_GPU
#ifdef USE_GPU
#include "caffe2/core/context_gpu.h"
#endif
#include <cassert>
#include <fstream>
#include <string>
#include <vector>
#include <iostream>
#include <map>
CAFFE2_DEFINE_string(init_net, "./model/Alexnet/init_net.pb", "The given path to the init protobuffer.");
CAFFE2_DEFINE_string(predict_net, "./model/Alexnet/predict_net.pb", "The given path to the predict protobuffer.");
using namespace caffe2;
class CNNPredictor_Alexnet{
private:
TensorCPU input;
Workspace workSpace;
NetDef initNet, predictNet;
public:
const std::vector<TIndex> input_shapes = {{1,3,224,224}};
const bool use_gpu = false;
explicit CNNPredictor_Alexnet(){
init(input_shapes);
}
//~CNNPredictor_Alexnet(){};
void init(const std::vector<TIndex> &input_shapes){
int n = 0;
char **a[1];
caffe2::GlobalInit(&n, a);
if (!std::ifstream(FLAGS_init_net).good()) {
std::cerr << "Network loading failure, init_net file '" << FLAGS_init_net << "' does not exist." << std::endl;
return;
}
if (!std::ifstream(FLAGS_predict_net).good()) {
std::cerr << "Network loading failure, predict_net file '" << FLAGS_predict_net << "' does not exist." << std::endl;
return;
}
std::cout << "****************************************************************" << std::endl;
std::cout << "Loading network..." << std::endl;
// Read protobuf
CAFFE_ENFORCE(ReadProtoFromFile(FLAGS_init_net, &initNet));
CAFFE_ENFORCE(ReadProtoFromFile(FLAGS_predict_net, &predictNet));
// Set device type
#ifdef USE_GPU
predictNet.mutable_device_option()->set_device_type(CUDA);
initNet.mutable_device_option()->set_device_type(CUDA);
std::cout << "== GPU mode selected " << " ==" << std::endl;
#else
predictNet.mutable_device_option()->set_device_type(CPU);
initNet.mutable_device_option()->set_device_type(CPU);
for(int i = 0; i < predictNet.op_size(); ++i){
predictNet.mutable_op(i)->mutable_device_option()->set_device_type(CPU);
}
for(int i = 0; i < initNet.op_size(); ++i){
initNet.mutable_op(i)->mutable_device_option()->set_device_type(CPU);
}
std::cout << "== CPU mode selected " << " ==" << std::endl;
#endif
// Load network
CAFFE_ENFORCE(workSpace.RunNetOnce(initNet));
CAFFE_ENFORCE(workSpace.CreateNet(predictNet));
std::cout << "== Network loaded " << " ==" << std::endl;
input.Resize(input_shapes);
}
void predict(const std::vector<float> &data, std::vector<float> &predictions){
//Note: ShareExternalPointer requires a float pointer.
input.ShareExternalPointer((float *) data.data());
// Get input blob
#ifdef USE_GPU
auto dataBlob = workSpace.GetBlob("data")->GetMutable<TensorCUDA>();
#else
auto dataBlob = workSpace.GetBlob("data")->GetMutable<TensorCPU>();
#endif
// Copy from input data
dataBlob->CopyFrom(input);
// Forward
workSpace.RunNet(predictNet.name());
// Get output blob
#ifdef USE_GPU
auto predictionsBlob = TensorCPU(workSpace.GetBlob("predictions")->Get<TensorCUDA>());
#else
auto predictionsBlob = workSpace.GetBlob("predictions")->Get<TensorCPU>();
#endif
predictions.assign(predictionsBlob.data<float>(),predictionsBlob.data<float>() + predictionsBlob.size());
google::protobuf::ShutdownProtobufLibrary();
}
};
#endif // CNNPREDICTOR_ALEXNET
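For illustration, a minimal usage sketch of the generated predictor above. The main() wrapper, the header name CNNPredictor_Alexnet.h, and the zero-filled input are assumptions for demonstration only; it further assumes the binary is linked against Caffe2 and protobuf and that init_net.pb and predict_net.pb exist under ./model/Alexnet/ as given by the flag defaults defined above.

// ---- usage sketch (illustrative only, not part of the generated sources) ----
#include "CNNPredictor_Alexnet.h" // assumed file name of the header above
#include <iostream>
#include <vector>

int main(){
    // Constructing the predictor reads init_net.pb/predict_net.pb and builds the network.
    CNNPredictor_Alexnet predictor;
    // One NCHW image matching input_shapes = {1,3,224,224}; zero-filled for demonstration.
    std::vector<float> data(1 * 3 * 224 * 224, 0.0f);
    // predict() fills the output via assign(), so the vector can start empty.
    std::vector<float> predictions;
    predictor.predict(data, predictions);
    std::cout << "Number of output values: " << predictions.size() << std::endl;
    return 0;
}
// ---- end usage sketch ----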
#ifndef CNNPREDICTOR_CIFARCLASSIFIERNETWORK
#define CNNPREDICTOR_CIFARCLASSIFIERNETWORK
#include "caffe2/core/common.h"
#include "caffe2/utils/proto_utils.h"
#include "caffe2/core/workspace.h"
#include "caffe2/core/tensor.h"
#include "caffe2/core/init.h"
// Define USE_GPU if you want to use the GPU
//#define USE_GPU
#ifdef USE_GPU
#include "caffe2/core/context_gpu.h"
#endif
#include <cassert>
#include <fstream>
#include <string>
#include <vector>
#include <iostream>
#include <map>
CAFFE2_DEFINE_string(init_net, "./model/CifarClassifierNetwork/init_net.pb", "The given path to the init protobuffer.");
CAFFE2_DEFINE_string(predict_net, "./model/CifarClassifierNetwork/predict_net.pb", "The given path to the predict protobuffer.");
using namespace caffe2;
class CNNPredictor_CifarClassifierNetwork{
private:
TensorCPU input;
Workspace workSpace;
NetDef initNet, predictNet;
public:
const std::vector<TIndex> input_shapes = {{1,3,32,32}};
const bool use_gpu = false;
explicit CNNPredictor_CifarClassifierNetwork(){
init(input_shapes);
}
//~CNNPredictor_CifarClassifierNetwork(){};
void init(const std::vector<TIndex> &input_shapes){
int n = 0;
char **a[1];
caffe2::GlobalInit(&n, a);
if (!std::ifstream(FLAGS_init_net).good()) {
std::cerr << "Network loading failure, init_net file '" << FLAGS_init_net << "' does not exist." << std::endl;
return;
}
if (!std::ifstream(FLAGS_predict_net).good()) {
std::cerr << "Network loading failure, predict_net file '" << FLAGS_predict_net << "' does not exist." << std::endl;
return;
}
std::cout << "****************************************************************" << std::endl;
std::cout << "Loading network..." << std::endl;
// Read protobuf
CAFFE_ENFORCE(ReadProtoFromFile(FLAGS_init_net, &initNet));
CAFFE_ENFORCE(ReadProtoFromFile(FLAGS_predict_net, &predictNet));
// Set device type
#ifdef USE_GPU
predictNet.mutable_device_option()->set_device_type(CUDA);
initNet.mutable_device_option()->set_device_type(CUDA);
std::cout << "== GPU mode selected " << " ==" << std::endl;
#else
predictNet.mutable_device_option()->set_device_type(CPU);
initNet.mutable_device_option()->set_device_type(CPU);
for(int i = 0; i < predictNet.op_size(); ++i){
predictNet.mutable_op(i)->mutable_device_option()->set_device_type(CPU);
}
for(int i = 0; i < initNet.op_size(); ++i){
initNet.mutable_op(i)->mutable_device_option()->set_device_type(CPU);
}
std::cout << "== CPU mode selected " << " ==" << std::endl;
#endif
// Load network
CAFFE_ENFORCE(workSpace.RunNetOnce(initNet));
CAFFE_ENFORCE(workSpace.CreateNet(predictNet));
std::cout << "== Network loaded " << " ==" << std::endl;
input.Resize(input_shapes);
}
void predict(const std::vector<float> &data, std::vector<float> &softmax){
//Note: ShareExternalPointer requires a float pointer.
input.ShareExternalPointer((float *) data.data());
// Get input blob
#ifdef USE_GPU
auto dataBlob = workSpace.GetBlob("data")->GetMutable<TensorCUDA>();
#else
auto dataBlob = workSpace.GetBlob("data")->GetMutable<TensorCPU>();
#endif
// Copy from input data
dataBlob->CopyFrom(input);
// Forward
workSpace.RunNet(predictNet.name());
// Get output blob
#ifdef USE_GPU
auto softmaxBlob = TensorCPU(workSpace.GetBlob("softmax")->Get<TensorCUDA>());
#else
auto softmaxBlob = workSpace.GetBlob("softmax")->Get<TensorCPU>();
#endif
softmax.assign(softmaxBlob.data<float>(),softmaxBlob.data<float>() + softmaxBlob.size());
google::protobuf::ShutdownProtobufLibrary();
}
};
#endif // CNNPREDICTOR_CIFARCLASSIFIERNETWORK
#ifndef CNNPREDICTOR_VGG16
#define CNNPREDICTOR_VGG16
#include "caffe2/core/common.h"
#include "caffe2/utils/proto_utils.h"
#include "caffe2/core/workspace.h"
#include "caffe2/core/tensor.h"
#include "caffe2/core/init.h"
// Define USE_GPU if you want to use the GPU
//#define USE_GPU
#ifdef USE_GPU
#include "caffe2/core/context_gpu.h"
#endif
#include <cassert>
#include <fstream>
#include <string>
#include <vector>
#include <iostream>
#include <map>
CAFFE2_DEFINE_string(init_net, "./model/VGG16/init_net.pb", "The given path to the init protobuffer.");
CAFFE2_DEFINE_string(predict_net, "./model/VGG16/predict_net.pb", "The given path to the predict protobuffer.");
using namespace caffe2;
class CNNPredictor_VGG16{
private:
TensorCPU input;
Workspace workSpace;
NetDef initNet, predictNet;
public:
const std::vector<TIndex> input_shapes = {{1,3,224,224}};
const bool use_gpu = false;