Improved code readability for GPU/CPU computation; only minor changes.

parent 332bf565
......@@ -7,7 +7,7 @@
#include "caffe2/core/tensor.h"
#include "caffe2/core/init.h"
// Enable define USE_GPU if you want to use gpu
// Define USE_GPU for GPU computation. Default is CPU computation.
//#define USE_GPU
#ifdef USE_GPU
......@@ -31,13 +31,12 @@ class ${tc.fileNameWithoutEnding}{
public:
const std::vector<TIndex> input_shapes = {<#list tc.architecture.inputs as input>{1,${tc.join(input.definition.type.dimensions, ",")}}<#if input?has_next>,</#if></#list>};
const bool use_gpu = false;
explicit ${tc.fileNameWithoutEnding}(){
init(input_shapes);
}
//~${tc.fileNameWithoutEnding}(){};
~${tc.fileNameWithoutEnding}(){};
void init(const std::vector<TIndex> &input_shapes){
int n = 0;
......@@ -61,11 +60,11 @@ class ${tc.fileNameWithoutEnding}{
CAFFE_ENFORCE(ReadProtoFromFile(FLAGS_predict_net, &predictNet));
// Set device type
#ifdef USE_GPU
#ifdef USE_GPU
predictNet.mutable_device_option()->set_device_type(CUDA);
initNet.mutable_device_option()->set_device_type(CUDA);
std::cout << "== GPU mode selected " << " ==" << std::endl;
#else
#else
predictNet.mutable_device_option()->set_device_type(CPU);
initNet.mutable_device_option()->set_device_type(CPU);
......@@ -76,7 +75,7 @@ class ${tc.fileNameWithoutEnding}{
initNet.mutable_op(i)->mutable_device_option()->set_device_type(CPU);
}
std::cout << "== CPU mode selected " << " ==" << std::endl;
#endif
#endif
// Load network
CAFFE_ENFORCE(workSpace.RunNetOnce(initNet));
......@@ -91,16 +90,12 @@ class ${tc.fileNameWithoutEnding}{
input.ShareExternalPointer((float *) ${tc.join(tc.architectureInputs, ",", "","")}.data());
// Get input blob
<#--<#list tc.architectureInputs as inputName>-->
#ifdef USE_GPU
<#--auto ${inputName + "Blob"} = workSpace.GetBlob("${inputName}")->GetMutable<TensorCUDA>();-->
#ifdef USE_GPU
auto dataBlob = workSpace.GetBlob("data")->GetMutable<TensorCUDA>();
#else
<#--auto ${inputName + "Blob"} = workSpace.GetBlob("${inputName}")->GetMutable<TensorCPU>();-->
#else
auto dataBlob = workSpace.GetBlob("data")->GetMutable<TensorCPU>();
#endif
#endif
<#--</#list>-->
// Copy from input data
dataBlob->CopyFrom(input);
......@@ -109,11 +104,11 @@ class ${tc.fileNameWithoutEnding}{
// Get output blob
<#list tc.architectureOutputs as outputName>
#ifdef USE_GPU
#ifdef USE_GPU
auto ${outputName + "Blob"} = TensorCPU(workSpace.GetBlob("${outputName}")->Get<TensorCUDA>());
#else
#else
auto ${outputName + "Blob"} = workSpace.GetBlob("${outputName}")->Get<TensorCPU>();
#endif
#endif
${outputName}.assign(${outputName + "Blob"}.data<float>(),${outputName + "Blob"}.data<float>() + ${outputName + "Blob"}.size());
</#list>
......
......@@ -7,7 +7,7 @@
#include "caffe2/core/tensor.h"
#include "caffe2/core/init.h"
// Enable define USE_GPU if you want to use gpu
// Define USE_GPU for GPU computation. Default is CPU computation.
//#define USE_GPU
#ifdef USE_GPU
......@@ -31,13 +31,12 @@ class CNNPredictor_Alexnet{
public:
const std::vector<TIndex> input_shapes = {{1,3,224,224}};
const bool use_gpu = false;
explicit CNNPredictor_Alexnet(){
init(input_shapes);
}
//~CNNPredictor_Alexnet(){};
~CNNPredictor_Alexnet(){};
void init(const std::vector<TIndex> &input_shapes){
int n = 0;
......@@ -61,11 +60,11 @@ class CNNPredictor_Alexnet{
CAFFE_ENFORCE(ReadProtoFromFile(FLAGS_predict_net, &predictNet));
// Set device type
#ifdef USE_GPU
#ifdef USE_GPU
predictNet.mutable_device_option()->set_device_type(CUDA);
initNet.mutable_device_option()->set_device_type(CUDA);
std::cout << "== GPU mode selected " << " ==" << std::endl;
#else
#else
predictNet.mutable_device_option()->set_device_type(CPU);
initNet.mutable_device_option()->set_device_type(CPU);
......@@ -76,7 +75,7 @@ class CNNPredictor_Alexnet{
initNet.mutable_op(i)->mutable_device_option()->set_device_type(CPU);
}
std::cout << "== CPU mode selected " << " ==" << std::endl;
#endif
#endif
// Load network
CAFFE_ENFORCE(workSpace.RunNetOnce(initNet));
......@@ -91,11 +90,11 @@ class CNNPredictor_Alexnet{
input.ShareExternalPointer((float *) data.data());
// Get input blob
#ifdef USE_GPU
#ifdef USE_GPU
auto dataBlob = workSpace.GetBlob("data")->GetMutable<TensorCUDA>();
#else
#else
auto dataBlob = workSpace.GetBlob("data")->GetMutable<TensorCPU>();
#endif
#endif
// Copy from input data
dataBlob->CopyFrom(input);
......@@ -104,11 +103,11 @@ class CNNPredictor_Alexnet{
workSpace.RunNet(predictNet.name());
// Get output blob
#ifdef USE_GPU
#ifdef USE_GPU
auto predictionsBlob = TensorCPU(workSpace.GetBlob("predictions")->Get<TensorCUDA>());
#else
#else
auto predictionsBlob = workSpace.GetBlob("predictions")->Get<TensorCPU>();
#endif
#endif
predictions.assign(predictionsBlob.data<float>(),predictionsBlob.data<float>() + predictionsBlob.size());
google::protobuf::ShutdownProtobufLibrary();
......
......@@ -7,7 +7,7 @@
#include "caffe2/core/tensor.h"
#include "caffe2/core/init.h"
// Enable define USE_GPU if you want to use gpu
// Define USE_GPU for GPU computation. Default is CPU computation.
//#define USE_GPU
#ifdef USE_GPU
......@@ -31,13 +31,12 @@ class CNNPredictor_CifarClassifierNetwork{
public:
const std::vector<TIndex> input_shapes = {{1,3,32,32}};
const bool use_gpu = false;
explicit CNNPredictor_CifarClassifierNetwork(){
init(input_shapes);
}
//~CNNPredictor_CifarClassifierNetwork(){};
~CNNPredictor_CifarClassifierNetwork(){};
void init(const std::vector<TIndex> &input_shapes){
int n = 0;
......@@ -61,11 +60,11 @@ class CNNPredictor_CifarClassifierNetwork{
CAFFE_ENFORCE(ReadProtoFromFile(FLAGS_predict_net, &predictNet));
// Set device type
#ifdef USE_GPU
#ifdef USE_GPU
predictNet.mutable_device_option()->set_device_type(CUDA);
initNet.mutable_device_option()->set_device_type(CUDA);
std::cout << "== GPU mode selected " << " ==" << std::endl;
#else
#else
predictNet.mutable_device_option()->set_device_type(CPU);
initNet.mutable_device_option()->set_device_type(CPU);
......@@ -76,7 +75,7 @@ class CNNPredictor_CifarClassifierNetwork{
initNet.mutable_op(i)->mutable_device_option()->set_device_type(CPU);
}
std::cout << "== CPU mode selected " << " ==" << std::endl;
#endif
#endif
// Load network
CAFFE_ENFORCE(workSpace.RunNetOnce(initNet));
......@@ -91,11 +90,11 @@ class CNNPredictor_CifarClassifierNetwork{
input.ShareExternalPointer((float *) data.data());
// Get input blob
#ifdef USE_GPU
#ifdef USE_GPU
auto dataBlob = workSpace.GetBlob("data")->GetMutable<TensorCUDA>();
#else
#else
auto dataBlob = workSpace.GetBlob("data")->GetMutable<TensorCPU>();
#endif
#endif
// Copy from input data
dataBlob->CopyFrom(input);
......@@ -104,11 +103,11 @@ class CNNPredictor_CifarClassifierNetwork{
workSpace.RunNet(predictNet.name());
// Get output blob
#ifdef USE_GPU
#ifdef USE_GPU
auto softmaxBlob = TensorCPU(workSpace.GetBlob("softmax")->Get<TensorCUDA>());
#else
#else
auto softmaxBlob = workSpace.GetBlob("softmax")->Get<TensorCPU>();
#endif
#endif
softmax.assign(softmaxBlob.data<float>(),softmaxBlob.data<float>() + softmaxBlob.size());
google::protobuf::ShutdownProtobufLibrary();
......
......@@ -7,7 +7,7 @@
#include "caffe2/core/tensor.h"
#include "caffe2/core/init.h"
// Enable define USE_GPU if you want to use gpu
// Define USE_GPU for GPU computation. Default is CPU computation.
//#define USE_GPU
#ifdef USE_GPU
......@@ -31,13 +31,12 @@ class CNNPredictor_VGG16{
public:
const std::vector<TIndex> input_shapes = {{1,3,224,224}};
const bool use_gpu = false;
explicit CNNPredictor_VGG16(){
init(input_shapes);
}
//~CNNPredictor_VGG16(){};
~CNNPredictor_VGG16(){};
void init(const std::vector<TIndex> &input_shapes){
int n = 0;
......@@ -61,11 +60,11 @@ class CNNPredictor_VGG16{
CAFFE_ENFORCE(ReadProtoFromFile(FLAGS_predict_net, &predictNet));
// Set device type
#ifdef USE_GPU
#ifdef USE_GPU
predictNet.mutable_device_option()->set_device_type(CUDA);
initNet.mutable_device_option()->set_device_type(CUDA);
std::cout << "== GPU mode selected " << " ==" << std::endl;
#else
#else
predictNet.mutable_device_option()->set_device_type(CPU);
initNet.mutable_device_option()->set_device_type(CPU);
......@@ -76,7 +75,7 @@ class CNNPredictor_VGG16{
initNet.mutable_op(i)->mutable_device_option()->set_device_type(CPU);
}
std::cout << "== CPU mode selected " << " ==" << std::endl;
#endif
#endif
// Load network
CAFFE_ENFORCE(workSpace.RunNetOnce(initNet));
......@@ -91,11 +90,11 @@ class CNNPredictor_VGG16{
input.ShareExternalPointer((float *) data.data());
// Get input blob
#ifdef USE_GPU
#ifdef USE_GPU
auto dataBlob = workSpace.GetBlob("data")->GetMutable<TensorCUDA>();
#else
#else
auto dataBlob = workSpace.GetBlob("data")->GetMutable<TensorCPU>();
#endif
#endif
// Copy from input data
dataBlob->CopyFrom(input);
......@@ -104,11 +103,11 @@ class CNNPredictor_VGG16{
workSpace.RunNet(predictNet.name());
// Get output blob
#ifdef USE_GPU
#ifdef USE_GPU
auto predictionsBlob = TensorCPU(workSpace.GetBlob("predictions")->Get<TensorCUDA>());
#else
#else
auto predictionsBlob = workSpace.GetBlob("predictions")->Get<TensorCPU>();
#endif
#endif
predictions.assign(predictionsBlob.data<float>(),predictionsBlob.data<float>() + predictionsBlob.size());
google::protobuf::ShutdownProtobufLibrary();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment