Skip to content
GitLab
Menu
Projects
Groups
Snippets
/
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Sign in
Toggle navigation
Menu
Open sidebar
monticore
EmbeddedMontiArc
generators
CNNArch2Caffe2
Commits
8cc4614a
Commit
8cc4614a
authored
Oct 25, 2018
by
Carlos Alfredo Yeverino Rodriguez
Browse files
Adapted CNNPredictor template for the backend Caffe2.
parent
c455ab90
Changes
4
Hide whitespace changes
Inline
Side-by-side
src/main/resources/templates/caffe2/CNNPredictor.ftl
View file @
8cc4614a
#ifndef ${tc.fileNameWithoutEnding?upper_case}
#define ${tc.fileNameWithoutEnding?upper_case}

#include "caffe2/core/common.h"
#include "caffe2/utils/proto_utils.h"
#include "caffe2/core/workspace.h"
#include "caffe2/core/tensor.h"
#include "caffe2/core/init.h"

// Enable define USE_GPU if you want to use gpu
//#define USE_GPU

#ifdef USE_GPU
#include "caffe2/core/context_gpu.h"
#endif

#include <cassert>
#include <fstream>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Command-line flags pointing at the serialized Caffe2 nets for this architecture.
CAFFE2_DEFINE_string(init_net, "./model/${tc.fullArchitectureName}/init_net.pb", "The given path to the init protobuffer.");
CAFFE2_DEFINE_string(predict_net, "./model/${tc.fullArchitectureName}/predict_net.pb", "The given path to the predict protobuffer.");

using namespace caffe2;

// Generated predictor wrapping a Caffe2 workspace for the ${tc.fullArchitectureName} network.
// Loads init_net/predict_net once in the constructor and exposes a predict() call
// that copies the input vector into the workspace, runs the net, and copies the
// output blob(s) back into caller-provided vectors.
class ${tc.fileNameWithoutEnding}{
private:
    TensorCPU input;              // staging tensor that aliases the caller's input buffer
    Workspace workSpace;          // owns all network blobs
    NetDef initNet, predictNet;   // deserialized protobuf definitions

public:
    // Shape of the (single) network input: batch size 1 plus the model's input dimensions.
    const std::vector<TIndex> input_shapes = {<#list tc.architecture.inputs as input>{1,${tc.join(input.definition.type.dimensions, ",")}}<#if input?has_next>,</#if></#list>};

    explicit ${tc.fileNameWithoutEnding}(){
        init(input_shapes);
    }

    ~${tc.fileNameWithoutEnding}(){};

    // Loads both nets from FLAGS_init_net / FLAGS_predict_net into the workspace.
    // On a missing file it logs to stderr and returns, leaving the predictor unusable.
    void init(const std::vector<TIndex> &input_shapes){
        int n = 0;
        char **a[1];
        caffe2::GlobalInit(&n, a);

        if (!std::ifstream(FLAGS_init_net).good()) {
            std::cerr << "Network loading failure, init_net file '" << FLAGS_init_net << "' does not exist." << std::endl;
            return;
        }
        if (!std::ifstream(FLAGS_predict_net).good()) {
            std::cerr << "Network loading failure, predict_net file '" << FLAGS_predict_net << "' does not exist." << std::endl;
            return;
        }

        std::cout << "****************************************************************" << std::endl;
        std::cout << "Loading network..." << std::endl;

        // Read protobuf
        CAFFE_ENFORCE(ReadProtoFromFile(FLAGS_init_net, &initNet));
        CAFFE_ENFORCE(ReadProtoFromFile(FLAGS_predict_net, &predictNet));

        // Set device type
#ifdef USE_GPU
        predictNet.mutable_device_option()->set_device_type(CUDA);
        initNet.mutable_device_option()->set_device_type(CUDA);
        std::cout << "== GPU mode selected " << " ==" << std::endl;
#else
        predictNet.mutable_device_option()->set_device_type(CPU);
        initNet.mutable_device_option()->set_device_type(CPU);
        // Each operator carries its own device option; force all of them to CPU as well.
        for (int i = 0; i < predictNet.op_size(); ++i) {
            predictNet.mutable_op(i)->mutable_device_option()->set_device_type(CPU);
        }
        for (int i = 0; i < initNet.op_size(); ++i) {
            initNet.mutable_op(i)->mutable_device_option()->set_device_type(CPU);
        }
        std::cout << "== CPU mode selected " << " ==" << std::endl;
#endif

        // Load network
        CAFFE_ENFORCE(workSpace.RunNetOnce(initNet));
        CAFFE_ENFORCE(workSpace.CreateNet(predictNet));
        std::cout << "== Network loaded " << " ==" << std::endl;

        input.Resize(input_shapes);
    }

    // Runs one forward pass: input vector(s) in, output vector(s) out.
    // The caller must size the input to match input_shapes and pre-size the outputs.
    void predict(${tc.join(tc.architectureInputs, ", ", "const std::vector<float> &", "")}, ${tc.join(tc.architectureOutputs, ", ", "std::vector<float> &", "")}){
        //Note: ShareExternalPointer requires a float pointer.
        input.ShareExternalPointer((float *) ${tc.join(tc.architectureInputs, ",", "", "")}.data());

        // Get input blob
#ifdef USE_GPU
        auto dataBlob = workSpace.GetBlob("data")->GetMutable<TensorCUDA>();
#else
        auto dataBlob = workSpace.GetBlob("data")->GetMutable<TensorCPU>();
#endif

        // Copy from input data
        dataBlob->CopyFrom(input);

        // Forward
        workSpace.RunNet(predictNet.name());

        // Get output blob(s) and copy them into the caller's vectors.
<#list tc.architectureOutputs as outputName>
#ifdef USE_GPU
        auto ${outputName + "Blob"} = TensorCPU(workSpace.GetBlob("${outputName}")->Get<TensorCUDA>());
#else
        auto ${outputName + "Blob"} = workSpace.GetBlob("${outputName}")->Get<TensorCPU>();
#endif
        ${outputName}.assign(${outputName + "Blob"}.data<float>(), ${outputName + "Blob"}.data<float>() + ${outputName + "Blob"}.size());
</#list>

        google::protobuf::ShutdownProtobufLibrary();
    }
};

#endif // ${tc.fileNameWithoutEnding?upper_case}
src/test/resources/target_code/CNNPredictor_Alexnet.h
View file @
8cc4614a
#ifndef CNNPREDICTOR_ALEXNET
#define CNNPREDICTOR_ALEXNET
#include
<mxnet/c_predict_api.h>
#include
"caffe2/core/common.h"
#include
"caffe2/utils/proto_utils.h"
#include
"caffe2/core/workspace.h"
#include
"caffe2/core/tensor.h"
#include
"caffe2/core/init.h"
// Enable define USE_GPU if you want to use gpu
//#define USE_GPU
#ifdef USE_GPU
#include
"caffe2/core/context_gpu.h"
#endif
#include
<cassert>
#include
<string>
#include
<vector>
#include
<iostream>
#include
<map>
CAFFE2_DEFINE_string
(
init_net
,
"./model/Alexnet/init_net.pb"
,
"The given path to the init protobuffer."
);
CAFFE2_DEFINE_string
(
predict_net
,
"./model/Alexnet/predict_net.pb"
,
"The given path to the predict protobuffer."
);
#include
<CNNBufferFile.h>
using
namespace
caffe2
;
class
CNNPredictor_Alexnet
{
public:
const
std
::
string
json_file
=
"model/Alexnet/Alexnet_newest-symbol.json"
;
const
std
::
string
param_file
=
"model/Alexnet/Alexnet_newest-0000.params"
;
//const std::vector<std::string> input_keys = {"data"};
const
std
::
vector
<
std
::
string
>
input_keys
=
{
"data"
};
const
std
::
vector
<
std
::
vector
<
mx_uint
>>
input_shapes
=
{{
1
,
3
,
224
,
224
}};
const
bool
use_gpu
=
false
;
PredictorHandle
handle
;
explicit
CNNPredictor_Alexnet
(){
init
(
json_file
,
param_file
,
input_keys
,
input_shapes
,
use_gpu
);
}
~
CNNPredictor_Alexnet
(){
if
(
handle
)
MXPredFree
(
handle
);
}
void
predict
(
const
std
::
vector
<
float
>
&
data
,
std
::
vector
<
float
>
&
predictions
){
MXPredSetInput
(
handle
,
"data"
,
data
.
data
(),
data
.
size
());
//MXPredSetInput(handle, "data", data.data(), data.size());
MXPredForward
(
handle
);
mx_uint
output_index
;
mx_uint
*
shape
=
0
;
mx_uint
shape_len
;
size_t
size
;
output_index
=
0
;
MXPredGetOutputShape
(
handle
,
output_index
,
&
shape
,
&
shape_len
);
size
=
1
;
for
(
mx_uint
i
=
0
;
i
<
shape_len
;
++
i
)
size
*=
shape
[
i
];
assert
(
size
==
predictions
.
size
());
MXPredGetOutput
(
handle
,
0
,
&
(
predictions
[
0
]),
predictions
.
size
());
}
void
init
(
const
std
::
string
&
json_file
,
const
std
::
string
&
param_file
,
const
std
::
vector
<
std
::
string
>
&
input_keys
,
const
std
::
vector
<
std
::
vector
<
mx_uint
>>
&
input_shapes
,
const
bool
&
use_gpu
){
BufferFile
json_data
(
json_file
);
BufferFile
param_data
(
param_file
);
int
dev_type
=
use_gpu
?
2
:
1
;
int
dev_id
=
0
;
handle
=
0
;
if
(
json_data
.
GetLength
()
==
0
||
param_data
.
GetLength
()
==
0
)
{
std
::
exit
(
-
1
);
}
private:
TensorCPU
input
;
Workspace
workSpace
;
NetDef
initNet
,
predictNet
;
const
mx_uint
num_input_nodes
=
input_keys
.
size
();
public:
const
std
::
vector
<
TIndex
>
input_shapes
=
{{
1
,
3
,
224
,
224
}};
const
bool
use_gpu
=
false
;
const
char
*
input_keys_ptr
[
num_input_nodes
];
for
(
mx_uint
i
=
0
;
i
<
num_input_nodes
;
i
++
){
input_keys_ptr
[
i
]
=
input_keys
[
i
].
c_str
();
explicit
CNNPredictor_Alexnet
(){
init
(
input_shapes
);
}
mx_uint
shape_data_size
=
0
;
mx_uint
input_shape_indptr
[
input_shapes
.
size
()
+
1
];
input_shape_indptr
[
0
]
=
0
;
for
(
mx_uint
i
=
0
;
i
<
input_shapes
.
size
();
i
++
){
input_shape_indptr
[
i
+
1
]
=
input_shapes
[
i
].
size
();
shape_data_size
+=
input_shapes
[
i
].
size
();
}
//~CNNPredictor_Alexnet(){};
void
init
(
const
std
::
vector
<
TIndex
>
&
input_shapes
){
int
n
=
0
;
char
**
a
[
1
];
caffe2
::
GlobalInit
(
&
n
,
a
);
if
(
!
std
::
ifstream
(
FLAGS_init_net
).
good
())
{
std
::
cerr
<<
"Network loading failure, init_net file '"
<<
FLAGS_init_net
<<
"' does not exist."
<<
std
::
endl
;
return
;
}
if
(
!
std
::
ifstream
(
FLAGS_predict_net
).
good
())
{
std
::
cerr
<<
"Network loading failure, predict_net file '"
<<
FLAGS_predict_net
<<
"' does not exist."
<<
std
::
endl
;
return
;
}
std
::
cout
<<
"****************************************************************"
<<
std
::
endl
;
std
::
cout
<<
"Loading network..."
<<
std
::
endl
;
mx_uint
input_shape_data
[
shape_data_size
];
mx_uint
index
=
0
;
for
(
mx_uint
i
=
0
;
i
<
input_shapes
.
size
();
i
++
){
for
(
mx_uint
j
=
0
;
j
<
input_shapes
[
i
].
size
();
j
++
){
input_shape_data
[
index
]
=
input_shapes
[
i
][
j
];
index
++
;
// Read protobuf
CAFFE_ENFORCE
(
ReadProtoFromFile
(
FLAGS_init_net
,
&
initNet
));
CAFFE_ENFORCE
(
ReadProtoFromFile
(
FLAGS_predict_net
,
&
predictNet
));
// Set device type
#ifdef USE_GPU
predictNet
.
mutable_device_option
()
->
set_device_type
(
CUDA
);
initNet
.
mutable_device_option
()
->
set_device_type
(
CUDA
);
std
::
cout
<<
"== GPU mode selected "
<<
" =="
<<
std
::
endl
;
#else
predictNet
.
mutable_device_option
()
->
set_device_type
(
CPU
);
initNet
.
mutable_device_option
()
->
set_device_type
(
CPU
);
for
(
int
i
=
0
;
i
<
predictNet
.
op_size
();
++
i
){
predictNet
.
mutable_op
(
i
)
->
mutable_device_option
()
->
set_device_type
(
CPU
);
}
for
(
int
i
=
0
;
i
<
initNet
.
op_size
();
++
i
){
initNet
.
mutable_op
(
i
)
->
mutable_device_option
()
->
set_device_type
(
CPU
);
}
std
::
cout
<<
"== CPU mode selected "
<<
" =="
<<
std
::
endl
;
#endif
// Load network
CAFFE_ENFORCE
(
workSpace
.
RunNetOnce
(
initNet
));
CAFFE_ENFORCE
(
workSpace
.
CreateNet
(
predictNet
));
std
::
cout
<<
"== Network loaded "
<<
" =="
<<
std
::
endl
;
input
.
Resize
(
input_shapes
);
}
MXPredCreate
((
const
char
*
)
json_data
.
GetBuffer
(),
(
const
char
*
)
param_data
.
GetBuffer
(),
static_cast
<
size_t
>
(
param_data
.
GetLength
()),
dev_type
,
dev_id
,
num_input_nodes
,
input_keys_ptr
,
input_shape_indptr
,
input_shape_data
,
&
handle
);
assert
(
handle
);
}
void
predict
(
const
std
::
vector
<
float
>
&
data
,
std
::
vector
<
float
>
&
predictions
){
//Note: ShareExternalPointer requires a float pointer.
input
.
ShareExternalPointer
((
float
*
)
data
.
data
());
// Get input blob
#ifdef USE_GPU
auto
dataBlob
=
workSpace
.
GetBlob
(
"data"
)
->
GetMutable
<
TensorCUDA
>
();
#else
auto
dataBlob
=
workSpace
.
GetBlob
(
"data"
)
->
GetMutable
<
TensorCPU
>
();
#endif
// Copy from input data
dataBlob
->
CopyFrom
(
input
);
// Forward
workSpace
.
RunNet
(
predictNet
.
name
());
// Get output blob
#ifdef USE_GPU
auto
predictionsBlob
=
TensorCPU
(
workSpace
.
GetBlob
(
"predictions"
)
->
Get
<
TensorCUDA
>
());
#else
auto
predictionsBlob
=
workSpace
.
GetBlob
(
"predictions"
)
->
Get
<
TensorCPU
>
();
#endif
predictions
.
assign
(
predictionsBlob
.
data
<
float
>
(),
predictionsBlob
.
data
<
float
>
()
+
predictionsBlob
.
size
());
google
::
protobuf
::
ShutdownProtobufLibrary
();
}
};
#endif // CNNPREDICTOR_ALEXNET
\ No newline at end of file
src/test/resources/target_code/CNNPredictor_CifarClassifierNetwork.h
View file @
8cc4614a
#ifndef CNNPREDICTOR_CIFARCLASSIFIERNETWORK
#define CNNPREDICTOR_CIFARCLASSIFIERNETWORK
#include
<mxnet/c_predict_api.h>
#include
"caffe2/core/common.h"
#include
"caffe2/utils/proto_utils.h"
#include
"caffe2/core/workspace.h"
#include
"caffe2/core/tensor.h"
#include
"caffe2/core/init.h"
// Enable define USE_GPU if you want to use gpu
//#define USE_GPU
#ifdef USE_GPU
#include
"caffe2/core/context_gpu.h"
#endif
#include
<cassert>
#include
<string>
#include
<vector>
#include
<iostream>
#include
<map>
// Command-line flags selecting the serialized Caffe2 nets for the
// CifarClassifierNetwork model; defaults point into ./model/.
CAFFE2_DEFINE_string
(
init_net
,
"./model/CifarClassifierNetwork/init_net.pb"
,
"The given path to the init protobuffer."
);
CAFFE2_DEFINE_string
(
predict_net
,
"./model/CifarClassifierNetwork/predict_net.pb"
,
"The given path to the predict protobuffer."
);
#include
<CNNBufferFile.h>
using
namespace
caffe2
;
class
CNNPredictor_CifarClassifierNetwork
{
public:
const
std
::
string
json_file
=
"model/CifarClassifierNetwork/CifarClassifierNetwork_newest-symbol.json"
;
const
std
::
string
param_file
=
"model/CifarClassifierNetwork/CifarClassifierNetwork_newest-0000.params"
;
//const std::vector<std::string> input_keys = {"data"};
const
std
::
vector
<
std
::
string
>
input_keys
=
{
"data"
};
const
std
::
vector
<
std
::
vector
<
mx_uint
>>
input_shapes
=
{{
1
,
3
,
32
,
32
}};
const
bool
use_gpu
=
false
;
PredictorHandle
handle
;
explicit
CNNPredictor_CifarClassifierNetwork
(){
init
(
json_file
,
param_file
,
input_keys
,
input_shapes
,
use_gpu
);
}
~
CNNPredictor_CifarClassifierNetwork
(){
if
(
handle
)
MXPredFree
(
handle
);
}
void
predict
(
const
std
::
vector
<
float
>
&
data
,
std
::
vector
<
float
>
&
softmax
){
MXPredSetInput
(
handle
,
"data"
,
data
.
data
(),
data
.
size
());
//MXPredSetInput(handle, "data", data.data(), data.size());
MXPredForward
(
handle
);
mx_uint
output_index
;
mx_uint
*
shape
=
0
;
mx_uint
shape_len
;
size_t
size
;
output_index
=
0
;
MXPredGetOutputShape
(
handle
,
output_index
,
&
shape
,
&
shape_len
);
size
=
1
;
for
(
mx_uint
i
=
0
;
i
<
shape_len
;
++
i
)
size
*=
shape
[
i
];
assert
(
size
==
softmax
.
size
());
MXPredGetOutput
(
handle
,
0
,
&
(
softmax
[
0
]),
softmax
.
size
());
}
void
init
(
const
std
::
string
&
json_file
,
const
std
::
string
&
param_file
,
const
std
::
vector
<
std
::
string
>
&
input_keys
,
const
std
::
vector
<
std
::
vector
<
mx_uint
>>
&
input_shapes
,
const
bool
&
use_gpu
){
BufferFile
json_data
(
json_file
);
BufferFile
param_data
(
param_file
);
int
dev_type
=
use_gpu
?
2
:
1
;
int
dev_id
=
0
;
handle
=
0
;
if
(
json_data
.
GetLength
()
==
0
||
param_data
.
GetLength
()
==
0
)
{
std
::
exit
(
-
1
);
}
private:
TensorCPU
input
;
Workspace
workSpace
;
NetDef
initNet
,
predictNet
;
const
mx_uint
num_input_nodes
=
input_keys
.
size
();
public:
const
std
::
vector
<
TIndex
>
input_shapes
=
{{
1
,
3
,
32
,
32
}};
const
bool
use_gpu
=
false
;
const
char
*
input_keys_ptr
[
num_input_nodes
];
for
(
mx_uint
i
=
0
;
i
<
num_input_nodes
;
i
++
){
input_keys_ptr
[
i
]
=
input_keys
[
i
].
c_str
();
explicit
CNNPredictor_CifarClassifierNetwork
(){
init
(
input_shapes
);
}
mx_uint
shape_data_size
=
0
;
mx_uint
input_shape_indptr
[
input_shapes
.
size
()
+
1
];
input_shape_indptr
[
0
]
=
0
;
for
(
mx_uint
i
=
0
;
i
<
input_shapes
.
size
();
i
++
){
input_shape_indptr
[
i
+
1
]
=
input_shapes
[
i
].
size
();
shape_data_size
+=
input_shapes
[
i
].
size
();
}
//~CNNPredictor_CifarClassifierNetwork(){};
void
init
(
const
std
::
vector
<
TIndex
>
&
input_shapes
){
int
n
=
0
;
char
**
a
[
1
];
caffe2
::
GlobalInit
(
&
n
,
a
);
if
(
!
std
::
ifstream
(
FLAGS_init_net
).
good
())
{
std
::
cerr
<<
"Network loading failure, init_net file '"
<<
FLAGS_init_net
<<
"' does not exist."
<<
std
::
endl
;
return
;
}
if
(
!
std
::
ifstream
(
FLAGS_predict_net
).
good
())
{
std
::
cerr
<<
"Network loading failure, predict_net file '"
<<
FLAGS_predict_net
<<
"' does not exist."
<<
std
::
endl
;
return
;
}
std
::
cout
<<
"****************************************************************"
<<
std
::
endl
;
std
::
cout
<<
"Loading network..."
<<
std
::
endl
;
mx_uint
input_shape_data
[
shape_data_size
];
mx_uint
index
=
0
;
for
(
mx_uint
i
=
0
;
i
<
input_shapes
.
size
();
i
++
){
for
(
mx_uint
j
=
0
;
j
<
input_shapes
[
i
].
size
();
j
++
){
input_shape_data
[
index
]
=
input_shapes
[
i
][
j
];
index
++
;
// Read protobuf
CAFFE_ENFORCE
(
ReadProtoFromFile
(
FLAGS_init_net
,
&
initNet
));
CAFFE_ENFORCE
(
ReadProtoFromFile
(
FLAGS_predict_net
,
&
predictNet
));