Commit 52e348e6 authored by Evgeny Kusmenko's avatar Evgeny Kusmenko
Browse files

Merge branch 'adapt_cnnpredictor_template' into 'master'

Adapt cnnpredictor template

See merge request !9
parents 333a5840 465336a2
Pipeline #81374 passed with stages
in 7 minutes and 53 seconds
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
<groupId>de.monticore.lang.monticar</groupId> <groupId>de.monticore.lang.monticar</groupId>
<artifactId>cnnarch-caffe2-generator</artifactId> <artifactId>cnnarch-caffe2-generator</artifactId>
<version>0.2.5-SNAPSHOT</version> <version>0.2.6-SNAPSHOT</version>
<!-- == PROJECT DEPENDENCIES ============================================= --> <!-- == PROJECT DEPENDENCIES ============================================= -->
......
...@@ -102,9 +102,6 @@ public class CNNArch2Caffe2 implements CNNArchGenerator{ ...@@ -102,9 +102,6 @@ public class CNNArch2Caffe2 implements CNNArchGenerator{
temp = archTc.process("execute", Target.CPP); temp = archTc.process("execute", Target.CPP);
fileContentMap.put(temp.getKey().replace(".h", ""), temp.getValue()); fileContentMap.put(temp.getKey().replace(".h", ""), temp.getValue());
temp = archTc.process("CNNBufferFile", Target.CPP);
fileContentMap.put("CNNBufferFile.h", temp.getValue());
checkValidGeneration(architecture); checkValidGeneration(architecture);
return fileContentMap; return fileContentMap;
......
...@@ -2,6 +2,8 @@ package de.monticore.lang.monticar.cnnarch.caffe2generator; ...@@ -2,6 +2,8 @@ package de.monticore.lang.monticar.cnnarch.caffe2generator;
import de.monticore.io.paths.ModelPath; import de.monticore.io.paths.ModelPath;
import de.monticore.lang.monticar.cnntrain.CNNTrainGenerator; import de.monticore.lang.monticar.cnntrain.CNNTrainGenerator;
import de.monticore.lang.monticar.cnntrain._ast.ASTCNNTrainNode;
import de.monticore.lang.monticar.cnntrain._ast.ASTOptimizerEntry;
import de.monticore.lang.monticar.cnntrain._cocos.CNNTrainCocos; import de.monticore.lang.monticar.cnntrain._cocos.CNNTrainCocos;
import de.monticore.lang.monticar.cnntrain._symboltable.CNNTrainCompilationUnitSymbol; import de.monticore.lang.monticar.cnntrain._symboltable.CNNTrainCompilationUnitSymbol;
import de.monticore.lang.monticar.cnntrain._symboltable.CNNTrainLanguage; import de.monticore.lang.monticar.cnntrain._symboltable.CNNTrainLanguage;
...@@ -19,6 +21,43 @@ public class CNNTrain2Caffe2 implements CNNTrainGenerator { ...@@ -19,6 +21,43 @@ public class CNNTrain2Caffe2 implements CNNTrainGenerator {
private String generationTargetPath; private String generationTargetPath;
private String instanceName; private String instanceName;
// Filters the parsed CNNTrain configuration for the Caffe2 backend:
// entries and optimizer settings that Caffe2 cannot handle are removed,
// with a warning logged for each one (see TrainParamSupportChecker).
private void supportCheck(ConfigurationSymbol configuration){
// Drop unsupported top-level training entries.
checkEntryParams(configuration);
// Drop an unsupported optimizer, or only its unsupported parameters.
checkOptimizerParams(configuration);
}
// Visits every training entry of the configuration with a
// TrainParamSupportChecker (which logs a warning per unsupported entry)
// and then removes all flagged entries from the entry map.
private void checkEntryParams(ConfigurationSymbol configuration){
    TrainParamSupportChecker entryChecker = new TrainParamSupportChecker();
    for (Object entryKey : configuration.getEntryMap().keySet()) {
        ASTCNNTrainNode entryNode =
                (ASTCNNTrainNode) configuration.getEntryMap().get(entryKey.toString()).getAstNode().get();
        entryNode.accept(entryChecker);
    }
    // Strip every entry whose name the checker collected as unsupported.
    configuration.getEntryMap().keySet().removeAll(entryChecker.getUnsupportedElemList());
}
// Validates the optimizer section of the configuration: an optimizer type
// that Caffe2 does not support is discarded entirely; for a supported
// optimizer only the individual unsupported parameters are removed.
private void checkOptimizerParams(ConfigurationSymbol configuration){
    if (configuration.getOptimizer() == null) {
        return;
    }
    TrainParamSupportChecker optimizerChecker = new TrainParamSupportChecker();
    ASTOptimizerEntry optimizerNode = (ASTOptimizerEntry) configuration.getOptimizer().getAstNode().get();
    optimizerNode.accept(optimizerChecker);
    if (optimizerChecker.getUnsupportedElemList().contains(optimizerChecker.unsupportedOptFlag)) {
        // The optimizer itself is unsupported -> drop the whole section.
        configuration.setOptimizer(null);
        return;
    }
    // Otherwise strip only the parameters the checker flagged.
    configuration.getOptimizer().getOptimizerParamMap().keySet()
            .removeAll(optimizerChecker.getUnsupportedElemList());
}
public CNNTrain2Caffe2() { public CNNTrain2Caffe2() {
setGenerationTargetPath("./target/generated-sources-cnnarch/"); setGenerationTargetPath("./target/generated-sources-cnnarch/");
} }
...@@ -54,6 +93,7 @@ public class CNNTrain2Caffe2 implements CNNTrainGenerator { ...@@ -54,6 +93,7 @@ public class CNNTrain2Caffe2 implements CNNTrainGenerator {
} }
setInstanceName(compilationUnit.get().getFullName()); setInstanceName(compilationUnit.get().getFullName());
CNNTrainCocos.checkAll(compilationUnit.get()); CNNTrainCocos.checkAll(compilationUnit.get());
supportCheck(compilationUnit.get().getConfiguration());
return compilationUnit.get().getConfiguration(); return compilationUnit.get().getConfiguration();
} }
......
package de.monticore.lang.monticar.cnnarch.caffe2generator;

import de.monticore.lang.monticar.cnntrain._ast.*;
import de.monticore.lang.monticar.cnntrain._visitor.CNNTrainVisitor;
import de.se_rwth.commons.logging.Log;

import java.util.ArrayList;
import java.util.List;

/**
 * Visitor that collects the names of CNNTrain configuration elements the
 * Caffe2 backend cannot handle. Supported elements have empty visit methods;
 * unsupported ones log a warning and are recorded so that the caller
 * (CNNTrain2Caffe2) can strip them from the configuration afterwards.
 */
public class TrainParamSupportChecker implements CNNTrainVisitor {

    /** Marker value recorded when the optimizer type itself is unsupported. */
    public String unsupportedOptFlag = "unsupported_optimizer";

    // Names of all unsupported entries/parameters seen so far.
    private final List<String> unsupportedElemList = new ArrayList<>();

    public TrainParamSupportChecker() {
    }

    /** @return the names collected so far; the caller may remove them from its maps. */
    public List<String> getUnsupportedElemList(){
        return this.unsupportedElemList;
    }

    private void printUnsupportedEntryParam(String nodeName){
        Log.warn("Unsupported training parameter " + "'" + nodeName + "'" + " for the backend CAFFE2. It will be ignored.");
    }

    private void printUnsupportedOptimizer(String nodeName){
        Log.warn("Unsupported optimizer parameter " + "'" + nodeName + "'" + " for the backend CAFFE2. It will be ignored.");
    }

    private void printUnsupportedOptimizerParam(String nodeName){
        Log.warn("Unsupported training optimizer parameter " + "'" + nodeName + "'" + " for the backend CAFFE2. It will be ignored.");
    }

    // Warn about and record an unsupported top-level training entry.
    private void rejectEntry(String name){
        printUnsupportedEntryParam(name);
        this.unsupportedElemList.add(name);
    }

    // Warn about an unsupported optimizer type and record the marker flag.
    private void rejectOptimizer(String name){
        printUnsupportedOptimizer(name);
        this.unsupportedElemList.add(this.unsupportedOptFlag);
    }

    // Warn about and record an unsupported optimizer parameter.
    private void rejectOptimizerParam(String name){
        printUnsupportedOptimizerParam(name);
        this.unsupportedElemList.add(name);
    }

    // --- Training entries: supported ones are no-ops. ---
    public void visit(ASTNumEpochEntry node){}

    public void visit(ASTBatchSizeEntry node){}

    public void visit(ASTLoadCheckpointEntry node){
        rejectEntry(node.getName());
    }

    public void visit(ASTNormalizeEntry node){
        rejectEntry(node.getName());
    }

    public void visit(ASTTrainContextEntry node){}

    public void visit(ASTEvalMetricEntry node){}

    // --- Optimizer types: supported ones are no-ops. ---
    public void visit(ASTSGDOptimizer node){}

    public void visit(ASTAdamOptimizer node){}

    public void visit(ASTRmsPropOptimizer node){}

    public void visit(ASTAdaGradOptimizer node){}

    public void visit(ASTNesterovOptimizer node){
        rejectOptimizer(node.getName());
    }

    public void visit(ASTAdaDeltaOptimizer node){
        rejectOptimizer(node.getName());
    }

    // --- Optimizer parameters: supported ones are no-ops. ---
    public void visit(ASTLearningRateEntry node){}

    public void visit(ASTMinimumLearningRateEntry node){
        rejectOptimizerParam(node.getName());
    }

    public void visit(ASTWeightDecayEntry node){}

    public void visit(ASTLRDecayEntry node){
        rejectOptimizerParam(node.getName());
    }

    public void visit(ASTLRPolicyEntry node){}

    public void visit(ASTRescaleGradEntry node){
        rejectOptimizerParam(node.getName());
    }

    public void visit(ASTClipGradEntry node){
        rejectOptimizerParam(node.getName());
    }

    public void visit(ASTStepSizeEntry node){}

    public void visit(ASTMomentumEntry node){}

    public void visit(ASTBeta1Entry node){}

    public void visit(ASTBeta2Entry node){}

    public void visit(ASTEpsilonEntry node){}

    public void visit(ASTGamma1Entry node){}

    public void visit(ASTGamma2Entry node){
        rejectOptimizerParam(node.getName());
    }

    public void visit(ASTCenteredEntry node){
        rejectOptimizerParam(node.getName());
    }

    public void visit(ASTClipWeightsEntry node){
        rejectOptimizerParam(node.getName());
    }

    public void visit(ASTRhoEntry node){}
}
#ifndef CNNBUFFERFILE_H
#define CNNBUFFERFILE_H

#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>

// Read file to buffer.
// RAII helper that loads an entire file into a heap-allocated buffer.
// On any failure (file missing, short read) the object is left empty:
// GetLength() == 0 and GetBuffer() == NULL, after printing to stderr.
class BufferFile {
 public :
    std::string file_path_;  // path the buffer was loaded from
    int length_;             // number of bytes read (0 on failure)
    char* buffer_;           // owned buffer, NULL on failure

    explicit BufferFile(std::string file_path)
        : file_path_(file_path), length_(0), buffer_(NULL) {
        std::ifstream ifs(file_path.c_str(), std::ios::in | std::ios::binary);
        if (!ifs) {
            std::cerr << "Can't open the file. Please check " << file_path << ". \n";
            return;
        }

        ifs.seekg(0, std::ios::end);
        length_ = static_cast<int>(ifs.tellg());
        ifs.seekg(0, std::ios::beg);
        std::cout << file_path.c_str() << " ... " << length_ << " bytes\n";

        buffer_ = new char[length_];
        ifs.read(buffer_, length_);
        if (!ifs) {
            // Short read: release the buffer rather than hand the caller
            // partially-uninitialized memory.
            std::cerr << "Can't read the file. Please check " << file_path << ". \n";
            delete[] buffer_;
            buffer_ = NULL;
            length_ = 0;
        }
    }

    // buffer_ is an owning raw pointer; copying would double-delete it.
    BufferFile(const BufferFile&) = delete;
    BufferFile& operator=(const BufferFile&) = delete;

    int GetLength() {
        return length_;
    }

    char* GetBuffer() {
        return buffer_;
    }

    ~BufferFile() {
        delete[] buffer_;  // delete[] on NULL is a no-op
        buffer_ = NULL;
    }
};
#endif // CNNBUFFERFILE_H
...@@ -2,36 +2,19 @@ from caffe2.python import workspace, core, model_helper, brew, optimizer ...@@ -2,36 +2,19 @@ from caffe2.python import workspace, core, model_helper, brew, optimizer
from caffe2.python.predictor import mobile_exporter from caffe2.python.predictor import mobile_exporter
from caffe2.proto import caffe2_pb2 from caffe2.proto import caffe2_pb2
import numpy as np import numpy as np
import logging
#import logging
import os import os
#import shutil import sys
#import sys
#import cv2
class ${tc.fileNameWithoutEnding}: class ${tc.fileNameWithoutEnding}:
module = None module = None
_data_dir_ = "data/${tc.fullArchitectureName}/" _current_dir_ = os.path.join('./')
_model_dir_ = "model/${tc.fullArchitectureName}/" _data_dir_ = os.path.join(_current_dir_, 'data', '${tc.fullArchitectureName}')
_model_prefix_ = "${tc.architectureName}" _model_dir_ = os.path.join(_current_dir_, 'model', '${tc.fullArchitectureName}')
_input_names_ = [${tc.join(tc.architectureInputs, ",", "'", "'")}]
_input_shapes_ = [<#list tc.architecture.inputs as input>(${tc.join(input.definition.type.dimensions, ",")})</#list>]
_output_names_ = [${tc.join(tc.architectureOutputs, ",", "'", "_label'")}]
CURRENT_FOLDER = os.path.join('./')
DATA_FOLDER = os.path.join(CURRENT_FOLDER, 'data')
ROOT_FOLDER = os.path.join(CURRENT_FOLDER, 'model')
#TODO: Modify paths to make them dynamic INIT_NET = os.path.join(_model_dir_, 'init_net.pb')<#--TODO: Change name to _init_net_ once it is not used in CNNTrainer for quick testing purposes-->
#For Windows PREDICT_NET = os.path.join(_model_dir_, 'predict_net.pb')<#--TODO:Change name to _predict_net_ once it is not used in CNNTrainer for quick testing purposes-->
#INIT_NET = 'D:/Yeverino/git_projects/Caffe2_scripts/caffe2_ema_cnncreator/init_net'
#PREDICT_NET = 'D:/Yeverino/git_projects/Caffe2_scripts/caffe2_ema_cnncreator/predict_net'
#For Ubuntu
INIT_NET = './model/init_net'
PREDICT_NET = './model/predict_net'
def add_input(self, model, batch_size, db, db_type, device_opts): def add_input(self, model, batch_size, db, db_type, device_opts):
with core.DeviceScope(device_opts): with core.DeviceScope(device_opts):
...@@ -107,12 +90,12 @@ ${tc.include(tc.architecture.body)} ...@@ -107,12 +90,12 @@ ${tc.include(tc.architecture.body)}
device_opts = core.DeviceOption(caffe2_pb2.CUDA, 0) device_opts = core.DeviceOption(caffe2_pb2.CUDA, 0)
print("GPU mode selected") print("GPU mode selected")
workspace.ResetWorkspace(self.ROOT_FOLDER) workspace.ResetWorkspace(self._model_dir_)
arg_scope = {"order": "NCHW"} arg_scope = {"order": "NCHW"}
# == Training model == # == Training model ==
train_model= model_helper.ModelHelper(name="train_net", arg_scope=arg_scope) train_model= model_helper.ModelHelper(name="train_net", arg_scope=arg_scope)
data, label = self.add_input(train_model, batch_size=batch_size, db=os.path.join(self.DATA_FOLDER, 'mnist-train-nchw-lmdb'), db_type='lmdb', device_opts=device_opts) data, label = self.add_input(train_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-train-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
${tc.join(tc.architectureOutputs, ",", "","")} = self.create_model(train_model, data, device_opts=device_opts) ${tc.join(tc.architectureOutputs, ",", "","")} = self.create_model(train_model, data, device_opts=device_opts)
self.add_training_operators(train_model, ${tc.join(tc.architectureOutputs, ",", "","")}, label, device_opts, opt_type, base_learning_rate, policy, stepsize, epsilon, beta1, beta2, gamma, momentum) self.add_training_operators(train_model, ${tc.join(tc.architectureOutputs, ",", "","")}, label, device_opts, opt_type, base_learning_rate, policy, stepsize, epsilon, beta1, beta2, gamma, momentum)
self.add_accuracy(train_model, ${tc.join(tc.architectureOutputs, ",", "","")}, label, device_opts, eval_metric) self.add_accuracy(train_model, ${tc.join(tc.architectureOutputs, ",", "","")}, label, device_opts, eval_metric)
...@@ -134,7 +117,7 @@ ${tc.include(tc.architecture.body)} ...@@ -134,7 +117,7 @@ ${tc.include(tc.architecture.body)}
print("== Running Test model ==") print("== Running Test model ==")
# == Testing model. == # == Testing model. ==
test_model= model_helper.ModelHelper(name="test_net", arg_scope=arg_scope, init_params=False) test_model= model_helper.ModelHelper(name="test_net", arg_scope=arg_scope, init_params=False)
data, label = self.add_input(test_model, batch_size=100, db=os.path.join(self.DATA_FOLDER, 'mnist-test-nchw-lmdb'), db_type='lmdb', device_opts=device_opts) data, label = self.add_input(test_model, batch_size=100, db=os.path.join(self._data_dir_, 'mnist-test-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
${tc.join(tc.architectureOutputs, ",", "","")} = self.create_model(test_model, data, device_opts=device_opts) ${tc.join(tc.architectureOutputs, ",", "","")} = self.create_model(test_model, data, device_opts=device_opts)
self.add_accuracy(test_model, predictions, label, device_opts, eval_metric) self.add_accuracy(test_model, predictions, label, device_opts, eval_metric)
workspace.RunNetOnce(test_model.param_init_net) workspace.RunNetOnce(test_model.param_init_net)
...@@ -169,29 +152,44 @@ ${tc.include(tc.architecture.body)} ...@@ -169,29 +152,44 @@ ${tc.include(tc.architecture.body)}
model.params model.params
) )
try:
os.makedirs(self._model_dir_)
except OSError:
if not os.path.isdir(self._model_dir_):
raise
print("Save the model to init_net.pb and predict_net.pb") print("Save the model to init_net.pb and predict_net.pb")
with open(predict_net_path + '.pb', 'wb') as f: with open(predict_net_path, 'wb') as f:
f.write(model.net._net.SerializeToString()) f.write(model.net._net.SerializeToString())
with open(init_net_path + '.pb', 'wb') as f: with open(init_net_path, 'wb') as f:
f.write(init_net.SerializeToString()) f.write(init_net.SerializeToString())
print("Save the model to init_net.pbtxt and predict_net.pbtxt") print("Save the model to init_net.pbtxt and predict_net.pbtxt")
with open(init_net_path + '.pbtxt', 'w') as f:
with open(init_net_path.replace('.pb','.pbtxt'), 'w') as f:
f.write(str(init_net)) f.write(str(init_net))
with open(predict_net_path + '.pbtxt', 'w') as f: with open(predict_net_path.replace('.pb','.pbtxt'), 'w') as f:
f.write(str(predict_net)) f.write(str(predict_net))
print("== Saved init_net and predict_net ==") print("== Saved init_net and predict_net ==")
def load_net(self, init_net_path, predict_net_path, device_opts): def load_net(self, init_net_path, predict_net_path, device_opts):
init_def = caffe2_pb2.NetDef() <#--#TODO: Verify that paths ends in '.pb' and not in '.pbtxt'. The extension '.pbtxt' is not supported at the moment.-->
with open(init_net_path + '.pb', 'rb') as f: if not os.path.isfile(init_net_path):
logging.error("Network loading failure. File '" + os.path.abspath(init_net_path) + "' does not exist.")
sys.exit(1)
elif not os.path.isfile(predict_net_path):
logging.error("Network loading failure. File '" + os.path.abspath(predict_net_path) + "' does not exist.")
sys.exit(1)
init_def = caffe2_pb2.NetDef()
with open(init_net_path, 'rb') as f:
init_def.ParseFromString(f.read()) init_def.ParseFromString(f.read())
init_def.device_option.CopyFrom(device_opts) init_def.device_option.CopyFrom(device_opts)
workspace.RunNetOnce(init_def.SerializeToString()) workspace.RunNetOnce(init_def.SerializeToString())
net_def = caffe2_pb2.NetDef() net_def = caffe2_pb2.NetDef()
with open(predict_net_path + '.pb', 'rb') as f: with open(predict_net_path, 'rb') as f:
net_def.ParseFromString(f.read()) net_def.ParseFromString(f.read())
net_def.device_option.CopyFrom(device_opts) net_def.device_option.CopyFrom(device_opts)
workspace.CreateNet(net_def.SerializeToString(), overwrite=True) workspace.CreateNet(net_def.SerializeToString(), overwrite=True)
print("== Loaded init_net and predict_net ==") print("== Loaded init_net and predict_net ==")
\ No newline at end of file
#ifndef ${tc.fileNameWithoutEnding?upper_case} #ifndef ${tc.fileNameWithoutEnding?upper_case}
#define ${tc.fileNameWithoutEnding?upper_case} #define ${tc.fileNameWithoutEnding?upper_case}
#include <mxnet/c_predict_api.h> #include "caffe2/core/common.h"
#include "caffe2/utils/proto_utils.h"
#include "caffe2/core/workspace.h"
#include "caffe2/core/tensor.h"
#include "caffe2/core/init.h"
// Define USE_GPU for GPU computation. Default is CPU computation.
//#define USE_GPU
#ifdef USE_GPU
#include "caffe2/core/context_gpu.h"
#endif
#include <cassert>
#include <string> #include <string>
#include <vector> #include <iostream>
#include <map>
CAFFE2_DEFINE_string(init_net, "./model/${tc.fullArchitectureName}/init_net.pb", "The given path to the init protobuffer.");
CAFFE2_DEFINE_string(predict_net, "./model/${tc.fullArchitectureName}/predict_net.pb", "The given path to the predict protobuffer.");
#include <CNNBufferFile.h> using namespace caffe2;
class ${tc.fileNameWithoutEnding}{ class ${tc.fileNameWithoutEnding}{
public: private:
const std::string json_file = "model/${tc.fullArchitectureName}/${tc.architectureName}_newest-symbol.json"; TensorCPU input;
const std::string param_file = "model/${tc.fullArchitectureName}/${tc.architectureName}_newest-0000.params"; Workspace workSpace;
//const std::vector<std::string> input_keys = {"data"}; NetDef initNet, predictNet;
const std::vector<std::string> input_keys = {${tc.join(tc.architectureInputs, ",", "\"", "\"")}};
const std::vector<std::vector<mx_uint>> input_shapes = {<#list tc.architecture.inputs as input>{1,${tc.join(input.definition.type.dimensions, ",")}}<#if input?has_next>,</#if></#list>};
const bool use_gpu = false;
PredictorHandle handle;
explicit ${tc.fileNameWithoutEnding}(){
init(json_file, param_file, input_keys, input_shapes, use_gpu);
}
~${tc.fileNameWithoutEnding}(){
if(handle) MXPredFree(handle);
}
void predict(${tc.join(tc.architectureInputs, ", ", "const std::vector<float> &", "")},
${tc.join(tc.architectureOutputs, ", ", "std::vector<float> &", "")}){
<#list tc.architectureInputs as inputName>
MXPredSetInput(handle, "data", ${inputName}.data(), ${inputName}.size());
//MXPredSetInput(handle, "${inputName}", ${inputName}.data(), ${inputName}.size());
</#list>
MXPredForward(handle); public:
const std::vector<TIndex> input_shapes = {<#list tc.architecture.inputs as input>{1,${tc.join(input.definition.type.dimensions, ",")}}<#if input?has_next>,</#if></#list>};
mx_uint output_index; explicit ${tc.fileNameWithoutEnding}(){
mx_uint *shape = 0; init(input_shapes);
mx_uint shape_len; }
size_t size;
<#list tc.architectureOutputs as outputName> ~${tc.fileNameWithoutEnding}(){};
output_index = ${outputName?index?c};
MXPredGetOutputShape(handle, output_index, &shape, &shape_len);
size = 1;
for (mx_uint i = 0; i < shape_len; ++i) size *= shape[i];
assert(size == ${outputName}.size());
MXPredGetOutput(handle, ${outputName?index?c}, &(${outputName}[0]), ${outputName}.size());
</#list> void init(const std::vector<TIndex> &input_shapes){
} int n = 0;
char **a[1];
caffe2::GlobalInit(&n, a);
void init(const std::string &json_file, if (!std::ifstream(FLAGS_init_net).good()) {
const std::string &param_file, std::cerr << "\nNetwork loading failure, init_net file '" << FLAGS_init_net << "' does not exist." << std::endl;
const std::vector<std::string> &input_keys, exit(1);
const std::vector<std::vector<mx_uint>> &input_shapes, }
const bool &use_gpu){
BufferFile json_data(json_file); if (!std::ifstream(FLAGS_predict_net).good()) {
BufferFile param_data(param_file); std::cerr << "\nNetwork loading failure, predict_net file '" << FLAGS_predict_net << "' does not exist." << std::endl;
exit(1);
}
int dev_type = use_gpu ? 2 : 1; std::cout << "\nLoading network..." << std::endl;
int dev_id = 0;
handle = 0; // Read protobuf
CAFFE_ENFORCE(ReadProtoFromFile(FLAGS_init_net, &initNet));
CAFFE_ENFORCE(ReadProtoFromFile(FLAGS_predict_net, &predictNet));
if (json_data.GetLength() == 0 || // Set device type
param_data.GetLength() == 0) { #ifdef USE_GPU
std::exit(-1); predictNet.mutable_device_option()->set_device_type(CUDA);
} initNet.mutable_device_option()->set_device_type(CUDA);
std::cout << "== GPU mode selected " << " ==" << std::endl;
#else
predictNet.mutable_device_option()->set_device_type(CPU);
initNet.mutable_device_option()->set_device_type(CPU);
for(int i = 0; i < predictNet.op_size(); ++i){
predictNet.mutable_op(i)->mutable_device_option()->set_device_type(CPU);
}
for(int i = 0; i < initNet.op_size(); ++i){
initNet.mutable_op(i)->mutable_device_option()->set_device_type(CPU);
}
std::cout << "== CPU mode selected " << " ==" << std::endl;
#endif
const mx_uint num_input_nodes = input_keys.size(); // Load network
CAFFE_ENFORCE(workSpace.RunNetOnce(initNet));
CAFFE_ENFORCE(workSpace.CreateNet(predictNet));
std::cout << "== Network loaded " << " ==" << std::endl;
const char* input_keys_ptr[num_input_nodes]; input.Resize(input_shapes);
for(mx_uint i = 0; i < num_input_nodes; i++){
input_keys_ptr[i] = input_keys[i].c_str();
} }