Commit 16023357 authored by Evgeny Kusmenko's avatar Evgeny Kusmenko

Merge branch 'integrate-gluon-backend' into 'master'

Integrate gluon backend

See merge request !23
parents 9953bcd8 18993f09
Pipeline #167552 failed with stages
in 1 minute and 11 seconds
......@@ -47,6 +47,12 @@ integrationCaffe2JobLinux:
script:
- mvn -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn -B clean install --settings settings.xml -Dtest=IntegrationCaffe2Test
integrationGluonJobLinux:
stage: linux
image: registry.git.rwth-aachen.de/monticore/embeddedmontiarc/generators/emadl2cpp/integrationtests/mxnet:v0.0.3
script:
- mvn -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn -B clean install --settings settings.xml -Dtest=IntegrationGluonTest
masterJobWindows:
stage: windows
......
......@@ -8,7 +8,7 @@
<groupId>de.monticore.lang.monticar</groupId>
<artifactId>embedded-montiarc-emadl-generator</artifactId>
<version>0.2.13</version>
<version>0.3.0</version>
<!-- == PROJECT DEPENDENCIES ============================================= -->
......@@ -19,6 +19,7 @@
<CNNTrain.version>0.2.6</CNNTrain.version>
<cnnarch-mxnet-generator.version>0.2.14-SNAPSHOT</cnnarch-mxnet-generator.version>
<cnnarch-caffe2-generator.version>0.2.11-SNAPSHOT</cnnarch-caffe2-generator.version>
<cnnarch-gluon-generator.version>0.1.6</cnnarch-gluon-generator.version>
<embedded-montiarc-math-opt-generator>0.1.4</embedded-montiarc-math-opt-generator>
<!-- .. Libraries .................................................. -->
......@@ -73,6 +74,12 @@
<version>${cnnarch-mxnet-generator.version}</version>
</dependency>
<dependency>
<groupId>de.monticore.lang.monticar</groupId>
<artifactId>cnnarch-gluon-generator</artifactId>
<version>${cnnarch-gluon-generator.version}</version>
</dependency>
<dependency>
<groupId>de.monticore.lang.monticar</groupId>
<artifactId>cnnarch-caffe2-generator</artifactId>
......
......@@ -2,6 +2,8 @@ package de.monticore.lang.monticar.emadl.generator;
import de.monticore.lang.monticar.cnnarch.CNNArchGenerator;
import de.monticore.lang.monticar.cnnarch.gluongenerator.CNNArch2Gluon;
import de.monticore.lang.monticar.cnnarch.gluongenerator.CNNTrain2Gluon;
import de.monticore.lang.monticar.cnnarch.mxnetgenerator.CNNArch2MxNet;
import de.monticore.lang.monticar.cnnarch.caffe2generator.CNNArch2Caffe2;
import de.monticore.lang.monticar.cnnarch.mxnetgenerator.CNNTrain2MxNet;
......@@ -30,6 +32,16 @@ public enum Backend {
public CNNTrainGenerator getCNNTrainGenerator() {
return new CNNTrain2Caffe2();
}
},
GLUON{
@Override
public CNNArchGenerator getCNNArchGenerator() {
return new CNNArch2Gluon();
}
@Override
public CNNTrainGenerator getCNNTrainGenerator() {
return new CNNTrain2Gluon();
}
};
public abstract CNNArchGenerator getCNNArchGenerator();
......@@ -43,6 +55,9 @@ public enum Backend {
case "CAFFE2":
return Optional.of(CAFFE2);
case "GLUON":
return Optional.of(GLUON);
default:
return Optional.empty();
}
......@@ -52,8 +67,10 @@ public enum Backend {
switch (backend){
case CAFFE2:
return "CAFFE2";
case GLUON:
return "GLUON";
default:
return "MXNET";
}
}
}
}
\ No newline at end of file
......@@ -160,7 +160,7 @@ public class EMADLGenerator {
printWriter.println("#!/bin/bash");
printWriter.println("cd " + getGenerationTargetPath());
printWriter.println("mkdir --parents build");
printWriter.println("mkdir -p build");
printWriter.println("cd build");
printWriter.println("cmake ..");
printWriter.println("make");
......
......@@ -160,6 +160,33 @@ public class GenerationTest extends AbstractSymtabTest {
"CNNTrainer_mnist_mnistClassifier_net.py"));
}
@Test
public void testMnistClassifierForGluon() throws IOException, TemplateException {
    // Generate the mnist classifier with the GLUON backend; "-f n" / "-c n"
    // disable forced training and compilation so only code generation runs.
    Log.getFindings().clear();
    String[] args = {"-m", "src/test/resources/models/", "-r", "mnist.MnistClassifier", "-b", "GLUON", "-f", "n", "-c", "n"};
    EMADLGeneratorCli.main(args);
    assertTrue(Log.getFindings().isEmpty());
    // Compare every generated artifact against the checked-in reference code.
    // Fix: "mnist_mnistClassifier_net.h" was listed twice; the duplicate
    // trailing entry has been removed.
    checkFilesAreEqual(
            Paths.get("./target/generated-sources-emadl"),
            Paths.get("./src/test/resources/target_code/gluon"),
            Arrays.asList(
                    "CNNBufferFile.h",
                    "CNNNet_mnist_mnistClassifier_net.py",
                    "mnist_mnistClassifier.cpp",
                    "mnist_mnistClassifier.h",
                    "CNNCreator_mnist_mnistClassifier_net.py",
                    "CNNPredictor_mnist_mnistClassifier_net.h",
                    "CNNDataLoader_mnist_mnistClassifier_net.py",
                    "supervised_trainer.py",
                    "mnist_mnistClassifier_net.h",
                    "HelperA.h",
                    "CNNTranslator.h",
                    "mnist_mnistClassifier_calculateClass.h",
                    "CNNTrainer_mnist_mnistClassifier_net.py"));
}
@Test
public void testHashFunction() {
EMADLGenerator tester = new EMADLGenerator(Backend.MXNET);
......
/**
*
* ******************************************************************************
* MontiCAR Modeling Family, www.se-rwth.de
* Copyright (c) 2017, Software Engineering Group at RWTH Aachen,
* All rights reserved.
*
* This project is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3.0 of the License, or (at your option) any later version.
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this project. If not, see <http://www.gnu.org/licenses/>.
* *******************************************************************************
*/
package de.monticore.lang.monticar.emadl;
/**
 * Integration test for the GLUON backend.
 *
 * Runs the shared {@link IntegrationTest} suite with backend id "GLUON".
 * NOTE(review): the second argument appears to be four '#'-separated
 * training hashes, one per test model — confirm against how
 * IntegrationTest consumes {@code trainingHash}.
 */
public class IntegrationGluonTest extends IntegrationTest {
    public IntegrationGluonTest() {
        super("GLUON", "39253EC049D4A4E5FA0536AD34874B9D#1DBAEE1B1BD83FB7CB5F70AE91B29638#C4C23549E737A759721D6694C75D9771#5AF0CE68E408E8C1F000E49D72AC214A");
    }
}
......@@ -25,9 +25,13 @@ import de.monticore.lang.monticar.emadl.generator.EMADLGenerator;
import de.monticore.lang.monticar.emadl.generator.EMADLGeneratorCli;
import de.se_rwth.commons.logging.Log;
import freemarker.template.TemplateException;
import org.apache.commons.io.FileUtils;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.file.Files;
......@@ -44,6 +48,20 @@ public abstract class IntegrationTest extends AbstractSymtabTest {
private String backend;
private String trainingHash;
@BeforeClass
public static void setupClass() throws IOException {
    // Remove any "model" directory left over from a previous run so the
    // tests always start from a clean state.
    final File modelDirectory = new File("model");
    if (modelDirectory.exists()) {
        FileUtils.deleteDirectory(modelDirectory);
    }
}
@AfterClass
public static void tearDown() throws IOException {
    // Clean up the "model" directory produced while the tests ran.
    final File modelDirectory = new File("model");
    if (modelDirectory.exists()) {
        FileUtils.deleteDirectory(modelDirectory);
    }
}
public IntegrationTest(String backend, String trainingHash) {
this.backend = backend;
this.trainingHash = trainingHash;
......@@ -116,6 +134,18 @@ public abstract class IntegrationTest extends AbstractSymtabTest {
EMADLGeneratorCli.main(args);
assertTrue(Log.getFindings().size() == 1);
assertTrue(Log.getFindings().get(0).getMsg().contains("skipped"));
deleteInstanceTestCifarHashFile();
}
/**
 * Deletes the training-hash file written for the instanceTestCifar model so
 * that later runs do not skip training because of a stale hash.
 *
 * Fixes: the previous version used {@code assertFalse("msg", true)} (an
 * always-failing assertion misused as fail()) and caught {@code Exception}
 * broadly while dropping the cause. We now catch the narrow
 * {@code IOException} thrown by {@link java.nio.file.Files#delete} and fail
 * with the cause attached.
 */
private void deleteInstanceTestCifarHashFile() {
    final Path instanceTestCifarHashFile
            = Paths.get("./target/generated-sources-emadl/instanceTestCifar/CifarNetwork.training_hash");
    try {
        Files.delete(instanceTestCifarHashFile);
    }
    catch (IOException e) {
        // AssertionError(String, Throwable) preserves the cause and needs no
        // extra JUnit import.
        throw new AssertionError("Could not delete hash file", e);
    }
}
@Test
......@@ -130,7 +160,4 @@ public abstract class IntegrationTest extends AbstractSymtabTest {
deleteHashFile();
}
}
#ifndef CNNBUFFERFILE_H
#define CNNBUFFERFILE_H

#include <stdio.h>

#include <iostream>
#include <fstream>
#include <string>  // was relied on transitively; include it explicitly

// Read file to buffer.
//
// Loads a whole file into a heap-allocated byte buffer. On any failure the
// object degrades to the empty state (length 0, NULL buffer) after printing a
// diagnostic, so callers test GetLength() instead of handling exceptions.
class BufferFile {
 public :
    std::string file_path_;  // path the buffer was loaded from
    int length_;             // number of bytes in buffer_; 0 on failure
    char* buffer_;           // owned heap buffer; NULL on failure

    explicit BufferFile(std::string file_path)
      : file_path_(file_path), length_(0), buffer_(NULL) {
        std::ifstream ifs(file_path.c_str(), std::ios::in | std::ios::binary);
        if (!ifs) {
            std::cerr << "Can't open the file. Please check " << file_path << ". \n";
            return;
        }

        // Determine the file size by seeking to the end.
        ifs.seekg(0, std::ios::end);
        length_ = static_cast<int>(ifs.tellg());
        ifs.seekg(0, std::ios::beg);
        std::cout << file_path.c_str() << " ... " << length_ << " bytes\n";

        buffer_ = new char[length_];
        ifs.read(buffer_, length_);
        // Fix: the original never checked whether the read succeeded; a short
        // read would leave the tail of the buffer uninitialized.
        if (!ifs) {
            std::cerr << "Could not read the full file " << file_path << ". \n";
            delete[] buffer_;
            buffer_ = NULL;
            length_ = 0;
        }
        ifs.close();
    }

    int GetLength() {
        return length_;
    }
    char* GetBuffer() {
        return buffer_;
    }

    ~BufferFile() {
        if (buffer_) {
            delete[] buffer_;
            buffer_ = NULL;
        }
    }

 private:
    // Non-copyable: the class owns buffer_ with a raw pointer; a shallow copy
    // would lead to a double delete in ~BufferFile().
    BufferFile(const BufferFile&);
    BufferFile& operator=(const BufferFile&);
};

#endif // CNNBUFFERFILE_H
import mxnet as mx
import logging
import os
from CNNNet_mnist_mnistClassifier_net import Net
class CNNCreator_mnist_mnistClassifier_net:
    """Builds, checkpoints and restores the Gluon network for mnist.LeNetNetwork."""

    # Checkpoint directory and file-name prefix shared by load() and construct().
    _model_dir_ = "model/mnist.LeNetNetwork/"
    _model_prefix_ = "model"
    _input_shapes_ = [(1,28,28)]

    def __init__(self):
        self.weight_initializer = mx.init.Normal()
        self.net = None

    def load(self, context):
        """Restore the newest numbered checkpoint from _model_dir_.

        Returns the epoch of the restored checkpoint, or 0 when none exists.
        `context` is currently unused here (parameters load onto the default
        context).
        """
        # Drop stale "newest" exports first so only numbered checkpoints
        # (<prefix>-<epoch>.params) are considered below. Best-effort: a
        # missing file is fine.
        for stale_suffix in ("_newest-0000.params", "_newest-symbol.json"):
            try:
                os.remove(self._model_dir_ + self._model_prefix_ + stale_suffix)
            except OSError:
                pass

        last_epoch = 0
        newest_param_file = None
        if os.path.isdir(self._model_dir_):
            for candidate in os.listdir(self._model_dir_):
                if ".params" in candidate and self._model_prefix_ in candidate:
                    epoch = int(candidate.replace(".params", "").replace(self._model_prefix_ + "-", ""))
                    if epoch > last_epoch:
                        last_epoch = epoch
                        newest_param_file = candidate

        if newest_param_file is None:
            return 0
        logging.info("Loading checkpoint: " + newest_param_file)
        self.net.load_parameters(self._model_dir_ + newest_param_file)
        return last_epoch

    def construct(self, context, data_mean=None, data_std=None):
        """Create the network, initialize its parameters and export epoch 0."""
        self.net = Net(data_mean=data_mean, data_std=data_std)
        self.net.collect_params().initialize(self.weight_initializer, ctx=context)
        self.net.hybridize()
        # One forward pass with a zero batch materializes the symbolic graph
        # so export() can serialize it.
        self.net(mx.nd.zeros((1,) + self._input_shapes_[0], ctx=context))

        if not os.path.exists(self._model_dir_):
            os.makedirs(self._model_dir_)

        self.net.export(self._model_dir_ + self._model_prefix_, epoch=0)
import os
import h5py
import mxnet as mx
import logging
import sys
class mnist_mnistClassifier_netDataLoader:
    """Loads the HDF5 train/test datasets for mnist.LeNetNetwork as MXNet iterators."""

    _input_names_ = ['image']
    _output_names_ = ['predictions_label']

    def __init__(self):
        # Directory that must contain train.h5 and may contain test.h5.
        self._data_dir = "data/mnist.LeNetNetwork/"

    def load_data(self, batch_size):
        """Return (train_iter, test_iter, data_mean, data_std).

        test_iter is None when no test.h5 exists. Mean/std are computed
        per-pixel over the whole training set; std is offset by 1e-5 to avoid
        division by zero during normalization.
        """
        train_h5, test_h5 = self.load_h5_files()

        data_mean = train_h5[self._input_names_[0]][:].mean(axis=0)
        data_std = train_h5[self._input_names_[0]][:].std(axis=0) + 1e-5

        train_iter = mx.io.NDArrayIter(train_h5[self._input_names_[0]],
                                       train_h5[self._output_names_[0]],
                                       batch_size=batch_size,
                                       data_name=self._input_names_[0],
                                       label_name=self._output_names_[0])
        test_iter = None
        # Fix: idiomatic identity check (was `test_h5 != None`).
        if test_h5 is not None:
            test_iter = mx.io.NDArrayIter(test_h5[self._input_names_[0]],
                                          test_h5[self._output_names_[0]],
                                          batch_size=batch_size,
                                          data_name=self._input_names_[0],
                                          label_name=self._output_names_[0])
        return train_iter, test_iter, data_mean, data_std

    def load_h5_files(self):
        """Open train.h5 (required) and test.h5 (optional); return (train_h5, test_h5).

        Exits the process with status 1 when train.h5 is missing or either
        file lacks the expected input/output datasets.
        """
        train_h5 = None
        test_h5 = None
        train_path = self._data_dir + "train.h5"
        test_path = self._data_dir + "test.h5"
        if os.path.isfile(train_path):
            train_h5 = h5py.File(train_path, 'r')
            if not (self._input_names_[0] in train_h5 and self._output_names_[0] in train_h5):
                logging.error("The HDF5 file '" + os.path.abspath(train_path) + "' has to contain the datasets: "
                              + "'" + self._input_names_[0] + "', '" + self._output_names_[0] + "'")
                sys.exit(1)
            # Fix: removed dead `test_iter = None` leftover — the iterator is
            # built in load_data(), not here.
            if os.path.isfile(test_path):
                test_h5 = h5py.File(test_path, 'r')
                if not (self._input_names_[0] in test_h5 and self._output_names_[0] in test_h5):
                    logging.error("The HDF5 file '" + os.path.abspath(test_path) + "' has to contain the datasets: "
                                  + "'" + self._input_names_[0] + "', '" + self._output_names_[0] + "'")
                    sys.exit(1)
            else:
                logging.warning("Couldn't load test set. File '" + os.path.abspath(test_path) + "' does not exist.")
            return train_h5, test_h5
        else:
            logging.error("Data loading failure. File '" + os.path.abspath(train_path) + "' does not exist.")
            sys.exit(1)
import mxnet as mx
import numpy as np
from mxnet import gluon
class Softmax(gluon.HybridBlock):
    """Stateless block applying the softmax function to its input."""
    def __init__(self, **kwargs):
        super(Softmax, self).__init__(**kwargs)

    def hybrid_forward(self, F, x):
        # F is the active backend namespace (mx.nd or mx.sym once hybridized).
        return F.softmax(x)
class Split(gluon.HybridBlock):
    """Splits the input into `num_outputs` equal slices along `axis`."""

    def __init__(self, num_outputs, axis=1, **kwargs):
        super(Split, self).__init__(**kwargs)
        with self.name_scope():
            self.axis = axis
            self.num_outputs = num_outputs

    def hybrid_forward(self, F, x):
        # Delegate to the backend split operator of the active namespace F.
        return F.split(data=x, num_outputs=self.num_outputs, axis=self.axis)
class Concatenate(gluon.HybridBlock):
    """Concatenates all of its inputs along dimension `dim`."""

    def __init__(self, dim=1, **kwargs):
        super(Concatenate, self).__init__(**kwargs)
        with self.name_scope():
            self.dim = dim

    def hybrid_forward(self, F, *x):
        # *x: variable number of input tensors, joined along self.dim.
        return F.concat(*x, dim=self.dim)
class ZScoreNormalization(gluon.HybridBlock):
    """Normalizes the input with fixed per-element statistics:
    out = (x - data_mean) / data_std.

    The statistics are registered as non-differentiable parameters so they
    are saved/loaded with the model but never updated by a trainer.
    """
    def __init__(self, data_mean, data_std, **kwargs):
        super(ZScoreNormalization, self).__init__(**kwargs)
        with self.name_scope():
            self.data_mean = self.params.get('data_mean', shape=data_mean.shape,
                                             init=mx.init.Constant(data_mean.asnumpy().tolist()), differentiable=False)
            # Fix: shape was previously taken from data_mean.shape; use
            # data_std's own shape so mismatched statistics fail fast at
            # construction time (identical behavior when shapes agree, which
            # is the case for the generated mean/std pair).
            self.data_std = self.params.get('data_std', shape=data_std.shape,
                                            init=mx.init.Constant(data_std.asnumpy().tolist()), differentiable=False)

    def hybrid_forward(self, F, x, data_mean, data_std):
        # broadcast_* lets per-pixel statistics apply across the batch axis.
        x = F.broadcast_sub(x, data_mean)
        x = F.broadcast_div(x, data_std)
        return x
class Padding(gluon.HybridBlock):
    """Zero-pads the input according to a fixed pad_width specification."""

    def __init__(self, padding, **kwargs):
        super(Padding, self).__init__(**kwargs)
        with self.name_scope():
            self.pad_width = padding

    def hybrid_forward(self, F, x):
        # Constant (zero) padding; pad_width follows the F.pad layout of two
        # values per axis of the input.
        return F.pad(data=x,
                     mode='constant',
                     pad_width=self.pad_width,
                     constant_value=0)
class NoNormalization(gluon.HybridBlock):
    """Identity block used when no normalization statistics are available."""
    def __init__(self, **kwargs):
        super(NoNormalization, self).__init__(**kwargs)

    def hybrid_forward(self, F, x):
        # Pass-through: same interface as ZScoreNormalization, no effect.
        return x
class Net(gluon.HybridBlock):
    """LeNet-style network generated for mnist.LeNetNetwork.

    Input: one image of shape (1, 28, 28) (after optional z-score
    normalization). Output: the raw 10-unit scores of the final dense layer.
    NOTE(review): self.last_layer records 'softmax' but hybrid_forward
    returns fc3_ without applying it — presumably softmax is applied by the
    loss/inference code outside this block; confirm against the trainer.
    """
    def __init__(self, data_mean=None, data_std=None, **kwargs):
        super(Net, self).__init__(**kwargs)
        with self.name_scope():
            # Normalize with dataset statistics when provided, else pass through.
            if not data_mean is None:
                assert(not data_std is None)
                self.input_normalization = ZScoreNormalization(data_mean=data_mean, data_std=data_std)
            else:
                self.input_normalization = NoNormalization()

            self.conv1_ = gluon.nn.Conv2D(channels=20,
                kernel_size=(5,5),
                strides=(1,1),
                use_bias=True)
            # conv1_, output shape: {[20,24,24]}

            self.pool1_ = gluon.nn.MaxPool2D(
                pool_size=(2,2),
                strides=(2,2))
            # pool1_, output shape: {[20,12,12]}

            self.conv2_ = gluon.nn.Conv2D(channels=50,
                kernel_size=(5,5),
                strides=(1,1),
                use_bias=True)
            # conv2_, output shape: {[50,8,8]}

            self.pool2_ = gluon.nn.MaxPool2D(
                pool_size=(2,2),
                strides=(2,2))
            # pool2_, output shape: {[50,4,4]}

            self.fc2_flatten = gluon.nn.Flatten()
            self.fc2_ = gluon.nn.Dense(units=500, use_bias=True)
            # fc2_, output shape: {[500,1,1]}

            self.relu2_ = gluon.nn.Activation(activation='relu')
            self.fc3_ = gluon.nn.Dense(units=10, use_bias=True)
            # fc3_, output shape: {[10,1,1]}

            # Marker consumed elsewhere; no softmax layer is created here.
            self.last_layer = 'softmax'

    def hybrid_forward(self, F, x):
        # Feed-forward chain: normalize -> conv/pool x2 -> flatten -> dense.
        image = self.input_normalization(x)
        conv1_ = self.conv1_(image)
        pool1_ = self.pool1_(conv1_)
        conv2_ = self.conv2_(pool1_)
        pool2_ = self.pool2_(conv2_)
        fc2_flatten_ = self.fc2_flatten(pool2_)
        fc2_ = self.fc2_(fc2_flatten_)
        relu2_ = self.relu2_(fc2_)
        fc3_ = self.fc3_(relu2_)
        return fc3_
#ifndef CNNPREDICTOR_MNIST_MNISTCLASSIFIER_NET
#define CNNPREDICTOR_MNIST_MNISTCLASSIFIER_NET

#include <mxnet/c_predict_api.h>

#include <cassert>
#include <string>
#include <vector>

#include <CNNBufferFile.h>

// Thin RAII wrapper around the MXNet C predict API for the exported
// mnist.LeNetNetwork model: loads symbol/params on construction, frees the
// predictor handle on destruction.
class CNNPredictor_mnist_mnistClassifier_net{
public:
    const std::string json_file = "model/mnist.LeNetNetwork/model_newest-symbol.json";
    const std::string param_file = "model/mnist.LeNetNetwork/model_newest-0000.params";
    //const std::vector<std::string> input_keys = {"data"};
    const std::vector<std::string> input_keys = {"image"};
    // One shape per input: batch 1, 1 channel, 28x28 pixels.
    const std::vector<std::vector<mx_uint>> input_shapes = {{1,1,28,28}};
    const bool use_gpu = false;

    PredictorHandle handle;

    explicit CNNPredictor_mnist_mnistClassifier_net(){
        init(json_file, param_file, input_keys, input_shapes, use_gpu);
    }

    ~CNNPredictor_mnist_mnistClassifier_net(){
        if(handle) MXPredFree(handle);
    }

    // Runs one forward pass: feeds `image`, executes the graph and copies
    // output 0 into `predictions`, which must already be sized to the output
    // element count (checked with assert).
    void predict(const std::vector<float> &image,
                 std::vector<float> &predictions){
        // NOTE(review): the input is set under the key "data" although
        // input_keys declares "image" — the commented-out lines suggest this
        // was toggled; confirm which name the exported graph actually uses.
        MXPredSetInput(handle, "data", image.data(), image.size());
        //MXPredSetInput(handle, "image", image.data(), image.size());

        MXPredForward(handle);

        mx_uint output_index;
        mx_uint *shape = 0;
        mx_uint shape_len;
        size_t size;

        output_index = 0;
        MXPredGetOutputShape(handle, output_index, &shape, &shape_len);
        // Total output element count = product of the output shape dims.
        size = 1;
        for (mx_uint i = 0; i < shape_len; ++i) size *= shape[i];
        assert(size == predictions.size());

        MXPredGetOutput(handle, 0, &(predictions[0]), predictions.size());
    }

    // Creates the predictor handle from the serialized symbol (json_file) and
    // weights (param_file). Exits the process when either file is missing or
    // empty (BufferFile reports length 0 in that case).
    void init(const std::string &json_file,
              const std::string &param_file,
              const std::vector<std::string> &input_keys,
              const std::vector<std::vector<mx_uint>> &input_shapes,
              const bool &use_gpu){
        BufferFile json_data(json_file);
        BufferFile param_data(param_file);

        // MXNet device types: 1 = CPU, 2 = GPU.
        int dev_type = use_gpu ? 2 : 1;
        int dev_id = 0;

        handle = 0;

        if (json_data.GetLength() == 0 ||
            param_data.GetLength() == 0) {
            std::exit(-1);
        }

        const mx_uint num_input_nodes = input_keys.size();

        // Variable-length arrays are a compiler extension (GCC/Clang), not
        // standard C++ — fine for the generated build, noted for portability.
        const char* input_keys_ptr[num_input_nodes];
        for(mx_uint i = 0; i < num_input_nodes; i++){
            input_keys_ptr[i] = input_keys[i].c_str();
        }

        // Flatten input_shapes into the indptr/data layout MXPredCreate
        // expects (CSR-style: indptr[i]..indptr[i+1] indexes shape i).
        // NOTE(review): indptr[i+1] is set to the size of shape i, not the
        // cumulative sum — correct for the single input used here, but would
        // be wrong for multiple inputs; confirm before generalizing.
        mx_uint shape_data_size = 0;
        mx_uint input_shape_indptr[input_shapes.size() + 1];
        input_shape_indptr[0] = 0;
        for(mx_uint i = 0; i < input_shapes.size(); i++){
            input_shape_indptr[i+1] = input_shapes[i].size();
            shape_data_size += input_shapes[i].size();
        }

        mx_uint input_shape_data[shape_data_size];
        mx_uint index = 0;
        for(mx_uint i = 0; i < input_shapes.size(); i++){
            for(mx_uint j = 0; j < input_shapes[i].size(); j++){
                input_shape_data[index] = input_shapes[i][j];
                index++;
            }
        }

        MXPredCreate((const char*)json_data.GetBuffer(),
                     (const char*)param_data.GetBuffer(),
                     static_cast<size_t>(param_data.GetLength()),
                     dev_type,
                     dev_id,
                     num_input_nodes,
                     input_keys_ptr,
                     input_shape_indptr,
                     input_shape_data,
                     &handle);
        assert(handle);
    }
};

#endif // CNNPREDICTOR_MNIST_MNISTCLASSIFIER_NET
import logging
import mxnet as mx
import supervised_trainer
import CNNCreator_mnist_mnistClassifier_net
import CNNDataLoader_mnist_mnistClassifier_net
if __name__ == "__main__":
    # Log everything (DEBUG and up) to both console and train.log.
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger()
    # delay="true" is a truthy string: file creation is deferred until the
    # first record is actually logged.
    handler = logging.FileHandler("train.log", "w", encoding=None, delay="true")
    logger.addHandler(handler)

    # Wire together the generated creator (network), loader (HDF5 data) and
    # the shared supervised trainer.
    mnist_mnistClassifier_net_creator = CNNCreator_mnist_mnistClassifier_net.CNNCreator_mnist_mnistClassifier_net()
    mnist_mnistClassifier_net_loader = CNNDataLoader_mnist_mnistClassifier_net.mnist_mnistClassifier_netDataLoader()
    mnist_mnistClassifier_net_trainer = supervised_trainer.CNNSupervisedTrainer(mnist_mnistClassifier_net_loader,
                                                                                mnist_mnistClassifier_net_creator)

    # NOTE(review): context='gpu' requires a CUDA-enabled MXNet build; the
    # optimizer_params keys ('weight_decay', 'learning_rate_policy', ...) are
    # presumably translated to MXNet optimizer arguments inside
    # supervised_trainer — confirm there.
    mnist_mnistClassifier_net_trainer.train(
        batch_size=64,
        num_epoch=11,
        context='gpu',
        eval_metric='accuracy',
        optimizer='adam',
        optimizer_params={
            'epsilon': 1.0E-8,
            'weight_decay': 0.001,
            'beta1': 0.9,
            'beta2': 0.999,
            'learning_rate_policy': 'fixed',
            'learning_rate': 0.001}
    )
#ifndef CNNTRANSLATOR_H
#define CNNTRANSLATOR_H
#include <armadillo>
#include <cassert>
using namespace std;
using namespace arma;
class CNNTranslator{
public:
template<typename T> static void addColToSTDVector(const Col<T> &source, vector<float> &data){
for(size_t i = 0; i < source.n_elem; i++){
data.push_back((float) source(i));