Commit 7c707a98 authored by Nicola Gatto's avatar Nicola Gatto

Integrate Gluon generator

parent f13e436a
......@@ -19,6 +19,7 @@
<CNNTrain.version>0.2.6</CNNTrain.version>
<cnnarch-mxnet-generator.version>0.2.14-SNAPSHOT</cnnarch-mxnet-generator.version>
<cnnarch-caffe2-generator.version>0.2.11-SNAPSHOT</cnnarch-caffe2-generator.version>
<cnnarch-gluon-generator.version>0.1.5</cnnarch-gluon-generator.version>
<embedded-montiarc-math-opt-generator>0.1.4</embedded-montiarc-math-opt-generator>
<!-- .. Libraries .................................................. -->
......@@ -73,6 +74,12 @@
<version>${cnnarch-mxnet-generator.version}</version>
</dependency>
<dependency>
<groupId>de.monticore.lang.monticar</groupId>
<artifactId>cnnarch-gluon-generator</artifactId>
<version>${cnnarch-gluon-generator.version}</version>
</dependency>
<dependency>
<groupId>de.monticore.lang.monticar</groupId>
<artifactId>cnnarch-caffe2-generator</artifactId>
......
......@@ -2,6 +2,8 @@ package de.monticore.lang.monticar.emadl.generator;
import de.monticore.lang.monticar.cnnarch.CNNArchGenerator;
import de.monticore.lang.monticar.cnnarch.gluongenerator.CNNArch2Gluon;
import de.monticore.lang.monticar.cnnarch.gluongenerator.CNNTrain2Gluon;
import de.monticore.lang.monticar.cnnarch.mxnetgenerator.CNNArch2MxNet;
import de.monticore.lang.monticar.cnnarch.caffe2generator.CNNArch2Caffe2;
import de.monticore.lang.monticar.cnnarch.mxnetgenerator.CNNTrain2MxNet;
......@@ -30,6 +32,16 @@ public enum Backend {
public CNNTrainGenerator getCNNTrainGenerator() {
return new CNNTrain2Caffe2();
}
},
// Gluon backend: pairs the CNNArch2Gluon architecture generator with the
// CNNTrain2Gluon training-configuration generator.
GLUON{
    @Override
    public CNNArchGenerator getCNNArchGenerator() {
        return new CNNArch2Gluon();
    }
    @Override
    public CNNTrainGenerator getCNNTrainGenerator() {
        return new CNNTrain2Gluon();
    }
};
public abstract CNNArchGenerator getCNNArchGenerator();
......@@ -43,6 +55,9 @@ public enum Backend {
case "CAFFE2":
return Optional.of(CAFFE2);
case "GLUON":
return Optional.of(GLUON);
default:
return Optional.empty();
}
......@@ -52,8 +67,10 @@ public enum Backend {
switch (backend){
case CAFFE2:
return "CAFFE2";
case GLUON:
return "GLUON";
default:
return "MXNET";
}
}
}
}
\ No newline at end of file
......@@ -160,7 +160,7 @@ public class EMADLGenerator {
printWriter.println("#!/bin/bash");
printWriter.println("cd " + getGenerationTargetPath());
printWriter.println("mkdir --parents build");
printWriter.println("mkdir -p build");
printWriter.println("cd build");
printWriter.println("cmake ..");
printWriter.println("make");
......
......@@ -160,6 +160,31 @@ public class GenerationTest extends AbstractSymtabTest {
"CNNTrainer_mnist_mnistClassifier_net.py"));
}
@Test
public void testMnistClassifierForGluon() throws IOException, TemplateException {
    Log.getFindings().clear();
    // Generate the MNIST classifier with the GLUON backend; "-f n" and "-c n"
    // skip forced training and compilation so only code generation runs.
    String[] args = {"-m", "src/test/resources/models/", "-r", "mnist.MnistClassifier", "-b", "GLUON", "-f", "n", "-c", "n"};
    EMADLGeneratorCli.main(args);
    assertTrue(Log.getFindings().isEmpty());
    // Compare each generated file against the checked-in reference target code.
    // Fixed: "mnist_mnistClassifier_net.h" was listed twice; the duplicate
    // entry only re-checked the same file and has been removed.
    checkFilesAreEqual(
            Paths.get("./target/generated-sources-emadl"),
            Paths.get("./src/test/resources/target_code/gluon"),
            Arrays.asList(
                    "CNNBufferFile.h",
                    "CNNNet_mnist_mnistClassifier_net.py",
                    "mnist_mnistClassifier.cpp",
                    "mnist_mnistClassifier.h",
                    "CNNCreator_mnist_mnistClassifier_net.py",
                    "CNNPredictor_mnist_mnistClassifier_net.h",
                    "mnist_mnistClassifier_net.h",
                    "HelperA.h",
                    "CNNTranslator.h",
                    "mnist_mnistClassifier_calculateClass.h",
                    "CNNTrainer_mnist_mnistClassifier_net.py"));
}
@Test
public void testHashFunction() {
EMADLGenerator tester = new EMADLGenerator(Backend.MXNET);
......
/**
*
* ******************************************************************************
* MontiCAR Modeling Family, www.se-rwth.de
* Copyright (c) 2017, Software Engineering Group at RWTH Aachen,
* All rights reserved.
*
* This project is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3.0 of the License, or (at your option) any later version.
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this project. If not, see <http://www.gnu.org/licenses/>.
* *******************************************************************************
*/
package de.monticore.lang.monticar.emadl;
// Runs the shared IntegrationTest suite against the GLUON backend.
// The second constructor argument is the '#'-separated list of expected
// training hashes passed through to the base class.
public class IntegrationGluonTest extends IntegrationTest {
    public IntegrationGluonTest() {
        super("GLUON", "39253EC049D4A4E5FA0536AD34874B9D#1DBAEE1B1BD83FB7CB5F70AE91B29638#C4C23549E737A759721D6694C75D9771#5AF0CE68E408E8C1F000E49D72AC214A");
    }
}
#ifndef CNNBUFFERFILE_H
#define CNNBUFFERFILE_H
#include <stdio.h>
#include <iostream>
#include <fstream>
// Read file to buffer
// RAII wrapper that reads an entire file into a heap buffer.
// On failure GetLength() == 0 and GetBuffer() == nullptr.
class BufferFile {
 public :
    std::string file_path_;
    int length_;
    char* buffer_;

    explicit BufferFile(std::string file_path)
    :file_path_(file_path), length_(0), buffer_(nullptr) {

        std::ifstream ifs(file_path.c_str(), std::ios::in | std::ios::binary);
        if (!ifs) {
            std::cerr << "Can't open the file. Please check " << file_path << ". \n";
            return;
        }

        ifs.seekg(0, std::ios::end);
        length_ = static_cast<int>(ifs.tellg());
        ifs.seekg(0, std::ios::beg);
        std::cout << file_path.c_str() << " ... "<< length_ << " bytes\n";

        // sizeof(char) is 1 by definition; allocate exactly length_ bytes.
        buffer_ = new char[length_];
        ifs.read(buffer_, length_);
        ifs.close();
    }

    // The buffer is uniquely owned; copying would double-delete it
    // (Rule of Three), so copy operations are deleted.
    BufferFile(const BufferFile&) = delete;
    BufferFile& operator=(const BufferFile&) = delete;

    int GetLength() {
        return length_;
    }
    char* GetBuffer() {
        return buffer_;
    }

    ~BufferFile() {
        // delete[] on nullptr is a no-op, so no guard is needed.
        delete[] buffer_;
        buffer_ = nullptr;
    }
};
#endif // CNNBUFFERFILE_H
import mxnet as mx
import numpy as np
from mxnet import gluon
class Softmax(gluon.HybridBlock):
    """HybridBlock applying the softmax function to its input."""
    def __init__(self, **kwargs):
        super(Softmax, self).__init__(**kwargs)

    def hybrid_forward(self, F, x):
        return F.softmax(x)
class Split(gluon.HybridBlock):
    """HybridBlock splitting the input into num_outputs chunks along axis."""
    def __init__(self, num_outputs, axis=1, **kwargs):
        super(Split, self).__init__(**kwargs)
        with self.name_scope():
            self.axis = axis
            self.num_outputs = num_outputs

    def hybrid_forward(self, F, x):
        return F.split(data=x, axis=self.axis, num_outputs=self.num_outputs)
class Concatenate(gluon.HybridBlock):
    """HybridBlock concatenating all of its inputs along dimension dim."""
    def __init__(self, dim=1, **kwargs):
        super(Concatenate, self).__init__(**kwargs)
        with self.name_scope():
            self.dim = dim

    def hybrid_forward(self, F, *x):
        return F.concat(*x, dim=self.dim)
class ZScoreNormalization(gluon.HybridBlock):
    """Normalizes the input with fixed (non-trainable) mean and std:
    (x - data_mean) / data_std, broadcast over the batch dimension.
    """
    def __init__(self, data_mean, data_std, **kwargs):
        super(ZScoreNormalization, self).__init__(**kwargs)
        with self.name_scope():
            self.data_mean = self.params.get('data_mean', shape=data_mean.shape,
                init=mx.init.Constant(data_mean.asnumpy().tolist()), differentiable=False)
            # Fixed: the std parameter must use data_std.shape (it was
            # registered with data_mean.shape, which is only correct when
            # both arrays happen to have the same shape).
            self.data_std = self.params.get('data_std', shape=data_std.shape,
                init=mx.init.Constant(data_std.asnumpy().tolist()), differentiable=False)

    def hybrid_forward(self, F, x, data_mean, data_std):
        x = F.broadcast_sub(x, data_mean)
        x = F.broadcast_div(x, data_std)
        return x
class Padding(gluon.HybridBlock):
    """HybridBlock applying constant zero-padding with a fixed pad_width spec."""
    def __init__(self, padding, **kwargs):
        super(Padding, self).__init__(**kwargs)
        with self.name_scope():
            self.pad_width = padding

    def hybrid_forward(self, F, x):
        # Zero-pad according to the configured pad_width specification.
        return F.pad(data=x, mode='constant', pad_width=self.pad_width, constant_value=0)
class NoNormalization(gluon.HybridBlock):
    """Identity block used when no input normalization is configured."""
    def __init__(self, **kwargs):
        super(NoNormalization, self).__init__(**kwargs)

    def hybrid_forward(self, F, x):
        return x
class Net(gluon.HybridBlock):
    """LeNet-style CNN for the MNIST classifier: two conv/pool stages
    followed by two fully connected layers.

    data_mean / data_std: optional arrays enabling z-score input
    normalization; when omitted the input passes through unchanged.
    """
    def __init__(self, data_mean=None, data_std=None, **kwargs):
        super(Net, self).__init__(**kwargs)
        with self.name_scope():
            if not data_mean is None:
                # mean and std must be provided together
                assert(not data_std is None)
                self.input_normalization = ZScoreNormalization(data_mean=data_mean, data_std=data_std)
            else:
                self.input_normalization = NoNormalization()

            self.conv1_ = gluon.nn.Conv2D(channels=20,
                kernel_size=(5,5),
                strides=(1,1),
                use_bias=True)
            # conv1_, output shape: {[20,24,24]}

            self.pool1_ = gluon.nn.MaxPool2D(
                pool_size=(2,2),
                strides=(2,2))
            # pool1_, output shape: {[20,12,12]}

            self.conv2_ = gluon.nn.Conv2D(channels=50,
                kernel_size=(5,5),
                strides=(1,1),
                use_bias=True)
            # conv2_, output shape: {[50,8,8]}

            self.pool2_ = gluon.nn.MaxPool2D(
                pool_size=(2,2),
                strides=(2,2))
            # pool2_, output shape: {[50,4,4]}

            self.fc2_flatten = gluon.nn.Flatten()
            self.fc2_ = gluon.nn.Dense(units=500, use_bias=True)
            # fc2_, output shape: {[500,1,1]}

            self.relu2_ = gluon.nn.Activation(activation='relu')
            self.fc3_ = gluon.nn.Dense(units=10, use_bias=True)
            # fc3_, output shape: {[10,1,1]}

            # Marker naming the final activation; hybrid_forward itself
            # returns raw logits -- presumably the trainer/predictor reads
            # this to apply softmax externally (confirm against callers).
            self.last_layer = 'softmax'

    def hybrid_forward(self, F, x):
        # Forward pass; returns the raw fc3_ logits (no softmax applied here).
        image = self.input_normalization(x)
        conv1_ = self.conv1_(image)
        pool1_ = self.pool1_(conv1_)
        conv2_ = self.conv2_(pool1_)
        pool2_ = self.pool2_(conv2_)
        fc2_flatten_ = self.fc2_flatten(pool2_)
        fc2_ = self.fc2_(fc2_flatten_)
        relu2_ = self.relu2_(fc2_)
        fc3_ = self.fc3_(relu2_)
        return fc3_
#ifndef CNNPREDICTOR_MNIST_MNISTCLASSIFIER_NET
#define CNNPREDICTOR_MNIST_MNISTCLASSIFIER_NET
#include <mxnet/c_predict_api.h>
#include <cassert>
#include <string>
#include <vector>
#include <CNNBufferFile.h>
class CNNPredictor_mnist_mnistClassifier_net{
public:
const std::string json_file = "model/mnist.LeNetNetwork/model_newest-symbol.json";
const std::string param_file = "model/mnist.LeNetNetwork/model_newest-0000.params";
//const std::vector<std::string> input_keys = {"data"};
const std::vector<std::string> input_keys = {"image"};
const std::vector<std::vector<mx_uint>> input_shapes = {{1,1,28,28}};
const bool use_gpu = false;
PredictorHandle handle;
explicit CNNPredictor_mnist_mnistClassifier_net(){
init(json_file, param_file, input_keys, input_shapes, use_gpu);
}
~CNNPredictor_mnist_mnistClassifier_net(){
if(handle) MXPredFree(handle);
}
void predict(const std::vector<float> &image,
std::vector<float> &predictions){
MXPredSetInput(handle, "data", image.data(), image.size());
//MXPredSetInput(handle, "image", image.data(), image.size());
MXPredForward(handle);
mx_uint output_index;
mx_uint *shape = 0;
mx_uint shape_len;
size_t size;
output_index = 0;
MXPredGetOutputShape(handle, output_index, &shape, &shape_len);
size = 1;
for (mx_uint i = 0; i < shape_len; ++i) size *= shape[i];
assert(size == predictions.size());
MXPredGetOutput(handle, 0, &(predictions[0]), predictions.size());
}
void init(const std::string &json_file,
const std::string &param_file,
const std::vector<std::string> &input_keys,
const std::vector<std::vector<mx_uint>> &input_shapes,
const bool &use_gpu){
BufferFile json_data(json_file);
BufferFile param_data(param_file);
int dev_type = use_gpu ? 2 : 1;
int dev_id = 0;
handle = 0;
if (json_data.GetLength() == 0 ||
param_data.GetLength() == 0) {
std::exit(-1);
}
const mx_uint num_input_nodes = input_keys.size();
const char* input_keys_ptr[num_input_nodes];
for(mx_uint i = 0; i < num_input_nodes; i++){
input_keys_ptr[i] = input_keys[i].c_str();
}
mx_uint shape_data_size = 0;
mx_uint input_shape_indptr[input_shapes.size() + 1];
input_shape_indptr[0] = 0;
for(mx_uint i = 0; i < input_shapes.size(); i++){
input_shape_indptr[i+1] = input_shapes[i].size();
shape_data_size += input_shapes[i].size();
}
mx_uint input_shape_data[shape_data_size];
mx_uint index = 0;
for(mx_uint i = 0; i < input_shapes.size(); i++){
for(mx_uint j = 0; j < input_shapes[i].size(); j++){
input_shape_data[index] = input_shapes[i][j];
index++;
}
}
MXPredCreate((const char*)json_data.GetBuffer(),
(const char*)param_data.GetBuffer(),
static_cast<size_t>(param_data.GetLength()),
dev_type,
dev_id,
num_input_nodes,
input_keys_ptr,
input_shape_indptr,
input_shape_data,
&handle);
assert(handle);
}
};
#endif // CNNPREDICTOR_MNIST_MNISTCLASSIFIER_NET
import logging
import mxnet as mx
import CNNCreator_mnist_mnistClassifier_net

# Generated training entry point: builds the mnist_mnistClassifier network
# via its CNNCreator and trains it with the configured hyperparameters.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger()
    # Also log to train.log; NOTE(review): delay="true" is a (truthy) string,
    # not the boolean True -- it works, but only by accident of truthiness.
    handler = logging.FileHandler("train.log", "w", encoding=None, delay="true")
    logger.addHandler(handler)

    mnist_mnistClassifier_net = CNNCreator_mnist_mnistClassifier_net.CNNCreator_mnist_mnistClassifier_net()
    mnist_mnistClassifier_net.train(
        batch_size=64,
        num_epoch=11,
        context='gpu',
        eval_metric='accuracy',
        optimizer='adam',
        optimizer_params={
            # NOTE(review): 'weight_decay' and 'learning_rate_policy' are not
            # native MXNet Adam kwargs -- presumably translated inside the
            # CNNCreator's train(); confirm there.
            'epsilon': 1.0E-8,
            'weight_decay': 0.001,
            'beta1': 0.9,
            'beta2': 0.999,
            'learning_rate_policy': 'fixed',
            'learning_rate': 0.001}
    )
#ifndef CNNTRANSLATOR_H
#define CNNTRANSLATOR_H
#include <armadillo>
#include <cassert>
using namespace std;
using namespace arma;
// Converts between Armadillo linear-algebra types (Col/Row/Mat/Cube) and the
// flat vector<float> representation used by the CNN predictor.
// NOTE(review): Armadillo stores matrices column-major while the flat vectors
// are filled row-major; translateToMat compensates by filling a transposed
// matrix column-by-column and returning its transpose.
class CNNTranslator{
public:
    // Appends every element of a column vector to data (cast to float).
    template<typename T> static void addColToSTDVector(const Col<T> &source, vector<float> &data){
        for(size_t i = 0; i < source.n_elem; i++){
            data.push_back((float) source(i));
        }
    }

    // Appends a matrix row view (as produced by Mat::row) to data.
    template<typename T> static void addRowToSTDVector(const subview_row<T> &source, vector<float> &data){
        for(size_t i = 0; i < source.n_elem; i++){
            data.push_back((float) source(i));
        }
    }

    // Appends a row vector to data.
    template<typename T> static void addRowToSTDVector(const Row<T> &source, vector<float> &data){
        for(size_t i = 0; i < source.n_elem; i++){
            data.push_back((float) source(i));
        }
    }

    // Appends a matrix to data row by row (row-major order).
    template<typename T> static void addMatToSTDVector(const Mat<T> &source, vector<float> &data){
        for(size_t i = 0; i < source.n_rows; i++){
            addRowToSTDVector(source.row(i), data);
        }
    }

    // Flattens a column vector into a vector<float>.
    template<typename T> static vector<float> translate(const Col<T> &source){
        size_t size = source.n_elem;
        vector<float> data;
        data.reserve(size);
        addColToSTDVector(source, data);
        return data;
    }

    // Flattens a row vector into a vector<float>.
    template<typename T> static vector<float> translate(const Row<T> &source){
        size_t size = source.n_elem;
        vector<float> data;
        data.reserve(size);
        addRowToSTDVector(source, data);
        return data;
    }

    // Flattens a matrix into a vector<float> in row-major order.
    template<typename T> static vector<float> translate(const Mat<T> &source){
        size_t size = source.n_elem;
        vector<float> data;
        data.reserve(size);
        addMatToSTDVector(source, data);
        return data;
    }

    // Flattens a cube slice by slice, each slice in row-major order.
    template<typename T> static vector<float> translate(const Cube<T> &source){
        size_t size = source.n_elem;
        vector<float> data;
        data.reserve(size);
        for(size_t i = 0; i < source.n_slices; i++){
            addMatToSTDVector(source.slice(i), data);
        }
        return data;
    }

    // Rebuilds a column vector from flat data; shape must be {n_elem}.
    static vec translateToCol(const vector<float> &source, const vector<size_t> &shape){
        assert(shape.size() == 1);
        vec column(shape[0]);
        for(size_t i = 0; i < source.size(); i++){
            column(i) = (double) source[i];
        }
        return column;
    }

    // Rebuilds a matrix from flat row-major data; shape must be {rows, cols}.
    static mat translateToMat(const vector<float> &source, const vector<size_t> &shape){
        assert(shape.size() == 2);
        mat matrix(shape[1], shape[0]); //create transposed version of the matrix
        int startPos = 0;
        int endPos = matrix.n_rows;
        const vector<size_t> columnShape = {matrix.n_rows};
        // Each row-major row of the source becomes one column of the
        // transposed matrix; the final .t() restores the requested layout.
        for(size_t i = 0; i < matrix.n_cols; i++){
            vector<float> colSource(&source[startPos], &source[endPos]);
            matrix.col(i) = translateToCol(colSource, columnShape);
            startPos = endPos;
            endPos += matrix.n_rows;
        }
        return matrix.t();
    }

    // Rebuilds a cube from flat data; shape must be {slices, rows, cols}.
    static cube translateToCube(const vector<float> &source, const vector<size_t> &shape){
        assert(shape.size() == 3);
        cube cubeMatrix(shape[1], shape[2], shape[0]);
        const int matrixSize = shape[1] * shape[2];
        const vector<size_t> matrixShape = {shape[1], shape[2]};
        int startPos = 0;
        int endPos = matrixSize;
        // Rebuild one slice at a time from consecutive matrixSize chunks.
        for(size_t i = 0; i < cubeMatrix.n_slices; i++){
            vector<float> matrixSource(&source[startPos], &source[endPos]);
            cubeMatrix.slice(i) = translateToMat(matrixSource, matrixShape);
            startPos = endPos;
            endPos += matrixSize;
        }
        return cubeMatrix;
    }

    // Shape helpers: dimensions in the order expected by translateToCol/Mat/Cube.
    template<typename T> static vector<size_t> getShape(const Col<T> &source){
        return {source.n_elem};
    }

    template<typename T> static vector<size_t> getShape(const Row<T> &source){
        return {source.n_elem};
    }

    template<typename T> static vector<size_t> getShape(const Mat<T> &source){
        return {source.n_rows, source.n_cols};
    }

    template<typename T> static vector<size_t> getShape(const Cube<T> &source){
        return {source.n_slices, source.n_rows, source.n_cols};
    }
};
#endif
#ifndef HELPERA_H
#define HELPERA_H
#include <iostream>
#include "armadillo"
#include <stdarg.h>
#include <initializer_list>
#include <fstream>
using namespace arma;
#ifndef _FILESTRING_CONVERSION___A
#define _FILESTRING_CONVERSION___A
void toFileString(std::ofstream& myfile, mat A){
myfile << "[";
for (int i = 0; i < A.n_rows; i++){
for (int j = 0; j < A.n_cols; j++){
myfile << A(i,j);
if(j + 1 < A.n_cols){
myfile << ", ";
}
}
if(i + 1 < A.n_rows){
myfile << ";";
}
}
myfile << "]";
}
// Scalar overloads: write the value directly (default stream formatting).
void toFileString(std::ofstream& myfile, double A){
    myfile << A;
}
void toFileString(std::ofstream& myfile, float A){
    myfile << A;
}
void toFileString(std::ofstream& myfile, int A){
    myfile << A;
}
// Note: bool is written as 0/1 (no std::boolalpha applied).
void toFileString(std::ofstream& myfile, bool A){
    myfile << A;
}
// True when every element of X and Y differs by less than tol.
bool Is_close(mat& X, mat& Y, double tol)
{
    // abs() yields a matrix; the first max() reduces each column to a row
    // vector, the second max() reduces that row vector to a single scalar,
    // i.e. the largest element-wise absolute difference.
    const double largest_gap = arma::max(arma::max(arma::abs(X - Y)));
    return largest_gap < tol;
}
#endif
class HelperA{
public:
static mat getEigenVectors(mat A){
vec eigenValues;
mat eigenVectors;
eig_sym(eigenValues,eigenVectors,A);
return eigenVectors;
}
static vec getEigenValues(mat A){
vec eigenValues;
mat eigenVectors;
eig_sym(eigenValues,eigenVectors,A);
return eigenValues;
}
static mat getKMeansClusters(mat A, int k){
mat clusters;
kmeans(clusters,A.t(),k,random_subset,20,true);
/*printf("cluster centroid calculation done\n");
std::ofstream myfile;
myfile.open("data after cluster.txt");
myfile << A;
myfile.close();
std::ofstream myfile2;
myfile2.open("cluster centroids.txt");
myfile2 << clusters;
myfile2.close();*/