diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 628562800985a55c7cebfef471eee240e54d9bb4..593bfc6b8bb19ec328e8f9fe7ee1205f0661ff13 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -47,6 +47,12 @@ integrationCaffe2JobLinux:
script:
- mvn -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn -B clean install --settings settings.xml -Dtest=IntegrationCaffe2Test
+integrationGluonJobLinux:
+ stage: linux
+ image: registry.git.rwth-aachen.de/monticore/embeddedmontiarc/generators/emadl2cpp/integrationtests/mxnet:v0.0.3
+ script:
+ - mvn -Dorg.slf4j.simpleLogger.log.org.apache.maven.cli.transfer.Slf4jMavenTransferListener=warn -B clean install --settings settings.xml -Dtest=IntegrationGluonTest
+
masterJobWindows:
stage: windows
diff --git a/pom.xml b/pom.xml
index c4a76cc72652c6b86c7511cffd5206939f88c772..007cea102930abef4a921ddc0d977b76dc812334 100644
--- a/pom.xml
+++ b/pom.xml
@@ -8,7 +8,7 @@
de.monticore.lang.monticar
embedded-montiarc-emadl-generator
- 0.2.13
+ 0.3.0
@@ -19,6 +19,7 @@
0.2.6
0.2.14-SNAPSHOT
0.2.11-SNAPSHOT
+ 0.1.6
0.1.4
@@ -73,6 +74,12 @@
${cnnarch-mxnet-generator.version}
+
+ de.monticore.lang.monticar
+ cnnarch-gluon-generator
+ ${cnnarch-gluon-generator.version}
+
+
de.monticore.lang.monticar
cnnarch-caffe2-generator
diff --git a/src/main/java/de/monticore/lang/monticar/emadl/generator/Backend.java b/src/main/java/de/monticore/lang/monticar/emadl/generator/Backend.java
index 4e40a4e92df3ecc40ebcdada5ae302d2ef562757..f08096e066c95814709224030141176d89bb043e 100644
--- a/src/main/java/de/monticore/lang/monticar/emadl/generator/Backend.java
+++ b/src/main/java/de/monticore/lang/monticar/emadl/generator/Backend.java
@@ -2,6 +2,8 @@ package de.monticore.lang.monticar.emadl.generator;
import de.monticore.lang.monticar.cnnarch.CNNArchGenerator;
+import de.monticore.lang.monticar.cnnarch.gluongenerator.CNNArch2Gluon;
+import de.monticore.lang.monticar.cnnarch.gluongenerator.CNNTrain2Gluon;
import de.monticore.lang.monticar.cnnarch.mxnetgenerator.CNNArch2MxNet;
import de.monticore.lang.monticar.cnnarch.caffe2generator.CNNArch2Caffe2;
import de.monticore.lang.monticar.cnnarch.mxnetgenerator.CNNTrain2MxNet;
@@ -30,6 +32,16 @@ public enum Backend {
public CNNTrainGenerator getCNNTrainGenerator() {
return new CNNTrain2Caffe2();
}
+ },
+ GLUON{
+ @Override
+ public CNNArchGenerator getCNNArchGenerator() {
+ return new CNNArch2Gluon();
+ }
+ @Override
+ public CNNTrainGenerator getCNNTrainGenerator() {
+ return new CNNTrain2Gluon();
+ }
};
public abstract CNNArchGenerator getCNNArchGenerator();
@@ -43,6 +55,9 @@ public enum Backend {
case "CAFFE2":
return Optional.of(CAFFE2);
+ case "GLUON":
+ return Optional.of(GLUON);
+
default:
return Optional.empty();
}
@@ -52,8 +67,10 @@ public enum Backend {
switch (backend){
case CAFFE2:
return "CAFFE2";
+ case GLUON:
+ return "GLUON";
default:
return "MXNET";
}
}
-}
+}
\ No newline at end of file
diff --git a/src/main/java/de/monticore/lang/monticar/emadl/generator/EMADLGenerator.java b/src/main/java/de/monticore/lang/monticar/emadl/generator/EMADLGenerator.java
index 5da6e5acaf256ea4d89b25f6637ce09bffc6f113..32fa3be35b65656ae0b0df448b38fb958e4e9f2d 100644
--- a/src/main/java/de/monticore/lang/monticar/emadl/generator/EMADLGenerator.java
+++ b/src/main/java/de/monticore/lang/monticar/emadl/generator/EMADLGenerator.java
@@ -160,7 +160,7 @@ public class EMADLGenerator {
printWriter.println("#!/bin/bash");
printWriter.println("cd " + getGenerationTargetPath());
- printWriter.println("mkdir --parents build");
+ printWriter.println("mkdir -p build");
printWriter.println("cd build");
printWriter.println("cmake ..");
printWriter.println("make");
diff --git a/src/test/java/de/monticore/lang/monticar/emadl/GenerationTest.java b/src/test/java/de/monticore/lang/monticar/emadl/GenerationTest.java
index 7e762c1d4043ab976b4abcbcc6f1ce5771833f32..c6f2e784ef89d9262257a1e5483e01d687d0b2fc 100644
--- a/src/test/java/de/monticore/lang/monticar/emadl/GenerationTest.java
+++ b/src/test/java/de/monticore/lang/monticar/emadl/GenerationTest.java
@@ -160,6 +160,33 @@ public class GenerationTest extends AbstractSymtabTest {
"CNNTrainer_mnist_mnistClassifier_net.py"));
}
+    /**
+     * Generates the mnist classifier with the GLUON backend ("-f n", "-c n":
+     * presumably disable training and compilation -- confirm against
+     * EMADLGeneratorCli) and compares the emitted sources against the
+     * checked-in expected files under src/test/resources/target_code/gluon.
+     */
+    @Test
+    public void testMnistClassifierForGluon() throws IOException, TemplateException {
+        Log.getFindings().clear();
+        String[] args = {"-m", "src/test/resources/models/", "-r", "mnist.MnistClassifier", "-b", "GLUON", "-f", "n", "-c", "n"};
+        EMADLGeneratorCli.main(args);
+        assertTrue(Log.getFindings().isEmpty());
+
+        checkFilesAreEqual(
+            Paths.get("./target/generated-sources-emadl"),
+            Paths.get("./src/test/resources/target_code/gluon"),
+            Arrays.asList(
+                "CNNBufferFile.h",
+                "CNNNet_mnist_mnistClassifier_net.py",
+                "mnist_mnistClassifier.cpp",
+                "mnist_mnistClassifier.h",
+                "CNNCreator_mnist_mnistClassifier_net.py",
+                "CNNPredictor_mnist_mnistClassifier_net.h",
+                "CNNDataLoader_mnist_mnistClassifier_net.py",
+                "supervised_trainer.py",
+                "mnist_mnistClassifier_net.h",
+                "HelperA.h",
+                "CNNTranslator.h",
+                "mnist_mnistClassifier_calculateClass.h",
+                // "mnist_mnistClassifier_net.h" was listed a second time here;
+                // duplicate entry removed.
+                "CNNTrainer_mnist_mnistClassifier_net.py"));
+    }
+
@Test
public void testHashFunction() {
EMADLGenerator tester = new EMADLGenerator(Backend.MXNET);
diff --git a/src/test/java/de/monticore/lang/monticar/emadl/IntegrationGluonTest.java b/src/test/java/de/monticore/lang/monticar/emadl/IntegrationGluonTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..e6ce78003a07ff8f18f83b19ba5b36f1780f8d02
--- /dev/null
+++ b/src/test/java/de/monticore/lang/monticar/emadl/IntegrationGluonTest.java
@@ -0,0 +1,27 @@
+/**
+ *
+ * ******************************************************************************
+ * MontiCAR Modeling Family, www.se-rwth.de
+ * Copyright (c) 2017, Software Engineering Group at RWTH Aachen,
+ * All rights reserved.
+ *
+ * This project is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 3.0 of the License, or (at your option) any later version.
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this project. If not, see <http://www.gnu.org/licenses/>.
+ * *******************************************************************************
+ */
+package de.monticore.lang.monticar.emadl;
+
+/**
+ * Runs the shared IntegrationTest suite against the GLUON backend.
+ * The second constructor argument is the '#'-separated list of expected
+ * training hashes -- presumably consumed by the training-hash / retraining
+ * check in IntegrationTest (see its trainingHash field); confirm there.
+ */
+public class IntegrationGluonTest extends IntegrationTest {
+    public IntegrationGluonTest() {
+        super("GLUON", "39253EC049D4A4E5FA0536AD34874B9D#1DBAEE1B1BD83FB7CB5F70AE91B29638#C4C23549E737A759721D6694C75D9771#5AF0CE68E408E8C1F000E49D72AC214A");
+    }
+}
diff --git a/src/test/java/de/monticore/lang/monticar/emadl/IntegrationTest.java b/src/test/java/de/monticore/lang/monticar/emadl/IntegrationTest.java
index 976a2566bc97003ed1d9e1d2bdeb6e33a5c50724..114651bd12a6a843465acdcb30989ade55a5510b 100644
--- a/src/test/java/de/monticore/lang/monticar/emadl/IntegrationTest.java
+++ b/src/test/java/de/monticore/lang/monticar/emadl/IntegrationTest.java
@@ -25,9 +25,13 @@ import de.monticore.lang.monticar.emadl.generator.EMADLGenerator;
import de.monticore.lang.monticar.emadl.generator.EMADLGeneratorCli;
import de.se_rwth.commons.logging.Log;
import freemarker.template.TemplateException;
+import org.apache.commons.io.FileUtils;
+import org.junit.AfterClass;
import org.junit.Before;
+import org.junit.BeforeClass;
import org.junit.Test;
+import java.io.File;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.file.Files;
@@ -44,6 +48,20 @@ public abstract class IntegrationTest extends AbstractSymtabTest {
private String backend;
private String trainingHash;
+    // Remove any "model" directory left over from a previous run before the
+    // first test executes; the generated creators write their checkpoints to
+    // model/<network>/ (see CNNCreator _model_dir_), so stale files would
+    // interfere with checkpoint loading.
+    @BeforeClass
+    public static void setupClass() throws IOException {
+        if (new File("model").exists()) {
+            FileUtils.deleteDirectory(new File("model"));
+        }
+    }
+
+    // Same cleanup after the whole test class has run.
+    @AfterClass
+    public static void tearDown() throws IOException {
+        if (new File("model").exists()) {
+            FileUtils.deleteDirectory(new File("model"));
+        }
+    }
+
+
public IntegrationTest(String backend, String trainingHash) {
this.backend = backend;
this.trainingHash = trainingHash;
@@ -116,6 +134,18 @@ public abstract class IntegrationTest extends AbstractSymtabTest {
EMADLGeneratorCli.main(args);
assertTrue(Log.getFindings().size() == 1);
assertTrue(Log.getFindings().get(0).getMsg().contains("skipped"));
+ deleteInstanceTestCifarHashFile();
+ }
+
+    // Deletes the training-hash file of the instanceTestCifar model so a
+    // subsequent generator run retrains instead of being skipped.
+    private void deleteInstanceTestCifarHashFile() {
+        final Path instanceTestCifarHasFile // NOTE(review): typo "HasFile" -> "HashFile"
+            = Paths.get("./target/generated-sources-emadl/instanceTestCifar/CifarNetwork.training_hash");
+        try {
+            Files.delete(instanceTestCifarHasFile);
+        }
+        catch(Exception e) {
+            // NOTE(review): assertFalse("...", true) is a roundabout way to
+            // fail the test; prefer org.junit.Assert.fail("Could not delete hash file").
+            assertFalse("Could not delete hash file", true);
+        }
}
@Test
@@ -130,7 +160,4 @@ public abstract class IntegrationTest extends AbstractSymtabTest {
deleteHashFile();
}
-
-
-
}
diff --git a/src/test/resources/target_code/gluon/CNNBufferFile.h b/src/test/resources/target_code/gluon/CNNBufferFile.h
new file mode 100644
index 0000000000000000000000000000000000000000..c0d8dd9cbe6878e07be976dda5ce9046e6c05606
--- /dev/null
+++ b/src/test/resources/target_code/gluon/CNNBufferFile.h
@@ -0,0 +1,51 @@
+#ifndef CNNBUFFERFILE_H
+#define CNNBUFFERFILE_H
+
+// NOTE(review): the three #include lines below lost their targets -- the
+// angle-bracket contents appear stripped by extraction. Presumably
+// <fstream>, <iostream> and <string>; restore from the generator template
+// before compiling.
+#include
+#include
+#include
+
+// Read file to buffer
+// Owning helper that slurps a whole file into a heap buffer; on open
+// failure it leaves length_ = 0 and buffer_ = NULL.
+class BufferFile {
+ public :
+    std::string file_path_;
+    int length_;   // file size in bytes (from tellg); 0 if the file could not be opened
+    char* buffer_; // owned; released in the destructor; NULL on failure
+
+    explicit BufferFile(std::string file_path)
+    :file_path_(file_path) {
+
+        std::ifstream ifs(file_path.c_str(), std::ios::in | std::ios::binary);
+        if (!ifs) {
+            std::cerr << "Can't open the file. Please check " << file_path << ". \n";
+            length_ = 0;
+            buffer_ = NULL;
+            return;
+        }
+
+        // Determine the file size by seeking to the end.
+        ifs.seekg(0, std::ios::end);
+        length_ = ifs.tellg();
+        ifs.seekg(0, std::ios::beg);
+        std::cout << file_path.c_str() << " ... "<< length_ << " bytes\n";
+
+        buffer_ = new char[sizeof(char) * length_];
+        ifs.read(buffer_, length_);
+        ifs.close();
+    }
+
+    int GetLength() {
+        return length_;
+    }
+    char* GetBuffer() {
+        return buffer_;
+    }
+
+    ~BufferFile() {
+        if (buffer_) {
+            delete[] buffer_;
+            buffer_ = NULL;
+        }
+    }
+};
+
+#endif // CNNBUFFERFILE_H
diff --git a/src/test/resources/target_code/gluon/CNNCreator_mnist_mnistClassifier_net.py b/src/test/resources/target_code/gluon/CNNCreator_mnist_mnistClassifier_net.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a699b4c5307d153901d695286d8ab4c7116abbe
--- /dev/null
+++ b/src/test/resources/target_code/gluon/CNNCreator_mnist_mnistClassifier_net.py
@@ -0,0 +1,53 @@
+import mxnet as mx
+import logging
+import os
+from CNNNet_mnist_mnistClassifier_net import Net
+
+class CNNCreator_mnist_mnistClassifier_net:
+    # Builds, checkpoint-loads and exports the Gluon network for
+    # mnist.LeNetNetwork. Model files live in _model_dir_ and are named with
+    # _model_prefix_.
+    _model_dir_ = "model/mnist.LeNetNetwork/"
+    _model_prefix_ = "model"
+    _input_shapes_ = [(1,28,28)]
+
+    def __init__(self):
+        self.weight_initializer = mx.init.Normal()
+        self.net = None
+
+    def load(self, context):
+        # Loads the newest checkpoint into self.net (construct() must have
+        # run first) and returns its epoch number, or 0 if none was found.
+        lastEpoch = 0
+        param_file = None
+
+        # Drop the "_newest" export artifacts; they are rewritten after training.
+        try:
+            os.remove(self._model_dir_ + self._model_prefix_ + "_newest-0000.params")
+        except OSError:
+            pass
+        try:
+            os.remove(self._model_dir_ + self._model_prefix_ + "_newest-symbol.json")
+        except OSError:
+            pass
+
+        # Pick the .params file with the highest epoch suffix
+        # ("model-<epoch>.params").
+        if os.path.isdir(self._model_dir_):
+            for file in os.listdir(self._model_dir_):
+                if ".params" in file and self._model_prefix_ in file:
+                    epochStr = file.replace(".params","").replace(self._model_prefix_ + "-","")
+                    epoch = int(epochStr)
+                    if epoch > lastEpoch:
+                        lastEpoch = epoch
+                        param_file = file
+        if param_file is None:
+            return 0
+        else:
+            logging.info("Loading checkpoint: " + param_file)
+            self.net.load_parameters(self._model_dir_ + param_file)
+            return lastEpoch
+
+
+    def construct(self, context, data_mean=None, data_std=None):
+        # Instantiates and initializes the network, then exports symbol and
+        # initial parameters (epoch 0) into the model directory.
+        self.net = Net(data_mean=data_mean, data_std=data_std)
+        self.net.collect_params().initialize(self.weight_initializer, ctx=context)
+        self.net.hybridize()
+        # One dummy forward pass so the hybridized graph is built before
+        # export -- presumably required by net.export; confirm.
+        self.net(mx.nd.zeros((1,)+self._input_shapes_[0], ctx=context))
+
+        if not os.path.exists(self._model_dir_):
+            os.makedirs(self._model_dir_)
+
+        self.net.export(self._model_dir_ + self._model_prefix_, epoch=0)
diff --git a/src/test/resources/target_code/gluon/CNNDataLoader_mnist_mnistClassifier_net.py b/src/test/resources/target_code/gluon/CNNDataLoader_mnist_mnistClassifier_net.py
new file mode 100644
index 0000000000000000000000000000000000000000..d60a7bd20c3d742622386570e21386c4effcc8d9
--- /dev/null
+++ b/src/test/resources/target_code/gluon/CNNDataLoader_mnist_mnistClassifier_net.py
@@ -0,0 +1,57 @@
+import os
+import h5py
+import mxnet as mx
+import logging
+import sys
+
+class mnist_mnistClassifier_netDataLoader:
+    # Loads the train/test HDF5 datasets for mnist.LeNetNetwork and wraps
+    # them in MXNet NDArrayIters.
+    _input_names_ = ['image']
+    _output_names_ = ['predictions_label']
+
+    def __init__(self):
+        self._data_dir = "data/mnist.LeNetNetwork/"
+
+    def load_data(self, batch_size):
+        # Returns (train_iter, test_iter, data_mean, data_std); test_iter is
+        # None when no test set is available.
+        train_h5, test_h5 = self.load_h5_files()
+
+        # Per-pixel statistics over the whole training set; the +1e-5 guards
+        # against division by zero during normalization.
+        data_mean = train_h5[self._input_names_[0]][:].mean(axis=0)
+        data_std = train_h5[self._input_names_[0]][:].std(axis=0) + 1e-5
+
+        train_iter = mx.io.NDArrayIter(train_h5[self._input_names_[0]],
+                                       train_h5[self._output_names_[0]],
+                                       batch_size=batch_size,
+                                       data_name=self._input_names_[0],
+                                       label_name=self._output_names_[0])
+        test_iter = None
+        if test_h5 != None:
+            test_iter = mx.io.NDArrayIter(test_h5[self._input_names_[0]],
+                                          test_h5[self._output_names_[0]],
+                                          batch_size=batch_size,
+                                          data_name=self._input_names_[0],
+                                          label_name=self._output_names_[0])
+        return train_iter, test_iter, data_mean, data_std
+
+    def load_h5_files(self):
+        # Opens train.h5 (required) and test.h5 (optional), validating that
+        # each contains the expected input/output datasets; exits on error.
+        train_h5 = None
+        test_h5 = None
+        train_path = self._data_dir + "train.h5"
+        test_path = self._data_dir + "test.h5"
+        if os.path.isfile(train_path):
+            train_h5 = h5py.File(train_path, 'r')
+            if not (self._input_names_[0] in train_h5 and self._output_names_[0] in train_h5):
+                logging.error("The HDF5 file '" + os.path.abspath(train_path) + "' has to contain the datasets: "
+                              + "'" + self._input_names_[0] + "', '" + self._output_names_[0] + "'")
+                sys.exit(1)
+            test_iter = None  # NOTE(review): never used in this method -- looks like leftover dead code.
+            if os.path.isfile(test_path):
+                test_h5 = h5py.File(test_path, 'r')
+                if not (self._input_names_[0] in test_h5 and self._output_names_[0] in test_h5):
+                    logging.error("The HDF5 file '" + os.path.abspath(test_path) + "' has to contain the datasets: "
+                                  + "'" + self._input_names_[0] + "', '" + self._output_names_[0] + "'")
+                    sys.exit(1)
+            else:
+                logging.warning("Couldn't load test set. File '" + os.path.abspath(test_path) + "' does not exist.")
+            return train_h5, test_h5
+        else:
+            logging.error("Data loading failure. File '" + os.path.abspath(train_path) + "' does not exist.")
+            sys.exit(1)
\ No newline at end of file
diff --git a/src/test/resources/target_code/gluon/CNNNet_mnist_mnistClassifier_net.py b/src/test/resources/target_code/gluon/CNNNet_mnist_mnistClassifier_net.py
new file mode 100644
index 0000000000000000000000000000000000000000..72964c17f29053ce2c60e5542237805c6f6f8503
--- /dev/null
+++ b/src/test/resources/target_code/gluon/CNNNet_mnist_mnistClassifier_net.py
@@ -0,0 +1,125 @@
+import mxnet as mx
+import numpy as np
+from mxnet import gluon
+
+class Softmax(gluon.HybridBlock):
+    # Wraps F.softmax as a HybridBlock.
+    def __init__(self, **kwargs):
+        super(Softmax, self).__init__(**kwargs)
+
+    def hybrid_forward(self, F, x):
+        return F.softmax(x)
+
+
+class Split(gluon.HybridBlock):
+    # Splits the input into num_outputs parts along `axis`.
+    def __init__(self, num_outputs, axis=1, **kwargs):
+        super(Split, self).__init__(**kwargs)
+        with self.name_scope():
+            self.axis = axis
+            self.num_outputs = num_outputs
+
+    def hybrid_forward(self, F, x):
+        return F.split(data=x, axis=self.axis, num_outputs=self.num_outputs)
+
+
+class Concatenate(gluon.HybridBlock):
+    # Concatenates its inputs along `dim`.
+    def __init__(self, dim=1, **kwargs):
+        super(Concatenate, self).__init__(**kwargs)
+        with self.name_scope():
+            self.dim = dim
+
+    def hybrid_forward(self, F, *x):
+        return F.concat(*x, dim=self.dim)
+
+
+class ZScoreNormalization(gluon.HybridBlock):
+    # Normalizes the input with fixed (non-trainable) mean/std parameters.
+    def __init__(self, data_mean, data_std, **kwargs):
+        super(ZScoreNormalization, self).__init__(**kwargs)
+        with self.name_scope():
+            self.data_mean = self.params.get('data_mean', shape=data_mean.shape,
+                                             init=mx.init.Constant(data_mean.asnumpy().tolist()), differentiable=False)
+            # NOTE(review): shape is taken from data_mean.shape here as well;
+            # fine while mean and std are computed over the same data, but
+            # worth confirming in the generator template.
+            self.data_std = self.params.get('data_std', shape=data_mean.shape,
+                                            init=mx.init.Constant(data_std.asnumpy().tolist()), differentiable=False)
+
+    def hybrid_forward(self, F, x, data_mean, data_std):
+        x = F.broadcast_sub(x, data_mean)
+        x = F.broadcast_div(x, data_std)
+        return x
+
+
+class Padding(gluon.HybridBlock):
+    # Constant zero padding with a fixed pad_width specification.
+    def __init__(self, padding, **kwargs):
+        super(Padding, self).__init__(**kwargs)
+        with self.name_scope():
+            self.pad_width = padding
+
+    def hybrid_forward(self, F, x):
+        x = F.pad(data=x,
+                  mode='constant',
+                  pad_width=self.pad_width,
+                  constant_value=0)
+        return x
+
+
+class NoNormalization(gluon.HybridBlock):
+    # Identity block used when no input normalization is requested.
+    def __init__(self, **kwargs):
+        super(NoNormalization, self).__init__(**kwargs)
+
+    def hybrid_forward(self, F, x):
+        return x
+
+
+class Net(gluon.HybridBlock):
+    # Generated LeNet-style network for mnist.LeNetNetwork:
+    # conv-pool-conv-pool-fc-relu-fc; per the shape comments the input is
+    # 1x28x28 and there are 10 output classes.
+    def __init__(self, data_mean=None, data_std=None, **kwargs):
+        super(Net, self).__init__(**kwargs)
+        with self.name_scope():
+            if not data_mean is None:
+                assert(not data_std is None)
+                self.input_normalization = ZScoreNormalization(data_mean=data_mean, data_std=data_std)
+            else:
+                self.input_normalization = NoNormalization()
+
+            self.conv1_ = gluon.nn.Conv2D(channels=20,
+                                          kernel_size=(5,5),
+                                          strides=(1,1),
+                                          use_bias=True)
+            # conv1_, output shape: {[20,24,24]}
+
+            self.pool1_ = gluon.nn.MaxPool2D(
+                pool_size=(2,2),
+                strides=(2,2))
+            # pool1_, output shape: {[20,12,12]}
+
+            self.conv2_ = gluon.nn.Conv2D(channels=50,
+                                          kernel_size=(5,5),
+                                          strides=(1,1),
+                                          use_bias=True)
+            # conv2_, output shape: {[50,8,8]}
+
+            self.pool2_ = gluon.nn.MaxPool2D(
+                pool_size=(2,2),
+                strides=(2,2))
+            # pool2_, output shape: {[50,4,4]}
+
+            self.fc2_flatten = gluon.nn.Flatten()
+            self.fc2_ = gluon.nn.Dense(units=500, use_bias=True)
+            # fc2_, output shape: {[500,1,1]}
+
+            self.relu2_ = gluon.nn.Activation(activation='relu')
+            self.fc3_ = gluon.nn.Dense(units=10, use_bias=True)
+            # fc3_, output shape: {[10,1,1]}
+
+
+            # Marker read by the training code (supervised_trainer) to select
+            # the loss function; the forward pass below returns raw fc3_
+            # outputs without applying softmax itself.
+            self.last_layer = 'softmax'
+
+
+    def hybrid_forward(self, F, x):
+        image = self.input_normalization(x)
+        conv1_ = self.conv1_(image)
+        pool1_ = self.pool1_(conv1_)
+        conv2_ = self.conv2_(pool1_)
+        pool2_ = self.pool2_(conv2_)
+        fc2_flatten_ = self.fc2_flatten(pool2_)
+        fc2_ = self.fc2_(fc2_flatten_)
+        relu2_ = self.relu2_(fc2_)
+        fc3_ = self.fc3_(relu2_)
+        return fc3_
diff --git a/src/test/resources/target_code/gluon/CNNNet_mnist_mnistClassifier_net.pyc b/src/test/resources/target_code/gluon/CNNNet_mnist_mnistClassifier_net.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..556dda70bdd75e904587aaed7e5e4282d3bbc5c5
Binary files /dev/null and b/src/test/resources/target_code/gluon/CNNNet_mnist_mnistClassifier_net.pyc differ
diff --git a/src/test/resources/target_code/gluon/CNNPredictor_mnist_mnistClassifier_net.h b/src/test/resources/target_code/gluon/CNNPredictor_mnist_mnistClassifier_net.h
new file mode 100644
index 0000000000000000000000000000000000000000..5823ff3412e0939fcdecb69afefa3411b7cceb56
--- /dev/null
+++ b/src/test/resources/target_code/gluon/CNNPredictor_mnist_mnistClassifier_net.h
@@ -0,0 +1,109 @@
+#ifndef CNNPREDICTOR_MNIST_MNISTCLASSIFIER_NET
+#define CNNPREDICTOR_MNIST_MNISTCLASSIFIER_NET
+
+#include
+
+#include
+#include
+#include
+
+#include
+
+class CNNPredictor_mnist_mnistClassifier_net{
+public:
+ const std::string json_file = "model/mnist.LeNetNetwork/model_newest-symbol.json";
+ const std::string param_file = "model/mnist.LeNetNetwork/model_newest-0000.params";
+ //const std::vector input_keys = {"data"};
+ const std::vector input_keys = {"image"};
+ const std::vector> input_shapes = {{1,1,28,28}};
+ const bool use_gpu = false;
+
+ PredictorHandle handle;
+
+ explicit CNNPredictor_mnist_mnistClassifier_net(){
+ init(json_file, param_file, input_keys, input_shapes, use_gpu);
+ }
+
+ ~CNNPredictor_mnist_mnistClassifier_net(){
+ if(handle) MXPredFree(handle);
+ }
+
+ void predict(const std::vector &image,
+ std::vector &predictions){
+ MXPredSetInput(handle, "data", image.data(), image.size());
+ //MXPredSetInput(handle, "image", image.data(), image.size());
+
+ MXPredForward(handle);
+
+ mx_uint output_index;
+ mx_uint *shape = 0;
+ mx_uint shape_len;
+ size_t size;
+
+ output_index = 0;
+ MXPredGetOutputShape(handle, output_index, &shape, &shape_len);
+ size = 1;
+ for (mx_uint i = 0; i < shape_len; ++i) size *= shape[i];
+ assert(size == predictions.size());
+ MXPredGetOutput(handle, 0, &(predictions[0]), predictions.size());
+
+ }
+
+ void init(const std::string &json_file,
+ const std::string ¶m_file,
+ const std::vector &input_keys,
+ const std::vector> &input_shapes,
+ const bool &use_gpu){
+
+ BufferFile json_data(json_file);
+ BufferFile param_data(param_file);
+
+ int dev_type = use_gpu ? 2 : 1;
+ int dev_id = 0;
+
+ handle = 0;
+
+ if (json_data.GetLength() == 0 ||
+ param_data.GetLength() == 0) {
+ std::exit(-1);
+ }
+
+ const mx_uint num_input_nodes = input_keys.size();
+
+ const char* input_keys_ptr[num_input_nodes];
+ for(mx_uint i = 0; i < num_input_nodes; i++){
+ input_keys_ptr[i] = input_keys[i].c_str();
+ }
+
+ mx_uint shape_data_size = 0;
+ mx_uint input_shape_indptr[input_shapes.size() + 1];
+ input_shape_indptr[0] = 0;
+ for(mx_uint i = 0; i < input_shapes.size(); i++){
+ input_shape_indptr[i+1] = input_shapes[i].size();
+ shape_data_size += input_shapes[i].size();
+ }
+
+ mx_uint input_shape_data[shape_data_size];
+ mx_uint index = 0;
+ for(mx_uint i = 0; i < input_shapes.size(); i++){
+ for(mx_uint j = 0; j < input_shapes[i].size(); j++){
+ input_shape_data[index] = input_shapes[i][j];
+ index++;
+ }
+ }
+
+ MXPredCreate((const char*)json_data.GetBuffer(),
+ (const char*)param_data.GetBuffer(),
+ static_cast(param_data.GetLength()),
+ dev_type,
+ dev_id,
+ num_input_nodes,
+ input_keys_ptr,
+ input_shape_indptr,
+ input_shape_data,
+ &handle);
+ assert(handle);
+ }
+};
+
+#endif // CNNPREDICTOR_MNIST_MNISTCLASSIFIER_NET
diff --git a/src/test/resources/target_code/gluon/CNNTrainer_mnist_mnistClassifier_net.py b/src/test/resources/target_code/gluon/CNNTrainer_mnist_mnistClassifier_net.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8402a60024f4f082c468510b3000e50fd966c30
--- /dev/null
+++ b/src/test/resources/target_code/gluon/CNNTrainer_mnist_mnistClassifier_net.py
@@ -0,0 +1,31 @@
+import logging
+import mxnet as mx
+import supervised_trainer
+import CNNCreator_mnist_mnistClassifier_net
+import CNNDataLoader_mnist_mnistClassifier_net
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.DEBUG)
+    logger = logging.getLogger()
+    # NOTE(review): delay="true" passes a (truthy) string where FileHandler
+    # expects a bool; it works, but delay=True would be the intended form.
+    handler = logging.FileHandler("train.log", "w", encoding=None, delay="true")
+    logger.addHandler(handler)
+
+    # Wire the generated creator and data loader into the generic trainer.
+    mnist_mnistClassifier_net_creator = CNNCreator_mnist_mnistClassifier_net.CNNCreator_mnist_mnistClassifier_net()
+    mnist_mnistClassifier_net_loader = CNNDataLoader_mnist_mnistClassifier_net.mnist_mnistClassifier_netDataLoader()
+    mnist_mnistClassifier_net_trainer = supervised_trainer.CNNSupervisedTrainer(mnist_mnistClassifier_net_loader,
+                                                                                mnist_mnistClassifier_net_creator)
+
+    # Training configuration -- note the hard-coded context='gpu'.
+    # NOTE(review): 'learning_rate_policy' is not translated by
+    # supervised_trainer.train (only weight_decay/learning_rate_decay are);
+    # confirm the MXNet optimizer accepts it.
+    mnist_mnistClassifier_net_trainer.train(
+        batch_size=64,
+        num_epoch=11,
+        context='gpu',
+        eval_metric='accuracy',
+        optimizer='adam',
+        optimizer_params={
+            'epsilon': 1.0E-8,
+            'weight_decay': 0.001,
+            'beta1': 0.9,
+            'beta2': 0.999,
+            'learning_rate_policy': 'fixed',
+            'learning_rate': 0.001}
+    )
diff --git a/src/test/resources/target_code/gluon/CNNTranslator.h b/src/test/resources/target_code/gluon/CNNTranslator.h
new file mode 100644
index 0000000000000000000000000000000000000000..5a036659d2def63b2d697234733eace9663ea58a
--- /dev/null
+++ b/src/test/resources/target_code/gluon/CNNTranslator.h
@@ -0,0 +1,127 @@
+#ifndef CNNTRANSLATOR_H
+#define CNNTRANSLATOR_H
+#include
+#include
+
+using namespace std;
+using namespace arma;
+
+class CNNTranslator{
+public:
+ template static void addColToSTDVector(const Col &source, vector &data){
+ for(size_t i = 0; i < source.n_elem; i++){
+ data.push_back((float) source(i));
+ }
+ }
+
+ template static void addRowToSTDVector(const subview_row &source, vector &data){
+ for(size_t i = 0; i < source.n_elem; i++){
+ data.push_back((float) source(i));
+ }
+ }
+
+ template static void addRowToSTDVector(const Row &source, vector &data){
+ for(size_t i = 0; i < source.n_elem; i++){
+ data.push_back((float) source(i));
+ }
+ }
+
+ template static void addMatToSTDVector(const Mat &source, vector &data){
+ for(size_t i = 0; i < source.n_rows; i++){
+ addRowToSTDVector(source.row(i), data);
+ }
+ }
+
+
+ template static vector translate(const Col &source){
+ size_t size = source.n_elem;
+ vector data;
+ data.reserve(size);
+ addColToSTDVector(source, data);
+ return data;
+ }
+
+ template static vector translate(const Row &source){
+ size_t size = source.n_elem;
+ vector data;
+ data.reserve(size);
+ addRowToSTDVector(source, data);
+ return data;
+ }
+
+ template static vector translate(const Mat &source){
+ size_t size = source.n_elem;
+ vector data;
+ data.reserve(size);
+ addMatToSTDVector(source, data);
+ return data;
+ }
+
+ template static vector translate(const Cube &source){
+ size_t size = source.n_elem;
+ vector data;
+ data.reserve(size);
+ for(size_t i = 0; i < source.n_slices; i++){
+ addMatToSTDVector(source.slice(i), data);
+ }
+ return data;
+ }
+
+ static vec translateToCol(const vector &source, const vector &shape){
+ assert(shape.size() == 1);
+ vec column(shape[0]);
+ for(size_t i = 0; i < source.size(); i++){
+ column(i) = (double) source[i];
+ }
+ return column;
+ }
+
+ static mat translateToMat(const vector &source, const vector &shape){
+ assert(shape.size() == 2);
+ mat matrix(shape[1], shape[0]); //create transposed version of the matrix
+ int startPos = 0;
+ int endPos = matrix.n_rows;
+ const vector columnShape = {matrix.n_rows};
+ for(size_t i = 0; i < matrix.n_cols; i++){
+ vector colSource(&source[startPos], &source[endPos]);
+ matrix.col(i) = translateToCol(colSource, columnShape);
+ startPos = endPos;
+ endPos += matrix.n_rows;
+ }
+ return matrix.t();
+ }
+
+ static cube translateToCube(const vector &source, const vector &shape){
+ assert(shape.size() == 3);
+ cube cubeMatrix(shape[1], shape[2], shape[0]);
+ const int matrixSize = shape[1] * shape[2];
+ const vector matrixShape = {shape[1], shape[2]};
+ int startPos = 0;
+ int endPos = matrixSize;
+ for(size_t i = 0; i < cubeMatrix.n_slices; i++){
+ vector matrixSource(&source[startPos], &source[endPos]);
+ cubeMatrix.slice(i) = translateToMat(matrixSource, matrixShape);
+ startPos = endPos;
+ endPos += matrixSize;
+ }
+ return cubeMatrix;
+ }
+
+ template static vector getShape(const Col &source){
+ return {source.n_elem};
+ }
+
+ template static vector getShape(const Row &source){
+ return {source.n_elem};
+ }
+
+ template static vector getShape(const Mat &source){
+ return {source.n_rows, source.n_cols};
+ }
+
+ template static vector getShape(const Cube &source){
+ return {source.n_slices, source.n_rows, source.n_cols};
+ }
+};
+
+#endif
diff --git a/src/test/resources/target_code/gluon/HelperA.h b/src/test/resources/target_code/gluon/HelperA.h
new file mode 100644
index 0000000000000000000000000000000000000000..733fd62c8a5b6309dea275e1514d90d0c2a2772b
--- /dev/null
+++ b/src/test/resources/target_code/gluon/HelperA.h
@@ -0,0 +1,141 @@
+#ifndef HELPERA_H
+#define HELPERA_H
+#include
+#include "armadillo"
+#include
+#include
+#include
+using namespace arma;
+#ifndef _FILESTRING_CONVERSION___A
+#define _FILESTRING_CONVERSION___A
+void toFileString(std::ofstream& myfile, mat A){
+ myfile << "[";
+ for (int i = 0; i < A.n_rows; i++){
+ for (int j = 0; j < A.n_cols; j++){
+ myfile << A(i,j);
+ if(j + 1 < A.n_cols){
+ myfile << ", ";
+ }
+ }
+ if(i + 1 < A.n_rows){
+ myfile << ";";
+ }
+ }
+ myfile << "]";
+}
+void toFileString(std::ofstream& myfile, double A){
+ myfile << A;
+}
+void toFileString(std::ofstream& myfile, float A){
+ myfile << A;
+}
+void toFileString(std::ofstream& myfile, int A){
+ myfile << A;
+}
+void toFileString(std::ofstream& myfile, bool A){
+ myfile << A;
+}
+bool Is_close(mat& X, mat& Y, double tol)
+{
+ // abs returns a mat type then max checks columns and returns a row_vec
+ // max used again will return the biggest element in the row_vec
+ bool close(false);
+ if(arma::max(arma::max(arma::abs(X-Y))) < tol)
+ {
+ close = true;
+ }
+ return close;
+}
+#endif
+class HelperA{
+public:
+static mat getEigenVectors(mat A){
+vec eigenValues;
+mat eigenVectors;
+eig_sym(eigenValues,eigenVectors,A);
+return eigenVectors;
+}
+static vec getEigenValues(mat A){
+vec eigenValues;
+mat eigenVectors;
+eig_sym(eigenValues,eigenVectors,A);
+return eigenValues;
+}
+
+static mat getKMeansClusters(mat A, int k){
+mat clusters;
+kmeans(clusters,A.t(),k,random_subset,20,true);
+/*printf("cluster centroid calculation done\n");
+std::ofstream myfile;
+ myfile.open("data after cluster.txt");
+ myfile << A;
+ myfile.close();
+
+ std::ofstream myfile2;
+ myfile2.open("cluster centroids.txt");
+ myfile2 << clusters;
+ myfile2.close();*/
+mat indexedData=getKMeansClustersIndexData(A.t(), clusters);
+
+/*std::ofstream myfile3;
+ myfile3.open("data after index.txt");
+ myfile3 << indexedData;
+ myfile3.close();
+ */
+return indexedData;
+}
+
+static mat getKMeansClustersIndexData(mat A, mat centroids){
+ mat result=mat(A.n_cols, 1);
+ for(int i=0;i maxValue)){
+maxIndex = i-1;
+maxValue = inputVector(i-1);
+}
+}
+}
+
+};
+#endif
diff --git a/src/test/resources/target_code/gluon/mnist_mnistClassifier_net.h b/src/test/resources/target_code/gluon/mnist_mnistClassifier_net.h
new file mode 100644
index 0000000000000000000000000000000000000000..739ea37957d7de8c5e19f7635e616fd0caffbf26
--- /dev/null
+++ b/src/test/resources/target_code/gluon/mnist_mnistClassifier_net.h
@@ -0,0 +1,32 @@
+#ifndef MNIST_MNISTCLASSIFIER_NET
+#define MNIST_MNISTCLASSIFIER_NET
+#ifndef M_PI
+#define M_PI 3.14159265358979323846
+#endif
+#include "armadillo"
+#include "CNNPredictor_mnist_mnistClassifier_net.h"
+#include "CNNTranslator.h"
+using namespace arma;
+// Component wrapper around the CNN predictor: 'image' is the input port
+// (1x28x28, see init()), 'predictions' the output port (one value per class).
+class mnist_mnistClassifier_net{
+const int classes = 10;
+public:
+CNNPredictor_mnist_mnistClassifier_net _cnn_;
+icube image;
+colvec predictions;
+void init()
+{
+image = icube(1, 28, 28);
+predictions=colvec(classes);
+}
+void execute(){
+    // NOTE(review): "vector CNN_predictions(10);" and "std::vector {10}"
+    // below lost their template arguments (presumably std::vector<float>
+    // and std::vector<size_t>) -- angle-bracket contents were stripped by
+    // extraction; restore from the generator template before compiling.
+    vector CNN_predictions(10);
+
+    _cnn_.predict(CNNTranslator::translate(image),
+                CNN_predictions);
+
+    predictions = CNNTranslator::translateToCol(CNN_predictions, std::vector {10});
+
+}
+
+};
+#endif
diff --git a/src/test/resources/target_code/gluon/supervised_trainer.py b/src/test/resources/target_code/gluon/supervised_trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a47148877c0bc841d0f00b3282460012ba33521
--- /dev/null
+++ b/src/test/resources/target_code/gluon/supervised_trainer.py
@@ -0,0 +1,141 @@
+import mxnet as mx
+import logging
+import numpy as np
+import time
+import os
+import shutil
+from mxnet import gluon, autograd, nd
+
+class CNNSupervisedTrainer(object):
+    # Generic supervised training loop for a generated Gluon network.
+    # `net_constructor` is the generated CNNCreator; `net` may be passed in
+    # directly, otherwise it is constructed (and optionally checkpoint-loaded).
+    def __init__(self, data_loader, net_constructor, net=None):
+        self._data_loader = data_loader
+        self._net_creator = net_constructor
+        self._net = net
+
+    def train(self, batch_size=64,
+              num_epoch=10,
+              eval_metric='acc',
+              optimizer='adam',
+              optimizer_params=(('learning_rate', 0.001),),
+              load_checkpoint=True,
+              context='gpu',
+              checkpoint_period=5,
+              normalize=True):
+        if context == 'gpu':
+            mx_context = mx.gpu()
+        elif context == 'cpu':
+            mx_context = mx.cpu()
+        else:
+            # NOTE(review): only logs; mx_context stays unbound and the next
+            # use below raises NameError. Consider raising ValueError here.
+            logging.error("Context argument is '" + context + "'. Only 'cpu' and 'gpu are valid arguments'.")
+
+        # Translate generator-level option names into MXNet optimizer options.
+        # NOTE(review): these branches assume optimizer_params is a dict when
+        # the keys are present; the tuple-of-pairs default has neither key.
+        if 'weight_decay' in optimizer_params:
+            optimizer_params['wd'] = optimizer_params['weight_decay']
+            del optimizer_params['weight_decay']
+        if 'learning_rate_decay' in optimizer_params:
+            min_learning_rate = 1e-08
+            if 'learning_rate_minimum' in optimizer_params:
+                min_learning_rate = optimizer_params['learning_rate_minimum']
+                del optimizer_params['learning_rate_minimum']
+            optimizer_params['lr_scheduler'] = mx.lr_scheduler.FactorScheduler(
+                optimizer_params['step_size'],
+                factor=optimizer_params['learning_rate_decay'],
+                stop_factor_lr=min_learning_rate)
+            del optimizer_params['step_size']
+            del optimizer_params['learning_rate_decay']
+
+
+        train_iter, test_iter, data_mean, data_std = self._data_loader.load_data(batch_size)
+        if self._net is None:
+            if normalize:
+                self._net_creator.construct(
+                    context=mx_context, data_mean=nd.array(data_mean), data_std=nd.array(data_std))
+            else:
+                self._net_creator.construct(context=mx_context)
+
+        begin_epoch = 0
+        if load_checkpoint:
+            begin_epoch = self._net_creator.load(mx_context)
+        else:
+            # Fresh training: wipe previous checkpoints.
+            if os.path.isdir(self._net_creator._model_dir_):
+                shutil.rmtree(self._net_creator._model_dir_)
+
+        self._net = self._net_creator.net
+
+        try:
+            os.makedirs(self._net_creator._model_dir_)
+        except OSError:
+            if not os.path.isdir(self._net_creator._model_dir_):
+                raise
+
+        trainer = mx.gluon.Trainer(self._net.collect_params(), optimizer, optimizer_params)
+
+        # Select the loss from the marker the generated Net sets in last_layer.
+        if self._net.last_layer == 'softmax':
+            loss_function = mx.gluon.loss.SoftmaxCrossEntropyLoss()
+        elif self._net.last_layer == 'sigmoid':
+            loss_function = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss()
+        elif self._net.last_layer == 'linear':
+            loss_function = mx.gluon.loss.L2Loss()
+        else: # TODO: Change default?
+            loss_function = mx.gluon.loss.L2Loss()
+            logging.warning("Invalid last_layer, defaulting to L2 loss")
+
+        speed_period = 50
+        tic = None
+
+        for epoch in range(begin_epoch, begin_epoch + num_epoch):
+            train_iter.reset()
+            for batch_i, batch in enumerate(train_iter):
+                data = batch.data[0].as_in_context(mx_context)
+                label = batch.label[0].as_in_context(mx_context)
+                with autograd.record():
+                    output = self._net(data)
+                    loss = loss_function(output, label)
+
+                loss.backward()
+                trainer.step(batch_size)
+
+                # Throughput logging every speed_period batches.
+                if tic is None:
+                    tic = time.time()
+                else:
+                    if batch_i % speed_period == 0:
+                        try:
+                            speed = speed_period * batch_size / (time.time() - tic)
+                        except ZeroDivisionError:
+                            speed = float("inf")
+
+                        logging.info("Epoch[%d] Batch[%d] Speed: %.2f samples/sec" % (epoch, batch_i, speed))
+
+                        tic = time.time()
+
+            tic = None
+
+            # Evaluate on the training set ...
+            train_iter.reset()
+            metric = mx.metric.create(eval_metric)
+            for batch_i, batch in enumerate(train_iter):
+                data = batch.data[0].as_in_context(mx_context)
+                label = batch.label[0].as_in_context(mx_context)
+                output = self._net(data)
+                predictions = mx.nd.argmax(output, axis=1)
+                metric.update(preds=predictions, labels=label)
+            train_metric_score = metric.get()[1]
+
+            # ... and on the test set.
+            # NOTE(review): the data loader may return test_iter=None when no
+            # test file exists; reset() would then raise AttributeError.
+            test_iter.reset()
+            metric = mx.metric.create(eval_metric)
+            for batch_i, batch in enumerate(test_iter):
+                data = batch.data[0].as_in_context(mx_context)
+                label = batch.label[0].as_in_context(mx_context)
+                output = self._net(data)
+                predictions = mx.nd.argmax(output, axis=1)
+                metric.update(preds=predictions, labels=label)
+            test_metric_score = metric.get()[1]
+
+            logging.info("Epoch[%d] Train: %f, Test: %f" % (epoch, train_metric_score, test_metric_score))
+
+            # Periodic checkpoint plus a final save/export after the loop.
+            if (epoch - begin_epoch) % checkpoint_period == 0:
+                self._net.save_parameters(self.parameter_path() + '-' + str(epoch).zfill(4) + '.params')
+
+        self._net.save_parameters(self.parameter_path() + '-' + str(num_epoch + begin_epoch).zfill(4) + '.params')
+        self._net.export(self.parameter_path() + '_newest', epoch=0)
+
+    def parameter_path(self):
+        # Checkpoints are written as <model_dir><model_prefix>-<epoch>.params .
+        return self._net_creator._model_dir_ + self._net_creator._model_prefix_
\ No newline at end of file