Commit 9d15e90e authored by Sebastian Nickels's avatar Sebastian Nickels
Browse files

Fixed tests and reverted _label change

parent efcd6240
Pipeline #144204 failed with stages
in 5 minutes and 8 seconds
...@@ -5,6 +5,6 @@ nppBackup ...@@ -5,6 +5,6 @@ nppBackup
.classpath .classpath
.idea .idea
.git .git
.vscode
*.iml *.iml
train.log
{
"configurations": [
{
"type": "java",
"name": "CodeLens (Launch) - EMADLGeneratorCli",
"request": "launch",
"mainClass": "de.monticore.lang.monticar.emadl.generator.EMADLGeneratorCli",
"projectName": "embedded-montiarc-emadl-generator"
}
]
}
\ No newline at end of file
...@@ -40,7 +40,7 @@ public enum Backend { ...@@ -40,7 +40,7 @@ public enum Backend {
} }
@Override @Override
public CNNTrainGenerator getCNNTrainGenerator() { public CNNTrainGenerator getCNNTrainGenerator() {
return new CNNTrain2Gluon(); return new CNNTrain2Gluon(new RewardFunctionCppGenerator());
} }
}; };
......
...@@ -360,6 +360,8 @@ public class EMADLGenerator { ...@@ -360,6 +360,8 @@ public class EMADLGenerator {
EMADLCocos.checkAll(componentInstanceSymbol); EMADLCocos.checkAll(componentInstanceSymbol);
if (architecture.isPresent()){ if (architecture.isPresent()){
cnnArchGenerator.checkSupport(architecture.get());
DataPathConfigParser newParserConfig = new DataPathConfigParser(getModelsPath() + "data_paths.txt"); DataPathConfigParser newParserConfig = new DataPathConfigParser(getModelsPath() + "data_paths.txt");
String dPath = newParserConfig.getDataPath(EMAComponentSymbol.getFullName()); String dPath = newParserConfig.getDataPath(EMAComponentSymbol.getFullName());
......
...@@ -52,7 +52,7 @@ public class EMADLGeneratorCli { ...@@ -52,7 +52,7 @@ public class EMADLGeneratorCli {
.build(); .build();
public static final Option OPTION_BACKEND = Option.builder("b") public static final Option OPTION_BACKEND = Option.builder("b")
.longOpt("backend") .longOpt("backend")
.desc("deep-learning-framework backend. Options: MXNET, CAFFE2") .desc("deep-learning-framework backend. Options: MXNET, CAFFE2, GLUON")
.hasArg(true) .hasArg(true)
.required(false) .required(false)
.build(); .build();
......
package de.monticore.lang.monticar.emadl.generator;
import de.monticore.lang.embeddedmontiarc.embeddedmontiarc._symboltable.instanceStructure.EMAComponentInstanceSymbol;
import de.monticore.lang.monticar.cnnarch.gluongenerator.reinforcement.RewardFunctionSourceGenerator;
import de.monticore.lang.monticar.generator.cpp.GeneratorEMAMOpt2CPP;
import de.monticore.lang.tagging._symboltable.TaggingResolver;
import de.se_rwth.commons.logging.Log;
import java.io.IOException;
import java.util.Optional;
// TODO: Only added temporarily to make compilation work
/**
 * Generates C++ code for a reinforcement-learning reward function component.
 *
 * <p>Builds a symbol table from the given model path, resolves the requested
 * component instance, and delegates C++ emission to {@link GeneratorEMAMOpt2CPP}
 * with the Armadillo backend.
 */
public class RewardFunctionCppGenerator implements RewardFunctionSourceGenerator {

    /** Default constructor; the generator is stateless. */
    public RewardFunctionCppGenerator() {
    }

    /**
     * Generates the C++ sources for the reward function.
     *
     * @param modelPath  root directory of the EMADL models used to build the symbol table
     * @param rootModel  fully qualified name of the component instance to resolve
     * @param targetPath output directory for the generated C++ files
     */
    @Override
    public void generate(String modelPath, String rootModel, String targetPath) {
        GeneratorEMAMOpt2CPP generator = new GeneratorEMAMOpt2CPP();
        generator.useArmadilloBackend();

        TaggingResolver taggingResolver =
                EMADLAbstractSymtab.createSymTabAndTaggingResolver(modelPath);
        Optional<EMAComponentInstanceSymbol> instanceSymbol = taggingResolver
                .<EMAComponentInstanceSymbol>resolve(rootModel, EMAComponentInstanceSymbol.KIND);

        if (!instanceSymbol.isPresent()) {
            Log.error("Generation of reward function is not possible: Cannot resolve component instance "
                    + rootModel);
            // Bug fix: bail out here. Previously execution fell through to
            // instanceSymbol.get() below, which throws NoSuchElementException
            // whenever Log.error does not terminate the process (failQuick off).
            return;
        }

        generator.setGenerationTargetPath(targetPath);
        try {
            generator.generate(instanceSymbol.get(), taggingResolver);
        } catch (IOException e) {
            Log.error("Generation of reward function is not possible: " + e.getMessage());
        }
    }
}
...@@ -140,7 +140,7 @@ public class GenerationTest extends AbstractSymtabTest { ...@@ -140,7 +140,7 @@ public class GenerationTest extends AbstractSymtabTest {
} }
@Test @Test
public void testMnistClassifier() throws IOException, TemplateException { public void testMnistClassifierForCaffe2() throws IOException, TemplateException {
Log.getFindings().clear(); Log.getFindings().clear();
String[] args = {"-m", "src/test/resources/models/", "-r", "mnist.MnistClassifier", "-b", "CAFFE2", "-f", "n", "-c", "n"}; String[] args = {"-m", "src/test/resources/models/", "-r", "mnist.MnistClassifier", "-b", "CAFFE2", "-f", "n", "-c", "n"};
EMADLGeneratorCli.main(args); EMADLGeneratorCli.main(args);
...@@ -178,7 +178,7 @@ public class GenerationTest extends AbstractSymtabTest { ...@@ -178,7 +178,7 @@ public class GenerationTest extends AbstractSymtabTest {
"CNNCreator_mnist_mnistClassifier_net.py", "CNNCreator_mnist_mnistClassifier_net.py",
"CNNPredictor_mnist_mnistClassifier_net.h", "CNNPredictor_mnist_mnistClassifier_net.h",
"CNNDataLoader_mnist_mnistClassifier_net.py", "CNNDataLoader_mnist_mnistClassifier_net.py",
"supervised_trainer.py", "CNNSupervisedTrainer_mnist_mnistClassifier_net.py",
"mnist_mnistClassifier_net.h", "mnist_mnistClassifier_net.h",
"HelperA.h", "HelperA.h",
"CNNTranslator.h", "CNNTranslator.h",
......
...@@ -20,27 +20,8 @@ ...@@ -20,27 +20,8 @@
*/ */
package de.monticore.lang.monticar.emadl; package de.monticore.lang.monticar.emadl;
import de.monticore.lang.monticar.emadl.generator.Backend;
import de.monticore.lang.monticar.emadl.generator.EMADLGenerator;
import de.monticore.lang.monticar.emadl.generator.EMADLGeneratorCli;
import de.se_rwth.commons.logging.Log;
import freemarker.template.TemplateException;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;
import static junit.framework.TestCase.assertTrue;
import static org.junit.Assert.assertFalse;
public class IntegrationCaffe2Test extends IntegrationTest { public class IntegrationCaffe2Test extends IntegrationTest {
public IntegrationCaffe2Test() { public IntegrationCaffe2Test() {
super("CAFFE2", "39253EC049D4A4E5FA0536AD34874B9D#1DBAEE1B1BD83FB7CB5F70AE91B29638#13D139510DC5681639AA91D7250288D3#1A42D4842D0664937A9F6B727BD60CEF"); super("CAFFE2", "39253EC049D4A4E5FA0536AD34874B9D#1DBAEE1B1BD83FB7CB5F70AE91B29638#13D139510DC5681639AA91D7250288D3#1A42D4842D0664937A9F6B727BD60CEF");
} }
} }
\ No newline at end of file
...@@ -20,25 +20,6 @@ ...@@ -20,25 +20,6 @@
*/ */
package de.monticore.lang.monticar.emadl; package de.monticore.lang.monticar.emadl;
import de.monticore.lang.monticar.emadl.generator.Backend;
import de.monticore.lang.monticar.emadl.generator.EMADLGenerator;
import de.monticore.lang.monticar.emadl.generator.EMADLGeneratorCli;
import de.se_rwth.commons.logging.Log;
import freemarker.template.TemplateException;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;
import static junit.framework.TestCase.assertTrue;
import static org.junit.Assert.assertFalse;
public class IntegrationMXNetTest extends IntegrationTest { public class IntegrationMXNetTest extends IntegrationTest {
public IntegrationMXNetTest() { public IntegrationMXNetTest() {
super("MXNET", "39253EC049D4A4E5FA0536AD34874B9D#1DBAEE1B1BD83FB7CB5F70AE91B29638#C4C23549E737A759721D6694C75D9771#5AF0CE68E408E8C1F000E49D72AC214A"); super("MXNET", "39253EC049D4A4E5FA0536AD34874B9D#1DBAEE1B1BD83FB7CB5F70AE91B29638#C4C23549E737A759721D6694C75D9771#5AF0CE68E408E8C1F000E49D72AC214A");
......
...@@ -6,12 +6,15 @@ from CNNNet_mnist_mnistClassifier_net import Net ...@@ -6,12 +6,15 @@ from CNNNet_mnist_mnistClassifier_net import Net
class CNNCreator_mnist_mnistClassifier_net: class CNNCreator_mnist_mnistClassifier_net:
_model_dir_ = "model/mnist.LeNetNetwork/" _model_dir_ = "model/mnist.LeNetNetwork/"
_model_prefix_ = "model" _model_prefix_ = "model"
_input_shapes_ = [(1,28,28)] _input_shapes_ = [(1,28,28,)]
def __init__(self): def __init__(self):
self.weight_initializer = mx.init.Normal() self.weight_initializer = mx.init.Normal()
self.net = None self.net = None
def get_input_shapes(self):
return self._input_shapes_
def load(self, context): def load(self, context):
lastEpoch = 0 lastEpoch = 0
param_file = None param_file = None
...@@ -40,12 +43,11 @@ class CNNCreator_mnist_mnistClassifier_net: ...@@ -40,12 +43,11 @@ class CNNCreator_mnist_mnistClassifier_net:
self.net.load_parameters(self._model_dir_ + param_file) self.net.load_parameters(self._model_dir_ + param_file)
return lastEpoch return lastEpoch
def construct(self, context, data_mean=None, data_std=None): def construct(self, context, data_mean=None, data_std=None):
self.net = Net(data_mean=data_mean, data_std=data_std) self.net = Net(data_mean=data_mean, data_std=data_std)
self.net.collect_params().initialize(self.weight_initializer, ctx=context) self.net.collect_params().initialize(self.weight_initializer, ctx=context)
self.net.hybridize() self.net.hybridize()
self.net(mx.nd.zeros((1,)+self._input_shapes_[0], ctx=context)) self.net(mx.nd.zeros((1,) + self._input_shapes_[0], ctx=context))
if not os.path.exists(self._model_dir_): if not os.path.exists(self._model_dir_):
os.makedirs(self._model_dir_) os.makedirs(self._model_dir_)
......
...@@ -3,8 +3,9 @@ import h5py ...@@ -3,8 +3,9 @@ import h5py
import mxnet as mx import mxnet as mx
import logging import logging
import sys import sys
from mxnet import nd
class mnist_mnistClassifier_netDataLoader: class CNNDataLoader_mnist_mnistClassifier_net:
_input_names_ = ['image'] _input_names_ = ['image']
_output_names_ = ['predictions_label'] _output_names_ = ['predictions_label']
...@@ -14,21 +15,38 @@ class mnist_mnistClassifier_netDataLoader: ...@@ -14,21 +15,38 @@ class mnist_mnistClassifier_netDataLoader:
def load_data(self, batch_size): def load_data(self, batch_size):
train_h5, test_h5 = self.load_h5_files() train_h5, test_h5 = self.load_h5_files()
data_mean = train_h5[self._input_names_[0]][:].mean(axis=0) train_data = {}
data_std = train_h5[self._input_names_[0]][:].std(axis=0) + 1e-5 data_mean = {}
data_std = {}
for input_name in self._input_names_:
train_data[input_name] = train_h5[input_name]
data_mean[input_name] = nd.array(train_h5[input_name][:].mean(axis=0))
data_std[input_name] = nd.array(train_h5[input_name][:].std(axis=0) + 1e-5)
train_label = {}
for output_name in self._output_names_:
train_label[output_name] = train_h5[output_name]
train_iter = mx.io.NDArrayIter(data=train_data,
label=train_label,
batch_size=batch_size)
train_iter = mx.io.NDArrayIter(train_h5[self._input_names_[0]],
train_h5[self._output_names_[0]],
batch_size=batch_size,
data_name=self._input_names_[0],
label_name=self._output_names_[0])
test_iter = None test_iter = None
if test_h5 != None: if test_h5 != None:
test_iter = mx.io.NDArrayIter(test_h5[self._input_names_[0]], test_data = {}
test_h5[self._output_names_[0]], for input_name in self._input_names_:
batch_size=batch_size, test_data[input_name] = test_h5[input_name]
data_name=self._input_names_[0],
label_name=self._output_names_[0]) test_label = {}
for output_name in self._output_names_:
test_label[output_name] = test_h5[output_name]
test_iter = mx.io.NDArrayIter(data=test_data,
label=test_label,
batch_size=batch_size)
return train_iter, test_iter, data_mean, data_std return train_iter, test_iter, data_mean, data_std
def load_h5_files(self): def load_h5_files(self):
...@@ -36,21 +54,39 @@ class mnist_mnistClassifier_netDataLoader: ...@@ -36,21 +54,39 @@ class mnist_mnistClassifier_netDataLoader:
test_h5 = None test_h5 = None
train_path = self._data_dir + "train.h5" train_path = self._data_dir + "train.h5"
test_path = self._data_dir + "test.h5" test_path = self._data_dir + "test.h5"
if os.path.isfile(train_path): if os.path.isfile(train_path):
train_h5 = h5py.File(train_path, 'r') train_h5 = h5py.File(train_path, 'r')
if not (self._input_names_[0] in train_h5 and self._output_names_[0] in train_h5):
logging.error("The HDF5 file '" + os.path.abspath(train_path) + "' has to contain the datasets: " for input_name in self._input_names_:
+ "'" + self._input_names_[0] + "', '" + self._output_names_[0] + "'") if not input_name in train_h5:
sys.exit(1) logging.error("The HDF5 file '" + os.path.abspath(train_path) + "' has to contain the dataset "
test_iter = None + "'" + input_name + "'")
sys.exit(1)
for output_name in self._output_names_:
if not output_name in train_h5:
logging.error("The HDF5 file '" + os.path.abspath(train_path) + "' has to contain the dataset "
+ "'" + output_name + "'")
sys.exit(1)
if os.path.isfile(test_path): if os.path.isfile(test_path):
test_h5 = h5py.File(test_path, 'r') test_h5 = h5py.File(test_path, 'r')
if not (self._input_names_[0] in test_h5 and self._output_names_[0] in test_h5):
logging.error("The HDF5 file '" + os.path.abspath(test_path) + "' has to contain the datasets: " for input_name in self._input_names_:
+ "'" + self._input_names_[0] + "', '" + self._output_names_[0] + "'") if not input_name in test_h5:
sys.exit(1) logging.error("The HDF5 file '" + os.path.abspath(test_path) + "' has to contain the dataset "
+ "'" + input_name + "'")
sys.exit(1)
for output_name in self._output_names_:
if not output_name in test_h5:
logging.error("The HDF5 file '" + os.path.abspath(test_path) + "' has to contain the dataset "
+ "'" + output_name + "'")
sys.exit(1)
else: else:
logging.warning("Couldn't load test set. File '" + os.path.abspath(test_path) + "' does not exist.") logging.warning("Couldn't load test set. File '" + os.path.abspath(test_path) + "' does not exist.")
return train_h5, test_h5 return train_h5, test_h5
else: else:
logging.error("Data loading failure. File '" + os.path.abspath(train_path) + "' does not exist.") logging.error("Data loading failure. File '" + os.path.abspath(train_path) + "' does not exist.")
......
...@@ -72,11 +72,12 @@ class Net(gluon.HybridBlock): ...@@ -72,11 +72,12 @@ class Net(gluon.HybridBlock):
def __init__(self, data_mean=None, data_std=None, **kwargs): def __init__(self, data_mean=None, data_std=None, **kwargs):
super(Net, self).__init__(**kwargs) super(Net, self).__init__(**kwargs)
with self.name_scope(): with self.name_scope():
if not data_mean is None: if data_mean:
assert(not data_std is None) assert(data_std)
self.input_normalization = ZScoreNormalization(data_mean=data_mean, data_std=data_std) self.input_normalization_image = ZScoreNormalization(data_mean=data_mean['image'],
data_std=data_std['image'])
else: else:
self.input_normalization = NoNormalization() self.input_normalization_image = NoNormalization()
self.conv1_ = gluon.nn.Conv2D(channels=20, self.conv1_ = gluon.nn.Conv2D(channels=20,
kernel_size=(5,5), kernel_size=(5,5),
...@@ -112,8 +113,8 @@ class Net(gluon.HybridBlock): ...@@ -112,8 +113,8 @@ class Net(gluon.HybridBlock):
self.last_layer = 'softmax' self.last_layer = 'softmax'
def hybrid_forward(self, F, x): def hybrid_forward(self, F, image):
image = self.input_normalization(x) image = self.input_normalization_image(image)
conv1_ = self.conv1_(image) conv1_ = self.conv1_(image)
pool1_ = self.pool1_(conv1_) pool1_ = self.pool1_(conv1_)
conv2_ = self.conv2_(pool1_) conv2_ = self.conv2_(pool1_)
......
...@@ -30,8 +30,7 @@ public: ...@@ -30,8 +30,7 @@ public:
void predict(const std::vector<float> &image, void predict(const std::vector<float> &image,
std::vector<float> &predictions){ std::vector<float> &predictions){
MXPredSetInput(handle, "data", image.data(), image.size()); MXPredSetInput(handle, "data", image.data(), static_cast<mx_uint>(image.size()));
//MXPredSetInput(handle, "image", image.data(), image.size());
MXPredForward(handle); MXPredForward(handle);
...@@ -61,8 +60,6 @@ public: ...@@ -61,8 +60,6 @@ public:
int dev_type = use_gpu ? 2 : 1; int dev_type = use_gpu ? 2 : 1;
int dev_id = 0; int dev_id = 0;
handle = 0;
if (json_data.GetLength() == 0 || if (json_data.GetLength() == 0 ||
param_data.GetLength() == 0) { param_data.GetLength() == 0) {
std::exit(-1); std::exit(-1);
...@@ -70,10 +67,8 @@ public: ...@@ -70,10 +67,8 @@ public:
const mx_uint num_input_nodes = input_keys.size(); const mx_uint num_input_nodes = input_keys.size();
const char* input_keys_ptr[num_input_nodes]; const char* input_key[1] = { "data" };
for(mx_uint i = 0; i < num_input_nodes; i++){ const char** input_keys_ptr = input_key;
input_keys_ptr[i] = input_keys[i].c_str();
}
mx_uint shape_data_size = 0; mx_uint shape_data_size = 0;
mx_uint input_shape_indptr[input_shapes.size() + 1]; mx_uint input_shape_indptr[input_shapes.size() + 1];
...@@ -92,8 +87,8 @@ public: ...@@ -92,8 +87,8 @@ public:
} }
} }
MXPredCreate((const char*)json_data.GetBuffer(), MXPredCreate(static_cast<const char*>(json_data.GetBuffer()),
(const char*)param_data.GetBuffer(), static_cast<const char*>(param_data.GetBuffer()),
static_cast<size_t>(param_data.GetLength()), static_cast<size_t>(param_data.GetLength()),
dev_type, dev_type,
dev_id, dev_id,
......
...@@ -6,7 +6,7 @@ import os ...@@ -6,7 +6,7 @@ import os
import shutil import shutil
from mxnet import gluon, autograd, nd from mxnet import gluon, autograd, nd
class CNNSupervisedTrainer(object): class CNNSupervisedTrainer_mnist_mnistClassifier_net:
def __init__(self, data_loader, net_constructor, net=None): def __init__(self, data_loader, net_constructor, net=None):
self._data_loader = data_loader self._data_loader = data_loader
self._net_creator = net_constructor self._net_creator = net_constructor
...@@ -48,7 +48,7 @@ class CNNSupervisedTrainer(object): ...@@ -48,7 +48,7 @@ class CNNSupervisedTrainer(object):
if self._net is None: if self._net is None:
if normalize: if normalize:
self._net_creator.construct( self._net_creator.construct(
context=mx_context, data_mean=nd.array(data_mean), data_std=nd.array(data_std)) context=mx_context, data_mean=data_mean, data_std=data_std)
else: else:
self._net_creator.construct(context=mx_context) self._net_creator.construct(context=mx_context)
...@@ -75,7 +75,7 @@ class CNNSupervisedTrainer(object): ...@@ -75,7 +75,7 @@ class CNNSupervisedTrainer(object):
loss_function = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss() loss_function = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss()
elif self._net.last_layer == 'linear': elif self._net.last_layer == 'linear':
loss_function = mx.gluon.loss.L2Loss() loss_function = mx.gluon.loss.L2Loss()
else: # TODO: Change default? else:
loss_function = mx.gluon.loss.L2Loss() loss_function = mx.gluon.loss.L2Loss()
logging.warning("Invalid last_layer, defaulting to L2 loss") logging.warning("Invalid last_layer, defaulting to L2 loss")
...@@ -85,10 +85,11 @@ class CNNSupervisedTrainer(object): ...@@ -85,10 +85,11 @@ class CNNSupervisedTrainer(object):
for epoch in range(begin_epoch, begin_epoch + num_epoch): for epoch in range(begin_epoch, begin_epoch + num_epoch):
train_iter.reset() train_iter.reset()
for batch_i, batch in enumerate(train_iter): for batch_i, batch in enumerate(train_iter):
data = batch.data[0].as_in_context(mx_context) image = batch.data[0].as_in_context(mx_context)
label = batch.label[0].as_in_context(mx_context) label = batch.label[0].as_in_context(mx_context)
with autograd.record(): with autograd.record():
output = self._net(data) output = self._net(image)
loss = loss_function(output, label) loss = loss_function(output, label)
loss.backward() loss.backward()
...@@ -112,9 +113,10 @@ class CNNSupervisedTrainer(object): ...@@ -112,9 +113,10 @@ class CNNSupervisedTrainer(object):
train_iter.reset() train_iter.reset()
metric = mx.metric.create(eval_metric) metric = mx.metric.create(eval_metric)
for batch_i, batch in enumerate(train_iter): for batch_i, batch in enumerate(train_iter):
data = batch.data[0].as_in_context(mx_context) image = batch.data[0].as_in_context(mx_context)
label = batch.label[0].as_in_context(mx_context) label = batch.label[0].as_in_context(mx_context)
output = self._net(data)
output = self._net(image)
predictions = mx.nd.argmax(output, axis=1) predictions = mx.nd.argmax(output, axis=1)
metric.update(preds=predictions, labels=label)