Commit 9d15e90e authored by Sebastian Nickels

Fixed tests and reverted _label change

parent efcd6240
Pipeline #144204 failed with stages in 5 minutes and 8 seconds
@@ -5,6 +5,6 @@ nppBackup
.classpath
.idea
.git
.vscode
*.iml
train.log
{
"configurations": [
{
"type": "java",
"name": "CodeLens (Launch) - EMADLGeneratorCli",
"request": "launch",
"mainClass": "de.monticore.lang.monticar.emadl.generator.EMADLGeneratorCli",
"projectName": "embedded-montiarc-emadl-generator"
}
]
}
\ No newline at end of file
@@ -40,7 +40,7 @@ public enum Backend {
}
@Override
public CNNTrainGenerator getCNNTrainGenerator() {
return new CNNTrain2Gluon();
return new CNNTrain2Gluon(new RewardFunctionCppGenerator());
}
};
......
@@ -360,6 +360,8 @@ public class EMADLGenerator {
EMADLCocos.checkAll(componentInstanceSymbol);
if (architecture.isPresent()){
cnnArchGenerator.checkSupport(architecture.get());
DataPathConfigParser newParserConfig = new DataPathConfigParser(getModelsPath() + "data_paths.txt");
String dPath = newParserConfig.getDataPath(EMAComponentSymbol.getFullName());
......
@@ -52,7 +52,7 @@ public class EMADLGeneratorCli {
.build();
public static final Option OPTION_BACKEND = Option.builder("b")
.longOpt("backend")
.desc("deep-learning-framework backend. Options: MXNET, CAFFE2")
.desc("deep-learning-framework backend. Options: MXNET, CAFFE2, GLUON")
.hasArg(true)
.required(false)
.build();
......
package de.monticore.lang.monticar.emadl.generator;
import de.monticore.lang.embeddedmontiarc.embeddedmontiarc._symboltable.instanceStructure.EMAComponentInstanceSymbol;
import de.monticore.lang.monticar.cnnarch.gluongenerator.reinforcement.RewardFunctionSourceGenerator;
import de.monticore.lang.monticar.generator.cpp.GeneratorEMAMOpt2CPP;
import de.monticore.lang.tagging._symboltable.TaggingResolver;
import de.se_rwth.commons.logging.Log;
import java.io.IOException;
import java.util.Optional;
// TODO: Only added temporarily to make compilation work
public class RewardFunctionCppGenerator implements RewardFunctionSourceGenerator {
public RewardFunctionCppGenerator() {
}
@Override
public void generate(String modelPath, String rootModel, String targetPath) {
GeneratorEMAMOpt2CPP generator = new GeneratorEMAMOpt2CPP();
generator.useArmadilloBackend();
TaggingResolver taggingResolver = EMADLAbstractSymtab.createSymTabAndTaggingResolver(modelPath);
Optional<EMAComponentInstanceSymbol> instanceSymbol = taggingResolver
.<EMAComponentInstanceSymbol>resolve(rootModel, EMAComponentInstanceSymbol.KIND);
if (!instanceSymbol.isPresent()) {
Log.error("Generation of reward function is not possible: Cannot resolve component instance "
+ rootModel);
}
generator.setGenerationTargetPath(targetPath);
try {
generator.generate(instanceSymbol.get(), taggingResolver);
} catch (IOException e) {
Log.error("Generation of reward function is not possible: " + e.getMessage());
}
}
}
@@ -140,7 +140,7 @@ public class GenerationTest extends AbstractSymtabTest {
}
@Test
public void testMnistClassifier() throws IOException, TemplateException {
public void testMnistClassifierForCaffe2() throws IOException, TemplateException {
Log.getFindings().clear();
String[] args = {"-m", "src/test/resources/models/", "-r", "mnist.MnistClassifier", "-b", "CAFFE2", "-f", "n", "-c", "n"};
EMADLGeneratorCli.main(args);
@@ -178,7 +178,7 @@ public class GenerationTest extends AbstractSymtabTest {
"CNNCreator_mnist_mnistClassifier_net.py",
"CNNPredictor_mnist_mnistClassifier_net.h",
"CNNDataLoader_mnist_mnistClassifier_net.py",
"supervised_trainer.py",
"CNNSupervisedTrainer_mnist_mnistClassifier_net.py",
"mnist_mnistClassifier_net.h",
"HelperA.h",
"CNNTranslator.h",
......
@@ -20,27 +20,8 @@
*/
package de.monticore.lang.monticar.emadl;
import de.monticore.lang.monticar.emadl.generator.Backend;
import de.monticore.lang.monticar.emadl.generator.EMADLGenerator;
import de.monticore.lang.monticar.emadl.generator.EMADLGeneratorCli;
import de.se_rwth.commons.logging.Log;
import freemarker.template.TemplateException;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;
import static junit.framework.TestCase.assertTrue;
import static org.junit.Assert.assertFalse;
public class IntegrationCaffe2Test extends IntegrationTest {
public IntegrationCaffe2Test() {
super("CAFFE2", "39253EC049D4A4E5FA0536AD34874B9D#1DBAEE1B1BD83FB7CB5F70AE91B29638#13D139510DC5681639AA91D7250288D3#1A42D4842D0664937A9F6B727BD60CEF");
}
}
}
\ No newline at end of file
@@ -20,25 +20,6 @@
*/
package de.monticore.lang.monticar.emadl;
import de.monticore.lang.monticar.emadl.generator.Backend;
import de.monticore.lang.monticar.emadl.generator.EMADLGenerator;
import de.monticore.lang.monticar.emadl.generator.EMADLGeneratorCli;
import de.se_rwth.commons.logging.Log;
import freemarker.template.TemplateException;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;
import static junit.framework.TestCase.assertTrue;
import static org.junit.Assert.assertFalse;
public class IntegrationMXNetTest extends IntegrationTest {
public IntegrationMXNetTest() {
super("MXNET", "39253EC049D4A4E5FA0536AD34874B9D#1DBAEE1B1BD83FB7CB5F70AE91B29638#C4C23549E737A759721D6694C75D9771#5AF0CE68E408E8C1F000E49D72AC214A");
......
@@ -6,12 +6,15 @@ from CNNNet_mnist_mnistClassifier_net import Net
class CNNCreator_mnist_mnistClassifier_net:
_model_dir_ = "model/mnist.LeNetNetwork/"
_model_prefix_ = "model"
_input_shapes_ = [(1,28,28)]
_input_shapes_ = [(1,28,28,)]
def __init__(self):
self.weight_initializer = mx.init.Normal()
self.net = None
def get_input_shapes(self):
return self._input_shapes_
def load(self, context):
lastEpoch = 0
param_file = None
@@ -40,12 +43,11 @@ class CNNCreator_mnist_mnistClassifier_net:
self.net.load_parameters(self._model_dir_ + param_file)
return lastEpoch
def construct(self, context, data_mean=None, data_std=None):
self.net = Net(data_mean=data_mean, data_std=data_std)
self.net.collect_params().initialize(self.weight_initializer, ctx=context)
self.net.hybridize()
self.net(mx.nd.zeros((1,)+self._input_shapes_[0], ctx=context))
self.net(mx.nd.zeros((1,) + self._input_shapes_[0], ctx=context))
if not os.path.exists(self._model_dir_):
os.makedirs(self._model_dir_)
......
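For context on the construct() change above: hybridize() defers graph construction, so the creator pushes a single zero-filled batch through the network to trigger shape inference, prepending a batch dimension to the stored input shape. A minimal, self-contained sketch of that pattern (one hypothetical convolution layer, not the generated Net class):

import mxnet as mx
from mxnet import gluon

input_shape = (1, 28, 28)                    # channels, height, width, as in _input_shapes_

net = gluon.nn.HybridSequential()
with net.name_scope():
    net.add(gluon.nn.Conv2D(channels=20, kernel_size=(5, 5)))   # placeholder layer
net.collect_params().initialize(mx.init.Normal(), ctx=mx.cpu())
net.hybridize()

dummy = mx.nd.zeros((1,) + input_shape, ctx=mx.cpu())           # shape (1, 1, 28, 28)
print(net(dummy).shape)                                          # forward pass infers all shapes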
@@ -3,8 +3,9 @@ import h5py
import mxnet as mx
import logging
import sys
from mxnet import nd
class mnist_mnistClassifier_netDataLoader:
class CNNDataLoader_mnist_mnistClassifier_net:
_input_names_ = ['image']
_output_names_ = ['predictions_label']
@@ -14,21 +15,38 @@ class mnist_mnistClassifier_netDataLoader:
def load_data(self, batch_size):
train_h5, test_h5 = self.load_h5_files()
data_mean = train_h5[self._input_names_[0]][:].mean(axis=0)
data_std = train_h5[self._input_names_[0]][:].std(axis=0) + 1e-5
train_data = {}
data_mean = {}
data_std = {}
for input_name in self._input_names_:
train_data[input_name] = train_h5[input_name]
data_mean[input_name] = nd.array(train_h5[input_name][:].mean(axis=0))
data_std[input_name] = nd.array(train_h5[input_name][:].std(axis=0) + 1e-5)
train_label = {}
for output_name in self._output_names_:
train_label[output_name] = train_h5[output_name]
train_iter = mx.io.NDArrayIter(data=train_data,
label=train_label,
batch_size=batch_size)
train_iter = mx.io.NDArrayIter(train_h5[self._input_names_[0]],
train_h5[self._output_names_[0]],
batch_size=batch_size,
data_name=self._input_names_[0],
label_name=self._output_names_[0])
test_iter = None
if test_h5 != None:
test_iter = mx.io.NDArrayIter(test_h5[self._input_names_[0]],
test_h5[self._output_names_[0]],
batch_size=batch_size,
data_name=self._input_names_[0],
label_name=self._output_names_[0])
test_data = {}
for input_name in self._input_names_:
test_data[input_name] = test_h5[input_name]
test_label = {}
for output_name in self._output_names_:
test_label[output_name] = test_h5[output_name]
test_iter = mx.io.NDArrayIter(data=test_data,
label=test_label,
batch_size=batch_size)
return train_iter, test_iter, data_mean, data_std
def load_h5_files(self):
@@ -36,21 +54,39 @@ class mnist_mnistClassifier_netDataLoader:
test_h5 = None
train_path = self._data_dir + "train.h5"
test_path = self._data_dir + "test.h5"
if os.path.isfile(train_path):
train_h5 = h5py.File(train_path, 'r')
if not (self._input_names_[0] in train_h5 and self._output_names_[0] in train_h5):
logging.error("The HDF5 file '" + os.path.abspath(train_path) + "' has to contain the datasets: "
+ "'" + self._input_names_[0] + "', '" + self._output_names_[0] + "'")
sys.exit(1)
test_iter = None
for input_name in self._input_names_:
if not input_name in train_h5:
logging.error("The HDF5 file '" + os.path.abspath(train_path) + "' has to contain the dataset "
+ "'" + input_name + "'")
sys.exit(1)
for output_name in self._output_names_:
if not output_name in train_h5:
logging.error("The HDF5 file '" + os.path.abspath(train_path) + "' has to contain the dataset "
+ "'" + output_name + "'")
sys.exit(1)
if os.path.isfile(test_path):
test_h5 = h5py.File(test_path, 'r')
if not (self._input_names_[0] in test_h5 and self._output_names_[0] in test_h5):
logging.error("The HDF5 file '" + os.path.abspath(test_path) + "' has to contain the datasets: "
+ "'" + self._input_names_[0] + "', '" + self._output_names_[0] + "'")
sys.exit(1)
for input_name in self._input_names_:
if not input_name in test_h5:
logging.error("The HDF5 file '" + os.path.abspath(test_path) + "' has to contain the dataset "
+ "'" + input_name + "'")
sys.exit(1)
for output_name in self._output_names_:
if not output_name in test_h5:
logging.error("The HDF5 file '" + os.path.abspath(test_path) + "' has to contain the dataset "
+ "'" + output_name + "'")
sys.exit(1)
else:
logging.warning("Couldn't load test set. File '" + os.path.abspath(test_path) + "' does not exist.")
return train_h5, test_h5
else:
logging.error("Data loading failure. File '" + os.path.abspath(train_path) + "' does not exist.")
......
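The data loader rewrite above replaces the single-array NDArrayIter calls with dictionaries keyed by input and output name, which is what allows per-input mean/std statistics and, in general, networks with several inputs or outputs. A self-contained sketch of that construction (synthetic arrays, the same 'image'/'predictions_label' names):

import numpy as np
import mxnet as mx

train_data = {'image': np.zeros((8, 1, 28, 28), dtype='float32')}
train_label = {'predictions_label': np.arange(8, dtype='float32')}

train_iter = mx.io.NDArrayIter(data=train_data, label=train_label, batch_size=4)
for batch in train_iter:
    print(batch.data[0].shape, batch.label[0].shape)   # (4, 1, 28, 28) (4,)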
@@ -72,11 +72,12 @@ class Net(gluon.HybridBlock):
def __init__(self, data_mean=None, data_std=None, **kwargs):
super(Net, self).__init__(**kwargs)
with self.name_scope():
if not data_mean is None:
assert(not data_std is None)
self.input_normalization = ZScoreNormalization(data_mean=data_mean, data_std=data_std)
if data_mean:
assert(data_std)
self.input_normalization_image = ZScoreNormalization(data_mean=data_mean['image'],
data_std=data_std['image'])
else:
self.input_normalization = NoNormalization()
self.input_normalization_image = NoNormalization()
self.conv1_ = gluon.nn.Conv2D(channels=20,
kernel_size=(5,5),
@@ -112,8 +113,8 @@
self.last_layer = 'softmax'
def hybrid_forward(self, F, x):
image = self.input_normalization(x)
def hybrid_forward(self, F, image):
image = self.input_normalization_image(image)
conv1_ = self.conv1_(image)
pool1_ = self.pool1_(conv1_)
conv2_ = self.conv2_(pool1_)
......
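In the network change above, normalization is now selected per input: the constructor receives the mean/std dictionaries produced by the data loader and looks them up by input name ('image'). A hedged, standalone sketch of that z-score normalization step with synthetic statistics (not the generated ZScoreNormalization class):

import mxnet as mx
from mxnet import nd

data_mean = {'image': nd.zeros((1, 28, 28))}    # one entry per network input
data_std = {'image': nd.ones((1, 28, 28))}

batch = nd.random.uniform(shape=(4, 1, 28, 28))

if data_mean:                                    # empty/None dict disables normalization
    assert data_std
    mean = data_mean['image'].expand_dims(axis=0)    # (1, 1, 28, 28), aligned with the batch
    std = data_std['image'].expand_dims(axis=0)
    normalized = nd.broadcast_div(nd.broadcast_sub(batch, mean), std)
else:
    normalized = batch

print(normalized.shape)                          # (4, 1, 28, 28)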
@@ -30,8 +30,7 @@ public:
void predict(const std::vector<float> &image,
std::vector<float> &predictions){
MXPredSetInput(handle, "data", image.data(), image.size());
//MXPredSetInput(handle, "image", image.data(), image.size());
MXPredSetInput(handle, "data", image.data(), static_cast<mx_uint>(image.size()));
MXPredForward(handle);
@@ -61,8 +60,6 @@ public:
int dev_type = use_gpu ? 2 : 1;
int dev_id = 0;
handle = 0;
if (json_data.GetLength() == 0 ||
param_data.GetLength() == 0) {
std::exit(-1);
@@ -70,10 +67,8 @@ public:
const mx_uint num_input_nodes = input_keys.size();
const char* input_keys_ptr[num_input_nodes];
for(mx_uint i = 0; i < num_input_nodes; i++){
input_keys_ptr[i] = input_keys[i].c_str();
}
const char* input_key[1] = { "data" };
const char** input_keys_ptr = input_key;
mx_uint shape_data_size = 0;
mx_uint input_shape_indptr[input_shapes.size() + 1];
@@ -92,8 +87,8 @@ public:
}
}
MXPredCreate((const char*)json_data.GetBuffer(),
(const char*)param_data.GetBuffer(),
MXPredCreate(static_cast<const char*>(json_data.GetBuffer()),
static_cast<const char*>(param_data.GetBuffer()),
static_cast<size_t>(param_data.GetLength()),
dev_type,
dev_id,
......
@@ -6,7 +6,7 @@ import os
import shutil
from mxnet import gluon, autograd, nd
class CNNSupervisedTrainer(object):
class CNNSupervisedTrainer_mnist_mnistClassifier_net:
def __init__(self, data_loader, net_constructor, net=None):
self._data_loader = data_loader
self._net_creator = net_constructor
@@ -48,7 +48,7 @@ class CNNSupervisedTrainer(object):
if self._net is None:
if normalize:
self._net_creator.construct(
context=mx_context, data_mean=nd.array(data_mean), data_std=nd.array(data_std))
context=mx_context, data_mean=data_mean, data_std=data_std)
else:
self._net_creator.construct(context=mx_context)
@@ -75,7 +75,7 @@ class CNNSupervisedTrainer(object):
loss_function = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss()
elif self._net.last_layer == 'linear':
loss_function = mx.gluon.loss.L2Loss()
else: # TODO: Change default?
else:
loss_function = mx.gluon.loss.L2Loss()
logging.warning("Invalid last_layer, defaulting to L2 loss")
@@ -85,10 +85,11 @@ class CNNSupervisedTrainer(object):
for epoch in range(begin_epoch, begin_epoch + num_epoch):
train_iter.reset()
for batch_i, batch in enumerate(train_iter):
data = batch.data[0].as_in_context(mx_context)
image = batch.data[0].as_in_context(mx_context)
label = batch.label[0].as_in_context(mx_context)
with autograd.record():
output = self._net(data)
output = self._net(image)
loss = loss_function(output, label)
loss.backward()
@@ -112,9 +113,10 @@ class CNNSupervisedTrainer(object):
train_iter.reset()
metric = mx.metric.create(eval_metric)
for batch_i, batch in enumerate(train_iter):
data = batch.data[0].as_in_context(mx_context)
image = batch.data[0].as_in_context(mx_context)
label = batch.label[0].as_in_context(mx_context)
output = self._net(data)
output = self._net(image)
predictions = mx.nd.argmax(output, axis=1)
metric.update(preds=predictions, labels=label)
train_metric_score = metric.get()[1]
@@ -122,9 +124,10 @@ class CNNSupervisedTrainer(object):
test_iter.reset()
metric = mx.metric.create(eval_metric)
for batch_i, batch in enumerate(test_iter):
data = batch.data[0].as_in_context(mx_context)
image = batch.data[0].as_in_context(mx_context)
label = batch.label[0].as_in_context(mx_context)
output = self._net(data)
output = self._net(image)
predictions = mx.nd.argmax(output, axis=1)
metric.update(preds=predictions, labels=label)
test_metric_score = metric.get()[1]
......
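The training loop above evaluates classification outputs by taking the argmax over the output axis and feeding the result to an MXNet metric together with the labels. A small standalone sketch of that evaluation step (random outputs, assumed 10-class problem, 'accuracy' as an example eval_metric):

import mxnet as mx

metric = mx.metric.create('accuracy')

output = mx.nd.random.uniform(shape=(4, 10))    # stand-in for self._net(image)
label = mx.nd.array([0, 1, 2, 3])

predictions = mx.nd.argmax(output, axis=1)
metric.update(preds=predictions, labels=label)
print(metric.get())                              # ('accuracy', <value between 0 and 1>)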
import logging
import mxnet as mx
import supervised_trainer
import CNNCreator_mnist_mnistClassifier_net
import CNNDataLoader_mnist_mnistClassifier_net
import CNNSupervisedTrainer_mnist_mnistClassifier_net
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
@@ -11,9 +11,11 @@ if __name__ == "__main__":
logger.addHandler(handler)
mnist_mnistClassifier_net_creator = CNNCreator_mnist_mnistClassifier_net.CNNCreator_mnist_mnistClassifier_net()
mnist_mnistClassifier_net_loader = CNNDataLoader_mnist_mnistClassifier_net.mnist_mnistClassifier_netDataLoader()
mnist_mnistClassifier_net_trainer = supervised_trainer.CNNSupervisedTrainer(mnist_mnistClassifier_net_loader,
mnist_mnistClassifier_net_creator)
mnist_mnistClassifier_net_loader = CNNDataLoader_mnist_mnistClassifier_net.CNNDataLoader_mnist_mnistClassifier_net()
mnist_mnistClassifier_net_trainer = CNNSupervisedTrainer_mnist_mnistClassifier_net.CNNSupervisedTrainer_mnist_mnistClassifier_net(
mnist_mnistClassifier_net_loader,
mnist_mnistClassifier_net_creator
)
mnist_mnistClassifier_net_trainer.train(
batch_size=64,
......