
clean up

parent 5bee43c5
Pipeline #186559 failed with stages
in 20 seconds
@@ -158,6 +158,30 @@ public class GenerationTest extends AbstractSymtabTest {
                         "CNNTrainer_mnist_mnistClassifier_net.py"));
     }
 
+    @Test
+    public void testMnistClassifierForTensorflow() throws IOException, TemplateException {
+        Log.getFindings().clear();
+        String[] args = {"-m", "src/test/resources/models/", "-r", "mnist.MnistClassifier", "-b", "TENSORFLOW", "-f", "n", "-c", "n"};
+        EMADLGeneratorCli.main(args);
+        assertTrue(Log.getFindings().isEmpty());
+        checkFilesAreEqual(
+                Paths.get("./target/generated-sources-emadl"),
+                Paths.get("./src/test/resources/target_code/tensorflow"),
+                Arrays.asList(
+                        "mnist_mnistClassifier.cpp",
+                        "mnist_mnistClassifier.h",
+                        "CNNCreator_mnist_mnistClassifier_net.py",
+                        "CNNPredictor_mnist_mnistClassifier_net.h",
+                        "CNNDataLoader_mnist_mnistClassifier_net.py",
+                        "mnist_mnistClassifier_net.h",
+                        "HelperA.h",
+                        "CNNTranslator.h",
+                        "mnist_mnistClassifier_calculateClass.h",
+                        "CNNTrainer_mnist_mnistClassifier_net.py",
+                        "mnist_mnistClassifier_net.h"));
+    }
+
     @Test
     public void testMnistClassifierForGluon() throws IOException, TemplateException {
         Log.getFindings().clear();
@@ -184,7 +208,7 @@ public class GenerationTest extends AbstractSymtabTest {
                         "CNNTrainer_mnist_mnistClassifier_net.py",
                         "mnist_mnistClassifier_net.h"));
     }
 
     @Test
     public void testInvariantForGluon() throws IOException, TemplateException {
         Log.getFindings().clear();
......
/**
*
* ******************************************************************************
* MontiCAR Modeling Family, www.se-rwth.de
* Copyright (c) 2017, Software Engineering Group at RWTH Aachen,
* All rights reserved.
*
* This project is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3.0 of the License, or (at your option) any later version.
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this project. If not, see <http://www.gnu.org/licenses/>.
* *******************************************************************************
*/
package de.monticore.lang.monticar.emadl;
import de.monticore.lang.monticar.emadl.generator.EMADLGeneratorCli;
import de.se_rwth.commons.logging.Log;
import org.junit.Ignore;
import org.junit.Test;
import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.nio.file.Paths;
import static junit.framework.TestCase.assertTrue;
import static org.junit.Assert.fail;
public class IntegrationTensorflowTest extends IntegrationTest {
    private Path multipleStreamsHashFile = Paths.get("./target/generated-sources-emadl/MultipleStreams.training_hash");

    public IntegrationTensorflowTest() {
        super("TENSORFLOW", "39253EC049D4A4E5FA0536AD34874B9D#1DBAEE1B1BD83FB7CB5F70AE91B29638#C4C23549E737A759721D6694C75D9771#5AF0CE68E408E8C1F000E49D72AC214A");
    }

    @Test
    public void testMultipleStreams() {
        Log.getFindings().clear();
        deleteHashFile(multipleStreamsHashFile);
        String[] args = {"-m", "src/test/resources/models/", "-r", "MultipleStreams", "-b", "TENSORFLOW"};
        EMADLGeneratorCli.main(args);
        assertTrue(Log.getFindings().isEmpty());
    }
    // Removes the stored training hash so that training is not skipped as already up to date.
    private void deleteHashFile(Path hashFile) {
        try {
            Files.delete(hashFile);
        }
        catch (NoSuchFileException e) {
            // No hash file yet; nothing to delete.
        }
        catch (Exception e) {
            fail("Could not delete hash file");
        }
    }
}
@@ -4,8 +4,8 @@ component MultipleStreams{
     implementation CNN {
         data[0] ->
-        Convolution(kernel=(5,5), channels=8, padding="valid") ->
-        Convolution(kernel=(5,5), channels=8, padding="valid") ->
+        Convolution(kernel=(5,5), channels=8) ->
+        Convolution(kernel=(5,5), channels=8) ->
         FullyConnected(units=128) ->
         Dropout()->
         FullyConnected(units=10) ->
@@ -13,8 +13,8 @@ component MultipleStreams{
         softmax[0];
 
         data[1] ->
-        Convolution(kernel=(5,5), channels=8, padding="valid") ->
-        Convolution(kernel=(5,5), channels=8, padding="valid") ->
+        Convolution(kernel=(5,5), channels=8) ->
+        Convolution(kernel=(5,5), channels=8) ->
         FullyConnected(units=128) ->
         Dropout()->
         FullyConnected(units=10) ->
......
@@ -6,10 +6,10 @@ component Network{
     implementation CNN {
         image ->
-        Convolution(kernel=(5,5), channels=20, padding="valid") ->
-        Pooling(pool_type="max", kernel=(2,2), stride=(2,2), padding="valid") ->
-        Convolution(kernel=(5,5), channels=50, padding="valid") ->
-        Pooling(pool_type="max", kernel=(2,2), stride=(2,2), padding="valid") ->
+        Convolution(kernel=(5,5), channels=20) ->
+        Pooling(pool_type="max", kernel=(2,2), stride=(2,2)) ->
+        Convolution(kernel=(5,5), channels=50) ->
+        Pooling(pool_type="max", kernel=(2,2), stride=(2,2)) ->
         FullyConnected(units=500) ->
         Relu() ->
         FullyConnected(units=10) ->
......
@@ -6,8 +6,8 @@ component CifarNetwork<Z(2:oo) classes = 10>{
     implementation CNN {
         data ->
-        Convolution(kernel=(5,5), channels=8, padding="valid") ->
-        Convolution(kernel=(5,5), channels=8, padding="valid") ->
+        Convolution(kernel=(5,5), channels=8) ->
+        Convolution(kernel=(5,5), channels=8) ->
         FullyConnected(units=128) ->
         Dropout()->
         FullyConnected(units=classes) ->
......
@@ -6,10 +6,10 @@ component LeNetNetwork<Z(2:oo) classes = 10>{
     implementation CNN {
         image ->
-        Convolution(kernel=(5,5), channels=20, padding="valid") ->
-        Pooling(pool_type="max", kernel=(2,2), stride=(2,2), padding="valid") ->
-        Convolution(kernel=(5,5), channels=50, padding="valid") ->
-        Pooling(pool_type="max", kernel=(2,2), stride=(2,2), padding="valid") ->
+        Convolution(kernel=(5,5), channels=20) ->
+        Pooling(pool_type="max", kernel=(2,2), stride=(2,2)) ->
+        Convolution(kernel=(5,5), channels=50) ->
+        Pooling(pool_type="max", kernel=(2,2), stride=(2,2)) ->
         FullyConnected(units=500) ->
         Relu() ->
         FullyConnected(units=classes) ->
......
@@ -6,8 +6,8 @@ component CifarNetwork<Z(2:oo) classes = 10>{
     implementation CNN {
         data ->
-        Convolution(kernel=(5,5), channels=8, padding="valid") ->
-        Convolution(kernel=(5,5), channels=8, padding="valid") ->
+        Convolution(kernel=(5,5), channels=8) ->
+        Convolution(kernel=(5,5), channels=8) ->
         FullyConnected(units=128) ->
         Dropout()->
         FullyConnected(units=classes) ->
......
@@ -74,15 +74,15 @@ class CNNCreator_mnist_mnistClassifier_net:
         image_ = data
         # image_, output shape: {[1,28,28]}
-        conv1_ = brew.conv(model, image_, 'conv1_', dim_in=1, dim_out=20, kernel=5, stride=1)
-        # conv1_, output shape: {[20,24,24]}
-        pool1_ = brew.max_pool(model, conv1_, 'pool1_', kernel=2, stride=2)
-        # pool1_, output shape: {[20,12,12]}
-        conv2_ = brew.conv(model, pool1_, 'conv2_', dim_in=20, dim_out=50, kernel=5, stride=1)
-        # conv2_, output shape: {[50,8,8]}
-        pool2_ = brew.max_pool(model, conv2_, 'pool2_', kernel=2, stride=2)
-        # pool2_, output shape: {[50,4,4]}
-        fc2_ = brew.fc(model, pool2_, 'fc2_', dim_in=50 * 4 * 4, dim_out=500)
+        conv1_ = brew.conv(model, image_, 'conv1_', dim_in=1, dim_out=20, kernel=5, stride=1, pad=1)
+        # conv1_, output shape: {[20,28,28]}
+        pool1_ = brew.max_pool(model, conv1_, 'pool1_', kernel=2, stride=2, pad=1)
+        # pool1_, output shape: {[20,14,14]}
+        conv2_ = brew.conv(model, pool1_, 'conv2_', dim_in=20, dim_out=50, kernel=5, stride=1, pad=1)
+        # conv2_, output shape: {[50,14,14]}
+        pool2_ = brew.max_pool(model, conv2_, 'pool2_', kernel=2, stride=2, pad=1)
+        # pool2_, output shape: {[50,7,7]}
+        fc2_ = brew.fc(model, pool2_, 'fc2_', dim_in=50 * 7 * 7, dim_out=500)
         # fc2_, output shape: {[500,1,1]}
         relu2_ = brew.relu(model, fc2_, fc2_)
         fc3_ = brew.fc(model, relu2_, 'fc3_', dim_in=500, dim_out=10)
......
@@ -90,27 +90,29 @@ class Net_0(gluon.HybridBlock):
             else:
                 self.input_normalization_image_ = NoNormalization()
 
+            self.conv1_padding = Padding(padding=(0,0,0,0,2,2,2,2))
             self.conv1_ = gluon.nn.Conv2D(channels=20,
                 kernel_size=(5,5),
                 strides=(1,1),
                 use_bias=True)
-            # conv1_, output shape: {[20,24,24]}
+            # conv1_, output shape: {[20,28,28]}
 
             self.pool1_ = gluon.nn.MaxPool2D(
                 pool_size=(2,2),
                 strides=(2,2))
-            # pool1_, output shape: {[20,12,12]}
+            # pool1_, output shape: {[20,14,14]}
 
+            self.conv2_padding = Padding(padding=(0,0,0,0,2,2,2,2))
             self.conv2_ = gluon.nn.Conv2D(channels=50,
                 kernel_size=(5,5),
                 strides=(1,1),
                 use_bias=True)
-            # conv2_, output shape: {[50,8,8]}
+            # conv2_, output shape: {[50,14,14]}
 
             self.pool2_ = gluon.nn.MaxPool2D(
                 pool_size=(2,2),
                 strides=(2,2))
-            # pool2_, output shape: {[50,4,4]}
+            # pool2_, output shape: {[50,7,7]}
 
             self.fc2_ = gluon.nn.Dense(units=500, use_bias=True, flatten=True)
             # fc2_, output shape: {[500,1,1]}
@@ -124,9 +126,11 @@ class Net_0(gluon.HybridBlock):
     def hybrid_forward(self, F, image_):
         image_ = self.input_normalization_image_(image_)
-        conv1_ = self.conv1_(image_)
+        conv1_padding = self.conv1_padding(image_)
+        conv1_ = self.conv1_(conv1_padding)
         pool1_ = self.pool1_(conv1_)
-        conv2_ = self.conv2_(pool1_)
+        conv2_padding = self.conv2_padding(pool1_)
+        conv2_ = self.conv2_(conv2_padding)
         pool2_ = self.pool2_(conv2_)
         fc2_ = self.fc2_(pool2_)
         relu2_ = self.relu2_(fc2_)
......
import logging
import os
import errno
import shutil
import h5py
import sys
import numpy as np
import tensorflow as tf
from CNNDataLoader_mnist_mnistClassifier_net import CNNDataLoader_mnist_mnistClassifier_net as CNNDataLoader
def huber_loss(y_true, y_pred):
    return tf.losses.huber_loss(y_true, y_pred)

def epe(y_true, y_pred):
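    # Average end-point error (assumed semantics): Euclidean norm of the prediction error
    # over axis 1, then the mean over the remaining spatial axes.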
    return tf.keras.backend.mean(tf.keras.backend.sqrt(tf.keras.backend.sum(tf.keras.backend.square(y_pred - y_true), axis=[1])), axis=[1,2])

def rmse(y_true, y_pred):
    return tf.keras.backend.sqrt(tf.keras.losses.mean_squared_error(y_true, y_pred))

def f1(y_true, y_pred):
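    # F1 score: harmonic mean of precision and recall, i.e. 2*P*R / (P + R).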
    frac1 = tf.metrics.precision(y_true, y_pred) * tf.metrics.recall(y_true, y_pred)
    frac2 = tf.metrics.precision(y_true, y_pred) + tf.metrics.recall(y_true, y_pred)
    return 2 * frac1 / frac2

class LRScheduler:
    def __init__(self, params):
        self._decay = params["lr_decay"]
        if "lr_policy" in params:
            self._policy = params["lr_policy"]
            logging.info("Using %s learning_rate_policy!\n\n", self._policy)
        else:
            self._policy = "step"
            logging.warning("No learning_rate_policy specified. Using step scheduler!\n\n")

        if "lr_minimum" in params:
            self._minimum = params["lr_minimum"]
        else:
            self._minimum = 1e-08

        if "step_size" in params:
            self._step_size = params["step_size"]
        else:
            self._step_size = None

        self.scheduler = self.get_lr_scheduler()

    def get_lr_scheduler(self):
        mapping = {
            "fixed": self.fixed_scheduler,
            "step": self.step_scheduler}
        mapping_not_supported = {
            "exp": "exp",
            "inv": "inv",
            "poly": "poly",
            "sigmoid": "sigmoid"}

        if self._policy in mapping:
            return mapping[self._policy]
        elif self._policy in mapping_not_supported:
            # Some parameters necessary for these policies are missing from the CNNTrainLang grammar, and since the MXNET
            # generator also only implemented the step policy at the time of writing, we chose not to add them for now.
            # These policies can be added by using the respective commented-out functions below.
            logging.warning("The %s learning_rate_policy is currently not supported by the keras/tensorflow generator. \n", self._policy)
        else:
            logging.warning("The following learning_rate_policy is not supported by the keras/tensorflow generator: %s \n", self._policy)

    # Note that the Keras callback for lr scheduling only gets called between epochs, not on single iterations.
    def fixed_scheduler(self, epoch_ind, old_lr):
        return old_lr

    def step_scheduler(self, epoch_ind, old_lr):
        if (epoch_ind % self._step_size == 0) and epoch_ind > 0:
            new_lr = old_lr * self._decay
            if new_lr < self._minimum:
                new_lr = self._minimum
            return new_lr
        else:
            return old_lr

    #def exp_scheduler(self, epoch_ind, old_lr):
    #    return old_lr

    #def inv_scheduler(self, epoch_ind, old_lr):
    #    return old_lr

    #def poly_scheduler(self, epoch_ind, old_lr):
    #    return old_lr

    #def sigmoid_scheduler(self, epoch_ind, old_lr):
    #    return old_lr
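# Usage sketch (illustrative, not part of the generated code): the scheduler functions match
# the (epoch, lr) signature accepted by tf.keras.callbacks.LearningRateScheduler, e.g.:
#   scheduler = LRScheduler({"lr_decay": 0.9, "lr_policy": "step", "step_size": 2})
#   lr_callback = tf.keras.callbacks.LearningRateScheduler(scheduler.scheduler)
#   model.fit(x, y, epochs=10, callbacks=[lr_callback])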
# If clip_weights is specified for the rmsprop optimizer this class is needed, as the
# keras/tensorflow variant of rmsprop does not support weight clipping.
class WeightClip(tf.keras.constraints.Constraint):
    def __init__(self, clip_val=2):
        self.clip_val = clip_val

    def __call__(self, w):
        return tf.keras.backend.clip(w, -self.clip_val, self.clip_val)

    def get_config(self):
        return {'name': self.__class__.__name__,
                'clip_val': self.clip_val}
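# Usage sketch (illustrative): as a standard Keras constraint, WeightClip can also be
# attached to individual layers, e.g.:
#   clipped = tf.keras.layers.Dense(10, kernel_constraint=WeightClip(2.0))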
class CNNCreator_mnist_mnistClassifier_net:
    def __init__(self):
        self.model = None
        self._data_dir_ = "data/mnist.LeNetNetwork/"
        self._model_dir_ = "model/mnist.LeNetNetwork/"
        self._model_prefix_ = "model"
        self._input_names_ = ['image']
        self._output_names_ = ['predictions_label']
        self._output_shapes_ = [(10,)]
        self._weight_constraint_ = None
        self._regularizer_ = None

    def load(self):
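        # Find the checkpoint with the highest epoch number in the model directory,
        # load it, and return the epoch to resume from (0 if no checkpoint exists).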
        lastEpoch = 0
        model_file = None

        try:
            os.remove(self._model_dir_ + self._model_prefix_ + ".newest.hdf5")
        except OSError:
            pass

        if os.path.isdir(self._model_dir_):
            for file in os.listdir(self._model_dir_):
                if ".hdf5" in file and self._model_prefix_ in file:
                    epochStr = file.replace(".hdf5", "").replace(self._model_prefix_ + ".", "")
                    epoch = int(epochStr)
                    if epoch > lastEpoch:
                        lastEpoch = epoch
                        model_file = file

        if model_file is None:
            return 0
        else:
            logging.info("Loading checkpoint: " + model_file)
            self.model = tf.keras.models.load_model(self._model_dir_ + model_file)
            return lastEpoch

    def build_optimizer(self, optimizer_name, params):
        fixed_params, lr_scheduler_params = self.translate_optimizer_param_names(params)
        if optimizer_name == "adam":
            return tf.keras.optimizers.Adam(**fixed_params), lr_scheduler_params
        elif optimizer_name == "sgd":
            return tf.keras.optimizers.SGD(nesterov=False, **fixed_params), lr_scheduler_params
        elif optimizer_name == "nag":
            return tf.keras.optimizers.SGD(nesterov=True, **fixed_params), lr_scheduler_params
        elif optimizer_name == "rmsprop":
            return tf.keras.optimizers.RMSprop(**fixed_params), lr_scheduler_params
        elif optimizer_name == "adagrad":
            return tf.keras.optimizers.Adagrad(**fixed_params), lr_scheduler_params
        elif optimizer_name == "adadelta":
            return tf.keras.optimizers.Adadelta(**fixed_params), lr_scheduler_params
        else:
            logging.warning("Optimizer not supported by keras/tensorflow: %s \n", optimizer_name)

    def translate_optimizer_param_names(self, params):
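        # Example (assumed values): {"learning_rate": 0.001, "learning_rate_decay": 0.9} is split into
        # fixed_params {"lr": 0.001} for the tf.keras optimizer constructor and
        # lr_scheduler_params {"lr_decay": 0.9} for the LRScheduler above.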
        mapping = {
            "learning_rate": "lr",
            "momentum": "momentum",
            "beta1": "beta_1",
            "beta2": "beta_2",
            "gamma1": "rho",
            "gamma2": "momentum",
            "centered": "centered",
            "epsilon": "epsilon",
            "rho": "rho",
            "clip_gradient": "clipvalue"}
        mapping_lr_scheduler = {
            "learning_rate_decay": "lr_decay",
            "learning_rate_policy": "lr_policy",
            "learning_rate_minimum": "lr_minimum",
            "step_size": "step_size"}

        fixed_params = {}
        lr_scheduler_params = {}
        for k in params:
            if k == "clip_weights":
                self._weight_constraint_ = WeightClip(params[k])
            elif k == "weight_decay":
                self._regularizer_ = tf.keras.regularizers.l2(params[k])
            elif k in mapping_lr_scheduler:
                lr_scheduler_params[mapping_lr_scheduler[k]] = params[k]
            elif k in mapping.keys():
                fixed_params[mapping[k]] = params[k]
            else:
                logging.warning("The following parameter is not supported by the keras/tensorflow generator: %s \n", k)
        return fixed_params, lr_scheduler_params

    def translate_loss_name(self, loss, num_outputs):
        mapping = {
            "l2": "mean_squared_error",
            "l1": "mean_absolute_error",
            "cross_entropy": "sparse_categorical_crossentropy" if num_outputs > 1 else "binary_crossentropy",
            "log_cosh": "logcosh",
            "hinge": "hinge",
            "squared_hinge": "squared_hinge",
            "kullback_leibler": "kullback_leibler_divergence",
            "huber_loss": huber_loss,
            "epe": epe}
        if loss in mapping.keys():
            fixed_loss = mapping[loss]
        else:
            fixed_loss = None
            logging.warning("The following loss is not supported by the keras/tensorflow generator: %s \n", loss)
        return fixed_loss

    def translate_eval_metric_names(self, metrics, num_outputs):
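        # Example (assumed behavior): translate_eval_metric_names(["accuracy", "rmse"], 10)
        # returns ["acc", rmse], i.e. built-in Keras metric names plus the custom metric functions above.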
        mapping = {
            "accuracy": "acc",
            "mse": "mse",
            "mae": "mae",
            "rmse": rmse,
            "top_k_accuracy": "top_k_categorical_accuracy",
            "cross_entropy": "sparse_categorical_crossentropy" if num_outputs > 1 else "binary_crossentropy",
            "f1": f1}
        fixed_metric_names = []
        for k in metrics:
            if k in mapping.keys():
                fixed_metric_names.append(mapping[k])
            elif k != []:
                logging.warning("The following metric is not supported by the keras/tensorflow generator: %s \n", k)
        return fixed_metric_names

    def train(self, batch_size=64,
              num_epoch=10,
              eval_metric=[],
              loss="cross_entropy",
              loss_weights=None,
              optimizer='adam',
              optimizer_params=(('learning_rate', 0.001),),
              load_checkpoint=True,
              context='gpu',
              checkpoint_period=5,
              normalize=True):
        if context == "cpu":
            os.environ["CUDA_VISIBLE_DEVICES"] = '-1'

        dataLoader = CNNDataLoader(self._data_dir_, self._input_names_, self._output_names_, self._output_shapes_)
        train_gen, test_gen, data_mean, data_std, steps_per_epoch, validation_steps = dataLoader.load_data_generators(batch_size, normalize)

        if self.model is None:
            if normalize:
                self.construct(data_mean, data_std)
            else:
                self.construct()

        optimizer_instance, lr_scheduler_params = self.build_optimizer(optimizer, optimizer_params)
        num_outputs = self.model.layers[-1].output_shape[1]
        metrics = self.translate_eval_metric_names([eval_metric], num_outputs)
        tf_loss = self.translate_loss_name(loss, num_outputs)

        begin_epoch = 0
        if load_checkpoint:
            begin_epoch = self.load()

        if begin_epoch == 0:
            if os.path.isdir(self._model_dir_):
                shutil.rmtree(self._model_dir_)

        self.model.compile(
            optimizer=optimizer_instance,
            loss=tf_loss,