Commit 118babe2 authored by Julian Dierkes

added GAN features

parent 38d0735d
File added
......@@ -18,12 +18,13 @@
<!-- .. SE-Libraries .................................................. -->
<emadl.version>0.2.10-SNAPSHOT</emadl.version>
<CNNTrain.version>0.3.8-SNAPSHOT</CNNTrain.version>
<cnnarch-generator.version>0.0.4-SNAPSHOT</cnnarch-generator.version>
<cnnarch-generator.version>0.0.5-SNAPSHOT</cnnarch-generator.version>
<cnnarch-mxnet-generator.version>0.2.17-SNAPSHOT</cnnarch-mxnet-generator.version>
<cnnarch-caffe2-generator.version>0.2.13-SNAPSHOT</cnnarch-caffe2-generator.version>
<cnnarch-gluon-generator.version>0.2.9-SNAPSHOT</cnnarch-gluon-generator.version>
<cnnarch-tensorflow-generator.version>0.1.0-SNAPSHOT</cnnarch-tensorflow-generator.version>
<embedded-montiarc-math-opt-generator>0.1.4</embedded-montiarc-math-opt-generator>
<embedded-montiarc-math-opt-generator>0.1.5</embedded-montiarc-math-opt-generator>
<embedded-montiarc-emadl-pythonwrapper-generator>0.0.2</embedded-montiarc-emadl-pythonwrapper-generator>
<!-- .. Libraries .................................................. -->
<guava.version>18.0</guava.version>
......
......@@ -28,12 +28,15 @@ import de.monticore.lang.monticar.generator.cpp.SimulatorIntegrationHelper;
import de.monticore.lang.monticar.generator.cpp.TypesGeneratorCPP;
import de.monticore.lang.monticar.generator.pythonwrapper.GeneratorPythonWrapper;
import de.monticore.lang.monticar.generator.cpp.converter.TypeConverter;
import de.monticore.lang.monticar.generator.pythonwrapper.GeneratorPythonWrapperFactory;
import de.monticore.lang.monticar.generator.pythonwrapper.GeneratorPythonWrapperStandaloneApi;
import de.monticore.lang.tagging._symboltable.TagSymbol;
import de.monticore.lang.tagging._symboltable.TaggingResolver;
import de.monticore.symboltable.Scope;
import de.se_rwth.commons.Splitters;
import de.se_rwth.commons.logging.Log;
import freemarker.template.TemplateException;
import javax.xml.bind.DatatypeConverter;
import java.io.*;
......@@ -52,7 +55,7 @@ public class EMADLGenerator {
private GeneratorEMAMOpt2CPP emamGen;
private CNNArchGenerator cnnArchGenerator;
private CNNTrainGenerator cnnTrainGenerator;
private GeneratorPythonWrapper pythonWrapper;
private GeneratorPythonWrapperStandaloneApi pythonWrapper;
private Backend backend;
private String modelsPath;
......@@ -64,7 +67,8 @@ public class EMADLGenerator {
emamGen = new GeneratorEMAMOpt2CPP();
emamGen.useArmadilloBackend();
emamGen.setGenerationTargetPath("./target/generated-sources-emadl/");
pythonWrapper.setGenerationTargetPath("./target/");
GeneratorPythonWrapperFactory pythonWrapperFactory = new GeneratorPythonWrapperFactory();
pythonWrapper = new GeneratorPythonWrapperStandaloneApi();
cnnArchGenerator = backend.getCNNArchGenerator();
cnnTrainGenerator = backend.getCNNTrainGenerator();
}
......@@ -620,18 +624,54 @@ public class EMADLGenerator {
//CNNTrainCocos.checkCriticCocos(configuration);
}
if (configuration.hasPreprocessor()) {
String preprocessor_name = configuration.getPreprocessingName().get();
// Resolve QNetwork if present
if (configuration.getQNetworkName().isPresent()) {
String fullQNetworkName = configuration.getQNetworkName().get();
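// Normalize the component name: upper-case the first character after the last '.' so the simple name matches its symbol-table entry (assumed naming convention).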
int indexOfFirstNameCharacter = fullQNetworkName.lastIndexOf('.') + 1;
fullQNetworkName = fullQNetworkName.substring(0, indexOfFirstNameCharacter)
+ fullQNetworkName.substring(indexOfFirstNameCharacter, indexOfFirstNameCharacter + 1).toUpperCase()
+ fullQNetworkName.substring(indexOfFirstNameCharacter + 1);
TaggingResolver symtab = EMADLAbstractSymtab.createSymTabAndTaggingResolver(getModelsPath());
EMAComponentInstanceSymbol instance = resolveComponentInstanceSymbol(preprocessor_name, symtab);
generateComponent(fileContents, allInstances, symtab, instance, symtab);
EMAComponentInstanceSymbol instanceSymbol = resolveComponentInstanceSymbol(fullQNetworkName, symtab);
EMADLCocos.checkAll(instanceSymbol);
Optional<ArchitectureSymbol> qnetwork = instanceSymbol.getSpannedScope().resolve("", ArchitectureSymbol.KIND);
if (!qnetwork.isPresent()) {
Log.error("During the resolving of qnetwork component: qnetwork component "
+ fullQNetworkName + " does not have a CNN implementation but is required to have one");
System.exit(-1);
}
qnetwork.get().setComponentName(fullQNetworkName);
configuration.setQNetwork(new ArchitectureAdapter(fullQNetworkName, qnetwork.get()));
//CNNTrainCocos.checkCriticCocos(configuration);
}
try {
pythonWrapper.generateFiles(instance);
} catch (IOException e) {
// todo: add fancy error message here
e.printStackTrace();
if (configuration.hasPreprocessor()) {
String fullPreprocessorName = configuration.getPreprocessingName().get();
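// Same name normalization as for the Q-network above: upper-case the first character of the preprocessor's simple name.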
int indexOfFirstNameCharacter = fullPreprocessorName.lastIndexOf('.') + 1;
fullPreprocessorName = fullPreprocessorName.substring(0, indexOfFirstNameCharacter)
+ fullPreprocessorName.substring(indexOfFirstNameCharacter, indexOfFirstNameCharacter + 1).toUpperCase()
+ fullPreprocessorName.substring(indexOfFirstNameCharacter + 1);
String instanceName = componentInstance.getFullName().replaceAll("\\.", "_");
TaggingResolver symtab = EMADLAbstractSymtab.createSymTabAndTaggingResolver(getModelsPath());
EMAComponentInstanceSymbol processor_instance = resolveComponentInstanceSymbol(fullPreprocessorName, symtab);
processor_instance.setFullName("CNNPreprocessor_" + instanceName);
List<FileContent> processorContents = new ArrayList<>();
generateComponent(processorContents, new HashSet<EMAComponentInstanceSymbol>(), symtab, processor_instance, symtab);
fixArmadilloImports(processorContents);
for (FileContent fileContent : processorContents) {
try {
emamGen.generateFile(fileContent);
} catch (IOException e) {
// TODO: report a descriptive error message instead of only printing the stack trace
e.printStackTrace();
}
}
String targetPath = getGenerationTargetPath();
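// Generate the Python wrapper for the preprocessor and try to build it under <target>/pythonWrapper.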
pythonWrapper.generateAndTryBuilding(processor_instance, targetPath + "/pythonWrapper", targetPath);
}
cnnTrainGenerator.setInstanceName(componentInstance.getFullName().replaceAll("\\.", "_"));
......
import mxnet as mx
import numpy as np
import math
from mxnet import gluon
......@@ -147,3 +148,16 @@ class Net_0(gluon.HybridBlock):
return predictions_
def getInputs(self):
inputs = {}
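# each descriptor below is assembled as (dtype, lower bound, upper bound, dimensions)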
input_dimensions = (1,28,28)
input_domains = (int,0.0,255.0)
inputs["image_"] = input_domains + (input_dimensions,)
return inputs
def getOutputs(self):
outputs = {}
output_dimensions = (10,1,1)
output_domains = (float,0.0,1.0)
outputs["predictions_"] = output_domains + (output_dimensions,)
return outputs
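The two accessors above describe each port as a 4-tuple (dtype, lower bound, upper bound, dimensions). A minimal sketch of how a caller might read these descriptors, assuming only the generated getInputs()/getOutputs() methods shown above (the helper name is illustrative, not generated code):

def print_port_descriptors(net):
    # iterate over the generated port descriptors of a Net_0 instance
    for name, (dtype, low, high, dims) in net.getInputs().items():
        print("input  %s: dtype=%s, range=[%s, %s], dims=%s" % (name, dtype.__name__, low, high, dims))
    for name, (dtype, low, high, dims) in net.getOutputs().items():
        print("output %s: dtype=%s, range=[%s, %s], dims=%s" % (name, dtype.__name__, low, high, dims))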
import mxnet as mx
import numpy as np
import math
from mxnet import gluon
......@@ -122,3 +123,16 @@ class Net_0(gluon.HybridBlock):
return qvalues_
def getInputs(self):
inputs = {}
input_dimensions = (4)
input_domains = (float,float('-inf'),float('inf'))
inputs["state_"] = input_domains + (input_dimensions,)
return inputs
def getOutputs(self):
outputs = {}
output_dimensions = (2,1,1)
output_domains = (float,float('-inf'),float('inf'))
outputs["qvalues_"] = output_domains + (output_dimensions,)
return outputs
# (c) https://github.com/MontiCore/monticore
import os
import h5py
import mxnet as mx
import logging
import sys
import numpy as np
import cv2
import importlib
from mxnet import nd
class CNNDataLoader_mountaincar_master_actor:
......@@ -13,43 +15,197 @@ class CNNDataLoader_mountaincar_master_actor:
def __init__(self):
self._data_dir = "data/"
def load_data(self, batch_size):
def load_data(self, train_batch_size, test_batch_size):
train_h5, test_h5 = self.load_h5_files()
train_data = {}
data_mean = {}
data_std = {}
train_images = {}
for input_name in self._input_names_:
train_data[input_name] = train_h5[input_name]
data_mean[input_name] = nd.array(train_h5[input_name][:].mean(axis=0))
data_std[input_name] = nd.array(train_h5[input_name][:].std(axis=0) + 1e-5)
data_mean[input_name + '_'] = nd.array(train_h5[input_name][:].mean(axis=0))
data_std[input_name + '_'] = nd.array(train_h5[input_name][:].std(axis=0) + 1e-5)
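# mean/std keys now carry a trailing '_', matching the port naming used by the generated networks (e.g. 'state_')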
if 'images' in train_h5:
train_images = train_h5['images']
train_label = {}
index = 0
for output_name in self._output_names_:
train_label[output_name] = train_h5[output_name]
train_label[index] = train_h5[output_name]
index += 1
train_iter = mx.io.NDArrayIter(data=train_data,
label=train_label,
batch_size=batch_size)
batch_size=train_batch_size)
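# additional iterator over the training data, batched like the test set (presumably used to evaluate accuracy on the training split)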
train_test_iter = mx.io.NDArrayIter(data=train_data,
label=train_label,
batch_size=test_batch_size)
test_iter = None
if test_h5 is not None:
test_data = {}
test_images = {}
for input_name in self._input_names_:
test_data[input_name] = test_h5[input_name]
if 'images' in test_h5:
test_images = test_h5['images']
test_label = {}
index = 0
for output_name in self._output_names_:
test_label[output_name] = test_h5[output_name]
test_label[index] = test_h5[output_name]
index += 1
test_iter = mx.io.NDArrayIter(data=test_data,
label=test_label,
batch_size=batch_size)
batch_size=test_batch_size)
return train_iter, train_test_iter, test_iter, data_mean, data_std, train_images, test_images
def load_data(self, batch_size, img_size):
train_h5, test_h5 = self.load_h5_files()
width = img_size[0]
height = img_size[1]
comb_data = {}
data_mean = {}
data_std = {}
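# resize every train and test image to (width, height) and stack both splits into one combined data set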
for input_name in self._input_names_:
train_data = train_h5[input_name][:]
test_data = test_h5[input_name][:]
train_shape = train_data.shape
test_shape = test_data.shape
comb_data[input_name] = mx.nd.zeros((train_shape[0]+test_shape[0], train_shape[1], width, height))
for i, img in enumerate(train_data):
img = img.transpose(1,2,0)
comb_data[input_name][i] = cv2.resize(img, (width, height)).reshape((train_shape[1],width,height))
for i, img in enumerate(test_data):
img = img.transpose(1, 2, 0)
comb_data[input_name][i+train_shape[0]] = cv2.resize(img, (width, height)).reshape((train_shape[1], width, height))
data_mean[input_name + '_'] = nd.array(comb_data[input_name][:].mean(axis=0))
data_std[input_name + '_'] = nd.array(comb_data[input_name][:].asnumpy().std(axis=0) + 1e-5)
comb_label = {}
for output_name in self._output_names_:
train_labels = train_h5[output_name][:]
test_labels = test_h5[output_name][:]
comb_label[output_name] = np.append(train_labels, test_labels, axis=0)
train_iter = mx.io.NDArrayIter(data=comb_data,
label=comb_label,
batch_size=batch_size)
test_iter = None
return train_iter, test_iter, data_mean, data_std
def load_preprocessed_data(self, batch_size, preproc_lib):
train_h5, test_h5 = self.load_h5_files()
wrapper = importlib.import_module(preproc_lib)
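# the generated wrapper module exposes a class named like the module itself and an input struct named '<prefix>_input', where <prefix> is the module name up to its last '_'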
instance = getattr(wrapper, preproc_lib)()
instance.init()
lib_head, _sep, tail = preproc_lib.rpartition('_')
inp = getattr(wrapper, lib_head + "_input")()
train_data = {}
train_label = {}
data_mean = {}
data_std = {}
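# run the preprocessor once on the first sample to determine the output shapes before allocating the full buffers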
shape_output = self.preprocess_data(instance, inp, 0, train_h5)
train_len = len(train_h5[self._input_names_[0]])
for input_name in self._input_names_:
if type(getattr(shape_output, input_name + "_out")) == np.ndarray:
cur_shape = (train_len,) + getattr(shape_output, input_name + "_out").shape
else:
cur_shape = (train_len, 1)
train_data[input_name] = mx.nd.zeros(cur_shape)
for output_name in self._output_names_:
if type(getattr(shape_output, output_name + "_out")) == np.ndarray:
cur_shape = (train_len,) + getattr(shape_output, output_name + "_out").shape
else:
cur_shape = (train_len, 1)
train_label[output_name] = mx.nd.zeros(cur_shape)
for i in range(train_len):
output = self.preprocess_data(instance, inp, i, train_h5)
for input_name in self._input_names_:
train_data[input_name][i] = getattr(output, input_name + "_out")
for output_name in self._output_names_:
train_label[output_name][i] = getattr(output, output_name + "_out")
for input_name in self._input_names_:
data_mean[input_name + '_'] = nd.array(train_data[input_name][:].mean(axis=0))
data_std[input_name + '_'] = nd.array(train_data[input_name][:].asnumpy().std(axis=0) + 1e-5)
train_iter = mx.io.NDArrayIter(data=train_data,
label=train_label,
batch_size=batch_size)
test_data = {}
test_label = {}
shape_output = self.preprocess_data(instance, inp, 0, test_h5)
test_len = len(test_h5[self._input_names_[0]])
for input_name in self._input_names_:
if type(getattr(shape_output, input_name + "_out")) == np.ndarray:
cur_shape = (test_len,) + getattr(shape_output, input_name + "_out").shape
else:
cur_shape = (test_len, 1)
test_data[input_name] = mx.nd.zeros(cur_shape)
for output_name in self._output_names_:
if type(getattr(shape_output, output_name + "_out")) == np.ndarray:
cur_shape = (test_len,) + getattr(shape_output, output_name + "_out").shape
else:
cur_shape = (test_len, 1)
test_label[output_name] = mx.nd.zeros(cur_shape)
for i in range(test_len):
output = self.preprocess_data(instance, inp, i, test_h5)
for input_name in self._input_names_:
test_data[input_name][i] = getattr(output, input_name + "_out")
for output_name in self._output_names_:
test_label[output_name][i] = getattr(output, output_name + "_out")
test_iter = mx.io.NDArrayIter(data=test_data,
label=test_label,
batch_size=batch_size)
return train_iter, test_iter, data_mean, data_std
def preprocess_data(self, instance_wrapper, input_wrapper, index, data_h5):
for input_name in self._input_names_:
data = data_h5[input_name][index]
attr = getattr(input_wrapper, input_name)
if (type(data)) == np.ndarray:
data = np.asfortranarray(data).astype(attr.dtype)
else:
data = type(attr)(data)
setattr(input_wrapper, input_name, data)
for output_name in self._output_names_:
data = data_h5[output_name][index]
attr = getattr(input_wrapper, output_name)
if (type(data)) == np.ndarray:
data = np.asfortranarray(data).astype(attr.dtype)
else:
data = type(attr)(data)
setattr(input_wrapper, output_name, data)
return instance_wrapper.execute(input_wrapper)
def load_h5_files(self):
train_h5 = None
test_h5 = None
......@@ -58,6 +214,7 @@ class CNNDataLoader_mountaincar_master_actor:
if os.path.isfile(train_path):
train_h5 = h5py.File(train_path, 'r')
print(train_path)
for input_name in self._input_names_:
if not input_name in train_h5:
......
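A hedged usage sketch for the new preprocessed-data path (not generated code; the batch size and the wrapper module name are illustrative assumptions, the real module name comes from the Python-wrapper generator invoked by EMADLGenerator):

loader = CNNDataLoader_mountaincar_master_actor()
# preproc_lib names the generated Python wrapper module of the preprocessor component
train_iter, test_iter, data_mean, data_std = loader.load_preprocessed_data(
    batch_size=64, preproc_lib="CNNPreprocessor_mountaincar_master_actor")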
import mxnet as mx
import numpy as np
import math
from mxnet import gluon
......@@ -124,3 +125,16 @@ class Net_0(gluon.HybridBlock):
return action_
def getInputs(self):
inputs = {}
input_dimensions = (2)
input_domains = (float,float('-inf'),float('inf'))
inputs["state_"] = input_domains + (input_dimensions,)
return inputs
def getOutputs(self):
outputs = {}
output_dimensions = (1,1,1)
output_domains = (float,-1.0,1.0)
outputs["action_"] = output_domains + (output_dimensions,)
return outputs
......@@ -44,8 +44,8 @@ public:
MXPredGetOutputShape(handle, output_index, &shape, &shape_len);
size = 1;
for (mx_uint i = 0; i < shape_len; ++i) size *= shape[i];
assert(out_action_.size() == 1 || size == out_action_.size());
MXPredGetOutput(handle, output_index, &(out_action_[0]), out_action_.size());
assert(size == out_action_.size());
MXPredGetOutput(handle, 0, &(out_action_[0]), out_action_.size());
}
......
import mxnet as mx
import numpy as np
import math
from mxnet import gluon
......@@ -135,3 +136,19 @@ class Net_0(gluon.HybridBlock):
return qvalues_
def getInputs(self):
inputs = {}
input_dimensions = (2)
input_domains = (float,float('-inf'),float('inf'))
inputs["state_"] = input_domains + (input_dimensions,)
input_dimensions = (1)
input_domains = (float,-1.0,1.0)
inputs["action_"] = input_domains + (input_dimensions,)
return inputs
def getOutputs(self):
outputs = {}
output_dimensions = (1,1,1)
output_domains = (float,float('-inf'),float('inf'))
outputs["qvalues_"] = output_domains + (output_dimensions,)
return outputs
import mxnet as mx
import numpy as np
import math
from mxnet import gluon
......@@ -122,3 +123,16 @@ class Net_0(gluon.HybridBlock):
return qvalues_
def getInputs(self):
inputs = {}
input_dimensions = (5)
input_domains = (float,float('-inf'),float('inf'))
inputs["state_"] = input_domains + (input_dimensions,)
return inputs
def getOutputs(self):
outputs = {}
output_dimensions = (30,1,1)
output_domains = (float,float('-inf'),float('inf'))
outputs["qvalues_"] = output_domains + (output_dimensions,)
return outputs
import mxnet as mx
import numpy as np
import math
from mxnet import gluon
......@@ -124,3 +125,16 @@ class Net_0(gluon.HybridBlock):
return commands_
def getInputs(self):
inputs = {}
input_dimensions = (29)
input_domains = (float,float('-inf'),float('inf'))
inputs["state_"] = input_domains + (input_dimensions,)
return inputs
def getOutputs(self):
outputs = {}
output_dimensions = (3,1,1)
output_domains = (float,-1.0,1.0)
outputs["commands_"] = output_domains + (output_dimensions,)
return outputs