@@ -24,7 +24,7 @@
 <cnnarch-gluon-generator.version>0.2.10-SNAPSHOT</cnnarch-gluon-generator.version>
 <cnnarch-tensorflow-generator.version>0.1.0-SNAPSHOT</cnnarch-tensorflow-generator.version>
 <Common-MontiCar.version>0.0.14-20180704.113055-2</Common-MontiCar.version>
-<embedded-montiarc-math-opt-generator>0.1.4</embedded-montiarc-math-opt-generator>
+<embedded-montiarc-math-opt-generator>0.1.5</embedded-montiarc-math-opt-generator>
 <!-- .. Libraries .................................................. -->
 <guava.version>18.0</guava.version>
......
@@ -26,13 +26,17 @@ import de.monticore.lang.monticar.generator.cpp.ArmadilloHelper;
 import de.monticore.lang.monticar.generator.cpp.GeneratorEMAMOpt2CPP;
 import de.monticore.lang.monticar.generator.cpp.SimulatorIntegrationHelper;
 import de.monticore.lang.monticar.generator.cpp.TypesGeneratorCPP;
+import de.monticore.lang.monticar.generator.pythonwrapper.GeneratorPythonWrapper;
 import de.monticore.lang.monticar.generator.cpp.converter.TypeConverter;
+import de.monticore.lang.monticar.generator.pythonwrapper.GeneratorPythonWrapperFactory;
+import de.monticore.lang.monticar.generator.pythonwrapper.GeneratorPythonWrapperStandaloneApi;
 import de.monticore.lang.tagging._symboltable.TagSymbol;
 import de.monticore.lang.tagging._symboltable.TaggingResolver;
 import de.monticore.symboltable.Scope;
 import de.se_rwth.commons.Splitters;
 import de.se_rwth.commons.logging.Log;
 import freemarker.template.TemplateException;
+import org.antlr.v4.codegen.target.Python2Target;

 import javax.xml.bind.DatatypeConverter;
 import java.io.*;
@@ -51,6 +55,7 @@ public class EMADLGenerator {
     private GeneratorEMAMOpt2CPP emamGen;
     private CNNArchGenerator cnnArchGenerator;
     private CNNTrainGenerator cnnTrainGenerator;
+    private GeneratorPythonWrapperStandaloneApi pythonWrapper;
     private Backend backend;

     private String modelsPath;
@@ -62,6 +67,8 @@ public class EMADLGenerator {
         emamGen = new GeneratorEMAMOpt2CPP();
         emamGen.useArmadilloBackend();
         emamGen.setGenerationTargetPath("./target/generated-sources-emadl/");
+        GeneratorPythonWrapperFactory pythonWrapperFactory = new GeneratorPythonWrapperFactory();
+        pythonWrapper = new GeneratorPythonWrapperStandaloneApi();
         cnnArchGenerator = backend.getCNNArchGenerator();
         cnnTrainGenerator = backend.getCNNTrainGenerator();
     }
@@ -123,7 +130,10 @@ public class EMADLGenerator {
             System.exit(1);
         }

-        return component.getEnclosingScope().<EMAComponentInstanceSymbol>resolve(instanceName, EMAComponentInstanceSymbol.KIND).get();
+        Scope c1 = component.getEnclosingScope();
+        Optional<EMAComponentInstanceSymbol> c2 = c1.<EMAComponentInstanceSymbol>resolve(instanceName, EMAComponentInstanceSymbol.KIND);
+        EMAComponentInstanceSymbol c3 = c2.get();
+        return c3;
     }

     public void compile() throws IOException {
@@ -592,6 +602,77 @@ public class EMADLGenerator {
             CNNTrainCocos.checkCriticCocos(configuration);
         }
// Resolve discriminator network if discriminator is present
if (configuration.getDiscriminatorName().isPresent()) {
String fullDiscriminatorName = configuration.getDiscriminatorName().get();
int indexOfFirstNameCharacter = fullDiscriminatorName.lastIndexOf('.') + 1;
fullDiscriminatorName = fullDiscriminatorName.substring(0, indexOfFirstNameCharacter)
+ fullDiscriminatorName.substring(indexOfFirstNameCharacter, indexOfFirstNameCharacter + 1).toUpperCase()
+ fullDiscriminatorName.substring(indexOfFirstNameCharacter + 1);
TaggingResolver symtab = EMADLAbstractSymtab.createSymTabAndTaggingResolver(getModelsPath());
EMAComponentInstanceSymbol instanceSymbol = resolveComponentInstanceSymbol(fullDiscriminatorName, symtab);
EMADLCocos.checkAll(instanceSymbol);
Optional<ArchitectureSymbol> discriminator = instanceSymbol.getSpannedScope().resolve("", ArchitectureSymbol.KIND);
if (!discriminator.isPresent()) {
Log.error("During the resolving of critic component: Critic component "
+ fullDiscriminatorName + " does not have a CNN implementation but is required to have one");
System.exit(-1);
}
discriminator.get().setComponentName(fullDiscriminatorName);
configuration.setDiscriminatorNetwork(new ArchitectureAdapter(fullDiscriminatorName, discriminator.get()));
//CNNTrainCocos.checkCriticCocos(configuration);
}
// Resolve QNetwork if present
if (configuration.getQNetworkName().isPresent()) {
String fullQNetworkName = configuration.getQNetworkName().get();
int indexOfFirstNameCharacter = fullQNetworkName.lastIndexOf('.') + 1;
fullQNetworkName = fullQNetworkName.substring(0, indexOfFirstNameCharacter)
+ fullQNetworkName.substring(indexOfFirstNameCharacter, indexOfFirstNameCharacter + 1).toUpperCase()
+ fullQNetworkName.substring(indexOfFirstNameCharacter + 1);
TaggingResolver symtab = EMADLAbstractSymtab.createSymTabAndTaggingResolver(getModelsPath());
EMAComponentInstanceSymbol instanceSymbol = resolveComponentInstanceSymbol(fullQNetworkName, symtab);
EMADLCocos.checkAll(instanceSymbol);
Optional<ArchitectureSymbol> qnetwork = instanceSymbol.getSpannedScope().resolve("", ArchitectureSymbol.KIND);
if (!qnetwork.isPresent()) {
Log.error("During the resolving of qnetwork component: qnetwork component "
+ fullQNetworkName + " does not have a CNN implementation but is required to have one");
System.exit(-1);
}
qnetwork.get().setComponentName(fullQNetworkName);
configuration.setQNetwork(new ArchitectureAdapter(fullQNetworkName, qnetwork.get()));
//CNNTrainCocos.checkCriticCocos(configuration);
}
if (configuration.hasPreprocessor()) {
String fullPreprocessorName = configuration.getPreprocessingName().get();
int indexOfFirstNameCharacter = fullPreprocessorName.lastIndexOf('.') + 1;
fullPreprocessorName = fullPreprocessorName.substring(0, indexOfFirstNameCharacter)
+ fullPreprocessorName.substring(indexOfFirstNameCharacter, indexOfFirstNameCharacter + 1).toUpperCase()
+ fullPreprocessorName.substring(indexOfFirstNameCharacter + 1);
String instanceName = componentInstance.getFullName().replaceAll("\\.", "_");
TaggingResolver symtab = EMADLAbstractSymtab.createSymTabAndTaggingResolver(getModelsPath());
EMAComponentInstanceSymbol processor_instance = resolveComponentInstanceSymbol(fullPreprocessorName, symtab);
processor_instance.setFullName("CNNPreprocessor_" + instanceName);
List<FileContent> processorContents = new ArrayList<>();
generateComponent(processorContents, new HashSet<EMAComponentInstanceSymbol>(), symtab, processor_instance, symtab);
fixArmadilloImports(processorContents);
for (FileContent fileContent : processorContents) {
try {
emamGen.generateFile(fileContent);
} catch (IOException e) {
//todo: fancy error message
e.printStackTrace();
}
}
String targetPath = getGenerationTargetPath();
pythonWrapper.generateAndTryBuilding(processor_instance, targetPath + "/pythonWrapper", targetPath);
}
         cnnTrainGenerator.setInstanceName(componentInstance.getFullName().replaceAll("\\.", "_"));
         Map<String, String> fileContentMap = cnnTrainGenerator.generateStrings(configuration);
......
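For orientation, the data-loader changes below consume the Python wrapper module that the generator step above produces. The loader only relies on a small protocol: a class named like the module with init() and execute(), and a companion "<prefix>_input" class with one attribute per port, whose execute() result exposes one "<port>_out" attribute per port. The following stand-in is a minimal sketch of that protocol; the module name "Preprocessor_net" and the port names "image_" and "predictions_label" are illustrative assumptions, not identifiers the GeneratorPythonWrapper actually emits.

# Hypothetical stand-in for a generated preprocessor wrapper module
# ("Preprocessor_net.py"); names are assumptions for illustration only.
import numpy as np

class Preprocessor_input(object):
    # one attribute per port; the loader fills these via setattr() before execute()
    def __init__(self):
        self.image_ = np.zeros((1, 28, 28), dtype=np.float32)
        self.predictions_label = 0.0

class Preprocessor_net(object):
    def init(self):
        # the real wrapper would initialize its native backend here
        pass

    def execute(self, inp):
        # return an object exposing one "<port>_out" attribute per port
        out = type("Output", (), {})()
        out.image__out = inp.image_ / 255.0                 # example transform
        out.predictions_label_out = inp.predictions_label   # labels pass through
        return out

load_preprocessed_data() below imports such a module by name, instantiates it, and feeds every HDF5 sample through execute() before batching.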
@@ -5,6 +5,7 @@ import logging
 import sys
 import numpy as np
 import cv2
+import importlib
 from mxnet import nd

 class CNNDataLoader_mnist_mnistClassifier_net:
@@ -14,7 +15,7 @@ class CNNDataLoader_mnist_mnistClassifier_net:
     def __init__(self):
         self._data_dir = "data/mnist.LeNetNetwork/"

-    def load_data(self, train_batch_size, test_batch_size):
+    def load_data(self, batch_size, shuffle=False):
         train_h5, test_h5 = self.load_h5_files()

         train_data = {}
@@ -38,11 +39,8 @@ class CNNDataLoader_mnist_mnistClassifier_net:
         train_iter = mx.io.NDArrayIter(data=train_data,
                                        label=train_label,
-                                       batch_size=train_batch_size)
+                                       batch_size=batch_size,
+                                       shuffle=shuffle)

-        train_test_iter = mx.io.NDArrayIter(data=train_data,
-                                            label=train_label,
-                                            batch_size=test_batch_size)

         test_iter = None
@@ -63,51 +61,112 @@ class CNNDataLoader_mnist_mnistClassifier_net:
         test_iter = mx.io.NDArrayIter(data=test_data,
                                       label=test_label,
-                                      batch_size=test_batch_size)
+                                      batch_size=batch_size)

-        return train_iter, train_test_iter, test_iter, data_mean, data_std, train_images, test_images
+        return train_iter, test_iter, data_mean, data_std, train_images, test_images

-    def load_data_img(self, batch_size, img_size):
-        train_h5, test_h5 = self.load_h5_files()
-        width = img_size[0]
-        height = img_size[1]
-        comb_data = {}
-        data_mean = {}
-        data_std = {}
-        for input_name in self._input_names_:
-            train_data = train_h5[input_name][:]
-            test_data = test_h5[input_name][:]
-            train_shape = train_data.shape
-            test_shape = test_data.shape
-            comb_data[input_name] = mx.nd.zeros((train_shape[0]+test_shape[0], train_shape[1], width, height))
-            for i, img in enumerate(train_data):
-                img = img.transpose(1,2,0)
-                comb_data[input_name][i] = cv2.resize(img, (width, height)).reshape((train_shape[1],width,height))
-            for i, img in enumerate(test_data):
-                img = img.transpose(1, 2, 0)
-                comb_data[input_name][i+train_shape[0]] = cv2.resize(img, (width, height)).reshape((train_shape[1], width, height))
-            data_mean[input_name + '_'] = nd.array(comb_data[input_name][:].mean(axis=0))
-            data_std[input_name + '_'] = nd.array(comb_data[input_name][:].asnumpy().std(axis=0) + 1e-5)
-        comb_label = {}
-        for output_name in self._output_names_:
-            train_labels = train_h5[output_name][:]
-            test_labels = test_h5[output_name][:]
-            comb_label[output_name] = np.append(train_labels, test_labels, axis=0)
-        train_iter = mx.io.NDArrayIter(data=comb_data,
-                                       label=comb_label,
-                                       batch_size=batch_size)
-        test_iter = None
-        return train_iter, test_iter, data_mean, data_std
+    def load_preprocessed_data(self, batch_size, preproc_lib, shuffle=False):
+        train_h5, test_h5 = self.load_h5_files()
+        wrapper = importlib.import_module(preproc_lib)
+        instance = getattr(wrapper, preproc_lib)()
+        instance.init()
+        lib_head, _sep, tail = preproc_lib.rpartition('_')
+        inp = getattr(wrapper, lib_head + "_input")()
+        train_data = {}
+        train_label = {}
+        data_mean = {}
+        data_std = {}
+        shape_output = self.preprocess_data(instance, inp, 0, train_h5)
+        train_len = len(train_h5[self._input_names_[0]])
+        for input_name in self._input_names_:
+            if type(getattr(shape_output, input_name + "_out")) == np.ndarray:
+                cur_shape = (train_len,) + getattr(shape_output, input_name + "_out").shape
+            else:
+                cur_shape = (train_len, 1)
+            train_data[input_name] = mx.nd.zeros(cur_shape)
+        for output_name in self._output_names_:
+            if type(getattr(shape_output, output_name + "_out")) == nd.array:
+                cur_shape = (train_len,) + getattr(shape_output, output_name + "_out").shape
+            else:
+                cur_shape = (train_len, 1)
+            train_label[output_name] = mx.nd.zeros(cur_shape)
+        for i in range(train_len):
+            output = self.preprocess_data(instance, inp, i, train_h5)
+            for input_name in self._input_names_:
+                train_data[input_name][i] = getattr(output, input_name + "_out")
+            for output_name in self._output_names_:
+                train_label[output_name][i] = getattr(output, output_name + "_out")
+        for input_name in self._input_names_:
+            data_mean[input_name + '_'] = nd.array(train_data[input_name][:].mean(axis=0))
+            data_std[input_name + '_'] = nd.array(train_data[input_name][:].asnumpy().std(axis=0) + 1e-5)
+        if 'images' in train_h5:
+            train_images = train_h5['images']
+        train_iter = mx.io.NDArrayIter(data=train_data,
+                                       label=train_label,
+                                       batch_size=batch_size,
+                                       shuffle=shuffle)
+        test_data = {}
+        test_label = {}
+        shape_output = self.preprocess_data(instance, inp, 0, test_h5)
+        test_len = len(test_h5[self._input_names_[0]])
+        for input_name in self._input_names_:
+            if type(getattr(shape_output, input_name + "_out")) == np.ndarray:
+                cur_shape = (test_len,) + getattr(shape_output, input_name + "_out").shape
+            else:
+                cur_shape = (test_len, 1)
+            test_data[input_name] = mx.nd.zeros(cur_shape)
+        for output_name in self._output_names_:
+            if type(getattr(shape_output, output_name + "_out")) == nd.array:
+                cur_shape = (test_len,) + getattr(shape_output, output_name + "_out").shape
+            else:
+                cur_shape = (test_len, 1)
+            test_label[output_name] = mx.nd.zeros(cur_shape)
+        for i in range(test_len):
+            output = self.preprocess_data(instance, inp, i, test_h5)
+            for input_name in self._input_names_:
+                test_data[input_name][i] = getattr(output, input_name + "_out")
+            for output_name in self._output_names_:
+                test_label[output_name][i] = getattr(output, output_name + "_out")
+        if 'images' in test_h5:
+            test_images = test_h5['images']
+        test_iter = mx.io.NDArrayIter(data=test_data,
+                                      label=test_label,
+                                      batch_size=batch_size)
+        return train_iter, test_iter, data_mean, data_std, train_images, test_images
+
+    def preprocess_data(self, instance_wrapper, input_wrapper, index, data_h5):
+        for input_name in self._input_names_:
+            data = data_h5[input_name][index]
+            attr = getattr(input_wrapper, input_name)
+            if (type(data)) == np.ndarray:
+                data = np.asfortranarray(data).astype(attr.dtype)
+            else:
+                data = type(attr)(data)
+            setattr(input_wrapper, input_name, data)
+        for output_name in self._output_names_:
+            data = data_h5[output_name][index]
+            attr = getattr(input_wrapper, output_name)
+            if (type(data)) == np.ndarray:
+                data = np.asfortranarray(data).astype(attr.dtype)
+            else:
+                data = type(attr)(data)
+            setattr(input_wrapper, output_name, data)
+        return instance_wrapper.execute(input_wrapper)

     def load_h5_files(self):
         train_h5 = None
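A hedged usage sketch of the reworked loader API follows. The import path and the preproc_lib value follow the naming patterns visible in this MR ("CNNDataLoader_<instance>", "CNNPreprocessor_<instance>"); the exact generated module names may differ.

# Illustrative only: module and wrapper names are assumptions based on the
# patterns above, not guaranteed to match the generator's output exactly.
from CNNDataLoader_mnist_mnistClassifier_net import CNNDataLoader_mnist_mnistClassifier_net

loader = CNNDataLoader_mnist_mnistClassifier_net()

# Plain loading: one batch size for train and test, optional shuffling.
train_iter, test_iter, data_mean, data_std, train_images, test_images = \
    loader.load_data(batch_size=64, shuffle=True)

# Preprocessed loading: every sample is routed through the generated Python
# wrapper (see preprocess_data() above) before it is packed into NDArrayIter.
train_iter, test_iter, data_mean, data_std, train_images, test_images = \
    loader.load_preprocessed_data(batch_size=64,
                                  preproc_lib="CNNPreprocessor_mnist_mnistClassifier_net",
                                  shuffle=True)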
......
 import mxnet as mx
 import numpy as np
+import math
 from mxnet import gluon
@@ -51,10 +52,10 @@ class Reshape(gluon.HybridBlock):
 class CustomRNN(gluon.HybridBlock):
-    def __init__(self, hidden_size, num_layers, bidirectional, **kwargs):
+    def __init__(self, hidden_size, num_layers, dropout, bidirectional, **kwargs):
         super(CustomRNN, self).__init__(**kwargs)
         with self.name_scope():
-            self.rnn = gluon.rnn.RNN(hidden_size=hidden_size, num_layers=num_layers,
+            self.rnn = gluon.rnn.RNN(hidden_size=hidden_size, num_layers=num_layers, dropout=dropout,
                                      bidirectional=bidirectional, activation='tanh', layout='NTC')

     def hybrid_forward(self, F, data, state0):
@@ -63,10 +64,10 @@ class CustomRNN(gluon.HybridBlock):
 class CustomLSTM(gluon.HybridBlock):
-    def __init__(self, hidden_size, num_layers, bidirectional, **kwargs):
+    def __init__(self, hidden_size, num_layers, dropout, bidirectional, **kwargs):
         super(CustomLSTM, self).__init__(**kwargs)
         with self.name_scope():
-            self.lstm = gluon.rnn.LSTM(hidden_size=hidden_size, num_layers=num_layers,
+            self.lstm = gluon.rnn.LSTM(hidden_size=hidden_size, num_layers=num_layers, dropout=dropout,
                                        bidirectional=bidirectional, layout='NTC')

     def hybrid_forward(self, F, data, state0, state1):
@@ -75,10 +76,10 @@ class CustomLSTM(gluon.HybridBlock):
 class CustomGRU(gluon.HybridBlock):
-    def __init__(self, hidden_size, num_layers, bidirectional, **kwargs):
+    def __init__(self, hidden_size, num_layers, dropout, bidirectional, **kwargs):
         super(CustomGRU, self).__init__(**kwargs)
         with self.name_scope():
-            self.gru = gluon.rnn.GRU(hidden_size=hidden_size, num_layers=num_layers,
+            self.gru = gluon.rnn.GRU(hidden_size=hidden_size, num_layers=num_layers, dropout=dropout,
                                      bidirectional=bidirectional, layout='NTC')

     def hybrid_forward(self, F, data, state0):
@@ -147,3 +148,16 @@ class Net_0(gluon.HybridBlock):
         return predictions_
def getInputs(self):
inputs = {}
input_dimensions = (1,28,28)
input_domains = (int,0.0,255.0)
inputs["image_"] = input_domains + (input_dimensions,)
return inputs
def getOutputs(self):
outputs = {}
output_dimensions = (10,1,1)
output_domains = (float,0.0,1.0)
outputs["predictions_"] = output_domains + (output_dimensions,)
return outputs
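The new getInputs()/getOutputs() methods return, per port, a tuple of (element type, lower bound, upper bound, dimensions). A hedged sketch of how a caller might check a sample against such a spec; this helper is not part of the generator, and the sample data is random.

import numpy as np

def check_port(value, spec):
    # spec layout as emitted above: (type, lower, upper, dimensions); for the
    # 1-D state networks "dimensions" is a plain int such as (4), not a tuple.
    _elem_type, lower, upper, dims = spec
    dims = dims if isinstance(dims, tuple) else (dims,)
    assert value.shape == dims, "unexpected shape %r" % (value.shape,)
    assert value.min() >= lower and value.max() <= upper, "value outside declared domain"

# spec copied from getInputs() of the MNIST classifier network above
image_spec = (int, 0.0, 255.0, (1, 28, 28))
check_port(np.random.randint(0, 256, size=(1, 28, 28)), image_spec)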
 import mxnet as mx
 import numpy as np
+import math
 from mxnet import gluon
@@ -51,10 +52,10 @@ class Reshape(gluon.HybridBlock):
 class CustomRNN(gluon.HybridBlock):
-    def __init__(self, hidden_size, num_layers, bidirectional, **kwargs):
+    def __init__(self, hidden_size, num_layers, dropout, bidirectional, **kwargs):
         super(CustomRNN, self).__init__(**kwargs)
         with self.name_scope():
-            self.rnn = gluon.rnn.RNN(hidden_size=hidden_size, num_layers=num_layers,
+            self.rnn = gluon.rnn.RNN(hidden_size=hidden_size, num_layers=num_layers, dropout=dropout,
                                      bidirectional=bidirectional, activation='tanh', layout='NTC')

     def hybrid_forward(self, F, data, state0):
@@ -63,10 +64,10 @@ class CustomRNN(gluon.HybridBlock):
 class CustomLSTM(gluon.HybridBlock):
-    def __init__(self, hidden_size, num_layers, bidirectional, **kwargs):
+    def __init__(self, hidden_size, num_layers, dropout, bidirectional, **kwargs):
         super(CustomLSTM, self).__init__(**kwargs)
         with self.name_scope():
-            self.lstm = gluon.rnn.LSTM(hidden_size=hidden_size, num_layers=num_layers,
+            self.lstm = gluon.rnn.LSTM(hidden_size=hidden_size, num_layers=num_layers, dropout=dropout,
                                        bidirectional=bidirectional, layout='NTC')

     def hybrid_forward(self, F, data, state0, state1):
@@ -75,10 +76,10 @@ class CustomLSTM(gluon.HybridBlock):
 class CustomGRU(gluon.HybridBlock):
-    def __init__(self, hidden_size, num_layers, bidirectional, **kwargs):
+    def __init__(self, hidden_size, num_layers, dropout, bidirectional, **kwargs):
         super(CustomGRU, self).__init__(**kwargs)
         with self.name_scope():
-            self.gru = gluon.rnn.GRU(hidden_size=hidden_size, num_layers=num_layers,
+            self.gru = gluon.rnn.GRU(hidden_size=hidden_size, num_layers=num_layers, dropout=dropout,
                                      bidirectional=bidirectional, layout='NTC')

     def hybrid_forward(self, F, data, state0):
@@ -122,3 +123,16 @@ class Net_0(gluon.HybridBlock):
         return qvalues_
def getInputs(self):
inputs = {}
input_dimensions = (4)
input_domains = (float,0,1)
inputs["state_"] = input_domains + (input_dimensions,)
return inputs
def getOutputs(self):
outputs = {}
output_dimensions = (2,1,1)
output_domains = (float,float('-inf'),float('inf'))
outputs["qvalues_"] = output_domains + (output_dimensions,)
return outputs
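The Custom* recurrent blocks now take a dropout argument and forward it to the underlying gluon.rnn layer. A short sketch of the effect at the gluon level; shapes and hyperparameters are illustrative, and gluon applies this dropout between stacked layers, so it only has an effect for num_layers > 1.

import mxnet as mx
from mxnet import gluon

lstm = gluon.rnn.LSTM(hidden_size=32, num_layers=2, dropout=0.2,
                      bidirectional=False, layout='NTC')
lstm.initialize()

data = mx.nd.random.uniform(shape=(8, 10, 16))   # (batch, time, channels)
state = lstm.begin_state(batch_size=8)
output, state = lstm(data, state)
print(output.shape)                               # (8, 10, 32)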
 import mxnet as mx
 import numpy as np
+import math
 from mxnet import gluon
@@ -51,10 +52,10 @@ class Reshape(gluon.HybridBlock):
 class CustomRNN(gluon.HybridBlock):
-    def __init__(self, hidden_size, num_layers, bidirectional, **kwargs):
+    def __init__(self, hidden_size, num_layers, dropout, bidirectional, **kwargs):
         super(CustomRNN, self).__init__(**kwargs)
         with self.name_scope():
-            self.rnn = gluon.rnn.RNN(hidden_size=hidden_size, num_layers=num_layers,
+            self.rnn = gluon.rnn.RNN(hidden_size=hidden_size, num_layers=num_layers, dropout=dropout,
                                      bidirectional=bidirectional, activation='tanh', layout='NTC')

     def hybrid_forward(self, F, data, state0):
@@ -63,10 +64,10 @@ class CustomRNN(gluon.HybridBlock):
 class CustomLSTM(gluon.HybridBlock):
-    def __init__(self, hidden_size, num_layers, bidirectional, **kwargs):
+    def __init__(self, hidden_size, num_layers, dropout, bidirectional, **kwargs):
         super(CustomLSTM, self).__init__(**kwargs)
         with self.name_scope():
-            self.lstm = gluon.rnn.LSTM(hidden_size=hidden_size, num_layers=num_layers,
+            self.lstm = gluon.rnn.LSTM(hidden_size=hidden_size, num_layers=num_layers, dropout=dropout,
                                        bidirectional=bidirectional, layout='NTC')

     def hybrid_forward(self, F, data, state0, state1):
@@ -75,10 +76,10 @@ class CustomLSTM(gluon.HybridBlock):
 class CustomGRU(gluon.HybridBlock):
-    def __init__(self, hidden_size, num_layers, bidirectional, **kwargs):
+    def __init__(self, hidden_size, num_layers, dropout, bidirectional, **kwargs):
         super(CustomGRU, self).__init__(**kwargs)
         with self.name_scope():
-            self.gru = gluon.rnn.GRU(hidden_size=hidden_size, num_layers=num_layers,
+            self.gru = gluon.rnn.GRU(hidden_size=hidden_size, num_layers=num_layers, dropout=dropout,
                                      bidirectional=bidirectional, layout='NTC')

     def hybrid_forward(self, F, data, state0):
@@ -124,3 +125,16 @@ class Net_0(gluon.HybridBlock):
         return action_
def getInputs(self):
inputs = {}
input_dimensions = (2)
input_domains = (float,0,1)
inputs["state_"] = input_domains + (input_dimensions,)
return inputs
def getOutputs(self):
outputs = {}
output_dimensions = (1,1,1)
output_domains = (float,-1.0,1.0)
outputs["action_"] = output_domains + (output_dimensions,)
return outputs
 import mxnet as mx
 import numpy as np
+import math
 from mxnet import gluon
@@ -51,10 +52,10 @@ class Reshape(gluon.HybridBlock):
 class CustomRNN(gluon.HybridBlock):
-    def __init__(self, hidden_size, num_layers, bidirectional, **kwargs):
+    def __init__(self, hidden_size, num_layers, dropout, bidirectional, **kwargs):
         super(CustomRNN, self).__init__(**kwargs)
         with self.name_scope():
-            self.rnn = gluon.rnn.RNN(hidden_size=hidden_size, num_layers=num_layers,
+            self.rnn = gluon.rnn.RNN(hidden_size=hidden_size, num_layers=num_layers, dropout=dropout,
                                      bidirectional=bidirectional, activation='tanh', layout='NTC')

     def hybrid_forward(self, F, data, state0):
@@ -63,10 +64,10 @@ class CustomRNN(gluon.HybridBlock):
 class CustomLSTM(gluon.HybridBlock):
-    def __init__(self, hidden_size, num_layers, bidirectional, **kwargs):
+    def __init__(self, hidden_size, num_layers, dropout, bidirectional, **kwargs):
         super(CustomLSTM, self).__init__(**kwargs)
         with self.name_scope():
-            self.lstm = gluon.rnn.LSTM(hidden_size=hidden_size, num_layers=num_layers,
+            self.lstm = gluon.rnn.LSTM(hidden_size=hidden_size, num_layers=num_layers, dropout=dropout,
                                        bidirectional=bidirectional, layout='NTC')

     def hybrid_forward(self, F, data, state0, state1):
@@ -75,10 +76,10 @@ class CustomLSTM(gluon.HybridBlock):
 class CustomGRU(gluon.HybridBlock):
-    def __init__(self, hidden_size, num_layers, bidirectional, **kwargs):
+    def __init__(self, hidden_size, num_layers, dropout, bidirectional, **kwargs):
         super(CustomGRU, self).__init__(**kwargs)
         with self.name_scope():
-            self.gru = gluon.rnn.GRU(hidden_size=hidden_size, num_layers=num_layers,
+            self.gru = gluon.rnn.GRU(hidden_size=hidden_size, num_layers=num_layers, dropout=dropout,
                                      bidirectional=bidirectional, layout='NTC')

     def hybrid_forward(self, F, data, state0):
@@ -135,3 +136,19 @@ class Net_0(gluon.HybridBlock):
         return qvalues_
def getInputs(self):
inputs = {}
input_dimensions = (2)
input_domains = (float,0,1)
inputs["state_"] = input_domains + (input_dimensions,)
input_dimensions = (1)
input_domains = (float,-1.0,1.0)
inputs["action_"] = input_domains + (input_dimensions,)
return inputs
def getOutputs(self):
outputs = {}
output_dimensions = (1,1,1)
output_domains = (float,float('-inf'),float('inf'))
outputs["qvalues_"] = output_domains + (output_dimensions,)
return outputs
 import mxnet as mx
 import numpy as np
+import math
 from mxnet import gluon
@@ -51,10 +52,10 @@ class Reshape(gluon.HybridBlock):
 class CustomRNN(gluon.HybridBlock):
-    def __init__(self, hidden_size, num_layers, bidirectional, **kwargs):
+    def __init__(self, hidden_size, num_layers, dropout, bidirectional, **kwargs):
         super(CustomRNN, self).__init__(**kwargs)
         with self.name_scope():
-            self.rnn = gluon.rnn.RNN(hidden_size=hidden_size, num_layers=num_layers,
+            self.rnn = gluon.rnn.RNN(hidden_size=hidden_size, num_layers=num_layers, dropout=dropout,
                                      bidirectional=bidirectional, activation='tanh', layout='NTC')

     def hybrid_forward(self, F, data, state0):
@@ -63,10 +64,10 @@ class CustomRNN(gluon.HybridBlock):
 class CustomLSTM(gluon.HybridBlock):
-    def __init__(self, hidden_size, num_layers, bidirectional, **kwargs):
+    def __init__(self, hidden_size, num_layers, dropout, bidirectional, **kwargs):
         super(CustomLSTM, self).__init__(**kwargs)
         with self.name_scope():
-            self.lstm = gluon.rnn.LSTM(hidden_size=hidden_size, num_layers=num_layers,
+            self.lstm = gluon.rnn.LSTM(hidden_size=hidden_size, num_layers=num_layers, dropout=dropout,
                                        bidirectional=bidirectional, layout='NTC')

     def hybrid_forward(self, F, data, state0, state1):
@@ -75,10 +76,10 @@ class CustomLSTM(gluon.HybridBlock):
 class CustomGRU(gluon.HybridBlock):
-    def __init__(self, hidden_size, num_layers, bidirectional, **kwargs):
+    def __init__(self, hidden_size, num_layers, dropout, bidirectional, **kwargs):
         super(CustomGRU, self).__init__(**kwargs)
         with self.name_scope():
-            self.gru = gluon.rnn.GRU(hidden_size=hidden_size, num_layers=num_layers,
+            self.gru = gluon.rnn.GRU(hidden_size=hidden_size, num_layers=num_layers, dropout=dropout,
                                      bidirectional=bidirectional, layout='NTC')

     def hybrid_forward(self, F, data, state0):
@@ -122,3 +123,16 @@ class Net_0(gluon.HybridBlock):
         return qvalues_
def getInputs(self):
inputs = {}
input_dimensions = (5)
input_domains = (float,0,1)
inputs["state_"] = input_domains + (input_dimensions,)
return inputs
def getOutputs(self):
outputs = {}
output_dimensions = (30,1,1)
output_domains = (float,float('-inf'),float('inf'))
outputs["qvalues_"] = output_domains + (output_dimensions,)
return outputs
 #ifndef HELPERA_H
 #define HELPERA_H
 #include <iostream>
-#include "armadillo.h"
+#include "armadillo"
 #include <stdarg.h>
 #include <initializer_list>
 #include <fstream>
......
 import mxnet as mx
 import numpy as np
+import math
 from mxnet import gluon
@@ -51,10 +52,10 @@ class Reshape(gluon.HybridBlock):
 class CustomRNN(gluon.HybridBlock):
-    def __init__(self, hidden_size, num_layers, bidirectional, **kwargs):
+    def __init__(self, hidden_size, num_layers, dropout, bidirectional, **kwargs):
         super(CustomRNN, self).__init__(**kwargs)
         with self.name_scope():
-            self.rnn = gluon.rnn.RNN(hidden_size=hidden_size, num_layers=num_layers,
+            self.rnn = gluon.rnn.RNN(hidden_size=hidden_size, num_layers=num_layers, dropout=dropout,
                                      bidirectional=bidirectional, activation='tanh', layout='NTC')

     def hybrid_forward(self, F, data, state0):
@@ -63,10 +64,10 @@ class CustomRNN(gluon.HybridBlock):
 class CustomLSTM(gluon.HybridBlock):
-    def __init__(self, hidden_size, num_layers, bidirectional, **kwargs):
+    def __init__(self, hidden_size, num_layers, dropout, bidirectional, **kwargs):
         super(CustomLSTM, self).__init__(**kwargs)
         with self.name_scope():
-            self.lstm = gluon.rnn.LSTM(hidden_size=hidden_size, num_layers=num_layers,
+            self.lstm = gluon.rnn.LSTM(hidden_size=hidden_size, num_layers=num_layers, dropout=dropout,
                                        bidirectional=bidirectional, layout='NTC')

     def hybrid_forward(self, F, data, state0, state1):
@@ -75,10 +76,10 @@ class CustomLSTM(gluon.HybridBlock):
 class CustomGRU(gluon.HybridBlock):
-    def __init__(self, hidden_size, num_layers, bidirectional, **kwargs):
+    def __init__(self, hidden_size, num_layers, dropout, bidirectional, **kwargs):
         super(CustomGRU, self).__init__(**kwargs)
         with self.name_scope():
-            self.gru = gluon.rnn.GRU(hidden_size=hidden_size, num_layers=num_layers,
+            self.gru = gluon.rnn.GRU(hidden_size=hidden_size, num_layers=num_layers, dropout=dropout,
                                      bidirectional=bidirectional, layout='NTC')

     def hybrid_forward(self, F, data, state0):
@@ -124,3 +125,16 @@ class Net_0(gluon.HybridBlock):
         return commands_
def getInputs(self):
inputs = {}
input_dimensions = (29)
input_domains = (float,0,1)
inputs["state_"] = input_domains + (input_dimensions,)
return inputs
def getOutputs(self):
outputs = {}
output_dimensions = (3,1,1)
output_domains = (float,-1.0,1.0)
outputs["commands_"] = output_domains + (output_dimensions,)
return outputs
 import mxnet as mx
 import numpy as np
+import math
 from mxnet import gluon
@@ -51,10 +52,10 @@ class Reshape(gluon.HybridBlock):
 class CustomRNN(gluon.HybridBlock):
-    def __init__(self, hidden_size, num_layers, bidirectional, **kwargs):
+    def __init__(self, hidden_size, num_layers, dropout, bidirectional, **kwargs):
         super(CustomRNN, self).__init__(**kwargs)
         with self.name_scope():
-            self.rnn = gluon.rnn.RNN(hidden_size=hidden_size, num_layers=num_layers,
+            self.rnn = gluon.rnn.RNN(hidden_size=hidden_size, num_layers=num_layers, dropout=dropout,
                                      bidirectional=bidirectional, activation='tanh', layout='NTC')

     def hybrid_forward(self, F, data, state0):
@@ -63,10 +64,10 @@ class CustomRNN(gluon.HybridBlock):
 class CustomLSTM(gluon.HybridBlock):
-    def __init__(self, hidden_size, num_layers, bidirectional, **kwargs):
+    def __init__(self, hidden_size, num_layers, dropout, bidirectional, **kwargs):
         super(CustomLSTM, self).__init__(**kwargs)
         with self.name_scope():
-            self.lstm = gluon.rnn.LSTM(hidden_size=hidden_size, num_layers=num_layers,
+            self.lstm = gluon.rnn.LSTM(hidden_size=hidden_size, num_layers=num_layers, dropout=dropout,
                                        bidirectional=bidirectional, layout='NTC')

     def hybrid_forward(self, F, data, state0, state1):
@@ -75,10 +76,10 @@ class CustomLSTM(gluon.HybridBlock):
 class CustomGRU(gluon.HybridBlock):
-    def __init__(self, hidden_size, num_layers, bidirectional, **kwargs):
+    def __init__(self, hidden_size, num_layers, dropout, bidirectional, **kwargs):
         super(CustomGRU, self).__init__(**kwargs)
         with self.name_scope():
-            self.gru = gluon.rnn.GRU(hidden_size=hidden_size, num_layers=num_layers,
+            self.gru = gluon.rnn.GRU(hidden_size=hidden_size, num_layers=num_layers, dropout=dropout,
                                      bidirectional=bidirectional, layout='NTC')

     def hybrid_forward(self, F, data, state0):
@@ -131,3 +132,19 @@ class Net_0(gluon.HybridBlock):
         return qvalues_
def getInputs(self):
inputs = {}
input_dimensions = (29)
input_domains = (float,0,1)
inputs["state_"] = input_domains + (input_dimensions,)
input_dimensions = (3)
input_domains = (float,-1.0,1.0)
inputs["action_"] = input_domains + (input_dimensions,)
return inputs
def getOutputs(self):
outputs = {}
output_dimensions = (1,1,1)
output_domains = (float,float('-inf'),float('inf'))
outputs["qvalues_"] = output_domains + (output_dimensions,)
return outputs
wget http://images.cocodataset.org/zips/train2014.zip