Commit a0fc79fc authored by Sebastian N.

Changed the OneHot layer, added support for constants, and implemented invariants

parent 5573c542
Pipeline #155897 failed in 2 minutes and 59 seconds
@@ -7,16 +7,24 @@ public class CNNArch2GluonArchitectureSupportChecker extends ArchitectureSupportChecker {
public CNNArch2GluonArchitectureSupportChecker() {}
@Override
protected boolean checkMultipleStreams(ArchitectureSymbol architecture) {
return true;
}
@Override
protected boolean checkMultipleInputs(ArchitectureSymbol architecture) {
return true;
}
@Override
protected boolean checkMultipleOutputs(ArchitectureSymbol architecture) {
return true;
}
@Override
protected boolean checkConstants(ArchitectureSymbol architecture) {
return true;
}
}
@@ -65,13 +65,27 @@ public class CNNArch2GluonTemplateController extends CNNArchTemplateController {
setCurrentElement(previousElement);
}
public void include(ConstantSymbol constant, Writer writer, NetDefinitionMode netDefinitionMode) {
ArchitectureElementData previousElement = getCurrentElement();
setCurrentElement(constant);
if (constant.isAtomic()) {
include(TEMPLATE_ELEMENTS_DIR_PATH, "Const", writer, netDefinitionMode);
}
else {
include(constant.getResolvedThis().get(), writer, netDefinitionMode);
}
setCurrentElement(previousElement);
}
public void include(LayerSymbol layer, Writer writer, NetDefinitionMode netDefinitionMode){
ArchitectureElementData previousElement = getCurrentElement();
setCurrentElement(layer);
if (layer.isAtomic()){
ArchitectureElementSymbol nextElement = layer.getOutputElement().get();
if (!isSoftmaxOutput(nextElement) && !isLogisticRegressionOutput(nextElement) && !isOneHotOutput(nextElement)){
if (!isSoftmaxOutput(nextElement) && !isLogisticRegressionOutput(nextElement)){
String templateName = layer.getDeclaration().getName();
include(TEMPLATE_ELEMENTS_DIR_PATH, templateName, writer, netDefinitionMode);
}
@@ -101,6 +115,9 @@ public class CNNArch2GluonTemplateController extends CNNArchTemplateController {
else if (architectureElement instanceof LayerSymbol){
include((LayerSymbol) architectureElement, writer, netDefinitionMode);
}
else if (architectureElement instanceof ConstantSymbol) {
include((ConstantSymbol) architectureElement, writer, netDefinitionMode);
}
else {
include((IOSymbol) architectureElement, writer, netDefinitionMode);
}
@@ -117,6 +134,10 @@ public class CNNArch2GluonTemplateController extends CNNArchTemplateController {
include(architectureElement, getWriter(), netDefinitionMode);
}
public String ioNameToCpp(String ioName) {
return ioName.replaceAll("_([0-9]+)_", "[$1]");
}
public List<String> getStreamInputNames(SerialCompositeElementSymbol stream) {
List<String> names = new ArrayList<>();
......
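Note on the new ioNameToCpp helper above: it rewrites indexed IO names such as data_0_ into the C++ array form data[0] before they reach the CPP_INLINE templates. A minimal Python mirror of the same regex, for illustration only (the sample name is an assumption; the real helper is the Java method shown):

import re

def io_name_to_cpp(io_name):
    # Same rewrite as ioName.replaceAll("_([0-9]+)_", "[$1]")
    return re.sub(r"_([0-9]+)_", r"[\1]", io_name)

assert io_name_to_cpp("data_0_") == "data[0]"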
@@ -5,7 +5,9 @@ package de.monticore.lang.monticar.cnnarch.gluongenerator;
*/
public enum NetDefinitionMode {
ARCHITECTURE_DEFINITION,
FORWARD_FUNCTION;
FORWARD_FUNCTION,
PYTHON_INLINE,
CPP_INLINE;
public static NetDefinitionMode fromString(final String netDefinitionMode) {
switch(netDefinitionMode) {
@@ -13,6 +15,10 @@ public enum NetDefinitionMode {
return ARCHITECTURE_DEFINITION;
case "FORWARD_FUNCTION":
return FORWARD_FUNCTION;
case "PYTHON_INLINE":
return PYTHON_INLINE;
case "CPP_INLINE":
return CPP_INLINE;
default:
throw new IllegalArgumentException("Unknown Net Definition Mode");
}
......
@@ -100,11 +100,19 @@ class ${tc.fileNameWithoutEnding}:
<#if stream.isNetwork()>
${tc.join(tc.getStreamOutputNames(stream), ", ", "", "_output")} = self._networks[${stream?index}](${tc.join(tc.getStreamInputNames(stream), ", ", "", "_data")})
<#else>
# TODO: Implement non network streams
${tc.include(stream, "PYTHON_INLINE")}
</#if>
</#list>
loss = \
<#list tc.architecture.streams as stream>
<#if stream.isNetwork()>
<#list tc.getStreamOutputNames(stream) as output_name>
loss_functions['${output_name}'](${output_name}_output, ${output_name}_label)<#sep> + \
</#list><#sep> + \
</#if>
</#list>
loss = <#list tc.architectureOutputs as output_name>loss_functions['${output_name}'](${output_name}_output, ${output_name}_label)<#sep> + </#list>
loss.backward()
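For a two-output architecture, the new loss template above would expand to generated code along these lines (a hand-expanded sketch; the output names pred_0_ and pred_1_ are assumptions, and the doubled underscore follows from appending _output/_label to the IO name):

loss = \
    loss_functions['pred_0_'](pred_0__output, pred_0__label) + \
    loss_functions['pred_1_'](pred_1__output, pred_1__label)
loss.backward()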
@@ -134,19 +142,26 @@ class ${tc.fileNameWithoutEnding}:
</#list>
labels = [
<#list tc.architectureOutputs as output_name>batch.label[${output_name?index}].as_in_context(mx_context)<#sep>, </#list>
<#list tc.architectureOutputs as output_name>
batch.label[${output_name?index}].as_in_context(mx_context)<#sep>,
</#list>
]
if True: # Fix indentation
<#list tc.architecture.streams as stream>
<#if stream.isNetwork()>
${tc.join(tc.getStreamOutputNames(stream), ", ", "", "_output")} = self._networks[${stream?index}](${tc.join(tc.getStreamInputNames(stream), ", ", "", "_data")})
${tc.join(tc.getStreamOutputNames(stream), ", ", "", "_output")} = self._networks[${stream?index}](${tc.join(tc.getStreamInputNames(stream), ", ", "", "_data")})
<#else>
# TODO: Implement non network streams
${tc.include(stream, "PYTHON_INLINE")}
</#if>
</#list>
predictions = [
<#list tc.architectureOutputs as output_name>mx.nd.argmax(${output_name}_output, axis=1)<#sep>, </#list>
<#list tc.architectureOutputs as output_name>
mx.nd.argmax(${output_name}_output, axis=1)<#sep>,
</#list>
]
metric.update(preds=predictions, labels=labels)
@@ -160,18 +175,26 @@ class ${tc.fileNameWithoutEnding}:
</#list>
labels = [
<#list tc.architectureOutputs as output_name>batch.label[${output_name?index}].as_in_context(mx_context)<#sep>, </#list>
<#list tc.architectureOutputs as output_name>
batch.label[${output_name?index}].as_in_context(mx_context)<#sep>,
</#list>
]
if True: # Fix indentation
<#list tc.architecture.streams as stream>
<#if stream.isNetwork()>
${tc.join(tc.getStreamOutputNames(stream), ", ", "", "_output")} = self._networks[${stream?index}](${tc.join(tc.getStreamInputNames(stream), ", ", "", "_data")})
${tc.join(tc.getStreamOutputNames(stream), ", ", "", "_output")} = self._networks[${stream?index}](${tc.join(tc.getStreamInputNames(stream), ", ", "", "_data")})
<#else>
# TODO: Implement non network streams
${tc.include(stream, "PYTHON_INLINE")}
</#if>
</#list>
predictions = [
<#list tc.architectureOutputs as output_name>mx.nd.argmax(${output_name}_output, axis=1)<#sep>, </#list>
<#list tc.architectureOutputs as output_name>
mx.nd.argmax(${output_name}_output, axis=1)<#sep>,
</#list>
]
metric.update(preds=predictions, labels=labels)
......
<#if mode == "FORWARD_FUNCTION">
${element.name} = ${tc.join(element.inputs, " + ")}
<#elseif mode == "PYTHON_INLINE">
${element.name} = ${tc.join(element.inputs, " + ")}
<#elseif mode == "CPP_INLINE">
vector<float> ${element.name}(${element.inputs[0]}.size());
for (size_t i = 0; i != ${element.name}.size(); ++i) {
${element.name}[i] = ${tc.join(element.inputs, " + ", "", "[i]")};
}
</#if>
\ No newline at end of file
@@ -3,7 +3,6 @@
<#if mode == "ARCHITECTURE_DEFINITION">
self.${element.name} = gluon.nn.BatchNorm()
<#include "OutputShape.ftl">
</#if>
<#if mode == "FORWARD_FUNCTION">
<#elseif mode == "FORWARD_FUNCTION">
${element.name} = self.${element.name}(${input})
</#if>
\ No newline at end of file
<#if mode == "ARCHITECTURE_DEFINITION">
self.${element.name} = Concatenate(dim=1)
<#include "OutputShape.ftl">
</#if>
<#if mode == "FORWARD_FUNCTION">
<#elseif mode == "FORWARD_FUNCTION">
${element.name} = self.${element.name}(${tc.join(element.inputs, ", ")})
</#if>
\ No newline at end of file
<#if mode == "FORWARD_FUNCTION">
${element.name} = gluon.Const('${element.name}', ${element.constValue})
<#elseif mode == "PYTHON_INLINE">
${element.name} = nd.array(${element.constValue})
<#elseif mode == "CPP_INLINE">
vector<float> ${element.name}{${element.constValue}};
</#if>
\ No newline at end of file
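The Const element is thus rendered three ways: as a symbol in the network definition, as an inline NDArray in generated Python, and as a vector<float> in generated C++. For a literal constant 1 named const1_ (both the name and the list wrapping are assumptions; MXNet NDArrays are at least one-dimensional, so a bare scalar may need wrapping), the PYTHON_INLINE branch would emit, in effect:

import mxnet.ndarray as nd

# Sketch of the generated line ${element.name} = nd.array(${element.constValue})
const1_ = nd.array([1])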
@@ -8,8 +8,7 @@
strides=(${tc.join(element.stride, ",")}),
use_bias=${element.noBias?string("False", "True")})
<#include "OutputShape.ftl">
</#if>
<#if mode == "FORWARD_FUNCTION">
<#elseif mode == "FORWARD_FUNCTION">
<#if element.padding??>
${element.name}padding = self.${element.name}padding(${input})
<#assign input = element.name + "padding">
......
@@ -2,7 +2,6 @@
<#assign input = element.inputs[0]>
<#if mode == "ARCHITECTURE_DEFINITION">
self.${element.name} = gluon.nn.Dropout(rate=${rate})
</#if>
<#if mode == "FORWARD_FUNCTION">
<#elseif mode == "FORWARD_FUNCTION">
${element.name} = self.${element.name}(${input})
</#if>
\ No newline at end of file
@@ -2,7 +2,6 @@
<#if mode == "ARCHITECTURE_DEFINITION">
self.${element.name} = gluon.nn.Flatten()
<#include "OutputShape.ftl">
</#if>
<#if mode == "FORWARD_FUNCTION">
<#elseif mode == "FORWARD_FUNCTION">
${element.name} = self.${element.name}(${input})
</#if>
\ No newline at end of file
@@ -8,8 +8,7 @@
</#if>
self.${element.name} = gluon.nn.Dense(units=${units}, use_bias=${use_bias})
<#include "OutputShape.ftl">
</#if>
<#if mode == "FORWARD_FUNCTION">
<#elseif mode == "FORWARD_FUNCTION">
<#if flatten>
${element.name}flatten_ = self.${element.name}flatten(${input})
<#assign input = element.name + "flatten_">
......
<#if mode == "FORWARD_FUNCTION">
${element.name} = ${element.inputs[element.index]}
<#elseif mode == "PYTHON_INLINE">
${element.name} = ${element.inputs[element.index]}
<#elseif mode == "CPP_INLINE">
vector<float> ${element.name} = ${element.inputs[element.index]};
</#if>
\ No newline at end of file
@@ -8,7 +8,6 @@
<#if mode == "ARCHITECTURE_DEFINITION">
self.${element.name} = gluon.nn.Global${poolFunctionType}Pool2D()
<#include "OutputShape.ftl">
</#if>
<#if mode == "FORWARD_FUNCTION">
<#elseif mode == "FORWARD_FUNCTION">
${element.name} = self.${element.name}(${input})
</#if>
\ No newline at end of file
@@ -6,7 +6,10 @@
else:
self.input_normalization_${element.name} = NoNormalization()
</#if>
<#if mode == "FORWARD_FUNCTION">
<#elseif mode == "FORWARD_FUNCTION">
${element.name} = self.input_normalization_${element.name}(${element.name})
<#elseif mode == "PYTHON_INLINE">
${element.name} = ${element.name}_data
<#elseif mode == "CPP_INLINE">
vector<float> ${element.name} = CNNTranslator::translate(${tc.ioNameToCpp(element.name)});
</#if>
\ No newline at end of file
@@ -3,7 +3,11 @@
<#if mode == "ARCHITECTURE_DEFINITION">
self.${element.name} = OneHot(size=${size})
<#include "OutputShape.ftl">
</#if>
<#if mode == "FORWARD_FUNCTION">
<#elseif mode == "FORWARD_FUNCTION">
${element.name} = self.${element.name}(${input})
</#if>
<#elseif mode == "PYTHON_INLINE">
${element.name} = nd.one_hot(indices=${input}, depth=${size})
<#elseif mode == "CPP_INLINE">
vector<float> ${element.name}(${size}, 0);
${element.name}[${input}[0]] = 1;
</#if>
\ No newline at end of file
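The new PYTHON_INLINE branch maps directly onto MXNet's built-in one-hot operator; a standalone sketch of its behaviour for size=4 (variable names are placeholders):

import mxnet as mx

indices = mx.nd.array([1])                          # index produced by the previous element
onehot1_ = mx.nd.one_hot(indices=indices, depth=4)  # one length-4 row per index
print(onehot1_)                                     # [[0. 1. 0. 0.]]

The CPP_INLINE branch implements the same mapping by hand: it allocates a zero-initialized vector of length size and sets the entry selected by the first input value to 1.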
@@ -9,7 +9,10 @@
<#elseif element.oneHotOutput>
self.last_layers['${element.name}'] = 'softmax'
</#if>
</#if>
<#if mode == "FORWARD_FUNCTION">
<#elseif mode == "FORWARD_FUNCTION">
outputs.append(${input})
<#elseif mode == "PYTHON_INLINE">
${element.name}_output = ${input}
<#elseif mode == "CPP_INLINE">
CNN_${element.name} = ${input};
</#if>
@@ -15,8 +15,7 @@
pool_size=${poolSize},
strides=${strides})
<#include "OutputShape.ftl">
</#if>
<#if mode == "FORWARD_FUNCTION">
<#elseif mode == "FORWARD_FUNCTION">
<#if element.padding??>
${element.name}padding = self.${element.name}padding(${input})
<#assign input = element.name + "padding">
......
<#assign input = element.inputs[0]>
<#if mode == "ARCHITECTURE_DEFINITION">
self.${element.name} = gluon.nn.Activation(activation='relu')
</#if>
<#if mode == "FORWARD_FUNCTION">
<#elseif mode == "FORWARD_FUNCTION">
${element.name} = self.${element.name}(${input})
</#if>
\ No newline at end of file
<#assign input = element.inputs[0]>
<#if mode == "ARCHITECTURE_DEFINITION">
self.${element.name} = gluon.nn.Activation(activation='sigmoid')
</#if>
<#if mode == "FORWARD_FUNCTION">
<#elseif mode == "FORWARD_FUNCTION">
${element.name} = self.${element.name}(${input})
</#if>
\ No newline at end of file
@@ -2,7 +2,6 @@
<#assign input = element.inputs[0]>
<#if mode == "ARCHITECTURE_DEFINITION">
self.${element.name} = Softmax()
</#if>
<#if mode == "FORWARD_FUNCTION">
<#elseif mode == "FORWARD_FUNCTION">
${element.name} = self.${element.name}(${input})
</#if>
@@ -3,7 +3,6 @@
<#if mode == "ARCHITECTURE_DEFINITION">
self.${element.name} = Split(num_outputs=${num_outputs}, axis=1)
<#include "OutputShape.ftl">
</#if>
<#if mode == "FORWARD_FUNCTION">
<#elseif mode == "FORWARD_FUNCTION">
${element.name} = self.${element.name}(${input})
</#if>
\ No newline at end of file
<#assign input = element.inputs[0]>
<#if mode == "ARCHITECTURE_DEFINITION">
self.${element.name} = gluon.nn.Activation(activation='tanh')
</#if>
<#if mode == "FORWARD_FUNCTION">
<#elseif mode == "FORWARD_FUNCTION">
${element.name} = self.${element.name}(${input})
</#if>
\ No newline at end of file
@@ -7,6 +7,8 @@
_predictor_${stream?index}_.predict(<#list stream.getFirstAtomicElements() as input>CNNTranslator::translate(${input.name}<#if input.arrayAccess.isPresent()>[${input.arrayAccess.get().intValue.get()?c}]</#if>),
</#list><#list stream.getLastAtomicElements() as output>CNN_${tc.getName(output)}<#sep>,
</#list>);
<#else>
${tc.include(stream, "CPP_INLINE")}
</#if>
</#list>
......
@@ -127,6 +127,14 @@ public class GenerationTest extends AbstractSymtabTest {
assertTrue(Log.getFindings().isEmpty());
}
@Test
public void testInvariant() throws IOException, TemplateException {
Log.getFindings().clear();
String[] args = {"-m", "src/test/resources/valid_tests", "-r", "Invariant"};
CNNArch2GluonCli.main(args);
assertTrue(Log.getFindings().isEmpty());
}
@Test
public void testResNeXtGeneration() throws IOException, TemplateException {
Log.getFindings().clear();
......
@@ -94,7 +94,8 @@ class CNNSupervisedTrainer_Alexnet:
with autograd.record():
predictions_output = self._networks[0](data_data)
loss = loss_functions['predictions'](predictions_output, predictions_label)
loss = \
loss_functions['predictions'](predictions_output, predictions_label)
loss.backward()
@@ -125,7 +126,8 @@ class CNNSupervisedTrainer_Alexnet:
batch.label[0].as_in_context(mx_context)
]
predictions_output = self._networks[0](data_data)
if True: # Fix indentation
predictions_output = self._networks[0](data_data)
predictions = [
mx.nd.argmax(predictions_output, axis=1)
@@ -143,7 +145,9 @@ class CNNSupervisedTrainer_Alexnet:
batch.label[0].as_in_context(mx_context)
]
predictions_output = self._networks[0](data_data)
if True: # Fix indentation
predictions_output = self._networks[0](data_data)
predictions = [
mx.nd.argmax(predictions_output, axis=1)
]
......
@@ -94,7 +94,8 @@ class CNNSupervisedTrainer_CifarClassifierNetwork:
with autograd.record():
softmax_output = self._networks[0](data_data)
loss = loss_functions['softmax'](softmax_output, softmax_label)
loss = \
loss_functions['softmax'](softmax_output, softmax_label)
loss.backward()
@@ -125,7 +126,8 @@ class CNNSupervisedTrainer_CifarClassifierNetwork:
batch.label[0].as_in_context(mx_context)
]
softmax_output = self._networks[0](data_data)
if True: # Fix indentation
softmax_output = self._networks[0](data_data)
predictions = [
mx.nd.argmax(softmax_output, axis=1)
@@ -143,7 +145,9 @@ class CNNSupervisedTrainer_CifarClassifierNetwork:
batch.label[0].as_in_context(mx_context)
]
softmax_output = self._networks[0](data_data)
if True: # Fix indentation
softmax_output = self._networks[0](data_data)
predictions = [
mx.nd.argmax(softmax_output, axis=1)
]
......
@@ -94,7 +94,8 @@ class CNNSupervisedTrainer_VGG16:
with autograd.record():
predictions_output = self._networks[0](data_data)
loss = loss_functions['predictions'](predictions_output, predictions_label)
loss = \
loss_functions['predictions'](predictions_output, predictions_label)
loss.backward()
@@ -125,7 +126,8 @@ class CNNSupervisedTrainer_VGG16:
batch.label[0].as_in_context(mx_context)
]
predictions_output = self._networks[0](data_data)
if True: # Fix indentation
predictions_output = self._networks[0](data_data)
predictions = [
mx.nd.argmax(predictions_output, axis=1)
@@ -143,7 +145,9 @@ class CNNSupervisedTrainer_VGG16:
batch.label[0].as_in_context(mx_context)
]
predictions_output = self._networks[0](data_data)
if True: # Fix indentation
predictions_output = self._networks[0](data_data)
predictions = [
mx.nd.argmax(predictions_output, axis=1)
]
......
architecture Invariant{
def input Z(0:3)^{1} data[2]
def output Q(0:1)^{4} pred[3]
data[0] ->
FullyConnected(units=4) ->
Softmax() ->
pred[0];
data[1] ->
OneHot(size=4) ->
pred[1];
1 ->
OneHot(size=4) ->
pred[2];
}
\ No newline at end of file
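Tracing the non-network stream 1 -> OneHot(size=4) -> pred[2] through the PYTHON_INLINE branches above (Const, then OneHot, then Output) suggests inline code along these lines; all generated identifiers are assumptions:

import mxnet.ndarray as nd

const1_ = nd.array([1])                          # Const.ftl, PYTHON_INLINE
onehot1_ = nd.one_hot(indices=const1_, depth=4)  # OneHot.ftl, PYTHON_INLINE
pred_2__output = onehot1_                        # Output.ftl, PYTHON_INLINE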
@@ -11,4 +11,4 @@ architecture MultipleStreams{
FullyConnected(units=4, no_bias=true) ->
Softmax() ->
pred[1];
}
\ No newline at end of file
}
@@ -3,4 +3,5 @@ CifarClassifierNetwork data/CifarClassifierNetwork
ThreeInputCNN_M14 data/ThreeInputCNN_M14
Alexnet data/Alexnet
ResNeXt50 data/ResNeXt50
MultipleStreams data/MultipleStreams
\ No newline at end of file
MultipleStreams data/MultipleStreams
Invariant data/Invariant
\ No newline at end of file