Commit 6f816979 authored by Sebastian Nickels's avatar Sebastian Nickels
Browse files

Updated constants

parent 72b82633
Pipeline #200241 failed with stages
in 19 seconds
......@@ -62,7 +62,9 @@ public class CNNArch2GluonTemplateController extends CNNArchTemplateController {
}
}
else if (element.getType() == VariableSymbol.Type.LAYER) {
if (element.getMember() != VariableSymbol.Member.OUTPUT) {
if (element.getMember() == VariableSymbol.Member.STATE) {
include(TEMPLATE_ELEMENTS_DIR_PATH, "Output", writer, netDefinitionMode);
} else if (element.getMember() == VariableSymbol.Member.NONE) {
include(TEMPLATE_ELEMENTS_DIR_PATH, element.getLayerVariableDeclaration().getLayer().getName(), writer, netDefinitionMode);
}
}
......@@ -74,20 +76,6 @@ public class CNNArch2GluonTemplateController extends CNNArchTemplateController {
setCurrentElement(previousElement);
}
/**
 * Renders the given constant into the generated network code.
 *
 * <p>Temporarily makes {@code constant} the current element so that nested
 * template lookups resolve against it, then restores the previously current
 * element before returning.
 *
 * @param constant          the constant architecture element to render
 * @param writer            sink for the generated code
 * @param netDefinitionMode generation mode (architecture definition, forward
 *                          function, inline Python/C++)
 */
public void include(ConstantSymbol constant, Writer writer, NetDefinitionMode netDefinitionMode) {
    // Save the element that was current before this call so it can be restored afterwards.
    final ArchitectureElementData enclosingElement = getCurrentElement();
    setCurrentElement(constant);

    if (!constant.isAtomic()) {
        // Composite constant: recurse into the architecture element it resolved to.
        include((ArchitectureElementSymbol) constant.getResolvedThis().get(), writer, netDefinitionMode);
    } else {
        // Atomic constant: render directly via the "Const" element template.
        include(TEMPLATE_ELEMENTS_DIR_PATH, "Const", writer, netDefinitionMode);
    }

    setCurrentElement(enclosingElement);
}
public void include(LayerSymbol layer, Writer writer, NetDefinitionMode netDefinitionMode){
ArchitectureElementData previousElement = getCurrentElement();
setCurrentElement(layer);
......@@ -122,7 +110,7 @@ public class CNNArch2GluonTemplateController extends CNNArchTemplateController {
include((LayerSymbol) architectureElement, writer, netDefinitionMode);
}
else if (architectureElement instanceof ConstantSymbol) {
include((ConstantSymbol) architectureElement, writer, netDefinitionMode);
}
else {
include((VariableSymbol) architectureElement, writer, netDefinitionMode);
......@@ -243,6 +231,9 @@ public class CNNArch2GluonTemplateController extends CNNArchTemplateController {
inputs.put(getName(element), dimensions);
}
else if (element instanceof ConstantSymbol) {
inputs.put(getName(element), Arrays.asList("1"));
}
}
inputs.putAll(getStreamLayerVariableMembers(stream, false));
......
......@@ -94,6 +94,7 @@ class Net_${networkInstruction?index}(gluon.HybridBlock):
self.last_layers = {}
with self.name_scope():
${tc.include(networkInstruction.body, "ARCHITECTURE_DEFINITION")}
pass
def hybrid_forward(self, F, ${tc.join(tc.getStreamInputNames(networkInstruction.body), ", ")}):
${tc.include(networkInstruction.body, "FORWARD_FUNCTION")}
......
<#-- Renders a constant architecture element; output depends on the generation mode. -->
<#if mode == "FORWARD_FUNCTION">
<#-- Gluon hybrid graph: declare the value as a named gluon.Constant. -->
${element.name} = gluon.Constant('${element.name}', ${element.constValue})
<#elseif mode == "PYTHON_INLINE">
<#-- Imperative Python: materialize an NDArray of shape (batch_size, 1) filled with the value. -->
${element.name} = mx.nd.full((batch_size, 1,), ${element.constValue}, ctx=mx_context)
<#elseif mode == "CPP_INLINE">
<#-- Inline C++: a single-element float vector holding the value. -->
vector<float> ${element.name}{${element.constValue}};
</#if>
\ No newline at end of file
<#if element.member == "NONE">
<#assign input = element.inputs[0]>
<#if mode == "ARCHITECTURE_DEFINITION">
self.${element.name} = CustomGRU(hidden_size=${element.units?c},
......@@ -11,16 +10,4 @@
<#else>
${element.name} = self.${element.name}(${input})
</#if>
</#if>
<#elseif element.member == "STATE">
<#if element.inputs?size gte 1>
<#assign input = element.inputs[0]>
<#if mode == "FORWARD_FUNCTION">
${element.name} = ${input}
<#elseif mode == "PYTHON_INLINE">
${element.name} = ${input}
<#elseif mode == "CPP_INLINE">
${element.name} = ${input}
</#if>
</#if>
</#if>
\ No newline at end of file
<#if element.member == "NONE">
<#assign input = element.inputs[0]>
<#if mode == "ARCHITECTURE_DEFINITION">
self.${element.name} = CustomLSTM(hidden_size=${element.units?c},
......@@ -11,16 +10,4 @@
<#else>
${element.name} = self.${element.name}(${input})
</#if>
</#if>
<#elseif element.member == "STATE">
<#if element.inputs?size gte 1>
<#assign input = element.inputs[0]>
<#if mode == "FORWARD_FUNCTION">
${element.name} = ${input}
<#elseif mode == "PYTHON_INLINE">
${element.name} = ${input}
<#elseif mode == "CPP_INLINE">
${element.name} = ${input}
</#if>
</#if>
</#if>
\ No newline at end of file
......@@ -2,9 +2,4 @@
<#assign size = element.size?c>
<#if mode == "FORWARD_FUNCTION">
${element.name} = F.one_hot(indices=${input}, depth=${size})
<#elseif mode == "PYTHON_INLINE">
${element.name} = nd.one_hot(indices=${input}, depth=${size})
<#elseif mode == "CPP_INLINE">
vector<float> ${element.name}(${size}, 0);
${element.name}[${input}[0]] = 1;
</#if>
\ No newline at end of file
<#if element.member == "NONE">
<#assign input = element.inputs[0]>
<#if mode == "ARCHITECTURE_DEFINITION">
self.${element.name} = CustomRNN(hidden_size=${element.units?c},
......@@ -11,16 +10,4 @@
<#else>
${element.name} = self.${element.name}(${input})
</#if>
</#if>
<#elseif element.member == "STATE">
<#if element.inputs?size gte 1>
<#assign input = element.inputs[0]>
<#if mode == "FORWARD_FUNCTION">
${element.name} = ${input}
<#elseif mode == "PYTHON_INLINE">
${element.name} = ${input}
<#elseif mode == "CPP_INLINE">
${element.name} = ${input}
</#if>
</#if>
</#if>
\ No newline at end of file
<#assign input = element.inputs[0]>
<#if mode == "ARCHITECTURE_DEFINITION">
self.${element.name} = Reshape(shape=(${tc.join(element.shape, ",")}))
<#include "OutputShape.ftl">
<#elseif mode == "FORWARD_FUNCTION">
${element.name} = self.${element.name}(${input})
<#elseif mode == "PYTHON_INLINE">
self.${element.name} = Reshape(shape=${shape})
<#if mode == "FORWARD_FUNCTION">
${element.name} = F.reshape(${input}, shape=(${tc.join(element.shape, ",")}))
</#if>
\ No newline at end of file
......@@ -2,8 +2,4 @@
<#assign num_outputs = element.numOutputs?c>
<#if mode == "FORWARD_FUNCTION">
${element.name} = F.split(${input}, axis=1, num_outputs=${num_outputs})
<#elseif mode == "PYTHON_INLINE">
${element.name} = mx.nd.split(data=${input}, axis=1, num_outputs=${num_outputs})
<#elseif mode == "CPP_INLINE">
${element.name} = ${input} // TODO: Implement
</#if>
\ No newline at end of file
......@@ -2,10 +2,5 @@
<#assign dim1 = (element.axes[0] + 1)?c>
<#assign dim2 = (element.axes[1] + 1)?c>
<#if mode == "FORWARD_FUNCTION">
${element.name} = self.${element.name}(${input})
${element.name} = F.swapaxes(${input}, dim1=${dim1}, dim2=${dim2})
<#elseif mode == "PYTHON_INLINE">
self.${element.name} = nd.swapaxes(${input}, dim1=${dim1}, dim2=${dim2})
<#elseif mode == "CPP_INLINE">
${element.name} = ${input} // TODO: Implement
</#if>
\ No newline at end of file
......@@ -8,6 +8,9 @@
<#list tc.architectureOutputSymbols as output>
vector<float> ${tc.getName(output)}(${tc.join(output.ioDeclaration.type.dimensions, " * ")});
</#list>
<#list tc.architecture.constants as constant>
vector<float> ${tc.getName(constant)}{${constant.intValue?c}};
</#list>
<#list tc.architecture.networkInstructions as networkInstruction>
<#if networkInstruction.isUnroll()>
......@@ -18,7 +21,7 @@
<#if networkInstruction.body.isTrainable()>
_predictor_${networkInstruction?index}_.predict(${tc.join(tc.getStreamInputNames(networkInstruction.body), ", ")}, ${tc.join(tc.getStreamOutputNames(networkInstruction.body), ", ")});
<#else>
<#-- ${tc.include(networkInstruction.body, "CPP_INLINE")}; -->
${tc.include(networkInstruction.body, "CPP_INLINE")}
</#if>
</#if>
</#list>
......
......@@ -4,6 +4,9 @@
<#list tc.architectureOutputSymbols as output>
${tc.getName(output)} = mx.nd.zeros((batch_size, ${tc.join(output.ioDeclaration.type.dimensions, ", ")},), ctx=mx_context)
</#list>
<#list tc.architecture.constants as constant>
${tc.getName(constant)} = mx.nd.full((batch_size, 1,), ${constant.intValue?c}, ctx=mx_context)
</#list>
<#assign instructionCounter = 0>
<#list tc.architecture.networkInstructions as networkInstruction>
......
......@@ -4,6 +4,9 @@
<#list tc.architectureOutputSymbols as output>
${tc.getName(output)} = mx.nd.zeros((batch_size, ${tc.join(output.ioDeclaration.type.dimensions, ", ")},), ctx=mx_context)
</#list>
<#list tc.architecture.constants as constant>
${tc.getName(constant)} = mx.nd.full((batch_size, 1,), ${constant.intValue?c}, ctx=mx_context)
</#list>
lossList = []
<#list tc.architecture.networkInstructions as networkInstruction>
......
......@@ -201,6 +201,7 @@ class Net_0(gluon.HybridBlock):
# fc8_, output shape: {[10,1,1]}
pass
def hybrid_forward(self, F, data_):
data_ = self.input_normalization_data_(data_)
......
......@@ -365,6 +365,7 @@ class Net_0(gluon.HybridBlock):
# fc32_, output shape: {[10,1,1]}
pass
def hybrid_forward(self, F, data_):
data_ = self.input_normalization_data_(data_)
......
......@@ -241,6 +241,7 @@ class Net_0(gluon.HybridBlock):
# fc15_, output shape: {[1000,1,1]}
pass
def hybrid_forward(self, F, data_):
data_ = self.input_normalization_data_(data_)
......
......@@ -123,6 +123,7 @@ class Net_0(gluon.HybridBlock):
# fc5_, output shape: {[1,1,1]}
pass
def hybrid_forward(self, F, state_, action_):
state_ = self.input_normalization_state_(state_)
......
......@@ -123,6 +123,7 @@ class Net_0(gluon.HybridBlock):
# fc5_, output shape: {[1,1,1]}
pass
def hybrid_forward(self, F, state_, action_):
state_ = self.input_normalization_state_(state_)
......
......@@ -123,6 +123,7 @@ class Net_0(gluon.HybridBlock):
# fc5_, output shape: {[1,1,1]}
pass
def hybrid_forward(self, F, state_, action_):
state_ = self.input_normalization_state_(state_)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment