Commit 6f816979 authored by Sebastian N.

Updated constants

parent 72b82633
Pipeline #200241 failed with stages in 19 seconds
@@ -62,7 +62,9 @@ public class CNNArch2GluonTemplateController extends CNNArchTemplateController {
             }
         }
         else if (element.getType() == VariableSymbol.Type.LAYER) {
-            if (element.getMember() != VariableSymbol.Member.OUTPUT) {
+            if (element.getMember() == VariableSymbol.Member.STATE) {
+                include(TEMPLATE_ELEMENTS_DIR_PATH, "Output", writer, netDefinitionMode);
+            } else if (element.getMember() == VariableSymbol.Member.NONE) {
                 include(TEMPLATE_ELEMENTS_DIR_PATH, element.getLayerVariableDeclaration().getLayer().getName(), writer, netDefinitionMode);
             }
         }
@@ -74,20 +76,6 @@ public class CNNArch2GluonTemplateController extends CNNArchTemplateController {
         setCurrentElement(previousElement);
     }

-    public void include(ConstantSymbol constant, Writer writer, NetDefinitionMode netDefinitionMode) {
-        ArchitectureElementData previousElement = getCurrentElement();
-        setCurrentElement(constant);
-
-        if (constant.isAtomic()) {
-            include(TEMPLATE_ELEMENTS_DIR_PATH, "Const", writer, netDefinitionMode);
-        }
-        else {
-            include((ArchitectureElementSymbol) constant.getResolvedThis().get(), writer, netDefinitionMode);
-        }
-
-        setCurrentElement(previousElement);
-    }
-
     public void include(LayerSymbol layer, Writer writer, NetDefinitionMode netDefinitionMode) {
         ArchitectureElementData previousElement = getCurrentElement();
         setCurrentElement(layer);
@@ -122,7 +110,7 @@ public class CNNArch2GluonTemplateController extends CNNArchTemplateController {
             include((LayerSymbol) architectureElement, writer, netDefinitionMode);
         }
         else if (architectureElement instanceof ConstantSymbol) {
-            include((ConstantSymbol) architectureElement, writer, netDefinitionMode);
         }
         else {
             include((VariableSymbol) architectureElement, writer, netDefinitionMode);
@@ -243,6 +231,9 @@ public class CNNArch2GluonTemplateController extends CNNArchTemplateController {
                 inputs.put(getName(element), dimensions);
             }
+            else if (element instanceof ConstantSymbol) {
+                inputs.put(getName(element), Arrays.asList("1"));
+            }
         }

         inputs.putAll(getStreamLayerVariableMembers(stream, false));
......
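Taken together, the controller changes above move constants out of the element-include path: a ConstantSymbol is no longer rendered through a dedicated template, but is registered as a stream input with dimension 1 and created before the network runs. A minimal sketch of the resulting input tensor, assuming a hypothetical constant named `const1_` with value 1 (mirroring the `mx.nd.full` template lines added further down):

```python
import mxnet as mx

batch_size = 64
mx_context = mx.cpu()

# A constant is now materialized as a regular (batch_size, 1) input tensor
# filled with its value, instead of being emitted inside the network body.
const1_ = mx.nd.full((batch_size, 1,), 1, ctx=mx_context)
print(const1_.shape)  # (64, 1)
```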
@@ -94,6 +94,7 @@ class Net_${networkInstruction?index}(gluon.HybridBlock):
         self.last_layers = {}
         with self.name_scope():
 ${tc.include(networkInstruction.body, "ARCHITECTURE_DEFINITION")}
+            pass

     def hybrid_forward(self, F, ${tc.join(tc.getStreamInputNames(networkInstruction.body), ", ")}):
 ${tc.include(networkInstruction.body, "FORWARD_FUNCTION")}
......
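The added `pass` guards the generated `__init__` against an empty `with` suite: if the `ARCHITECTURE_DEFINITION` include renders no statements for a stream, the file would otherwise fail to parse. A minimal sketch of the degenerate case this protects against (class name hypothetical):

```python
from mxnet import gluon

class EmptyNet(gluon.HybridBlock):  # hypothetical name
    def __init__(self, **kwargs):
        super(EmptyNet, self).__init__(**kwargs)
        with self.name_scope():
            # the template may render nothing here; without a trailing
            # statement the suite would be empty and a SyntaxError
            pass

    def hybrid_forward(self, F, x):
        return x
```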
-<#if mode == "FORWARD_FUNCTION">
-${element.name} = gluon.Constant('${element.name}', ${element.constValue})
-<#elseif mode == "PYTHON_INLINE">
-${element.name} = mx.nd.full((batch_size, 1,), ${element.constValue}, ctx=mx_context)
-<#elseif mode == "CPP_INLINE">
-vector<float> ${element.name}{${element.constValue}};
-</#if>
\ No newline at end of file
-<#if element.member == "NONE">
 <#assign input = element.inputs[0]>
 <#if mode == "ARCHITECTURE_DEFINITION">
 self.${element.name} = CustomGRU(hidden_size=${element.units?c},
@@ -11,16 +10,4 @@
 <#else>
 ${element.name} = self.${element.name}(${input})
 </#if>
-<#elseif element.member == "STATE">
-<#if element.inputs?size gte 1>
-<#assign input = element.inputs[0]>
-<#if mode == "FORWARD_FUNCTION">
-${element.name} = ${input}
-<#elseif mode == "PYTHON_INLINE">
-${element.name} = ${input}
-<#elseif mode == "CPP_INLINE">
-${element.name} = ${input}
-</#if>
-</#if>
-</#if>
\ No newline at end of file
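With the member dispatch moved into the Java controller (STATE now goes through the shared "Output" template), the RNN-family templates here only render the plain layer case. A small sketch (all names hypothetical) of the two lines of generated Python involved:

```python
# What the surviving template branch renders for a GRU layer variable
# "encoder_" fed by "source_": a layer call. What the removed STATE
# branches rendered (now covered by the Output template): an identity
# assignment.
def render_layer_call(name, input_name):
    return "{0} = self.{0}({1})".format(name, input_name)

def render_state_passthrough(name, input_name):
    return "{0} = {1}".format(name, input_name)

print(render_layer_call("encoder_", "source_"))            # encoder_ = self.encoder_(source_)
print(render_state_passthrough("encoder_state_", "source_"))  # encoder_state_ = source_
```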
-<#if element.member == "NONE">
 <#assign input = element.inputs[0]>
 <#if mode == "ARCHITECTURE_DEFINITION">
 self.${element.name} = CustomLSTM(hidden_size=${element.units?c},
@@ -11,16 +10,4 @@
 <#else>
 ${element.name} = self.${element.name}(${input})
 </#if>
-<#elseif element.member == "STATE">
-<#if element.inputs?size gte 1>
-<#assign input = element.inputs[0]>
-<#if mode == "FORWARD_FUNCTION">
-${element.name} = ${input}
-<#elseif mode == "PYTHON_INLINE">
-${element.name} = ${input}
-<#elseif mode == "CPP_INLINE">
-${element.name} = ${input}
-</#if>
-</#if>
-</#if>
\ No newline at end of file
@@ -2,9 +2,4 @@
 <#assign size = element.size?c>
 <#if mode == "FORWARD_FUNCTION">
 ${element.name} = F.one_hot(indices=${input}, depth=${size})
-<#elseif mode == "PYTHON_INLINE">
-${element.name} = nd.one_hot(indices=${input}, depth=${size})
-<#elseif mode == "CPP_INLINE">
-vector<float> ${element.name}(${size}, 0);
-${element.name}[${input}[0]] = 1;
 </#if>
\ No newline at end of file
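Only the `F.one_hot` branch remains. For reference, the NDArray equivalent (which `F` resolves to in imperative mode) behaves as follows:

```python
import mxnet as mx

# integer indices become one-hot rows of length "depth"
idx = mx.nd.array([1, 0, 3])
print(mx.nd.one_hot(idx, depth=4))
# [[0. 1. 0. 0.]
#  [1. 0. 0. 0.]
#  [0. 0. 0. 1.]]
```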
-<#if element.member == "NONE">
 <#assign input = element.inputs[0]>
 <#if mode == "ARCHITECTURE_DEFINITION">
 self.${element.name} = CustomRNN(hidden_size=${element.units?c},
@@ -11,16 +10,4 @@
 <#else>
 ${element.name} = self.${element.name}(${input})
 </#if>
-<#elseif element.member == "STATE">
-<#if element.inputs?size gte 1>
-<#assign input = element.inputs[0]>
-<#if mode == "FORWARD_FUNCTION">
-${element.name} = ${input}
-<#elseif mode == "PYTHON_INLINE">
-${element.name} = ${input}
-<#elseif mode == "CPP_INLINE">
-${element.name} = ${input}
-</#if>
-</#if>
-</#if>
\ No newline at end of file
 <#assign input = element.inputs[0]>
-<#if mode == "ARCHITECTURE_DEFINITION">
-self.${element.name} = Reshape(shape=(${tc.join(element.shape, ",")}))
-<#include "OutputShape.ftl">
-<#elseif mode == "FORWARD_FUNCTION">
-${element.name} = self.${element.name}(${input})
-<#elseif mode == "PYTHON_INLINE">
-self.${element.name} = Reshape(shape=${shape})
+<#if mode == "FORWARD_FUNCTION">
+${element.name} = F.reshape(${input}, shape=(${tc.join(element.shape, ",")}))
 </#if>
\ No newline at end of file
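The reshape now happens directly in the forward pass instead of through a `Reshape` block constructed in `__init__`. A minimal runnable sketch of the shape of the newly generated code (network and variable names hypothetical):

```python
import mxnet as mx
from mxnet import gluon

class ReshapeNet(gluon.HybridBlock):  # hypothetical
    def hybrid_forward(self, F, data_):
        # what '${element.name} = F.reshape(${input}, shape=(...))' expands to
        reshape1_ = F.reshape(data_, shape=(4, 8))
        return reshape1_

net = ReshapeNet()
net.hybridize()
print(net(mx.nd.ones((2, 16))).shape)  # (4, 8)
```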
@@ -2,8 +2,4 @@
 <#assign num_outputs = element.numOutputs?c>
 <#if mode == "FORWARD_FUNCTION">
 ${element.name} = F.split(${input}, axis=1, num_outputs=${num_outputs})
-<#elseif mode == "PYTHON_INLINE">
-${element.name} = mx.nd.split(data=${input}, axis=1, num_outputs=${num_outputs})
-<#elseif mode == "CPP_INLINE">
-${element.name} = ${input} // TODO: Implement
 </#if>
\ No newline at end of file
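The surviving branch relies on `F.split` returning a list of `num_outputs` slices along the given axis, which downstream elements then index into. NDArray sketch:

```python
import mxnet as mx

x = mx.nd.arange(12).reshape((2, 6))
parts = mx.nd.split(x, axis=1, num_outputs=2)  # list of two (2, 3) arrays
print(parts[0].shape, parts[1].shape)
```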
@@ -2,10 +2,5 @@
 <#assign dim1 = (element.axes[0] + 1)?c>
 <#assign dim2 = (element.axes[1] + 1)?c>
 <#if mode == "FORWARD_FUNCTION">
-${element.name} = self.${element.name}(${input})
 ${element.name} = F.swapaxes(${input}, dim1=${dim1}, dim2=${dim2})
-<#elseif mode == "PYTHON_INLINE">
-self.${element.name} = nd.swapaxes(${input}, dim1=${dim1}, dim2=${dim2})
-<#elseif mode == "CPP_INLINE">
-${element.name} = ${input} // TODO: Implement
 </#if>
\ No newline at end of file
@@ -8,6 +8,9 @@
 <#list tc.architectureOutputSymbols as output>
     vector<float> ${tc.getName(output)}(${tc.join(output.ioDeclaration.type.dimensions, " * ")});
 </#list>
+<#list tc.architecture.constants as constant>
+    vector<float> ${tc.getName(constant)}{${constant.intValue?c}};
+</#list>

 <#list tc.architecture.networkInstructions as networkInstruction>
 <#if networkInstruction.isUnroll()>
@@ -18,7 +21,7 @@
 <#if networkInstruction.body.isTrainable()>
     _predictor_${networkInstruction?index}_.predict(${tc.join(tc.getStreamInputNames(networkInstruction.body), ", ")}, ${tc.join(tc.getStreamOutputNames(networkInstruction.body), ", ")});
 <#else>
-<#-- ${tc.include(networkInstruction.body, "CPP_INLINE")}; -->
+${tc.include(networkInstruction.body, "CPP_INLINE")}
 </#if>
 </#if>
 </#list>
......
@@ -4,6 +4,9 @@
 <#list tc.architectureOutputSymbols as output>
 ${tc.getName(output)} = mx.nd.zeros((batch_size, ${tc.join(output.ioDeclaration.type.dimensions, ", ")},), ctx=mx_context)
 </#list>
+<#list tc.architecture.constants as constant>
+${tc.getName(constant)} = mx.nd.full((batch_size, 1,), ${constant.intValue?c}, ctx=mx_context)
+</#list>
 <#assign instructionCounter = 0>
 <#list tc.architecture.networkInstructions as networkInstruction>
......
@@ -4,6 +4,9 @@
 <#list tc.architectureOutputSymbols as output>
 ${tc.getName(output)} = mx.nd.zeros((batch_size, ${tc.join(output.ioDeclaration.type.dimensions, ", ")},), ctx=mx_context)
 </#list>
+<#list tc.architecture.constants as constant>
+${tc.getName(constant)} = mx.nd.full((batch_size, 1,), ${constant.intValue?c}, ctx=mx_context)
+</#list>
 lossList = []
 <#list tc.architecture.networkInstructions as networkInstruction>
......
@@ -201,6 +201,7 @@ class Net_0(gluon.HybridBlock):
             # fc8_, output shape: {[10,1,1]}
+            pass

     def hybrid_forward(self, F, data_):
         data_ = self.input_normalization_data_(data_)
......
@@ -365,6 +365,7 @@ class Net_0(gluon.HybridBlock):
             # fc32_, output shape: {[10,1,1]}
+            pass

     def hybrid_forward(self, F, data_):
         data_ = self.input_normalization_data_(data_)
......
@@ -241,6 +241,7 @@ class Net_0(gluon.HybridBlock):
             # fc15_, output shape: {[1000,1,1]}
+            pass

     def hybrid_forward(self, F, data_):
         data_ = self.input_normalization_data_(data_)
......
@@ -123,6 +123,7 @@ class Net_0(gluon.HybridBlock):
             # fc5_, output shape: {[1,1,1]}
+            pass

     def hybrid_forward(self, F, state_, action_):
         state_ = self.input_normalization_state_(state_)
......
@@ -123,6 +123,7 @@ class Net_0(gluon.HybridBlock):
             # fc5_, output shape: {[1,1,1]}
+            pass

     def hybrid_forward(self, F, state_, action_):
         state_ = self.input_normalization_state_(state_)
......
@@ -123,6 +123,7 @@ class Net_0(gluon.HybridBlock):
             # fc5_, output shape: {[1,1,1]}
+            pass

     def hybrid_forward(self, F, state_, action_):
         state_ = self.input_normalization_state_(state_)
......