Commit b6332ec9 authored by Sebastian N.

Merge branch 'develop' into rnn

parents 637fc715 2157ebad
Pipeline #175541 failed with stages in 3 minutes and 24 seconds
<#assign flatten = element.element.inputTypes[0].height != 1 || element.element.inputTypes[0].width != 1>
<#assign input = element.inputs[0]>
<#assign units = element.units?c>
<#assign use_bias = element.noBias?string("False","True")>
<#assign flatten = element.flatten?string("True","False")>
<#if mode == "ARCHITECTURE_DEFINITION">
<#if flatten>
self.${element.name}flatten = gluon.nn.Flatten()
</#if>
self.${element.name} = gluon.nn.Dense(units=${units}, use_bias=${use_bias})
self.${element.name} = gluon.nn.Dense(units=${units}, use_bias=${use_bias}, flatten=${flatten})
<#include "OutputShape.ftl">
<#elseif mode == "FORWARD_FUNCTION">
<#if flatten>
${element.name}flatten_ = self.${element.name}flatten(${input})
<#assign input = element.name + "flatten_">
</#if>
${element.name} = self.${element.name}(${input})
</#if>
\ No newline at end of file
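For reference, a minimal sketch (not part of this commit) of the kind of Python the updated FullyConnected template now emits; the layer name fc1_, units=128, and the input shape are made-up values. Since gluon.nn.Dense flattens its input internally when flatten=True, the previously generated gluon.nn.Flatten() layer and the extra forward step are no longer needed.

import mxnet as mx
from mxnet import gluon

class FcExample(gluon.HybridBlock):
    def __init__(self, **kwargs):
        super(FcExample, self).__init__(**kwargs)
        with self.name_scope():
            # flatten=True collapses all non-batch axes before the matrix multiply,
            # replacing the separately generated gluon.nn.Flatten() layer
            self.fc1_ = gluon.nn.Dense(units=128, use_bias=True, flatten=True)

    def hybrid_forward(self, F, x):
        return self.fc1_(x)

net = FcExample()
net.initialize()
print(net(mx.nd.random.uniform(shape=(2, 3, 4, 4))).shape)  # (2, 128)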
<#if element.member == "NONE">
<#assign input = element.inputs[0]>
<#if mode == "ARCHITECTURE_DEFINITION">
self.${element.name} = gluon.rnn.GRU(hidden_size=${element.units?c}, num_layers=${element.layers?c}, layout='NTC')
self.${element.name} = gluon.rnn.GRU(hidden_size=${element.units?c},
num_layers=${element.layers?c},
bidirectional=${element.bidirectional?string("True", "False")},
layout='NTC')
<#include "OutputShape.ftl">
<#elseif mode == "FORWARD_FUNCTION">
<#if element.isVariable()>
......
<#if element.member == "NONE">
<#assign input = element.inputs[0]>
<#if mode == "ARCHITECTURE_DEFINITION">
self.${element.name} = gluon.rnn.LSTM(hidden_size=${element.units?c}, num_layers=${element.layers?c}, layout='NTC')
self.${element.name} = gluon.rnn.LSTM(hidden_size=${element.units?c},
num_layers=${element.layers?c},
bidirectional=${element.bidirectional?string("True", "False")},
layout='NTC')
<#include "OutputShape.ftl">
<#elseif mode == "FORWARD_FUNCTION">
<#if element.isVariable()>
......
<#if element.member == "NONE">
<#assign input = element.inputs[0]>
<#if mode == "ARCHITECTURE_DEFINITION">
self.${element.name} = gluon.rnn.RNN(hidden_size=${element.units?c}, num_layers=${element.layers?c}, activation='tanh', layout='NTC')
self.${element.name} = gluon.rnn.RNN(hidden_size=${element.units?c},
num_layers=${element.layers?c},
bidirectional=${element.bidirectional?string("True", "False")},
activation='tanh',
layout='NTC')
<#include "OutputShape.ftl">
<#elseif mode == "FORWARD_FUNCTION">
<#if element.isVariable()>
......
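As an illustration only (sizes made up, not taken from the commit), the recurrent layers these templates instantiate behave as follows in Gluon: with layout='NTC' the input is (batch, time, channels), and bidirectional=True doubles the channel dimension of the output.

import mxnet as mx
from mxnet import gluon

# Hypothetical sizes for demonstration; the generator fills in hidden_size,
# num_layers and bidirectional from the model definition.
gru = gluon.rnn.GRU(hidden_size=32, num_layers=2, bidirectional=True, layout='NTC')
gru.initialize()
x = mx.nd.random.uniform(shape=(4, 10, 8))  # (batch N, time T, channels C)
print(gru(x).shape)  # (4, 10, 64): bidirectional output width is 2 * hidden_size

The same pattern applies to the gluon.rnn.LSTM and gluon.rnn.RNN layers generated by the other two templates.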
@@ -191,18 +191,17 @@ class Net_0(gluon.HybridBlock):
self.concatenate6_ = Concatenate(dim=1)
# concatenate6_, output shape: {[256,6,6]}
self.fc6_flatten = gluon.nn.Flatten()
self.fc6_ = gluon.nn.Dense(units=4096, use_bias=True)
self.fc6_ = gluon.nn.Dense(units=4096, use_bias=True, flatten=True)
# fc6_, output shape: {[4096,1,1]}
self.relu6_ = gluon.nn.Activation(activation='relu')
self.dropout6_ = gluon.nn.Dropout(rate=0.5)
self.fc7_ = gluon.nn.Dense(units=4096, use_bias=True)
self.fc7_ = gluon.nn.Dense(units=4096, use_bias=True, flatten=True)
# fc7_, output shape: {[4096,1,1]}
self.relu7_ = gluon.nn.Activation(activation='relu')
self.dropout7_ = gluon.nn.Dropout(rate=0.5)
self.fc8_ = gluon.nn.Dense(units=10, use_bias=True)
self.fc8_ = gluon.nn.Dense(units=10, use_bias=True, flatten=True)
# fc8_, output shape: {[10,1,1]}
self.softmax8_ = Softmax()
@@ -262,8 +261,7 @@ class Net_0(gluon.HybridBlock):
pool5_2_ = self.pool5_2_(conv5_2_)
relu5_2_ = self.relu5_2_(pool5_2_)
concatenate6_ = self.concatenate6_(relu5_1_, relu5_2_)
fc6_flatten_ = self.fc6_flatten(concatenate6_)
fc6_ = self.fc6_(fc6_flatten_)
fc6_ = self.fc6_(concatenate6_)
relu6_ = self.relu6_(fc6_)
dropout6_ = self.dropout6_(relu6_)
fc7_ = self.fc7_(dropout6_)
......
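A quick sanity check (not part of the commit) that dropping the fc6_flatten layer is behavior-preserving: with flatten=True the Dense layer flattens the 4-D concatenate6_ output itself, so the raw tensor and a pre-flattened tensor give identical results. The shape below mirrors the generated comment (# concatenate6_, output shape: {[256,6,6]}).

import mxnet as mx
from mxnet import gluon

x = mx.nd.random.uniform(shape=(1, 256, 6, 6))
flatten = gluon.nn.Flatten()
dense = gluon.nn.Dense(units=4096, use_bias=True, flatten=True)
dense.initialize()
# Same parameters, same result, whether or not the input is flattened beforehand
print((dense(x) - dense(flatten(x))).abs().max())  # expected: [0.]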
@@ -349,11 +349,11 @@ class Net_0(gluon.HybridBlock):
self.globalpooling31_ = gluon.nn.GlobalAvgPool2D()
# globalpooling31_, output shape: {[64,1,1]}
self.fc31_ = gluon.nn.Dense(units=128, use_bias=True)
self.fc31_ = gluon.nn.Dense(units=128, use_bias=True, flatten=True)
# fc31_, output shape: {[128,1,1]}
self.dropout31_ = gluon.nn.Dropout(rate=0.5)
self.fc32_ = gluon.nn.Dense(units=10, use_bias=True)
self.fc32_ = gluon.nn.Dense(units=10, use_bias=True, flatten=True)
# fc32_, output shape: {[10,1,1]}
self.softmax32_ = Softmax()
......
@@ -219,18 +219,17 @@ class Net_0(gluon.HybridBlock):
strides=(2,2))
# pool13_, output shape: {[512,7,7]}
self.fc13_flatten = gluon.nn.Flatten()
self.fc13_ = gluon.nn.Dense(units=4096, use_bias=True)
self.fc13_ = gluon.nn.Dense(units=4096, use_bias=True, flatten=True)
# fc13_, output shape: {[4096,1,1]}
self.relu14_ = gluon.nn.Activation(activation='relu')
self.dropout14_ = gluon.nn.Dropout(rate=0.5)
self.fc14_ = gluon.nn.Dense(units=4096, use_bias=True)
self.fc14_ = gluon.nn.Dense(units=4096, use_bias=True, flatten=True)
# fc14_, output shape: {[4096,1,1]}
self.relu15_ = gluon.nn.Activation(activation='relu')
self.dropout15_ = gluon.nn.Dropout(rate=0.5)
self.fc15_ = gluon.nn.Dense(units=1000, use_bias=True)
self.fc15_ = gluon.nn.Dense(units=1000, use_bias=True, flatten=True)
# fc15_, output shape: {[1000,1,1]}
self.softmax15_ = Softmax()
@@ -282,8 +281,7 @@ class Net_0(gluon.HybridBlock):
conv13_ = self.conv13_(conv13_padding)
relu13_ = self.relu13_(conv13_)
pool13_ = self.pool13_(relu13_)
fc13_flatten_ = self.fc13_flatten(pool13_)
fc13_ = self.fc13_(fc13_flatten_)
fc13_ = self.fc13_(pool13_)
relu14_ = self.relu14_(fc13_)
dropout14_ = self.dropout14_(relu14_)
fc14_ = self.fc14_(dropout14_)
......
@@ -90,11 +90,11 @@ class Net_0(gluon.HybridBlock):
else:
self.input_normalization_state_ = NoNormalization()
self.fc2_1_ = gluon.nn.Dense(units=300, use_bias=True)
self.fc2_1_ = gluon.nn.Dense(units=300, use_bias=True, flatten=True)
# fc2_1_, output shape: {[300,1,1]}
self.relu2_1_ = gluon.nn.Activation(activation='relu')
self.fc3_1_ = gluon.nn.Dense(units=600, use_bias=True)
self.fc3_1_ = gluon.nn.Dense(units=600, use_bias=True, flatten=True)
# fc3_1_, output shape: {[600,1,1]}
if data_mean:
@@ -104,14 +104,14 @@ class Net_0(gluon.HybridBlock):
else:
self.input_normalization_action_ = NoNormalization()
self.fc2_2_ = gluon.nn.Dense(units=600, use_bias=True)
self.fc2_2_ = gluon.nn.Dense(units=600, use_bias=True, flatten=True)
# fc2_2_, output shape: {[600,1,1]}
self.fc4_ = gluon.nn.Dense(units=600, use_bias=True)
self.fc4_ = gluon.nn.Dense(units=600, use_bias=True, flatten=True)
# fc4_, output shape: {[600,1,1]}
self.relu4_ = gluon.nn.Activation(activation='relu')
self.fc5_ = gluon.nn.Dense(units=1, use_bias=True)
self.fc5_ = gluon.nn.Dense(units=1, use_bias=True, flatten=True)
# fc5_, output shape: {[1,1,1]}
......
@@ -90,11 +90,11 @@ class Net_0(gluon.HybridBlock):
else:
self.input_normalization_state_ = NoNormalization()
self.fc2_1_ = gluon.nn.Dense(units=300, use_bias=True)
self.fc2_1_ = gluon.nn.Dense(units=300, use_bias=True, flatten=True)
# fc2_1_, output shape: {[300,1,1]}
self.relu2_1_ = gluon.nn.Activation(activation='relu')
self.fc3_1_ = gluon.nn.Dense(units=600, use_bias=True)
self.fc3_1_ = gluon.nn.Dense(units=600, use_bias=True, flatten=True)
# fc3_1_, output shape: {[600,1,1]}
if data_mean:
@@ -104,14 +104,14 @@ class Net_0(gluon.HybridBlock):
else:
self.input_normalization_action_ = NoNormalization()
self.fc2_2_ = gluon.nn.Dense(units=600, use_bias=True)
self.fc2_2_ = gluon.nn.Dense(units=600, use_bias=True, flatten=True)
# fc2_2_, output shape: {[600,1,1]}
self.fc4_ = gluon.nn.Dense(units=600, use_bias=True)
self.fc4_ = gluon.nn.Dense(units=600, use_bias=True, flatten=True)
# fc4_, output shape: {[600,1,1]}
self.relu4_ = gluon.nn.Activation(activation='relu')
self.fc5_ = gluon.nn.Dense(units=1, use_bias=True)
self.fc5_ = gluon.nn.Dense(units=1, use_bias=True, flatten=True)
# fc5_, output shape: {[1,1,1]}
......
@@ -90,11 +90,11 @@ class Net_0(gluon.HybridBlock):
else:
self.input_normalization_state_ = NoNormalization()
self.fc2_1_ = gluon.nn.Dense(units=300, use_bias=True)
self.fc2_1_ = gluon.nn.Dense(units=300, use_bias=True, flatten=True)
# fc2_1_, output shape: {[300,1,1]}
self.relu2_1_ = gluon.nn.Activation(activation='relu')
self.fc3_1_ = gluon.nn.Dense(units=600, use_bias=True)
self.fc3_1_ = gluon.nn.Dense(units=600, use_bias=True, flatten=True)
# fc3_1_, output shape: {[600,1,1]}
if data_mean:
@@ -104,14 +104,14 @@ class Net_0(gluon.HybridBlock):
else:
self.input_normalization_action_ = NoNormalization()
self.fc2_2_ = gluon.nn.Dense(units=600, use_bias=True)
self.fc2_2_ = gluon.nn.Dense(units=600, use_bias=True, flatten=True)
# fc2_2_, output shape: {[600,1,1]}
self.fc4_ = gluon.nn.Dense(units=600, use_bias=True)
self.fc4_ = gluon.nn.Dense(units=600, use_bias=True, flatten=True)
# fc4_, output shape: {[600,1,1]}
self.relu4_ = gluon.nn.Activation(activation='relu')
self.fc5_ = gluon.nn.Dense(units=1, use_bias=True)
self.fc5_ = gluon.nn.Dense(units=1, use_bias=True, flatten=True)
# fc5_, output shape: {[1,1,1]}
......