Commit 00b67fc7 authored by Sebastian Nickels


Updated tests to support flatten parameter for FullyConnected layer and bidirectional parameter for RNN layers
parent b018cdec
Pipeline #173125 passed with stages in 7 minutes and 47 seconds
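For context: in the Gluon API, gluon.nn.Dense exposes a flatten argument that controls whether inputs with more than two dimensions are collapsed to (batch, -1) before the affine transform; with flatten=False the layer acts only on the last axis. A minimal sketch of the difference (illustrative only, not taken from this commit; shapes are made up):

    from mxnet import gluon, nd

    x = nd.random.uniform(shape=(8, 3, 32, 32))         # (batch, channels, height, width)

    fc_flat = gluon.nn.Dense(units=128, flatten=True)   # collapses everything but the batch axis
    fc_last = gluon.nn.Dense(units=128, flatten=False)  # applies only to the last axis
    fc_flat.initialize()
    fc_last.initialize()

    print(fc_flat(x).shape)   # (8, 128)
    print(fc_last(x).shape)   # (8, 3, 32, 128)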
@@ -112,12 +112,11 @@ class Net_0(gluon.HybridBlock):
                 strides=(2,2))
             # pool2_, output shape: {[50,4,4]}
-            self.fc2_flatten = gluon.nn.Flatten()
-            self.fc2_ = gluon.nn.Dense(units=500, use_bias=True)
+            self.fc2_ = gluon.nn.Dense(units=500, use_bias=True, flatten=True)
             # fc2_, output shape: {[500,1,1]}
             self.relu2_ = gluon.nn.Activation(activation='relu')
-            self.fc3_ = gluon.nn.Dense(units=10, use_bias=True)
+            self.fc3_ = gluon.nn.Dense(units=10, use_bias=True, flatten=True)
             # fc3_, output shape: {[10,1,1]}
             self.softmax3_ = Softmax()
@@ -129,8 +128,7 @@ class Net_0(gluon.HybridBlock):
         pool1_ = self.pool1_(conv1_)
         conv2_ = self.conv2_(pool1_)
         pool2_ = self.pool2_(conv2_)
-        fc2_flatten_ = self.fc2_flatten(pool2_)
-        fc2_ = self.fc2_(fc2_flatten_)
+        fc2_ = self.fc2_(pool2_)
         relu2_ = self.relu2_(fc2_)
         fc3_ = self.fc3_(relu2_)
         softmax3_ = self.softmax3_(fc3_)
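Since Dense flattens higher-dimensional inputs itself when flatten=True, the explicit gluon.nn.Flatten() layer in front of fc2_ becomes redundant, which is why the two hunks above drop it. A quick check of that equivalence (a sketch reusing the [50,4,4] pool2_ shape from the comment above; variable names are illustrative):

    from mxnet import gluon, nd

    flatten = gluon.nn.Flatten()
    fc2 = gluon.nn.Dense(units=500, use_bias=True, flatten=True)
    fc2.initialize()

    pool2_out = nd.random.uniform(shape=(1, 50, 4, 4))  # batch of 1, shape [50,4,4] as in the comment above

    old_path = fc2(flatten(pool2_out))   # old forward pass: explicit Flatten, then Dense
    new_path = fc2(pool2_out)            # new forward pass: Dense flattens internally

    print((old_path - new_path).abs().max())  # prints 0 -- both paths are identical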
@@ -90,15 +90,15 @@ class Net_0(gluon.HybridBlock):
             else:
                 self.input_normalization_state_ = NoNormalization()
-            self.fc1_ = gluon.nn.Dense(units=128, use_bias=True)
+            self.fc1_ = gluon.nn.Dense(units=128, use_bias=True, flatten=True)
             # fc1_, output shape: {[128,1,1]}
             self.tanh1_ = gluon.nn.Activation(activation='tanh')
-            self.fc2_ = gluon.nn.Dense(units=256, use_bias=True)
+            self.fc2_ = gluon.nn.Dense(units=256, use_bias=True, flatten=True)
             # fc2_, output shape: {[256,1,1]}
             self.tanh2_ = gluon.nn.Activation(activation='tanh')
-            self.fc3_ = gluon.nn.Dense(units=2, use_bias=True)
+            self.fc3_ = gluon.nn.Dense(units=2, use_bias=True, flatten=True)
             # fc3_, output shape: {[2,1,1]}
@@ -90,15 +90,15 @@ class Net_0(gluon.HybridBlock):
             else:
                 self.input_normalization_state_ = NoNormalization()
-            self.fc1_ = gluon.nn.Dense(units=300, use_bias=True)
+            self.fc1_ = gluon.nn.Dense(units=300, use_bias=True, flatten=True)
             # fc1_, output shape: {[300,1,1]}
             self.relu1_ = gluon.nn.Activation(activation='relu')
-            self.fc2_ = gluon.nn.Dense(units=300, use_bias=True)
+            self.fc2_ = gluon.nn.Dense(units=300, use_bias=True, flatten=True)
             # fc2_, output shape: {[300,1,1]}
             self.relu2_ = gluon.nn.Activation(activation='relu')
-            self.fc3_ = gluon.nn.Dense(units=1, use_bias=True)
+            self.fc3_ = gluon.nn.Dense(units=1, use_bias=True, flatten=True)
             # fc3_, output shape: {[1,1,1]}
             self.tanh3_ = gluon.nn.Activation(activation='tanh')
@@ -90,11 +90,11 @@ class Net_0(gluon.HybridBlock):
             else:
                 self.input_normalization_state_ = NoNormalization()
-            self.fc2_1_ = gluon.nn.Dense(units=400, use_bias=True)
+            self.fc2_1_ = gluon.nn.Dense(units=400, use_bias=True, flatten=True)
             # fc2_1_, output shape: {[400,1,1]}
             self.relu2_1_ = gluon.nn.Activation(activation='relu')
-            self.fc3_1_ = gluon.nn.Dense(units=300, use_bias=True)
+            self.fc3_1_ = gluon.nn.Dense(units=300, use_bias=True, flatten=True)
             # fc3_1_, output shape: {[300,1,1]}
             if data_mean:
@@ -104,11 +104,11 @@ class Net_0(gluon.HybridBlock):
             else:
                 self.input_normalization_action_ = NoNormalization()
-            self.fc2_2_ = gluon.nn.Dense(units=300, use_bias=True)
+            self.fc2_2_ = gluon.nn.Dense(units=300, use_bias=True, flatten=True)
             # fc2_2_, output shape: {[300,1,1]}
             self.relu4_ = gluon.nn.Activation(activation='relu')
-            self.fc4_ = gluon.nn.Dense(units=1, use_bias=True)
+            self.fc4_ = gluon.nn.Dense(units=1, use_bias=True, flatten=True)
             # fc4_, output shape: {[1,1,1]}
@@ -90,15 +90,15 @@ class Net_0(gluon.HybridBlock):
             else:
                 self.input_normalization_state_ = NoNormalization()
-            self.fc1_ = gluon.nn.Dense(units=512, use_bias=True)
+            self.fc1_ = gluon.nn.Dense(units=512, use_bias=True, flatten=True)
             # fc1_, output shape: {[512,1,1]}
             self.tanh1_ = gluon.nn.Activation(activation='tanh')
-            self.fc2_ = gluon.nn.Dense(units=256, use_bias=True)
+            self.fc2_ = gluon.nn.Dense(units=256, use_bias=True, flatten=True)
             # fc2_, output shape: {[256,1,1]}
             self.tanh2_ = gluon.nn.Activation(activation='tanh')
-            self.fc3_ = gluon.nn.Dense(units=30, use_bias=True)
+            self.fc3_ = gluon.nn.Dense(units=30, use_bias=True, flatten=True)
             # fc3_, output shape: {[30,1,1]}
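The hunks above only show the flatten changes; the bidirectional parameter for the RNN layers mentioned in the commit message does not appear in this excerpt. As a rough illustration of what that flag means in Gluon (a sketch with made-up sizes, not code from this commit): a bidirectional layer processes the sequence in both directions and concatenates the hidden states, so the output feature dimension doubles.

    from mxnet import gluon, nd

    # bidirectional=True runs the sequence forwards and backwards and concatenates
    # both hidden states, so the output feature size is 2 * hidden_size.
    rnn = gluon.rnn.LSTM(hidden_size=128, num_layers=1, bidirectional=True, layout='NTC')
    rnn.initialize()

    x = nd.random.uniform(shape=(4, 10, 32))  # (batch, time, features) for layout='NTC'
    print(rnn(x).shape)                       # (4, 10, 256) -- 2 * hidden_size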