Commit 16a2fefe authored by Evgeny Kusmenko

Merge branch 'develop' into 'master'

Updated tests and version numbers

See merge request !27
parents 71c518c8 00b67fc7
Pipeline #173549 passed with stages in 10 minutes and 4 seconds
@@ -15,12 +15,12 @@
     <properties>
         <!-- .. SE-Libraries .................................................. -->
-        <emadl.version>0.2.9-SNAPSHOT</emadl.version>
+        <emadl.version>0.2.10-SNAPSHOT</emadl.version>
         <CNNTrain.version>0.3.6-SNAPSHOT</CNNTrain.version>
-        <cnnarch-generator.version>0.0.3-SNAPSHOT</cnnarch-generator.version>
+        <cnnarch-generator.version>0.0.4-SNAPSHOT</cnnarch-generator.version>
         <cnnarch-mxnet-generator.version>0.2.17-SNAPSHOT</cnnarch-mxnet-generator.version>
         <cnnarch-caffe2-generator.version>0.2.13-SNAPSHOT</cnnarch-caffe2-generator.version>
-        <cnnarch-gluon-generator.version>0.2.7-SNAPSHOT</cnnarch-gluon-generator.version>
+        <cnnarch-gluon-generator.version>0.2.8-SNAPSHOT</cnnarch-gluon-generator.version>
         <embedded-montiarc-math-opt-generator>0.1.4</embedded-montiarc-math-opt-generator>
         <!-- .. Libraries .................................................. -->
...
@@ -112,12 +112,11 @@ class Net_0(gluon.HybridBlock):
                 strides=(2,2))
             # pool2_, output shape: {[50,4,4]}
-            self.fc2_flatten = gluon.nn.Flatten()
-            self.fc2_ = gluon.nn.Dense(units=500, use_bias=True)
+            self.fc2_ = gluon.nn.Dense(units=500, use_bias=True, flatten=True)
             # fc2_, output shape: {[500,1,1]}
             self.relu2_ = gluon.nn.Activation(activation='relu')
-            self.fc3_ = gluon.nn.Dense(units=10, use_bias=True)
+            self.fc3_ = gluon.nn.Dense(units=10, use_bias=True, flatten=True)
             # fc3_, output shape: {[10,1,1]}
             self.softmax3_ = Softmax()
@@ -129,8 +128,7 @@ class Net_0(gluon.HybridBlock):
         pool1_ = self.pool1_(conv1_)
         conv2_ = self.conv2_(pool1_)
         pool2_ = self.pool2_(conv2_)
-        fc2_flatten_ = self.fc2_flatten(pool2_)
-        fc2_ = self.fc2_(fc2_flatten_)
+        fc2_ = self.fc2_(pool2_)
         relu2_ = self.relu2_(fc2_)
         fc3_ = self.fc3_(relu2_)
         softmax3_ = self.softmax3_(fc3_)
...
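The two hunks above replace the generated Flatten-plus-Dense pair with a single Dense layer. This is behavior-preserving: in Gluon, `gluon.nn.Dense` flattens every non-batch axis of its input when `flatten=True` (which is also the parameter's default), so the explicit `gluon.nn.Flatten()` layer becomes redundant. A minimal standalone sketch, not taken from the generated code, that checks the equivalence on the `[50,4,4]` pooling output shape from the diff:

```python
import mxnet as mx
from mxnet import gluon, nd

# Old formulation: explicit flatten layer feeding a Dense layer.
old = gluon.nn.HybridSequential()
old.add(gluon.nn.Flatten())
old.add(gluon.nn.Dense(units=500, use_bias=True))

# New formulation: Dense flattens its own input beyond the batch axis.
new = gluon.nn.Dense(units=500, use_bias=True, flatten=True)

old.initialize()
new.initialize()

# pool2_ output shape from the diff, with a batch axis of 1.
x = nd.random.uniform(shape=(1, 50, 4, 4))
print(old(x).shape)  # (1, 500)
print(new(x).shape)  # (1, 500)
```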
@@ -136,6 +136,7 @@ class CNNSupervisedTrainer_mnist_mnistClassifier_net:
             predictions_label = batch.label[0].as_in_context(mx_context)
             with autograd.record():
+                predictions_ = mx.nd.zeros((10,), ctx=mx_context)
                 predictions_ = self._networks[0](image_)
@@ -172,6 +173,7 @@ class CNNSupervisedTrainer_mnist_mnistClassifier_net:
             ]
             if True:
+                predictions_ = mx.nd.zeros((10,), ctx=mx_context)
                 predictions_ = self._networks[0](image_)
@@ -192,6 +194,7 @@ class CNNSupervisedTrainer_mnist_mnistClassifier_net:
             ]
             if True:
+                predictions_ = mx.nd.zeros((10,), ctx=mx_context)
                 predictions_ = self._networks[0](image_)
...
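The three trainer hunks above all introduce the same line: `predictions_` is now pre-allocated as a zero NDArray on the training context before being overwritten by the network's forward pass, presumably so the variable always exists with the expected output shape `(10,)`. A minimal sketch of the pattern, where `mx_context`, `net`, and `image_` are hypothetical stand-ins for the generated trainer's context, network, and input batch:

```python
import mxnet as mx

# Hypothetical stand-ins; the generated trainer builds these elsewhere.
mx_context = mx.cpu()
net = mx.gluon.nn.Dense(10)
net.initialize(ctx=mx_context)
image_ = mx.nd.random.uniform(shape=(1, 784), ctx=mx_context)

# The pattern from the hunks: pre-allocate the prediction array on the
# target context, then overwrite it with the network output.
predictions_ = mx.nd.zeros((10,), ctx=mx_context)
predictions_ = net(image_)
print(predictions_.shape)  # (1, 10)
```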
@@ -90,15 +90,15 @@ class Net_0(gluon.HybridBlock):
             else:
                 self.input_normalization_state_ = NoNormalization()
-            self.fc1_ = gluon.nn.Dense(units=128, use_bias=True)
+            self.fc1_ = gluon.nn.Dense(units=128, use_bias=True, flatten=True)
             # fc1_, output shape: {[128,1,1]}
             self.tanh1_ = gluon.nn.Activation(activation='tanh')
-            self.fc2_ = gluon.nn.Dense(units=256, use_bias=True)
+            self.fc2_ = gluon.nn.Dense(units=256, use_bias=True, flatten=True)
             # fc2_, output shape: {[256,1,1]}
             self.tanh2_ = gluon.nn.Activation(activation='tanh')
-            self.fc3_ = gluon.nn.Dense(units=2, use_bias=True)
+            self.fc3_ = gluon.nn.Dense(units=2, use_bias=True, flatten=True)
             # fc3_, output shape: {[2,1,1]}
...
@@ -90,15 +90,15 @@ class Net_0(gluon.HybridBlock):
             else:
                 self.input_normalization_state_ = NoNormalization()
-            self.fc1_ = gluon.nn.Dense(units=300, use_bias=True)
+            self.fc1_ = gluon.nn.Dense(units=300, use_bias=True, flatten=True)
             # fc1_, output shape: {[300,1,1]}
             self.relu1_ = gluon.nn.Activation(activation='relu')
-            self.fc2_ = gluon.nn.Dense(units=300, use_bias=True)
+            self.fc2_ = gluon.nn.Dense(units=300, use_bias=True, flatten=True)
             # fc2_, output shape: {[300,1,1]}
             self.relu2_ = gluon.nn.Activation(activation='relu')
-            self.fc3_ = gluon.nn.Dense(units=1, use_bias=True)
+            self.fc3_ = gluon.nn.Dense(units=1, use_bias=True, flatten=True)
             # fc3_, output shape: {[1,1,1]}
             self.tanh3_ = gluon.nn.Activation(activation='tanh')
...
@@ -90,11 +90,11 @@ class Net_0(gluon.HybridBlock):
             else:
                 self.input_normalization_state_ = NoNormalization()
-            self.fc2_1_ = gluon.nn.Dense(units=400, use_bias=True)
+            self.fc2_1_ = gluon.nn.Dense(units=400, use_bias=True, flatten=True)
             # fc2_1_, output shape: {[400,1,1]}
             self.relu2_1_ = gluon.nn.Activation(activation='relu')
-            self.fc3_1_ = gluon.nn.Dense(units=300, use_bias=True)
+            self.fc3_1_ = gluon.nn.Dense(units=300, use_bias=True, flatten=True)
             # fc3_1_, output shape: {[300,1,1]}
             if data_mean:
@@ -104,11 +104,11 @@ class Net_0(gluon.HybridBlock):
             else:
                 self.input_normalization_action_ = NoNormalization()
-            self.fc2_2_ = gluon.nn.Dense(units=300, use_bias=True)
+            self.fc2_2_ = gluon.nn.Dense(units=300, use_bias=True, flatten=True)
             # fc2_2_, output shape: {[300,1,1]}
             self.relu4_ = gluon.nn.Activation(activation='relu')
-            self.fc4_ = gluon.nn.Dense(units=1, use_bias=True)
+            self.fc4_ = gluon.nn.Dense(units=1, use_bias=True, flatten=True)
             # fc4_, output shape: {[1,1,1]}
...
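The hunks above touch a two-branch network: a state branch (`fc2_1_`, `relu2_1_`, `fc3_1_`) and an action branch (`fc2_2_`), both ending in 300 units before `relu4_` and the scalar output `fc4_`, the usual shape of a DDPG-style critic. How the branches are merged is not visible in this excerpt; the sketch below is a hypothetical reconstruction assuming an element-wise addition of the two 300-unit branches, with made-up input sizes:

```python
import mxnet as mx
from mxnet import gluon, nd

class Critic(gluon.HybridBlock):
    """Hypothetical reconstruction of the two-branch net in the hunks above."""
    def __init__(self, **kwargs):
        super(Critic, self).__init__(**kwargs)
        # State branch: 400 -> 300 units, as in the diff.
        self.fc2_1_ = gluon.nn.Dense(units=400, use_bias=True, flatten=True)
        self.relu2_1_ = gluon.nn.Activation(activation='relu')
        self.fc3_1_ = gluon.nn.Dense(units=300, use_bias=True, flatten=True)
        # Action branch: 300 units, as in the diff.
        self.fc2_2_ = gluon.nn.Dense(units=300, use_bias=True, flatten=True)
        self.relu4_ = gluon.nn.Activation(activation='relu')
        self.fc4_ = gluon.nn.Dense(units=1, use_bias=True, flatten=True)

    def hybrid_forward(self, F, state, action):
        s = self.fc3_1_(self.relu2_1_(self.fc2_1_(state)))
        a = self.fc2_2_(action)
        # ASSUMPTION: branches merged by addition; the excerpt does not
        # show the actual merge operation.
        return self.fc4_(self.relu4_(s + a))

critic = Critic()
critic.initialize()
# Made-up state and action dimensions (8 and 2) for a shape check.
q = critic(nd.random.uniform(shape=(1, 8)), nd.random.uniform(shape=(1, 2)))
print(q.shape)  # (1, 1)
```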
@@ -90,15 +90,15 @@ class Net_0(gluon.HybridBlock):
             else:
                 self.input_normalization_state_ = NoNormalization()
-            self.fc1_ = gluon.nn.Dense(units=512, use_bias=True)
+            self.fc1_ = gluon.nn.Dense(units=512, use_bias=True, flatten=True)
             # fc1_, output shape: {[512,1,1]}
             self.tanh1_ = gluon.nn.Activation(activation='tanh')
-            self.fc2_ = gluon.nn.Dense(units=256, use_bias=True)
+            self.fc2_ = gluon.nn.Dense(units=256, use_bias=True, flatten=True)
             # fc2_, output shape: {[256,1,1]}
             self.tanh2_ = gluon.nn.Activation(activation='tanh')
-            self.fc3_ = gluon.nn.Dense(units=30, use_bias=True)
+            self.fc3_ = gluon.nn.Dense(units=30, use_bias=True, flatten=True)
             # fc3_, output shape: {[30,1,1]}
...