Commit f3cd01b5 authored by Sebastian N.'s avatar Sebastian N.

Updated tests


Former-commit-id: b02d68d7
parent 40d664e2
......@@ -58,6 +58,7 @@ public class IntegrationGluonTest extends IntegrationTest {
assertTrue(Log.getFindings().isEmpty());
}
@Ignore
@Test
public void testShowAttendTell() {
Log.getFindings().clear();
......
......@@ -17,7 +17,7 @@ if __name__ == "__main__":
num_epoch=11,
batch_size=64,
context='gpu',
eval_metric='accuracy{{}}',
eval_metric='accuracy',
opt_type='adam',
epsilon=1.0E-8,
weight_decay=0.001,
......
......@@ -3,6 +3,8 @@ import h5py
import mxnet as mx
import logging
import sys
import numpy as np
import cv2
from mxnet import nd
class CNNDataLoader_mnist_mnistClassifier_net:
......@@ -65,6 +67,48 @@ class CNNDataLoader_mnist_mnistClassifier_net:
return train_iter, train_test_iter, test_iter, data_mean, data_std, train_images, test_images
def load_data_img(self, batch_size, img_size):
    """Load the train and test HDF5 sets, resize every image, and return
    one combined training iterator plus per-pixel normalization stats.

    Parameters:
        batch_size: batch size for the returned mx.io.NDArrayIter.
        img_size: (width, height) target size for each image
                  (presumably (width, height) — matches cv2.resize's dsize
                  argument order; confirm against callers).

    Returns:
        (train_iter, test_iter, data_mean, data_std) where test_iter is
        always None and the statistics are computed over the combined
        train+test images.
    """
    train_h5, test_h5 = self.load_h5_files()
    width = img_size[0]
    height = img_size[1]

    comb_data = {}
    data_mean = {}
    data_std = {}
    for input_name in self._input_names_:
        train_data = train_h5[input_name][:]
        test_data = test_h5[input_name][:]
        train_shape = train_data.shape
        test_shape = test_data.shape

        # Buffer is NCHW. The previous version allocated (N, C, width, height)
        # and forced resized images in with reshape(), which scrambles the
        # pixel layout whenever width != height (reshape does not reorder
        # axes). Allocate (N, C, height, width) and transpose instead.
        comb_data[input_name] = mx.nd.zeros(
            (train_shape[0] + test_shape[0], train_shape[1], height, width))

        def _resize_chw(img_chw):
            # cv2.resize expects an HWC image and dsize as (width, height).
            resized = cv2.resize(img_chw.transpose(1, 2, 0), (width, height))
            if resized.ndim == 2:
                # cv2 drops the channel axis for single-channel images;
                # restore it so the CHW transpose below is valid.
                resized = resized[:, :, np.newaxis]
            return resized.transpose(2, 0, 1)

        for i, img in enumerate(train_data):
            comb_data[input_name][i] = _resize_chw(img)
        for i, img in enumerate(test_data):
            comb_data[input_name][train_shape[0] + i] = _resize_chw(img)

        # Mean stays on the NDArray; std goes through numpy (NDArray has no
        # .std()). The small epsilon keeps later divisions by std finite.
        data_mean[input_name + '_'] = nd.array(comb_data[input_name][:].mean(axis=0))
        data_std[input_name + '_'] = nd.array(comb_data[input_name][:].asnumpy().std(axis=0) + 1e-5)

    comb_label = {}
    for output_name in self._output_names_:
        train_labels = train_h5[output_name][:]
        test_labels = test_h5[output_name][:]
        comb_label[output_name] = np.append(train_labels, test_labels, axis=0)

    train_iter = mx.io.NDArrayIter(data=comb_data,
                                   label=comb_label,
                                   batch_size=batch_size)
    # No held-out iterator in this mode: everything is merged for training.
    test_iter = None
    return train_iter, test_iter, data_mean, data_std
def load_h5_files(self):
train_h5 = None
test_h5 = None
......@@ -106,4 +150,4 @@ class CNNDataLoader_mnist_mnistClassifier_net:
return train_h5, test_h5
else:
logging.error("Data loading failure. File '" + os.path.abspath(train_path) + "' does not exist.")
sys.exit(1)
\ No newline at end of file
sys.exit(1)
......@@ -143,7 +143,7 @@ class Net_0(gluon.HybridBlock):
relu2_ = self.relu2_(fc2_)
fc3_ = self.fc3_(relu2_)
softmax3_ = F.softmax(fc3_, axis=-1)
predictions_ = softmax3_
predictions_ = F.identity(softmax3_)
return predictions_
......@@ -16,7 +16,7 @@ public:
const std::vector<std::string> input_keys = {
"data"
};
const std::vector<std::vector<mx_uint>> input_shapes = {{1, 28, 28}};
const std::vector<std::vector<mx_uint>> input_shapes = {{1, 1, 28, 28}};
const bool use_gpu = false;
PredictorHandle handle;
......@@ -45,7 +45,7 @@ public:
size = 1;
for (mx_uint i = 0; i < shape_len; ++i) size *= shape[i];
assert(size == out_predictions_.size());
MXPredGetOutput(handle, 0, &(out_predictions_[0]), out_predictions_.size());
MXPredGetOutput(handle, output_index, &(out_predictions_[0]), out_predictions_.size());
}
......
......@@ -354,7 +354,7 @@ class CNNSupervisedTrainer_mnist_mnistClassifier_net:
attention_resized = np.resize(attention.asnumpy(), (8, 8))
ax = fig.add_subplot(max_length//3, max_length//4, l+1)
ax.set_title(dict[int(labels[l+1][0].asscalar())])
img = ax.imshow(train_images[0+test_batch_size*(batch_i)].transpose(1,2,0))
img = ax.imshow(train_images[0+test_batch_size*(batch_i)])
ax.imshow(attention_resized, cmap='gray', alpha=0.6, extent=img.get_extent())
......@@ -378,7 +378,7 @@ class CNNSupervisedTrainer_mnist_mnistClassifier_net:
test_iter.reset()
metric = mx.metric.create(eval_metric, **eval_metric_params)
for batch_i, batch in enumerate(test_iter):
if True:
if True:
labels = [batch.label[i].as_in_context(mx_context) for i in range(1)]
image_ = batch.data[0].as_in_context(mx_context)
......@@ -406,7 +406,7 @@ class CNNSupervisedTrainer_mnist_mnistClassifier_net:
attention_resized = np.resize(attention.asnumpy(), (8, 8))
ax = fig.add_subplot(max_length//3, max_length//4, l+1)
ax.set_title(dict[int(mx.nd.slice_axis(mx.nd.argmax(outputs[l+1], axis=1), axis=0, begin=0, end=1).asscalar())])
img = ax.imshow(test_images[0+test_batch_size*(batch_i)].transpose(1,2,0))
img = ax.imshow(test_images[0+test_batch_size*(batch_i)])
ax.imshow(attention_resized, cmap='gray', alpha=0.6, extent=img.get_extent())
......
......@@ -118,7 +118,7 @@ class Net_0(gluon.HybridBlock):
fc2_ = self.fc2_(tanh1_)
tanh2_ = self.tanh2_(fc2_)
fc3_ = self.fc3_(tanh2_)
qvalues_ = fc3_
qvalues_ = F.identity(fc3_)
return qvalues_
......@@ -16,7 +16,7 @@ public:
const std::vector<std::string> input_keys = {
"data"
};
const std::vector<std::vector<mx_uint>> input_shapes = {{4}};
const std::vector<std::vector<mx_uint>> input_shapes = {{1, 4}};
const bool use_gpu = false;
PredictorHandle handle;
......@@ -45,7 +45,7 @@ public:
size = 1;
for (mx_uint i = 0; i < shape_len; ++i) size *= shape[i];
assert(size == out_qvalues_.size());
MXPredGetOutput(handle, 0, &(out_qvalues_[0]), out_qvalues_.size());
MXPredGetOutput(handle, output_index, &(out_qvalues_[0]), out_qvalues_.size());
}
......
......@@ -120,7 +120,7 @@ class Net_0(gluon.HybridBlock):
relu2_ = self.relu2_(fc2_)
fc3_ = self.fc3_(relu2_)
tanh3_ = self.tanh3_(fc3_)
action_ = tanh3_
action_ = F.identity(tanh3_)
return action_
......@@ -16,7 +16,7 @@ public:
const std::vector<std::string> input_keys = {
"data"
};
const std::vector<std::vector<mx_uint>> input_shapes = {{2}};
const std::vector<std::vector<mx_uint>> input_shapes = {{1, 2}};
const bool use_gpu = false;
PredictorHandle handle;
......@@ -45,7 +45,7 @@ public:
size = 1;
for (mx_uint i = 0; i < shape_len; ++i) size *= shape[i];
assert(size == out_action_.size());
MXPredGetOutput(handle, 0, &(out_action_[0]), out_action_.size());
MXPredGetOutput(handle, output_index, &(out_action_[0]), out_action_.size());
}
......
......@@ -131,7 +131,7 @@ class Net_0(gluon.HybridBlock):
add4_ = fc3_1_ + fc2_2_
relu4_ = self.relu4_(add4_)
fc4_ = self.fc4_(relu4_)
qvalues_ = fc4_
qvalues_ = F.identity(fc4_)
return qvalues_
......@@ -118,7 +118,7 @@ class Net_0(gluon.HybridBlock):
fc2_ = self.fc2_(tanh1_)
tanh2_ = self.tanh2_(fc2_)
fc3_ = self.fc3_(tanh2_)
qvalues_ = fc3_
qvalues_ = F.identity(fc3_)
return qvalues_
......@@ -16,7 +16,7 @@ public:
const std::vector<std::string> input_keys = {
"data"
};
const std::vector<std::vector<mx_uint>> input_shapes = {{5}};
const std::vector<std::vector<mx_uint>> input_shapes = {{1, 5}};
const bool use_gpu = false;
PredictorHandle handle;
......@@ -45,7 +45,7 @@ public:
size = 1;
for (mx_uint i = 0; i < shape_len; ++i) size *= shape[i];
assert(size == out_qvalues_.size());
MXPredGetOutput(handle, 0, &(out_qvalues_[0]), out_qvalues_.size());
MXPredGetOutput(handle, output_index, &(out_qvalues_[0]), out_qvalues_.size());
}
......
......@@ -120,7 +120,7 @@ class Net_0(gluon.HybridBlock):
relu2_ = self.relu2_(fc2_)
fc3_ = self.fc3_(relu2_)
tanh3_ = self.tanh3_(fc3_)
commands_ = tanh3_
commands_ = F.identity(tanh3_)
return commands_
......@@ -16,7 +16,7 @@ public:
const std::vector<std::string> input_keys = {
"data"
};
const std::vector<std::vector<mx_uint>> input_shapes = {{29}};
const std::vector<std::vector<mx_uint>> input_shapes = {{1, 29}};
const bool use_gpu = false;
PredictorHandle handle;
......@@ -45,7 +45,7 @@ public:
size = 1;
for (mx_uint i = 0; i < shape_len; ++i) size *= shape[i];
assert(size == out_commands_.size());
MXPredGetOutput(handle, 0, &(out_commands_[0]), out_commands_.size());
MXPredGetOutput(handle, output_index, &(out_commands_[0]), out_commands_.size());
}
......
......@@ -127,7 +127,7 @@ class Net_0(gluon.HybridBlock):
fc4_ = self.fc4_(relu3_)
relu4_ = self.relu4_(fc4_)
fc5_ = self.fc5_(relu4_)
qvalues_ = fc5_
qvalues_ = F.identity(fc5_)
return qvalues_
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment