Commit 982360b4 authored by Evgeny Kusmenko

Merge branch 'fix_harcoded_testing_loop' into 'master'

Fix hardcoded testing loop

See merge request !11
parents a88eaa20 8bf2e841
Pipeline #83529 failed with stages in 4 minutes and 59 seconds
@@ -210,6 +210,7 @@
       <artifactId>maven-surefire-plugin</artifactId>
       <version>2.19.1</version>
       <configuration>
+        <useSystemClassLoader>false</useSystemClassLoader>
       </configuration>
     </plugin>
     <plugin>
......
@@ -5,7 +5,7 @@ import numpy as np
 import logging
 import os
 import sys
+import lmdb

 class ${tc.fileNameWithoutEnding}:
     module = None
@@ -34,7 +34,10 @@ class ${tc.fileNameWithoutEnding}:
         # don't need the gradient for the backward pass
         data = model.StopGradient(data, data)
-        return data, label
+
+        dataset_size = int(lmdb.open(db).stat()['entries'])
+        return data, label, dataset_size

     def create_model(self, model, data, device_opts):
         with core.DeviceScope(device_opts):
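The core of the change: `add_input` now derives the example count from the LMDB metadata instead of relying on hardcoded figures downstream. A minimal standalone sketch of that call (the database path is a stand-in; the generated code builds it from `self._data_dir_`):

```python
import lmdb

db_path = 'mnist-train-nchw-lmdb'   # stand-in path

# stat()['entries'] is the number of key/value records in the database,
# i.e. the number of examples that were written into the LMDB.
env = lmdb.open(db_path, readonly=True)
dataset_size = int(env.stat()['entries'])
env.close()

print('dataset size: ' + str(dataset_size))
```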
@@ -95,7 +98,7 @@ ${tc.include(tc.architecture.body)}
         arg_scope = {"order": "NCHW"}
         # == Training model ==
         train_model = model_helper.ModelHelper(name="train_net", arg_scope=arg_scope)
-        data, label = self.add_input(train_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-train-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
+        data, label, train_dataset_size = self.add_input(train_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-train-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
         ${tc.join(tc.architectureOutputs, ",", "","")} = self.create_model(train_model, data, device_opts=device_opts)
         self.add_training_operators(train_model, ${tc.join(tc.architectureOutputs, ",", "","")}, label, device_opts, opt_type, base_learning_rate, policy, stepsize, epsilon, beta1, beta2, gamma, momentum)
         self.add_accuracy(train_model, ${tc.join(tc.architectureOutputs, ",", "","")}, label, device_opts, eval_metric)
@@ -107,28 +110,25 @@ ${tc.include(tc.architecture.body)}
         workspace.CreateNet(train_model.net, overwrite=True)

         # Main Training Loop
-        print("== Starting Training for " + str(num_epoch) + " num_epoch ==")
-        for j in range(0, num_epoch):
+        print("== Starting Training for " + str(num_epoch) + " epochs ==")
+        for i in range(num_epoch):
             workspace.RunNet(train_model.net)
-            if j % 50 == 0:
-                print 'Iter: ' + str(j) + ': ' + 'Loss ' + str(workspace.FetchBlob("loss")) + ' - ' + 'Accuracy ' + str(workspace.FetchBlob('accuracy'))
+            if i % 50 == 0:
+                print 'Iter ' + str(i) + ': ' + 'Loss ' + str(workspace.FetchBlob("loss")) + ' - ' + 'Accuracy ' + str(workspace.FetchBlob('accuracy'))
         print("Training done")
print("== Running Test model ==")
# == Testing model. ==
test_model= model_helper.ModelHelper(name="test_net", arg_scope=arg_scope, init_params=False)
data, label = self.add_input(test_model, batch_size=100, db=os.path.join(self._data_dir_, 'mnist-test-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
data, label, test_dataset_size = self.add_input(test_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-test-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
${tc.join(tc.architectureOutputs, ",", "","")} = self.create_model(test_model, data, device_opts=device_opts)
self.add_accuracy(test_model, predictions, label, device_opts, eval_metric)
workspace.RunNetOnce(test_model.param_init_net)
workspace.CreateNet(test_model.net, overwrite=True)
# Main Testing Loop
# batch size: 100
# iteration: 100
# total test images: 10000
test_accuracy = np.zeros(100)
for i in range(100):
test_accuracy = np.zeros(test_dataset_size/batch_size)
for i in range(test_dataset_size/batch_size):
# Run a forward pass of the net on the current batch
workspace.RunNet(test_model.net)
# Collect the batch accuracy from the workspace
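This is the fix the MR title refers to: the accuracy buffer and the loop bound are now sized from `test_dataset_size` instead of a hardcoded 100 iterations. A sketch of the resulting loop logic, with the old hardcoded figures as stand-in values (`test_model` is assumed to be built as in the hunk above); note that one `workspace.RunNet` call consumes a single batch, which is why the batch count, not the image count, bounds the loop:

```python
import numpy as np
from caffe2.python import workspace

test_dataset_size = 10000   # stand-in; read from the LMDB in the real code
batch_size = 100            # stand-in; passed in by the caller

num_iter = test_dataset_size // batch_size   # 100 batches cover the test set
test_accuracy = np.zeros(num_iter)

for i in range(num_iter):
    workspace.RunNet(test_model.net)                    # forward pass on one batch
    test_accuracy[i] = workspace.FetchBlob('accuracy')  # scalar accuracy blob

print('Test accuracy: ' + str(test_accuracy.mean()))
```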
@@ -192,4 +192,4 @@ ${tc.include(tc.architecture.body)}
             net_def.ParseFromString(f.read())
             net_def.device_option.CopyFrom(device_opts)
             workspace.CreateNet(net_def.SerializeToString(), overwrite=True)
-            print("== Loaded init_net and predict_net ==")
\ No newline at end of file
+            print("== Loaded init_net and predict_net ==")
@@ -5,7 +5,7 @@ import numpy as np
 import logging
 import os
 import sys
+import lmdb

 class CNNCreator_Alexnet:
     module = None
@@ -34,7 +34,10 @@ class CNNCreator_Alexnet:
         # don't need the gradient for the backward pass
         data = model.StopGradient(data, data)
-        return data, label
+
+        dataset_size = int(lmdb.open(db).stat()['entries'])
+        return data, label, dataset_size

     def create_model(self, model, data, device_opts):
         with core.DeviceScope(device_opts):
@@ -187,7 +190,7 @@ class CNNCreator_Alexnet:
         arg_scope = {"order": "NCHW"}
         # == Training model ==
         train_model = model_helper.ModelHelper(name="train_net", arg_scope=arg_scope)
-        data, label = self.add_input(train_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-train-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
+        data, label, train_dataset_size = self.add_input(train_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-train-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
         predictions = self.create_model(train_model, data, device_opts=device_opts)
         self.add_training_operators(train_model, predictions, label, device_opts, opt_type, base_learning_rate, policy, stepsize, epsilon, beta1, beta2, gamma, momentum)
         self.add_accuracy(train_model, predictions, label, device_opts, eval_metric)
@@ -199,28 +202,25 @@ class CNNCreator_Alexnet:
         workspace.CreateNet(train_model.net, overwrite=True)

         # Main Training Loop
-        print("== Starting Training for " + str(num_epoch) + " num_epoch ==")
-        for j in range(0, num_epoch):
+        print("== Starting Training for " + str(num_epoch) + " epochs ==")
+        for i in range(num_epoch):
             workspace.RunNet(train_model.net)
-            if j % 50 == 0:
-                print 'Iter: ' + str(j) + ': ' + 'Loss ' + str(workspace.FetchBlob("loss")) + ' - ' + 'Accuracy ' + str(workspace.FetchBlob('accuracy'))
+            if i % 50 == 0:
+                print 'Iter ' + str(i) + ': ' + 'Loss ' + str(workspace.FetchBlob("loss")) + ' - ' + 'Accuracy ' + str(workspace.FetchBlob('accuracy'))
         print("Training done")
print("== Running Test model ==")
# == Testing model. ==
test_model= model_helper.ModelHelper(name="test_net", arg_scope=arg_scope, init_params=False)
data, label = self.add_input(test_model, batch_size=100, db=os.path.join(self._data_dir_, 'mnist-test-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
data, label, test_dataset_size = self.add_input(test_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-test-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
predictions = self.create_model(test_model, data, device_opts=device_opts)
self.add_accuracy(test_model, predictions, label, device_opts, eval_metric)
workspace.RunNetOnce(test_model.param_init_net)
workspace.CreateNet(test_model.net, overwrite=True)
# Main Testing Loop
# batch size: 100
# iteration: 100
# total test images: 10000
test_accuracy = np.zeros(100)
for i in range(100):
test_accuracy = np.zeros(test_dataset_size/batch_size)
for i in range(test_dataset_size/batch_size):
# Run a forward pass of the net on the current batch
workspace.RunNet(test_model.net)
# Collect the batch accuracy from the workspace
......
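One caveat on the new loop bound, which this MR does not address: `test_dataset_size/batch_size` is an integer only under Python 2 division semantics. Under Python 3, `/` yields a float, and both `range()` and `np.zeros()` reject it. Floor division behaves identically on both interpreters, as this self-contained sketch shows (values are stand-ins matching the old hardcoded comments):

```python
# Python 2: 10000 / 100 == 100 (int).
# Python 3: 10000 / 100 == 100.0 (float), and range(100.0) raises TypeError.
test_dataset_size = 10000   # stand-in value
batch_size = 100            # stand-in value

num_iter = test_dataset_size // batch_size   # int on both Python 2 and 3
assert num_iter == 100
```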
@@ -5,7 +5,7 @@ import numpy as np
 import logging
 import os
 import sys
+import lmdb

 class CNNCreator_CifarClassifierNetwork:
     module = None
@@ -34,7 +34,10 @@ class CNNCreator_CifarClassifierNetwork:
         # don't need the gradient for the backward pass
         data = model.StopGradient(data, data)
-        return data, label
+
+        dataset_size = int(lmdb.open(db).stat()['entries'])
+        return data, label, dataset_size

     def create_model(self, model, data, device_opts):
         with core.DeviceScope(device_opts):
@@ -272,7 +275,7 @@ class CNNCreator_CifarClassifierNetwork:
         arg_scope = {"order": "NCHW"}
         # == Training model ==
         train_model = model_helper.ModelHelper(name="train_net", arg_scope=arg_scope)
-        data, label = self.add_input(train_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-train-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
+        data, label, train_dataset_size = self.add_input(train_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-train-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
         softmax = self.create_model(train_model, data, device_opts=device_opts)
         self.add_training_operators(train_model, softmax, label, device_opts, opt_type, base_learning_rate, policy, stepsize, epsilon, beta1, beta2, gamma, momentum)
         self.add_accuracy(train_model, softmax, label, device_opts, eval_metric)
@@ -284,28 +287,25 @@ class CNNCreator_CifarClassifierNetwork:
         workspace.CreateNet(train_model.net, overwrite=True)

         # Main Training Loop
-        print("== Starting Training for " + str(num_epoch) + " num_epoch ==")
-        for j in range(0, num_epoch):
+        print("== Starting Training for " + str(num_epoch) + " epochs ==")
+        for i in range(num_epoch):
             workspace.RunNet(train_model.net)
-            if j % 50 == 0:
-                print 'Iter: ' + str(j) + ': ' + 'Loss ' + str(workspace.FetchBlob("loss")) + ' - ' + 'Accuracy ' + str(workspace.FetchBlob('accuracy'))
+            if i % 50 == 0:
+                print 'Iter ' + str(i) + ': ' + 'Loss ' + str(workspace.FetchBlob("loss")) + ' - ' + 'Accuracy ' + str(workspace.FetchBlob('accuracy'))
         print("Training done")
print("== Running Test model ==")
# == Testing model. ==
test_model= model_helper.ModelHelper(name="test_net", arg_scope=arg_scope, init_params=False)
data, label = self.add_input(test_model, batch_size=100, db=os.path.join(self._data_dir_, 'mnist-test-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
data, label, test_dataset_size = self.add_input(test_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-test-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
softmax = self.create_model(test_model, data, device_opts=device_opts)
self.add_accuracy(test_model, predictions, label, device_opts, eval_metric)
workspace.RunNetOnce(test_model.param_init_net)
workspace.CreateNet(test_model.net, overwrite=True)
# Main Testing Loop
# batch size: 100
# iteration: 100
# total test images: 10000
test_accuracy = np.zeros(100)
for i in range(100):
test_accuracy = np.zeros(test_dataset_size/batch_size)
for i in range(test_dataset_size/batch_size):
# Run a forward pass of the net on the current batch
workspace.RunNet(test_model.net)
# Collect the batch accuracy from the workspace
......
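A second caveat that applies to all four generated classes: when `batch_size` does not divide the dataset size evenly, the floored loop bound silently drops the trailing partial batch, so those examples are never evaluated. A hedged sketch of a guard; the helper name `check_coverage` is hypothetical, nothing like it exists in this MR:

```python
def check_coverage(dataset_size, batch_size):
    # divmod yields the usable batch count and the leftover example count.
    num_iter, remainder = divmod(dataset_size, batch_size)
    if remainder:
        print('Warning: ' + str(remainder) + ' of ' + str(dataset_size)
              + ' examples will be skipped (batch_size=' + str(batch_size) + ')')
    return num_iter

check_coverage(10000, 128)   # warns: 16 of 10000 examples will be skipped
```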
@@ -5,7 +5,7 @@ import numpy as np
 import logging
 import os
 import sys
+import lmdb

 class CNNCreator_VGG16:
     module = None
@@ -34,7 +34,10 @@ class CNNCreator_VGG16:
         # don't need the gradient for the backward pass
         data = model.StopGradient(data, data)
-        return data, label
+
+        dataset_size = int(lmdb.open(db).stat()['entries'])
+        return data, label, dataset_size

     def create_model(self, model, data, device_opts):
         with core.DeviceScope(device_opts):
@@ -162,7 +165,7 @@ class CNNCreator_VGG16:
         arg_scope = {"order": "NCHW"}
         # == Training model ==
         train_model = model_helper.ModelHelper(name="train_net", arg_scope=arg_scope)
-        data, label = self.add_input(train_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-train-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
+        data, label, train_dataset_size = self.add_input(train_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-train-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
         predictions = self.create_model(train_model, data, device_opts=device_opts)
         self.add_training_operators(train_model, predictions, label, device_opts, opt_type, base_learning_rate, policy, stepsize, epsilon, beta1, beta2, gamma, momentum)
         self.add_accuracy(train_model, predictions, label, device_opts, eval_metric)
@@ -174,28 +177,25 @@ class CNNCreator_VGG16:
         workspace.CreateNet(train_model.net, overwrite=True)

         # Main Training Loop
-        print("== Starting Training for " + str(num_epoch) + " num_epoch ==")
-        for j in range(0, num_epoch):
+        print("== Starting Training for " + str(num_epoch) + " epochs ==")
+        for i in range(num_epoch):
             workspace.RunNet(train_model.net)
-            if j % 50 == 0:
-                print 'Iter: ' + str(j) + ': ' + 'Loss ' + str(workspace.FetchBlob("loss")) + ' - ' + 'Accuracy ' + str(workspace.FetchBlob('accuracy'))
+            if i % 50 == 0:
+                print 'Iter ' + str(i) + ': ' + 'Loss ' + str(workspace.FetchBlob("loss")) + ' - ' + 'Accuracy ' + str(workspace.FetchBlob('accuracy'))
         print("Training done")
print("== Running Test model ==")
# == Testing model. ==
test_model= model_helper.ModelHelper(name="test_net", arg_scope=arg_scope, init_params=False)
data, label = self.add_input(test_model, batch_size=100, db=os.path.join(self._data_dir_, 'mnist-test-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
data, label, test_dataset_size = self.add_input(test_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-test-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
predictions = self.create_model(test_model, data, device_opts=device_opts)
self.add_accuracy(test_model, predictions, label, device_opts, eval_metric)
workspace.RunNetOnce(test_model.param_init_net)
workspace.CreateNet(test_model.net, overwrite=True)
# Main Testing Loop
# batch size: 100
# iteration: 100
# total test images: 10000
test_accuracy = np.zeros(100)
for i in range(100):
test_accuracy = np.zeros(test_dataset_size/batch_size)
for i in range(test_dataset_size/batch_size):
# Run a forward pass of the net on the current batch
workspace.RunNet(test_model.net)
# Collect the batch accuracy from the workspace
......
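A final observation: every file now receives `train_dataset_size` from `add_input`, but none of the visible hunks use it, and since one `RunNet` call consumes a single batch, the training loop still runs `num_epoch` batches rather than true epochs. A hedged sketch of how the counted size could drive an epoch-based loop; this is an assumption on my part, not something the MR does:

```python
# Assumes train_model, train_dataset_size, batch_size and num_epoch
# come from the surrounding train() code, as in the hunks above.
iter_per_epoch = train_dataset_size // batch_size   # batches per full pass

for epoch in range(num_epoch):
    for i in range(iter_per_epoch):
        workspace.RunNet(train_model.net)           # one batch per call
    print('Epoch ' + str(epoch) + ': Loss ' + str(workspace.FetchBlob('loss')))
```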