Commit c696bd35 authored by Carlos Alfredo Yeverino Rodriguez

Revert "Merge branch 'fix_harcoded_testing_loop' into 'master'"

This reverts merge request !11
parent 982360b4
Pipeline #84272 failed with stages in 3 minutes and 10 seconds
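For context: merge request !11 had replaced the hardcoded 100-iteration testing loop with an iteration count derived from the number of records in the LMDB database, which is exactly what the diff below removes again. A minimal sketch of that technique, assuming the Python `lmdb` package and an existing LMDB database (names here are illustrative, not the generator's own):

```python
import lmdb

def get_dataset_size(db_path):
    # stat()['entries'] reports the number of key/value records in the LMDB,
    # i.e. the number of examples stored in the dataset
    return int(lmdb.open(db_path).stat()['entries'])

# Size the test loop to the data instead of hardcoding it, e.g.
# 10000 MNIST test images / batch size 100 = 100 iterations:
# num_test_batches = get_dataset_size(db_path) // batch_size
```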
@@ -210,7 +210,6 @@
 <artifactId>maven-surefire-plugin</artifactId>
 <version>2.19.1</version>
 <configuration>
-<useSystemClassLoader>false</useSystemClassLoader>
 </configuration>
 </plugin>
 <plugin>
@@ -5,7 +5,7 @@ import numpy as np
 import logging
 import os
 import sys
-import lmdb
 class ${tc.fileNameWithoutEnding}:
 module = None
@@ -34,10 +34,7 @@ class ${tc.fileNameWithoutEnding}:
 # don't need the gradient for the backward pass
 data = model.StopGradient(data, data)
-dataset_size = int (lmdb.open(db).stat()['entries'])
-return data, label, dataset_size
+return data, label
 def create_model(self, model, data, device_opts):
 with core.DeviceScope(device_opts):
@@ -98,7 +95,7 @@ ${tc.include(tc.architecture.body)}
 arg_scope = {"order": "NCHW"}
 # == Training model ==
 train_model= model_helper.ModelHelper(name="train_net", arg_scope=arg_scope)
-data, label, train_dataset_size = self.add_input(train_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-train-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
+data, label = self.add_input(train_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-train-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
 ${tc.join(tc.architectureOutputs, ",", "","")} = self.create_model(train_model, data, device_opts=device_opts)
 self.add_training_operators(train_model, ${tc.join(tc.architectureOutputs, ",", "","")}, label, device_opts, opt_type, base_learning_rate, policy, stepsize, epsilon, beta1, beta2, gamma, momentum)
 self.add_accuracy(train_model, ${tc.join(tc.architectureOutputs, ",", "","")}, label, device_opts, eval_metric)
@@ -110,25 +107,28 @@ ${tc.include(tc.architecture.body)}
 workspace.CreateNet(train_model.net, overwrite=True)
 # Main Training Loop
-print("== Starting Training for " + str(num_epoch) + " epochs ==")
-for i in range(num_epoch):
+print("== Starting Training for " + str(num_epoch) + " num_epoch ==")
+for j in range(0, num_epoch):
 workspace.RunNet(train_model.net)
-if i % 50 == 0:
-print 'Iter ' + str(i) + ': ' + 'Loss ' + str(workspace.FetchBlob("loss")) + ' - ' + 'Accuracy ' + str(workspace.FetchBlob('accuracy'))
+if j % 50 == 0:
+print 'Iter: ' + str(j) + ': ' + 'Loss ' + str(workspace.FetchBlob("loss")) + ' - ' + 'Accuracy ' + str(workspace.FetchBlob('accuracy'))
 print("Training done")
 print("== Running Test model ==")
 # == Testing model. ==
 test_model= model_helper.ModelHelper(name="test_net", arg_scope=arg_scope, init_params=False)
-data, label, test_dataset_size = self.add_input(test_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-test-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
+data, label = self.add_input(test_model, batch_size=100, db=os.path.join(self._data_dir_, 'mnist-test-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
 ${tc.join(tc.architectureOutputs, ",", "","")} = self.create_model(test_model, data, device_opts=device_opts)
 self.add_accuracy(test_model, predictions, label, device_opts, eval_metric)
 workspace.RunNetOnce(test_model.param_init_net)
 workspace.CreateNet(test_model.net, overwrite=True)
 # Main Testing Loop
-test_accuracy = np.zeros(test_dataset_size/batch_size)
-for i in range(test_dataset_size/batch_size):
+# batch size: 100
+# iteration: 100
+# total test images: 10000
+test_accuracy = np.zeros(100)
+for i in range(100):
 # Run a forward pass of the net on the current batch
 workspace.RunNet(test_model.net)
 # Collect the batch accuracy from the workspace
@@ -192,4 +192,4 @@ ${tc.include(tc.architecture.body)}
 net_def.ParseFromString(f.read())
 net_def.device_option.CopyFrom(device_opts)
 workspace.CreateNet(net_def.SerializeToString(), overwrite=True)
-print("== Loaded init_net and predict_net ==")
+print("== Loaded init_net and predict_net ==")
\ No newline at end of file
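The hardcoded constants restored above rest on the arithmetic batch_size × iterations = dataset size (100 × 100 = 10000 MNIST test images). A sketch of the testing loop that both variants reduce to, differing only in how the iteration count is obtained; this assumes a `test_model` already built and initialized as in the diff:

```python
import numpy as np
from caffe2.python import workspace

batch_size = 100
test_dataset_size = 10000                      # MNIST test set size
num_batches = test_dataset_size // batch_size  # 100; the reverted code hardcodes this value

# test_model: a ModelHelper prepared as shown in the diff above (assumption)
test_accuracy = np.zeros(num_batches)
for i in range(num_batches):
    workspace.RunNet(test_model.net)           # forward pass on one batch
    test_accuracy[i] = workspace.FetchBlob('accuracy')
print('Test accuracy: %f' % test_accuracy.mean())
```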
@@ -5,7 +5,7 @@ import numpy as np
 import logging
 import os
 import sys
-import lmdb
 class CNNCreator_Alexnet:
 module = None
@@ -34,10 +34,7 @@ class CNNCreator_Alexnet:
 # don't need the gradient for the backward pass
 data = model.StopGradient(data, data)
-dataset_size = int (lmdb.open(db).stat()['entries'])
-return data, label, dataset_size
+return data, label
 def create_model(self, model, data, device_opts):
 with core.DeviceScope(device_opts):
@@ -190,7 +187,7 @@ class CNNCreator_Alexnet:
 arg_scope = {"order": "NCHW"}
 # == Training model ==
 train_model= model_helper.ModelHelper(name="train_net", arg_scope=arg_scope)
-data, label, train_dataset_size = self.add_input(train_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-train-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
+data, label = self.add_input(train_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-train-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
 predictions = self.create_model(train_model, data, device_opts=device_opts)
 self.add_training_operators(train_model, predictions, label, device_opts, opt_type, base_learning_rate, policy, stepsize, epsilon, beta1, beta2, gamma, momentum)
 self.add_accuracy(train_model, predictions, label, device_opts, eval_metric)
@@ -202,25 +199,28 @@ class CNNCreator_Alexnet:
 workspace.CreateNet(train_model.net, overwrite=True)
 # Main Training Loop
-print("== Starting Training for " + str(num_epoch) + " epochs ==")
-for i in range(num_epoch):
+print("== Starting Training for " + str(num_epoch) + " num_epoch ==")
+for j in range(0, num_epoch):
 workspace.RunNet(train_model.net)
-if i % 50 == 0:
-print 'Iter ' + str(i) + ': ' + 'Loss ' + str(workspace.FetchBlob("loss")) + ' - ' + 'Accuracy ' + str(workspace.FetchBlob('accuracy'))
+if j % 50 == 0:
+print 'Iter: ' + str(j) + ': ' + 'Loss ' + str(workspace.FetchBlob("loss")) + ' - ' + 'Accuracy ' + str(workspace.FetchBlob('accuracy'))
 print("Training done")
 print("== Running Test model ==")
 # == Testing model. ==
 test_model= model_helper.ModelHelper(name="test_net", arg_scope=arg_scope, init_params=False)
-data, label, test_dataset_size = self.add_input(test_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-test-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
+data, label = self.add_input(test_model, batch_size=100, db=os.path.join(self._data_dir_, 'mnist-test-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
 predictions = self.create_model(test_model, data, device_opts=device_opts)
 self.add_accuracy(test_model, predictions, label, device_opts, eval_metric)
 workspace.RunNetOnce(test_model.param_init_net)
 workspace.CreateNet(test_model.net, overwrite=True)
 # Main Testing Loop
-test_accuracy = np.zeros(test_dataset_size/batch_size)
-for i in range(test_dataset_size/batch_size):
+# batch size: 100
+# iteration: 100
+# total test images: 10000
+test_accuracy = np.zeros(100)
+for i in range(100):
 # Run a forward pass of the net on the current batch
 workspace.RunNet(test_model.net)
 # Collect the batch accuracy from the workspace
@@ -5,7 +5,7 @@ import numpy as np
 import logging
 import os
 import sys
-import lmdb
 class CNNCreator_CifarClassifierNetwork:
 module = None
@@ -34,10 +34,7 @@ class CNNCreator_CifarClassifierNetwork:
 # don't need the gradient for the backward pass
 data = model.StopGradient(data, data)
-dataset_size = int (lmdb.open(db).stat()['entries'])
-return data, label, dataset_size
+return data, label
 def create_model(self, model, data, device_opts):
 with core.DeviceScope(device_opts):
@@ -275,7 +272,7 @@ class CNNCreator_CifarClassifierNetwork:
 arg_scope = {"order": "NCHW"}
 # == Training model ==
 train_model= model_helper.ModelHelper(name="train_net", arg_scope=arg_scope)
-data, label, train_dataset_size = self.add_input(train_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-train-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
+data, label = self.add_input(train_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-train-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
 softmax = self.create_model(train_model, data, device_opts=device_opts)
 self.add_training_operators(train_model, softmax, label, device_opts, opt_type, base_learning_rate, policy, stepsize, epsilon, beta1, beta2, gamma, momentum)
 self.add_accuracy(train_model, softmax, label, device_opts, eval_metric)
@@ -287,25 +284,28 @@ class CNNCreator_CifarClassifierNetwork:
 workspace.CreateNet(train_model.net, overwrite=True)
 # Main Training Loop
-print("== Starting Training for " + str(num_epoch) + " epochs ==")
-for i in range(num_epoch):
+print("== Starting Training for " + str(num_epoch) + " num_epoch ==")
+for j in range(0, num_epoch):
 workspace.RunNet(train_model.net)
-if i % 50 == 0:
-print 'Iter ' + str(i) + ': ' + 'Loss ' + str(workspace.FetchBlob("loss")) + ' - ' + 'Accuracy ' + str(workspace.FetchBlob('accuracy'))
+if j % 50 == 0:
+print 'Iter: ' + str(j) + ': ' + 'Loss ' + str(workspace.FetchBlob("loss")) + ' - ' + 'Accuracy ' + str(workspace.FetchBlob('accuracy'))
 print("Training done")
 print("== Running Test model ==")
 # == Testing model. ==
 test_model= model_helper.ModelHelper(name="test_net", arg_scope=arg_scope, init_params=False)
-data, label, test_dataset_size = self.add_input(test_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-test-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
+data, label = self.add_input(test_model, batch_size=100, db=os.path.join(self._data_dir_, 'mnist-test-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
 softmax = self.create_model(test_model, data, device_opts=device_opts)
 self.add_accuracy(test_model, predictions, label, device_opts, eval_metric)
 workspace.RunNetOnce(test_model.param_init_net)
 workspace.CreateNet(test_model.net, overwrite=True)
 # Main Testing Loop
-test_accuracy = np.zeros(test_dataset_size/batch_size)
-for i in range(test_dataset_size/batch_size):
+# batch size: 100
+# iteration: 100
+# total test images: 10000
+test_accuracy = np.zeros(100)
+for i in range(100):
 # Run a forward pass of the net on the current batch
 workspace.RunNet(test_model.net)
 # Collect the batch accuracy from the workspace
@@ -5,7 +5,7 @@ import numpy as np
 import logging
 import os
 import sys
-import lmdb
 class CNNCreator_VGG16:
 module = None
@@ -34,10 +34,7 @@ class CNNCreator_VGG16:
 # don't need the gradient for the backward pass
 data = model.StopGradient(data, data)
-dataset_size = int (lmdb.open(db).stat()['entries'])
-return data, label, dataset_size
+return data, label
 def create_model(self, model, data, device_opts):
 with core.DeviceScope(device_opts):
@@ -165,7 +162,7 @@ class CNNCreator_VGG16:
 arg_scope = {"order": "NCHW"}
 # == Training model ==
 train_model= model_helper.ModelHelper(name="train_net", arg_scope=arg_scope)
-data, label, train_dataset_size = self.add_input(train_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-train-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
+data, label = self.add_input(train_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-train-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
 predictions = self.create_model(train_model, data, device_opts=device_opts)
 self.add_training_operators(train_model, predictions, label, device_opts, opt_type, base_learning_rate, policy, stepsize, epsilon, beta1, beta2, gamma, momentum)
 self.add_accuracy(train_model, predictions, label, device_opts, eval_metric)
@@ -177,25 +174,28 @@ class CNNCreator_VGG16:
 workspace.CreateNet(train_model.net, overwrite=True)
 # Main Training Loop
-print("== Starting Training for " + str(num_epoch) + " epochs ==")
-for i in range(num_epoch):
+print("== Starting Training for " + str(num_epoch) + " num_epoch ==")
+for j in range(0, num_epoch):
 workspace.RunNet(train_model.net)
-if i % 50 == 0:
-print 'Iter ' + str(i) + ': ' + 'Loss ' + str(workspace.FetchBlob("loss")) + ' - ' + 'Accuracy ' + str(workspace.FetchBlob('accuracy'))
+if j % 50 == 0:
+print 'Iter: ' + str(j) + ': ' + 'Loss ' + str(workspace.FetchBlob("loss")) + ' - ' + 'Accuracy ' + str(workspace.FetchBlob('accuracy'))
 print("Training done")
 print("== Running Test model ==")
 # == Testing model. ==
 test_model= model_helper.ModelHelper(name="test_net", arg_scope=arg_scope, init_params=False)
-data, label, test_dataset_size = self.add_input(test_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'mnist-test-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
+data, label = self.add_input(test_model, batch_size=100, db=os.path.join(self._data_dir_, 'mnist-test-nchw-lmdb'), db_type='lmdb', device_opts=device_opts)
 predictions = self.create_model(test_model, data, device_opts=device_opts)
 self.add_accuracy(test_model, predictions, label, device_opts, eval_metric)
 workspace.RunNetOnce(test_model.param_init_net)
 workspace.CreateNet(test_model.net, overwrite=True)
 # Main Testing Loop
-test_accuracy = np.zeros(test_dataset_size/batch_size)
-for i in range(test_dataset_size/batch_size):
+# batch size: 100
+# iteration: 100
+# total test images: 10000
+test_accuracy = np.zeros(100)
+for i in range(100):
 # Run a forward pass of the net on the current batch
 workspace.RunNet(test_model.net)
 # Collect the batch accuracy from the workspace