Commit 160519ea authored by Sebastian Nickels

Changed loss output calculation

parent ccdb8639
Pipeline #226592 failed with stages in 30 seconds
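For context: before this commit the trainers accumulated the per-batch mean loss (loss.mean()) and divided the total by the number of batches; after it they accumulate the per-batch sum (loss.sum()) and divide by the number of processed samples, train_batches * batch_size. The following is a minimal plain-Python sketch of the two variants; the function names and toy data are chosen here for illustration only and do not appear in the generated code. With full, equally sized batches the two computations agree, as the assertion at the end shows.

def epoch_loss_old(batches):
    # Old behaviour: add up the per-batch mean loss, then divide by the number of batches.
    global_loss = 0.0
    for batch_losses in batches:
        global_loss += sum(batch_losses) / len(batch_losses)   # corresponds to loss.mean()
    return global_loss / len(batches)                          # corresponds to /= train_batches

def epoch_loss_new(batches, batch_size):
    # New behaviour: add up the per-batch summed loss, then divide by batches * batch_size.
    global_loss = 0.0
    for batch_losses in batches:
        global_loss += sum(batch_losses)                        # corresponds to loss.sum()
    return global_loss / (len(batches) * batch_size)            # corresponds to /= (train_batches * batch_size)

# With full batches of equal size both variants give the same epoch loss:
batches = [[0.5, 1.5], [1.0, 2.0]]
assert abs(epoch_loss_old(batches) - epoch_loss_new(batches, batch_size=2)) < 1e-9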
@@ -308,7 +308,7 @@ class ${tc.fileNameWithoutEnding}:
 loss_total += loss.sum().asscalar()
-global_loss_train += float(loss.mean().asscalar())
+global_loss_train += loss.sum().asscalar()
 train_batches += 1
 if clip_global_grad_norm:
@@ -338,8 +338,7 @@ class ${tc.fileNameWithoutEnding}:
 tic = time.time()
 if train_batches > 0:
-global_loss_train /= train_batches
+global_loss_train /= (train_batches * batch_size)
 tic = None
@@ -382,7 +381,7 @@ class ${tc.fileNameWithoutEnding}:
 for element in lossList:
 loss = loss + element
-global_loss_test += float(loss.mean().asscalar())
+global_loss_test += loss.sum().asscalar()
 test_batches += 1
 predictions = []
@@ -396,8 +395,7 @@ class ${tc.fileNameWithoutEnding}:
 metric.update(preds=predictions, labels=labels)
 test_metric_score = metric.get()[1]
 if test_batches > 0:
-global_loss_test /= test_batches
+global_loss_test /= (test_batches * batch_size)
 logging.info("Epoch[%d] Train metric: %f, Test metric: %f, Train loss: %f, Test loss: %f" % (epoch, train_metric_score, test_metric_score, global_loss_train, global_loss_test))
@@ -320,7 +320,7 @@ class CNNSupervisedTrainer_Alexnet:
 loss_total += loss.sum().asscalar()
-global_loss_train += float(loss.mean().asscalar())
+global_loss_train += loss.sum().asscalar()
 train_batches += 1
 if clip_global_grad_norm:
@@ -350,8 +350,7 @@ class CNNSupervisedTrainer_Alexnet:
 tic = time.time()
 if train_batches > 0:
-global_loss_train /= train_batches
+global_loss_train /= (train_batches * batch_size)
 tic = None
@@ -501,7 +500,7 @@ class CNNSupervisedTrainer_Alexnet:
 for element in lossList:
 loss = loss + element
-global_loss_test += float(loss.mean().asscalar())
+global_loss_test += loss.sum().asscalar()
 test_batches += 1
 predictions = []
@@ -515,8 +514,7 @@ class CNNSupervisedTrainer_Alexnet:
 metric.update(preds=predictions, labels=labels)
 test_metric_score = metric.get()[1]
 if test_batches > 0:
-global_loss_test /= test_batches
+global_loss_test /= (test_batches * batch_size)
 logging.info("Epoch[%d] Train metric: %f, Test metric: %f, Train loss: %f, Test loss: %f" % (epoch, train_metric_score, test_metric_score, global_loss_train, global_loss_test))
@@ -320,7 +320,7 @@ class CNNSupervisedTrainer_CifarClassifierNetwork:
 loss_total += loss.sum().asscalar()
-global_loss_train += float(loss.mean().asscalar())
+global_loss_train += loss.sum().asscalar()
 train_batches += 1
 if clip_global_grad_norm:
@@ -350,8 +350,7 @@ class CNNSupervisedTrainer_CifarClassifierNetwork:
 tic = time.time()
 if train_batches > 0:
-global_loss_train /= train_batches
+global_loss_train /= (train_batches * batch_size)
 tic = None
@@ -501,7 +500,7 @@ class CNNSupervisedTrainer_CifarClassifierNetwork:
 for element in lossList:
 loss = loss + element
-global_loss_test += float(loss.mean().asscalar())
+global_loss_test += loss.sum().asscalar()
 test_batches += 1
 predictions = []
@@ -515,8 +514,7 @@ class CNNSupervisedTrainer_CifarClassifierNetwork:
 metric.update(preds=predictions, labels=labels)
 test_metric_score = metric.get()[1]
 if test_batches > 0:
-global_loss_test /= test_batches
+global_loss_test /= (test_batches * batch_size)
 logging.info("Epoch[%d] Train metric: %f, Test metric: %f, Train loss: %f, Test loss: %f" % (epoch, train_metric_score, test_metric_score, global_loss_train, global_loss_test))
@@ -320,7 +320,7 @@ class CNNSupervisedTrainer_VGG16:
 loss_total += loss.sum().asscalar()
-global_loss_train += float(loss.mean().asscalar())
+global_loss_train += loss.sum().asscalar()
 train_batches += 1
 if clip_global_grad_norm:
@@ -350,8 +350,7 @@ class CNNSupervisedTrainer_VGG16:
 tic = time.time()
 if train_batches > 0:
-global_loss_train /= train_batches
+global_loss_train /= (train_batches * batch_size)
 tic = None
@@ -501,7 +500,7 @@ class CNNSupervisedTrainer_VGG16:
 for element in lossList:
 loss = loss + element
-global_loss_test += float(loss.mean().asscalar())
+global_loss_test += loss.sum().asscalar()
 test_batches += 1
 predictions = []
@@ -515,8 +514,7 @@ class CNNSupervisedTrainer_VGG16:
 metric.update(preds=predictions, labels=labels)
 test_metric_score = metric.get()[1]
 if test_batches > 0:
-global_loss_test /= test_batches
+global_loss_test /= (test_batches * batch_size)
 logging.info("Epoch[%d] Train metric: %f, Test metric: %f, Train loss: %f, Test loss: %f" % (epoch, train_metric_score, test_metric_score, global_loss_train, global_loss_test))