Skip to content
Snippets Groups Projects
Commit 9c7f43fa authored by Christian Fuß's avatar Christian Fuß
Browse files

adjusted target code files

parent 96c688b9
No related branches found
No related tags found
1 merge request: !23 "Added Unroll-related features and layers"
Pipeline #191798 failed
......@@ -25,8 +25,10 @@ class CNNDataLoader_Alexnet:
data_std[input_name + '_'] = nd.array(train_h5[input_name][:].std(axis=0) + 1e-5)
train_label = {}
index = 0
for output_name in self._output_names_:
train_label[output_name] = train_h5[output_name]
train_label[index] = train_h5[output_name]
index += 1
train_iter = mx.io.NDArrayIter(data=train_data,
label=train_label,
......@@ -40,8 +42,10 @@ class CNNDataLoader_Alexnet:
test_data[input_name] = test_h5[input_name]
test_label = {}
index = 0
for output_name in self._output_names_:
test_label[output_name] = test_h5[output_name]
test_label[index] = test_h5[output_name]
index += 1
test_iter = mx.io.NDArrayIter(data=test_data,
label=test_label,
......
......@@ -25,8 +25,10 @@ class CNNDataLoader_CifarClassifierNetwork:
data_std[input_name + '_'] = nd.array(train_h5[input_name][:].std(axis=0) + 1e-5)
train_label = {}
index = 0
for output_name in self._output_names_:
train_label[output_name] = train_h5[output_name]
train_label[index] = train_h5[output_name]
index += 1
train_iter = mx.io.NDArrayIter(data=train_data,
label=train_label,
......@@ -40,8 +42,10 @@ class CNNDataLoader_CifarClassifierNetwork:
test_data[input_name] = test_h5[input_name]
test_label = {}
index = 0
for output_name in self._output_names_:
test_label[output_name] = test_h5[output_name]
test_label[index] = test_h5[output_name]
index += 1
test_iter = mx.io.NDArrayIter(data=test_data,
label=test_label,
......
......@@ -25,8 +25,10 @@ class CNNDataLoader_VGG16:
data_std[input_name + '_'] = nd.array(train_h5[input_name][:].std(axis=0) + 1e-5)
train_label = {}
index = 0
for output_name in self._output_names_:
train_label[output_name] = train_h5[output_name]
train_label[index] = train_h5[output_name]
index += 1
train_iter = mx.io.NDArrayIter(data=train_data,
label=train_label,
......@@ -40,8 +42,10 @@ class CNNDataLoader_VGG16:
test_data[input_name] = test_h5[input_name]
test_label = {}
index = 0
for output_name in self._output_names_:
test_label[output_name] = test_h5[output_name]
test_label[index] = test_h5[output_name]
index += 1
test_iter = mx.io.NDArrayIter(data=test_data,
label=test_label,
......
......@@ -4,6 +4,7 @@ import numpy as np
import time
import os
import shutil
import pickle
from mxnet import gluon, autograd, nd
class CrossEntropyLoss(gluon.loss.Loss):
......@@ -31,6 +32,7 @@ class LogCoshLoss(gluon.loss.Loss):
loss = gluon.loss._apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
class CNNSupervisedTrainer_Alexnet:
def applyBeamSearch(input, length, width, maxLength, currProb, netIndex, bestOutput):
bestProb = 0.0
......@@ -168,6 +170,8 @@ class CNNSupervisedTrainer_Alexnet:
data_ = batch.data[0].as_in_context(mx_context)
predictions_label = batch.label[0].as_in_context(mx_context)
outputs=[]
with autograd.record():
predictions_ = mx.nd.zeros((batch_size, 10,), ctx=mx_context)
......@@ -224,8 +228,34 @@ class CNNSupervisedTrainer_Alexnet:
else:
predictions.append(output_name)
#print [word[0] for word in predictions]
#print labels[0]
#Compute BLEU and NIST Score if data folder contains a dictionary -> NLP dataset
if(os.path.isfile('data/Alexnet/dict.pkl')):
with open('data/Alexnet/dict.pkl', 'rb') as f:
dict = pickle.load(f)
import nltk.translate.bleu_score
import nltk.translate.nist_score
prediction = []
for index in range(batch_size):
sentence = ''
for entry in predictions:
sentence += dict[int(entry[index].asscalar())] + ' '
prediction.append(sentence)
for index in range(batch_size):
sentence = ''
for batchEntry in batch.label:
sentence += dict[int(batchEntry[index].asscalar())] + ' '
print "############################"
print "label: ", sentence
print "prediction: ", prediction[index]
BLEUscore = nltk.translate.bleu_score.sentence_bleu([sentence], prediction[index])
NISTscore = nltk.translate.nist_score.sentence_nist([sentence], prediction[index])
print "BLEU: ", BLEUscore
print "NIST: ", NISTscore
print "############################"
metric.update(preds=predictions, labels=labels)
train_metric_score = metric.get()[1]
......
......@@ -4,6 +4,7 @@ import numpy as np
import time
import os
import shutil
import pickle
from mxnet import gluon, autograd, nd
class CrossEntropyLoss(gluon.loss.Loss):
......@@ -31,6 +32,7 @@ class LogCoshLoss(gluon.loss.Loss):
loss = gluon.loss._apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
class CNNSupervisedTrainer_CifarClassifierNetwork:
def applyBeamSearch(input, length, width, maxLength, currProb, netIndex, bestOutput):
bestProb = 0.0
......@@ -168,6 +170,8 @@ class CNNSupervisedTrainer_CifarClassifierNetwork:
data_ = batch.data[0].as_in_context(mx_context)
softmax_label = batch.label[0].as_in_context(mx_context)
outputs=[]
with autograd.record():
softmax_ = mx.nd.zeros((batch_size, 10,), ctx=mx_context)
......@@ -224,8 +228,34 @@ class CNNSupervisedTrainer_CifarClassifierNetwork:
else:
predictions.append(output_name)
#print [word[0] for word in predictions]
#print labels[0]
#Compute BLEU and NIST Score if data folder contains a dictionary -> NLP dataset
if(os.path.isfile('data/CifarClassifierNetwork/dict.pkl')):
with open('data/CifarClassifierNetwork/dict.pkl', 'rb') as f:
dict = pickle.load(f)
import nltk.translate.bleu_score
import nltk.translate.nist_score
prediction = []
for index in range(batch_size):
sentence = ''
for entry in predictions:
sentence += dict[int(entry[index].asscalar())] + ' '
prediction.append(sentence)
for index in range(batch_size):
sentence = ''
for batchEntry in batch.label:
sentence += dict[int(batchEntry[index].asscalar())] + ' '
print "############################"
print "label: ", sentence
print "prediction: ", prediction[index]
BLEUscore = nltk.translate.bleu_score.sentence_bleu([sentence], prediction[index])
NISTscore = nltk.translate.nist_score.sentence_nist([sentence], prediction[index])
print "BLEU: ", BLEUscore
print "NIST: ", NISTscore
print "############################"
metric.update(preds=predictions, labels=labels)
train_metric_score = metric.get()[1]
......
......@@ -4,6 +4,7 @@ import numpy as np
import time
import os
import shutil
import pickle
from mxnet import gluon, autograd, nd
class CrossEntropyLoss(gluon.loss.Loss):
......@@ -31,6 +32,7 @@ class LogCoshLoss(gluon.loss.Loss):
loss = gluon.loss._apply_weighting(F, loss, self._weight, sample_weight)
return F.mean(loss, axis=self._batch_axis, exclude=True)
class CNNSupervisedTrainer_VGG16:
def applyBeamSearch(input, length, width, maxLength, currProb, netIndex, bestOutput):
bestProb = 0.0
......@@ -168,6 +170,8 @@ class CNNSupervisedTrainer_VGG16:
data_ = batch.data[0].as_in_context(mx_context)
predictions_label = batch.label[0].as_in_context(mx_context)
outputs=[]
with autograd.record():
predictions_ = mx.nd.zeros((batch_size, 1000,), ctx=mx_context)
......@@ -224,8 +228,34 @@ class CNNSupervisedTrainer_VGG16:
else:
predictions.append(output_name)
#print [word[0] for word in predictions]
#print labels[0]
#Compute BLEU and NIST Score if data folder contains a dictionary -> NLP dataset
if(os.path.isfile('data/VGG16/dict.pkl')):
with open('data/VGG16/dict.pkl', 'rb') as f:
dict = pickle.load(f)
import nltk.translate.bleu_score
import nltk.translate.nist_score
prediction = []
for index in range(batch_size):
sentence = ''
for entry in predictions:
sentence += dict[int(entry[index].asscalar())] + ' '
prediction.append(sentence)
for index in range(batch_size):
sentence = ''
for batchEntry in batch.label:
sentence += dict[int(batchEntry[index].asscalar())] + ' '
print "############################"
print "label: ", sentence
print "prediction: ", prediction[index]
BLEUscore = nltk.translate.bleu_score.sentence_bleu([sentence], prediction[index])
NISTscore = nltk.translate.nist_score.sentence_nist([sentence], prediction[index])
print "BLEU: ", BLEUscore
print "NIST: ", NISTscore
print "############################"
metric.update(preds=predictions, labels=labels)
train_metric_score = metric.get()[1]
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment