Commit 3ce335f0 authored by Sebastian N.'s avatar Sebastian N.
Browse files

Updated constants

parent 6f816979
Pipeline #200242 failed with stages
in 18 seconds
...@@ -266,7 +266,7 @@ class ${tc.fileNameWithoutEnding}: ...@@ -266,7 +266,7 @@ class ${tc.fileNameWithoutEnding}:
if not os.path.isdir(self._net_creator._model_dir_): if not os.path.isdir(self._net_creator._model_dir_):
raise raise
trainers = [mx.gluon.Trainer(network.collect_params(), optimizer, optimizer_params) for network in self._networks.values()] trainers = [mx.gluon.Trainer(network.collect_params(), optimizer, optimizer_params) for network in self._networks.values() if len(network.collect_params().values()) != 0]
margin = loss_params['margin'] if 'margin' in loss_params else 1.0 margin = loss_params['margin'] if 'margin' in loss_params else 1.0
sparseLabel = loss_params['sparse_label'] if 'sparse_label' in loss_params else True sparseLabel = loss_params['sparse_label'] if 'sparse_label' in loss_params else True
...@@ -368,37 +368,6 @@ class ${tc.fileNameWithoutEnding}: ...@@ -368,37 +368,6 @@ class ${tc.fileNameWithoutEnding}:
else: else:
predictions.append(output_name) predictions.append(output_name)
'''
#Compute BLEU and NIST Score if data folder contains a dictionary -> NLP dataset
if(os.path.isfile('src/test/resources/training_data/Show_attend_tell/dict.pkl')):
with open('src/test/resources/training_data/Show_attend_tell/dict.pkl', 'rb') as f:
dict = pickle.load(f)
import nltk.translate.bleu_score
import nltk.translate.nist_score
prediction = []
for index in range(batch_size):
sentence = ''
for entry in predictions:
sentence += dict[int(entry[index].asscalar())] + ' '
prediction.append(sentence)
for index in range(batch_size):
sentence = ''
for batchEntry in batch.label:
sentence += dict[int(batchEntry[index].asscalar())] + ' '
print("############################")
print("label1: ", sentence)
print("prediction1: ", prediction[index])
BLEUscore = nltk.translate.bleu_score.sentence_bleu([sentence], prediction[index])
NISTscore = nltk.translate.nist_score.sentence_nist([sentence], prediction[index])
print("BLEU: ", BLEUscore)
print("NIST: ", NISTscore)
print("############################")
'''
metric.update(preds=predictions, labels=labels) metric.update(preds=predictions, labels=labels)
train_metric_score = metric.get()[1] train_metric_score = metric.get()[1]
......
...@@ -266,7 +266,7 @@ class CNNSupervisedTrainer_Alexnet: ...@@ -266,7 +266,7 @@ class CNNSupervisedTrainer_Alexnet:
if not os.path.isdir(self._net_creator._model_dir_): if not os.path.isdir(self._net_creator._model_dir_):
raise raise
trainers = [mx.gluon.Trainer(network.collect_params(), optimizer, optimizer_params) for network in self._networks.values()] trainers = [mx.gluon.Trainer(network.collect_params(), optimizer, optimizer_params) for network in self._networks.values() if len(network.collect_params().values()) != 0]
margin = loss_params['margin'] if 'margin' in loss_params else 1.0 margin = loss_params['margin'] if 'margin' in loss_params else 1.0
sparseLabel = loss_params['sparse_label'] if 'sparse_label' in loss_params else True sparseLabel = loss_params['sparse_label'] if 'sparse_label' in loss_params else True
...@@ -366,37 +366,6 @@ class CNNSupervisedTrainer_Alexnet: ...@@ -366,37 +366,6 @@ class CNNSupervisedTrainer_Alexnet:
else: else:
predictions.append(output_name) predictions.append(output_name)
'''
#Compute BLEU and NIST Score if data folder contains a dictionary -> NLP dataset
if(os.path.isfile('src/test/resources/training_data/Show_attend_tell/dict.pkl')):
with open('src/test/resources/training_data/Show_attend_tell/dict.pkl', 'rb') as f:
dict = pickle.load(f)
import nltk.translate.bleu_score
import nltk.translate.nist_score
prediction = []
for index in range(batch_size):
sentence = ''
for entry in predictions:
sentence += dict[int(entry[index].asscalar())] + ' '
prediction.append(sentence)
for index in range(batch_size):
sentence = ''
for batchEntry in batch.label:
sentence += dict[int(batchEntry[index].asscalar())] + ' '
print("############################")
print("label1: ", sentence)
print("prediction1: ", prediction[index])
BLEUscore = nltk.translate.bleu_score.sentence_bleu([sentence], prediction[index])
NISTscore = nltk.translate.nist_score.sentence_nist([sentence], prediction[index])
print("BLEU: ", BLEUscore)
print("NIST: ", NISTscore)
print("############################")
'''
metric.update(preds=predictions, labels=labels) metric.update(preds=predictions, labels=labels)
train_metric_score = metric.get()[1] train_metric_score = metric.get()[1]
......
...@@ -266,7 +266,7 @@ class CNNSupervisedTrainer_CifarClassifierNetwork: ...@@ -266,7 +266,7 @@ class CNNSupervisedTrainer_CifarClassifierNetwork:
if not os.path.isdir(self._net_creator._model_dir_): if not os.path.isdir(self._net_creator._model_dir_):
raise raise
trainers = [mx.gluon.Trainer(network.collect_params(), optimizer, optimizer_params) for network in self._networks.values()] trainers = [mx.gluon.Trainer(network.collect_params(), optimizer, optimizer_params) for network in self._networks.values() if len(network.collect_params().values()) != 0]
margin = loss_params['margin'] if 'margin' in loss_params else 1.0 margin = loss_params['margin'] if 'margin' in loss_params else 1.0
sparseLabel = loss_params['sparse_label'] if 'sparse_label' in loss_params else True sparseLabel = loss_params['sparse_label'] if 'sparse_label' in loss_params else True
...@@ -366,37 +366,6 @@ class CNNSupervisedTrainer_CifarClassifierNetwork: ...@@ -366,37 +366,6 @@ class CNNSupervisedTrainer_CifarClassifierNetwork:
else: else:
predictions.append(output_name) predictions.append(output_name)
'''
#Compute BLEU and NIST Score if data folder contains a dictionary -> NLP dataset
if(os.path.isfile('src/test/resources/training_data/Show_attend_tell/dict.pkl')):
with open('src/test/resources/training_data/Show_attend_tell/dict.pkl', 'rb') as f:
dict = pickle.load(f)
import nltk.translate.bleu_score
import nltk.translate.nist_score
prediction = []
for index in range(batch_size):
sentence = ''
for entry in predictions:
sentence += dict[int(entry[index].asscalar())] + ' '
prediction.append(sentence)
for index in range(batch_size):
sentence = ''
for batchEntry in batch.label:
sentence += dict[int(batchEntry[index].asscalar())] + ' '
print("############################")
print("label1: ", sentence)
print("prediction1: ", prediction[index])
BLEUscore = nltk.translate.bleu_score.sentence_bleu([sentence], prediction[index])
NISTscore = nltk.translate.nist_score.sentence_nist([sentence], prediction[index])
print("BLEU: ", BLEUscore)
print("NIST: ", NISTscore)
print("############################")
'''
metric.update(preds=predictions, labels=labels) metric.update(preds=predictions, labels=labels)
train_metric_score = metric.get()[1] train_metric_score = metric.get()[1]
......
...@@ -266,7 +266,7 @@ class CNNSupervisedTrainer_VGG16: ...@@ -266,7 +266,7 @@ class CNNSupervisedTrainer_VGG16:
if not os.path.isdir(self._net_creator._model_dir_): if not os.path.isdir(self._net_creator._model_dir_):
raise raise
trainers = [mx.gluon.Trainer(network.collect_params(), optimizer, optimizer_params) for network in self._networks.values()] trainers = [mx.gluon.Trainer(network.collect_params(), optimizer, optimizer_params) for network in self._networks.values() if len(network.collect_params().values()) != 0]
margin = loss_params['margin'] if 'margin' in loss_params else 1.0 margin = loss_params['margin'] if 'margin' in loss_params else 1.0
sparseLabel = loss_params['sparse_label'] if 'sparse_label' in loss_params else True sparseLabel = loss_params['sparse_label'] if 'sparse_label' in loss_params else True
...@@ -366,37 +366,6 @@ class CNNSupervisedTrainer_VGG16: ...@@ -366,37 +366,6 @@ class CNNSupervisedTrainer_VGG16:
else: else:
predictions.append(output_name) predictions.append(output_name)
'''
#Compute BLEU and NIST Score if data folder contains a dictionary -> NLP dataset
if(os.path.isfile('src/test/resources/training_data/Show_attend_tell/dict.pkl')):
with open('src/test/resources/training_data/Show_attend_tell/dict.pkl', 'rb') as f:
dict = pickle.load(f)
import nltk.translate.bleu_score
import nltk.translate.nist_score
prediction = []
for index in range(batch_size):
sentence = ''
for entry in predictions:
sentence += dict[int(entry[index].asscalar())] + ' '
prediction.append(sentence)
for index in range(batch_size):
sentence = ''
for batchEntry in batch.label:
sentence += dict[int(batchEntry[index].asscalar())] + ' '
print("############################")
print("label1: ", sentence)
print("prediction1: ", prediction[index])
BLEUscore = nltk.translate.bleu_score.sentence_bleu([sentence], prediction[index])
NISTscore = nltk.translate.nist_score.sentence_nist([sentence], prediction[index])
print("BLEU: ", BLEUscore)
print("NIST: ", NISTscore)
print("############################")
'''
metric.update(preds=predictions, labels=labels) metric.update(preds=predictions, labels=labels)
train_metric_score = metric.get()[1] train_metric_score = metric.get()[1]
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment