monticore / EmbeddedMontiArc / generators / CNNArch2Gluon / Commits

Commit 931555d3, authored Feb 02, 2020 by Julian Treiber

    updated tests for DiceLoss

Parent: a5b6b868
Changes: 11 files
src/test/resources/target_code/CNNSupervisedTrainer_Alexnet.py

@@ -50,13 +50,42 @@ class SoftmaxCrossEntropyLossIgnoreIndices(gluon.loss.Loss):
         if self._sparse_label:
             loss = -pick(pred, label, axis=self._axis, keepdims=True)
         else:
-            label = _reshape_like(F, label, pred)
+            label = gluon.loss._reshape_like(F, label, pred)
             loss = -(pred * label).sum(axis=self._axis, keepdims=True)
         # ignore some indices for loss, e.g. <pad> tokens in NLP applications
         for i in self._ignore_indices:
             loss = loss * mx.nd.logical_not(mx.nd.equal(mx.nd.argmax(pred, axis=1), mx.nd.ones_like(mx.nd.argmax(pred, axis=1)) * i) * mx.nd.equal(mx.nd.argmax(pred, axis=1), label))
         return loss.mean(axis=self._batch_axis, exclude=True)
 
+class DiceLoss(gluon.loss.Loss):
+    def __init__(self, axis=-1, sparse_label=True, from_logits=False, weight=None, batch_axis=0, **kwargs):
+        super(DiceLoss, self).__init__(weight, batch_axis, **kwargs)
+        self._axis = axis
+        self._sparse_label = sparse_label
+        self._from_logits = from_logits
+
+    def dice_loss(self, F, pred, label):
+        smooth = 1.
+        pred_y = F.argmax(pred, axis=self._axis)
+        intersection = pred_y * label
+        score = (2 * F.mean(intersection, axis=self._batch_axis, exclude=True) + smooth) \
+            / (F.mean(label, axis=self._batch_axis, exclude=True) + F.mean(pred_y, axis=self._batch_axis, exclude=True) + smooth)
+        return -F.log(score)
+
+    def hybrid_forward(self, F, pred, label, sample_weight=None):
+        if not self._from_logits:
+            pred = F.log_softmax(pred, self._axis)
+        if self._sparse_label:
+            loss = -F.pick(pred, label, axis=self._axis, keepdims=True)
+        else:
+            label = gluon.loss._reshape_like(F, label, pred)
+            loss = -F.sum(pred * label, axis=self._axis, keepdims=True)
+        loss = gluon.loss._apply_weighting(F, loss, self._weight, sample_weight)
+        diceloss = self.dice_loss(F, pred, label)
+        return F.mean(loss, axis=self._batch_axis, exclude=True) + diceloss
+
 @mx.metric.register
 class BLEU(mx.metric.EvalMetric):
     N = 4
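The DiceLoss added above couples the usual softmax cross-entropy term with the negative log of a smoothed Dice score computed on hard argmax predictions. A minimal sketch of that dice_loss computation with plain mx.nd ops, using made-up shapes and values (not part of the commit):

    import mxnet as mx

    # toy batch: 1 sample, 2 "pixels", 2 classes
    pred = mx.nd.array([[[0.1, 0.9], [0.8, 0.2]]])
    label = mx.nd.array([[1., 0.]])

    smooth = 1.
    pred_y = mx.nd.argmax(pred, axis=-1)            # hard predictions, as in dice_loss above
    intersection = pred_y * label                   # overlap of predicted and true masks
    score = (2 * mx.nd.mean(intersection, axis=0, exclude=True) + smooth) \
        / (mx.nd.mean(label, axis=0, exclude=True) + mx.nd.mean(pred_y, axis=0, exclude=True) + smooth)
    print(-mx.nd.log(score))                        # [0.] here: a perfect prediction adds nothing

Since the argmax matches the label exactly in this toy case, score = (2 * 0.5 + 1) / (0.5 + 0.5 + 1) = 1 and the dice term is -log(1) = 0; any mismatch pushes the term above zero.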
@@ -260,11 +289,15 @@ class CNNSupervisedTrainer_Alexnet:
             loss_function = mx.gluon.loss.SoftmaxCrossEntropyLoss(axis=loss_axis, from_logits=fromLogits, sparse_label=sparseLabel, batch_axis=batch_axis)
         elif loss == 'softmax_cross_entropy_ignore_indices':
             fromLogits = loss_params['from_logits'] if 'from_logits' in loss_params else False
-            loss_function = SoftmaxCrossEntropyLossIgnoreIndices(ignore_indices=ignore_indices, from_logits=fromLogits, sparse_label=sparseLabel, batch_axis=batch_axis)
+            loss_function = SoftmaxCrossEntropyLossIgnoreIndices(axis=loss_axis, ignore_indices=ignore_indices, from_logits=fromLogits, sparse_label=sparseLabel, batch_axis=batch_axis)
         elif loss == 'sigmoid_binary_cross_entropy':
             loss_function = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss()
         elif loss == 'cross_entropy':
             loss_function = CrossEntropyLoss(axis=loss_axis, sparse_label=sparseLabel, batch_axis=batch_axis)
+        elif loss == 'dice_loss':
+            fromLogits = loss_params['from_logits'] if 'from_logits' in loss_params else False
+            dice_weight = loss_params['dice_weight'] if 'dice_weight' in loss_params else None
+            loss_function = DiceLoss(axis=loss_axis, ignore_indices=ignore_indices, from_logits=fromLogits, weight=dice_weight, sparse_label=sparseLabel, batch_axis=batch_axis)
         elif loss == 'l2':
             loss_function = mx.gluon.loss.L2Loss()
         elif loss == 'l1':
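With the new branch, a generated trainer selects the dice loss by name. A hypothetical call-site configuration (key names taken from the branch above; variables such as loss_axis, ignore_indices, sparseLabel, and batch_axis are assumed to be in scope as in the generated trainer):

    loss = 'dice_loss'
    loss_params = {
        'from_logits': False,  # optional; the branch defaults to False when absent
        'dice_weight': 0.5,    # optional; passed to DiceLoss as weight, None when absent
    }
    # the branch above then resolves to:
    loss_function = DiceLoss(axis=loss_axis, ignore_indices=ignore_indices,
                             from_logits=False, weight=0.5,
                             sparse_label=sparseLabel, batch_axis=batch_axis)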
src/test/resources/target_code/CNNSupervisedTrainer_CifarClassifierNetwork.py

@@ -50,13 +50,42 @@ class SoftmaxCrossEntropyLossIgnoreIndices(gluon.loss.Loss):
@@ -260,11 +289,15 @@ class CNNSupervisedTrainer_CifarClassifierNetwork:

Same two hunks as in CNNSupervisedTrainer_Alexnet.py above: the bare _reshape_like call qualified as gluon.loss._reshape_like, the new DiceLoss class, axis=loss_axis added to the SoftmaxCrossEntropyLossIgnoreIndices constructor call, and the new 'dice_loss' branch.
src/test/resources/target_code/CNNSupervisedTrainer_Invariant.py

@@ -50,13 +50,42 @@ class SoftmaxCrossEntropyLossIgnoreIndices(gluon.loss.Loss):
@@ -253,11 +282,15 @@ class CNNSupervisedTrainer_Invariant:

Same two hunks as above.
src/test/resources/target_code/CNNSupervisedTrainer_MultipleStreams.py

@@ -50,13 +50,42 @@ class SoftmaxCrossEntropyLossIgnoreIndices(gluon.loss.Loss):
@@ -253,11 +282,15 @@ class CNNSupervisedTrainer_MultipleStreams:

Same two hunks as above.
src/test/resources/target_code/CNNSupervisedTrainer_RNNencdec.py

@@ -50,13 +50,42 @@ class SoftmaxCrossEntropyLossIgnoreIndices(gluon.loss.Loss):
@@ -253,11 +282,15 @@ class CNNSupervisedTrainer_RNNencdec:

Same two hunks as above.
src/test/resources/target_code/CNNSupervisedTrainer_RNNsearch.py

@@ -50,13 +50,42 @@ class SoftmaxCrossEntropyLossIgnoreIndices(gluon.loss.Loss):
@@ -253,11 +282,15 @@ class CNNSupervisedTrainer_RNNsearch:

Same two hunks as above.
src/test/resources/target_code/CNNSupervisedTrainer_RNNtest.py

@@ -50,13 +50,42 @@ class SoftmaxCrossEntropyLossIgnoreIndices(gluon.loss.Loss):
@@ -253,11 +282,15 @@ class CNNSupervisedTrainer_RNNtest:

Same two hunks as above.
src/test/resources/target_code/CNNSupervisedTrainer_ResNeXt50.py

@@ -50,13 +50,42 @@ class SoftmaxCrossEntropyLossIgnoreIndices(gluon.loss.Loss):
@@ -253,11 +282,15 @@ class CNNSupervisedTrainer_ResNeXt50:

Same two hunks as above.
src/test/resources/target_code/CNNSupervisedTrainer_Show_attend_tell.py

@@ -50,13 +50,42 @@ class SoftmaxCrossEntropyLossIgnoreIndices(gluon.loss.Loss):

Same first hunk as above (the captured diff is cut off partway through this hunk).