monticore / EmbeddedMontiArc / generators / EMADL2CPP · Commits

Commit a8440765, authored Nov 26, 2019 by Christian Fuß

adjusted Show, Attend and Tell model

Parent: 39783063
Pipeline #211319 failed with stages in 29 seconds
Showing 7 changed files with 1981 additions and 16 deletions.
loss.txt (+1959 −0)
src/test/resources/models/showAttendTell/Main.emadl (+1 −1)
src/test/resources/models/showAttendTell/Show_attend_tell.cnnt (+0 −1)
src/test/resources/models/showAttendTell/Show_attend_tell.emadl (+1 −2)
src/test/resources/models/showAttendTell/Show_attend_tell_images_as_input.emadl (+4 −5)
src/test/resources/target_code/gluon/CNNSupervisedTrainer_mnist_mnistClassifier_net.py (+16 −7)
src/test/resources/training_data/Cifar/train_lmdb/lock.mdb (+0 −0)
loss.txt (new file, mode 0 → 100644)
This diff is collapsed.
src/test/resources/models/showAttendTell/Main.emadl

@@ -3,7 +3,7 @@ package showAttendTell;
     component Main {
         ports in  Z(0:255)^{3, 224, 224} images,
               in  Z(-oo:oo)^{64, 2048} data,
-              out Z(0:25316)^{1} target[25];
+              out Z(0:37758)^{1} target[25];

         instance Show_attend_tell net;
src/test/resources/models/showAttendTell/Show_attend_tell.cnnt

@@ -4,7 +4,6 @@ configuration Show_attend_tell{
     context:cpu
     eval_metric:bleu
     loss:softmax_cross_entropy_ignore_indices{
         sparse_label:true
         ignore_indices:2
     }
     use_teacher_forcing:true
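How this configuration reaches the generated code is visible later in this diff; as a quick orientation, here is a hedged sketch (not generator output) of the mapping, with illustrative literal values:

    # Hedged sketch, not generator output: the loss name above selects a branch in the
    # generated trainer further down in this diff, and the keys inside the braces arrive
    # there as a loss_params dict. Literal values here are illustrative only.
    loss = 'softmax_cross_entropy_ignore_indices'
    loss_params = {'ignore_indices': 2}

    fromLogits = loss_params['from_logits'] if 'from_logits' in loss_params else False
    print(loss, loss_params, fromLogits)
    # softmax_cross_entropy_ignore_indices {'ignore_indices': 2} False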
src/test/resources/models/showAttendTell/Show_attend_tell.emadl

@@ -9,7 +9,7 @@ component Show_attend_tell{
     layer LSTM(units=512) decoder;
-    layer FullyConnected(units=256) features;
+    layer FullyConnected(units=256, flatten=false) features;
     layer FullyConnected(units=1, flatten=false) attention;

     0 -> target[0];

@@ -30,7 +30,6 @@ component Show_attend_tell{
     Tanh() ->
     FullyConnected(units=1, flatten=false) ->
     Softmax(axis=0) ->
     Dropout(p=0.25) ->
     attention
     |
     features.output
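The Tanh() -> FullyConnected(units=1, flatten=false) -> Softmax(axis=0) chain scores each of the 64 feature vectors and normalizes the scores into attention weights over the image regions, in the spirit of Show, Attend and Tell. Below is a minimal numpy sketch of that computation; the weight names (W_feat, W_dec, w_score) and the decoder-state term are illustrative assumptions, dropout is omitted, and this is not the code the EMADL generator emits.

    import numpy as np

    def softmax(x, axis=0):
        e = np.exp(x - x.max(axis=axis, keepdims=True))
        return e / e.sum(axis=axis, keepdims=True)

    num_regions, feat_dim, dec_dim, att_dim = 64, 2048, 512, 256   # cf. the data port and layer sizes above

    features = np.random.randn(num_regions, feat_dim)   # 64 image-region feature vectors
    hidden = np.random.randn(dec_dim)                    # LSTM decoder state (units=512)

    W_feat = np.random.randn(feat_dim, att_dim)          # FullyConnected(units=256, flatten=false) on the features
    W_dec = np.random.randn(dec_dim, att_dim)            # assumed projection of the decoder state
    w_score = np.random.randn(att_dim, 1)                # FullyConnected(units=1, flatten=false)

    scores = np.tanh(features @ W_feat + hidden @ W_dec) @ w_score   # Tanh() -> FC(units=1)
    alpha = softmax(scores, axis=0)                                   # Softmax(axis=0): weights over the 64 regions
    context = (alpha * features).sum(axis=0)                          # weighted sum fed to the decoder

    print(alpha.shape, context.shape)   # (64, 1) (2048,)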
src/test/resources/models/showAttendTell/Show_attend_tell_images_as_input.emadl

@@ -11,15 +11,15 @@ component Show_attend_tell_images_as_input{
     layer LSTM(units=512) decoder;
     layer FullyConnected(units=256) features;
     layer FullyConnected(units=256, flatten=false) features;
     layer FullyConnected(units=1, flatten=false) attention;

     0 -> target[0];

     images ->
     Convolution(kernel=(7,7), channels=64, stride=(7,7), padding="valid") ->
     Convolution(kernel=(4,4), channels=64, stride=(4,4), padding="valid") ->
     GlobalPooling(pool_type="max") ->
     Convolution(kernel=(7,7), channels=128, stride=(7,7), padding="valid") ->
     Convolution(kernel=(4,4), channels=128, stride=(4,4), padding="valid") ->
     Reshape(shape=(64, 128)) ->
     features;

     timed<t> GreedySearch(max_length=25){

@@ -36,7 +36,6 @@ component Show_attend_tell_images_as_input{
     Tanh() ->
     FullyConnected(units=1, flatten=false) ->
     Softmax(axis=0) ->
     Dropout(p=0.25) ->
     attention
     |
     features.output
src/test/resources/target_code/gluon/CNNSupervisedTrainer_mnist_mnistClassifier_net.py

@@ -52,7 +52,6 @@ class SoftmaxCrossEntropyLossIgnoreIndices(gluon.loss.Loss):
        else:
            label = _reshape_like(F, label, pred)
        loss = -(pred * label).sum(axis=self._axis, keepdims=True)
        #loss = _apply_weighting(F, loss, self._weight, sample_weight)
        # ignore some indices for loss, e.g. <pad> tokens in NLP applications
        for i in self._ignore_indices:
            loss = loss * mx.nd.logical_not(mx.nd.equal(mx.nd.argmax(pred, axis=1), mx.nd.ones_like(mx.nd.argmax(pred, axis=1)) * i))
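The for-loop above zeroes the loss wherever the arg-max prediction equals one of the ignored indices, e.g. the <pad>-token id 2 configured in the .cnnt file. A minimal standalone illustration of that masking with made-up values (requires MXNet; not part of the generated trainer):

    import mxnet as mx

    pred = mx.nd.array([[0.1, 0.2, 0.7],    # argmax = 2 -> matches the ignored index
                        [0.8, 0.1, 0.1],    # argmax = 0 -> kept
                        [0.2, 0.6, 0.2]])   # argmax = 1 -> kept
    loss = mx.nd.array([1.5, 0.9, 1.2])     # per-position loss values (made up)

    for i in [2]:                           # cf. ignore_indices:2 in the .cnnt file
        keep = mx.nd.logical_not(mx.nd.equal(mx.nd.argmax(pred, axis=1),
                                             mx.nd.ones_like(mx.nd.argmax(pred, axis=1)) * i))
        loss = loss * keep                  # masked positions contribute zero loss

    print(loss.asnumpy())                   # [0.  0.9 1.2]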
@@ -246,7 +245,7 @@ class CNNSupervisedTrainer_mnist_mnistClassifier_net:
        if loss == 'softmax_cross_entropy':
            fromLogits = loss_params['from_logits'] if 'from_logits' in loss_params else False
            loss_function = mx.gluon.loss.SoftmaxCrossEntropyLoss(from_logits=fromLogits, sparse_label=sparseLabel)
-       if loss == 'softmax_cross_entropy_ignore_indices':
+       elif loss == 'softmax_cross_entropy_ignore_indices':
            fromLogits = loss_params['from_logits'] if 'from_logits' in loss_params else False
            loss_function = SoftmaxCrossEntropyLossIgnoreIndices(ignore_indices=ignore_indices, from_logits=fromLogits, sparse_label=sparseLabel)
        elif loss == 'sigmoid_binary_cross_entropy':
@@ -363,7 +362,12 @@ class CNNSupervisedTrainer_mnist_mnistClassifier_net:
                        attention = mx.nd.squeeze(attention)
                        attention_resized = np.resize(attention.asnumpy(), (8, 8))
                        ax = fig.add_subplot(max_length//3, max_length//4, l+2)
-                       if dict[int(labels[l+1][0].asscalar())] == "<end>":
+                       if int(labels[l+1][0].asscalar()) > len(dict):
+                           ax.set_title("<unk>")
+                           img = ax.imshow(train_images[0+test_batch_size*(batch_i)].transpose(1,2,0))
+                           ax.imshow(attention_resized, cmap='gray', alpha=0.6, extent=img.get_extent())
+                           break
+                       elif dict[int(labels[l+1][0].asscalar())] == "<end>":
                            ax.set_title(".")
                            img = ax.imshow(train_images[0+test_batch_size*(batch_i)].transpose(1,2,0))
                            ax.imshow(attention_resized, cmap='gray', alpha=0.6, extent=img.get_extent())
@@ -426,13 +430,18 @@ class CNNSupervisedTrainer_mnist_mnistClassifier_net:
                        attention = mx.nd.squeeze(attention)
                        attention_resized = np.resize(attention.asnumpy(), (8, 8))
                        ax = fig.add_subplot(max_length//3, max_length//4, l+2)
-                       if dict[int(mx.nd.slice_axis(mx.nd.argmax(outputs[l+1], axis=1), axis=0, begin=0, end=1).asscalar())] == "<end>":
+                       if int(mx.nd.slice_axis(outputs[l+1], axis=0, begin=0, end=1).squeeze().asscalar()) > len(dict):
+                           ax.set_title("<unk>")
+                           img = ax.imshow(test_images[0+test_batch_size*(batch_i)].transpose(1,2,0))
+                           ax.imshow(attention_resized, cmap='gray', alpha=0.6, extent=img.get_extent())
+                           break
+                       elif dict[int(mx.nd.slice_axis(outputs[l+1], axis=0, begin=0, end=1).squeeze().asscalar())] == "<end>":
                            ax.set_title(".")
                            img = ax.imshow(test_images[0+test_batch_size*(batch_i)].transpose(1,2,0))
                            ax.imshow(attention_resized, cmap='gray', alpha=0.6, extent=img.get_extent())
                            break
                        else:
-                           ax.set_title(dict[int(mx.nd.slice_axis(mx.nd.argmax(outputs[l+1], axis=1), axis=0, begin=0, end=1).asscalar())])
+                           ax.set_title(dict[int(mx.nd.slice_axis(outputs[l+1], axis=0, begin=0, end=1).squeeze().asscalar())])
                            img = ax.imshow(test_images[0+test_batch_size*(batch_i)].transpose(1,2,0))
                            ax.imshow(attention_resized, cmap='gray', alpha=0.6, extent=img.get_extent())
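The plotting code above resizes the per-step attention weights to an 8×8 grid and draws them as a translucent gray layer over the input image, reusing the extent of the base image. A self-contained matplotlib/numpy sketch of that overlay pattern, with random arrays standing in for a real image and attention map (illustrative only, not the generated trainer):

    import numpy as np
    import matplotlib
    matplotlib.use('Agg')                       # render off-screen
    import matplotlib.pyplot as plt

    image = np.random.rand(224, 224, 3)         # stand-in for one (H, W, C) input image
    attention = np.random.rand(64)              # stand-in for 64 attention weights (one per region)
    attention_resized = np.resize(attention, (8, 8))

    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    img = ax.imshow(image)                      # base image
    ax.imshow(attention_resized, cmap='gray', alpha=0.6,
              extent=img.get_extent())          # stretch the 8x8 map over the same axes extent
    fig.savefig('attention_overlay.png')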
src/test/resources/training_data/Cifar/train_lmdb/lock.mdb

No preview for this file type.
Christian Fuß (@chrifuss) mentioned in commit b0d042711467c67489917d248515f8eb0f785a42 · Dec 06, 2019