monticore / EmbeddedMontiArc / generators / EMADL2CPP

Commit dd6a63da, authored Aug 13, 2019 by Evgeny Kusmenko

Merge branch 'develop' into 'master'

Implemented layer variables and RNN layer

See merge request !25

Parents: 3a697803, 4b9440cf
Pipeline #171324 passed with stages in 8 minutes and 13 seconds
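This merge introduces layer variables: a `layer` declaration inside an `implementation CNN` block binds a named layer instance (here an RNN) whose output and hidden state can be wired explicitly between streams. A minimal sketch of the syntax, condensed from the RNNtest model added by this commit (the names, units, and two-stream wiring follow that test model):

    implementation CNN {
        layer RNN(units=500, layers=2) encoder;   // named, reusable layer instance
        layer RNN(units=500, layers=2) decoder;

        source[0] -> encoder;                     // first input stream feeds the encoder
        encoder.state -> decoder.state;           // hand the encoder's hidden state to the decoder
        source[1] -> decoder -> target[1];        // second stream is decoded into the second output
    }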
Showing 34 changed files with 209 additions and 161 deletions
pom.xml  +5 -5
src/main/java/de/monticore/lang/monticar/emadl/generator/EMADLGenerator.java  +1 -1
src/test/java/de/monticore/lang/monticar/emadl/GenerationTest.java  +8 -0
src/test/resources/models/RNNtest.cnnt  +11 -0
src/test/resources/models/RNNtest.emadl  +22 -0
src/test/resources/models/data_paths.txt  +2 -1
src/test/resources/target_code/CNNCreator_cifar10_cifar10Classifier_net.py  +24 -21
src/test/resources/target_code/CNNCreator_mnist_mnistClassifier_net.py  +10 -10
src/test/resources/target_code/CNNPredictor_cifar10_cifar10Classifier_net.h  +8 -8
src/test/resources/target_code/CNNPredictor_mnist_mnistClassifier_net.h  +5 -5
src/test/resources/target_code/cifar10_cifar10Classifier_net.h  +3 -3
src/test/resources/target_code/gluon/CNNCreator_mnist_mnistClassifier_net.py  +1 -1
src/test/resources/target_code/gluon/CNNDataLoader_mnist_mnistClassifier_net.py  +2 -2
src/test/resources/target_code/gluon/CNNNet_mnist_mnistClassifier_net.py  +9 -9
src/test/resources/target_code/gluon/CNNPredictor_mnist_mnistClassifier_net.h  +5 -5
src/test/resources/target_code/gluon/CNNSupervisedTrainer_mnist_mnistClassifier_net.py  +14 -11
src/test/resources/target_code/gluon/mnist_mnistClassifier_net.h  +4 -4
src/test/resources/target_code/gluon/reinforcementModel/cartpole/CNNCreator_cartpole_master_dqn.py  +1 -1
src/test/resources/target_code/gluon/reinforcementModel/cartpole/CNNNet_cartpole_master_dqn.py  +9 -9
src/test/resources/target_code/gluon/reinforcementModel/cartpole/CNNPredictor_cartpole_master_dqn.h  +5 -5
src/test/resources/target_code/gluon/reinforcementModel/cartpole/cartpole_master_dqn.h  +4 -4
src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNCreator_mountaincar_master_actor.py  +1 -1
src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNNet_mountaincar_master_actor.py  +9 -9
src/test/resources/target_code/gluon/reinforcementModel/mountaincar/CNNPredictor_mountaincar_master_actor.h  +5 -5
src/test/resources/target_code/gluon/reinforcementModel/mountaincar/mountaincar_master_actor.h  +4 -4
src/test/resources/target_code/gluon/reinforcementModel/mountaincar/reinforcement_learning/CNNCreator_mountaincar_agent_mountaincarCritic.py  +1 -1
src/test/resources/target_code/gluon/reinforcementModel/mountaincar/reinforcement_learning/CNNNet_mountaincar_agent_mountaincarCritic.py  +14 -14
src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNCreator_torcs_agent_torcsAgent_dqn.py  +1 -1
src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNNet_torcs_agent_torcsAgent_dqn.py  +9 -9
src/test/resources/target_code/gluon/reinforcementModel/torcs/CNNPredictor_torcs_agent_torcsAgent_dqn.h  +5 -5
src/test/resources/target_code/gluon/reinforcementModel/torcs/torcs_agent_torcsAgent_dqn.h  +4 -4
src/test/resources/target_code/mnist_mnistClassifier_net.h  +3 -3
src/test/resources/training_data/MultipleStreams/test.h5  +0 -0
src/test/resources/training_data/MultipleStreams/train.h5  +0 -0
pom.xml

@@ -15,12 +15,12 @@
     <properties>
         <!-- .. SE-Libraries .................................................. -->
-        <emadl.version>0.2.8-SNAPSHOT</emadl.version>
+        <emadl.version>0.2.9-SNAPSHOT</emadl.version>
         <CNNTrain.version>0.3.6-SNAPSHOT</CNNTrain.version>
-        <cnnarch-generator.version>0.0.2-SNAPSHOT</cnnarch-generator.version>
-        <cnnarch-mxnet-generator.version>0.2.16-SNAPSHOT</cnnarch-mxnet-generator.version>
-        <cnnarch-caffe2-generator.version>0.2.12-SNAPSHOT</cnnarch-caffe2-generator.version>
-        <cnnarch-gluon-generator.version>0.2.6-SNAPSHOT</cnnarch-gluon-generator.version>
+        <cnnarch-generator.version>0.0.3-SNAPSHOT</cnnarch-generator.version>
+        <cnnarch-mxnet-generator.version>0.2.17-SNAPSHOT</cnnarch-mxnet-generator.version>
+        <cnnarch-caffe2-generator.version>0.2.13-SNAPSHOT</cnnarch-caffe2-generator.version>
+        <cnnarch-gluon-generator.version>0.2.7-SNAPSHOT</cnnarch-gluon-generator.version>
         <embedded-montiarc-math-opt-generator>0.1.4</embedded-montiarc-math-opt-generator>
         <!-- .. Libraries .................................................. -->
src/main/java/de/monticore/lang/monticar/emadl/generator/EMADLGenerator.java

@@ -448,7 +448,7 @@ public class EMADLGenerator {
         int i = 0;
         for (SerialCompositeElementSymbol stream : architecture.getStreams()) {
-            if (stream.isNetwork()) {
+            if (stream.isTrainable()) {
                 networkAttributes += "\n" + predictorClassName + "_" + i + " _predictor_" + i + "_;";
             }
src/test/java/de/monticore/lang/monticar/emadl/GenerationTest.java

@@ -198,6 +198,14 @@ public class GenerationTest extends AbstractSymtabTest {
         assertTrue(Log.getFindings().size() == 0);
     }

+    @Test
+    public void testRNNtestForGluon() throws IOException, TemplateException {
+        Log.getFindings().clear();
+        String[] args = {"-m", "src/test/resources/models/", "-r", "RNNtest", "-b", "GLUON", "-f", "n", "-c", "n"};
+        EMADLGeneratorCli.main(args);
+        assertTrue(Log.getFindings().size() == 0);
+    }
+
     @Test
     public void testGluonReinforcementModelGymEnvironment() {
         Log.getFindings().clear();
src/test/resources/models/RNNtest.cnnt (new file)

configuration RNNtest{
    num_epoch:10
    batch_size:5
    context:cpu
    optimizer:adam{
        learning_rate:0.01
        learning_rate_decay:0.8
        step_size:1000
        weight_decay:0.0001
    }
}
src/test/resources/models/RNNtest.emadl (new file)

component RNNtest{
    ports in Q(-oo:oo)^{50, 30001} source[2],
          out Q(-oo:oo)^{50, 30001} target[2];

    implementation CNN {
        layer RNN(units=500, layers=2) encoder;
        layer RNN(units=500, layers=2) decoder;

        source[0] ->
        encoder;

        encoder.output ->
        target[0];

        encoder.state ->
        decoder.state;

        source[1] ->
        decoder ->
        target[1];
    }
}
\ No newline at end of file
src/test/resources/models/data_paths.txt

@@ -11,4 +11,5 @@ mnist.LeNetNetwork data/mnist.LeNetNetwork
 MultipleInputs src/test/resources/training_data/MultipleInputs
 MultipleOutputs src/test/resources/training_data/MultipleOutputs
 MultipleStreams src/test/resources/training_data/MultipleStreams
-Invariant src/test/resources/training_data/Invariant
\ No newline at end of file
+Invariant src/test/resources/training_data/Invariant
+RNNtest data/RNNtest
\ No newline at end of file
src/test/resources/target_code/CNNCreator_cifar10_cifar10Classifier_net.py

@@ -21,9 +21,12 @@ class CNNCreator_cifar10_cifar10Classifier_net:
     _data_dir_ = "src/test/resources/training_data/Cifar/"
     _model_dir_ = "model/cifar10.CifarNetwork/"
     _model_prefix_ = "model"
-    _input_names_ = ['data']
+    _input_names_ = ['data_']
     _input_shapes_ = [(3,32,32)]
-    _output_names_ = ['softmax_label']
+    _output_names_ = ['softmax__label']
+    _input_data_names_ = ['data']
+    _output_data_names_ = ['softmax_label']

     def load(self, context):

@@ -62,18 +65,18 @@ class CNNCreator_cifar10_cifar10Classifier_net:
     def load_data(self, batch_size):
         train_h5, test_h5 = self.load_h5_files()

-        data_mean = train_h5[self._input_names_[0]][:].mean(axis=0)
-        data_std = train_h5[self._input_names_[0]][:].std(axis=0) + 1e-5
+        data_mean = train_h5[self._input_data_names_[0]][:].mean(axis=0)
+        data_std = train_h5[self._input_data_names_[0]][:].std(axis=0) + 1e-5

-        train_iter = mx.io.NDArrayIter(train_h5[self._input_names_[0]],
-                                       train_h5[self._output_names_[0]],
+        train_iter = mx.io.NDArrayIter(train_h5[self._input_data_names_[0]],
+                                       train_h5[self._output_data_names_[0]],
                                        batch_size=batch_size,
                                        data_name=self._input_names_[0],
                                        label_name=self._output_names_[0])
         test_iter = None
         if test_h5 != None:
-            test_iter = mx.io.NDArrayIter(test_h5[self._input_names_[0]],
-                                          test_h5[self._output_names_[0]],
+            test_iter = mx.io.NDArrayIter(test_h5[self._input_data_names_[0]],
+                                          test_h5[self._output_data_names_[0]],
                                           batch_size=batch_size,
                                           data_name=self._input_names_[0],
                                           label_name=self._output_names_[0])

@@ -86,16 +89,16 @@ class CNNCreator_cifar10_cifar10Classifier_net:
         test_path = self._data_dir_ + "test.h5"
         if os.path.isfile(train_path):
             train_h5 = h5py.File(train_path, 'r')
-            if not (self._input_names_[0] in train_h5 and self._output_names_[0] in train_h5):
+            if not (self._input_data_names_[0] in train_h5 and self._output_data_names_[0] in train_h5):
                 logging.error("The HDF5 file '" + os.path.abspath(train_path) + "' has to contain the datasets: "
-                              + "'" + self._input_names_[0] + "', '" + self._output_names_[0] + "'")
+                              + "'" + self._input_data_names_[0] + "', '" + self._output_data_names_[0] + "'")
                 sys.exit(1)
             test_iter = None
             if os.path.isfile(test_path):
                 test_h5 = h5py.File(test_path, 'r')
-                if not (self._input_names_[0] in test_h5 and self._output_names_[0] in test_h5):
+                if not (self._input_data_names_[0] in test_h5 and self._output_data_names_[0] in test_h5):
                     logging.error("The HDF5 file '" + os.path.abspath(test_path) + "' has to contain the datasets: "
-                                  + "'" + self._input_names_[0] + "', '" + self._output_names_[0] + "'")
+                                  + "'" + self._input_data_names_[0] + "', '" + self._output_data_names_[0] + "'")
                     sys.exit(1)
             else:
                 logging.warning("Couldn't load test set. File '" + os.path.abspath(test_path) + "' does not exist.")

@@ -254,9 +257,9 @@ class CNNCreator_cifar10_cifar10Classifier_net:
     def construct(self, context, data_mean=None, data_std=None):
-        data = mx.sym.var("data",
+        data_ = mx.sym.var("data_",
                           shape=(0,3,32,32))
-        # data, output shape: {[3,32,32]}
+        # data_, output shape: {[3,32,32]}
         if not data_mean is None:
             assert(not data_std is None)

@@ -264,9 +267,9 @@ class CNNCreator_cifar10_cifar10Classifier_net:
            _data_mean_ = mx.sym.BlockGrad(_data_mean_)
            _data_std_ = mx.sym.Variable("_data_std_", shape=(3,32,32), init=MyConstant(value=data_mean.tolist()))
            _data_std_ = mx.sym.BlockGrad(_data_std_)
-           data = mx.symbol.broadcast_sub(data, _data_mean_)
-           data = mx.symbol.broadcast_div(data, _data_std_)
-       conv2_1_ = mx.symbol.pad(data=data,
+           data_ = mx.symbol.broadcast_sub(data_, _data_mean_)
+           data_ = mx.symbol.broadcast_div(data_, _data_std_)
+       conv2_1_ = mx.symbol.pad(data=data_,
            mode='constant',
            pad_width=(0,0,0,0,1,1,1,1),
            constant_value=0)

@@ -300,7 +303,7 @@ class CNNCreator_cifar10_cifar10Classifier_net:
        batchnorm3_1_ = mx.symbol.BatchNorm(data=conv3_1_,
            fix_gamma=True,
            name="batchnorm3_1_")
-       conv2_2_ = mx.symbol.Convolution(data=data,
+       conv2_2_ = mx.symbol.Convolution(data=data_,
            kernel=(1,1),
            stride=(1,1),
            num_filter=8,

@@ -741,10 +744,10 @@ class CNNCreator_cifar10_cifar10Classifier_net:
        softmax32_ = mx.symbol.softmax(data=fc32_,
            axis=1,
            name="softmax32_")
-       softmax = mx.symbol.SoftmaxOutput(data=softmax32_,
-           name="softmax")
+       softmax_ = mx.symbol.SoftmaxOutput(data=softmax32_,
+           name="softmax_")

-       self.module = mx.mod.Module(symbol=mx.symbol.Group([softmax]),
+       self.module = mx.mod.Module(symbol=mx.symbol.Group([softmax_]),
                                    data_names=self._input_names_,
                                    label_names=self._output_names_,
                                    context=context)
src/test/resources/target_code/CNNCreator_mnist_mnistClassifier_net.py

@@ -72,9 +72,9 @@ class CNNCreator_mnist_mnistClassifier_net:
     def create_model(self, model, data, device_opts, is_test):
         with core.DeviceScope(device_opts):
-            image = data
-            # image, output shape: {[1,28,28]}
-            conv1_ = brew.conv(model, image, 'conv1_', dim_in=1, dim_out=20, kernel=5, stride=1)
+            image_ = data
+            # image_, output shape: {[1,28,28]}
+            conv1_ = brew.conv(model, image_, 'conv1_', dim_in=1, dim_out=20, kernel=5, stride=1)
             # conv1_, output shape: {[20,24,24]}
             pool1_ = brew.max_pool(model, conv1_, 'pool1_', kernel=2, stride=2)
             # pool1_, output shape: {[20,12,12]}

@@ -87,9 +87,9 @@ class CNNCreator_mnist_mnistClassifier_net:
             relu2_ = brew.relu(model, fc2_, fc2_)
             fc3_ = brew.fc(model, relu2_, 'fc3_', dim_in=500, dim_out=10)
             # fc3_, output shape: {[10,1,1]}
-            predictions = brew.softmax(model, fc3_, 'predictions')
+            predictions_ = brew.softmax(model, fc3_, 'predictions_')

-            return predictions
+            return predictions_

    # this adds the loss and optimizer
    def add_training_operators(self, model, output, label, device_opts, loss, opt_type, base_learning_rate, policy, stepsize, epsilon, beta1, beta2, gamma, momentum) :

@@ -150,10 +150,10 @@ class CNNCreator_mnist_mnistClassifier_net:
         # == Training model ==
         train_model = model_helper.ModelHelper(name="train_net", arg_scope=arg_scope)
         data, label, train_dataset_size = self.add_input(train_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'train_lmdb'), db_type='lmdb', device_opts=device_opts)
-        predictions = self.create_model(train_model, data, device_opts=device_opts, is_test=False)
-        self.add_training_operators(train_model, predictions, label, device_opts, loss, opt_type, base_learning_rate, policy, stepsize, epsilon, beta1, beta2, gamma, momentum)
+        predictions_ = self.create_model(train_model, data, device_opts=device_opts, is_test=False)
+        self.add_training_operators(train_model, predictions_, label, device_opts, loss, opt_type, base_learning_rate, policy, stepsize, epsilon, beta1, beta2, gamma, momentum)
         if not loss == 'euclidean':
-            self.add_accuracy(train_model, predictions, label, device_opts, eval_metric)
+            self.add_accuracy(train_model, predictions_, label, device_opts, eval_metric)
         with core.DeviceScope(device_opts):
             brew.add_weight_decay(train_model, weight_decay)

@@ -185,9 +185,9 @@ class CNNCreator_mnist_mnistClassifier_net:
         # == Testing model. ==
         test_model = model_helper.ModelHelper(name="test_net", arg_scope=arg_scope, init_params=False)
         data, label, test_dataset_size = self.add_input(test_model, batch_size=batch_size, db=os.path.join(self._data_dir_, 'test_lmdb'), db_type='lmdb', device_opts=device_opts)
-        predictions = self.create_model(test_model, data, device_opts=device_opts, is_test=True)
+        predictions_ = self.create_model(test_model, data, device_opts=device_opts, is_test=True)
         if not loss == 'euclidean':
-            self.add_accuracy(test_model, predictions, label, device_opts, eval_metric)
+            self.add_accuracy(test_model, predictions_, label, device_opts, eval_metric)
         workspace.RunNetOnce(test_model.param_init_net)
         workspace.CreateNet(test_model.net, overwrite=True)
src/test/resources/target_code/CNNPredictor_cifar10_cifar10Classifier_net.h

@@ -13,8 +13,9 @@ class CNNPredictor_cifar10_cifar10Classifier_net_0{
 public:
     const std::string json_file = "model/cifar10.CifarNetwork/model_newest-symbol.json";
     const std::string param_file = "model/cifar10.CifarNetwork/model_newest-0000.params";
-    const std::vector<std::string> input_keys = {"data"};
+    //const std::vector<std::string> input_keys = {"data"};
+    const std::vector<std::string> input_keys = {"data"};
     const std::vector<std::vector<mx_uint>> input_shapes = {{1, 3, 32, 32}};
     const bool use_gpu = false;

@@ -28,10 +29,9 @@ public:
     ~CNNPredictor_cifar10_cifar10Classifier_net_0(){
         if(handle) MXPredFree(handle);
     }

-    void predict(const std::vector<float> &data, std::vector<float> &softmax){
-        MXPredSetInput(handle, "data", data.data(), data.size());
-        //MXPredSetInput(handle, "data", data.data(), data.size());
+    void predict(const std::vector<float> &data_, std::vector<float> &softmax_){
+        MXPredSetInput(handle, input_keys[0].c_str(), data_.data(), data_.size());

         MXPredForward(handle);

@@ -44,8 +44,8 @@ public:
         MXPredGetOutputShape(handle, output_index, &shape, &shape_len);
         size = 1;
         for (mx_uint i = 0; i < shape_len; ++i) size *= shape[i];

-        assert(size == softmax.size());
-        MXPredGetOutput(handle, 0, &(softmax[0]), softmax.size());
+        assert(size == softmax_.size());
+        MXPredGetOutput(handle, 0, &(softmax_[0]), softmax_.size());
     }
src/test/resources/target_code/CNNPredictor_mnist_mnistClassifier_net.h

@@ -85,9 +85,9 @@ class CNNPredictor_mnist_mnistClassifier_net_0{
         input.Resize(input_shapes);
     }

-    void predict(const std::vector<float> &image, std::vector<float> &predictions){
+    void predict(const std::vector<float> &image_, std::vector<float> &predictions_){
         //Note: ShareExternalPointer requires a float pointer.
-        input.ShareExternalPointer((float*)image.data());
+        input.ShareExternalPointer((float*)image_.data());

         // Get input blob
 #ifdef USE_GPU

@@ -104,11 +104,11 @@ class CNNPredictor_mnist_mnistClassifier_net_0{
         // Get output blob
 #ifdef USE_GPU
-        auto predictionsBlob = TensorCPU(workSpace.GetBlob("predictions")->Get<TensorCUDA>());
+        auto predictions_Blob = TensorCPU(workSpace.GetBlob("predictions")->Get<TensorCUDA>());
 #else
-        auto predictionsBlob = workSpace.GetBlob("predictions")->Get<TensorCPU>();
+        auto predictions_Blob = workSpace.GetBlob("predictions")->Get<TensorCPU>();
 #endif
-        predictions.assign(predictionsBlob.data<float>(), predictionsBlob.data<float>() + predictionsBlob.size());
+        predictions_.assign(predictions_Blob.data<float>(), predictions_Blob.data<float>() + predictions_Blob.size());

         google::protobuf::ShutdownProtobufLibrary();
     }
src/test/resources/target_code/cifar10_cifar10Classifier_net.h

@@ -19,12 +19,12 @@ data = icube(3, 32, 32);
 softmax = colvec(classes);
 }
 void execute(){
-    vector<float> CNN_softmax(10);
+    vector<float> CNN_softmax_(10);

-    _predictor_0_.predict(CNNTranslator::translate(data), CNN_softmax);
+    _predictor_0_.predict(CNNTranslator::translate(data), CNN_softmax_);

-    softmax = CNNTranslator::translateToCol(CNN_softmax, std::vector<size_t> {10});
+    softmax = CNNTranslator::translateToCol(CNN_softmax_, std::vector<size_t> {10});
 }
src/test/resources/target_code/gluon/CNNCreator_mnist_mnistClassifier_net.py

@@ -50,7 +50,7 @@ class CNNCreator_mnist_mnistClassifier_net:
         self.networks[0] = Net_0(data_mean=data_mean, data_std=data_std)
         self.networks[0].collect_params().initialize(self.weight_initializer, ctx=context)
         self.networks[0].hybridize()
-        self.networks[0](mx.nd.zeros((1, 1, 28, 28,), ctx=context))
+        self.networks[0](mx.nd.zeros((1, 1, 28, 28,), ctx=context))

         if not os.path.exists(self._model_dir_):
             os.makedirs(self._model_dir_)
src/test/resources/target_code/gluon/CNNDataLoader_mnist_mnistClassifier_net.py

@@ -21,8 +21,8 @@ class CNNDataLoader_mnist_mnistClassifier_net:
         for input_name in self._input_names_:
             train_data[input_name] = train_h5[input_name]
-            data_mean[input_name] = nd.array(train_h5[input_name][:].mean(axis=0))
-            data_std[input_name] = nd.array(train_h5[input_name][:].std(axis=0) + 1e-5)
+            data_mean[input_name + '_'] = nd.array(train_h5[input_name][:].mean(axis=0))
+            data_std[input_name + '_'] = nd.array(train_h5[input_name][:].std(axis=0) + 1e-5)

         train_label = {}
         for output_name in self._output_names_:
src/test/resources/target_code/gluon/CNNNet_mnist_mnistClassifier_net.py

@@ -85,10 +85,10 @@ class Net_0(gluon.HybridBlock):
         with self.name_scope():
             if data_mean:
                 assert(data_std)
-                self.input_normalization_image = ZScoreNormalization(data_mean=data_mean['image'], data_std=data_std['image'])
+                self.input_normalization_image_ = ZScoreNormalization(data_mean=data_mean['image_'], data_std=data_std['image_'])
             else:
-                self.input_normalization_image = NoNormalization()
+                self.input_normalization_image_ = NoNormalization()

             self.conv1_ = gluon.nn.Conv2D(channels=20, kernel_size=(5,5),

@@ -123,10 +123,9 @@ class Net_0(gluon.HybridBlock):
             self.softmax3_ = Softmax()

-    def hybrid_forward(self, F, image):
-        outputs = []
-        image = self.input_normalization_image(image)
-        conv1_ = self.conv1_(image)
+    def hybrid_forward(self, F, image_):
+        image_ = self.input_normalization_image_(image_)
+        conv1_ = self.conv1_(image_)
         pool1_ = self.pool1_(conv1_)
         conv2_ = self.conv2_(pool1_)
         pool2_ = self.pool2_(conv2_)

@@ -135,6 +134,7 @@ class Net_0(gluon.HybridBlock):
         relu2_ = self.relu2_(fc2_)
         fc3_ = self.fc3_(relu2_)
         softmax3_ = self.softmax3_(fc3_)
-        outputs.append(softmax3_)
+        predictions_ = softmax3_

-        return outputs[0]
+        return predictions_
src/test/resources/target_code/gluon/CNNPredictor_mnist_mnistClassifier_net.h

@@ -29,9 +29,9 @@ public:
         if(handle) MXPredFree(handle);
     }

-    void predict(const std::vector<float> &image, std::vector<float> &predictions){
-        MXPredSetInput(handle, "data", image.data(), static_cast<mx_uint>(image.size()));
+    void predict(const std::vector<float> &in_image_, std::vector<float> &out_predictions_){
+        MXPredSetInput(handle, input_keys[0].c_str(), in_image_.data(), static_cast<mx_uint>(in_image_.size()));

         MXPredForward(handle);

@@ -44,8 +44,8 @@ public:
         MXPredGetOutputShape(handle, output_index, &shape, &shape_len);
         size = 1;
         for (mx_uint i = 0; i < shape_len; ++i) size *= shape[i];

-        assert(size == predictions.size());
-        MXPredGetOutput(handle, 0, &(predictions[0]), predictions.size());
+        assert(size == out_predictions_.size());
+        MXPredGetOutput(handle, 0, &(out_predictions_[0]), out_predictions_.size());
     }
src/test/resources/target_code/gluon/CNNSupervisedTrainer_mnist_mnistClassifier_net.py

@@ -132,14 +132,15 @@ class CNNSupervisedTrainer_mnist_mnistClassifier_net:
         for epoch in range(begin_epoch, begin_epoch + num_epoch):
             train_iter.reset()
             for batch_i, batch in enumerate(train_iter):
-                image_data = batch.data[0].as_in_context(mx_context)
+                image_ = batch.data[0].as_in_context(mx_context)

                 predictions_label = batch.label[0].as_in_context(mx_context)

                 with autograd.record():
-                    predictions_output = self._networks[0](image_data)
+                    predictions_ = self._networks[0](image_)

                     loss = \
-                        loss_function(predictions_output, predictions_label)
+                        loss_function(predictions_, predictions_label)

                 loss.backward()

@@ -164,17 +165,18 @@ class CNNSupervisedTrainer_mnist_mnistClassifier_net:
                 train_iter.reset()
                 metric = mx.metric.create(eval_metric)
                 for batch_i, batch in enumerate(train_iter):
-                    image_data = batch.data[0].as_in_context(mx_context)
+                    image_ = batch.data[0].as_in_context(mx_context)

                     labels = [
                         batch.label[0].as_in_context(mx_context)
                     ]

-                    if True: # Fix indentation
-                        predictions_output = self._networks[0](image_data)
+                    if True:
+                        predictions_ = self._networks[0](image_)

                     predictions = [
-                        mx.nd.argmax(predictions_output, axis=1)
+                        mx.nd.argmax(predictions_, axis=1)
                     ]

                     metric.update(preds=predictions, labels=labels)

@@ -183,17 +185,18 @@ class CNNSupervisedTrainer_mnist_mnistClassifier_net:
                 test_iter.reset()
                 metric = mx.metric.create(eval_metric)
                 for batch_i, batch in enumerate(test_iter):
-                    image_data = batch.data[0].as_in_context(mx_context)
+                    image_ = batch.data[0].as_in_context(mx_context)

                     labels = [
                         batch.label[0].as_in_context(mx_context)
                     ]

-                    if True: # Fix indentation
-                        predictions_output = self._networks[0](image_data)
+                    if True:
+                        predictions_ = self._networks[0](image_)

                     predictions = [
-                        mx.nd.argmax(predictions_output, axis=1)
+                        mx.nd.argmax(predictions_, axis=1)
                     ]

                     metric.update(preds=predictions, labels=labels)
src/test/resources/target_code/gluon/mnist_mnistClassifier_net.h

@@ -19,12 +19,12 @@ image = icube(1, 28, 28);
 predictions = colvec(classes);
 }
 void execute(){
-    vector<float> CNN_predictions(10);
+    vector<float> image_ = CNNTranslator::translate(image);
+    vector<float> predictions_(10);

-    _predictor_0_.predict(CNNTranslator::translate(image), CNN_predictions);
+    _predictor_0_.predict(image_, predictions_);

-    predictions = CNNTranslator::translateToCol(CNN_predictions, std::vector<size_t> {10});
+    predictions = CNNTranslator::translateToCol(predictions_, std::vector<size_t> {10});
 }
src/test/resources/target_code/gluon/reinforcementModel/cartpole/CNNCreator_cartpole_master_dqn.py

@@ -50,7 +50,7 @@ class CNNCreator_cartpole_master_dqn:
         self.networks[0] = Net_0(data_mean=data_mean, data_std=data_std)
         self.networks[0].collect_params().initialize(self.weight_initializer, ctx=context)
         self.networks[0].hybridize()
-        self.networks[0](mx.nd.zeros((1, 4,), ctx=context))
+        self.networks[0](mx.nd.zeros((1, 4,), ctx=context))

         if not os.path.exists(self._model_dir_):
             os.makedirs(self._model_dir_)
src/test/resources/target_code/gluon/reinforcementModel/cartpole/CNNNet_cartpole_master_dqn.py

@@ -85,10 +85,10 @@ class Net_0(gluon.HybridBlock):
         with self.name_scope():
             if data_mean:
                 assert(data_std)
-                self.input_normalization_state = ZScoreNormalization(data_mean=data_mean['state'], data_std=data_std['state'])
+                self.input_normalization_state_ = ZScoreNormalization(data_mean=data_mean['state_'], data_std=data_std['state_'])
             else:
-                self.input_normalization_state = NoNormalization()
+                self.input_normalization_state_ = NoNormalization()

             self.fc1_ = gluon.nn.Dense(units=128, use_bias=True)
             # fc1_, output shape: {[128,1,1]}

@@ -103,14 +103,14 @@ class Net_0(gluon.HybridBlock):
-    def hybrid_forward(self, F, state):
-        outputs = []
-        state = self.input_normalization_state(state)
-        fc1_ = self.fc1_(state)
+    def hybrid_forward(self, F, state_):
+        state_ = self.input_normalization_state_(state_)
+        fc1_ = self.fc1_(state_)
         tanh1_ = self.tanh1_(fc1_)
         fc2_ = self.fc2_(tanh1_)
         tanh2_ = self.tanh2_(fc2_)
         fc3_ = self.fc3_(tanh2_)
-        outputs.append(fc3_)
+        qvalues_ = fc3_

-        return outputs[0]
+        return qvalues_