from reinforcement_learning.agent import DqnAgent
from reinforcement_learning.util import AgentSignalHandler
from reinforcement_learning.cnnarch_logger import ArchLogger
import reinforcement_learning.environment
import CNNCreator_reinforcementConfig2

import os
import sys
import re
import time
import numpy as np
import mxnet as mx


def resume_session(sessions_dir):
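    """Search ``sessions_dir`` for the newest interrupted training session
    and ask the user whether it should be resumed.

    Returns a ``(resume, directory)`` tuple, where ``directory`` points to
    the ``.interrupted_session`` folder, or is ``None`` if there is nothing
    to resume.
    """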
    should_resume = False
    resume_directory = None
    if os.path.isdir(sessions_dir):
        regex = re.compile(r'\d\d\d\d-\d\d-\d\d-\d\d-\d\d')
        dir_content = os.listdir(sessions_dir)
        session_files = sorted(filter(regex.search, dir_content), reverse=True)
        for d in session_files:
            interrupted_session_dir = os.path.join(sessions_dir, d, '.interrupted_session')
            if os.path.isdir(interrupted_session_dir):
                answer = input('Interrupted session from {} found. Do you want to resume? (y/n) '.format(d))
                if answer == 'y':
                    should_resume = True
                    resume_directory = interrupted_session_dir
                break
    return should_resume, resume_directory


if __name__ == "__main__":
    agent_name = 'reinforcement_agent'
    # Prepare output directory and logger
    all_output_dir = os.path.join('model', agent_name)
    output_directory = os.path.join(
        all_output_dir,
        time.strftime('%Y-%m-%d-%H-%M-%S',
                      time.localtime(time.time())))
    ArchLogger.set_output_directory(output_directory)
    ArchLogger.set_logger_name(agent_name)
    ArchLogger.set_output_level(ArchLogger.INFO)

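    # Gym environment the agent is trained on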
    env = reinforcement_learning.environment.GymEnvironment('CartPole-v1')

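    # Train on the CPU; Q-network weights are drawn from a normal distribution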
    context = mx.cpu()
    initializer = mx.init.Normal()
    critic_initializer = mx.init.Normal()
    qnet_creator = CNNCreator_reinforcementConfig2.CNNCreator_reinforcementConfig2()
    qnet_creator.setWeightInitializer(initializer)
    qnet_creator.construct(context)

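    # DQN hyperparameters: experience replay, exploration schedule, network
    # and optimizer settings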
    agent_params = {
        'environment': env,
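        # Replay buffer holding up to 10000 transitions, sampled in minibatches of 32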
        'replay_memory_params': {
            'method': 'buffer',
            'memory_size': 10000,
            'sample_size': 32,
            'state_dtype': 'float32',
            'action_dtype': 'uint8',
            'rewards_dtype': 'float32'
        },
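        # Epsilon-greedy exploration, annealed linearly from 1.0 down to 0.01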
        'strategy_params': {
            'method': 'epsgreedy',
            'epsilon': 1,
            'min_epsilon': 0.01,
            'epsilon_decay_method': 'linear',
            'epsilon_decay': 0.0001,
            'epsilon_decay_start': 20,
        },
        'agent_name': agent_name,
        'verbose': True,
        'output_directory': output_directory,
        'state_dim': (8,),
        'action_dim': (3,),
        'discount_factor': 0.999,
        'training_episodes': 200,
        'train_interval': 1,
        'snapshot_interval': 20,
        'max_episode_step': 250,
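        # Training may stop early once the average score over the
        # evaluation samples reaches target_score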
        'evaluation_samples': 100,
        'target_score': 185.5,
        'qnet': qnet_creator.networks[0],
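        # Plain DQN update: fixed target network and double DQN are disabled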
        'use_fix_target': False,
        'loss_function': 'l2',
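        # Centered RMSProp optimizer with a step-wise learning rate schedule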
        'optimizer': 'rmsprop',
        'optimizer_params': {
            'weight_decay': 0.01,
            'centered': True,
            'gamma2': 0.9,
            'gamma1': 0.9,
            'clip_weights': 10.0,
            'learning_rate_decay': 0.9,
            'epsilon': 1.0E-6,
            'rescale_grad': 1.1,
            'clip_gradient': 10.0,
            'learning_rate_minimum': 1.0E-5,
            'learning_rate_policy': 'step',
            'learning_rate': 0.001,
            'step_size': 1000
        },
        'double_dqn': False,
    }

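    # Resume an interrupted session if the user confirms; otherwise start a
    # fresh agent with the parameters above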
    resume, resume_directory = resume_session(all_output_dir)

    if resume:
        output_directory, _ = os.path.split(resume_directory)
        ArchLogger.set_output_directory(output_directory)
        resume_agent_params = {
            'session_dir': resume_directory,
            'environment': env,
            'net': qnet_creator.networks[0],
        }
        agent = DqnAgent.resume_from_session(**resume_agent_params)
    else:
        agent = DqnAgent(**agent_params)

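    # Register the agent with the signal handler so an interrupt (e.g. Ctrl+C)
    # saves the session state for later resumption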
    signal_handler = AgentSignalHandler()
    signal_handler.register_agent(agent)

    train_successful = agent.train()

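    # On success, export the best network found during training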
    if train_successful:
        agent.export_best_network(path=qnet_creator._model_dir_ + qnet_creator._model_prefix_ + '_0_newest', epoch=0)