CNNTrainer_reinforcementConfig2.py
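
"""DQN trainer script for the reinforcementConfig2 network.

Sets up logging, builds the Q-network, trains a DqnAgent on the
CartPole-v1 Gym environment, and resumes an interrupted training
session if one is found under the model output directory.
"""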
from reinforcement_learning.agent import DqnAgent
from reinforcement_learning.util import AgentSignalHandler
from reinforcement_learning.cnnarch_logger import ArchLogger
import reinforcement_learning.environment
import CNNCreator_reinforcementConfig2

import os
import sys
import re
import time
import numpy as np
import mxnet as mx


def resume_session(sessions_dir):
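    """Search sessions_dir for an interrupted training session and ask
    the user whether to resume it.

    Returns a (resume, directory) tuple where directory is None unless
    an interrupted session was found and the user chose to resume.
    """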
    resume_session = False
    resume_directory = None
    if os.path.isdir(sessions_dir):
        # Session directories are named after their start timestamp
        regex = re.compile(r'\d\d\d\d-\d\d-\d\d-\d\d-\d\d')
        dir_content = os.listdir(sessions_dir)
        # Sort newest first and stop at the first interrupted session
        session_files = sorted(
            (d for d in dir_content if regex.search(d)), reverse=True)
        for d in session_files:
            interrupted_session_dir = os.path.join(
                sessions_dir, d, '.interrupted_session')
            if os.path.isdir(interrupted_session_dir):
                resume = input('Interrupted session from {} found. Do you want to resume? (y/n) '.format(d))
                if resume == 'y':
                    resume_session = True
                    resume_directory = interrupted_session_dir
                break
    return resume_session, resume_directory


if __name__ == "__main__":
    agent_name = 'reinforcement_agent'
    # Prepare output directory and logger
    all_output_dir = os.path.join('model', agent_name)
    output_directory = os.path.join(
        all_output_dir,
        time.strftime('%Y-%m-%d-%H-%M-%S',
                      time.localtime(time.time())))
    ArchLogger.set_output_directory(output_directory)
    ArchLogger.set_logger_name(agent_name)
    ArchLogger.set_output_level(ArchLogger.INFO)

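    # Gym environment the agent is trained on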
    env = reinforcement_learning.environment.GymEnvironment('CartPole-v1')

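    # Build the Q-network with the generated creator on the chosen context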
    context = mx.cpu()
    qnet_creator = CNNCreator_reinforcementConfig2.CNNCreator_reinforcementConfig2()
    qnet_creator.construct(context)

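    # Configuration of the DQN agent: replay memory, exploration
    # strategy, and training schedule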
    agent_params = {
        'environment': env,
        'replay_memory_params': {
            'method': 'buffer',
            'memory_size': 10000,
            'sample_size': 32,
            'state_dtype': 'float32',
            'action_dtype': 'uint8',
            'rewards_dtype': 'float32'
        },
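        # Epsilon-greedy exploration: epsilon decays linearly from 1.0 to 0.01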
        'strategy_params': {
            'method': 'epsgreedy',
            'epsilon': 1,
            'min_epsilon': 0.01,
            'epsilon_decay_method': 'linear',
            'epsilon_decay': 0.0001,
            'epsilon_decay_start': 20,
        },
        'agent_name': agent_name,
        'verbose': True,
        'output_directory': output_directory,
        'state_dim': (8,),
        'action_dim': (3,),
        'discount_factor': 0.999,
        'training_episodes': 200,
        'train_interval': 1,
        'snapshot_interval': 20,
        'max_episode_step': 250,
        'evaluation_samples': 100,
        'target_score': 185.5,
        'qnet': qnet_creator.networks[0],
        'use_fix_target': False,
        'loss_function': 'l2',
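        # RMSProp optimizer settings used for the Q-network updates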
        'optimizer': 'rmsprop',
        'optimizer_params': {
            'weight_decay': 0.01,
            'centered': True,
            'gamma2': 0.9,
            'gamma1': 0.9,
            'clip_weights': 10.0,
            'learning_rate_decay': 0.9,
            'epsilon': 1.0E-6,
            'rescale_grad': 1.1,
            'clip_gradient': 10.0,
            'learning_rate_minimum': 1.0E-5,
            'learning_rate_policy': 'step',
            'learning_rate': 0.001,
            'step_size': 1000
        },
        'double_dqn': False,
    }

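    # Check for an interrupted session that can be resumed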
    resume, resume_directory = resume_session(all_output_dir)

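    # Restore the agent from the interrupted session, reusing its output directory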
    if resume:
        output_directory, _ = os.path.split(resume_directory)
        ArchLogger.set_output_directory(output_directory)
        resume_agent_params = {
            'session_dir': resume_directory,
            'environment': env,
            'net': qnet_creator.networks[0],
        }
        agent = DqnAgent.resume_from_session(**resume_agent_params)
    else:
        agent = DqnAgent(**agent_params)

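    # Register a signal handler so an interrupted run can be saved and resumed later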
    signal_handler = AgentSignalHandler()
    signal_handler.register_agent(agent)

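    # train() reports whether training finished successfully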
    train_successful = agent.train()

    if train_successful:
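        # Export the best network found during training next to the model files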
        agent.export_best_network(
            path=qnet_creator._model_dir_ + qnet_creator._model_prefix_ + '_0_newest',
            epoch=0)