Aufgrund einer Wartung wird GitLab am 19.10. zwischen 8:00 und 9:00 Uhr kurzzeitig nicht zur Verfügung stehen. / Due to maintenance, GitLab will be temporarily unavailable on 19.10. between 8:00 and 9:00 am.

Commit 0ff92c59 authored by Katherine Cornell's avatar Katherine Cornell
Browse files

really final commit

parent 6a57597a
# `nfa_prune`
Randomly samples NFAs and measures the running time of the top-down and bottom-up algorithms
Usage: `nfa_prune -n [number of samples] -nA [alphabet size] -nQ [number of states] [output folder]`
The output folder will contain a `.csv` file with the results and, for each sampled automaton, an `[n].nfa` file describing it.
\ No newline at end of file
......@@ -4,6 +4,7 @@
#include "Delegator.hpp"
#include <chrono>
#include <stack>
#include <queue>
......@@ -217,7 +218,9 @@ auto delegator::Delegator::operator()(std::size_t q, std::size_t a) const -> std
return _map[q * _alphabet + a];
}
auto delegator::topDown(NFA const& automaton, double* load) -> std::optional<Delegator> {
auto delegator::topDown(NFA const& automaton, double* load) -> std::tuple<std::optional<Delegator>, double, double> {
auto init_start = std::chrono::high_resolution_clock::now();
PositionMap<PositionData> data(automaton);
for (std::size_t q = 0; q != automaton.states.size(); ++q) {
......@@ -229,12 +232,19 @@ auto delegator::topDown(NFA const& automaton, double* load) -> std::optional<Del
}
}
auto init_end = std::chrono::high_resolution_clock::now();
for (std::size_t a = 0; a != automaton.alphabetSize; ++a) {
if (check(automaton, { Player::Player0, a, 0, 0 }, data) == PositionState::Unsafe) {
if (load) {
*load = computeLoad_TopDown(data, automaton);
}
return std::nullopt;
auto end = std::chrono::high_resolution_clock::now();
return std::make_tuple(
std::nullopt,
std::chrono::duration_cast<std::chrono::duration<double>>(init_end - init_start).count(),
std::chrono::duration_cast<std::chrono::duration<double>>(end - init_end).count()
);
}
}
......@@ -257,10 +267,17 @@ auto delegator::topDown(NFA const& automaton, double* load) -> std::optional<Del
*load = computeLoad_TopDown(data, automaton);
}
return f;
auto end = std::chrono::high_resolution_clock::now();
return std::make_tuple(
f,
std::chrono::duration_cast<std::chrono::duration<double>>(init_end - init_start).count(),
std::chrono::duration_cast<std::chrono::duration<double>>(end - init_end).count()
);
}
auto delegator::bottomUpQueue(NFA const& automaton, double* load) -> std::optional<Delegator> {
auto delegator::bottomUpQueue(NFA const& automaton, double* load) -> std::tuple<std::optional<Delegator>, double, double> {
auto init_start = std::chrono::high_resolution_clock::now();
std::queue<Position> queue;
PositionMap<std::size_t> count(automaton);
......@@ -281,6 +298,8 @@ auto delegator::bottomUpQueue(NFA const& automaton, double* load) -> std::option
}
}
auto init_end = std::chrono::high_resolution_clock::now();
while (!queue.empty()) {
auto [i, a, q, p] = queue.front();
queue.pop();
......@@ -310,7 +329,12 @@ auto delegator::bottomUpQueue(NFA const& automaton, double* load) -> std::option
*load = computeLoad_BottomUp(count, automaton);
}
return std::nullopt;
auto end = std::chrono::high_resolution_clock::now();
return std::make_tuple(
std::nullopt,
std::chrono::duration_cast<std::chrono::duration<double>>(init_end - init_start).count(),
std::chrono::duration_cast<std::chrono::duration<double>>(end - init_end).count()
);
}
else {
queue.push({ Player::Player0, a, q_prime, p });
......@@ -339,10 +363,17 @@ auto delegator::bottomUpQueue(NFA const& automaton, double* load) -> std::option
*load = computeLoad_BottomUp(count, automaton);
}
return f;
auto end = std::chrono::high_resolution_clock::now();
return std::make_tuple(
f,
std::chrono::duration_cast<std::chrono::duration<double>>(init_end - init_start).count(),
std::chrono::duration_cast<std::chrono::duration<double>>(end - init_end).count()
);
}
auto delegator::bottomUpStack(NFA const& automaton, double* load) -> std::optional<Delegator> {
auto delegator::bottomUpStack(NFA const& automaton, double* load) -> std::tuple<std::optional<Delegator>, double, double> {
auto init_start = std::chrono::high_resolution_clock::now();
std::stack<Position> stack;
PositionMap<std::size_t> count(automaton);
......@@ -363,6 +394,8 @@ auto delegator::bottomUpStack(NFA const& automaton, double* load) -> std::option
}
}
auto init_end = std::chrono::high_resolution_clock::now();
while (!stack.empty()) {
auto [i, a, q, p] = stack.top();
stack.pop();
......@@ -391,7 +424,13 @@ auto delegator::bottomUpStack(NFA const& automaton, double* load) -> std::option
if (load) {
*load = computeLoad_BottomUp(count, automaton);
}
return std::nullopt;
auto end = std::chrono::high_resolution_clock::now();
return std::make_tuple(
std::nullopt,
std::chrono::duration_cast<std::chrono::duration<double>>(init_end - init_start).count(),
std::chrono::duration_cast<std::chrono::duration<double>>(end - init_end).count()
);
}
else {
stack.push({ Player::Player0, a, q_prime, p });
......@@ -420,5 +459,10 @@ auto delegator::bottomUpStack(NFA const& automaton, double* load) -> std::option
*load = computeLoad_BottomUp(count, automaton);
}
return f;
auto end = std::chrono::high_resolution_clock::now();
return std::make_tuple(
f,
std::chrono::duration_cast<std::chrono::duration<double>>(init_end - init_start).count(),
std::chrono::duration_cast<std::chrono::duration<double>>(end - init_end).count()
);
}
\ No newline at end of file
......@@ -52,20 +52,20 @@ namespace delegator {
///
/// \param automaton The automaton for which a delegator should be determined
/// \param load If not `null` will contain the fraction of game positions actually seen by the algorithm
/// \returns The delegator for `automaton` if one exists
auto topDown(NFA const& automaton, double* load = nullptr) -> std::optional<Delegator>;
/// \returns The delegator for `automaton` if one exists, the time needed for initialisation and the time needed for the core algorithm
auto topDown(NFA const& automaton, double* load = nullptr) -> std::tuple<std::optional<Delegator>, double, double>;
/// Tries to determine a delegator for an NFA using the bottom-up algorithm using a stack
///
/// \param automaton The automaton for which a delegator should be determined
/// \param load If not `null` will contain the fraction of game positions actually seen by the algorithm
/// \returns The delegator for `automaton` if one exists
auto bottomUpStack(NFA const& automaton, double* load = nullptr) -> std::optional<Delegator>;
/// \returns The delegator for `automaton` if one exists, the time needed for initialisation and the time needed for the core algorithm
auto bottomUpStack(NFA const& automaton, double* load = nullptr) -> std::tuple<std::optional<Delegator>, double, double>;
/// Tries to determine a delegator for an NFA using the bottom-up algorithm using a queue
///
/// \param automaton The automaton for which a delegator should be determined
/// \param load If not `null` will contain the fraction of game positions actually seen by the algorithm
/// \returns The delegator for `automaton` if one exists
auto bottomUpQueue(NFA const& automaton, double* load = nullptr) -> std::optional<Delegator>;
/// \returns The delegator for `automaton` if one exists, the time needed for initialisation and the time needed for the core algorithm
auto bottomUpQueue(NFA const& automaton, double* load = nullptr) -> std::tuple<std::optional<Delegator>, double, double>;
}
#endif /* DELEGATOR_AUTOMATA_DELEGATOR_HPP_INCLUDED */
......@@ -34,15 +34,15 @@ namespace {
// Only select states with the same number of outgoing and ingoing transitions for the same letter
auto reject = false;
for (std::size_t a = 0; a != automaton.states.size(); ++a) {
for (std::size_t a = 0; a != automaton.alphabetSize; ++a) {
if (automaton.states[i].out_transitions[a].targets.size() != automaton.states[j].out_transitions[a].targets.size()) {
reject = true;
continue;
break;
}
if (automaton.states[i].in_transitions[a].targets.size() != automaton.states[j].in_transitions[a].targets.size()) {
reject = true;
continue;
break;
}
}
......
......@@ -127,8 +127,10 @@ auto delegator::sampleNFA(std::size_t states, std::size_t alphabet, double p_f,
NFA automaton(states, alphabet);
for (std::size_t i = 0; i != states; ++i) {
automaton.states[i].accepting = true;
automaton.states[i].out_transitions[0].targets.insert((i + 1) % states);
automaton.states[(i + 1) % states].in_transitions[0].targets.insert(i);
for (std::size_t j = i; j != states; ++j) {
automaton.states[i].out_transitions[0].targets.insert(j);
automaton.states[j].in_transitions[0].targets.insert(i);
}
}
auto autSize = automaton.autSize();
......
......@@ -168,7 +168,7 @@ auto main(int argc, char const** argv) -> int {
// Initialise output CSV file
std::ofstream results(config.output / "results.csv");
results << "Run;Result;Time (top-down);Time (bottom-up w/ stack);Time (bottom-up w/ queue);Load (top-down);Load (bottom-up w/ stack);Load (bottom-up w/ queue);States;Alphabet;Out-Degree (min);Out-Degree (max);Out-Degree (avg);In-Degree (min);In-Degree (max);In-Degree (avg);Dump\n";
results << "Run;Result;Time (top-down initialisation);Time (top-down main);Time (bottom-up w/ stack initialisation);Time (bottom-up w/ stack main);Time (bottom-up w/ queue initialisation);Time (bottom-up w/ queue main);Load (top-down);Load (bottom-up w/ stack);Load (bottom-up w/ queue);States;Alphabet;Out-Degree (min);Out-Degree (max);Out-Degree (avg);In-Degree (min);In-Degree (max);In-Degree (avg);Dump\n";
for (std::size_t i = 0; i != config.number; ++i) {
// Generate state and alphabet for automaton
......@@ -197,7 +197,7 @@ auto main(int argc, char const** argv) -> int {
// Run top-down algorithm
std::cout << "Automaton " << i + 1 << "/" << config.number << ": Top-down evaluation...\n";
auto [top_delegator, top_time] = delegator::measure<std::chrono::high_resolution_clock>(delegator::topDown, completeAutomaton, nullptr);
auto [top_delegator, top_init_time, top_work_time] = delegator::topDown(completeAutomaton, nullptr);
double top_load = 0.0;
auto _ = delegator::topDown(completeAutomaton, &top_load);
......@@ -205,7 +205,7 @@ auto main(int argc, char const** argv) -> int {
// Run bottom-up algorithm with stack
std::cout << "Automaton " << i + 1 << "/" << config.number << ": Bottom-up evaluation (stack)...\n";
auto [bottom_stack_delegator, bottom_stack_time] = delegator::measure<std::chrono::high_resolution_clock>(delegator::bottomUpStack, completeAutomaton, nullptr);
auto [bottom_stack_delegator, bottom_stack_init_time, bottom_stack_work_time] = delegator::bottomUpStack(completeAutomaton, nullptr);
if (!top_delegator != !bottom_stack_delegator) {
std::cerr << "WARNING: Different results for top-down and bottom-up algorithm (stack) in run " << i << "\n";
......@@ -219,7 +219,7 @@ auto main(int argc, char const** argv) -> int {
// Run bottom-up algorithm with queue
std::cout << "Automaton " << i + 1 << "/" << config.number << ": Bottom-up evaluation (queue)...\n";
auto [bottom_queue_delegator, bottom_queue_time] = delegator::measure<std::chrono::high_resolution_clock>(delegator::bottomUpQueue, completeAutomaton, nullptr);
auto [bottom_queue_delegator, bottom_queue_init_time, bottom_queue_work_time] = delegator::bottomUpQueue(completeAutomaton, nullptr);
if (!top_delegator != !bottom_queue_delegator) {
std::cerr << "WARNING: Different results for top-down and bottom-up algorithm (queue) in run " << i << "\n";
......@@ -243,9 +243,12 @@ auto main(int argc, char const** argv) -> int {
results << i + 1 << ';'
<< !!top_delegator << ';'
<< std::chrono::duration_cast<std::chrono::duration<double>>(top_time).count() << ';'
<< std::chrono::duration_cast<std::chrono::duration<double>>(bottom_stack_time).count() << ';'
<< std::chrono::duration_cast<std::chrono::duration<double>>(bottom_queue_time).count() << ';'
<< top_init_time << ';'
<< top_work_time << ';'
<< bottom_stack_init_time << ';'
<< bottom_stack_work_time << ';'
<< bottom_queue_init_time << ';'
<< bottom_queue_work_time << ';'
<< top_load << ';'
<< bottom_stack_load << ';'
<< bottom_queue_load << ';'
......
Markdown is supported
0% — attach a file by dragging & dropping or clicking to select it.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment