revert refactoring
housengw committed Nov 30, 2021
2 parents 9af688c + 3f2de1a commit 0b8263d
Showing 16 changed files with 3,972 additions and 236 deletions.
28 changes: 28 additions & 0 deletions benchmark/C/Distributed/src/PingPongDistributed.lf
@@ -0,0 +1,28 @@
/**
* Basic benchmark from the Savina benchmark suite that is
* intended to measure message-passing overhead.
* This version is distributed, communicating using physical connections over sockets.
 * See [Benchmarks wiki page](https://github.com/icyphy/lingua-franca/wiki/Benchmarks).
 *
* This is based on https://www.scala-lang.org/old/node/54
* See https://shamsimam.github.io/papers/2014-agere-savina.pdf.
*
* This is a distributed version, where Ping and Pong run in
* separate programs and can be run on different machines.
*
 * There is no parallelism in this application, so it does not benefit from
 * being distributed.
*
 * The reported measurements are total execution time, including startup and
 * shutdown, of all three programs (the RTI plus the Ping and Pong federates).
*
* @author Edward A. Lee
*/
target C;
import Ping, Pong from "../PingPong.lf"
federated reactor(count:int(10000000)) {
ping = new Ping(count = count);
pong = new Pong(expected = count);
ping.send -> pong.receive;
pong.send -> ping.receive;
}
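
For context, the federated reactor above assumes that the Ping and Pong reactors imported from ../PingPong.lf expose a send output, a receive input, and the count and expected parameters used in the instantiations. Their actual definitions are not part of this diff; the following is only a hypothetical sketch of the assumed interfaces:

// Hypothetical interfaces; the real reactions live in ../PingPong.lf, which is not shown here.
reactor Ping(count:int(10000000)) {
    input receive:int;   // pong reply coming back from Pong
    output send:int;     // ping message sent to Pong
    // ... reactions that count round trips and request stop after `count` exchanges ...
}
reactor Pong(expected:int(10000000)) {
    input receive:int;   // ping message arriving from Ping
    output send:int;     // pong reply sent back to Ping
    // ... reaction that echoes each ping and checks that `expected` messages arrived ...
}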
204 changes: 204 additions & 0 deletions benchmark/C/Savina/src/BenchmarkRunner.lf
@@ -0,0 +1,204 @@
target C;

/**
* Reactor that starts the kernel of a benchmark, measures its runtime and outputs
* the results for a given number of iterations.
*
* This reactor is instantiated by the main reactor of a benchmark and
* the startup reaction of this reactor is the starting point for that benchmark.
 * The reactor runs a given number of iterations of the benchmark, measures
 * the runtime of each iteration, and outputs the results. The benchmark itself is
 * responsible for resetting its state between iterations.
 * A benchmark can have an optional initialization phase that runs once before
 * the first iteration and is not measured.
 * A benchmark can also have an optional cleanup phase after each iteration, before
 * the next iteration starts, that is not included in the runtime measurement.
*
* How to use:
* - Instantiate this reactor in the main reactor of the benchmark.
 * - Connect the ports inStart, outIterationStart, and inIterationFinish to
 *   the appropriate reactors of the benchmark.
 * - Optionally connect the ports for initialization and cleanup.
 * - Create a startup reaction in the main reactor that calls printBenchmarkInfo()
 *   and printSystemInfo() and then sets inStart, as in the prototype below.
*
* Prototype startup reaction in the main reactor of a benchmark:
* runner = new BenchmarkRunner(num_iterations=num_iterations);
* reaction(startup) -> runner.inStart {=
* printBenchmarkInfo("ThreadRingReactorLFCppBenchmark");
* printSystemInfo();
* SET(runner.inStart, true);
* =}
*
 * @param num_iterations Number of times to execute and measure the kernel of the benchmark.
 * @param use_init Whether the benchmark needs an initialization phase and handles the corresponding signals.
 * @param use_cleanup_iteration Whether the benchmark needs cleanup after each iteration and handles the corresponding signals.
*
* @author Hannes Klein
* @author Shaokai Lin
*/
reactor BenchmarkRunner(num_iterations:int(12), use_init:bool(false), use_cleanup_iteration:bool(false)) {

/** Signal to start execution. Set this input from a startup reaction in the main reactor. */
input inStart:bool;

/** Signals for starting and finishing the kernel and runtime measurement. */
output outIterationStart:bool;
input inIterationFinish:bool;

/** Signals for initializations that are not part of the measured kernel. */
output outInitializeStart:bool;
input inInitializeFinish:bool;

/** Signals for cleanup operations after each iteration of the kernel. */
output outCleanupIterationStart:bool;
input inCleanupIterationFinish:bool;

/** Events to switch between the phases of running the iterations. */
logical action initBenchmark:bool;
logical action cleanupIteration:bool;
logical action nextIteration:bool;
logical action finish:bool;
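
    // Phase sequence (summary of the reactions below):
    //   inStart -> [initBenchmark -> outInitializeStart ... inInitializeFinish]
    //   -> nextIteration -> outIterationStart ... inIterationFinish
    //   -> [cleanupIteration -> outCleanupIterationStart ... inCleanupIterationFinish]
    //   -> nextIteration -> ... -> finish (after num_iterations measured iterations).
    // The bracketed phases run only when use_init / use_cleanup_iteration are true.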

/** Number of iterations already executed. */
state count:unsigned(0);

/** Start time for runtime measurement. */
state startTime:instant_t;

/** Runtime measurements. */
state measuredTimes:interval_t[];


reaction(startup) {=
// Initialize an array of interval_t
self->measuredTimes = calloc(self->num_iterations, sizeof(interval_t));
=}

reaction(inStart) -> nextIteration, initBenchmark {=
if(self->use_init) {
schedule(initBenchmark, 0);
} else {
schedule(nextIteration, 0);
}
=}

reaction(initBenchmark) -> outInitializeStart {=
SET(outInitializeStart, true);
=}

reaction(inInitializeFinish) -> nextIteration {=
schedule(nextIteration, 0);
=}

reaction(cleanupIteration) -> outCleanupIterationStart {=
SET(outCleanupIterationStart, true);
=}

reaction(inCleanupIterationFinish) -> nextIteration {=
schedule(nextIteration, 0);
=}

reaction(nextIteration) -> outIterationStart, finish {=
if (self->count < self->num_iterations) {
self->startTime = get_physical_time();
SET(outIterationStart, true);
} else {
schedule(finish, 0);
}
=}

reaction(inIterationFinish) -> nextIteration, cleanupIteration {=
interval_t end_time = get_physical_time();
interval_t duration = end_time - self->startTime;
self->measuredTimes[self->count] = duration;
self->count += 1;

printf("Iteration: %d\t Duration: %.3f msec\n", self->count, toMS(duration));

if(self->use_cleanup_iteration) {
schedule(cleanupIteration, 0);
} else {
schedule(nextIteration, 0);
}
=}

reaction(finish) {=
double* measuredMSTimes = getMSMeasurements(self->measuredTimes, self->num_iterations);
qsort(measuredMSTimes, self->num_iterations, sizeof(double), comp);

printf("Execution - Summary:\n");
printf("Best Time:\t %.3f msec\n", measuredMSTimes[0]);
printf("Worst Time:\t %.3f msec\n", measuredMSTimes[self->num_iterations - 1]);
printf("Median Time:\t %.3f msec\n", median(measuredMSTimes, self->num_iterations));
request_stop();
=}

preamble {=

    // Convert a time value in nanoseconds (interval_t) to milliseconds.
    static double toMS(interval_t t) {
        return t / 1000000.0;
    }

    // Comparison function for qsort over an array of doubles.
    int comp (const void * elem1, const void * elem2) {
        double f = *((double*)elem1);
        double s = *((double*)elem2);
        if (f > s) return 1;
        if (f < s) return -1;
        return 0;
    }

static double median(double* execTimes, int size) {
if (size == 0) {
return 0.0;
}

int middle = size / 2;
if(size % 2 == 1) {
return execTimes[middle];
} else {
return (execTimes[middle-1] + execTimes[middle]) / 2;
}
}
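
    /*
     * Worked example (illustrative only): for the sorted times {2.0, 4.0, 6.0, 8.0},
     * size = 4 and middle = 2, so the median is (execTimes[1] + execTimes[2]) / 2 = 5.0.
     * For {2.0, 4.0, 6.0}, size = 3 and middle = 1, so the median is execTimes[1] = 4.0.
     */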

static double* getMSMeasurements(interval_t* measured_times, int num_iterations) {

double* msMeasurements = calloc(num_iterations, sizeof(double));
for (int i = 0; i < num_iterations; i++) {
msMeasurements[i] = toMS(measured_times[i]);
}

return msMeasurements;
}
=}

preamble {=

void printBenchmarkInfo(char* benchmarkId) {
printf("Benchmark: %s\n", benchmarkId);
}

void printSystemInfo() {

printf("System information\n");
printf("O/S Name: ");

    #if defined(_WIN64)
    printf("Windows 64-bit");
    #elif defined(_WIN32)
    printf("Windows 32-bit");
#elif __APPLE__ || __MACH__
printf("Mac OSX");
#elif __linux__
printf("Linux");
#elif __FreeBSD__
printf("FreeBSD");
#elif __unix || __unix__
printf("Unix");
#else
printf("Other");
#endif

printf("\n");
}
=}
}
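
Putting the "How to use" steps from the header comment together, a complete benchmark program might look like the hypothetical sketch below. The BenchmarkKernel reactor, its port names, the import path, and the benchmark name string are illustrative placeholders; only BenchmarkRunner, its ports, and the preamble functions come from the file above.

target C;
import BenchmarkRunner from "BenchmarkRunner.lf"

// Hypothetical benchmark kernel: runs one iteration when triggered and reports completion.
reactor BenchmarkKernel {
    input inStart:bool;
    output outFinished:bool;
    reaction(inStart) -> outFinished {=
        // ... execute one iteration of the benchmark kernel here ...
        SET(outFinished, true);
    =}
}

main reactor MyBenchmark(num_iterations:int(12)) {
    runner = new BenchmarkRunner(num_iterations = num_iterations);
    kernel = new BenchmarkKernel();

    // Wire the runner's measurement signals to the kernel.
    runner.outIterationStart -> kernel.inStart;
    kernel.outFinished -> runner.inIterationFinish;

    // Start the runner from a startup reaction, as described in the header comment.
    reaction(startup) -> runner.inStart {=
        printBenchmarkInfo("MyBenchmark");
        printSystemInfo();
        SET(runner.inStart, true);
    =}
}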