Skip to content

Commit e30da94

Browse files
authored
Merge branch 'lf-lang:main' into ts_savina
2 parents 54965a9 + e21f9f9 commit e30da94

39 files changed

+306
-92
lines changed

.github/workflows/benchmark-tests.yml

Lines changed: 18 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,6 @@ on:
1212
benchmarks-ref:
1313
required: false
1414
type: string
15-
default: main
1615

1716
jobs:
1817
run:
@@ -21,7 +20,7 @@ jobs:
2120
- name: Check out benchmarks
2221
uses: actions/checkout@v3
2322
with:
24-
repository: lf-lang/benchmarks
23+
repository: lf-lang/benchmarks-lingua-franca
2524
ref: ${{ inputs.benchmarks-ref}}
2625
- name: Check out lingua-franca repository
2726
uses: actions/checkout@v3
@@ -52,7 +51,7 @@ jobs:
5251
run: |
5352
mkdir -p reactor-cpp/build
5453
cd reactor-cpp/build
55-
cmake -DCMAKE_INSTALL_PREFIX=../install ../../lf/org.lflang/src/lib/cpp/reactor-cpp
54+
cmake -DCMAKE_INSTALL_PREFIX=../install -DCMAKE_BUILD_TYPE=RelWithDebInfo -DREACTOR_CPP_VALIDATE=ON -DREACTOR_CPP_TRACE=OFF -DREACTOR_CPP_LOG_LEVEL=2 ../../lf/org.lflang/src/lib/cpp/reactor-cpp
5655
make install
5756
echo "LD_LIBRARY_PATH=$GITHUB_WORKSPACE/reactor-cpp/install/lib" >> $GITHUB_ENV
5857
if: ${{ inputs.target == 'Cpp' }}
@@ -70,3 +69,19 @@ jobs:
7069
run: |
7170
python3 runner/run_benchmark.py -m test_mode=True iterations=1 benchmark="glob(*)" target=lf-rust
7271
if: ${{ inputs.target == 'Rust' }}
72+
- name: Setup Node.js environment
73+
uses: actions/[email protected]
74+
if: ${{ inputs.target == 'TS' }}
75+
- name: Install pnpm
76+
run: npm i -g pnpm
77+
if: ${{ inputs.target == 'TS' }}
78+
- name: Cache .pnpm-store
79+
uses: actions/cache@v2
80+
with:
81+
path: ~/.pnpm-store
82+
key: ${{ runner.os }}-node${{ matrix.node-version }}-${{ hashFiles('org.lflang/src/lib/ts/package.json') }}
83+
if: ${{ inputs.target == 'TS' }}
84+
- name: Test TypeScript benchmarks
85+
run: |
86+
python3 runner/run_benchmark.py -m test_mode=True iterations=1 benchmark="glob(*)" target=lf-ts
87+
if: ${{ inputs.target == 'TS' }}

.github/workflows/ci.yml

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,5 +23,9 @@ jobs:
2323
uses: lf-lang/benchmarks-lingua-franca/.github/workflows/benchmark-tests.yml@main
2424
with:
2525
target: 'Rust'
26+
ts-benchmark-tests:
27+
uses: lf-lang/benchmarks-lingua-franca/.github/workflows/benchmark-tests.yml@main
28+
with:
29+
target: 'TS'
2630

2731

.gitignore

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,5 +9,5 @@ multirun
99
__pycache__
1010
*.csv
1111
*.txt
12-
12+
.vscode
1313

C/Savina/src/BenchmarkRunner.lf

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -61,29 +61,28 @@ reactor BenchmarkRunner(num_iterations:int(12)) {
6161
reaction(startup) -> nextIteration {=
6262
// Initialize an array of interval_t
6363
self->measuredTimes = (interval_t *) calloc(self->num_iterations, sizeof(interval_t));
64-
schedule(nextIteration, 0);
64+
lf_schedule(nextIteration, 0);
6565
=}
6666

6767

6868
reaction(nextIteration) -> start, done {=
6969
if (self->count < self->num_iterations) {
70-
self->startTime = get_physical_time();
71-
SET(start, true);
70+
self->startTime = lf_time_physical();
71+
lf_set(start, true);
7272
} else {
73-
schedule(done, 0);
73+
lf_schedule(done, 0);
7474
}
7575
=}
7676

7777
reaction(finish) -> nextIteration {=
78-
interval_t end_time = get_physical_time();
78+
interval_t end_time = lf_time_physical();
7979
interval_t duration = end_time - self->startTime;
8080
self->measuredTimes[self->count] = duration;
8181
self->count += 1;
8282

8383
printf("Iteration %d - %.3f ms\n", self->count, toMS(duration));
8484

85-
schedule(nextIteration, 0);
86-
85+
lf_schedule(nextIteration, 0);
8786
=}
8887

8988
reaction(done) {=
@@ -94,7 +93,7 @@ reactor BenchmarkRunner(num_iterations:int(12)) {
9493
printf("Best Time:\t %.3f msec\n", measuredMSTimes[0]);
9594
printf("Worst Time:\t %.3f msec\n", measuredMSTimes[self->num_iterations - 1]);
9695
printf("Median Time:\t %.3f msec\n", median(measuredMSTimes, self->num_iterations));
97-
request_stop();
96+
lf_request_stop();
9897
=}
9998

10099
preamble {=

C/Savina/src/micro/PingPong.lf

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ reactor Ping(count:int(1000000)) {
5050
=}
5151
reaction (receive) -> serve, finished {=
5252
if (self->pingsLeft > 0) {
53-
schedule(serve, 0);
53+
lf_schedule(serve, 0);
5454
} else {
5555
// reset pingsLeft for next iteration
5656
self->pingsLeft = self->count;

C/Savina/src/parallelism/Trapezoidal.lf

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -135,7 +135,7 @@ reactor Worker(instance:int(0)) {
135135
}
136136

137137
/* [[[cog
138-
cog.outl(f'main reactor Trapezoidal(numIterations:int({numIterations}), numWorkers:int({workers}), numPieces:int({pieces}), leftEndPoint:double({left}), rightEndPoint:double({right}))')
138+
cog.outl(f'main reactor Trapezoidal(numIterations:int({numIterations}), numWorkers:int({worker_reactors}), numPieces:int({pieces}), leftEndPoint:double({left}), rightEndPoint:double({right}))')
139139
]]] */
140140
main reactor Trapezoidal(numIterations:int(12), numWorkers:int(100), numPieces:int(10000000), leftEndPoint:double(1.0), rightEndPoint:double(5.0))
141141

README.md

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -117,10 +117,11 @@ The results for a multirun are written to a directory in the scheme `multirun/<d
117117

118118
A second script called `collect_results.py` provides a convenient way for collecting results from a multirun and merging them into a single CSV file. Simply running
119119
```
120-
./collect_results.py multirun/<date>/<time>/ out.csv
120+
./collect_results.py out.csv multirun/<date>/<time>/
121121
```
122122
collects all results from the particular multirun and stores the merged data structure in out.csv. `collect_results.py` not only merges the results, but it also calculates minimum, maximum and median execution time for each individual run. The resulting CSV does not contain the measured values of individual iterations anymore and only contains a single row per run. This behavior can be disabled with the `--raw` command line flag. With the flag set, the results from all runs are merged as they are, and the resulting file contains rows for all individual runs, but no minimum, maximum and median values.
123123

124+
As a shortcut, you may omit the multirun directory to write the latest multirun results to `out.csv`.
124125

125126
## How it works
126127

File renamed without changes.

TS/src/BenchmarkRunner.lf renamed to TS/Savina/src/BenchmarkRunner.lf

Lines changed: 15 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ target TypeScript;
77
* @author Wonseo Choi
88
*/
99

10-
reactor BenchmarkRunner(num_iterations:number(12)) {
10+
reactor BenchmarkRunner(numIterations:number(12)) {
1111

1212
output start:boolean;
1313
input finish:boolean;
@@ -25,42 +25,40 @@ reactor BenchmarkRunner(num_iterations:number(12)) {
2525

2626

2727
reaction(startup) -> nextIteration {=
28-
measuredTimes.length = num_iterations;
28+
// measuredTimes.length = numIterations;
2929
actions.nextIteration.schedule(0, true);
3030

31-
console.log("Benchmark will run " + num_iterations + " times \n");
31+
console.log("Benchmark will run " + numIterations + " times \n");
3232
console.log("System information:")
3333
console.log(`Platform: ${process.platform} \n`)
3434
=}
35-
3635

37-
reaction(nextIteration) -> start, done {=
38-
if (count < num_iterations) {
39-
startTime = util.getCurrentPhysicalTime();
40-
start = true;
41-
} else {
42-
actions.done.schedule(0, true);
43-
}
36+
reaction(nextIteration) -> start {=
37+
startTime = util.getCurrentPhysicalTime();
38+
start = true;
4439
=}
4540

46-
reaction(finish) -> nextIteration {=
41+
reaction(finish) -> nextIteration, done {=
4742
const duration = util.getCurrentPhysicalTime().subtract(startTime as TimeValue);
48-
measuredTimes[count] = duration.toMilliseconds();
43+
measuredTimes.push(duration.toMilliseconds());
4944
++count;
5045

5146
console.log("Iteration "+ count + " - " + duration.toMilliseconds() + " ms\n");
5247

53-
actions.nextIteration.schedule(0, true);
54-
48+
if (count < numIterations) {
49+
actions.nextIteration.schedule(0, true);
50+
} else {
51+
actions.done.schedule(0, true);
52+
}
5553
=}
5654

5755
reaction(done) {=
5856
measuredTimes.sort()
5957

6058
console.log("Execution - Summary:\n");
6159
console.log("Best Time:\t " + measuredTimes[0] + " msec\n");
62-
console.log("Worst Time:\t " + measuredTimes[num_iterations - 1] + " msec\n");
63-
console.log("Median Time:\t " + median(measuredTimes, num_iterations) + " msec\n");
60+
console.log("Worst Time:\t " + measuredTimes[numIterations - 1] + " msec\n");
61+
console.log("Median Time:\t " + median(measuredTimes, numIterations) + " msec\n");
6462

6563
util.requestStop();
6664
=}

TS/src/Philosophers.lf renamed to TS/Savina/src/concurrency/Philosophers.lf

Lines changed: 25 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -5,12 +5,11 @@
55
*/
66

77

8-
target TypeScript {
8+
target TypeScript {
99
fast: true
1010
};
1111

12-
import BenchmarkRunner from "BenchmarkRunner.lf";
13-
12+
import BenchmarkRunner from "../BenchmarkRunner.lf";
1413

1514
reactor Philosopher(count:number(10000)) {
1615

@@ -48,7 +47,7 @@ reactor Philosopher(count:number(10000)) {
4847
}
4948

5049

51-
reactor Arbitrator(num_philosophers:number(20)) {
50+
reactor Arbitrator(numPhilosophers:number(20)) {
5251
preamble {=
5352
enum Reply {
5453
INVALID = 0,
@@ -60,11 +59,11 @@ reactor Arbitrator(num_philosophers:number(20)) {
6059
input start: boolean;
6160
output allFinished: boolean;
6261

63-
input[num_philosophers] finished: boolean;
64-
input[num_philosophers] done: boolean;
65-
input[num_philosophers] hungry: boolean;
66-
output[num_philosophers] eat: boolean;
67-
output[num_philosophers] denied: boolean;
62+
input[numPhilosophers] finished: boolean;
63+
input[numPhilosophers] done: boolean;
64+
input[numPhilosophers] hungry: boolean;
65+
output[numPhilosophers] eat: boolean;
66+
output[numPhilosophers] denied: boolean;
6867

6968
state forks: {=Array<boolean>=}({= [] =});
7069
state replies: {=Array<number>=}({= [] =});
@@ -75,8 +74,8 @@ reactor Arbitrator(num_philosophers:number(20)) {
7574
logical action sendReplies;
7675

7776
reaction(startup) {=
78-
forks.length = num_philosophers;
79-
replies.length = num_philosophers;
77+
forks.length = numPhilosophers;
78+
replies.length = numPhilosophers;
8079
=}
8180

8281
reaction(start) {=
@@ -88,7 +87,7 @@ reactor Arbitrator(num_philosophers:number(20)) {
8887
=}
8988

9089
reaction(sendReplies) -> eat, denied {=
91-
for(let i = 0; i < num_philosophers; i++) {
90+
for(let i = 0; i < numPhilosophers; i++) {
9291
if (replies[i] == Reply.EAT) {
9392
eat[i] = true;
9493
} else if (replies[i] == Reply.DENIED) {
@@ -99,42 +98,42 @@ reactor Arbitrator(num_philosophers:number(20)) {
9998
=}
10099

101100
reaction (done) {=
102-
for(let i = 0; i < num_philosophers; i++){
101+
for(let i = 0; i < numPhilosophers; i++){
103102
if (done[i] !== undefined) {
104103
forks[i] = false;
105-
forks[(i + 1) % num_philosophers] = false;
104+
forks[(i + 1) % numPhilosophers] = false;
106105
}
107106
}
108107
=}
109108

110109
reaction(hungry) -> sendReplies {=
111-
for(let i = 0; i < num_philosophers; i++) {
112-
const j= (i + arbitration_id) % num_philosophers;
110+
for(let i = 0; i < numPhilosophers; i++) {
111+
const j= (i + arbitration_id) % numPhilosophers;
113112

114113
if(hungry[j] !== undefined) {
115114
const left = j;
116-
const right = (j + 1) % num_philosophers;
115+
const right = (j + 1) % numPhilosophers;
117116

118117
if(forks[left] || forks[right]) {
119118
replies[j] = Reply.DENIED;
120119
++numRetries;
121120
} else {
122121
forks[j] = true;
123-
forks[(j + 1) % num_philosophers] = true;
122+
forks[(j + 1) % numPhilosophers] = true;
124123
replies[j] = Reply.EAT;
125124
}
126125
}
127126
}
128-
arbitration_id = (arbitration_id + 1) % num_philosophers;
127+
arbitration_id = (arbitration_id + 1) % numPhilosophers;
129128
actions.sendReplies.schedule(0, null);
130129
=}
131130

132131
reaction (finished) -> allFinished {=
133-
// i < finished_width ... finished_width = num_philosophers in this code
134-
for(let i = 0; i < num_philosophers; i++) {
132+
// i < finished_width ... finished_width = numPhilosophers in this code
133+
for(let i = 0; i < numPhilosophers; i++) {
135134
if(finished[i] !== undefined) {
136135
++numFinishedPhilosophers;
137-
if(num_philosophers == numFinishedPhilosophers) {
136+
if(numPhilosophers == numFinishedPhilosophers) {
138137
console.log("Arbitrator: All philosophers are sated. Number of denials to philosophers: " + numRetries + "\n");
139138
allFinished = true;
140139
}
@@ -144,11 +143,11 @@ reactor Arbitrator(num_philosophers:number(20)) {
144143
}
145144

146145

147-
main reactor Philosophers (numIterations:number(12), num_philosophers:number(20), count:number(100)){
146+
main reactor Philosophers (numIterations:number(12), numPhilosophers:number(20), count:number(100)){
148147

149-
arbitrator = new Arbitrator(num_philosophers = num_philosophers);
150-
philosophers = new[num_philosophers] Philosopher(count=count);
151-
runner = new BenchmarkRunner(num_iterations = numIterations);
148+
arbitrator = new Arbitrator(numPhilosophers = numPhilosophers);
149+
philosophers = new[numPhilosophers] Philosopher(count=count);
150+
runner = new BenchmarkRunner(numIterations = numIterations);
152151

153152
reaction (startup) {=
154153
console.log("Start Philosophers LF Benchmark!");

0 commit comments

Comments
 (0)