Skip to content

Commit 43edf10

Browse files
committed
samples: bench: Additional tests of varying numbers of workers
Schedule a possibly large number of workers to measure how performance degrades. Signed-off-by: David Brown <[email protected]>
1 parent 46af251 commit 43edf10

File tree

1 file changed

+100
-5
lines changed

1 file changed

+100
-5
lines changed

samples/bench/src/lib.rs

Lines changed: 100 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -12,6 +12,7 @@ extern crate alloc;
1212
use alloc::vec;
1313
use alloc::vec::Vec;
1414
use zephyr::time::NoWait;
15+
use zephyr::work::futures::work_size;
1516
use zephyr::{
1617
kconfig::CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC,
1718
kio::{spawn, yield_now},
@@ -49,7 +50,20 @@ extern "C" fn rust_main() {
4950
tester.run(Command::SemHigh(10_000));
5051
tester.run(Command::SemPingPong(10_000));
5152
tester.run(Command::SemPingPongAsync(10_000));
53+
tester.run(Command::SemOnePingPong(10_000));
54+
/*
55+
tester.run(Command::SemOnePingPongAsync(NUM_THREADS, 10_000 / 6));
56+
tester.run(Command::SemOnePingPongAsync(20, 10_000 / 20));
57+
tester.run(Command::SemOnePingPongAsync(50, 10_000 / 50));
58+
tester.run(Command::SemOnePingPongAsync(100, 10_000 / 100));
59+
tester.run(Command::SemOnePingPongAsync(500, 10_000 / 500));
5260
tester.run(Command::Empty);
61+
*/
62+
let mut num = 6;
63+
while num < 500 {
64+
tester.run(Command::SemOnePingPongAsync(num, 10_000 / num));
65+
num = num * 13 / 10;
66+
}
5367

5468
printkln!("Done with all tests\n");
5569
tester.leak();
@@ -167,6 +181,16 @@ impl ThreadTests {
167181
Self::high_runner(result2, high_recv);
168182
});
169183

184+
// Calculate a size to show.
185+
printkln!("worker size: {} bytes",
186+
work_size(
187+
Self::ping_pong_worker_async(
188+
result.clone(),
189+
0,
190+
result.sems[0].clone(),
191+
result.back_sems[0].clone(),
192+
6)));
193+
170194
result
171195
}
172196

@@ -287,6 +311,10 @@ impl ThreadTests {
287311
this.ping_pong_worker(id, &this.sems[id], &this.back_sems[id], count, &mut total);
288312
}
289313

314+
Command::SemOnePingPong(count) => {
315+
this.ping_pong_worker(id, &this.sems[0], &this.back_sems[0], count, &mut total);
316+
}
317+
290318
// For the async commands, spawn this on the worker thread and don't reply
291319
// ourselves.
292320
Command::SimpleSemAsync(count) => {
@@ -362,6 +390,39 @@ impl ThreadTests {
362390

363391
continue;
364392
}
393+
394+
Command::SemOnePingPongAsync(nthread, count) => {
395+
if id == 0 {
396+
for th in 0..nthread {
397+
spawn(
398+
Self::ping_pong_worker_async(
399+
this.clone(),
400+
th,
401+
this.sems[0].clone(),
402+
this.back_sems[0].clone(),
403+
count,
404+
),
405+
&this.workq,
406+
c"worker",
407+
);
408+
}
409+
spawn(
410+
Self::one_ping_pong_replier_async(
411+
this.clone(),
412+
nthread,
413+
count,
414+
),
415+
&this.workq,
416+
c"giver",
417+
);
418+
}
419+
420+
// Avoid the reply for the number of workers that are within the range. This
421+
// does assume that nthread will always be >= the number configured.
422+
if id < this.sems.len() {
423+
continue;
424+
}
425+
}
365426
}
366427

367428
this.results
@@ -475,11 +536,14 @@ impl ThreadTests {
475536
back_sem.give();
476537
}
477538

478-
this.results
479-
.sender
480-
.send_async(Result::Worker { id, count })
481-
.await
482-
.unwrap();
539+
// Only send for an ID in range.
540+
if id < this.sems.len() {
541+
this.results
542+
.sender
543+
.send_async(Result::Worker { id, count })
544+
.await
545+
.unwrap();
546+
}
483547
}
484548

485549
fn ping_pong_replier(&self, count: usize) {
@@ -491,6 +555,15 @@ impl ThreadTests {
491555
}
492556
}
493557

558+
fn one_ping_pong_replier(&self, count: usize) {
559+
for _ in 0..count {
560+
for _ in 0..self.sems.len() {
561+
self.sems[0].give();
562+
self.back_sems[0].take(Forever).unwrap();
563+
}
564+
}
565+
}
566+
494567
async fn ping_pong_replier_async(this: Arc<Self>, count: usize) {
495568
for _ in 0..count {
496569
for (sem, back) in this.sems.iter().zip(&this.back_sems) {
@@ -502,6 +575,17 @@ impl ThreadTests {
502575
// No reply.
503576
}
504577

578+
async fn one_ping_pong_replier_async(this: Arc<Self>, nthread: usize, count: usize) {
579+
for _ in 0..count {
580+
for _ in 0..nthread {
581+
this.sems[0].give();
582+
this.back_sems[0].take_async(Forever).await.unwrap();
583+
}
584+
}
585+
586+
// No reply.
587+
}
588+
505589
async fn sem_giver_async(this: Arc<Self>, sems: Vec<Arc<Semaphore>>, count: usize) {
506590
for _ in 0..count {
507591
for sem in &sems {
@@ -541,7 +625,11 @@ impl ThreadTests {
541625
Command::SemPingPong(count) => {
542626
this.ping_pong_replier(count);
543627
}
628+
Command::SemOnePingPong(count) => {
629+
this.one_ping_pong_replier(count);
630+
}
544631
Command::SemPingPongAsync(_) => (),
632+
Command::SemOnePingPongAsync(_, _) => (),
545633
}
546634
// printkln!("low command: {:?}", cmd);
547635

@@ -562,6 +650,7 @@ impl ThreadTests {
562650
Command::SemWaitAsync(_) => (),
563651
Command::SemWaitSameAsync(_) => (),
564652
Command::SemPingPong(_) => (),
653+
Command::SemOnePingPong(_) => (),
565654
Command::SemHigh(count) => {
566655
// The high-priority thread does all of the gives, this should cause every single
567656
// semaphore operation to be ready.
@@ -572,6 +661,7 @@ impl ThreadTests {
572661
}
573662
}
574663
Command::SemPingPongAsync(_) => (),
664+
Command::SemOnePingPongAsync(_, _) => (),
575665
}
576666
// printkln!("high command: {:?}", cmd);
577667

@@ -625,6 +715,11 @@ enum Command {
625715
SemPingPong(usize),
626716
/// SemPingPong, but async
627717
SemPingPongAsync(usize),
718+
/// PingPong but with a single shared semaphore. Demonstrates multiple threads queued on the
719+
/// same object.
720+
SemOnePingPong(usize),
721+
/// Same as SemOnePingPong, but async. The first parameter is the number of async tasks.
722+
SemOnePingPongAsync(usize, usize),
628723
}
629724

630725
enum Result {

0 commit comments

Comments
 (0)