@@ -63,8 +63,33 @@ class TieredStorageTest : public BaseFamilyTest {
   }
 };

+// Test that should run with both modes of "cooling"
+class LatentCoolingTSTest : public TieredStorageTest, public testing::WithParamInterface<bool> {
+  void SetUp() override {
+    fs.emplace();
+    SetFlag(&FLAGS_tiered_experimental_cooling, GetParam());
+    TieredStorageTest::SetUp();
+  }
+
+  optional<absl::FlagSaver> fs;
+};
+
+INSTANTIATE_TEST_SUITE_P(TS, LatentCoolingTSTest, testing::Values(true, false));
+
+// Disabled cooling and all values are offloaded
+class PureDiskTSTest : public TieredStorageTest {
+  void SetUp() override {
+    fs.emplace();
+    SetFlag(&FLAGS_tiered_offload_threshold, 1.0);
+    SetFlag(&FLAGS_tiered_experimental_cooling, false);
+    TieredStorageTest::SetUp();
+  }
+
+  optional<absl::FlagSaver> fs;
+};
+
 // Perform simple series of SET, GETSET and GET
-TEST_F(TieredStorageTest, SimpleGetSet) {
+TEST_P(LatentCoolingTSTest, SimpleGetSet) {
   absl::FlagSaver saver;
   SetFlag(&FLAGS_tiered_offload_threshold, 0.0f);  // disable offloading
   UpdateFromFlags();
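Note (not part of the patch): LatentCoolingTSTest above is a standard gtest value-parameterized fixture. As a minimal, self-contained sketch of that pattern, assuming only gtest (the names CoolingModeTest and cooling_enabled_ are illustrative, not Dragonfly code):

#include <gtest/gtest.h>

// Illustrative fixture: the bool parameter toggles a mode before each test body runs.
class CoolingModeTest : public ::testing::TestWithParam<bool> {
 protected:
  void SetUp() override { cooling_enabled_ = GetParam(); }  // GetParam() supplies the bool
  bool cooling_enabled_ = false;
};

TEST_P(CoolingModeTest, RunsInBothModes) {
  // The same body runs twice: once with cooling_enabled_ == true, once with false.
  SUCCEED() << "cooling_enabled_ = " << cooling_enabled_;
}

// Instantiated test names become Modes/CoolingModeTest.RunsInBothModes/0 and /1.
INSTANTIATE_TEST_SUITE_P(Modes, CoolingModeTest, ::testing::Values(true, false));

The patch inherits testing::WithParamInterface<bool> directly instead of TestWithParam<bool> because the fixture already derives from TieredStorageTest; TestWithParam is just Test plus WithParamInterface bundled together.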
@@ -107,7 +132,8 @@ TEST_F(TieredStorageTest, SimpleGetSet) {
   EXPECT_EQ(metrics.db_stats[0].tiered_used_bytes, 0);
 }

-TEST_F(TieredStorageTest, MGET) {
+// Use MGET to load multiple offloaded values
+TEST_P(LatentCoolingTSTest, MGET) {
   vector<string> command = {"MGET"}, values = {};
   for (char key = 'A'; key <= 'Z'; key++) {
     command.emplace_back(1, key);
@@ -178,7 +204,8 @@ TEST_F(TieredStorageTest, AppendStorm) {
   EXPECT_LE(metrics.tiered_stats.total_uploads, 2u);
 }

-TEST_F(TieredStorageTest, Ranges) {
+// SETRANGE and GETRANGE
+TEST_P(LatentCoolingTSTest, Ranges) {
   Run({"SET", "key", string(3000, 'a')});
   ExpectConditionWithinTimeout([this] { return GetMetrics().tiered_stats.total_stashes >= 1; });

@@ -194,7 +221,8 @@ TEST_F(TieredStorageTest, Ranges) {
   EXPECT_EQ(resp, string(500, 'c') + string(500, 'd'));
 }

-TEST_F(TieredStorageTest, MultiDb) {
+// Stash values from different databases and read them back
+TEST_P(LatentCoolingTSTest, MultiDb) {
   for (size_t i = 0; i < 10; i++) {
     Run({"SELECT", absl::StrCat(i)});
     Run({"SET", absl::StrCat("k", i), BuildString(3000, char('A' + i))});
@@ -212,6 +240,7 @@ TEST_F(TieredStorageTest, MultiDb) {
   }
 }

+// Trigger defragmentation
 TEST_F(TieredStorageTest, Defrag) {
   for (char k = 'a'; k < 'a' + 8; k++) {
     Run({"SET", string(1, k), string(600, k)});
@@ -248,11 +277,9 @@ TEST_F(TieredStorageTest, Defrag) {
   EXPECT_EQ(metrics.tiered_stats.allocated_bytes, 0u);
 }

-TEST_F(TieredStorageTest, BackgroundOffloading) {
+TEST_F(PureDiskTSTest, BackgroundOffloading) {
   absl::FlagSaver saver;
-  SetFlag(&FLAGS_tiered_offload_threshold, 1.0f);      // offload all values
-  SetFlag(&FLAGS_tiered_upload_threshold, 0.0f);       // upload all values
-  SetFlag(&FLAGS_tiered_experimental_cooling, false);  // The setup works without cooling buffers
+  SetFlag(&FLAGS_tiered_upload_threshold, 0.0f);  // upload all values
   UpdateFromFlags();

   const int kNum = 500;
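Note (not part of the patch): the fixtures and the remaining `absl::FlagSaver saver;` lines rely on Abseil's flag-saver idiom. A minimal sketch under the assumption of plain Abseil, with a made-up flag name (demo_cooling) purely for illustration:

#include "absl/flags/flag.h"
#include "absl/flags/reflection.h"

ABSL_FLAG(bool, demo_cooling, true, "illustrative flag, not a real Dragonfly flag");

void RunWithOverride() {
  absl::FlagSaver saver;                      // snapshots all flag values on construction
  absl::SetFlag(&FLAGS_demo_cooling, false);  // scoped override for this test/function
  // ... code under test observes absl::GetFlag(FLAGS_demo_cooling) == false ...
}                                             // saver's destructor restores the original values

In the new fixtures the saver is held in an optional<absl::FlagSaver> member and emplaced in SetUp() before the overrides, so the original flag values are restored automatically when each test fixture is destroyed.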
@@ -295,18 +322,8 @@ TEST_F(TieredStorageTest, BackgroundOffloading) {
   EXPECT_EQ(metrics.tiered_stats.allocated_bytes, kNum * 4096);
 }

-TEST_F(TieredStorageTest, FlushAll) {
-  absl::FlagSaver saver;
-  SetFlag(&FLAGS_tiered_offload_threshold, 1.0f);  // offload all values
-
-  // We want to cover the interaction of FlushAll with concurrent reads from disk.
-  // For that we disable tiered_experimental_cooling.
-  // TODO: seems that our replacement policy will upload the entries to RAM in any case,
-  // making this test ineffective. We should add the ability to disable promotion of offloaded
-  // entries to RAM upon reads.
-  SetFlag(&FLAGS_tiered_experimental_cooling, false);
-  UpdateFromFlags();
-
+// Test FLUSHALL while reading entries
+TEST_F(PureDiskTSTest, FlushAll) {
   const int kNum = 500;
   for (size_t i = 0; i < kNum; i++) {
     Run({"SET", absl::StrCat("k", i), BuildString(3000)});
@@ -344,6 +361,7 @@ TEST_F(TieredStorageTest, FlushAll) {
   EXPECT_EQ(metrics.db_stats.front().tiered_entries, 0u);
 }

+// Check FLUSHALL clears filling bytes of small bins
 TEST_F(TieredStorageTest, FlushPending) {
   absl::FlagSaver saver;
   SetFlag(&FLAGS_tiered_offload_threshold, 1.0f);  // offload all values
@@ -358,23 +376,42 @@ TEST_F(TieredStorageTest, FlushPending) {
   EXPECT_EQ(GetMetrics().tiered_stats.small_bins_filling_bytes, 0u);
 }

-TEST_F(TieredStorageTest, MemoryPressure) {
-  max_memory_limit = 20_MB;
+// Test that clients are throttled if many stashes are issued.
+// Stashes are released with CLIENT UNPAUSE so that they occur at the same time.
+TEST_F(PureDiskTSTest, ThrottleClients) {
   absl::FlagSaver saver;
-  absl::SetFlag(&FLAGS_tiered_upload_threshold, float(2_MB) / float(max_memory_limit));
+  absl::SetFlag(&FLAGS_tiered_upload_threshold, 0.0);
+  UpdateFromFlags();

-  constexpr size_t kNum = 10000;
-  for (size_t i = 0; i < kNum; i++) {
-    auto resp = Run({"SET", absl::StrCat("k", i), BuildString(10000)});
-    if (resp != "OK"sv) {
-      resp = Run({"INFO", "ALL"});
-      ASSERT_FALSE(true) << i << "\nInfo ALL:\n" << resp.GetString();
-    }
-    ThisFiber::SleepFor(500us);
+  // Issue CLIENT PAUSE to accumulate SETs
+  Run({"CLIENT", "PAUSE", "1000"});
+
+  string value(4096, 'a');
+  vector<Fiber> fibs;
+  for (size_t i = 0; i < 100; i++) {
+    fibs.emplace_back(pp_->at(0)->LaunchFiber([this, i, &value] {
+      string key = absl::StrCat("k", i);
+      Run(key, {"SET", key, value});
+    }));
   }
+  ThisFiber::Yield();
+
+  // Unpause
+  Run({"CLIENT", "UNPAUSE"});

+  // Check that at least some of the clients were caught throttling,
+  // but that backpressure was applied to all of them
   auto metrics = GetMetrics();
-  EXPECT_LT(metrics.used_mem_peak, 20_MB);
+  EXPECT_GT(metrics.tiered_stats.clients_throttled, fibs.size() / 10);
+  EXPECT_EQ(metrics.tiered_stats.total_clients_throttled, fibs.size());
+
+  for (auto& fib : fibs)
+    fib.JoinIfNeeded();
+
+  // Because of the 5ms max wait time for backpressure, we can't rely on the stashes having
+  // finished even after all the fibers joined, so expect the condition with a timeout
+  ExpectConditionWithinTimeout(
+      [&] { return GetMetrics().tiered_stats.total_stashes == fibs.size(); });
 }

 TEST_F(TieredStorageTest, Expiry) {
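Note (not part of the patch): ExpectConditionWithinTimeout is a helper from the test harness; its real implementation is not shown here. As a rough sketch of what a poll-until-deadline helper of this kind generally looks like, with invented names and timing values:

#include <chrono>
#include <functional>
#include <thread>

// Polls `cond` until it returns true or `timeout` elapses; returns the last observed result.
bool PollCondition(const std::function<bool()>& cond,
                   std::chrono::milliseconds timeout = std::chrono::milliseconds(1000),
                   std::chrono::milliseconds step = std::chrono::milliseconds(10)) {
  const auto deadline = std::chrono::steady_clock::now() + timeout;
  while (std::chrono::steady_clock::now() < deadline) {
    if (cond())
      return true;
    std::this_thread::sleep_for(step);
  }
  return cond();  // one final check at the deadline
}

In the fiber-based test framework the real helper presumably yields the fiber instead of sleeping a thread; in the test above the polled condition is GetMetrics().tiered_stats.total_stashes == fibs.size().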
@@ -386,11 +423,7 @@ TEST_F(TieredStorageTest, Expiry) {
   EXPECT_EQ(resp, val);
 }

-TEST_F(TieredStorageTest, SetExistingExpire) {
-  absl::FlagSaver saver;
-  SetFlag(&FLAGS_tiered_offload_threshold, 1.0f);  // offload all values
-  SetFlag(&FLAGS_tiered_experimental_cooling, false);
-
+TEST_F(PureDiskTSTest, SetExistingExpire) {
   const int kNum = 20;
   for (size_t i = 0; i < kNum; i++) {
     Run({"SETEX", absl::StrCat("k", i), "100", BuildString(256)});
@@ -407,13 +440,7 @@ TEST_F(TieredStorageTest, SetExistingExpire) {
   }
 }

-TEST_F(TieredStorageTest, Dump) {
-  absl::FlagSaver saver;
-  SetFlag(&FLAGS_tiered_offload_threshold, 1.0f);  // offload all values
-
-  // we want to test without cooling to trigger disk I/O on reads.
-  SetFlag(&FLAGS_tiered_experimental_cooling, false);
-
+TEST_F(PureDiskTSTest, Dump) {
   const int kNum = 10;
   for (size_t i = 0; i < kNum; i++) {
     Run({"SET", absl::StrCat("k", i), BuildString(3000)});  // big enough to trigger offloading.