 #include <iostream>
 #include <memory>
 
+#include "cloud/cloud_log_controller_impl.h"
+#include "rocksdb/cloud/cloud_log_controller.h"
 #include "rocksdb/env.h"
 #include "rocksdb/status.h"
 #include "util/stderr_logger.h"
@@ -20,7 +22,6 @@
 #endif
 
 #include "cloud/aws/aws_file.h"
-#include "cloud/cloud_log_controller.h"
 #include "cloud/db_cloud_impl.h"
 
 namespace rocksdb {
@@ -516,10 +517,12 @@ AwsEnv::AwsEnv(Env* underlying_env, const CloudEnvOptions& _cloud_env_options,
   // create cloud log client for storing/reading logs
   if (create_bucket_status_.ok() && !cloud_env_options.keep_local_log_files) {
     if (cloud_env_options.log_type == kLogKinesis) {
-      create_bucket_status_ = CreateKinesisController(this, &cloud_log_controller_);
+      create_bucket_status_ = CloudLogControllerImpl::CreateKinesisController(
+          this, &cloud_env_options.cloud_log_controller);
     } else if (cloud_env_options.log_type == kLogKafka) {
 #ifdef USE_KAFKA
-      create_bucket_status_ = CreateKafkaController(this, &cloud_log_controller_);
+      create_bucket_status_ = CloudLogControllerImpl::CreateKafkaController(
+          this, &cloud_env_options.cloud_log_controller);
 #else
       create_bucket_status_ = Status::NotSupported(
           "In order to use Kafka, make sure you're compiling with "
@@ -542,7 +545,8 @@ AwsEnv::AwsEnv(Env* underlying_env, const CloudEnvOptions& _cloud_env_options,
     // Create Kinesis stream and wait for it to be ready
     if (create_bucket_status_.ok()) {
       create_bucket_status_ =
-          cloud_log_controller_->StartTailingStream(GetSrcBucketName());
+          cloud_env_options.cloud_log_controller->StartTailingStream(
+              GetSrcBucketName());
       if (!create_bucket_status_.ok()) {
         Log(InfoLogLevel::ERROR_LEVEL, info_log,
             "[aws] NewAwsEnv Unable to create stream %s",
@@ -656,19 +660,8 @@ Status AwsEnv::NewSequentialFile(const std::string& logical_fname,
     return st;
 
   } else if (logfile && !cloud_env_options.keep_local_log_files) {
-    // read from Kinesis
-    st = cloud_log_controller_->status();
-    if (st.ok()) {
-      // map pathname to cache dir
-      std::string pathname = cloud_log_controller_->GetCachePath(Slice(fname));
-      Log(InfoLogLevel::DEBUG_LEVEL, info_log_,
-          "[Kinesis] NewSequentialFile logfile %s %s", pathname.c_str(), "ok");
-
-      auto lambda = [this, pathname, &result, options]() -> Status {
-        return base_env_->NewSequentialFile(pathname, result, options);
-      };
-      return cloud_log_controller_->Retry(lambda);
-    }
+    return cloud_env_options.cloud_log_controller->NewSequentialFile(
+        fname, result, options);
   }
 
   // This is neither a sst file or a log file. Read from default env.
@@ -769,19 +762,8 @@ Status AwsEnv::NewRandomAccessFile(const std::string& logical_fname,
 
   } else if (logfile && !cloud_env_options.keep_local_log_files) {
     // read from Kinesis
-    st = cloud_log_controller_->status();
-    if (st.ok()) {
-      // map pathname to cache dir
-      std::string pathname = cloud_log_controller_->GetCachePath(Slice(fname));
-      Log(InfoLogLevel::DEBUG_LEVEL, info_log_,
-          "[kinesis] NewRandomAccessFile logfile %s %s", pathname.c_str(),
-          "ok");
-
-      auto lambda = [this, pathname, &result, options]() -> Status {
-        return base_env_->NewRandomAccessFile(pathname, result, options);
-      };
-      return cloud_log_controller_->Retry(lambda);
-    }
+    return cloud_env_options.cloud_log_controller->NewRandomAccessFile(
+        fname, result, options);
   }
 
   // This is neither a sst file or a log file. Read from default env.
@@ -818,7 +800,8 @@ Status AwsEnv::NewWritableFile(const std::string& logical_fname,
     result->reset(dynamic_cast<WritableFile*>(f.release()));
   } else if (logfile && !cloud_env_options.keep_local_log_files) {
     std::unique_ptr<CloudLogWritableFile> f(
-        cloud_log_controller_->CreateWritableFile(fname, options));
+        cloud_env_options.cloud_log_controller->CreateWritableFile(fname,
+                                                                   options));
     if (!f || !f->status().ok()) {
       s = Status::IOError("[aws] NewWritableFile", fname.c_str());
       Log(InfoLogLevel::ERROR_LEVEL, info_log_,
@@ -922,18 +905,7 @@ Status AwsEnv::FileExists(const std::string& logical_fname) {
     }
   } else if (logfile && !cloud_env_options.keep_local_log_files) {
     // read from Kinesis
-    st = cloud_log_controller_->status();
-    if (st.ok()) {
-      // map pathname to cache dir
-      std::string pathname = cloud_log_controller_->GetCachePath(Slice(fname));
-      Log(InfoLogLevel::DEBUG_LEVEL, info_log_,
-          "[kinesis] FileExists logfile %s %s", pathname.c_str(), "ok");
-
-      auto lambda = [this, pathname]() -> Status {
-        return base_env_->FileExists(pathname);
-      };
-      st = cloud_log_controller_->Retry(lambda);
-    }
+    st = cloud_env_options.cloud_log_controller->FileExists(fname);
   } else {
     st = base_env_->FileExists(fname);
   }
@@ -1224,11 +1196,12 @@ Status AwsEnv::DeleteFile(const std::string& logical_fname) {
     base_env_->DeleteFile(fname);
   } else if (logfile && !cloud_env_options.keep_local_log_files) {
     // read from Kinesis
-    st = cloud_log_controller_->status();
+    st = cloud_env_options.cloud_log_controller->status();
     if (st.ok()) {
       // Log a Delete record to kinesis stream
       std::unique_ptr<CloudLogWritableFile> f(
-          cloud_log_controller_->CreateWritableFile(fname, EnvOptions()));
+          cloud_env_options.cloud_log_controller->CreateWritableFile(
+              fname, EnvOptions()));
       if (!f || !f->status().ok()) {
         st = Status::IOError("[Kinesis] DeleteFile", fname.c_str());
       } else {
@@ -1392,18 +1365,7 @@ Status AwsEnv::GetFileSize(const std::string& logical_fname, uint64_t* size) {
       }
     }
   } else if (logfile && !cloud_env_options.keep_local_log_files) {
-    st = cloud_log_controller_->status();
-    if (st.ok()) {
-      // map pathname to cache dir
-      std::string pathname = cloud_log_controller_->GetCachePath(Slice(fname));
-      Log(InfoLogLevel::DEBUG_LEVEL, info_log_,
-          "[kinesis] GetFileSize logfile %s %s", pathname.c_str(), "ok");
-
-      auto lambda = [this, pathname, size]() -> Status {
-        return base_env_->GetFileSize(pathname, size);
-      };
-      st = cloud_log_controller_->Retry(lambda);
-    }
+    st = cloud_env_options.cloud_log_controller->GetFileSize(fname, size);
   } else {
     st = base_env_->GetFileSize(fname, size);
   }
@@ -1439,19 +1401,8 @@ Status AwsEnv::GetFileModificationTime(const std::string& logical_fname,
       }
     }
   } else if (logfile && !cloud_env_options.keep_local_log_files) {
-    st = cloud_log_controller_->status();
-    if (st.ok()) {
-      // map pathname to cache dir
-      std::string pathname = cloud_log_controller_->GetCachePath(Slice(fname));
-      Log(InfoLogLevel::DEBUG_LEVEL, info_log_,
-          "[kinesis] GetFileModificationTime logfile %s %s", pathname.c_str(),
-          "ok");
-
-      auto lambda = [this, pathname, time]() -> Status {
-        return base_env_->GetFileModificationTime(pathname, time);
-      };
-      st = cloud_log_controller_->Retry(lambda);
-    }
+    st = cloud_env_options.cloud_log_controller->GetFileModificationTime(fname,
+                                                                         time);
   } else {
     st = base_env_->GetFileModificationTime(fname, time);
   }
@@ -1992,7 +1943,7 @@ Status AwsEnv::NewAwsEnv(Env* base_env,
 }
 
 std::string AwsEnv::GetWALCacheDir() {
-  return cloud_log_controller_->GetCacheDir();
+  return cloud_env_options.cloud_log_controller->GetCacheDir();
 }
 
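Every file-level method above deletes the same boilerplate: check the controller's status(), map the WAL name into the local cache directory with GetCachePath(), and wrap the corresponding base Env call in Retry(). That logic now sits behind the controller's own file methods. The following is a minimal sketch, not the actual cloud_log_controller_impl.cc, of what one such method presumably looks like when reconstructed from the code removed from AwsEnv::GetFileSize; the env_ member and its GetBaseEnv() accessor are assumed names, while status(), GetCachePath() and Retry() are the helpers already visible in the removed lines.

#include "cloud/cloud_log_controller_impl.h"

// Sketch only: reconstructed from the logic deleted from AwsEnv::GetFileSize.
// env_ (the owning CloudEnv) and env_->GetBaseEnv() are assumed names.
Status CloudLogControllerImpl::GetFileSize(const std::string& fname,
                                           uint64_t* size) {
  Status st = status();  // bail out if the log controller never came up
  if (!st.ok()) {
    return st;
  }
  // Map the log file name into the local WAL cache directory that the
  // log tailer writes into.
  std::string pathname = GetCachePath(Slice(fname));
  // The tailer may not have materialized the file yet, so retry the lookup
  // against the base Env for a bounded amount of time.
  return Retry([&]() -> Status {
    return env_->GetBaseEnv()->GetFileSize(pathname, size);
  });
}

Centralizing the status/GetCachePath/Retry sequence in the controller keeps AwsEnv down to a one-line delegation per operation, which is what each hunk above reduces to.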