Add interface to add client data rpc #846
base: master
@@ -22,6 +22,7 @@
#include "device/device.h"
#include "push_data_rpc_generated.h"
#include "fetch_data_rpc_generated.h"
#include <nuraft_mesg/common.hpp>

namespace homestore {
std::atomic< uint64_t > RaftReplDev::s_next_group_ordinal{1};

@@ -138,6 +139,66 @@ bool RaftReplDev::join_group() {
    return true;
}

data_rpc_error_code RaftReplDev::nuraft_to_data_rpc_error_code(nuraft::cmd_result_code const& nuraft_err) {
    switch (nuraft_err) {
    case nuraft::cmd_result_code::OK:
        return data_rpc_error_code::SUCCESS;
    case nuraft::cmd_result_code::SERVER_NOT_FOUND:
        return data_rpc_error_code::SERVER_NOT_FOUND;
    case nuraft::cmd_result_code::TIMEOUT:
        return data_rpc_error_code::TIMEOUT;
    case nuraft::cmd_result_code::SERVER_ALREADY_EXISTS:
        return data_rpc_error_code::SERVER_ALREADY_EXISTS;
    case nuraft::cmd_result_code::CANCELLED:
        return data_rpc_error_code::CANCELLED;
    case nuraft::cmd_result_code::TERM_MISMATCH:
        return data_rpc_error_code::TERM_MISMATCH;
    case nuraft::cmd_result_code::BAD_REQUEST:
        return data_rpc_error_code::BAD_REQUEST;
    case nuraft::cmd_result_code::FAILED:
        return data_rpc_error_code::FAILED;
    default:
        return data_rpc_error_code::NOT_SUPPORTED;
    }
}

nuraft_mesg::destination_t RaftReplDev::change_to_nuraft_mesg_destination(destination_t dest) {
    if (std::holds_alternative< peer_id_t >(dest)) {
        return nuraft_mesg::destination_t(std::get< peer_id_t >(dest));
    } else if (std::holds_alternative< role_regex >(dest)) {
        return nuraft_mesg::destination_t(static_cast< nuraft_mesg::role_regex >(std::get< role_regex >(dest)));
    } else {
        return nuraft_mesg::destination_t(std::get< svr_id_t >(dest));
    }
}

bool RaftReplDev::add_data_rpc_service(std::string const& request_name,
                                       data_service_request_handler_t const& request_handler) {
    return m_msg_mgr.bind_data_service_request(request_name, m_group_id, request_handler);
}

NullDataRpcAsyncResult RaftReplDev::data_request_unidirectional(destination_t const& dest,
                                                                std::string const& request_name,
                                                                sisl::io_blob_list_t const& cli_buf) {
    return group_msg_service()
        ->data_service_request_unidirectional(change_to_nuraft_mesg_destination(dest), request_name, cli_buf)
        .deferValue([this](auto&& r) -> Result< folly::Unit, data_rpc_error_code > {
            if (r.hasError()) { return folly::makeUnexpected(nuraft_to_data_rpc_error_code(r.error())); }
            return folly::unit;
        });
}

DataRpcAsyncResult< sisl::GenericClientResponse >
RaftReplDev::data_request_bidirectional(destination_t const& dest, std::string const& request_name,
                                        sisl::io_blob_list_t const& cli_buf) {
    return group_msg_service()
        ->data_service_request_bidirectional(change_to_nuraft_mesg_destination(dest), request_name, cli_buf)
        .deferValue([this](auto&& r) -> Result< sisl::GenericClientResponse, data_rpc_error_code > {
            if (r.hasError()) { return folly::makeUnexpected(nuraft_to_data_rpc_error_code(r.error())); }
            return std::move(r.value());
        });
}

// All the steps in the implementation should be idempotent and retryable.
AsyncReplResult<> RaftReplDev::start_replace_member(std::string& task_id, const replica_member_info& member_out,
                                                    const replica_member_info& member_in, uint32_t commit_quorum,

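For context, here is a minimal caller-side sketch of how an upper layer (e.g. HomeObject) might consume the new interface. It is not part of this PR: the `example_usage` wrapper and the "scrub_result" rpc name are made up, the generic lambda avoids hard-coding the `data_service_request_handler_t` signature (which comes from nuraft_mesg, not this diff), and the blocking `.get()` calls assume the returned async results are folly SemiFutures, as the `deferValue` chaining above suggests.

```cpp
// Illustrative sketch only, not part of this PR. Assumes the RaftReplDev
// declarations above are visible; "scrub_result" is a made-up rpc name.
void example_usage(homestore::RaftReplDev& rdev, homestore::peer_id_t const& leader,
                   sisl::io_blob_list_t const& payload) {
    // Register a named data-channel rpc for this raft group. A generic lambda is
    // used so the sketch does not have to spell out data_service_request_handler_t.
    rdev.add_data_rpc_service("scrub_result", [](auto&&... rpc_args) {
        // decode the incoming request and act on it
    });

    // One-way request: only success/failure comes back, already mapped to
    // data_rpc_error_code by nuraft_to_data_rpc_error_code.
    auto ack = rdev.data_request_unidirectional(leader, "scrub_result", payload).get();
    if (ack.hasError()) { /* e.g. data_rpc_error_code::TIMEOUT */ }

    // Round-trip request: on success the GenericClientResponse carries the
    // peer's reply payload.
    auto reply = rdev.data_request_bidirectional(leader, "scrub_result", payload).get();
    if (!reply.hasError()) { auto resp = std::move(reply.value()); /* consume the response */ }
}
```

The same calls could equally target a role_regex or svr_id_t destination, since destination_t accepts all three alternatives handled by change_to_nuraft_mesg_destination.
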
@@ -1204,7 +1265,7 @@ repl_req_ptr_t RaftReplDev::applier_create_req(repl_key const& rkey, journal_typ
        return nullptr;
    }

-    RD_LOGD(rkey.traceID, , "in follower_create_req: rreq={}, addr=0x{:x}", rreq->to_string(),
+    RD_LOGD(rkey.traceID, "in follower_create_req: rreq={}, addr=0x{:x}", rreq->to_string(),
             reinterpret_cast< uintptr_t >(rreq.get()));
    return rreq;
}

@@ -2820,6 +2881,7 @@ void RaftReplDev::become_leader_cb() {
    // becoming a leader.

    RD_LOGD(NO_TRACE_ID, "become_leader_cb: setting traffic_ready_lsn from {} to {}", current_gate, new_gate);
    m_listener->on_become_leader(m_group_id);

Contributor:
Just curious about the purpose of on_become_xxx. Should we move it to the beginning of become_xxx_cb so the upper-layer callback is called first, similar to how handle_commit does it?
Contributor (Author):
on_become_leader and on_become_follower will be used for HomeObject scrubbing. If a leader switch happens while scrubbing is ongoing, the old leader becomes a follower and on_become_follower is called on that node, where we stop the scrubbing thread that requests scrub results from the other two members. One of the followers becomes the leader and on_become_leader is called on that node, where we read the scrub superblk and start the scrubbing thread to request scrub results from the other two members.
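To make that flow concrete, a rough sketch of what such a scrub controller could look like on the HomeObject side. Everything in it is hypothetical; only the start/stop-on-role-change behaviour comes from the comment above.

```cpp
#include <atomic>
#include <thread>

// Hypothetical HomeObject-side helper, driven from the on_become_leader /
// on_become_follower callbacks described above. All names are illustrative.
class ScrubController {
public:
    // Driven from on_become_leader: read the scrub superblk and start the thread
    // that asks the other members for their scrub results (e.g. over the new data rpc).
    void on_leader() {
        bool expected{false};
        if (m_running.compare_exchange_strong(expected, true)) {
            m_worker = std::thread([this] {
                while (m_running.load()) {
                    // request scrub results from the other two members
                }
            });
        }
    }

    // Driven from on_become_follower: the demoted leader stops requesting results.
    void on_follower() {
        if (m_running.exchange(false) && m_worker.joinable()) { m_worker.join(); }
    }

    ~ScrubController() { on_follower(); }

private:
    std::atomic< bool > m_running{false};
    std::thread m_worker;
};
```
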
}

bool RaftReplDev::is_ready_for_traffic() const {

Not sure, but it is a bit concerning that this could become a problem during shutdown: HomeObject will be destructed first, and then the handler is invalid. It is not major at the moment.
This is a good point, but we actually call homestore::shutdown in HSHomeObject::shutdown(), which means HomeObject is not destructed until the homestore shutdown returns.
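A simplified sketch of the ordering being argued here, with paraphrased names (the real HSHomeObject lives in HomeObject, not in this repo), in case it helps reviewers follow the lifetime reasoning:

```cpp
// Paraphrased lifetime sketch: handlers registered via add_data_rpc_service
// capture HomeObject state, so homestore (and with it the nuraft_mesg data
// service) must stop dispatching before that state is destroyed.
class HSHomeObjectLike {
public:
    void shutdown() {
        stop_homestore();   // 1. homestore shutdown returns only after rpc dispatch stops
        // 2. from here on, no handler can be invoked against a dangling `this`
    }

    ~HSHomeObjectLike() {
        shutdown();         // destruction happens only after shutdown() has completed
    }

private:
    void stop_homestore() { /* calls homestore's shutdown, as described above */ }
};
```
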