@@ -19,12 +19,13 @@ limitations under the License.
// election state. This implementation does not guarantee that only one
// client is acting as a leader (a.k.a. fencing).
//
- // A client observes timestamps captured locally to infer the state of the
- // leader election. Thus the implementation is tolerant to arbitrary clock
- // skew, but is not tolerant to arbitrary clock skew rate. Timestamp(renew time)
- // is not meaningful if it was collected on another machine. The implementation
- // of this client only acts on locally collected timestamps and cannot rely on
- // the accuracy of timestamp in the record for correctness.
+ // A client only acts on timestamps captured locally to infer the state of the
+ // leader election. The client does not consider timestamps in the leader
+ // election record to be accurate because these timestamps may not have been
+ // produced by a local clock. The implementation does not depend on their
+ // accuracy and only uses their change to indicate that another client has
+ // renewed the leader lease. Thus the implementation is tolerant to arbitrary
+ // clock skew, but is not tolerant to arbitrary clock skew rate.
//
// However the level of tolerance to skew rate can be configured by setting
// RenewDeadline and LeaseDuration appropriately. The tolerance expressed as a
@@ -107,21 +108,27 @@ type LeaderElectionConfig struct {
// LeaseDuration is the duration that non-leader candidates will
// wait to force acquire leadership. This is measured against time of
- // last observed ack. A client needs to wait a full LeaseDuration without
- // observing a change to the record before it can attempt to take over even
- // when a client with a different identity against the record's starts and
- // the renew time in the record is older than LeaseDuration. A.k.a., when
- // all clients are shutdown and after at least a LeaseDuration, clients
- // started with different identities against the record's must wait a full
- // LeaseDuration before acquiring a lock. Thus LeaseDuration should be as
- // short as possible to avoid a possible long waiting. LeaseDuration is 15
- // seconds in core Kubernetes components.
+ // last observed ack.
+ //
+ // A client needs to wait a full LeaseDuration without observing a change to
+ // the record before it can attempt to take over. When all clients are
+ // shut down and a new set of clients are started with different names against
+ // the same leader record, they must wait the full LeaseDuration before
+ // attempting to acquire the lease. Thus LeaseDuration should be as short as
+ // possible (within your tolerance for clock skew rate) to avoid possible
+ // long waits in this scenario.
+ //
+ // Core clients default this value to 15 seconds.
LeaseDuration time.Duration
// RenewDeadline is the duration that the acting master will retry
// refreshing leadership before giving up.
+ //
+ // Core clients default this value to 10 seconds.
RenewDeadline time.Duration
// RetryPeriod is the duration the LeaderElector clients should wait
// between tries of actions.
+ //
+ // Core clients default this value to 2 seconds.
RetryPeriod time.Duration

// Callbacks are callbacks that are triggered during certain lifecycle
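
For reference, below is a minimal sketch of how the fields discussed in this diff fit together when wired into a LeaderElectionConfig with client-go's leaderelection package. The durations mirror the core-client defaults called out in the comments (15s / 10s / 2s); the lease name, namespace, and identity are hypothetical placeholders, and construction of the clientset is left to the caller.

// Sketch only: illustrative wiring of LeaseDuration, RenewDeadline, and
// RetryPeriod; lease name, namespace, and identity are placeholders.
package example

import (
	"context"
	"log"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func runWithLeaderElection(ctx context.Context, client kubernetes.Interface, id string) {
	// The lock holds the leader election record that candidates watch for
	// changes, using their own locally captured timestamps as described above.
	lock := &resourcelock.LeaseLock{
		LeaseMeta: metav1.ObjectMeta{
			Name:      "example-component", // placeholder lease name
			Namespace: "default",           // placeholder namespace
		},
		Client:     client.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: id},
	}

	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock: lock,
		// Values mirror the core-client defaults mentioned in the comments.
		LeaseDuration: 15 * time.Second, // how long non-leaders wait before trying to take over
		RenewDeadline: 10 * time.Second, // how long the leader retries renewal before giving up
		RetryPeriod:   2 * time.Second,  // wait between individual acquire/renew attempts
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				log.Println("started leading; do work until ctx is cancelled")
				<-ctx.Done()
			},
			OnStoppedLeading: func() {
				log.Printf("%s stopped leading", id)
			},
		},
	})
}

Note that client-go validates these values when the elector is built: LeaseDuration must be greater than RenewDeadline, and RenewDeadline must be comfortably larger than RetryPeriod, so the defaults above are a consistent starting point.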