
Commit bceac6a

Merge pull request #1202 from Lorak-mmk/retry-policy-naming
Update RetryPolicy and SE terminology: "host" -> "target"
2 parents e39f449 + 0884f8b commit bceac6a
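The change itself is a mechanical rename: every RetryDecision::RetrySameNode becomes RetrySameTarget and every RetryDecision::RetryNextNode becomes RetryNextTarget, in line with the retry-policy and speculative-execution (SE) interfaces speaking of "targets" (presumably node-plus-shard destinations) rather than "hosts". A minimal sketch of the enum shape implied by the hunks below follows; the variant payloads are inferred from the call sites in this commit, and the real definition in the scylla crate may carry additional variants.

// Sketch only: shape inferred from the call sites changed in this commit,
// not copied from the crate's authoritative definition.
pub enum RetryDecision {
    // Retry on the same target, optionally switching to a new consistency level.
    RetrySameTarget(Option<Consistency>),
    // Retry on the next target in the query plan, optionally with a new consistency level.
    RetryNextTarget(Option<Consistency>),
    // Give up and propagate the error to the caller.
    DontRetry,
}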

File tree

9 files changed: +63 -56 lines changed


scylla/src/client/pager.rs

Lines changed: 2 additions & 2 deletions
@@ -236,12 +236,12 @@ where
                     last_error = request_error.into();

                     match retry_decision {
-                        RetryDecision::RetrySameNode(cl) => {
+                        RetryDecision::RetrySameTarget(cl) => {
                             self.metrics.inc_retries_num();
                             current_consistency = cl.unwrap_or(current_consistency);
                             continue 'same_node_retries;
                         }
-                        RetryDecision::RetryNextNode(cl) => {
+                        RetryDecision::RetryNextTarget(cl) => {
                             self.metrics.inc_retries_num();
                             current_consistency = cl.unwrap_or(current_consistency);
                             continue 'nodes_in_plan;

scylla/src/client/session.rs

Lines changed: 2 additions & 2 deletions
@@ -1916,12 +1916,12 @@ impl Session {
                     last_error = Some(request_error.into());

                     match retry_decision {
-                        RetryDecision::RetrySameNode(new_cl) => {
+                        RetryDecision::RetrySameTarget(new_cl) => {
                             self.metrics.inc_retries_num();
                             current_consistency = new_cl.unwrap_or(current_consistency);
                             continue 'same_node_retries;
                         }
-                        RetryDecision::RetryNextNode(new_cl) => {
+                        RetryDecision::RetryNextTarget(new_cl) => {
                             self.metrics.inc_retries_num();
                             current_consistency = new_cl.unwrap_or(current_consistency);
                             continue 'nodes_in_plan;

scylla/src/observability/history.rs

Lines changed: 10 additions & 10 deletions
@@ -626,7 +626,7 @@ mod tests {
         history_collector.log_attempt_error(
             attempt_id,
             &unexpected_response(CqlResponseKind::Ready),
-            &RetryDecision::RetrySameNode(Some(Consistency::Quorum)),
+            &RetryDecision::RetrySameTarget(Some(Consistency::Quorum)),
         );

         let second_attempt_id: AttemptId =
@@ -653,7 +653,7 @@
         | request send time: 2022-02-22 20:22:22 UTC
         | Error at 2022-02-22 20:22:22 UTC
         | Error: Received unexpected response from the server: READY. Expected RESULT or ERROR response.
-        | Retry decision: RetrySameNode(Some(Quorum))
+        | Retry decision: RetrySameTarget(Some(Quorum))
         |
         | - Attempt #1 sent to 127.0.0.1:19042
         | request send time: 2022-02-22 20:22:22 UTC
@@ -737,7 +737,7 @@
         history_collector.log_attempt_error(
             attempt1,
             &unexpected_response(CqlResponseKind::Event),
-            &RetryDecision::RetryNextNode(Some(Consistency::Quorum)),
+            &RetryDecision::RetryNextTarget(Some(Consistency::Quorum)),
         );
         let _attempt2: AttemptId =
             history_collector.log_attempt_start(request_id, None, node3_addr());
@@ -749,7 +749,7 @@
         history_collector.log_attempt_error(
             spec2_attempt1,
             &no_stream_id_error(),
-            &RetryDecision::RetrySameNode(Some(Consistency::Quorum)),
+            &RetryDecision::RetrySameTarget(Some(Consistency::Quorum)),
         );

         let spec2_attempt2: AttemptId =
@@ -761,7 +761,7 @@
         history_collector.log_attempt_error(
             spec1_attempt1,
             &unavailable_error(),
-            &RetryDecision::RetryNextNode(Some(Consistency::Quorum)),
+            &RetryDecision::RetryNextTarget(Some(Consistency::Quorum)),
         );

         let _spec4_attempt1: AttemptId =
@@ -780,7 +780,7 @@
         | request send time: 2022-02-22 20:22:22 UTC
         | Error at 2022-02-22 20:22:22 UTC
         | Error: Received unexpected response from the server: EVENT. Expected RESULT or ERROR response.
-        | Retry decision: RetryNextNode(Some(Quorum))
+        | Retry decision: RetryNextTarget(Some(Quorum))
         |
         | - Attempt #1 sent to 127.0.0.3:19042
         | request send time: 2022-02-22 20:22:22 UTC
@@ -793,7 +793,7 @@
         | request send time: 2022-02-22 20:22:22 UTC
         | Error at 2022-02-22 20:22:22 UTC
         | Error: Database returned an error: Not enough nodes are alive to satisfy required consistency level (consistency: Quorum, required: 2, alive: 1), Error message: Not enough nodes to satisfy consistency
-        | Retry decision: RetryNextNode(Some(Quorum))
+        | Retry decision: RetryNextTarget(Some(Quorum))
         |
         |
         | > Speculative fiber #1
@@ -802,7 +802,7 @@
         | request send time: 2022-02-22 20:22:22 UTC
         | Error at 2022-02-22 20:22:22 UTC
         | Error: Unable to allocate stream id
-        | Retry decision: RetrySameNode(Some(Quorum))
+        | Retry decision: RetrySameTarget(Some(Quorum))
         |
         | - Attempt #1 sent to 127.0.0.1:19042
         | request send time: 2022-02-22 20:22:22 UTC
@@ -836,7 +836,7 @@
         history_collector.log_attempt_error(
             request1_attempt1,
             &unexpected_response(CqlResponseKind::Supported),
-            &RetryDecision::RetryNextNode(Some(Consistency::Quorum)),
+            &RetryDecision::RetryNextTarget(Some(Consistency::Quorum)),
         );
         let request1_attempt2: AttemptId =
             history_collector.log_attempt_start(request1_id, None, node2_addr());
@@ -859,7 +859,7 @@
         | request send time: 2022-02-22 20:22:22 UTC
         | Error at 2022-02-22 20:22:22 UTC
         | Error: Received unexpected response from the server: SUPPORTED. Expected RESULT or ERROR response.
-        | Retry decision: RetryNextNode(Some(Quorum))
+        | Retry decision: RetryNextTarget(Some(Quorum))
         |
         | - Attempt #1 sent to 127.0.0.2:19042
         | request send time: 2022-02-22 20:22:22 UTC

scylla/src/policies/retry/default.rs

Lines changed: 14 additions & 14 deletions
@@ -62,7 +62,7 @@ impl RetrySession for DefaultRetrySession {
             | RequestAttemptError::DbError(DbError::ServerError, _)
             | RequestAttemptError::DbError(DbError::TruncateError, _) => {
                 if request_info.is_idempotent {
-                    RetryDecision::RetryNextNode(None)
+                    RetryDecision::RetryNextTarget(None)
                 } else {
                     RetryDecision::DontRetry
                 }
@@ -75,7 +75,7 @@
             RequestAttemptError::DbError(DbError::Unavailable { .. }, _) => {
                 if !self.was_unavailable_retry {
                     self.was_unavailable_retry = true;
-                    RetryDecision::RetryNextNode(None)
+                    RetryDecision::RetryNextTarget(None)
                 } else {
                     RetryDecision::DontRetry
                 }
@@ -97,7 +97,7 @@
             ) => {
                 if !self.was_read_timeout_retry && received >= required && !*data_present {
                     self.was_read_timeout_retry = true;
-                    RetryDecision::RetrySameNode(None)
+                    RetryDecision::RetrySameTarget(None)
                 } else {
                     RetryDecision::DontRetry
                 }
@@ -112,17 +112,17 @@
                     && *write_type == WriteType::BatchLog
                 {
                     self.was_write_timeout_retry = true;
-                    RetryDecision::RetrySameNode(None)
+                    RetryDecision::RetrySameTarget(None)
                 } else {
                     RetryDecision::DontRetry
                 }
             }
             // The node is still bootstrapping it can't execute the request, we should try another one
             RequestAttemptError::DbError(DbError::IsBootstrapping, _) => {
-                RetryDecision::RetryNextNode(None)
+                RetryDecision::RetryNextTarget(None)
             }
             // Connection to the contacted node is overloaded, try another one
-            RequestAttemptError::UnableToAllocStreamId => RetryDecision::RetryNextNode(None),
+            RequestAttemptError::UnableToAllocStreamId => RetryDecision::RetryNextTarget(None),
             // In all other cases propagate the error to the user
             _ => RetryDecision::DontRetry,
         }
@@ -236,7 +236,7 @@ mod tests {
         let mut policy = DefaultRetryPolicy::new().new_session();
         assert_eq!(
             policy.decide_should_retry(make_request_info(&error, true)),
-            RetryDecision::RetryNextNode(None)
+            RetryDecision::RetryNextTarget(None)
         );
     }

@@ -266,13 +266,13 @@
         let mut policy = DefaultRetryPolicy::new().new_session();
         assert_eq!(
             policy.decide_should_retry(make_request_info(&error, false)),
-            RetryDecision::RetryNextNode(None)
+            RetryDecision::RetryNextTarget(None)
         );

         let mut policy = DefaultRetryPolicy::new().new_session();
         assert_eq!(
             policy.decide_should_retry(make_request_info(&error, true)),
-            RetryDecision::RetryNextNode(None)
+            RetryDecision::RetryNextTarget(None)
         );
     }

@@ -292,7 +292,7 @@
         let mut policy_not_idempotent = DefaultRetryPolicy::new().new_session();
         assert_eq!(
             policy_not_idempotent.decide_should_retry(make_request_info(&error, false)),
-            RetryDecision::RetryNextNode(None)
+            RetryDecision::RetryNextTarget(None)
         );
         assert_eq!(
             policy_not_idempotent.decide_should_retry(make_request_info(&error, false)),
@@ -302,7 +302,7 @@
         let mut policy_idempotent = DefaultRetryPolicy::new().new_session();
         assert_eq!(
             policy_idempotent.decide_should_retry(make_request_info(&error, true)),
-            RetryDecision::RetryNextNode(None)
+            RetryDecision::RetryNextTarget(None)
         );
         assert_eq!(
             policy_idempotent.decide_should_retry(make_request_info(&error, true)),
@@ -329,7 +329,7 @@
         let mut policy = DefaultRetryPolicy::new().new_session();
         assert_eq!(
             policy.decide_should_retry(make_request_info(&enough_responses_no_data, false)),
-            RetryDecision::RetrySameNode(None)
+            RetryDecision::RetrySameTarget(None)
         );
         assert_eq!(
             policy.decide_should_retry(make_request_info(&enough_responses_no_data, false)),
@@ -340,7 +340,7 @@
         let mut policy = DefaultRetryPolicy::new().new_session();
         assert_eq!(
             policy.decide_should_retry(make_request_info(&enough_responses_no_data, true)),
-            RetryDecision::RetrySameNode(None)
+            RetryDecision::RetrySameTarget(None)
         );
         assert_eq!(
             policy.decide_should_retry(make_request_info(&enough_responses_no_data, true)),
@@ -425,7 +425,7 @@
         let mut policy = DefaultRetryPolicy::new().new_session();
         assert_eq!(
             policy.decide_should_retry(make_request_info(&good_write_type, true)),
-            RetryDecision::RetrySameNode(None)
+            RetryDecision::RetrySameTarget(None)
         );
         assert_eq!(
             policy.decide_should_retry(make_request_info(&good_write_type, true)),
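For code outside the driver, the visible effect of this file is that anything constructing retry decisions by hand has to switch to the new variant names. Below is an illustrative helper, not part of the driver, that reproduces a couple of the DefaultRetrySession arms above using the post-rename names; the import paths are assumptions based on this repository's layout and may differ in the published crate.

// Illustrative only: maps a failed attempt to a decision the way a couple of the
// DefaultRetrySession arms above do, using the renamed variants.
// The `use` paths below are assumptions, not taken from this diff.
use scylla::errors::{DbError, RequestAttemptError};
use scylla::policies::retry::RetryDecision;

fn sketch_decision(error: &RequestAttemptError, is_idempotent: bool) -> RetryDecision {
    match error {
        // Transient server-side failures: try another target, but only if the
        // request is idempotent (same condition as the first hunk above).
        RequestAttemptError::DbError(DbError::ServerError, _)
        | RequestAttemptError::DbError(DbError::TruncateError, _)
            if is_idempotent =>
        {
            RetryDecision::RetryNextTarget(None) // was RetryNextNode(None)
        }
        // No free stream ids on this connection: another target may have capacity.
        RequestAttemptError::UnableToAllocStreamId => RetryDecision::RetryNextTarget(None),
        // Everything else: propagate the error to the caller.
        _ => RetryDecision::DontRetry,
    }
}

Note that the real DefaultRetrySession additionally keeps per-session flags (was_unavailable_retry, was_read_timeout_retry, was_write_timeout_retry) that bound how many times each class of error is retried, as the hunks above show.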

scylla/src/policies/retry/downgrading_consistency.rs

Lines changed: 22 additions & 22 deletions
@@ -54,8 +54,8 @@ impl RetrySession for DowngradingConsistencyRetrySession {
             RequestAttemptError::DbError(DbError::Unavailable { .. }, _) => {
                 // JAVA-764: if the requested consistency level is serial, it means that the operation failed at
                 // the paxos phase of a LWT.
-                // Retry on the next host, on the assumption that the initial coordinator could be network-isolated.
-                RetryDecision::RetryNextNode(None)
+                // Retry on the next target, on the assumption that the initial coordinator could be network-isolated.
+                RetryDecision::RetryNextTarget(None)
             }
             _ => RetryDecision::DontRetry,
         };
@@ -65,18 +65,18 @@

         fn max_likely_to_work_cl(known_ok: i32, previous_cl: Consistency) -> RetryDecision {
             let decision = if known_ok >= 3 {
-                RetryDecision::RetrySameNode(Some(Consistency::Three))
+                RetryDecision::RetrySameTarget(Some(Consistency::Three))
             } else if known_ok == 2 {
-                RetryDecision::RetrySameNode(Some(Consistency::Two))
+                RetryDecision::RetrySameTarget(Some(Consistency::Two))
             } else if known_ok == 1 || previous_cl == Consistency::EachQuorum {
                 // JAVA-1005: EACH_QUORUM does not report a global number of alive replicas
                 // so even if we get 0 alive replicas, there might be
                 // a node up in some other datacenter
-                RetryDecision::RetrySameNode(Some(Consistency::One))
+                RetryDecision::RetrySameTarget(Some(Consistency::One))
             } else {
                 RetryDecision::DontRetry
             };
-            if let RetryDecision::RetrySameNode(new_cl) = decision {
+            if let RetryDecision::RetrySameTarget(new_cl) = decision {
                 debug!(
                     "Decided to lower required consistency from {} to {:?}.",
                     previous_cl, new_cl
@@ -93,7 +93,7 @@
             | RequestAttemptError::DbError(DbError::ServerError, _)
             | RequestAttemptError::DbError(DbError::TruncateError, _) => {
                 if request_info.is_idempotent {
-                    RetryDecision::RetryNextNode(None)
+                    RetryDecision::RetryNextTarget(None)
                 } else {
                     RetryDecision::DontRetry
                 }
@@ -125,7 +125,7 @@
                     max_likely_to_work_cl(*received, cl)
                 } else if !*data_present {
                     self.was_retry = true;
-                    RetryDecision::RetrySameNode(None)
+                    RetryDecision::RetrySameTarget(None)
                 } else {
                     RetryDecision::DontRetry
                 }
@@ -153,18 +153,18 @@
                         // retry with whatever consistency should allow to persist all
                         max_likely_to_work_cl(*received, cl)
                     }
-                    WriteType::BatchLog => RetryDecision::RetrySameNode(None),
+                    WriteType::BatchLog => RetryDecision::RetrySameTarget(None),

                     _ => RetryDecision::DontRetry,
                 }
                 }
             }
             // The node is still bootstrapping it can't execute the request, we should try another one
             RequestAttemptError::DbError(DbError::IsBootstrapping, _) => {
-                RetryDecision::RetryNextNode(None)
+                RetryDecision::RetryNextTarget(None)
             }
             // Connection to the contacted node is overloaded, try another one
-            RequestAttemptError::UnableToAllocStreamId => RetryDecision::RetryNextNode(None),
+            RequestAttemptError::UnableToAllocStreamId => RetryDecision::RetryNextTarget(None),
             // In all other cases propagate the error to the user
             _ => RetryDecision::DontRetry,
         }
@@ -311,20 +311,20 @@ mod tests {
         let mut policy = DowngradingConsistencyRetryPolicy::new().new_session();
         assert_eq!(
             policy.decide_should_retry(make_request_info_with_cl(&error, true, cl)),
-            RetryDecision::RetryNextNode(None)
+            RetryDecision::RetryNextTarget(None)
         );
     }

     fn max_likely_to_work_cl(known_ok: i32, current_cl: Consistency) -> RetryDecision {
         if known_ok >= 3 {
-            RetryDecision::RetrySameNode(Some(Consistency::Three))
+            RetryDecision::RetrySameTarget(Some(Consistency::Three))
         } else if known_ok == 2 {
-            RetryDecision::RetrySameNode(Some(Consistency::Two))
+            RetryDecision::RetrySameTarget(Some(Consistency::Two))
         } else if known_ok == 1 || current_cl == Consistency::EachQuorum {
             // JAVA-1005: EACH_QUORUM does not report a global number of alive replicas
             // so even if we get 0 alive replicas, there might be
             // a node up in some other datacenter
-            RetryDecision::RetrySameNode(Some(Consistency::One))
+            RetryDecision::RetrySameTarget(Some(Consistency::One))
         } else {
             RetryDecision::DontRetry
         }
@@ -359,13 +359,13 @@
         let mut policy = DowngradingConsistencyRetryPolicy::new().new_session();
         assert_eq!(
             policy.decide_should_retry(make_request_info_with_cl(&error, false, cl)),
-            RetryDecision::RetryNextNode(None)
+            RetryDecision::RetryNextTarget(None)
         );

         let mut policy = DowngradingConsistencyRetryPolicy::new().new_session();
         assert_eq!(
             policy.decide_should_retry(make_request_info_with_cl(&error, true, cl)),
-            RetryDecision::RetryNextNode(None)
+            RetryDecision::RetryNextTarget(None)
         );
     }
 }
@@ -433,7 +433,7 @@
                 false,
                 cl
             )),
-            RetryDecision::RetrySameNode(None)
+            RetryDecision::RetrySameTarget(None)
         );
         assert_eq!(
             policy.decide_should_retry(make_request_info_with_cl(
@@ -452,7 +452,7 @@
                 true,
                 cl
             )),
-            RetryDecision::RetrySameNode(None)
+            RetryDecision::RetrySameTarget(None)
         );
         assert_eq!(
             policy.decide_should_retry(make_request_info_with_cl(
@@ -523,7 +523,7 @@
             )),
             expected_decision
         );
-        if let RetryDecision::RetrySameNode(new_cl) = expected_decision {
+        if let RetryDecision::RetrySameTarget(new_cl) = expected_decision {
             assert_eq!(
                 policy.decide_should_retry(make_request_info_with_cl(
                     &not_enough_responses_with_data,
@@ -544,7 +544,7 @@
             )),
             expected_decision
         );
-        if let RetryDecision::RetrySameNode(new_cl) = expected_decision {
+        if let RetryDecision::RetrySameTarget(new_cl) = expected_decision {
             assert_eq!(
                 policy.decide_should_retry(make_request_info_with_cl(
                     &not_enough_responses_with_data,
@@ -593,7 +593,7 @@
                 true,
                 cl
             )),
-            RetryDecision::RetrySameNode(None)
+            RetryDecision::RetrySameTarget(None)
         );
         assert_eq!(
             policy.decide_should_retry(make_request_info_with_cl(
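As a sanity check on the downgrading logic, here are worked examples against the test-module helper max_likely_to_work_cl from the @@ -311 hunk above (unit-test style; assumes that helper plus the Consistency and RetryDecision types are in scope, and relies on RetryDecision being comparable with assert_eq! as the existing tests already do):

// With two replicas known to be alive, downgrade to TWO on the same target.
assert_eq!(
    max_likely_to_work_cl(2, Consistency::Quorum),
    RetryDecision::RetrySameTarget(Some(Consistency::Two))
);
// EACH_QUORUM is special-cased: even with 0 known-alive replicas, retry at ONE.
assert_eq!(
    max_likely_to_work_cl(0, Consistency::EachQuorum),
    RetryDecision::RetrySameTarget(Some(Consistency::One))
);
// With no known-alive replicas and an ordinary consistency level, give up.
assert_eq!(
    max_likely_to_work_cl(0, Consistency::Quorum),
    RetryDecision::DontRetry
);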

0 commit comments
