|
287 | 287 | fi
|
288 | 288 | }
|
289 | 289 |
|
function makeLocalLeaseAndReplaceRemote {
  # Build a new local candidate lease and try to replace the remote lease
  # configmap with it. Returns 0 only if, after the replace, checkLease
  # confirms we actually own the lease (another owner may have replaced it
  # concurrently). Returns 1 on any failure.
  #
  # xtrace is silenced while we shell out to kubectl and is restored on
  # every exit path (the original placed a single unreachable `set -x`
  # after the returns, so tracing was never re-enabled).
  set +x

  makeLocalLease
  if [ $? -ne 0 ]; then
    traceError "failed - could not generate a new local lease"
    set -x
    return 1
  fi

  local tempcf=${LOCAL_ROOT}/tempcf.yaml

  # next, try replace remote lease with the candidate lease
  # BUG FIX: the dry-run output previously went to ./tempcf.yaml (relative
  # to the current working directory) while the replace below read
  # ${LOCAL_ROOT}/tempcf.yaml - both must use ${tempcf}.
  kubectl create configmap ${CONFIGMAP_NAME} --from-file ${LOCAL_ROOT}/${LOCAL_FILE} -o yaml -n default --dry-run > ${tempcf}
  if [ $? -ne 0 ]; then
    traceError "failed - could not generate config map yaml"
    rm -f ${tempcf}
    set -x
    return 1
  fi

  kubectl replace -f ${tempcf}
  local replacerc=$?
  # remove the temp file regardless of outcome so stale candidates don't linger
  rm -f ${tempcf}
  if [ $replacerc -ne 0 ]; then
    traceError "failed - could not replace remote lease"
    set -x
    return 1
  fi

  # finally, check if we now actually own the lease (someone could have been
  # replacing at the same time)
  checkLease
  if [ $? -eq 0 ]; then
    set -x
    return 0
  else
    traceError "failed - replaced remote lease, but we somehow lost a race or can no longer communicate with kubernetes"
    set -x
    return 1
  fi
}
| 324 | + |
290 | 325 | function getRemoteLease {
|
291 | 326 | #
|
292 | 327 | # first, if the remote lease configmap doesn't exist
|
@@ -397,25 +432,11 @@ function obtainLease {
|
397 | 432 | # so assume it can be replaced and we can try takeover the lease
|
398 | 433 |
|
399 | 434 | # first make a local candidate lease
|
400 |
| - makeLocalLease |
401 |
| - if [ $? -ne 0 ]; then |
402 |
| - traceError "failed - could not generate a new local lease" |
403 |
| - return 1 |
404 |
| - fi |
405 |
| - |
406 |
| - # next, try replace remote lease with the candidate lease |
407 |
| - kubectl create configmap ${CONFIGMAP_NAME} --from-file ${LOCAL_ROOT}/${LOCAL_FILE} -o yaml -n default --dry-run | kubectl replace -f - |
408 |
| - if [ $? -ne 0 ]; then |
409 |
| - traceError "failed - could not replace remote lease" |
410 |
| - return 1 |
411 |
| - fi |
412 |
| - |
413 |
| - # finally, check if we now actually own the lease (someone could have been replacing at the same time) |
414 |
| - checkLease |
| 435 | + makeLocalLeaseAndReplaceRemote |
415 | 436 | if [ $? -eq 0 ]; then
|
416 | 437 | return 0
|
417 | 438 | else
|
418 |
| - traceError "failed - replaced remote lease, but kubernetes is not responding or we lost a race and another potential owner replaced it too, will keep retrying up to the timeout" |
| 439 | + traceError "failed to replace remote lease, will keep retrying up to the timeout" |
419 | 440 | fi
|
420 | 441 | fi
|
421 | 442 | local mnow=`date +%s`
|
@@ -449,26 +470,12 @@ function renewLease {
|
449 | 470 | fi
|
450 | 471 |
|
451 | 472 | # now make a new local candidate lease
|
452 |
| - makeLocalLease |
453 |
| - if [ $? -ne 0 ]; then |
454 |
| - traceError "failed - could not generate a new local lease" |
455 |
| - return 1 |
456 |
| - fi |
457 |
| - |
458 |
| - # next, try replace remote lease with the candidate lease |
459 |
| - kubectl create configmap ${CONFIGMAP_NAME} --from-file ${LOCAL_ROOT}/${LOCAL_FILE} -o yaml -n default --dry-run | kubectl replace -f - |
| 473 | + makeLocalLeaseAndReplaceRemote |
460 | 474 | if [ $? -ne 0 ]; then
|
461 |
| - traceError "failed - could not get replace remote lease" |
| 475 | + traceError "failed to replace remote lease" |
462 | 476 | return 1
|
463 |
| - fi |
464 |
| - |
465 |
| - # finally, check if we now actually own the lease (someone could have been replacing at the same time) |
466 |
| - checkLease |
467 |
| - if [ $? -eq 0 ]; then |
468 |
| - return 0 |
469 | 477 | else
|
470 |
| - traceError "failed - replaced remote lease, but we somehow lost a race or can no longer communicate with kubernetes" |
471 |
| - return 1 |
| 478 | + return 0 |
472 | 479 | fi
|
473 | 480 | }
|
474 | 481 |
|
|
0 commit comments