|
8 | 8 | "net/http" |
9 | 9 | "net/url" |
10 | 10 | "strings" |
| 11 | + "time" |
11 | 12 |
|
12 | 13 | "github.com/rwx-cloud/cli/cmd/rwx/config" |
13 | 14 | "github.com/rwx-cloud/cli/internal/accesstoken" |
@@ -575,6 +576,131 @@ func (c Client) TaskStatus(cfg TaskStatusConfig) (TaskStatusResult, error) { |
575 | 576 | return result, nil |
576 | 577 | } |
577 | 578 |
|
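| | +// GetLogDownloadRequest fetches a log download descriptor (URL, token, filename, |
| | +// and optional contents) for the given task from the Cloud API. |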
| 579 | +func (c Client) GetLogDownloadRequest(taskId string) (LogDownloadRequestResult, error) { |
| 580 | + endpoint := fmt.Sprintf("/mint/api/log_downloads/%s", url.PathEscape(taskId)) |
| 581 | + result := LogDownloadRequestResult{} |
| 582 | + |
| 583 | + req, err := http.NewRequest(http.MethodGet, endpoint, nil) |
| 584 | + if err != nil { |
| 585 | + return result, errors.Wrap(err, "unable to create new HTTP request") |
| 586 | + } |
| 587 | + req.Header.Set("Content-Type", "application/json") |
| 588 | + req.Header.Set("Accept", "application/json") |
| 589 | + |
| 590 | + resp, err := c.RoundTrip(req) |
| 591 | + if err != nil { |
| 592 | + return result, errors.Wrap(err, "HTTP request failed") |
| 593 | + } |
| 594 | + defer resp.Body.Close() |
| 595 | + |
| 596 | + if err = decodeResponseJSON(resp, &result); err != nil { |
| 597 | + return result, err |
| 598 | + } |
| 599 | + |
| 600 | + return result, nil |
| 601 | +} |
| 602 | + |
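| | +// DownloadLogs fetches the logs described by a LogDownloadRequestResult from the |
| | +// task server, retrying transport errors and 5xx responses with capped exponential |
| | +// backoff for up to maxRetryDurationSeconds (default 30 seconds). |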
| 603 | +func (c Client) DownloadLogs(request LogDownloadRequestResult, maxRetryDurationSeconds ...int) ([]byte, error) { |
| 604 | + maxRetryDuration := 30 * time.Second |
| 605 | + if len(maxRetryDurationSeconds) > 0 && maxRetryDurationSeconds[0] > 0 { |
| 606 | + maxRetryDuration = time.Duration(maxRetryDurationSeconds[0]) * time.Second |
| 607 | + } |
| 608 | + const initialBackoff = 1 * time.Second |
| 609 | + |
| 610 | + startTime := time.Now() |
| 611 | + backoff := initialBackoff |
| 612 | + attempt := 0 |
| 613 | + |
| 614 | + var lastErr error |
| 615 | + |
| 616 | + for { |
| 617 | + attempt++ |
| 618 | + |
| 619 | + // Recreate the request on each attempt: its body reader is consumed once the request is sent. |
| 620 | + var req *http.Request |
| 621 | + var err error |
| 622 | + |
| 623 | + if request.Contents != nil { |
| 624 | + // POST approach, for zip files (group tasks) |
| 625 | + formData := url.Values{} |
| 626 | + formData.Set("token", request.Token) |
| 627 | + formData.Set("filename", request.Filename) |
| 628 | + formData.Set("contents", *request.Contents) |
| 629 | + encodedBody := formData.Encode() |
| 630 | + |
| 631 | + req, err = http.NewRequest(http.MethodPost, request.URL, strings.NewReader(encodedBody)) |
| 632 | + if err != nil { |
| 633 | + return nil, errors.Wrap(err, "unable to create new HTTP request") |
| 634 | + } |
| 635 | + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") |
| 636 | + req.Header.Set("Accept", "application/octet-stream") |
| 637 | + } else { |
| 638 | + // GET approach, for single log files |
| 639 | + req, err = http.NewRequest(http.MethodGet, request.URL, nil) |
| 640 | + if err != nil { |
| 641 | + return nil, errors.Wrap(err, "unable to create new HTTP request") |
| 642 | + } |
| 643 | + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", request.Token)) |
| 644 | + req.Header.Set("Accept", "application/octet-stream") |
| 645 | + } |
| 646 | + |
| 647 | + // Use http.DefaultClient directly since the logs will come from a task server URL rather than Cloud |
| 648 | + resp, err := http.DefaultClient.Do(req) |
| 649 | + if err != nil { |
| 650 | + lastErr = errors.Wrap(err, "HTTP request failed") |
| 651 | + |
| 652 | + if time.Since(startTime) >= maxRetryDuration { |
| 653 | + return nil, errors.Wrapf(lastErr, "failed after %d attempts over %v", attempt, time.Since(startTime).Round(time.Second)) |
| 654 | + } |
| 655 | + |
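| | + // Back off exponentially (1s, 2s, 4s, ...) but never wait more than 5s between attempts. |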
| 656 | + time.Sleep(backoff) |
| 657 | + backoff *= 2 |
| 658 | + if backoff > 5*time.Second { |
| 659 | + backoff = 5 * time.Second |
| 660 | + } |
| 661 | + continue |
| 662 | + } |
| 663 | + |
| 664 | + if resp.StatusCode >= 200 && resp.StatusCode < 300 { |
| 665 | + defer resp.Body.Close() |
| 666 | + logBytes, err := io.ReadAll(resp.Body) |
| 667 | + if err != nil { |
| 668 | + return nil, errors.Wrap(err, "unable to read response body") |
| 669 | + } |
| 670 | + return logBytes, nil |
| 671 | + } |
| 672 | + |
| 673 | + bodyBytes, _ := io.ReadAll(resp.Body) |
| 674 | + resp.Body.Close() |
| 675 | + |
| 676 | + // Don't retry on 4xx errors |
| 677 | + if resp.StatusCode >= 400 && resp.StatusCode < 500 { |
| 678 | + errMsg := extractErrorMessage(bytes.NewReader(bodyBytes)) |
| 679 | + if errMsg == "" { |
| 680 | + errMsg = fmt.Sprintf("Unable to download logs - %s", resp.Status) |
| 681 | + } |
| 682 | + return nil, errors.New(errMsg) |
| 683 | + } |
| 684 | + |
| 685 | + // Retry on 5xx errors - task server may be waking up |
| 686 | + errMsg := extractErrorMessage(bytes.NewReader(bodyBytes)) |
| 687 | + if errMsg == "" { |
| 688 | + errMsg = fmt.Sprintf("Unable to download logs - %s", resp.Status) |
| 689 | + } |
| 690 | + lastErr = errors.New(errMsg) |
| 691 | + |
| 692 | + if time.Since(startTime) >= maxRetryDuration { |
| 693 | + return nil, errors.Wrapf(lastErr, "failed after %d attempts over %v", attempt, time.Since(startTime).Round(time.Second)) |
| 694 | + } |
| 695 | + |
| 696 | + time.Sleep(backoff) |
| 697 | + backoff *= 2 |
| 698 | + if backoff > 5*time.Second { |
| 699 | + backoff = 5 * time.Second |
| 700 | + } |
| 701 | + } |
| 702 | +} |
| 703 | + |
578 | 704 | func decodeResponseJSON(resp *http.Response, result any) error { |
579 | 705 | if resp.StatusCode >= 200 && resp.StatusCode < 300 { |
580 | 706 | if result == nil { |
|
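Taken together, the two new methods form a two-step flow: `GetLogDownloadRequest` asks the Cloud API for a download descriptor, and `DownloadLogs` fetches the bytes from the task server that descriptor points at. Below is a minimal caller sketch; the `saveTaskLogs` helper, the `internal/api` import path, and the output file handling are illustrative assumptions, not part of this change.

```go
package main

import (
	"os"

	// Assumed import path for the client package this diff modifies.
	"github.com/rwx-cloud/cli/internal/api"
)

// saveTaskLogs is a sketch only: it assumes a configured api.Client and a
// known task ID.
func saveTaskLogs(c api.Client, taskID, outPath string) error {
	// Step 1: ask the Cloud API for a download descriptor
	// (URL, token, filename, optional contents).
	download, err := c.GetLogDownloadRequest(taskID)
	if err != nil {
		return err
	}

	// Step 2: fetch the log bytes from the task server, retrying for up to the
	// default 30 seconds (an override in seconds can be passed as the optional
	// second argument).
	logBytes, err := c.DownloadLogs(download)
	if err != nil {
		return err
	}

	return os.WriteFile(outPath, logBytes, 0o644)
}
```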