|
| 1 | +package admupgradestatus |
| 2 | + |
| 3 | +import ( |
| 4 | + "context" |
| 5 | + "fmt" |
| 6 | + "os" |
| 7 | + "path/filepath" |
| 8 | + "time" |
| 9 | + |
| 10 | + "github.com/openshift/origin/pkg/monitortestframework" |
| 11 | + exutil "github.com/openshift/origin/test/extended/util" |
| 12 | + "k8s.io/apimachinery/pkg/util/errors" |
| 13 | + "k8s.io/apimachinery/pkg/util/wait" |
| 14 | + |
| 15 | + "github.com/openshift/origin/pkg/monitor/monitorapi" |
| 16 | + "github.com/openshift/origin/pkg/test/ginkgo/junitapi" |
| 17 | + "k8s.io/client-go/rest" |
| 18 | +) |
| 19 | + |
// snapshot captures the result of a single `oc adm upgrade status` invocation.
type snapshot struct {
	// when is taken immediately before the command is launched, so it marks
	// the start of the invocation, not its completion.
	when time.Time
	// out is the captured command output.
	out string
	// err is non-nil when the invocation failed; the out field may then be empty.
	err error
}

// monitor implements monitortestframework.MonitorTest by periodically running
// `oc adm upgrade status` during the monitored window and saving each
// observation as a storage artifact.
type monitor struct {
	// collectionDone receives a single signal once the consumer goroutine
	// spawned in StartCollection has drained all snapshots into
	// ocAdmUpgradeStatus; CollectData blocks on it before anything reads the map.
	collectionDone chan struct{}
	// ocAdmUpgradeStatus maps invocation time to the observed snapshot. It is
	// written only by the consumer goroutine and read only after
	// collectionDone is signaled, so no lock is needed.
	ocAdmUpgradeStatus map[time.Time]*snapshot
}
| 29 | + |
| 30 | +func NewOcAdmUpgradeStatusChecker() monitortestframework.MonitorTest { |
| 31 | + return &monitor{ |
| 32 | + collectionDone: make(chan struct{}), |
| 33 | + ocAdmUpgradeStatus: map[time.Time]*snapshot{}, |
| 34 | + } |
| 35 | +} |
| 36 | + |
// PrepareCollection is a no-op; this monitor needs no setup before
// StartCollection is called.
func (w *monitor) PrepareCollection(ctx context.Context, adminRESTConfig *rest.Config, recorder monitorapi.RecorderWriter) error {
	return nil
}
| 40 | + |
| 41 | +func snapshotOcAdmUpgradeStatus(ch chan *snapshot) { |
| 42 | + // TODO: I _think_ this should somehow use the adminRESTConfig given to StartCollection but I don't know how to |
| 43 | + // how to do pass that to exutil.NewCLI* or if it is even possible. It seems to work this way though. |
| 44 | + oc := exutil.NewCLIWithoutNamespace("adm-upgrade-status").AsAdmin() |
| 45 | + now := time.Now() |
| 46 | + cmd := oc.Run("adm", "upgrade", "status").EnvVar("OC_ENABLE_CMD_UPGRADE_STATUS", "true") |
| 47 | + out, err := cmd.Output() |
| 48 | + ch <- &snapshot{when: now, out: out, err: err} |
| 49 | +} |
| 50 | + |
// StartCollection launches a background producer/consumer pair: the producer
// invokes `oc adm upgrade status` once per minute until ctx is cancelled, and
// the consumer accumulates the results into w.ocAdmUpgradeStatus. Completion is
// signaled on w.collectionDone, which CollectData waits for.
func (w *monitor) StartCollection(ctx context.Context, adminRESTConfig *rest.Config, recorder monitorapi.RecorderWriter) error {
	// TODO: The double goroutine spawn should probably be placed under some abstraction
	go func(ctx context.Context) {
		snapshots := make(chan *snapshot)
		// Consumer: the map is written only here; readers synchronize through
		// collectionDone, so no locking is required.
		go func() {
			for snap := range snapshots {
				// TODO: Maybe also collect some cluster resources (CV? COs?) through recorder?
				w.ocAdmUpgradeStatus[snap.when] = snap
			}
			w.collectionDone <- struct{}{}
		}()
		// TODO: Configurable interval?
		// TODO: Collect multiple invocations (--details)? Would need another
		// producer/consumer pair and likely collectionDone would need to be a WaitGroup

		wait.UntilWithContext(ctx, func(ctx context.Context) { snapshotOcAdmUpgradeStatus(snapshots) }, time.Minute)
		// The UntilWithContext blocks until the framework cancels the context when it wants tests to stop -> when we
		// get here, we know the last snapshotOcAdmUpgradeStatus producer wrote to the snapshots channel, so we can
		// close it, which in turn allows the consumer to finish and signal collectionDone.
		close(snapshots)
	}(ctx)

	return nil
}
| 75 | + |
// CollectData blocks until the background collection started by StartCollection
// has fully drained, guaranteeing that later phases see a complete, quiescent
// ocAdmUpgradeStatus map. It produces no intervals or junit results itself;
// artifacts are emitted in WriteContentToStorage.
func (w *monitor) CollectData(ctx context.Context, storageDir string, beginning, end time.Time) (monitorapi.Intervals, []*junitapi.JUnitTestCase, error) {
	// The framework cancels the context it gave StartCollection before it calls CollectData, but we need to wait for
	// the collection goroutines spawned in StartCollection to finish
	<-w.collectionDone
	return nil, nil, nil
}
| 82 | + |
// ConstructComputedIntervals is a no-op; this monitor does not derive any
// computed intervals from the recorded data.
func (*monitor) ConstructComputedIntervals(ctx context.Context, startingIntervals monitorapi.Intervals, recordedResources monitorapi.ResourcesMap, beginning, end time.Time) (monitorapi.Intervals, error) {
	return nil, nil
}
| 86 | + |
// EvaluateTestsFromConstructedIntervals is a no-op; this monitor only collects
// artifacts and produces no pass/fail test results.
func (*monitor) EvaluateTestsFromConstructedIntervals(ctx context.Context, finalIntervals monitorapi.Intervals) ([]*junitapi.JUnitTestCase, error) {
	return nil, nil
}
| 90 | + |
| 91 | +func (w *monitor) WriteContentToStorage(ctx context.Context, storageDir, timeSuffix string, finalIntervals monitorapi.Intervals, finalResourceState monitorapi.ResourcesMap) error { |
| 92 | + var errs []error |
| 93 | + for when, observed := range w.ocAdmUpgradeStatus { |
| 94 | + // TODO: Maybe make a directory for these files |
| 95 | + outputFilename := fmt.Sprintf("adm-upgrade-status-%s_%s.txt", when, timeSuffix) |
| 96 | + outputFile := filepath.Join(storageDir, outputFilename) |
| 97 | + if err := os.WriteFile(outputFile, []byte(observed.out), 0644); err != nil { |
| 98 | + errs = append(errs, fmt.Errorf("failed to write %s: %w", outputFile, err)) |
| 99 | + } |
| 100 | + } |
| 101 | + return errors.NewAggregate(errs) |
| 102 | +} |
| 103 | + |
// Cleanup is a no-op; all goroutines started by this monitor terminate when
// the framework cancels the StartCollection context, so there is nothing
// left to tear down.
func (*monitor) Cleanup(ctx context.Context) error {
	return nil
}
0 commit comments