diff --git a/test/cvo/cvo.go b/test/cvo/cvo.go
index 8f59a0cab..162fdb99e 100644
--- a/test/cvo/cvo.go
+++ b/test/cvo/cvo.go
@@ -1,12 +1,26 @@
 package cvo
 
 import (
-	. "github.com/onsi/ginkgo/v2"
-	. "github.com/onsi/gomega"
+	g "github.com/onsi/ginkgo/v2"
+	o "github.com/onsi/gomega"
+	exutil "github.com/openshift/cluster-version-operator/test/util"
 )
 
-var _ = Describe("[cvo-testing] cluster-version-operator-tests", func() {
-	It("should support passing tests", func() {
-		Expect(true).To(BeTrue())
+var _ = g.Describe("[cvo-testing] cluster-version-operator-tests", func() {
+	defer g.GinkgoRecover()
+
+	projectName := "openshift-cluster-version"
+
+	oc := exutil.NewCLIWithoutNamespace(projectName)
+
+	g.It("should support passing tests", func() {
+		o.Expect(true).To(o.BeTrue())
+	})
+
+	g.It("Ingress to CVO is not breaking for monitoring scrape", func() {
+		exutil.By("Testing my commands")
+		err := oc.AsAdmin().WithoutNamespace().Run("version").Execute()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		o.Expect(true).To(o.BeTrue())
 	})
 })
diff --git a/test/cvo/utils.go b/test/cvo/utils.go
new file mode 100644
index 000000000..0e524d45b
--- /dev/null
+++ b/test/cvo/utils.go
@@ -0,0 +1 @@
+package cvo
diff --git a/test/util/architecture/OWNERS b/test/util/architecture/OWNERS
new file mode 100644
index 000000000..53fef4e58
--- /dev/null
+++ b/test/util/architecture/OWNERS
@@ -0,0 +1,5 @@
+reviewers:
+  - aleskandro
+  - LiangquanLi930
+  - lwan-wanglin
+
diff --git a/test/util/architecture/architecture.go b/test/util/architecture/architecture.go
new file mode 100644
index 000000000..35721a45d
--- /dev/null
+++ b/test/util/architecture/architecture.go
@@ -0,0 +1,177 @@
+package architecture
+
+import (
+	"fmt"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+
+	g "github.com/onsi/ginkgo/v2"
+	o "github.com/onsi/gomega"
+	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+type Architecture int
+
+const (
+	AMD64 Architecture = iota
+	ARM64
+	PPC64LE
+	S390X
+	MULTI
+	UNKNOWN
+)
+
+const (
+	NodeArchitectureLabel = "kubernetes.io/arch"
+)
+
+// SkipIfNoNodeWithArchitectures skips the test unless the cluster has nodes of all the given architectures
+func SkipIfNoNodeWithArchitectures(oc *exutil.CLI, architectures ...Architecture) {
+	if sets.New(
+		GetAvailableArchitecturesSet(oc)...).IsSuperset(
+		sets.New(architectures...)) {
+		return
+	}
+	g.Skip("Skip for no nodes with requested architectures")
+}
+
+// SkipArchitectures skips the test if the cluster is one of the given architectures
+func SkipArchitectures(oc *exutil.CLI, architectures ...Architecture) (architecture Architecture) {
+	architecture = ClusterArchitecture(oc)
+	for _, arch := range architectures {
+		if arch == architecture {
+			g.Skip(fmt.Sprintf("Skip for cluster architecture: %s", arch.String()))
+		}
+	}
+	return
+}
+
+// SkipNonAmd64SingleArch skips the test if the cluster is not an AMD64, single-arch, cluster
+func SkipNonAmd64SingleArch(oc *exutil.CLI) (architecture Architecture) {
+	architecture = ClusterArchitecture(oc)
+	if architecture != AMD64 {
+		g.Skip(fmt.Sprintf("Skip for cluster architecture: %s", architecture.String()))
+	}
+	return
+}
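+
+// Usage sketch for the skip helpers above (hedged; assumes a Ginkgo spec with
+// an initialized *exutil.CLI named oc):
+//
+//	g.It("runs only where arm64 nodes exist", func() {
+//		architecture.SkipIfNoNodeWithArchitectures(oc, architecture.ARM64)
+//		// ... test body ...
+//	})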
+
+// GetAvailableArchitecturesSet returns the set of architectures across the cluster's nodes
+func GetAvailableArchitecturesSet(oc *exutil.CLI) []Architecture {
+	output, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("nodes", "-o=jsonpath={.items[*].status.nodeInfo.architecture}").Output()
+	if err != nil {
+		e2e.Failf("unable to get the cluster architecture: %v", err)
+	}
+	if output == "" {
+		e2e.Failf("the retrieved architecture is empty")
+	}
+	architectureList := strings.Split(output, " ")
+	archMap := make(map[Architecture]bool)
+	var architectures []Architecture
+	for _, nodeArchitecture := range architectureList {
+		if _, ok := archMap[FromString(nodeArchitecture)]; !ok {
+			archMap[FromString(nodeArchitecture)] = true
+			architectures = append(architectures, FromString(nodeArchitecture))
+		}
+	}
+	return architectures
+}
+
+// SkipNonMultiArchCluster skips the test if the cluster is not a multi-arch cluster
+func SkipNonMultiArchCluster(oc *exutil.CLI) {
+	if !IsMultiArchCluster(oc) {
+		g.Skip("This cluster is not a multi-arch cluster, skip this case!")
+	}
+}
+
+// IsMultiArchCluster checks if the cluster is a multi-arch cluster
+func IsMultiArchCluster(oc *exutil.CLI) bool {
+	architectures := GetAvailableArchitecturesSet(oc)
+	return len(architectures) > 1
+}
+
+// FromString returns the Architecture value for the given string
+func FromString(arch string) Architecture {
+	switch arch {
+	case "amd64":
+		return AMD64
+	case "arm64":
+		return ARM64
+	case "ppc64le":
+		return PPC64LE
+	case "s390x":
+		return S390X
+	case "multi":
+		return MULTI
+	default:
+		e2e.Failf("Unknown architecture %s", arch)
+	}
+	return AMD64
+}
+
+// String returns the string value for the given Architecture
+func (a Architecture) String() string {
+	switch a {
+	case AMD64:
+		return "amd64"
+	case ARM64:
+		return "arm64"
+	case PPC64LE:
+		return "ppc64le"
+	case S390X:
+		return "s390x"
+	case MULTI:
+		return "multi"
+	default:
+		e2e.Failf("Unknown architecture %d", a)
+	}
+	return ""
+}
+
+// ClusterArchitecture returns the cluster's Architecture
+// If the cluster uses the multi-arch payload, this function returns Architecture.multi
+func ClusterArchitecture(oc *exutil.CLI) (architecture Architecture) {
+	output, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("nodes", "-o=jsonpath={.items[*].status.nodeInfo.architecture}").Output()
+	if err != nil {
+		e2e.Failf("unable to get the cluster architecture: %v", err)
+	}
+	if output == "" {
+		e2e.Failf("the retrieved architecture is empty")
+	}
+	architectureList := strings.Split(output, " ")
+	architecture = FromString(architectureList[0])
+	for _, nodeArchitecture := range architectureList[1:] {
+		if FromString(nodeArchitecture) != architecture {
+			e2e.Logf("Found multi-arch node cluster")
+			return MULTI
+		}
+	}
+	return
+}
+
+func (a Architecture) GNUString() string {
+	switch a {
+	case AMD64:
+		return "x86_64"
+	case ARM64:
+		return "aarch64"
+	case PPC64LE:
+		return "ppc64le"
+	case S390X:
+		return "s390x"
+	case MULTI:
+		return "multi"
+	default:
+		e2e.Failf("Unknown architecture %d", a)
+	}
+	return ""
+}
+
+// GetControlPlaneArch gets the architecture of the control plane node
+func GetControlPlaneArch(oc *exutil.CLI) Architecture {
+	masterNode, err := exutil.GetFirstMasterNode(oc)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	architecture, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", masterNode, "-o=jsonpath={.status.nodeInfo.architecture}").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	return FromString(architecture)
+}
diff --git a/test/util/assert.go b/test/util/assert.go
new file mode 100644
index 000000000..b408cfea6
--- /dev/null
+++ b/test/util/assert.go
@@ -0,0 +1,65 @@
+package util
+
+import (
+	"fmt"
+	"strings"
+
+	g "github.com/onsi/ginkgo/v2"
+	o "github.com/onsi/gomega"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+// e is the return value of wait.Poll
+// msg is the reason for the timeout
+// The function asserts the return value of wait.Poll and expects NO error.
+// If e is nil, the assertion passes and nothing happens.
+// If e is not nil, the default error message "timed out waiting for the condition" is not printed,
+// because it prevents RP AA from analyzing the result exactly.
+// If e is "timed out waiting for the condition" or "context deadline exceeded", it is replaced by msg.
+// Otherwise e itself is printed and the case fails.
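+//
+// Usage sketch (hedged; wait.Poll from k8s.io/apimachinery/pkg/util/wait):
+//
+//	err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) { ... })
+//	AssertWaitPollNoErr(err, "deployment never became available")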
+
+func AssertWaitPollNoErr(e error, msg string) {
+	if e == nil {
+		return
+	}
+	var err error
+	if strings.Compare(e.Error(), "timed out waiting for the condition") == 0 || strings.Compare(e.Error(), "context deadline exceeded") == 0 {
+		err = fmt.Errorf("case: %v\nerror: %s", g.CurrentSpecReport().FullText(), msg)
+	} else {
+		err = fmt.Errorf("case: %v\nerror: %s", g.CurrentSpecReport().FullText(), e.Error())
+	}
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+}
+
+// e is the return value of wait.Poll
+// msg is the reason why the expected error was not raised
+// The function asserts the return value of wait.Poll and expects an error to be raised.
+// If e is not nil, the assertion passes and nothing happens.
+// If e is nil, the expected-error info is printed and the case fails.
+
+func AssertWaitPollWithErr(e error, msg string) {
+	if e != nil {
+		e2e.Logf("the error: %v", e)
+		return
+	}
+
+	err := fmt.Errorf("case: %v\nexpected error not got because of %v", g.CurrentSpecReport().FullText(), msg)
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+}
+
+// OrFail processes another function's return values, fails the test if any of those values is an
+// error != nil, and returns the first value.
+// example: if we have: func getValue() (string, error)
+//
+//	we can do: value := OrFail[string](getValue())
+func OrFail[T any](vals ...any) T {
+
+	for _, val := range vals {
+		err, ok := val.(error)
+		if ok {
+			o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
+		}
+	}
+
+	return vals[0].(T)
+}
diff --git a/test/util/auth_rule_resolver.go b/test/util/auth_rule_resolver.go
new file mode 100644
index 000000000..b12b9020b
--- /dev/null
+++ b/test/util/auth_rule_resolver.go
@@ -0,0 +1,16 @@
+package util
+
+import (
+	rbacinformers "k8s.io/client-go/informers/rbac/v1"
+	rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation"
+	rbacauthorizer "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac"
+)
+
+func NewRuleResolver(informers rbacinformers.Interface) rbacregistryvalidation.AuthorizationRuleResolver {
+	return rbacregistryvalidation.NewDefaultRuleResolver(
+		&rbacauthorizer.RoleGetter{Lister: informers.Roles().Lister()},
+		&rbacauthorizer.RoleBindingLister{Lister: informers.RoleBindings().Lister()},
+		&rbacauthorizer.ClusterRoleGetter{Lister: informers.ClusterRoles().Lister()},
+		&rbacauthorizer.ClusterRoleBindingLister{Lister: informers.ClusterRoleBindings().Lister()},
+	)
+}
diff --git a/test/util/aws_client.go b/test/util/aws_client.go
new file mode 100644
index 000000000..fd48a7cd1
--- /dev/null
+++ b/test/util/aws_client.go
@@ -0,0 +1,1259 @@
+package util
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/ec2"
+	"github.com/aws/aws-sdk-go/service/ecr"
+	"github.com/aws/aws-sdk-go/service/iam"
+	"github.com/aws/aws-sdk-go/service/kms"
"github.com/aws/aws-sdk-go/service/route53" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" + "github.com/aws/aws-sdk-go/service/sts" + + "k8s.io/apimachinery/pkg/util/wait" + e2e "k8s.io/kubernetes/test/e2e/framework" +) + +// AWSInstanceNotFound custom error for not found instances +type AWSInstanceNotFound struct{ InstanceName string } + +// Error implements the error interface +func (nfe *AWSInstanceNotFound) Error() string { + return fmt.Sprintf("No instance found in current cluster with name %s", nfe.InstanceName) +} + +// AwsClient struct +type AwsClient struct { + svc *ec2.EC2 +} + +// InitAwsSession init session +func InitAwsSession() *AwsClient { + mySession := session.Must(session.NewSession()) + aClient := &AwsClient{ + svc: ec2.New(mySession, aws.NewConfig()), + } + + return aClient +} + +func InitAwsSessionWithRegion(region string) *AwsClient { + mySession := session.Must(session.NewSession()) + aClient := &AwsClient{ + svc: ec2.New(mySession, aws.NewConfig().WithRegion(region)), + } + + return aClient +} + +// GetAwsInstanceID Get int svc instance ID +func (a *AwsClient) GetAwsInstanceID(instanceName string) (string, error) { + filters := []*ec2.Filter{ + { + Name: aws.String("tag:Name"), + Values: []*string{ + aws.String(instanceName), + }, + }, + } + input := ec2.DescribeInstancesInput{Filters: filters} + instanceInfo, err := a.svc.DescribeInstances(&input) + + if err != nil { + return "", err + } + + if len(instanceInfo.Reservations) < 1 { + return "", &AWSInstanceNotFound{instanceName} + } + + instanceID := instanceInfo.Reservations[0].Instances[0].InstanceId + e2e.Logf("The %s instance id is %s .", instanceName, *instanceID) + return *instanceID, err +} + +// GetAwsPublicSubnetID get one regular public subnet ID in aws outpost mixed worker cluster +func (a *AwsClient) GetAwsPublicSubnetID(clusterID string) (string, error) { + filters := []*ec2.Filter{ + { + Name: aws.String("tag:kubernetes.io/cluster/" + clusterID), + Values: []*string{ + aws.String("shared"), + }, + }, + { + Name: aws.String("tag:aws:cloudformation:logical-id"), + Values: []*string{ + aws.String("PublicSubnet"), + }, + }, + } + input := ec2.DescribeSubnetsInput{Filters: filters} + subnetInfo, err := a.svc.DescribeSubnets(&input) + + if err != nil { + return "", err + } + + if len(subnetInfo.Subnets) < 1 { + return "", fmt.Errorf("No subnet found in current cluster with name %s", clusterID) + } + + subnetID := subnetInfo.Subnets[0].SubnetId + e2e.Logf("The subnet id is %s .", *subnetID) + return *subnetID, err +} + +// GetAwsPrivateSubnetIDs get private subnet IDs +func (a *AwsClient) GetAwsPrivateSubnetIDs(vpcID string) ([]string, error) { + input := &ec2.DescribeSubnetsInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("vpc-id"), + Values: []*string{aws.String(vpcID)}, + }, + }, + } + subnetInfo, err := a.svc.DescribeSubnets(input) + if err != nil { + return nil, fmt.Errorf("failed to describe subnets, %v", err) + } + + var privateSubnetIDs []string + for _, subnet := range subnetInfo.Subnets { + if *subnet.MapPublicIpOnLaunch == false { + privateSubnetIDs = append(privateSubnetIDs, *subnet.SubnetId) + break + } + } + + return privateSubnetIDs, nil +} + +// GetAwsIntIPs get aws int ip +func (a *AwsClient) GetAwsIntIPs(instanceID string) (map[string]string, error) { + filters := []*ec2.Filter{ + { + Name: aws.String("instance-id"), + Values: []*string{ + aws.String(instanceID), + }, + }, + } + input := ec2.DescribeInstancesInput{Filters: filters} + 
+func (a *AwsClient) GetAwsIntIPs(instanceID string) (map[string]string, error) {
+	filters := []*ec2.Filter{
+		{
+			Name: aws.String("instance-id"),
+			Values: []*string{
+				aws.String(instanceID),
+			},
+		},
+	}
+	input := ec2.DescribeInstancesInput{Filters: filters}
+	instanceInfo, err := a.svc.DescribeInstances(&input)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(instanceInfo.Reservations) < 1 {
+		return nil, fmt.Errorf("No instance found in current cluster with ID %s", instanceID)
+	}
+
+	privateIP := instanceInfo.Reservations[0].Instances[0].PrivateIpAddress
+	publicIP := instanceInfo.Reservations[0].Instances[0].PublicIpAddress
+	ips := make(map[string]string, 3)
+
+	if publicIP == nil && privateIP == nil {
+		e2e.Logf("There are no IPs for this instance %s", instanceID)
+		return nil, fmt.Errorf("there are no IPs for this instance %s", instanceID)
+	}
+
+	if publicIP != nil {
+		ips["publicIP"] = *publicIP
+		e2e.Logf("The instance's public ip is %s", *publicIP)
+	}
+
+	if privateIP != nil {
+		ips["privateIP"] = *privateIP
+		e2e.Logf("The instance's private ip is %s", *privateIP)
+	}
+
+	return ips, nil
+}
+
+// UpdateAwsIntSecurityRule updates the instance's security rule
+func (a *AwsClient) UpdateAwsIntSecurityRule(instanceID string, dstPort int64) error {
+	filters := []*ec2.Filter{
+		{
+			Name: aws.String("instance-id"),
+			Values: []*string{
+				aws.String(instanceID),
+			},
+		},
+	}
+	input := ec2.DescribeInstancesInput{Filters: filters}
+	instanceInfo, err := a.svc.DescribeInstances(&input)
+	if err != nil {
+		return err
+	}
+
+	if len(instanceInfo.Reservations) < 1 {
+		return fmt.Errorf("No such instance ID in current cluster %s", instanceID)
+	}
+
+	securityGroupID := instanceInfo.Reservations[0].Instances[0].SecurityGroups[0].GroupId
+
+	e2e.Logf("The instance %s's security group id is %s .", instanceID, *securityGroupID)
+
+	// Check if the destination port is opened
+	req := &ec2.DescribeSecurityGroupsInput{
+		GroupIds: []*string{aws.String(*securityGroupID)},
+	}
+	resp, err := a.svc.DescribeSecurityGroups(req)
+	if err != nil {
+		return err
+	}
+
+	if strings.Contains(resp.GoString(), "ToPort: "+strconv.FormatInt(dstPort, 10)) {
+		e2e.Logf("The destination port %v was opened in security group %s .", dstPort, *securityGroupID)
+		return nil
+	}
+
+	// Update the ingress security rule to allow the destination port
+	_, err = a.svc.AuthorizeSecurityGroupIngress(&ec2.AuthorizeSecurityGroupIngressInput{
+		GroupId: aws.String(*securityGroupID),
+		IpPermissions: []*ec2.IpPermission{
+			(&ec2.IpPermission{}).
+				SetIpProtocol("tcp").
+				SetFromPort(dstPort).
+				SetToPort(dstPort).
+				SetIpRanges([]*ec2.IpRange{
+					{CidrIp: aws.String("0.0.0.0/0")},
+				}),
+		},
+	})
+
+	if err != nil {
+		e2e.Logf("Unable to set security group %s, ingress, %v", *securityGroupID, err)
+		return err
+	}
+
+	e2e.Logf("Successfully updated destination port %v in the security group %s ingress rule.", dstPort, *securityGroupID)
+
+	return nil
+}
+
+// GetAwsInstanceIDFromHostname gets the instance ID from the hostname
+func (a *AwsClient) GetAwsInstanceIDFromHostname(hostname string) (string, error) {
+	filters := []*ec2.Filter{
+		{
+			Name: aws.String("private-dns-name"),
+			Values: []*string{
+				aws.String(hostname),
+			},
+		},
+	}
+	input := ec2.DescribeInstancesInput{Filters: filters}
+	instanceInfo, err := a.svc.DescribeInstances(&input)
+
+	if err != nil {
+		return "", err
+	}
+
+	if len(instanceInfo.Reservations) < 1 {
+		return "", fmt.Errorf("No instance found in current cluster with name %s", hostname)
+	}
+
+	instanceID := instanceInfo.Reservations[0].Instances[0].InstanceId
+	e2e.Logf("The %s instance id is %s .", hostname, *instanceID)
+	return *instanceID, err
+}
+
+// StartInstance starts an instance
+func (a *AwsClient) StartInstance(instanceID string) error {
+	if instanceID == "" {
+		e2e.Logf("You must supply an instance ID (-i INSTANCE-ID)")
+		return fmt.Errorf("you must supply an instance ID (-i INSTANCE-ID)")
+	}
+	input := &ec2.StartInstancesInput{
+		InstanceIds: []*string{
+			&instanceID,
+		},
+	}
+	result, err := a.svc.StartInstances(input)
+	if err != nil {
+		return err
+	}
+	e2e.Logf("%v", result.StartingInstances)
+	return nil
+}
+
+// StopInstance stops an instance
+func (a *AwsClient) StopInstance(instanceID string) error {
+	if instanceID == "" {
+		e2e.Logf("You must supply an instance ID (-i INSTANCE-ID)")
+		return fmt.Errorf("you must supply an instance ID (-i INSTANCE-ID)")
+	}
+	input := &ec2.StopInstancesInput{
+		InstanceIds: []*string{
+			&instanceID,
+		},
+	}
+	result, err := a.svc.StopInstances(input)
+	if err != nil {
+		return err
+	}
+	e2e.Logf("%v", result.StoppingInstances)
+	return nil
+}
+
+// GetAwsInstanceState gives the instance state
+func (a *AwsClient) GetAwsInstanceState(instanceID string) (string, error) {
+	filters := []*ec2.Filter{
+		{
+			Name: aws.String("instance-id"),
+			Values: []*string{
+				aws.String(instanceID),
+			},
+		},
+	}
+	input := ec2.DescribeInstancesInput{Filters: filters}
+	instanceInfo, err := a.svc.DescribeInstances(&input)
+	if err != nil {
+		return "", err
+	}
+
+	if len(instanceInfo.Reservations) < 1 {
+		return "", fmt.Errorf("No instance found in current cluster with ID %s", instanceID)
+	}
+
+	instanceState := instanceInfo.Reservations[0].Instances[0].State.Name
+	return *instanceState, err
+}
+
+// CreateDhcpOptions creates a dhcpOptions
+func (a *AwsClient) CreateDhcpOptions() (string, error) {
+	input := &ec2.CreateDhcpOptionsInput{
+		DhcpConfigurations: []*ec2.NewDhcpConfiguration{
+			{
+				Key: aws.String("domain-name-servers"),
+				Values: []*string{
+					aws.String("AmazonProvidedDNS"),
+				},
+			},
+		},
+	}
+	result, err := a.svc.CreateDhcpOptions(input)
+	if err != nil {
+		e2e.Logf("err: %v", err)
+		return "", err
+	}
+	dhcpOptionsID := result.DhcpOptions.DhcpOptionsId
+	e2e.Logf("The created dhcpOptionsId is %s", *dhcpOptionsID)
+	return *dhcpOptionsID, err
+}
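+
+// Usage sketch for the DHCP options helpers (hedged; the VPC ID is illustrative):
+//
+//	optionsID, err := a.CreateDhcpOptionsWithDomainName("example.com")
+//	if err == nil {
+//		err = a.AssociateDhcpOptions("vpc-0abc123", optionsID)
+//	}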
+
+// CreateDhcpOptionsWithDomainName creates a dhcpOptions with the given domain name
+func (a *AwsClient) CreateDhcpOptionsWithDomainName(domainName string) (string, error) {
+	input := &ec2.CreateDhcpOptionsInput{
+		DhcpConfigurations: []*ec2.NewDhcpConfiguration{
+			{
+				Key: aws.String("domain-name-servers"),
+				Values: []*string{
+					aws.String("AmazonProvidedDNS"),
+				},
+			},
+			{
+				Key: aws.String("domain-name"),
+				Values: []*string{
+					aws.String(domainName),
+				},
+			},
+		},
+	}
+	result, err := a.svc.CreateDhcpOptions(input)
+	if err != nil {
+		e2e.Logf("err: %v", err)
+		return "", err
+	}
+	dhcpOptionsID := result.DhcpOptions.DhcpOptionsId
+	e2e.Logf("The created dhcpOptionsId is %s", *dhcpOptionsID)
+	return *dhcpOptionsID, err
+}
+
+// DeleteDhcpOptions deletes a dhcpOptions
+func (a *AwsClient) DeleteDhcpOptions(dhcpOptionsID string) error {
+	input := &ec2.DeleteDhcpOptionsInput{
+		DhcpOptionsId: aws.String(dhcpOptionsID),
+	}
+	_, err := a.svc.DeleteDhcpOptions(input)
+	return err
+}
+
+// GetPlacementGroupByName gets a placement group by group-name
+func (a *AwsClient) GetPlacementGroupByName(groupName string) (string, error) {
+	input := &ec2.DescribePlacementGroupsInput{
+		GroupNames: []*string{
+			aws.String(groupName),
+		},
+	}
+	result, err := a.svc.DescribePlacementGroups(input)
+	if err != nil {
+		e2e.Logf("err: %v", err)
+		return "", err
+	}
+	placementGroupID := *result.PlacementGroups[0].GroupId
+	e2e.Logf("The %s placement group ID is %s ", groupName, placementGroupID)
+	return placementGroupID, err
+}
+
+// GetAwsInstanceVPCId gives the instance vpcID
+func (a *AwsClient) GetAwsInstanceVPCId(instanceID string) (string, error) {
+	filters := []*ec2.Filter{
+		{
+			Name: aws.String("instance-id"),
+			Values: []*string{
+				aws.String(instanceID),
+			},
+		},
+	}
+	input := ec2.DescribeInstancesInput{Filters: filters}
+	instanceInfo, err := a.svc.DescribeInstances(&input)
+	if err != nil {
+		return "", err
+	}
+
+	if len(instanceInfo.Reservations) < 1 {
+		return "", fmt.Errorf("No instance found in current cluster with ID %s", instanceID)
+	}
+
+	instanceVpcID := instanceInfo.Reservations[0].Instances[0].VpcId
+	return *instanceVpcID, err
+}
+
+// GetDhcpOptionsIDOfVpc gets the VPC's dhcpOptionsID
+func (a *AwsClient) GetDhcpOptionsIDOfVpc(vpcID string) (string, error) {
+	input := &ec2.DescribeVpcsInput{
+		VpcIds: []*string{
+			aws.String(vpcID),
+		},
+	}
+	result, err := a.svc.DescribeVpcs(input)
+	if err != nil {
+		e2e.Logf("err: %v", err)
+		return "", err
+	}
+	dhcpOptionsID := result.Vpcs[0].DhcpOptionsId
+	e2e.Logf("The %s dhcpOptionsId is %s ", vpcID, *dhcpOptionsID)
+	return *dhcpOptionsID, err
+}
+
+// GetDhcpOptionsIDFromTag gets the dhcpOptionsIDs that have the given tag
+func (a *AwsClient) GetDhcpOptionsIDFromTag(tagKey string, tagValue string) ([]string, error) {
+	filters := []*ec2.Filter{
+		{
+			Name: aws.String("tag:" + tagKey),
+			Values: []*string{
+				aws.String(tagValue),
+			},
+		},
+	}
+	input := ec2.DescribeDhcpOptionsInput{Filters: filters}
+	dhcpOptionsIDs := []string{}
+	result, err := a.svc.DescribeDhcpOptions(&input)
+	if err != nil {
+		e2e.Logf("err: %v", err)
+		return dhcpOptionsIDs, err
+	}
+	for _, value := range result.DhcpOptions {
+		dhcpOptionsIDs = append(dhcpOptionsIDs, *value.DhcpOptionsId)
+		e2e.Logf("Found dhcpOptionsId %s that has the tag %s:%s", *value.DhcpOptionsId, tagKey, tagValue)
+	}
+	return dhcpOptionsIDs, err
+}
+
+// AssociateDhcpOptions associates a VPC with a dhcpOptions
+func (a *AwsClient) AssociateDhcpOptions(vpcID, dhcpOptionsID string) error {
+	input := &ec2.AssociateDhcpOptionsInput{
+		VpcId:         aws.String(vpcID),
+		DhcpOptionsId: aws.String(dhcpOptionsID),
+	}
+	_, err := a.svc.AssociateDhcpOptions(input)
+	return err
+}
+
+func (a *AwsClient) CreateSecurityGroup(groupName, vpcID, description string) (string, error) {
+	createRes, err := a.svc.CreateSecurityGroup(&ec2.CreateSecurityGroupInput{
+		GroupName:   aws.String(groupName),
+		Description: aws.String(description),
+		VpcId:       aws.String(vpcID),
+	})
+	if err != nil {
+		return "", err
+	}
+
+	return *createRes.GroupId, nil
+}
+
+func (a *AwsClient) DeleteSecurityGroup(groupID string) error {
+	_, err := a.svc.DeleteSecurityGroup(&ec2.DeleteSecurityGroupInput{
+		GroupId: aws.String(groupID),
+	})
+	return err
+}
+
+func (a *AwsClient) GetInstanceSecurityGroupIDs(instanceID string) ([]string, error) {
+	filters := []*ec2.Filter{
+		{
+			Name:   aws.String("instance-id"),
+			Values: []*string{aws.String(instanceID)},
+		},
+		{
+			Name:   aws.String("instance.group-name"),
+			Values: []*string{aws.String("*")},
+		},
+	}
+
+	input := &ec2.DescribeInstancesInput{Filters: filters}
+	result, err := a.svc.DescribeInstances(input)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(result.Reservations) < 1 {
+		return nil, fmt.Errorf("No instance found in current cluster with ID %s", instanceID)
+	}
+
+	instance := result.Reservations[0].Instances[0]
+
+	var securityGroups []string
+	for _, group := range instance.SecurityGroups {
+		securityGroups = append(securityGroups, *group.GroupId)
+	}
+
+	return securityGroups, err
+}
+
+func (a *AwsClient) CreateTag(resource string, key string, value string) error {
+	createTagInput := &ec2.CreateTagsInput{
+		Resources: []*string{aws.String(resource)},
+		Tags: []*ec2.Tag{
+			{
+				Key:   aws.String(key),
+				Value: aws.String(value),
+			},
+		},
+	}
+	_, err := a.svc.CreateTags(createTagInput)
+	return err
+}
+
+func (a *AwsClient) DeleteTag(resource string, key string, value string) error {
+	deleteTagInput := &ec2.DeleteTagsInput{
+		Resources: []*string{aws.String(resource)},
+		Tags: []*ec2.Tag{
+			{
+				Key:   aws.String(key),
+				Value: aws.String(value),
+			},
+		},
+	}
+	_, err := a.svc.DeleteTags(deleteTagInput)
+	return err
+}
+
+func (a *AwsClient) DescribeVpcEndpoint(endpointID string) (*ec2.VpcEndpoint, error) {
+	res, err := a.svc.DescribeVpcEndpoints(&ec2.DescribeVpcEndpointsInput{
+		VpcEndpointIds: aws.StringSlice([]string{endpointID}),
+	})
+	if err != nil {
+		return nil, err
+	}
+	return res.VpcEndpoints[0], nil
+}
+
+func (a *AwsClient) GetSecurityGroupsByVpcEndpointID(endpointID string) ([]*ec2.SecurityGroupIdentifier, error) {
+	ep, err := a.DescribeVpcEndpoint(endpointID)
+	if err != nil {
+		return []*ec2.SecurityGroupIdentifier{}, err
+	}
+
+	return ep.Groups, nil
+}
+
+func (a *AwsClient) GetDefaultSecurityGroupByVpcID(vpcID string) (*ec2.SecurityGroup, error) {
+	filters := []*ec2.Filter{
+		{
+			Name: aws.String("vpc-id"),
+			Values: []*string{
+				aws.String(vpcID),
+			},
+		},
+		{
+			Name: aws.String("group-name"),
+			Values: []*string{
+				aws.String("default"),
+			},
+		},
+	}
+	input := ec2.DescribeSecurityGroupsInput{Filters: filters}
+	ep, err := a.svc.DescribeSecurityGroups(&input)
+	if err != nil {
+		return nil, err
+	}
+
+	return ep.SecurityGroups[0], nil
+}
+
+func (a *AwsClient) GetSecurityGroupByGroupName(groupName string) (*ec2.SecurityGroup, error) {
+	filters := []*ec2.Filter{
+		{
+			Name: aws.String("group-name"),
+			Values: []*string{
+				aws.String(groupName),
+			},
+		},
+	}
+	input := ec2.DescribeSecurityGroupsInput{Filters: filters}
+	ep, err := a.svc.DescribeSecurityGroups(&input)
+	if err != nil {
+		return nil, err
+	}
+	// Check if SecurityGroups is empty
+	if len(ep.SecurityGroups) == 0 {
+		return nil, fmt.Errorf("no security group found with name: %s", groupName)
+	}
+
+	return ep.SecurityGroups[0], nil
+}
+
+func (a *AwsClient) GetSecurityGroupByGroupID(groupID string) (*ec2.SecurityGroup, error) {
+	input := ec2.DescribeSecurityGroupsInput{GroupIds: []*string{aws.String(groupID)}}
+	ep, err := a.svc.DescribeSecurityGroups(&input)
+	if err != nil {
+		return nil, err
+	}
+
+	return ep.SecurityGroups[0], nil
+}
+
+func (a *AwsClient) GetAvailabilityZoneNames() ([]string, error) {
+	zones, err := a.svc.DescribeAvailabilityZones(&ec2.DescribeAvailabilityZonesInput{})
+	if err != nil {
+		return nil, err
+	}
+	var zoneNames []string
+	for _, az := range zones.AvailabilityZones {
+		if az.ZoneName != nil {
+			zoneNames = append(zoneNames, *az.ZoneName)
+		}
+	}
+	return zoneNames, nil
+}
+
+// S3Client struct for S3 storage operations
+type S3Client struct {
+	svc *s3.S3
+}
+
+// NewS3Client constructor to create an S3 client with default credentials and config
+func NewS3Client() *S3Client {
+	return &S3Client{
+		svc: s3.New(
+			session.Must(session.NewSession()),
+		),
+	}
+}
+
+// NewS3ClientFromCredFile constructor to create an S3 client with the user's credential file and region
+// param: filename credential file path
+// param: profile config profile e.g. [default]
+// param: region
+func NewS3ClientFromCredFile(filename, profile, region string) *S3Client {
+
+	awsSession := session.Must(session.NewSessionWithOptions(
+		session.Options{
+			SharedConfigState: session.SharedConfigDisable,
+		},
+	))
+
+	return &S3Client{
+		svc: s3.New(
+			awsSession,
+			aws.NewConfig().
+				WithRegion(region).
+				WithCredentials(credentials.NewSharedCredentials(filename, profile)),
+		),
+	}
+
+}
+
+// NewDelegatingS3Client creates an S3Client which delegates calls to methods that are not implemented by itself
+// to the wrapped s3.S3 client.
+func NewDelegatingS3Client(wrappedClient *s3.S3) *S3Client {
+	return &S3Client{
+		svc: wrappedClient,
+	}
+}
+
+// CreateBucket create S3 bucket
+// param: bucket name from user input
+func (sc *S3Client) CreateBucket(name string) error {
+
+	e2e.Logf("creating s3 bucket %s", name)
+
+	var createBucketInput *s3.CreateBucketInput
+	if *sc.svc.Config.Region == "us-east-1" {
+		createBucketInput = &s3.CreateBucketInput{
+			Bucket: aws.String(name),
+			ACL:    aws.String(s3.BucketCannedACLPublicRead),
+		}
+	} else {
+		createBucketInput = &s3.CreateBucketInput{
+			Bucket: aws.String(name),
+			CreateBucketConfiguration: &s3.CreateBucketConfiguration{
+				LocationConstraint: aws.String(*sc.svc.Config.Region),
+			},
+		}
+	}
+
+	cbo, cboe := sc.svc.CreateBucket(createBucketInput)
+	if cboe != nil {
+		e2e.Logf("create bucket %s failed: %v", name, cboe)
+		return cboe
+	}
+
+	e2e.Logf("bucket %s is created successfully %v", name, cbo)
+
+	_, doe := sc.svc.DeletePublicAccessBlock(&s3.DeletePublicAccessBlockInput{
+		Bucket: aws.String(name),
+	})
+	if doe != nil {
+		e2e.Logf("delete public access block failed on bucket %s: %v", name, doe)
+		return doe
+	}
+
+	return nil
+
+}
+
+// PutBucketPolicy configures a given bucket with a policy
+// param: name bucket name
+// param: policy policy that will be added to the bucket
+func (sc *S3Client) PutBucketPolicy(name, policy string) error {
+	e2e.Logf("Setting policy in bucket %s. Policy: %s", name, policy)
+
+	input := &s3.PutBucketPolicyInput{
+		Bucket: aws.String(name),
+		Policy: aws.String(policy),
+	}
+
+	result, err := sc.svc.PutBucketPolicy(input)
+	if err != nil {
+		if aerr, ok := err.(awserr.Error); ok {
+			e2e.Logf("AWS Error %s setting policy in bucket %s: %s", aerr.Code(), name, aerr.Error())
+		} else {
+			// Print the error, cast err to awserr.Error to get the Code and
+			// Message from an error.
+			e2e.Logf("Error setting policy in bucket %s: %s", name, err.Error())
+		}
+		return err
+	}
+
+	e2e.Logf("Policy result: %s", result)
+
+	return nil
+}
+
+// EmptyBucketWithContextAndCheck empties a bucket, then waits for the deletions to take effect.
+func (sc *S3Client) EmptyBucketWithContextAndCheck(ctx context.Context, bucketName string) error {
+	e2e.Logf("Batch deleting objects")
+	iter := s3manager.NewDeleteListIterator(sc.svc, &s3.ListObjectsInput{
+		Bucket: aws.String(bucketName),
+	})
+	if err := s3manager.NewBatchDeleteWithClient(sc.svc).Delete(ctx, iter); err != nil {
+		return fmt.Errorf("empty bucket %s and check: %w", bucketName, err)
+	}
+
+	if err := sc.WaitForBucketEmptinessWithContext(ctx, bucketName, BucketEmpty,
+		5*time.Second /* Interval */, 1*time.Minute /* Timeout */); err != nil {
+		return fmt.Errorf("empty bucket %s and check: %w", bucketName, err)
+	}
+	return nil
+}
+
+// DeleteBucket delete S3 bucket
+// param: name bucket name from user input
+func (sc *S3Client) DeleteBucket(name string) error {
+
+	e2e.Logf("deleting s3 bucket %s", name)
+
+	deleteBucketInput := &s3.DeleteBucketInput{
+		Bucket: aws.String(name),
+	}
+
+	_, dboe := sc.svc.DeleteBucket(deleteBucketInput)
+	if dboe != nil {
+		e2e.Logf("delete bucket %s failed: %v", name, dboe)
+		return dboe
+	}
+
+	e2e.Logf("bucket %s is successfully deleted", name)
+
+	return nil
+}
+
+// HeadBucket util func to check whether the bucket exists or not
+// param: name bucket name
+func (sc *S3Client) HeadBucket(name string) error {
+
+	e2e.Logf("check bucket %s exists or not", name)
+
+	headBucketInput := &s3.HeadBucketInput{
+		Bucket: aws.String(name),
+	}
+
+	hbo, hboe := sc.svc.HeadBucket(headBucketInput)
+	if hboe != nil {
+		e2e.Logf("head bucket %s failed: %v", name, hboe)
+		return hboe
+	}
+
+	e2e.Logf("head bucket %s output is %v", name, hbo)
+
+	return nil
+
+}
+
+func (sc *S3Client) IsBucketEmptyWithContext(ctx aws.Context, input *s3.ListObjectsV2Input, opts ...request.Option) (bool, error) {
+	listObjOutput, err := sc.svc.ListObjectsV2WithContext(ctx, input, opts...)
+	if err != nil {
+		return false, fmt.Errorf("error checking if bucket is empty: %w", err)
+	}
+	return len(listObjOutput.Contents) == 0, nil
+}
+
+// BucketEmptiness captures an S3 bucket's state of emptiness i.e. it is empty or not.
+type BucketEmptiness bool
+
+const (
+	BucketEmpty    BucketEmptiness = true
+	BucketNonEmpty BucketEmptiness = false
+)
+
+func (be BucketEmptiness) String() string {
+	if be {
+		return "empty"
+	}
+	return "non-empty"
+}
+
+// WaitForBucketEmptinessWithContext waits for the expected bucket emptiness (i.e. empty/non-empty) to be fulfilled.
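+// A typical call (sketch) waits up to a minute for a bucket to drain:
+//
+//	err := sc.WaitForBucketEmptinessWithContext(ctx, "my-bucket", BucketEmpty,
+//		5*time.Second, time.Minute)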
+func (sc *S3Client) WaitForBucketEmptinessWithContext(ctx context.Context, bucketName string,
+	bucketEmptiness BucketEmptiness, interval, timeout time.Duration) error {
+	e2e.Logf("Waiting for bucket %s to be %s", bucketName, bucketEmptiness)
+	if err := wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (done bool, err error) {
+		var bucketIsEmpty bool
+		bucketIsEmpty, err = sc.IsBucketEmptyWithContext(ctx, &s3.ListObjectsV2Input{
+			Bucket: aws.String(bucketName),
+		})
+		if err != nil {
+			e2e.Logf("Error checking if bucket %s is empty: %v", bucketName, err)
+			return false, nil
+		}
+		return bool(bucketEmptiness) == bucketIsEmpty, nil
+	}); err != nil {
+		return fmt.Errorf("error waiting for bucket %s to be %s: %w", bucketName, bucketEmptiness, err)
+	}
+
+	e2e.Logf("Bucket %s is now %s", bucketName, bucketEmptiness)
+	return nil
+}
+
+// IAMClient struct for IAM operations
+type IAMClient struct {
+	svc *iam.IAM
+}
+
+// NewIAMClient constructor to create an IAM client with default credentials and config
+// Should use GetAwsCredentialFromCluster(oc) to set the ENV first before using it
+func NewIAMClient() *IAMClient {
+	return &IAMClient{
+		svc: iam.New(
+			session.Must(session.NewSession()),
+			aws.NewConfig(),
+		),
+	}
+}
+
+// NewDelegatingIAMClient creates an IAMClient which delegates calls to methods that are not implemented by itself
+// to the wrapped iam.IAM client.
+func NewDelegatingIAMClient(wrappedClient *iam.IAM) *IAMClient {
+	return &IAMClient{
+		svc: wrappedClient,
+	}
+}
+
+// NewIAMClientFromCredFile constructor to create an IAM client with the user's credential file
+func NewIAMClientFromCredFile(filename, region string) *IAMClient {
+	return &IAMClient{
+		svc: iam.New(
+			session.Must(session.NewSession()),
+			aws.NewConfig().WithCredentials(credentials.NewSharedCredentials(filename, "default")).WithRegion(region),
+		),
+	}
+}
+
+func (iamClient *IAMClient) CreateRoleWithContext(ctx aws.Context, input *iam.CreateRoleInput, opts ...request.Option) (*iam.CreateRoleOutput, error) {
+	return iamClient.svc.CreateRoleWithContext(ctx, input, opts...)
+}
+
+func (iamClient *IAMClient) DeleteRoleWithContext(ctx aws.Context, input *iam.DeleteRoleInput, opts ...request.Option) (*iam.DeleteRoleOutput, error) {
+	return iamClient.svc.DeleteRoleWithContext(ctx, input, opts...)
+}
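+
+// Usage sketch for the policy helpers below (hedged; role and policy names are
+// illustrative):
+//
+//	arn, err := iamClient.CreatePolicy(policyJSON, "my-policy", "", nil, "")
+//	if err == nil {
+//		err = iamClient.AttachRolePolicy("my-role", arn)
+//	}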
+
+func (iamClient *IAMClient) DeleteOpenIDConnectProviderByProviderName(providerName string) error {
+	oidcProviderList, err := iamClient.svc.ListOpenIDConnectProviders(&iam.ListOpenIDConnectProvidersInput{})
+	if err != nil {
+		return err
+	}
+
+	for _, provider := range oidcProviderList.OpenIDConnectProviderList {
+		if strings.Contains(*provider.Arn, providerName) {
+			_, err := iamClient.svc.DeleteOpenIDConnectProvider(&iam.DeleteOpenIDConnectProviderInput{
+				OpenIDConnectProviderArn: provider.Arn,
+			})
+			if err != nil {
+				e2e.Logf("Failed to Delete existing OIDC provider arn: %s for providerName: %s", *provider.Arn, providerName)
+				return err
+			}
+			break
+		}
+	}
+	return nil
+}
+
+func (iamClient *IAMClient) GetRolePolicy(roleName, policyName string) (string, error) {
+	rc, err := iamClient.svc.GetRolePolicy(&iam.GetRolePolicyInput{
+		PolicyName: aws.String(policyName),
+		RoleName:   aws.String(roleName),
+	})
+
+	if err != nil {
+		e2e.Logf("Failed to GetRolePolicy with roleName: %s policyName %s error %s", roleName, policyName, err.Error())
+		return "", err
+	}
+
+	decodePolicy, err := url.QueryUnescape(*rc.PolicyDocument)
+	if err != nil {
+		e2e.Logf("Failed to QueryUnescape role policy: role %s policyName %s error %s original rc %s", roleName, policyName, err.Error(), *rc.PolicyDocument)
+		return "", err
+	}
+
+	return decodePolicy, nil
+}
+
+func (iamClient *IAMClient) UpdateRolePolicy(roleName, policyName, policyDocument string) error {
+	_, err := iamClient.svc.PutRolePolicy(&iam.PutRolePolicyInput{
+		RoleName:       aws.String(roleName),
+		PolicyName:     aws.String(policyName),
+		PolicyDocument: aws.String(policyDocument),
+	})
+
+	if err != nil {
+		e2e.Logf("Failed to UpdateRolePolicy for roleName: %s policyName %s error %s", roleName, policyName, err.Error())
+	}
+
+	return err
+}
+
+// CreatePolicy creates a policy
+func (iamClient *IAMClient) CreatePolicy(policyDocument string, policyName string, description string, tagList map[string]string, path string) (string, error) {
+	// Check that the required inputs exist
+	if policyDocument == "" || policyName == "" {
+		return "", errors.New("policyDocument or policyName cannot be an empty string")
+	}
+	createPolicyInput := &iam.CreatePolicyInput{
+		PolicyName:     aws.String(policyName),
+		PolicyDocument: aws.String(policyDocument),
+	}
+	if path != "" {
+		createPolicyInput.Path = aws.String(path)
+	}
+	if description != "" {
+		createPolicyInput.Description = aws.String(description)
+	}
+	if len(tagList) > 0 {
+		createPolicyInput.Tags = getTags(tagList)
+	}
+	createdPolicy, err := iamClient.svc.CreatePolicy(createPolicyInput)
+	if err != nil {
+		return "", err
+	}
+	return aws.StringValue(createdPolicy.Policy.Arn), nil
+}
+
+// DeletePolicy deletes a policy
+func (iamClient *IAMClient) DeletePolicy(policyArn string) error {
+	_, err := iamClient.svc.DeletePolicy(&iam.DeletePolicyInput{
+		PolicyArn: aws.String(policyArn),
+	})
+	return err
+}
+
+// AttachRolePolicy attaches a policy to a role
+func (iamClient *IAMClient) AttachRolePolicy(roleName, policyArn string) error {
+	_, err := iamClient.svc.AttachRolePolicy(&iam.AttachRolePolicyInput{
+		RoleName:  aws.String(roleName),
+		PolicyArn: aws.String(policyArn),
+	})
+
+	if err != nil {
+		e2e.Logf("Failed to AttachRolePolicy for roleName: %s policyArn %s error %s", roleName, policyArn, err.Error())
+	}
+
+	return err
+}
+
+// DetachRolePolicy detaches a policy from a role
+func (iamClient *IAMClient) DetachRolePolicy(roleName, policyArn string) error {
+	_, err := iamClient.svc.DetachRolePolicy(&iam.DetachRolePolicyInput{
+		RoleName:  aws.String(roleName),
+		PolicyArn: aws.String(policyArn),
+	})
+
+	if err != nil {
+		e2e.Logf("Failed to DetachRolePolicy for roleName: %s policyArn %s error %s", roleName, policyArn, err.Error())
+	}
+
+	return err
+}
+
+// getTags converts a tags map to []*iam.Tag
+func getTags(tagList map[string]string) []*iam.Tag {
+	iamTags := []*iam.Tag{}
+	for k, v := range tagList {
+		iamTags = append(iamTags, &iam.Tag{
+			Key:   aws.String(k),
+			Value: aws.String(v),
+		})
+	}
+	return iamTags
+}
+
+func (iamClient *IAMClient) ListRoles() ([]*iam.Role, error) {
+	roles := []*iam.Role{}
+	err := iamClient.svc.ListRolesPages(&iam.ListRolesInput{}, func(page *iam.ListRolesOutput, lastPage bool) bool {
+		roles = append(roles, page.Roles...)
+		return aws.BoolValue(page.IsTruncated)
+	})
+	return roles, err
+}
+
+func (iamClient *IAMClient) ListOperatorRolesByPrefix(prefix string, version string) ([]*iam.Role, error) {
+	operatorRoles := []*iam.Role{}
+	roles, err := iamClient.ListRoles()
+	if err != nil {
+		return operatorRoles, err
+	}
+	prefixOperatorRoleRE := regexp.MustCompile(`(?i)(?P<Prefix>[\w+=,.@-]+)-(openshift|kube-system)`)
+	for _, role := range roles {
+		matches := prefixOperatorRoleRE.FindStringSubmatch(*role.RoleName)
+		if len(matches) == 0 {
+			continue
+		}
+		prefixIndex := prefixOperatorRoleRE.SubexpIndex("Prefix")
+		foundPrefix := strings.ToLower(matches[prefixIndex])
+		if foundPrefix != prefix {
+			continue
+		}
+		operatorRoles = append(operatorRoles, role)
+	}
+	return operatorRoles, nil
+}
+
+// Route53Client extends the route53.Route53 client without overriding its existing methods.
+type Route53Client struct {
+	*route53.Route53
+}
+
+// NewRoute53Client creates a new Route53Client.
+// It is expected to be called after GetAwsCredentialFromCluster, which sets AWS-specific environment variables.
+func NewRoute53Client() *Route53Client {
+	return &Route53Client{
+		Route53: route53.New(session.Must(session.NewSession()), aws.NewConfig()),
+	}
+}
+
+// DeleteHostedZoneWithContextAndCheck deletes a hosted zone (delegating to the wrapped route53.Route53 client),
+// then waits for the deletion to take effect.
+func (route53Client *Route53Client) DeleteHostedZoneWithContextAndCheck(ctx context.Context, input *route53.DeleteHostedZoneInput, opts ...request.Option) (*route53.DeleteHostedZoneOutput, error) {
+	e2e.Logf("Deleting hosted zone %s", aws.StringValue(input.Id))
+	deleteHostedZoneOutput, err := route53Client.DeleteHostedZoneWithContext(ctx, input, opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	e2e.Logf("Waiting until the deletion takes effect")
+	err = route53Client.WaitUntilResourceRecordSetsChangedWithContext(ctx, &route53.GetChangeInput{
+		Id: deleteHostedZoneOutput.ChangeInfo.Id,
+	})
+	if err != nil {
+		return nil, err
+	}
+	deleteHostedZoneOutput.ChangeInfo.Status = aws.String(route53.ChangeStatusInsync)
+
+	return deleteHostedZoneOutput, nil
+}
+
+// EmptyHostedZoneWithContext removes all except NS/SOA records in a hosted zone,
+// then waits until the changes take effect.
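+// Sketch: drain a zone before deleting it (the zone ID is illustrative):
+//
+//	_, err := route53Client.EmptyHostedZoneWithContext(ctx, "Z0123456789EXAMPLE")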
+func (route53Client *Route53Client) EmptyHostedZoneWithContext(ctx context.Context, hostedZoneId string) (*route53.ChangeResourceRecordSetsOutput, error) {
+	e2e.Logf("Emptying hosted zone %s", hostedZoneId)
+	var changes []*route53.Change
+	pagingCallback := func(page *route53.ListResourceRecordSetsOutput, lastPage bool) bool {
+		for _, recordSet := range page.ResourceRecordSets {
+			// Skip NS and SOA records
+			if aws.StringValue(recordSet.Type) == route53.RRTypeNs || aws.StringValue(recordSet.Type) == route53.RRTypeSoa {
+				continue
+			}
+
+			changes = append(changes, &route53.Change{
+				Action:            aws.String(route53.ChangeActionDelete),
+				ResourceRecordSet: recordSet,
+			})
+		}
+		return !lastPage
+	}
+
+	var err error
+	e2e.Logf("Extracting all except NS/SOA records in the hosted zone")
+	if err = route53Client.ListResourceRecordSetsPagesWithContext(ctx,
+		&route53.ListResourceRecordSetsInput{
+			HostedZoneId: aws.String(hostedZoneId),
+		},
+		pagingCallback,
+	); err != nil {
+		return nil, err
+	}
+
+	var changeResourceRecordSetsOutput *route53.ChangeResourceRecordSetsOutput
+	e2e.Logf("Emptying hosted zone")
+	if changeResourceRecordSetsOutput, err = route53Client.ChangeResourceRecordSetsWithContext(ctx, &route53.ChangeResourceRecordSetsInput{
+		HostedZoneId: aws.String(hostedZoneId),
+		ChangeBatch: &route53.ChangeBatch{
+			Changes: changes,
+		},
+	}); err != nil {
+		return nil, err
+	}
+
+	e2e.Logf("Waiting until changes to the hosted zone take effect")
+	if err = route53Client.WaitUntilResourceRecordSetsChangedWithContext(ctx, &route53.GetChangeInput{
+		Id: changeResourceRecordSetsOutput.ChangeInfo.Id,
+	}); err != nil {
+		return nil, err
+	}
+	changeResourceRecordSetsOutput.ChangeInfo.Status = aws.String(route53.ChangeStatusInsync)
+
+	return changeResourceRecordSetsOutput, nil
+}
+
+// StsClient extends the sts.STS client without overriding its existing methods.
+type StsClient struct {
+	*sts.STS
+}
+
+// NewDelegatingStsClient creates an StsClient which delegates calls to methods that are not implemented by itself
+// to the wrapped sts.STS client.
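+// Sketch (assumes AWS credentials are already set in the environment):
+//
+//	stsClient := NewDelegatingStsClient(sts.New(session.Must(session.NewSession())))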
+func NewDelegatingStsClient(wrappedClient *sts.STS) *StsClient {
+	return &StsClient{
+		STS: wrappedClient,
+	}
+}
+
+// ECRClient struct
+type ECRClient struct {
+	svc *ecr.ECR
+}
+
+// NewECRClient creates an ECRClient
+func NewECRClient(region string) *ECRClient {
+	mySession := session.Must(session.NewSession())
+	ecrClient := &ECRClient{
+		svc: ecr.New(mySession, aws.NewConfig().WithRegion(region)),
+	}
+
+	return ecrClient
+}
+
+// CreateContainerRepository creates a container repository
+func (ecrClient *ECRClient) CreateContainerRepository(repositoryName string) (string, error) {
+	createRes, err := ecrClient.svc.CreateRepository(&ecr.CreateRepositoryInput{
+		RepositoryName: aws.String(repositoryName),
+	})
+	if err != nil {
+		e2e.Logf("Error creating repository %s", err.Error())
+		return "", err
+	}
+	e2e.Logf("Repository created: %s", *createRes.Repository.RepositoryUri)
+	return *createRes.Repository.RepositoryUri, nil
+}
+
+// DeleteContainerRepository deletes a container repository
+func (ecrClient *ECRClient) DeleteContainerRepository(repositoryName string) error {
+	_, err := ecrClient.svc.DeleteRepository(&ecr.DeleteRepositoryInput{
+		RepositoryName: aws.String(repositoryName),
+		Force:          aws.Bool(true),
+	})
+	return err
+}
+
+// GetAuthorizationToken gets the container repository credential
+func (ecrClient *ECRClient) GetAuthorizationToken() (string, error) {
+	loginRes, err := ecrClient.svc.GetAuthorizationToken(&ecr.GetAuthorizationTokenInput{})
+	if err != nil {
+		e2e.Logf("Error getting authorization token: %s", err.Error())
+		return "", err
+	}
+	authData := loginRes.AuthorizationData[0]
+	password := aws.StringValue(authData.AuthorizationToken)
+	return password, nil
+}
+
+// KMSClient struct.
+type KMSClient struct {
+	kmssvc *kms.KMS
+}
+
+// NewKMSClient inits the aws KMS client.
+func NewKMSClient(region string) *KMSClient {
+	awsSession := session.Must(session.NewSession())
+	kmsClient := &KMSClient{
+		kmssvc: kms.New(awsSession, aws.NewConfig().WithRegion(region)),
+	}
+
+	return kmsClient
+}
+
+// CreateKey creates a key
+func (kmsClient *KMSClient) CreateKey(description string) (string, error) {
+	createRes, err := kmsClient.kmssvc.CreateKey(&kms.CreateKeyInput{
+		Description: aws.String(description),
+	})
+	if err != nil {
+		e2e.Logf("Error creating key %s", err.Error())
+		return "", err
+	}
+	e2e.Logf("key created: %s", *createRes.KeyMetadata.Arn)
+	return *createRes.KeyMetadata.Arn, nil
+}
+
+// DeleteKey deletes a key
+func (kmsClient *KMSClient) DeleteKey(key string) error {
+	_, err := kmsClient.kmssvc.ScheduleKeyDeletion(&kms.ScheduleKeyDeletionInput{
+		KeyId:               aws.String(key),
+		PendingWindowInDays: aws.Int64(7),
+	})
+	return err
+}
diff --git a/test/util/azure/config_file.go b/test/util/azure/config_file.go
new file mode 100644
index 000000000..99ea119d2
--- /dev/null
+++ b/test/util/azure/config_file.go
@@ -0,0 +1,98 @@
+package azure
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"io/ioutil"
+	"os"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	clientcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+	"sigs.k8s.io/cloud-provider-azure/pkg/provider"
+	"sigs.k8s.io/yaml"
+)
+
+// LoadConfigFile uses the cluster to fetch the cloud provider config from the `openshift-config/cloud-provider-config` config map's config key.
+// It then uses `AZURE_AUTH_LOCATION` to load the credentials for the Azure API and updates the cloud provider config with the client secret.
+// The in-cluster cloud provider config uses the Azure Managed Identity attached to virtual machines to provide Azure API access,
+// while the e2e tests are usually run from outside the cluster and therefore need explicit auth creds.
+func LoadConfigFile() ([]byte, error) {
+	// LoadClientset but don't set the UserAgent to include the current test name because
+	// we don't run any test yet and this call panics
+	client, err := e2e.LoadClientset(true)
+	if err != nil {
+		return nil, err
+	}
+	config, err := cloudProviderConfigFromCluster(client.CoreV1())
+	if err != nil {
+		return nil, err
+	}
+
+	settings, err := getAuthFile()
+	if err != nil {
+		return nil, err
+	}
+	config.AADClientID = settings.ClientID
+	config.AADClientSecret = settings.ClientSecret
+	config.UseManagedIdentityExtension = false
+	config.UseInstanceMetadata = false
+
+	data, err := yaml.Marshal(config)
+	if err != nil {
+		return nil, err
+	}
+	return data, nil
+}
+
+func cloudProviderConfigFromCluster(client clientcorev1.ConfigMapsGetter) (*provider.Config, error) {
+	cm, err := client.ConfigMaps("openshift-config").Get(context.Background(), "cloud-provider-config", metav1.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+	data, ok := cm.Data["config"]
+	if !ok {
+		return nil, errors.New("No cloud provider config was set in openshift-config/cloud-provider-config")
+	}
+	config := &provider.Config{}
+	if err := yaml.Unmarshal([]byte(data), config); err != nil {
+		return nil, err
+	}
+	return config, nil
+}
+
+// getAuthFile loads the auth file using the file provided by AZURE_AUTH_LOCATION.
+// This mimics the function https://godoc.org/github.com/Azure/go-autorest/autorest/azure/auth#GetSettingsFromFile which is not currently available with the vendored Azure SDK.
+func getAuthFile() (*file, error) {
+	fileLocation := os.Getenv("AZURE_AUTH_LOCATION")
+	if fileLocation == "" {
+		return nil, errors.New("environment variable AZURE_AUTH_LOCATION is not set")
+	}
+
+	contents, err := ioutil.ReadFile(fileLocation)
+	if err != nil {
+		return nil, err
+	}
+
+	authFile := file{}
+	err = json.Unmarshal(contents, &authFile)
+	if err != nil {
+		return nil, err
+	}
+
+	return &authFile, nil
+}
+
+// file represents the authentication file
+type file struct {
+	ClientID                string `json:"clientId,omitempty"`
+	ClientSecret            string `json:"clientSecret,omitempty"`
+	SubscriptionID          string `json:"subscriptionId,omitempty"`
+	TenantID                string `json:"tenantId,omitempty"`
+	ActiveDirectoryEndpoint string `json:"activeDirectoryEndpointUrl,omitempty"`
+	ResourceManagerEndpoint string `json:"resourceManagerEndpointUrl,omitempty"`
+	GraphResourceID         string `json:"activeDirectoryGraphResourceId,omitempty"`
+	SQLManagementEndpoint   string `json:"sqlManagementEndpointUrl,omitempty"`
+	GalleryEndpoint         string `json:"galleryEndpointUrl,omitempty"`
+	ManagementEndpoint      string `json:"managementEndpointUrl,omitempty"`
+}
diff --git a/test/util/azure_client.go b/test/util/azure_client.go
new file mode 100644
index 000000000..e3347bd41
--- /dev/null
+++ b/test/util/azure_client.go
@@ -0,0 +1,492 @@
+package util
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"os"
+	"os/exec"
+	"regexp"
+	"strings"
+
+	"github.com/Azure/azure-sdk-for-go/services/authorization/mgmt/2020-10-01/authorization"
+	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
+	"github.com/Azure/azure-sdk-for-go/services/msi/mgmt/2018-11-30/msi"
+	"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage" + "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure/auth" + + "github.com/Azure/go-autorest/autorest/to" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + e2e "k8s.io/kubernetes/test/e2e/framework" +) + +// AzureSession is an object representing session for subscription +type AzureSession struct { + SubscriptionID string + Authorizer autorest.Authorizer +} + +// NewAzureSessionFromEnv new azure session from env credentials +func NewAzureSessionFromEnv() (*AzureSession, error) { + authorizer, azureSessErr := auth.NewAuthorizerFromEnvironment() + if azureSessErr != nil { + e2e.Logf("New Azure Session from ENV error: %v", azureSessErr) + return nil, azureSessErr + } + sess := AzureSession{ + SubscriptionID: os.Getenv("AZURE_SUBSCRIPTION_ID"), + Authorizer: authorizer, + } + return &sess, nil +} + +// getNicClient get nic client +func getNicClient(sess *AzureSession) network.InterfacesClient { + nicClient := network.NewInterfacesClient(sess.SubscriptionID) + nicClient.Authorizer = sess.Authorizer + return nicClient +} + +// getStorageClient get storage client +func getStorageClient(sess *AzureSession) storage.AccountsClient { + storageClient := storage.NewAccountsClient(sess.SubscriptionID) + storageClient.Authorizer = sess.Authorizer + return storageClient +} + +// GetAzureStorageAccount get Azure Storage Account +func GetAzureStorageAccount(sess *AzureSession, resourceGroupName string) (string, error) { + storageClient := getStorageClient(sess) + listGroupAccounts, err := storageClient.ListByResourceGroup(context.Background(), resourceGroupName) + if err != nil { + return "", err + } + for _, acc := range *listGroupAccounts.Value { + fmt.Printf("\t%s\n", *acc.Name) + match, err := regexp.MatchString("cluster", *acc.Name) + if err != nil { + return "", err + } + + if match { + e2e.Logf("The storage account name is %s,", *acc.Name) + return *acc.Name, nil + } + } + e2e.Logf("There is no storage account name matching regex : cluster") + return "", nil +} + +// getIPClient get publicIP client +func getIPClient(sess *AzureSession) network.PublicIPAddressesClient { + ipClient := network.NewPublicIPAddressesClient(sess.SubscriptionID) + ipClient.Authorizer = sess.Authorizer + return ipClient +} + +// GetAzureVMPrivateIP get Azure vm private IP +func GetAzureVMPrivateIP(sess *AzureSession, rg, vmName string) (string, error) { + nicClient := getNicClient(sess) + privateIP := "" + + //find private IP + for iter, err := nicClient.ListComplete(context.Background(), rg); iter.NotDone(); err = iter.Next() { + if err != nil { + return "", err + } + if strings.Contains(*iter.Value().Name, vmName) { + e2e.Logf("Found int-svc VM with name %s", *iter.Value().Name) + intF := *iter.Value().InterfacePropertiesFormat.IPConfigurations + privateIP = *intF[0].InterfaceIPConfigurationPropertiesFormat.PrivateIPAddress + e2e.Logf("The private IP for vm %s is %s,", vmName, privateIP) + break + } + } + + return privateIP, nil + +} + +// GetAzureVMPublicIPByNameRegex returns the first public IP whose name matches the given regex +func GetAzureVMPublicIPByNameRegex(sess *AzureSession, rg, publicIPNameRegex string) (string, error) { + //find public IP + e2e.Logf("Looking for publicIP with name matching %s", publicIPNameRegex) + ipClient := getIPClient(sess) + + for iter, err := ipClient.ListAll(context.Background()); iter.NotDone(); err = iter.Next() { + if err != 
+			return "", err
+		}
+
+		for _, value := range iter.Values() {
+			match, err := regexp.MatchString(publicIPNameRegex, *value.Name)
+			if err != nil {
+				return "", err
+			}
+
+			if match {
+				e2e.Logf("The public IP with name %s is %s,", *value.Name, *value.IPAddress)
+				return *value.IPAddress, nil
+			}
+		}
+	}
+
+	e2e.Logf("There is no public IP with its name matching regex : %s", publicIPNameRegex)
+	return "", nil
+
+}
+
+// GetAzureVMPublicIP gets the azure vm public IP
+func GetAzureVMPublicIP(sess *AzureSession, rg, vmName string) (string, error) {
+	publicIPName := vmName + "PublicIP"
+	publicIP := ""
+	// find the public IP
+	ipClient := getIPClient(sess)
+	publicIPAtt, getIPErr := ipClient.Get(context.Background(), rg, publicIPName, "")
+	if getIPErr != nil {
+		return "", getIPErr
+	}
+	publicIP = *publicIPAtt.IPAddress
+	e2e.Logf("The public IP for vm %s is %s,", vmName, publicIP)
+	return publicIP, nil
+
+}
+
+// StartAzureVM starts the selected VM
+func StartAzureVM(sess *AzureSession, vmName string, resourceGroupName string) (osr autorest.Response, err error) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	vmClient := compute.NewVirtualMachinesClient(sess.SubscriptionID)
+	vmClient.Authorizer = sess.Authorizer
+	future, vmErr := vmClient.Start(ctx, resourceGroupName, vmName)
+	if vmErr != nil {
+		e2e.Logf("cannot start vm: %v", vmErr)
+		return osr, vmErr
+	}
+
+	err = future.WaitForCompletionRef(ctx, vmClient.Client)
+	if err != nil {
+		e2e.Logf("cannot get the vm start future response: %v", err)
+		return osr, err
+	}
+	return future.Result(vmClient)
+}
+
+// StopAzureVM stops the selected VM
+func StopAzureVM(sess *AzureSession, vmName string, resourceGroupName string) (osr autorest.Response, err error) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	vmClient := compute.NewVirtualMachinesClient(sess.SubscriptionID)
+	vmClient.Authorizer = sess.Authorizer
+	// the skipShutdown parameter is optional; we pass true here
+	skipShutdown := true
+	future, vmErr := vmClient.PowerOff(ctx, resourceGroupName, vmName, &skipShutdown)
+	if vmErr != nil {
+		e2e.Logf("cannot power off vm: %v", vmErr)
+		return osr, vmErr
+	}
+
+	err = future.WaitForCompletionRef(ctx, vmClient.Client)
+	if err != nil {
+		e2e.Logf("cannot get the vm power off future response: %v", err)
+		return osr, err
+	}
+	return future.Result(vmClient)
+}
+
+// GetAzureVMInstance gets a vm instance
+func GetAzureVMInstance(sess *AzureSession, vmName string, resourceGroupName string) (string, error) {
+	vmClient := compute.NewVirtualMachinesClient(sess.SubscriptionID)
+	vmClient.Authorizer = sess.Authorizer
+	for vm, vmErr := vmClient.ListComplete(context.Background(), resourceGroupName); vm.NotDone(); vmErr = vm.Next() {
+		if vmErr != nil {
+			e2e.Logf("got error while traversing RG list: %v", vmErr)
+			return "", vmErr
+		}
+		instanceName := vm.Value()
+		if *instanceName.Name == vmName {
+			e2e.Logf("Azure instance found :: %s", vmName)
+			return vmName, nil
+		}
+	}
+	return "", nil
+}
+
+// GetAzureVMInstanceState gets the vm instance state
+func GetAzureVMInstanceState(sess *AzureSession, vmName string, resourceGroupName string) (string, error) {
+	vmClient := compute.NewVirtualMachinesClient(sess.SubscriptionID)
+	vmClient.Authorizer = sess.Authorizer
+	vmStatus, vmErr := vmClient.Get(context.Background(), resourceGroupName, vmName, compute.InstanceView)
+	if vmErr != nil {
+		e2e.Logf("Failed to get vm status :: %v", vmErr)
+		return "", vmErr
+	}
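+	// The instance view's second status entry holds the power state, e.g.
+	// "VM running"; split on the space and return the word after "VM".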
+
+// GetAzureVMInstanceState gets the power state of the VM instance
+func GetAzureVMInstanceState(sess *AzureSession, vmName string, resourceGroupName string) (string, error) {
+	vmClient := compute.NewVirtualMachinesClient(sess.SubscriptionID)
+	vmClient.Authorizer = sess.Authorizer
+	vmStatus, vmErr := vmClient.Get(context.Background(), resourceGroupName, vmName, compute.InstanceView)
+	if vmErr != nil {
+		e2e.Logf("Failed to get vm status :: %v", vmErr)
+		return "", vmErr
+	}
+	statuses := *vmStatus.VirtualMachineProperties.InstanceView.Statuses
+	// the second status entry holds the power state display string, e.g. "VM running"
+	displayStatus := *statuses[1].DisplayStatus
+	newStatus := strings.Split(displayStatus, " ")
+	e2e.Logf("Azure instance status found :: %v", newStatus[1])
+	return newStatus[1], nil
+}
+
+// GetAzureCredentialFromCluster gets Azure credentials from the cluster and loads them as environment variables
+func GetAzureCredentialFromCluster(oc *CLI) (string, error) {
+	credential, getSecErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/azure-credentials", "-n", "kube-system", "-o=jsonpath={.data}").Output()
+	if getSecErr != nil {
+		credential, getSecErr = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/azure-cloud-credentials", "-n", "openshift-cloud-controller-manager", "-o=jsonpath={.data}").Output()
+		if getSecErr != nil {
+			return "", getSecErr
+		}
+	}
+
+	type azureCredentials struct {
+		AzureClientID       string `json:"azure_client_id,omitempty"`
+		AzureClientSecret   string `json:"azure_client_secret,omitempty"`
+		AzureSubscriptionID string `json:"azure_subscription_id,omitempty"`
+		AzureTenantID       string `json:"azure_tenant_id,omitempty"`
+		AzureResourceGroup  string `json:"azure_resourcegroup,omitempty"`
+		AzureResourcePrefix string `json:"azure_resource_prefix,omitempty"`
+	}
+	azureCreds := azureCredentials{}
+	if err := json.Unmarshal([]byte(credential), &azureCreds); err != nil {
+		return "", err
+	}
+
+	azureClientID, err := base64.StdEncoding.DecodeString(azureCreds.AzureClientID)
+	if err != nil {
+		return "", err
+	}
+
+	azureClientSecret, err := base64.StdEncoding.DecodeString(azureCreds.AzureClientSecret)
+	if err != nil {
+		return "", err
+	}
+
+	azureSubscriptionID, err := base64.StdEncoding.DecodeString(azureCreds.AzureSubscriptionID)
+	if err != nil {
+		return "", err
+	}
+
+	azureTenantID, err := base64.StdEncoding.DecodeString(azureCreds.AzureTenantID)
+	if err != nil {
+		return "", err
+	}
+
+	azureResourceGroup, err := base64.StdEncoding.DecodeString(azureCreds.AzureResourceGroup)
+	if err != nil {
+		return "", err
+	}
+
+	azureResourcePrefix, err := base64.StdEncoding.DecodeString(azureCreds.AzureResourcePrefix)
+	if err != nil {
+		return "", err
+	}
+	os.Setenv("AZURE_CLIENT_ID", string(azureClientID))
+	os.Setenv("AZURE_CLIENT_SECRET", string(azureClientSecret))
+	os.Setenv("AZURE_SUBSCRIPTION_ID", string(azureSubscriptionID))
+	os.Setenv("AZURE_TENANT_ID", string(azureTenantID))
+	os.Setenv("AZURE_RESOURCE_PREFIX", string(azureResourcePrefix))
+	e2e.Logf("Azure credentials successfully loaded.")
+	return string(azureResourceGroup), nil
+}
+
+// GetAzureStorageAccountFromCluster gets the azure storage accountName and accountKey from the image registry
+// TODO: create a storage account and use that account to manage the azure container
+func GetAzureStorageAccountFromCluster(oc *CLI) (string, string, error) {
+	var accountName string
+	imageRegistry, err := oc.AdminKubeClient().AppsV1().Deployments("openshift-image-registry").Get(context.Background(), "image-registry", metav1.GetOptions{})
+	if err != nil {
+		return "", "", err
+	}
+	for _, container := range imageRegistry.Spec.Template.Spec.Containers {
+		for _, env := range container.Env {
+			if env.Name == "REGISTRY_STORAGE_AZURE_ACCOUNTNAME" {
+				accountName = env.Value
+				break
+			}
+		}
+	}
+
+	dirname := "/tmp/" + oc.Namespace() + "-creds"
+	defer os.RemoveAll(dirname)
+	_ = os.MkdirAll(dirname, 0777)
+	_, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/image-registry-private-configuration", "-n", "openshift-image-registry", "--confirm",
"--to="+dirname).Output() + if err != nil { + return accountName, "", err + } + accountKey, _ := os.ReadFile(dirname + "/REGISTRY_STORAGE_AZURE_ACCOUNTKEY") + return accountName, string(accountKey), nil +} + +// NewAzureContainerClient initializes a new azure blob container client +func NewAzureContainerClient(oc *CLI, accountName, accountKey, azContainerName string) (azblob.ContainerURL, error) { + storageAccountURISuffix := ".blob.core.windows.net" + cloudName, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.azure.cloudName}").Output() + if strings.ToLower(cloudName) == "azureusgovernmentcloud" { + storageAccountURISuffix = ".blob.core.usgovcloudapi.net" + } + //placeholder if strings.ToLower(cloudName) == "azurechinacloud" + //placeholder if strings.ToLower(cloudName) == "azuregermancloud" + u, _ := url.Parse(fmt.Sprintf("https://%s%s", accountName, storageAccountURISuffix)) + credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) + p := azblob.NewPipeline(credential, azblob.PipelineOptions{}) + serviceURL := azblob.NewServiceURL(*u, p) + return serviceURL.NewContainerURL(azContainerName), err +} + +// CreateAzureStorageBlobContainer creates azure storage container +func CreateAzureStorageBlobContainer(container azblob.ContainerURL) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // check if the container exists or not + // if exists, then remove the blobs in the container, if not, create the container + _, err := container.GetProperties(ctx, azblob.LeaseAccessConditions{}) + message := fmt.Sprintf("%v", err) + if strings.Contains(message, "ContainerNotFound") { + _, err = container.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone) + return err + } + return EmptyAzureBlobContainer(container) +} + +// DeleteAzureStorageBlobContainer deletes azure storage container +func DeleteAzureStorageBlobContainer(container azblob.ContainerURL) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + err := EmptyAzureBlobContainer(container) + if err != nil { + return err + } + _, err = container.Delete(ctx, azblob.ContainerAccessConditions{}) + if err != nil { + return fmt.Errorf("error deleting container: %v", err) + } + e2e.Logf("Azure storage container is deleted") + return nil +} + +// EmptyAzureBlobContainer removes all the files in azure storage container +func EmptyAzureBlobContainer(container azblob.ContainerURL) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for marker := (azblob.Marker{}); marker.NotDone(); { // The parens around Marker{} are required to avoid compiler error. + // Get a result segment starting with the blob indicated by the current Marker. + listBlob, err := container.ListBlobsFlatSegment(ctx, marker, azblob.ListBlobsSegmentOptions{}) + if err != nil { + return fmt.Errorf("error listing blobs in container: %v", err) + } + + // IMPORTANT: ListBlobs returns the start of the next segment; you MUST use this to get + // the next segment (after processing the current result segment). 
+ marker = listBlob.NextMarker + + // Process the blobs returned in this result segment (if the segment is empty, the loop body won't execute) + for _, blobInfo := range listBlob.Segment.BlobItems { + blobURL := container.NewBlockBlobURL(blobInfo.Name) + _, err := blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}) + if err != nil { + return fmt.Errorf("error deleting blob %s: %v", blobInfo.Name, err) + } + } + } + e2e.Logf("deleted all blob items in the container.") + return nil +} + +// getUserAssignedIdentitiesClient get user assigned identities client +func getUserAssignedIdentitiesClient(sess *AzureSession) msi.UserAssignedIdentitiesClient { + msiClient := msi.NewUserAssignedIdentitiesClient(sess.SubscriptionID) + msiClient.Authorizer = sess.Authorizer + return msiClient +} + +// GetUserAssignedIdentityPrincipalID get user assigned identity PrincipalID +func GetUserAssignedIdentityPrincipalID(sess *AzureSession, resourceGroup string, identityName string) (string, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + msiClient := getUserAssignedIdentitiesClient(sess) + identity, err := msiClient.Get(ctx, resourceGroup, identityName) + if err != nil { + return "", err + } + return identity.PrincipalID.String(), nil +} + +// getRoleAssignmentsClient get role assignments client +func getRoleAssignmentsClient(sess *AzureSession) authorization.RoleAssignmentsClient { + roleAssignmentsClient := authorization.NewRoleAssignmentsClient(sess.SubscriptionID) + roleAssignmentsClient.Authorizer = sess.Authorizer + return roleAssignmentsClient +} + +// GrantRoleToPrincipalIDByResourceGroup grant role to principalID by resource group +func GrantRoleToPrincipalIDByResourceGroup(sess *AzureSession, principalID string, resourceGroup string, roleId string) (roleAssignmentName string, scope string) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + roleAssignmentsClient := getRoleAssignmentsClient(sess) + + roleAssignment := authorization.RoleAssignmentCreateParameters{ + Properties: &authorization.RoleAssignmentProperties{ + PrincipalID: &principalID, + RoleDefinitionID: to.StringPtr("/subscriptions/" + sess.SubscriptionID + "/providers/Microsoft.Authorization/roleDefinitions/" + roleId), + }, + } + roleAssignmentName = sess.SubscriptionID[:8] + principalID[8:24] + roleId[24:] + scope = "/subscriptions/" + sess.SubscriptionID + "/resourceGroups/" + resourceGroup + result, err := roleAssignmentsClient.Create(ctx, scope, roleAssignmentName, roleAssignment) + if err != nil { + e2e.Logf("Error creating role assignment: %v", err) + } else { + e2e.Logf("Role assignment created: %s", *result.Name) + } + return roleAssignmentName, scope +} + +// DeleteRoleAssignments deletes role assignments +func DeleteRoleAssignments(sess *AzureSession, roleAssignmentName string, scope string) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + roleAssignmentsClient := getRoleAssignmentsClient(sess) + _, err := roleAssignmentsClient.Delete(ctx, scope, roleAssignmentName) + if err != nil { + return fmt.Errorf("error deleting role assignment: %v", err) + } + e2e.Logf("Role assignment is deleted") + return nil +} + +// StopAzureStackVM stops the virtual machine with the given name in the specified resource group using Azure CLI +func StopAzureStackVM(resourceGroupName, vmName string) error { + cmd := fmt.Sprintf(`az vm stop --name %s --resource-group %s --no-wait`, vmName, resourceGroupName) + err := 
exec.Command("bash", "-c", cmd).Run() + if err != nil { + return fmt.Errorf("error stopping VM: %v", err) + } + return nil +} + +// StartAzureStackVM starts the virtual machine with the given name in the specified resource group using Azure CLI +func StartAzureStackVM(resourceGroupName, vmName string) error { + cmd := fmt.Sprintf(`az vm start --name %s --resource-group %s`, vmName, resourceGroupName) + output, err := exec.Command("bash", "-c", cmd).Output() + if err != nil { + return fmt.Errorf("error starting VM: %v, output: %s", err, output) + } + return nil +} + +// GetAzureStackVMStatus gets the status of the virtual machine with the given name in the specified resource group using Azure CLI +func GetAzureStackVMStatus(resourceGroupName, vmName string) (string, error) { + cmd := fmt.Sprintf(`az vm show --name %s --resource-group %s --query 'powerState' --show-details |awk '{print $2}' | cut -d '"' -f1`, vmName, resourceGroupName) + instanceState, err := exec.Command("bash", "-c", cmd).Output() + if string(instanceState) == "" || err != nil { + return "", fmt.Errorf("Not able to get vm instance state :: %s", err) + } + return strings.Trim(string(instanceState), "\n"), err +} diff --git a/test/util/azure_client_v2.go b/test/util/azure_client_v2.go new file mode 100644 index 000000000..0a5ecf20e --- /dev/null +++ b/test/util/azure_client_v2.go @@ -0,0 +1,369 @@ +package util + +import ( + "context" + "fmt" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + azTo "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/keyvault/armkeyvault" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armfeatures" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" + "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage" + msgraphsdkgo "github.com/microsoftgraph/msgraph-sdk-go" + + e2e "k8s.io/kubernetes/test/e2e/framework" + + o "github.com/onsi/gomega" +) + +// AzureClientSet encapsulates Azure account information and multiple clients. +type AzureClientSet struct { + // Account information + SubscriptionID string + tokenCredential azcore.TokenCredential + + // Clients + capacityReservationGroupClient *armcompute.CapacityReservationGroupsClient + capacityReservationsClient *armcompute.CapacityReservationsClient + featuresClient *armfeatures.Client + graphServiceClient *msgraphsdkgo.GraphServiceClient + keysClient *armkeyvault.KeysClient + resourceGroupsClient *armresources.ResourceGroupsClient + vaultsClient *armkeyvault.VaultsClient + virtualMachinesClient *armcompute.VirtualMachinesClient +} + +func NewAzureClientSet(subscriptionId string, tokenCredential azcore.TokenCredential) *AzureClientSet { + return &AzureClientSet{ + SubscriptionID: subscriptionId, + tokenCredential: tokenCredential, + } +} + +// NewAzureClientSetWithRootCreds constructs an AzureClientSet with info gleaned from the in-cluster root credential. 
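+// A minimal usage sketch (illustrative; assumes a configured *CLI with admin
+// access to the in-cluster root credential):
+//
+//	cs := NewAzureClientSetWithRootCreds(oc)
+//	rgClient := cs.GetResourceGroupClient(nil)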
+func NewAzureClientSetWithRootCreds(oc *CLI) *AzureClientSet {
+	azCreds := NewEmptyAzureCredentials()
+	o.Expect(azCreds.GetFromClusterAndDecode(oc)).NotTo(o.HaveOccurred())
+	o.Expect(azCreds.SetSdkEnvVars()).NotTo(o.HaveOccurred())
+	azureCredentials, err := azidentity.NewDefaultAzureCredential(nil)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	return NewAzureClientSet(azCreds.AzureSubscriptionID, azureCredentials)
+}
+
+// NewAzureClientSetWithCredsFromFile constructs an AzureClientSet with info gleaned from a file.
+func NewAzureClientSetWithCredsFromFile(filePath string) *AzureClientSet {
+	azCreds := NewEmptyAzureCredentialsFromFile()
+	o.Expect(azCreds.LoadFromFile(filePath)).NotTo(o.HaveOccurred())
+	o.Expect(azCreds.SetSdkEnvVars()).NotTo(o.HaveOccurred())
+	azureCredentials, err := azidentity.NewDefaultAzureCredential(nil)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	return NewAzureClientSet(azCreds.AzureSubscriptionID, azureCredentials)
+}
+
+// NewAzureClientSetWithCredsFromCanonicalFile creates an AzureClientSet using credentials from
+// the canonical file location given by the AZURE_AUTH_LOCATION environment variable (AzureCredsLocationEnv).
+func NewAzureClientSetWithCredsFromCanonicalFile() *AzureClientSet {
+	return NewAzureClientSetWithCredsFromFile(MustGetAzureCredsLocation())
+}
+
+// GetResourceGroupClient gets the resource group client from the AzureClientSet, constructs it if necessary.
+// Concurrent invocation of this method is safe only when AzureClientSet.resourceGroupsClient is non-nil,
+// i.e. when the resourceGroupsClient has been eagerly initialized.
+func (cs *AzureClientSet) GetResourceGroupClient(options *arm.ClientOptions) *armresources.ResourceGroupsClient {
+	if cs.resourceGroupsClient == nil {
+		rgClient, err := armresources.NewResourceGroupsClient(cs.SubscriptionID, cs.tokenCredential, options)
+		o.Expect(err).NotTo(o.HaveOccurred())
+		cs.resourceGroupsClient = rgClient
+	}
+	return cs.resourceGroupsClient
+}
+
+// GetCapacityReservationGroupClient gets the capacity reservation groups client, constructs it if necessary.
+func (cs *AzureClientSet) GetCapacityReservationGroupClient(options *arm.ClientOptions) *armcompute.CapacityReservationGroupsClient {
+	if cs.capacityReservationGroupClient == nil {
+		capacityReservationGroupClient, err := armcompute.NewCapacityReservationGroupsClient(cs.SubscriptionID, cs.tokenCredential, options)
+		o.Expect(err).NotTo(o.HaveOccurred())
+		cs.capacityReservationGroupClient = capacityReservationGroupClient
+	}
+	return cs.capacityReservationGroupClient
+}
+
+// GetCapacityReservationsClient gets the capacity reservations client, constructs it if necessary.
+func (cs *AzureClientSet) GetCapacityReservationsClient(options *arm.ClientOptions) *armcompute.CapacityReservationsClient {
+	if cs.capacityReservationsClient == nil {
+		capacityReservationsClient, err := armcompute.NewCapacityReservationsClient(cs.SubscriptionID, cs.tokenCredential, options)
+		o.Expect(err).NotTo(o.HaveOccurred())
+		cs.capacityReservationsClient = capacityReservationsClient
+	}
+	return cs.capacityReservationsClient
+}
+
+// GetVaultsClient gets the vaults client from AzureClientSet, constructs it if necessary.
+func (cs *AzureClientSet) GetVaultsClient(options *arm.ClientOptions) *armkeyvault.VaultsClient {
+	if cs.vaultsClient == nil {
+		vaultsClient, err := armkeyvault.NewVaultsClient(cs.SubscriptionID, cs.tokenCredential, options)
+		o.Expect(err).NotTo(o.HaveOccurred())
+		cs.vaultsClient = vaultsClient
+	}
+	return cs.vaultsClient
+}
+
+// GetKeysClient gets the keys client from AzureClientSet, constructs it if necessary.
+func (cs *AzureClientSet) GetKeysClient(options *arm.ClientOptions) *armkeyvault.KeysClient { + if cs.keysClient == nil { + keysClient, err := armkeyvault.NewKeysClient(cs.SubscriptionID, cs.tokenCredential, options) + o.Expect(err).NotTo(o.HaveOccurred()) + cs.keysClient = keysClient + } + return cs.keysClient +} + +// GetVirtualMachinesClient gets the virtual machine client from AzureClientSet, constructs it if necessary. +func (cs *AzureClientSet) GetVirtualMachinesClient(options *arm.ClientOptions) *armcompute.VirtualMachinesClient { + if cs.virtualMachinesClient == nil { + virtualMachineClient, err := armcompute.NewVirtualMachinesClient(cs.SubscriptionID, cs.tokenCredential, options) + o.Expect(err).NotTo(o.HaveOccurred()) + cs.virtualMachinesClient = virtualMachineClient + } + return cs.virtualMachinesClient +} + +// GetGraphServiceClient gets the graph service client from AzureClientSet, constructs it if necessary. +// Pass a nil slice to use the default scope. +func (cs *AzureClientSet) GetGraphServiceClient(scopes []string) *msgraphsdkgo.GraphServiceClient { + if cs.graphServiceClient == nil { + graphServiceClient, err := msgraphsdkgo.NewGraphServiceClientWithCredentials(cs.tokenCredential, scopes) + o.Expect(err).NotTo(o.HaveOccurred()) + cs.graphServiceClient = graphServiceClient + } + return cs.graphServiceClient +} + +// CreateCapacityReservationGroup create a capacity reservation group +func (cs *AzureClientSet) CreateCapacityReservationGroup(ctx context.Context, capacityReservationGroupName string, resourceGroupName string, location string, zone string) (string, error) { + capacityReservationGroupClient := cs.GetCapacityReservationGroupClient(nil) + capacityReservationGroup := armcompute.CapacityReservationGroup{ + Location: &location, + Zones: []*string{&zone}, + } + + resp, err := capacityReservationGroupClient.CreateOrUpdate( + ctx, + resourceGroupName, + capacityReservationGroupName, + capacityReservationGroup, + nil, + ) + if err != nil { + return "", fmt.Errorf("Failed to create Capacity Reservation Group: %v", err) + } + e2e.Logf("Capacity Reservation Group created successfully, capacity reservation group ID: %s", *resp.ID) + return *resp.ID, err +} + +// CreateCapacityReservation create a capacity reservation +func (cs *AzureClientSet) CreateCapacityReservation(ctx context.Context, capacityReservationGroupName string, capacityReservationName string, location string, resourceGroupName string, skuName string, zone string) error { + capacityReservationsClient := cs.GetCapacityReservationsClient(nil) + instanceCapacity := int64(1) + capacityReservation := armcompute.CapacityReservation{ + Location: &location, + SKU: &armcompute.SKU{ + Capacity: &instanceCapacity, + Name: &skuName, + }, + Zones: []*string{&zone}, + } + resp, err := capacityReservationsClient.BeginCreateOrUpdate( + ctx, + resourceGroupName, + capacityReservationGroupName, + capacityReservationName, + capacityReservation, + nil, + ) + if err != nil { + return fmt.Errorf("Failed to create Capacity Reservation: %v", err) + } + finalResp, err := resp.PollUntilDone(ctx, nil) + if err != nil { + return fmt.Errorf("Failed to wait for the Capacity Reservation creation to complete: %v", err) + } + e2e.Logf("Capacity Reservation created successfully %s", *finalResp.ID) + return nil +} + +// DeleteCapacityReservationGroup delete capacity reservation group +func (cs *AzureClientSet) DeleteCapacityReservationGroup(ctx context.Context, capacityReservationGroupName string, resourceGroupName string) error { + 
capacityReservationGroupClient := cs.GetCapacityReservationGroupClient(nil)
+	_, err := capacityReservationGroupClient.Delete(
+		ctx,
+		resourceGroupName,
+		capacityReservationGroupName,
+		nil,
+	)
+	if err != nil {
+		return fmt.Errorf("Failed to delete Capacity Reservation Group: %v", err)
+	}
+	e2e.Logf("Capacity Reservation Group deleted successfully")
+	return nil
+}
+
+// DeleteCapacityReservation deletes a capacity reservation
+func (cs *AzureClientSet) DeleteCapacityReservation(ctx context.Context, capacityReservationGroupName string, capacityReservationName string, resourceGroupName string) error {
+	capacityReservationsClient := cs.GetCapacityReservationsClient(nil)
+	resp, err := capacityReservationsClient.BeginDelete(
+		ctx,
+		resourceGroupName,
+		capacityReservationGroupName,
+		capacityReservationName,
+		nil,
+	)
+	if err != nil {
+		return fmt.Errorf("Failed to delete Capacity Reservation: %v", err)
+	}
+	_, err = resp.PollUntilDone(ctx, nil)
+	if err != nil {
+		return fmt.Errorf("Failed to wait for the capacity reservation deletion to complete: %v", err)
+	}
+	e2e.Logf("Capacity Reservation deleted successfully")
+	return nil
+}
+
+func (cs *AzureClientSet) DeleteResourceGroup(ctx context.Context, rgName string) error {
+	poller, err := cs.GetResourceGroupClient(nil).BeginDelete(ctx, rgName, nil)
+	if err != nil {
+		return err
+	}
+	_, err = poller.PollUntilDone(ctx, nil)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (cs *AzureClientSet) GetServicePrincipalObjectId(ctx context.Context, appId string) (string, error) {
+	sp, err := cs.GetGraphServiceClient(nil).ServicePrincipalsWithAppId(&appId).Get(ctx, nil)
+	if err != nil {
+		return "", fmt.Errorf("failed to get service principal: %v", err)
+	}
+	objectId := sp.GetId()
+	if objectId == nil {
+		return "", fmt.Errorf("object ID is nil")
+	}
+	return *objectId, nil
+}
+
+// ProcessAzurePages iterates over an ARM pager, invoking handlePage on every page.
+func ProcessAzurePages[T any](ctx context.Context, pager *runtime.Pager[T], handlePage func(page T) error) error {
+	for pager.More() {
+		page, err := pager.NextPage(ctx)
+		if err != nil {
+			return fmt.Errorf("failed to fetch next page: %w", err)
+		}
+		err = handlePage(page)
+		if err != nil {
+			return fmt.Errorf("error processing page: %w", err)
+		}
+	}
+	return nil
+}
+
+// CreateResourceGroup creates an Azure resource group.
+func (cs *AzureClientSet) CreateResourceGroup(ctx context.Context, resourceGroupName, region string) (armresources.ResourceGroupsClientCreateOrUpdateResponse, error) {
+	rgClient, err := armresources.NewResourceGroupsClient(cs.SubscriptionID, cs.tokenCredential, nil)
+	if err != nil {
+		return armresources.ResourceGroupsClientCreateOrUpdateResponse{}, err
+	}
+	param := armresources.ResourceGroup{
+		Location: azTo.Ptr(region),
+	}
+	return rgClient.CreateOrUpdate(ctx, resourceGroupName, param, nil)
+}
+
+// CreateStorageAccount creates an Azure storage account and returns its access keys.
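+// Example (a sketch; names are illustrative, and the call blocks until the
+// account finishes provisioning):
+//
+//	keys, err := cs.CreateStorageAccount(ctx, "myResourceGroup", "mystorageacct", "eastus")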
+func (cs *AzureClientSet) CreateStorageAccount(ctx context.Context, resourceGroupName, storageAccountName, region string) (armstorage.AccountsClientListKeysResponse, error) {
+	storageClient, err := armstorage.NewAccountsClient(cs.SubscriptionID, cs.tokenCredential, nil)
+	if err != nil {
+		return armstorage.AccountsClientListKeysResponse{}, err
+	}
+	result, err := storageClient.BeginCreate(ctx, resourceGroupName, storageAccountName, armstorage.AccountCreateParameters{
+		Location: azTo.Ptr(region),
+		SKU: &armstorage.SKU{
+			Name: azTo.Ptr(armstorage.SKUNameStandardLRS),
+		},
+		Kind: azTo.Ptr(armstorage.KindStorageV2),
+	}, nil)
+	if err != nil {
+		return armstorage.AccountsClientListKeysResponse{}, err
+	}
+
+	// Poll until the storage account is ready
+	_, err = result.PollUntilDone(context.Background(), &runtime.PollUntilDoneOptions{
+		Frequency: 10 * time.Second,
+	})
+	AssertWaitPollNoErr(err, "Storage account is not ready...")
+
+	resultKey, err := storageClient.ListKeys(ctx, resourceGroupName, storageAccountName, &armstorage.AccountsClientListKeysOptions{Expand: nil})
+	return resultKey, err
+}
+
+// DeleteStorageAccount deletes the given storage account.
+func (cs *AzureClientSet) DeleteStorageAccount(ctx context.Context, resourceGroupName, storageAccountName string) {
+	clientFactory, err := armstorage.NewClientFactory(cs.SubscriptionID, cs.tokenCredential, nil)
+	if err != nil {
+		e2e.Failf("failed to create client: %v", err)
+	}
+	_, err = clientFactory.NewAccountsClient().Delete(ctx, resourceGroupName, storageAccountName, nil)
+	if err != nil {
+		e2e.Failf("failed to finish the request: %v", err)
+	}
+}
+
+// GetStorageAccountProperties returns the properties of the given storage account.
+func (cs *AzureClientSet) GetStorageAccountProperties(storageAccountName string, resourceGroupName string) armstorage.AccountsClientGetPropertiesResponse {
+	ctx := context.Background()
+	clientFactory, err := armstorage.NewClientFactory(cs.SubscriptionID, cs.tokenCredential, nil)
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+	res, err := clientFactory.NewAccountsClient().GetProperties(ctx, resourceGroupName, storageAccountName, &armstorage.AccountsClientGetPropertiesOptions{Expand: nil})
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+	return res
+}
+
+// GetFeaturesClient gets the features client, constructs it if necessary.
+func (cs *AzureClientSet) GetFeaturesClient(options *arm.ClientOptions) *armfeatures.Client {
+	if cs.featuresClient == nil {
+		featuresClient, err := armfeatures.NewClient(cs.SubscriptionID, cs.tokenCredential, options)
+		o.Expect(err).NotTo(o.HaveOccurred())
+		cs.featuresClient = featuresClient
+	}
+	return cs.featuresClient
+}
+
+// RegisterEncryptionAtHost enables the EncryptionAtHost feature in Azure.
+func (cs *AzureClientSet) RegisterEncryptionAtHost(ctx context.Context) error {
+	featuresClient := cs.GetFeaturesClient(nil)
+	featureName := "EncryptionAtHost"
+	resourceProvider := "Microsoft.Compute"
+
+	// Check whether EncryptionAtHost is already registered
+	feature, err := featuresClient.Get(ctx, resourceProvider, featureName, nil)
+	if err == nil && *feature.Properties.State == "Registered" {
+		return nil
+	}
+
+	// Register and wait up to 5 minutes for the registration to finish
+	_, err = featuresClient.Register(ctx, resourceProvider, featureName, nil)
+	if err != nil {
+		return fmt.Errorf("EncryptionAtHost registration failed: %v", err)
+	}
+	const maxRetries = 10
+	for i := 0; i < maxRetries; i++ {
+		feature, err := featuresClient.Get(ctx, resourceProvider, featureName, nil)
+		if err != nil {
+			return fmt.Errorf("Failed to get feature: %v", err)
+		}
+
+		if *feature.Properties.State == "Registered" {
+			return nil
+		}
+		time.Sleep(30 * time.Second)
+	}
+	return fmt.Errorf("timed out waiting for EncryptionAtHost registration")
+}
diff --git a/test/util/azure_creds.go b/test/util/azure_creds.go
new file mode 100644
index
000000000..2a9dde356 --- /dev/null +++ b/test/util/azure_creds.go @@ -0,0 +1,184 @@ +package util + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + "reflect" + "strings" + + o "github.com/onsi/gomega" +) + +const ( + AzureCredsLocationEnv = "AZURE_AUTH_LOCATION" +) + +type AzureCredentials struct { + AzureClientID string `json:"azure_client_id,omitempty"` + AzureClientSecret string `json:"azure_client_secret,omitempty" sensitive:"true"` + AzureSubscriptionID string `json:"azure_subscription_id,omitempty"` + AzureTenantID string `json:"azure_tenant_id,omitempty"` + AzureResourceGroup string `json:"azure_resourcegroup,omitempty"` + AzureResourcePrefix string `json:"azure_resource_prefix,omitempty"` + AzureRegion string `json:"azure_region"` + + decoded bool +} + +func NewEmptyAzureCredentials() *AzureCredentials { + return &AzureCredentials{} +} + +func (ac *AzureCredentials) String() string { + return AzureCredentialsStructToString(*ac) +} + +func (ac *AzureCredentials) GetFromClusterAndDecode(oc *CLI) error { + if err := ac.getFromCluster(oc); err != nil { + return fmt.Errorf("error getting credentials from the cluster: %v", err) + } + if err := ac.decodeLazy(); err != nil { + return fmt.Errorf("error decoding the credentials: %v", err) + } + return nil +} + +// SetSdkEnvVars sets some environment variables used by azure-sdk-for-go. +// See https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azidentity/README.md#environment-variables. +func (ac *AzureCredentials) SetSdkEnvVars() error { + if err := ac.decodeLazy(); err != nil { + return fmt.Errorf("error setting environment variables used by azure-sdk-for-go: %v", err) + } + return errors.Join( + os.Setenv("AZURE_CLIENT_ID", ac.AzureClientID), + os.Setenv("AZURE_CLIENT_SECRET", ac.AzureClientSecret), + os.Setenv("AZURE_TENANT_ID", ac.AzureTenantID), + ) +} + +func (ac *AzureCredentials) getFromCluster(oc *CLI) error { + stdout, _, err := oc.AsAdmin().WithoutNamespace().Run("get"). 
+ Args("secret/azure-credentials", "-n", "kube-system", "-o=jsonpath={.data}").Outputs() + if err != nil { + return fmt.Errorf("error getting in-cluster root credentials: %v", err) + } + + if err = json.Unmarshal([]byte(stdout), ac); err != nil { + return fmt.Errorf("error unmarshaling in-cluster root credentials: %v", err) + } + ac.decoded = false + return nil +} + +func (ac *AzureCredentials) decodeLazy() error { + if ac.decoded { + return nil + } + return ac.decode() +} + +func (ac *AzureCredentials) decode() error { + v := reflect.ValueOf(ac).Elem() + t := v.Type() + for i := 0; i < v.NumField(); i++ { + if !t.Field(i).IsExported() { + continue + } + + field := v.Field(i) + for field.Kind() == reflect.Ptr { + field = field.Elem() + } + if field.Kind() != reflect.String { + continue + } + + decoded, err := base64.StdEncoding.DecodeString(field.String()) + if err != nil { + return fmt.Errorf("error performing base64 decoding: %v", err) + } + field.SetString(string(decoded)) + } + ac.decoded = true + return nil +} + +type AzureCredentialsFromFile struct { + AzureClientID string `json:"clientId,omitempty"` + AzureClientSecret string `json:"clientSecret,omitempty" sensitive:"true"` + AzureSubscriptionID string `json:"subscriptionId,omitempty"` + AzureTenantID string `json:"tenantId,omitempty"` +} + +func NewEmptyAzureCredentialsFromFile() *AzureCredentialsFromFile { + return &AzureCredentialsFromFile{} +} + +func (ac *AzureCredentialsFromFile) String() string { + return AzureCredentialsStructToString(*ac) +} + +func (ac *AzureCredentialsFromFile) LoadFromFile(filePath string) error { + fileData, err := os.ReadFile(filePath) + if err != nil { + return fmt.Errorf("error reading credentials file: %v", err) + } + fileData = bytes.ReplaceAll(fileData, []byte("azure_subscription_id"), []byte("subscriptionId")) + fileData = bytes.ReplaceAll(fileData, []byte("azure_client_id"), []byte("clientId")) + fileData = bytes.ReplaceAll(fileData, []byte("azure_client_secret"), []byte("clientSecret")) + fileData = bytes.ReplaceAll(fileData, []byte("azure_tenant_id"), []byte("tenantId")) + if err = json.Unmarshal(fileData, ac); err != nil { + return fmt.Errorf("error unmarshaling credentials file: %v", err) + } + return nil +} + +// SetSdkEnvVars sets some environment variables used by azure-sdk-for-go. +// See https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azidentity/README.md#environment-variables. +func (ac *AzureCredentialsFromFile) SetSdkEnvVars() error { + return errors.Join( + os.Setenv("AZURE_CLIENT_ID", ac.AzureClientID), + os.Setenv("AZURE_CLIENT_SECRET", ac.AzureClientSecret), + os.Setenv("AZURE_TENANT_ID", ac.AzureTenantID), + ) +} + +func AzureCredentialsStructToString[T any](s T) string { + var sb strings.Builder + v := reflect.ValueOf(s) + t := v.Type() + for i := 0; i < v.NumField(); i++ { + fieldType := t.Field(i) + if !fieldType.IsExported() { + continue + } + // Censorship + if tag, ok := fieldType.Tag.Lookup("sensitive"); ok && tag == "true" { + continue + } + + if i > 0 { + sb.WriteString("\n") + } + sb.WriteString(fmt.Sprintf("%s: %v", fieldType.Name, v.Field(i))) + } + return sb.String() +} + +func GetAzureCredsLocation() (string, error) { + credsLocation := os.Getenv(AzureCredsLocationEnv) + if len(credsLocation) == 0 { + return "", fmt.Errorf("found empty azure credentials location. 
Please export %s=", AzureCredsLocationEnv)
+	}
+	return credsLocation, nil
+}
+
+func MustGetAzureCredsLocation() string {
+	credsLocation, err := GetAzureCredsLocation()
+	o.Expect(err).NotTo(o.HaveOccurred(), "failed to get azure credentials location")
+	return credsLocation
+}
diff --git a/test/util/bootstrap/aws.go b/test/util/bootstrap/aws.go
new file mode 100644
index 000000000..21aa898eb
--- /dev/null
+++ b/test/util/bootstrap/aws.go
@@ -0,0 +1,79 @@
+package bootstrap
+
+import (
+	"os"
+
+	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
+	clusterinfra "github.com/openshift/openshift-tests-private/test/extended/util/clusterinfra"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+const (
+	// EnvVarSSHCloudPrivAWSUser stores the environment variable used to configure the AWS ssh user
+	EnvVarSSHCloudPrivAWSUser = "SSH_CLOUD_PRIV_AWS_USER"
+)
+
+// AWSBSInfoProvider implements interface BSInfoProvider
+type AWSBSInfoProvider struct{}
+
+// GetIPs returns the IPs of the bootstrap machine if this machine exists in AWS
+func (a AWSBSInfoProvider) GetIPs(oc *exutil.CLI) (*Ips, error) {
+	// Extract the infrastructure name from the cluster infrastructure resource
+	infraName, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.infrastructureName}").Output()
+	if err != nil {
+		e2e.Logf("Could not get bootstrap's IP in AWS. Unable to get infrastructure's name. Error: %s", err)
+		return nil, err
+	}
+
+	bootstrapName := infraName + "-bootstrap"
+
+	clusterinfra.GetAwsCredentialFromCluster(oc.AsAdmin())
+	aws := exutil.InitAwsSession()
+	bootstrapInstanceID, err := aws.GetAwsInstanceID(bootstrapName)
+	if err != nil {
+		// If the instance cannot be found but no other error happens, we return an &InstanceNotFound error so that
+		// it can be used to skip the test case if no bootstrap is present
+		if notFoundErr, notFound := err.(*exutil.AWSInstanceNotFound); notFound {
+			return nil, &InstanceNotFound{notFoundErr.InstanceName}
+		}
+		e2e.Logf("Could not get bootstrap's IP in AWS. Unable to get bootstrap instance ID from infrastructure name '%s'. Error: %s",
+			infraName, err)
+		return nil, err
+
+	}
+
+	state, err := aws.GetAwsInstanceState(bootstrapInstanceID)
+	if err != nil {
+		e2e.Logf("Could not get bootstrap's IP in AWS. Unable to get state for bootstrap instance ID '%s'. Error: %s",
+			bootstrapInstanceID, err)
+		return nil, err
+
+	}
+
+	if state == "terminated" {
+		e2e.Logf("Bootstrap instance's state: %s", state)
+		// If the found instance is terminated, return an &InstanceNotFound error so that
+		// it can be used to skip the test case if no bootstrap is present
+		return nil, &InstanceNotFound{bootstrapInstanceID}
+	}
+
+	bootstrapIP, err := aws.GetAwsIntIPs(bootstrapInstanceID)
+	if err != nil {
+		e2e.Logf("Could not get bootstrap's IP in AWS. Unable to get bootstrap IPs from instance ID '%s'. Error: %s",
+			bootstrapInstanceID, err)
+		return nil, err
+	}
+
+	return &Ips{PublicIPAddress: bootstrapIP["publicIP"], PrivateIPAddress: bootstrapIP["privateIP"]}, nil
+}
+
+// GetSSHUser returns the user needed to connect to the bootstrap machine via ssh
+func (a AWSBSInfoProvider) GetSSHUser() string {
+	user, exists := os.LookupEnv(EnvVarSSHCloudPrivAWSUser)
+
+	if !exists {
+		user = DefaultSSHUser
+	}
+
+	return user
+}
diff --git a/test/util/bootstrap/azure.go b/test/util/bootstrap/azure.go
new file mode 100644
index 000000000..001dc8054
--- /dev/null
+++ b/test/util/bootstrap/azure.go
@@ -0,0 +1,103 @@
+package bootstrap
+
+import (
+	"fmt"
+	"os"
+
+	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+const (
+	// EnvVarSSHCloudPrivAzureUser stores the environment variable used to configure the Azure ssh user
+	EnvVarSSHCloudPrivAzureUser = "SSH_CLOUD_PRIV_AZURE_USER"
+)
+
+// AzureBSInfoProvider implements interface BSInfoProvider
+type AzureBSInfoProvider struct{}
+
+// GetIPs returns the IPs of the bootstrap machine if this machine exists in Azure
+func (a AzureBSInfoProvider) GetIPs(oc *exutil.CLI) (*Ips, error) {
+	// Extract the infrastructure name from the cluster infrastructure resource
+	infraName, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.infrastructureName}").Output()
+	if err != nil {
+		e2e.Logf("Could not get bootstrap's IP in Azure. Unable to get infrastructure's name. Error: %s", err)
+		return nil, err
+	}
+
+	bootstrapName := infraName + "-bootstrap"
+
+	resourceGroupName, err := exutil.GetAzureCredentialFromCluster(oc.AsAdmin())
+	if err != nil {
+		e2e.Logf("Error reading cluster's azure credentials")
+		return nil, err
+	}
+
+	azSession, err := exutil.NewAzureSessionFromEnv()
+	if err != nil {
+		e2e.Logf("Error creating a new azure session")
+		return nil, err
+	}
+
+	bootstrapInstanceID, err := exutil.GetAzureVMInstance(azSession, bootstrapName, resourceGroupName)
+	if err != nil {
+		e2e.Logf("Could not get bootstrap's IP in Azure. Unable to get bootstrap instance ID from infrastructure name '%s'. Error: %s",
+			infraName, err)
+		return nil, err
+
+	}
+	e2e.Logf("Instance vm %s", bootstrapInstanceID)
+	// If the instance cannot be found but no other error happens, we return an &InstanceNotFound error so that
+	// it can be used to skip the test case if no bootstrap is present
+	if bootstrapInstanceID == "" {
+		return nil, &InstanceNotFound{bootstrapName}
+	}
+
+	state, err := exutil.GetAzureVMInstanceState(azSession, bootstrapInstanceID, resourceGroupName)
+	if err != nil {
+		e2e.Logf("Could not get bootstrap's IP in Azure. Unable to get state for bootstrap instance ID '%s' in resource group '%s'. Error: %s",
+			bootstrapInstanceID, resourceGroupName, err)
+		return nil, err
+
+	}
+
+	if state != "running" {
+		e2e.Logf("Bootstrap instance's state: %s", state)
+		// If the found instance is not running, return an &InstanceNotFound error so that
+		// it can be used to skip the test case if no bootstrap is present
+		return nil, &InstanceNotFound{bootstrapInstanceID}
+	}
+
+	// In ipi deployments the name of the public IP is xxxx-bootstrap-pip-v4 and in upi deployments it is xxx-bootstrap-ssh-pip,
+	// so we need to use a regex search in order to get the IP no matter whether the deployment is upi or ipi
+	bootstrapPublicIP, err := exutil.GetAzureVMPublicIPByNameRegex(azSession, resourceGroupName, bootstrapInstanceID)
+	if err != nil {
+		e2e.Logf("Could not get bootstrap's public IP in Azure. Unable to get bootstrap public IP from instance ID '%s' in resource group '%s'. Error: %s",
+			bootstrapInstanceID, resourceGroupName, err)
+		return nil, err
+	}
+
+	if bootstrapPublicIP == "" {
+		return nil, fmt.Errorf("No public IP is assigned for the bootstrap machine %s in resource group %s", bootstrapInstanceID, resourceGroupName)
+	}
+
+	bootstrapPrivateIP, err := exutil.GetAzureVMPrivateIP(azSession, resourceGroupName, bootstrapInstanceID)
+	if err != nil {
+		e2e.Logf("Could not get bootstrap's private IP in Azure. Unable to get bootstrap private IP from instance ID '%s' in resource group '%s'. Error: %s",
+			bootstrapInstanceID, resourceGroupName, err)
+		return nil, err
+	}
+
+	return &Ips{PublicIPAddress: bootstrapPublicIP, PrivateIPAddress: bootstrapPrivateIP}, nil
+}
+
+// GetSSHUser returns the user needed to connect to the bootstrap machine via ssh
+func (a AzureBSInfoProvider) GetSSHUser() string {
+	user, exists := os.LookupEnv(EnvVarSSHCloudPrivAzureUser)
+
+	if !exists {
+		user = DefaultSSHUser
+	}
+
+	return user
+}
diff --git a/test/util/bootstrap/bootstrap.go b/test/util/bootstrap/bootstrap.go
new file mode 100644
index 000000000..22e33bf89
--- /dev/null
+++ b/test/util/bootstrap/bootstrap.go
@@ -0,0 +1,88 @@
+package bootstrap
+
+import (
+	"fmt"
+	"os"
+
+	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+const (
+	// EnvVarSSHCloudPrivKey stores the environment variable used to configure the location of the ssh private key to connect to the bootstrap machine
+	EnvVarSSHCloudPrivKey = "SSH_CLOUD_PRIV_KEY"
+	// DefaultSSHUser is the default user in case none is configured via envvar
+	DefaultSSHUser = "core"
+)
+
+// InstanceNotFound reports an error because the bootstrap instance is not found. It can be used to skip the test case
+type InstanceNotFound struct{ InstanceName string }
+
+// Error implements the error interface
+func (inferr *InstanceNotFound) Error() string {
+	return fmt.Sprintf("Instance %s was not found or is no longer running", inferr.InstanceName)
+}
+
+// BSInfoProvider is implemented by any struct that can be used to create a Bootstrap object.
+// It is currently implemented by AWSBSInfoProvider and AzureBSInfoProvider.
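+// Consumers normally obtain a Bootstrap through GetBootstrap below; a sketch
+// (illustrative, assuming the cluster still has a bootstrap machine):
+//
+//	bs, err := GetBootstrap(oc)
+//	if err != nil {
+//		// an *InstanceNotFound error here usually means the test should be skipped
+//	}
+//	e2e.Logf("bootstrap public IP: %s", bs.IPs.PublicIPAddress)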
+type BSInfoProvider interface {
+	GetIPs(*exutil.CLI) (*Ips, error)
+	GetSSHUser() string
+}
+
+// Bootstrap contains the functionality regarding the bootstrap machine
+type Bootstrap struct {
+	SSH exutil.SshClient
+	IPs Ips
+}
+
+// Ips stores the public and the private IPs of the bootstrap machine
+type Ips struct {
+	PrivateIPAddress string
+	PublicIPAddress  string
+}
+
+// GetBootstrap returns a Bootstrap struct pointing to the bootstrap machine, if it exists
+func GetBootstrap(oc *exutil.CLI) (*Bootstrap, error) {
+	bsInfoProvider, err := GetBSInfoProvider(oc)
+	if err != nil {
+		return nil, err
+	}
+
+	bootstrapIPs, err := bsInfoProvider.GetIPs(oc.AsAdmin())
+	if err != nil {
+		return nil, err
+	}
+
+	user := bsInfoProvider.GetSSHUser()
+
+	return buildBootstrap(user, *bootstrapIPs, 22), nil
+}
+
+// GetBSInfoProvider returns a struct implementing BSInfoProvider for the current platform
+func GetBSInfoProvider(oc *exutil.CLI) (BSInfoProvider, error) {
+	platform := exutil.CheckPlatform(oc)
+	switch platform {
+	case "aws":
+		return AWSBSInfoProvider{}, nil
+	case "azure":
+		return AzureBSInfoProvider{}, nil
+	default:
+		return nil, fmt.Errorf("Platform not supported yet. Cannot get bootstrap information for platform: %s", platform)
+	}
+}
+
+// GetBootstrapPrivateKey returns the location of the private key needed to log in to the bootstrap machine
+func GetBootstrapPrivateKey() string {
+	return os.Getenv(EnvVarSSHCloudPrivKey)
+}
+
+func buildBootstrap(user string, bootstrapIPs Ips, port int) *Bootstrap {
+	privateKey := GetBootstrapPrivateKey()
+	publicIP := bootstrapIPs.PublicIPAddress
+	e2e.Logf("Creating bootstrap with ip '%s', user: '%s', private key: '%s', port '%d'",
+		publicIP, user, privateKey, port)
+	return &Bootstrap{SSH: exutil.SshClient{User: user, Host: publicIP, Port: port, PrivateKey: privateKey},
+		IPs: bootstrapIPs}
+}
diff --git a/test/util/ccoctl.go b/test/util/ccoctl.go
new file mode 100644
index 000000000..442b7b3ec
--- /dev/null
+++ b/test/util/ccoctl.go
@@ -0,0 +1,69 @@
+package util
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+
+	o "github.com/onsi/gomega"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+// ExtractCcoctl extracts the specified version of the ccoctl binary from the given release image.
+// It supports different versions of ccoctl for RHEL environments, including:
+// - "ccoctl"
+// - "ccoctl.rhel8" (RHEL 8 version)
+// - "ccoctl.rhel9" (RHEL 9 version)
+//
+// Usage example:
+//
+//	ccoctlTarget := "ccoctl.rhel8"
+//	ccoctlPath := exutil.ExtractCcoctl(oc, testOCPImage, ccoctlTarget)
+//	defer os.RemoveAll(filepath.Dir(ccoctlPath))
+//
+// Parameters:
+// - oc: CLI object to interact with OpenShift commands.
+// - releaseImage: The OpenShift release image from which to extract the ccoctl binary.
+// - ccoctlTarget: The target ccoctl version to extract (e.g., "ccoctl" (default), "ccoctl.rhel8", "ccoctl.rhel9").
+//
+// Returns:
+// - A string containing the file path of the extracted ccoctl binary.
+func ExtractCcoctl(oc *CLI, releaseImage, ccoctlTarget string) string {
+	e2e.Logf("Extracting ccoctl from release image %v ...", releaseImage)
+	dirname := "/tmp/" + oc.Namespace() + "-ccoctl"
+	err := os.MkdirAll(dirname, 0777)
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+	e2e.Logf("Extracting the pull secret file")
+	pullSecretDirName := "/tmp/" + oc.Namespace() + "-auth"
+	err = os.MkdirAll(pullSecretDirName, 0777)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	defer os.RemoveAll(pullSecretDirName)
+
+	err = GetPullSec(oc, pullSecretDirName)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	pullSecretFile := filepath.Join(pullSecretDirName, ".dockerconfigjson")
+	e2e.Logf("Generated pullSecretFile: %s", pullSecretFile)
+
+	e2e.Logf("Extracting CCO image from release image")
+	ccoImage, err := oc.AsAdmin().WithoutNamespace().Run("adm").Args("release", "info", "--registry-config", pullSecretFile, "--image-for=cloud-credential-operator", releaseImage).Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	o.Expect(ccoImage).NotTo(o.BeEmpty())
+	e2e.Logf("CCO Image: %s", ccoImage)
+
+	e2e.Logf("Extracting ccoctl binary from cco image")
+	_, err = oc.AsAdmin().WithoutNamespace().Run("image").Args("extract", ccoImage, "--registry-config", pullSecretFile, "--path=/usr/bin/"+ccoctlTarget+":"+dirname, "--confirm").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	ccoctlPath := filepath.Join(dirname, ccoctlTarget)
+	err = os.Chmod(ccoctlPath, 0775)
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+	e2e.Logf("Making sure ccoctl is functional")
+	outputBytes1, err := exec.Command("bash", "-c", fmt.Sprintf("%s --help", ccoctlPath)).CombinedOutput()
+	e2e.Logf("ccoctl --help output: %s", string(outputBytes1))
+	o.Expect(err).NotTo(o.HaveOccurred())
+	e2e.Logf("ccoctl path %s", ccoctlPath)
+
+	return ccoctlPath
+}
diff --git a/test/util/client.go b/test/util/client.go
new file mode 100644
index 000000000..eeb99e3af
--- /dev/null
+++ b/test/util/client.go
@@ -0,0 +1,1394 @@
+package util
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"crypto/sha256"
+	"crypto/tls"
+	"encoding/base64"
+	"encoding/gob"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"math/rand"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"regexp"
+	"runtime/debug"
+	"strings"
+	"time"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/dynamic"
+
+	g "github.com/onsi/ginkgo/v2"
+	o "github.com/onsi/gomega"
+	"github.com/pborman/uuid"
+	"github.com/tidwall/gjson"
+	crdv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
+
+	e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
+
+	kubeauthorizationv1 "k8s.io/api/authorization/v1"
+	corev1 "k8s.io/api/core/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apimachinery/pkg/watch"
+	"k8s.io/apiserver/pkg/storage/names"
+	memory "k8s.io/client-go/discovery/cached"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/restmapper"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/tools/clientcmd"
+	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
+	watchtools "k8s.io/client-go/tools/watch"
+	"k8s.io/client-go/util/flowcontrol"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+
+	configv1 "github.com/openshift/api/config/v1"
+	oauthv1 "github.com/openshift/api/oauth/v1"
+	projectv1 "github.com/openshift/api/project/v1"
"github.com/openshift/api/project/v1" + userv1 "github.com/openshift/api/user/v1" + appsv1client "github.com/openshift/client-go/apps/clientset/versioned" + authorizationv1client "github.com/openshift/client-go/authorization/clientset/versioned" + buildv1client "github.com/openshift/client-go/build/clientset/versioned" + configv1client "github.com/openshift/client-go/config/clientset/versioned" + imagev1client "github.com/openshift/client-go/image/clientset/versioned" + oauthv1client "github.com/openshift/client-go/oauth/clientset/versioned" + operatorv1client "github.com/openshift/client-go/operator/clientset/versioned" + projectv1client "github.com/openshift/client-go/project/clientset/versioned" + quotav1client "github.com/openshift/client-go/quota/clientset/versioned" + routev1client "github.com/openshift/client-go/route/clientset/versioned" + securityv1client "github.com/openshift/client-go/security/clientset/versioned" + templatev1client "github.com/openshift/client-go/template/clientset/versioned" + userv1client "github.com/openshift/client-go/user/clientset/versioned" +) + +// CLI provides function to call the OpenShift CLI and Kubernetes and OpenShift +// clients. +type CLI struct { + execPath string + verb string + configPath string + guestConfigPath string + adminConfigPath string + username string + globalArgs []string + commandArgs []string + finalArgs []string + namespacesToDelete []string + stdin *bytes.Buffer + stdout io.Writer + stderr io.Writer + verbose bool + showInfo bool + withoutNamespace bool + withoutKubeconf bool + asGuestKubeconf bool + kubeFramework *e2e.Framework + + resourcesToDelete []resourceRef + pathsToDelete []string +} + +type resourceRef struct { + Resource schema.GroupVersionResource + Namespace string + Name string +} + +// NewCLI initialize the upstream E2E framework and set the namespace to match +// with the project name. Note that this function does not initialize the project +// role bindings for the namespace. +func NewCLI(project, adminConfigPath string) *CLI { + client := &CLI{} + + // must be registered before + // - framework initialization which registers other Ginkgo setup nodes + // - project setup which requires an OCP cluster + g.BeforeEach(func() { SkipOnOpenShiftNess(true) }) + + // must be registered before the e2e framework aftereach + g.AfterEach(client.TeardownProject) + + client.kubeFramework = e2e.NewDefaultFramework(project) + client.kubeFramework.SkipNamespaceCreation = true + client.username = "admin" + client.execPath = "oc" + client.showInfo = true + client.adminConfigPath = adminConfigPath + + g.BeforeEach(client.SetupProject) + + return client +} + +// NewCLIWithoutNamespace initialize the upstream E2E framework without adding a +// namespace. You may call SetupProject() to create one. +func NewCLIWithoutNamespace(project string) *CLI { + client := &CLI{} + + // must be registered before framework initialization which registers other Ginkgo setup nodes + g.BeforeEach(func() { SkipOnOpenShiftNess(true) }) + + // must be registered before the e2e framework aftereach + g.AfterEach(client.TeardownProject) + + client.kubeFramework = e2e.NewDefaultFramework(project) + client.kubeFramework.SkipNamespaceCreation = true + client.username = "admin" + client.execPath = "oc" + client.adminConfigPath = KubeConfigPath() + client.showInfo = true + return client +} + +// NewCLIForKube initializes a *CLI object which works against Kubernetes clusters. 
+func NewCLIForKube(basename string) *CLI {
+	client := &CLI{}
+
+	// must be registered before framework initialization, which registers other Ginkgo setup nodes
+	g.BeforeEach(func() { SkipOnOpenShiftNess(false) })
+
+	client.adminConfigPath = KubeConfigPath()
+	client.execPath = "oc"
+	client.kubeFramework = e2e.NewDefaultFramework(basename)
+	client.showInfo = true
+	client.username = "admin"
+
+	g.BeforeEach(func() { client.SetKubeconf(DuplicateFileToTemp(client.adminConfigPath, "")) })
+	return client
+}
+
+// NewCLIForKubeOpenShift initializes a *CLI object which works against Kubernetes AND OpenShift clusters.
+func NewCLIForKubeOpenShift(basename string) *CLI {
+	switch IsKubernetesClusterFlag {
+	case "yes":
+		return NewCLIForKube(basename)
+	default:
+		return NewCLI(basename, KubeConfigPath())
+	}
+}
+
+// KubeFramework returns the Kubernetes framework, which contains helper functions
+// specific to Kubernetes resources
+func (c *CLI) KubeFramework() *e2e.Framework {
+	return c.kubeFramework
+}
+
+// Username returns the name of the currently logged-in user. If there is no user assigned
+// for the current session, it returns 'admin'.
+func (c *CLI) Username() string {
+	return c.username
+}
+
+// AsAdmin changes the current config file path to the admin config.
+func (c *CLI) AsAdmin() *CLI {
+	nc := *c
+	nc.configPath = c.adminConfigPath
+	return &nc
+}
+
+// ChangeUser changes the user used by the current CLI session.
+func (c *CLI) ChangeUser(name string) *CLI {
+	clientConfig := c.GetClientConfigForUser(name)
+
+	kubeConfig, err := createConfig(c.Namespace(), clientConfig)
+	if err != nil {
+		FatalErr(err)
+	}
+
+	f, err := os.CreateTemp("", "configfile")
+	if err != nil {
+		FatalErr(err)
+	}
+	c.configPath = f.Name()
+	err = clientcmd.WriteToFile(*kubeConfig, c.configPath)
+	if err != nil {
+		FatalErr(err)
+	}
+
+	c.username = name
+	e2e.Logf("configPath is now %q", c.configPath)
+	return c
+}
+
+// ChangeUserForKeycloakExtOIDC changes the user of the current CLI session for a Keycloak external OIDC cluster
+func (c *CLI) ChangeUserForKeycloakExtOIDC() *CLI {
+	// IsKeycloakExtOIDCCluster() should already have been called to ensure the KEYCLOAK_* env vars passed from Prow CI jobs exist
+	keycloakIssuer := os.Getenv("KEYCLOAK_ISSUER")
+	keycloakTestUsers := os.Getenv("KEYCLOAK_TEST_USERS")
+	// KEYCLOAK_TEST_USERS has a format like "user1:password1,user2:password2,...,usern:passwordn" and n (e.g. 50) is enough for parallel running cases
+	re := regexp.MustCompile(`([^:,]+):([^,]+)`)
+	testUsers := re.FindAllStringSubmatch(keycloakTestUsers, -1)
+	usersTotal := len(testUsers)
+	var username, password string
+	err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, 30*time.Second, true, func(ctx context.Context) (bool, error) {
+		// Pick a random user for the currently running case to use
+		rand.Seed(time.Now().UnixMilli())
+		userIndex := rand.Intn(usersTotal)
+		username = testUsers[userIndex][1]
+		o.Expect(username).NotTo(o.BeEmpty())
+		password = testUsers[userIndex][2]
+		o.Expect(password).NotTo(o.BeEmpty())
+		// We assume the "keycloak" namespace exists in CI jobs that use Keycloak external oidc
+		_, err := c.AsAdmin().WithoutNamespace().Run("create").Args("cm", username+"-being-used", "--from-literal=any=any", "-n", "keycloak").Output()
+		if err != nil {
+			e2e.Logf("Failed to create the configmap '%s-being-used': %v. Retrying ...", username, err)
+			return false, nil
+		}
+		e2e.Logf("Random test user for use: '%s'. Marked it being used via a configmap '%s-being-used'.", username, username)
+		c.AddExplicitResourceToDelete(corev1.SchemeGroupVersion.WithResource("configmaps"), "keycloak", username+"-being-used")
+		return true, nil
+	})
+	AssertWaitPollNoErr(err, "Failed to pick a random user for current running case to use")
+
+	proxy := ""
+	var proxyURL *url.URL
+	if os.Getenv("http_proxy") != "" {
+		proxy = os.Getenv("http_proxy")
+	} else if os.Getenv("https_proxy") != "" {
+		proxy = os.Getenv("https_proxy")
+	}
+
+	if proxy != "" {
+		proxyURL, err = url.Parse(proxy)
+		o.Expect(err).NotTo(o.HaveOccurred())
+	} else {
+		proxyURL = nil
+	}
+
+	httpClient := &http.Client{
+		Transport: &http.Transport{
+			Proxy: http.ProxyURL(proxyURL),
+			TLSClientConfig: &tls.Config{
+				InsecureSkipVerify: true,
+			},
+		},
+	}
+	requestURL := keycloakIssuer + "/protocol/openid-connect/token"
+	oidcClientID := os.Getenv("KEYCLOAK_CLI_CLIENT_ID")
+	o.Expect(oidcClientID).NotTo(o.BeEmpty())
+	formData := url.Values{
+		"client_id":  []string{oidcClientID},
+		"grant_type": []string{"password"},
+		"password":   []string{password},
+		"scope":      []string{"openid email profile"},
+		"username":   []string{username},
+	}
+
+	response, err := httpClient.PostForm(requestURL, formData)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	defer response.Body.Close()
+	o.Expect(response.StatusCode).To(o.Equal(http.StatusOK))
+
+	body, err := io.ReadAll(response.Body)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	responseStr := string(body)
+	idToken := gjson.Get(responseStr, "id_token").String()
+	o.Expect(idToken).NotTo(o.BeEmpty())
+	refreshToken := gjson.Get(responseStr, "refresh_token").String()
+	o.Expect(refreshToken).NotTo(o.BeEmpty())
+	tokenCache := fmt.Sprintf(`{"id_token":"%s","refresh_token":"%s"}`, idToken, refreshToken)
+	// The CI job that uses Keycloak external OIDC already sets a Keycloak token lifetime long enough to run a case.
+
+	// "type Key" is copied from https://github.com/openshift/oc/blob/master/pkg/cli/gettoken/tokencache/tokencache.go
+	// We must keep the definition of "type Key" exactly the same as in the original oc repo so that EncodeToString generates correct output
+	type Key struct {
+		IssuerURL string
+		ClientID  string
+	}
+
+	key := Key{IssuerURL: keycloakIssuer, ClientID: oidcClientID}
+	s := sha256.New()
+	e := gob.NewEncoder(s)
+	if err := e.Encode(&key); err != nil {
+		FatalErr(fmt.Errorf("could not encode the key: %w", err))
+	}
+	tokenCacheFile := hex.EncodeToString(s.Sum(nil))
+	tokenCacheDir, err := os.MkdirTemp("", username)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	c.AddPathsToDelete(tokenCacheDir)
+	err = os.Mkdir(tokenCacheDir+"/oc", 0700)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	// The token cache dir only contains one small file; it is cleaned up via AddPathsToDelete above.
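+	// For readers: oc resolves the cached token at <tokenCacheDir>/oc/<sha256 of the gob-encoded Key>,
+	// which is why tokenCacheFile above must be computed exactly the way oc computes it for the same
+	// issuer URL and client ID (see the tokencache.go reference above).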
So we don't clean it given no easy way + err = os.WriteFile(filepath.Join(tokenCacheDir, "oc", tokenCacheFile), []byte(tokenCache), 0600) + o.Expect(err).NotTo(o.HaveOccurred()) + + clientConfig := c.GetClientConfigForExtOIDCUser(tokenCacheDir) + kubeConfig, err := createConfig(c.Namespace(), clientConfig) + if err != nil { + FatalErr(err) + } + + f, err := os.CreateTemp("", "configfile") + if err != nil { + FatalErr(err) + } + c.configPath = f.Name() + err = clientcmd.WriteToFile(*kubeConfig, c.configPath) + if err != nil { + FatalErr(err) + } + + usernameWhoAmI, err := getUserPartOfNickname(clientConfig) + if err != nil { + FatalErr(err) + } + c.username = usernameWhoAmI + e2e.Logf("configPath is now %q", c.configPath) + return c +} + +// SetNamespace sets a new namespace +func (c *CLI) SetNamespace(ns string) *CLI { + c.kubeFramework.Namespace = &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: ns, + }, + } + return c +} + +// IsNamespacePrivileged returns bool +// Judge whether the input namespace has the privileged label +// Privileged label: "pod-security.kubernetes.io/enforce=privileged" +func IsNamespacePrivileged(oc *CLI, namespace string) (bool, error) { + nsSecurityLabelValue, err := GetResourceSpecificLabelValue(oc, "ns/"+namespace, "", "pod-security\\.kubernetes\\.io/enforce") + if err != nil { + e2e.Logf(`Failed to get label "pod-security.kubernetes.io/enforce" value from ns/%s: "%v"`, namespace, err) + return false, err + } + return strings.Contains(nsSecurityLabelValue, "privileged"), nil +} + +// SetNamespacePrivileged adds the privileged labels to the input namespace +// Privileged labels: "security.openshift.io/scc.podSecurityLabelSync=false", "pod-security.kubernetes.io/enforce=privileged", +// "pod-security.kubernetes.io/audit=privileged", "pod-security.kubernetes.io/warn=privileged" +// Without audit label "pod-security.kubernetes.io/audit=privileged", an important alert will fire on cluster after pod created +// https://github.com/openshift/cluster-kube-apiserver-operator/pull/1362 +// The warn label "pod-security.kubernetes.io/warn=privileged" is optional, it could make the warning info output gone. 
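+//
+// A minimal usage sketch (illustrative only; the *CLI value and target namespace come from the calling test):
+//
+//	if err := SetNamespacePrivileged(oc, oc.Namespace()); err != nil {
+//		e2e.Failf("failed to label ns/%s as privileged: %v", oc.Namespace(), err)
+//	}
+//
+// Pairing this with a deferred RecoverNamespaceRestricted call restores the restricted labels afterwards.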
+func SetNamespacePrivileged(oc *CLI, namespace string) error {
+	_, labeledError := AddLabelsToSpecificResource(oc, "ns/"+namespace, "", "security.openshift.io/scc.podSecurityLabelSync=false",
+		"pod-security.kubernetes.io/enforce=privileged", "pod-security.kubernetes.io/audit=privileged", "pod-security.kubernetes.io/warn=privileged")
+	if labeledError != nil {
+		e2e.Logf(`Failed to add privileged labels to ns/%s: "%v"`, namespace, labeledError)
+		return labeledError
+	}
+	return nil
+}
+
+// RecoverNamespaceRestricted removes the privileged labels from the input namespace
+func RecoverNamespaceRestricted(oc *CLI, namespace string) error {
+	_, unlabeledError := DeleteLabelsFromSpecificResource(oc, "ns/"+namespace, "", "security.openshift.io/scc.podSecurityLabelSync",
+		"pod-security.kubernetes.io/enforce", "pod-security.kubernetes.io/audit", "pod-security.kubernetes.io/warn")
+	if unlabeledError != nil {
+		e2e.Logf(`Failed to recover restricted labels for ns/%s: "%v"`, namespace, unlabeledError)
+		return unlabeledError
+	}
+	return nil
+}
+
+// GetKubeconf returns the path of the kubeconfig file currently in use
+func (c *CLI) GetKubeconf() string {
+	return c.configPath
+}
+
+// NotShowInfo instructs that the executed command will not be logged
+func (c *CLI) NotShowInfo() *CLI {
+	c.showInfo = false
+	return c
+}
+
+// SetShowInfo instructs that the executed command will be logged
+func (c *CLI) SetShowInfo() *CLI {
+	c.showInfo = true
+	return c
+}
+
+// SetKubeconf sets the cluster kubeconfig file
+func (c *CLI) SetKubeconf(kubeconf string) *CLI {
+	c.configPath = kubeconf
+	return c
+}
+
+// SetGuestKubeconf sets the guest cluster kubeconfig file
+func (c *CLI) SetGuestKubeconf(guestKubeconf string) *CLI {
+	c.guestConfigPath = guestKubeconf
+	return c
+}
+
+// GetGuestKubeconf gets the guest cluster kubeconfig file
+func (c *CLI) GetGuestKubeconf() string {
+	return c.guestConfigPath
+}
+
+// SetAdminKubeconf sets the admin cluster kubeconfig file
+func (c *CLI) SetAdminKubeconf(adminKubeconf string) *CLI {
+	c.adminConfigPath = adminKubeconf
+	return c
+}
+
+// WithoutNamespace instructs the command to be invoked without the --namespace parameter
+func (c CLI) WithoutNamespace() *CLI {
+	c.withoutNamespace = true
+	return &c
+}
+
+// WithoutKubeconf instructs the command to be invoked without the --kubeconfig parameter
+func (c CLI) WithoutKubeconf() *CLI {
+	c.withoutKubeconf = true
+	return &c
+}
+
+// WithKubectl instructs the command to be invoked with the kubectl binary, not oc.
+func (c CLI) WithKubectl() *CLI {
+	c.execPath = "kubectl"
+	return &c
+}
+
+// AsGuestKubeconf instructs the command to use the kubeconfig of the guest cluster
+func (c CLI) AsGuestKubeconf() *CLI {
+	c.asGuestKubeconf = true
+	// Using the guest cluster config to operate the guest cluster requires withoutNamespace to be true
+	// (as if WithoutNamespace had been called), so the namespace of the management cluster is not used;
+	// the guest cluster namespace has to be passed explicitly in Args.
+	c.withoutNamespace = true
+	return &c
+}
+
+// SetupProject initializes and transitions to a new project.
+// All resources created henceforth will reside within this project.
+// For clusters that are not using external OIDC, it also generates and switches to a random temporary user.
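+//
+// One possible call site (a sketch only; the constructor and basename are this example's assumptions):
+//
+//	oc := NewCLIWithoutNamespace("example")
+//	oc.SetupProject()          // creates the project and, on non-external-OIDC clusters, a temporary user
+//	defer oc.TeardownProject() // removes the temporary kubeconfig and any registered resources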
+func (c *CLI) SetupProject() {
+	newNamespace := names.SimpleNameGenerator.GenerateName(fmt.Sprintf("e2e-test-%s-", c.kubeFramework.BaseName))
+	c.SetNamespace(newNamespace)
+
+	isExternalOIDCCluster, err := IsExternalOIDCCluster(c)
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+	// The user.openshift.io and oauth.openshift.io APIs are unavailable on external OIDC clusters by design.
+	// We will create and switch to a temporary user for non-external-OIDC clusters only.
+	//
+	// For all cluster types, a temporary KUBECONFIG file will be created (with c.configPath pointing to it).
+	// This file will be deleted when calling TeardownProject().
+	if isExternalOIDCCluster {
+		if IsKeycloakExtOIDCCluster() {
+			c.ChangeUserForKeycloakExtOIDC()
+			e2e.Logf("Detected external OIDC cluster using Keycloak as the provider. The user is now %q", c.Username())
+		} else {
+			// The external OIDC provider is Microsoft Entra ID
+			// Clear the username to avoid manipulating users (e.g. oc adm policy ...)
+			c.username = ""
+			c.SetKubeconf(DuplicateFileToTemp(c.adminConfigPath, ""))
+			e2e.Logf("External OIDC cluster detected, keep using the current user")
+		}
+	} else {
+		c.ChangeUser(fmt.Sprintf("%s-user", newNamespace))
+		e2e.Logf("The user is now %q", c.Username())
+	}
+
+	e2e.Logf("Creating project %q", newNamespace)
+	_, err = c.ProjectClient().ProjectV1().ProjectRequests().Create(context.Background(), &projectv1.ProjectRequest{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: newNamespace,
+		},
+	}, metav1.CreateOptions{})
+	o.Expect(err).NotTo(o.HaveOccurred())
+	c.kubeFramework.AddNamespacesToDelete(&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: newNamespace}})
+
+	e2e.Logf("Waiting on permissions in project %q ...", newNamespace)
+	err = WaitForSelfSAR(1*time.Second, 60*time.Second, c.KubeClient(), kubeauthorizationv1.SelfSubjectAccessReviewSpec{
+		ResourceAttributes: &kubeauthorizationv1.ResourceAttributes{
+			Namespace: newNamespace,
+			Verb:      "create",
+			Group:     "",
+			Resource:  "pods",
+		},
+	})
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+	// Wait for SAs and default dockercfg Secret to be injected
+	// TODO: it would be nice to have a shared list but it is defined in at least 3 places,
+	// TODO: some of them not even using the constants
+	DefaultServiceAccounts := []string{
+		"default",
+	}
+	roleBindingNames := []string{}
+	shouldCheckSecret := false
+	clusterVersion, err := c.AdminConfigClient().ConfigV1().ClusterVersions().Get(context.Background(), "version", metav1.GetOptions{})
+	o.Expect(err).NotTo(o.HaveOccurred())
+	checkCapability := func(capabilities []configv1.ClusterVersionCapability, checked configv1.ClusterVersionCapability) bool {
+		for _, capability := range capabilities {
+			if capability == checked {
+				return true
+			}
+		}
+		return false
+	}
+	imageRegistryRemoved := func() bool {
+		pods, err := c.AdminKubeClient().CoreV1().Pods("openshift-image-registry").List(context.Background(), metav1.ListOptions{LabelSelector: "docker-registry=default"})
+		if err != nil {
+			return true
+		}
+		if len(pods.Items) > 0 {
+			return false
+		}
+		return true
+	}
+	if clusterVersion.Status.Capabilities.KnownCapabilities == nil ||
+		!checkCapability(clusterVersion.Status.Capabilities.KnownCapabilities, configv1.ClusterVersionCapabilityBuild) ||
+		(clusterVersion.Status.Capabilities.EnabledCapabilities != nil &&
+			checkCapability(clusterVersion.Status.Capabilities.EnabledCapabilities, configv1.ClusterVersionCapabilityBuild)) {
+		DefaultServiceAccounts = append(DefaultServiceAccounts, "builder")
+		roleBindingNames = append(roleBindingNames, "system:image-builders")
+	}
+	if clusterVersion.Status.Capabilities.KnownCapabilities == nil ||
+		!checkCapability(clusterVersion.Status.Capabilities.KnownCapabilities, configv1.ClusterVersionCapabilityDeploymentConfig) ||
+		(clusterVersion.Status.Capabilities.EnabledCapabilities != nil &&
+			checkCapability(clusterVersion.Status.Capabilities.EnabledCapabilities, configv1.ClusterVersionCapabilityDeploymentConfig)) {
+		DefaultServiceAccounts = append(DefaultServiceAccounts, "deployer")
+		roleBindingNames = append(roleBindingNames, "system:deployers")
+	}
+	if (clusterVersion.Status.Capabilities.KnownCapabilities == nil ||
+		!checkCapability(clusterVersion.Status.Capabilities.KnownCapabilities, configv1.ClusterVersionCapabilityImageRegistry) ||
+		(clusterVersion.Status.Capabilities.EnabledCapabilities != nil &&
+			checkCapability(clusterVersion.Status.Capabilities.EnabledCapabilities, configv1.ClusterVersionCapabilityImageRegistry))) && !imageRegistryRemoved() {
+		shouldCheckSecret = true
+		roleBindingNames = append(roleBindingNames, "system:image-pullers")
+	}
+	for _, sa := range DefaultServiceAccounts {
+		e2e.Logf("Waiting for ServiceAccount %q to be provisioned...", sa)
+		err = WaitForServiceAccount(c.KubeClient().CoreV1().ServiceAccounts(newNamespace), sa, shouldCheckSecret)
+		o.Expect(err).NotTo(o.HaveOccurred())
+	}
+
+	var ctx context.Context
+	cancel := func() {}
+	defer func() { cancel() }()
+	// Wait for default role bindings for those SAs
+	for _, name := range roleBindingNames {
+		e2e.Logf("Waiting for RoleBinding %q to be provisioned...", name)
+
+		ctx, cancel = watchtools.ContextWithOptionalTimeout(context.Background(), 3*time.Minute)
+
+		fieldSelector := fields.OneTermEqualSelector("metadata.name", name).String()
+		lw := &cache.ListWatch{
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				options.FieldSelector = fieldSelector
+				return c.KubeClient().RbacV1().RoleBindings(newNamespace).List(context.Background(), options)
+			},
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				options.FieldSelector = fieldSelector
+				return c.KubeClient().RbacV1().RoleBindings(newNamespace).Watch(context.Background(), options)
+			},
+		}
+
+		_, err := watchtools.UntilWithSync(ctx, lw, &rbacv1.RoleBinding{}, nil, func(event watch.Event) (b bool, e error) {
+			switch t := event.Type; t {
+			case watch.Added, watch.Modified:
+				return true, nil
+
+			case watch.Deleted:
+				return true, fmt.Errorf("object has been deleted")
+
+			default:
+				// report the unexpected event itself, not the (always nil) named error return
+				return true, fmt.Errorf("internal error: unexpected event %#v", event)
+			}
+		})
+		o.Expect(err).NotTo(o.HaveOccurred())
+	}
+
+	e2e.Logf("Project %q has been fully provisioned.", newNamespace)
+}
+
+// CreateNamespaceUDN creates a new namespace with the required user-defined-network label at creation time
+// only; needed for testing networking UDN features on 4.17z+
+func (c *CLI) CreateNamespaceUDN() {
+	newNamespace := names.SimpleNameGenerator.GenerateName(fmt.Sprintf("e2e-test-udn-%s-", c.kubeFramework.BaseName))
+	c.SetNamespace(newNamespace)
+	labelKey := "k8s.ovn.org/primary-user-defined-network"
+	labelValue := "null"
+	e2e.Logf("Creating project %q", newNamespace)
+	namespace := &corev1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:   newNamespace,
+			Labels: map[string]string{labelKey: labelValue},
+		},
+	}
+	// Create the namespace
+	_, err := c.AdminKubeClient().CoreV1().Namespaces().Create(context.TODO(), namespace, metav1.CreateOptions{})
+	o.Expect(err).NotTo(o.HaveOccurred())
+	c.kubeFramework.AddNamespacesToDelete(&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: newNamespace}})
+	e2e.Logf("Namespace %q has been created successfully.", newNamespace)
+}
+
+// CreateSpecificNamespaceUDN creates a UDN namespace with a pre-defined name; the namespace requires the
+// user-defined-network label at creation time only, needed for testing networking UDN features on 4.17z+
+// Important note: the namespace created by this function will not be automatically deleted; users need to
+// explicitly delete the namespace after the test is done
+func (c *CLI) CreateSpecificNamespaceUDN(ns string) {
+	c.SetNamespace(ns)
+	labelKey := "k8s.ovn.org/primary-user-defined-network"
+	labelValue := "null"
+	e2e.Logf("Creating a pre-defined project %q with label for UDN", ns)
+	namespace := &corev1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:   ns,
+			Labels: map[string]string{labelKey: labelValue},
+		},
+	}
+	// Create the namespace
+	_, err := c.AdminKubeClient().CoreV1().Namespaces().Create(context.TODO(), namespace, metav1.CreateOptions{})
+	o.Expect(err).NotTo(o.HaveOccurred())
+	e2e.Logf("Namespace %q has been created successfully.", ns)
+}
+
+// CreateProject creates a new project and assigns a random user to it.
+// All resources will then be created within this project.
+// TODO this should be removed. It's only used by image tests.
+func (c *CLI) CreateProject() string {
+	newNamespace := names.SimpleNameGenerator.GenerateName(fmt.Sprintf("e2e-test-%s-", c.kubeFramework.BaseName))
+	e2e.Logf("Creating project %q", newNamespace)
+	_, err := c.ProjectClient().ProjectV1().ProjectRequests().Create(context.Background(), &projectv1.ProjectRequest{
+		ObjectMeta: metav1.ObjectMeta{Name: newNamespace},
+	}, metav1.CreateOptions{})
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+	actualNs, err := c.AdminKubeClient().CoreV1().Namespaces().Get(context.Background(), newNamespace, metav1.GetOptions{})
+	o.Expect(err).NotTo(o.HaveOccurred())
+	c.kubeFramework.AddNamespacesToDelete(actualNs)
+
+	e2e.Logf("Waiting on permissions in project %q ...", newNamespace)
+	err = WaitForSelfSAR(1*time.Second, 60*time.Second, c.KubeClient(), kubeauthorizationv1.SelfSubjectAccessReviewSpec{
+		ResourceAttributes: &kubeauthorizationv1.ResourceAttributes{
+			Namespace: newNamespace,
+			Verb:      "create",
+			Group:     "",
+			Resource:  "pods",
+		},
+	})
+	o.Expect(err).NotTo(o.HaveOccurred())
+	return newNamespace
+}
+
+// TeardownProject removes projects created by this test.
+func (c *CLI) TeardownProject() {
+	e2e.TestContext.DumpLogsOnFailure = os.Getenv("DUMP_EVENTS_ON_FAILURE") != "false"
+	if len(c.Namespace()) > 0 && g.CurrentSpecReport().Failed() && e2e.TestContext.DumpLogsOnFailure {
+		e2edebug.DumpAllNamespaceInfo(context.TODO(), c.kubeFramework.ClientSet, c.Namespace())
+	}
+
+	if len(c.configPath) > 0 {
+		os.Remove(c.configPath)
+	}
+
+	dynamicClient := c.AdminDynamicClient()
+	for _, resource := range c.resourcesToDelete {
+		err := dynamicClient.Resource(resource.Resource).Namespace(resource.Namespace).Delete(context.Background(), resource.Name, metav1.DeleteOptions{})
+		e2e.Logf("Deleted %v, err: %v", resource, err)
+	}
+	for _, path := range c.pathsToDelete {
+		err := os.RemoveAll(path)
+		e2e.Logf("Deleted path %v, err: %v", path, err)
+	}
+}
+
+// CreateNamespace creates and returns a test namespace, automatically torn down after the test.
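+//
+// Illustrative usage (ctx comes from the caller; labels may be nil when none are needed):
+//
+//	ns, err := oc.CreateNamespace(ctx, nil)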
+func (c *CLI) CreateNamespace(ctx context.Context, labels map[string]string) (*corev1.Namespace, error) { + return c.KubeFramework().CreateNamespace(ctx, c.KubeFramework().BaseName, labels) +} + +// MustCreateNamespace creates a test namespace and fails the test if creation fails. +func (c *CLI) MustCreateNamespace(ctx context.Context, labels map[string]string) *corev1.Namespace { + ns, err := c.CreateNamespace(ctx, labels) + if err != nil { + FatalErr(fmt.Sprintf("failed to create namespace: %v", err)) + } + return ns +} + +// CreateSpecifiedNamespaceAsAdmin creates specified name namespace. +func (c *CLI) CreateSpecifiedNamespaceAsAdmin(namespace string) { + err := c.AsAdmin().WithoutNamespace().Run("create").Args("namespace", namespace).Execute() + o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to create namespace/%s", namespace)) +} + +// DeleteSpecifiedNamespaceAsAdmin deletes specified name namespace. +func (c *CLI) DeleteSpecifiedNamespaceAsAdmin(namespace string) { + err := c.AsAdmin().WithoutNamespace().Run("delete").Args("namespace", namespace).Execute() + e2e.Logf("Deleted namespace/%s, err: %v", namespace, err) +} + +// Verbose turns on printing verbose messages when executing OpenShift commands +func (c *CLI) Verbose() *CLI { + c.verbose = true + return c +} + +// RESTMapper method +func (c *CLI) RESTMapper() meta.RESTMapper { + ret := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(c.KubeClient().Discovery())) + ret.Reset() + return ret +} + +// AppsClient method +func (c *CLI) AppsClient() appsv1client.Interface { + return appsv1client.NewForConfigOrDie(c.UserConfig()) +} + +// AuthorizationClient method +func (c *CLI) AuthorizationClient() authorizationv1client.Interface { + return authorizationv1client.NewForConfigOrDie(c.UserConfig()) +} + +// BuildClient method +func (c *CLI) BuildClient() buildv1client.Interface { + return buildv1client.NewForConfigOrDie(c.UserConfig()) +} + +// ImageClient method +func (c *CLI) ImageClient() imagev1client.Interface { + return imagev1client.NewForConfigOrDie(c.UserConfig()) +} + +// ProjectClient method +func (c *CLI) ProjectClient() projectv1client.Interface { + return projectv1client.NewForConfigOrDie(c.UserConfig()) +} + +// QuotaClient method +func (c *CLI) QuotaClient() quotav1client.Interface { + return quotav1client.NewForConfigOrDie(c.UserConfig()) +} + +// RouteClient method +func (c *CLI) RouteClient() routev1client.Interface { + return routev1client.NewForConfigOrDie(c.UserConfig()) +} + +// TemplateClient method +func (c *CLI) TemplateClient() templatev1client.Interface { + return templatev1client.NewForConfigOrDie(c.UserConfig()) +} + +// AdminAppsClient method +func (c *CLI) AdminAppsClient() appsv1client.Interface { + return appsv1client.NewForConfigOrDie(c.AdminConfig()) +} + +// AdminAuthorizationClient method +func (c *CLI) AdminAuthorizationClient() authorizationv1client.Interface { + return authorizationv1client.NewForConfigOrDie(c.AdminConfig()) +} + +// AdminBuildClient method +func (c *CLI) AdminBuildClient() buildv1client.Interface { + return buildv1client.NewForConfigOrDie(c.AdminConfig()) +} + +// AdminConfigClient method +func (c *CLI) AdminConfigClient() configv1client.Interface { + return configv1client.NewForConfigOrDie(c.AdminConfig()) +} + +// AdminImageClient method +func (c *CLI) AdminImageClient() imagev1client.Interface { + return imagev1client.NewForConfigOrDie(c.AdminConfig()) +} + +// AdminOauthClient method +func (c *CLI) AdminOauthClient() oauthv1client.Interface { 
+	return oauthv1client.NewForConfigOrDie(c.AdminConfig())
+}
+
+// AdminOperatorClient method
+func (c *CLI) AdminOperatorClient() operatorv1client.Interface {
+	return operatorv1client.NewForConfigOrDie(c.AdminConfig())
+}
+
+// AdminProjectClient method
+func (c *CLI) AdminProjectClient() projectv1client.Interface {
+	return projectv1client.NewForConfigOrDie(c.AdminConfig())
+}
+
+// AdminQuotaClient method
+func (c *CLI) AdminQuotaClient() quotav1client.Interface {
+	return quotav1client.NewForConfigOrDie(c.AdminConfig())
+}
+
+// AdminOAuthClient method
+func (c *CLI) AdminOAuthClient() oauthv1client.Interface {
+	return oauthv1client.NewForConfigOrDie(c.AdminConfig())
+}
+
+// AdminRouteClient method
+func (c *CLI) AdminRouteClient() routev1client.Interface {
+	return routev1client.NewForConfigOrDie(c.AdminConfig())
+}
+
+// AdminUserClient method
+func (c *CLI) AdminUserClient() userv1client.Interface {
+	return userv1client.NewForConfigOrDie(c.AdminConfig())
+}
+
+// AdminSecurityClient method
+func (c *CLI) AdminSecurityClient() securityv1client.Interface {
+	return securityv1client.NewForConfigOrDie(c.AdminConfig())
+}
+
+// AdminTemplateClient method
+func (c *CLI) AdminTemplateClient() templatev1client.Interface {
+	return templatev1client.NewForConfigOrDie(c.AdminConfig())
+}
+
+// KubeClient provides a Kubernetes client for the current namespace
+func (c *CLI) KubeClient() kubernetes.Interface {
+	return kubernetes.NewForConfigOrDie(c.UserConfig())
+}
+
+// DynamicClient method
+func (c *CLI) DynamicClient() dynamic.Interface {
+	return dynamic.NewForConfigOrDie(c.UserConfig())
+}
+
+// AdminKubeClient provides a Kubernetes client for the cluster admin user.
+func (c *CLI) AdminKubeClient() kubernetes.Interface {
+	return kubernetes.NewForConfigOrDie(c.AdminConfig())
+}
+
+// GuestKubeClient provides a Kubernetes client for the guest cluster user.
+func (c *CLI) GuestKubeClient() kubernetes.Interface {
+	return kubernetes.NewForConfigOrDie(c.GuestConfig())
+}
+
+// AdminDynamicClient method
+func (c *CLI) AdminDynamicClient() dynamic.Interface {
+	return dynamic.NewForConfigOrDie(c.AdminConfig())
+}
+
+// UserConfig method
+func (c *CLI) UserConfig() *rest.Config {
+	clientConfig, err := getClientConfig(c.configPath)
+	if err != nil {
+		FatalErr(err)
+	}
+	return clientConfig
+}
+
+// AdminConfig method
+func (c *CLI) AdminConfig() *rest.Config {
+	clientConfig, err := getClientConfig(c.adminConfigPath)
+	if err != nil {
+		FatalErr(err)
+	}
+	return clientConfig
+}
+
+// GuestConfig method
+func (c *CLI) GuestConfig() *rest.Config {
+	clientConfig, err := getClientConfig(c.guestConfigPath)
+	if err != nil {
+		FatalErr(err)
+	}
+	return clientConfig
+}
+
+// Namespace returns the name of the namespace used in the current test case.
+// If the namespace is not set, an empty string is returned.
+func (c *CLI) Namespace() string {
+	if c.kubeFramework.Namespace == nil {
+		return ""
+	}
+	return c.kubeFramework.Namespace.Name
+}
+
+// setOutput allows overriding the default command output
+func (c *CLI) setOutput(out io.Writer) *CLI {
+	c.stdout = out
+	return c
+}
+
+// AdminAPIExtensionsV1Client returns a ClientSet for the APIExtensionsV1 API
+func (c *CLI) AdminAPIExtensionsV1Client() crdv1.ApiextensionsV1Interface {
+	return crdv1.NewForConfigOrDie(c.AdminConfig())
+}
+
+// Run executes the given OpenShift CLI command verb (iow. "oc <verb>").
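+// A typical chained invocation looks like the following (illustrative):
+//
+//	out, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", "openshift-cluster-version").Output()
+//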
+// This function also overrides the default 'stdout' to redirect all output
+// to a buffer and prepares the global flags such as namespace and config path.
+func (c *CLI) Run(commands ...string) *CLI {
+	in, out, errout := &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}
+	nc := &CLI{
+		execPath:        c.execPath,
+		verb:            commands[0],
+		kubeFramework:   c.KubeFramework(),
+		adminConfigPath: c.adminConfigPath,
+		configPath:      c.configPath,
+		showInfo:        c.showInfo,
+		guestConfigPath: c.guestConfigPath,
+		username:        c.username,
+		globalArgs:      commands,
+	}
+	if !c.withoutKubeconf {
+		if c.asGuestKubeconf {
+			if c.guestConfigPath != "" {
+				nc.globalArgs = append([]string{fmt.Sprintf("--kubeconfig=%s", c.guestConfigPath)}, nc.globalArgs...)
+			} else {
+				FatalErr("the guest cluster kubeconfig is requested but not set; please call oc.SetGuestKubeconf to set it first")
+			}
+		} else {
+			nc.globalArgs = append([]string{fmt.Sprintf("--kubeconfig=%s", c.configPath)}, nc.globalArgs...)
+		}
+	}
+	if c.asGuestKubeconf && !c.withoutNamespace {
+		FatalErr("operating on a namespace of the guest cluster requires WithoutNamespace, with the namespace set in Args, for example, oc.AsGuestKubeconf().WithoutNamespace().Run(\"get\").Args(\"pods\", \"-n\", \"guestclusterns\").Output()")
+	}
+	if !c.withoutNamespace {
+		nc.globalArgs = append([]string{fmt.Sprintf("--namespace=%s", c.Namespace())}, nc.globalArgs...)
+	}
+	nc.stdin, nc.stdout, nc.stderr = in, out, errout
+	return nc.setOutput(c.stdout)
+}
+
+// Template sets a Go template for the OpenShift CLI command.
+// This is equivalent to running "oc get foo -o template --template='{{ .spec }}'"
+func (c *CLI) Template(t string) *CLI {
+	if c.verb != "get" {
+		FatalErr("Cannot use Template() for non-get verbs.")
+	}
+	templateArgs := []string{"--output=template", fmt.Sprintf("--template=%s", t)}
+	commandArgs := append(c.commandArgs, templateArgs...)
+	c.finalArgs = append(c.globalArgs, commandArgs...)
+	return c
+}
+
+// InputString adds expected input to the command
+func (c *CLI) InputString(input string) *CLI {
+	c.stdin.WriteString(input)
+	return c
+}
+
+// Args sets the additional arguments for the OpenShift CLI command
+func (c *CLI) Args(args ...string) *CLI {
+	c.commandArgs = args
+	c.finalArgs = append(c.globalArgs, c.commandArgs...)
+	return c
+}
+
+func (c *CLI) printCmd() string {
+	return strings.Join(c.finalArgs, " ")
+}
+
+// ExitError struct
+type ExitError struct {
+	Cmd    string
+	StdErr string
+	*exec.ExitError
+}
+
+// Output executes the command and returns stdout/stderr combined into one string
+func (c *CLI) Output() (string, error) {
+	if c.verbose {
+		fmt.Printf("DEBUG: oc %s\n", c.printCmd())
+	}
+	cmd := exec.Command(c.execPath, c.finalArgs...)
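+	// Wire up stdin and execute the binary, capturing stdout and stderr combined into one buffer.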
+ cmd.Stdin = c.stdin + if c.showInfo { + e2e.Logf("Running '%s %s'", c.execPath, strings.Join(c.finalArgs, " ")) + } + out, err := cmd.CombinedOutput() + trimmed := strings.TrimSpace(string(out)) + switch err.(type) { + case nil: + c.stdout = bytes.NewBuffer(out) + return trimmed, nil + case *exec.ExitError: + e2e.Logf("Error running %v:\n%s", cmd, trimmed) + return trimmed, &ExitError{ExitError: err.(*exec.ExitError), Cmd: c.execPath + " " + strings.Join(c.finalArgs, " "), StdErr: trimmed} + default: + FatalErr(fmt.Errorf("unable to execute %q: %v", c.execPath, err)) + // unreachable code + return "", nil + } +} + +// Outputs executes the command and returns the stdout/stderr output as separate strings +func (c *CLI) Outputs() (string, string, error) { + if c.verbose { + fmt.Printf("DEBUG: oc %s\n", c.printCmd()) + } + cmd := exec.Command(c.execPath, c.finalArgs...) + cmd.Stdin = c.stdin + e2e.Logf("showInfo is %v", c.showInfo) + if c.showInfo { + e2e.Logf("Running '%s %s'", c.execPath, strings.Join(c.finalArgs, " ")) + } + //out, err := cmd.CombinedOutput() + var stdErrBuff, stdOutBuff bytes.Buffer + cmd.Stdout = &stdOutBuff + cmd.Stderr = &stdErrBuff + err := cmd.Run() + + stdOutBytes := stdOutBuff.Bytes() + stdErrBytes := stdErrBuff.Bytes() + stdOut := strings.TrimSpace(string(stdOutBytes)) + stdErr := strings.TrimSpace(string(stdErrBytes)) + switch err.(type) { + case nil: + c.stdout = bytes.NewBuffer(stdOutBytes) + c.stderr = bytes.NewBuffer(stdErrBytes) + return stdOut, stdErr, nil + case *exec.ExitError: + e2e.Logf("Error running %v:\nStdOut>\n%s\nStdErr>\n%s\n", cmd, stdOut, stdErr) + return stdOut, stdErr, &ExitError{ExitError: err.(*exec.ExitError), Cmd: c.execPath + " " + strings.Join(c.finalArgs, " "), StdErr: stdErr} + default: + FatalErr(fmt.Errorf("unable to execute %q: %v", c.execPath, err)) + // unreachable code + return "", "", nil + } +} + +// Background executes the command in the background and returns the Cmd object +// which may be killed later via cmd.Process.Kill(). It also returns buffers +// holding the stdout & stderr of the command, which may be read from only after +// calling cmd.Wait(). +func (c *CLI) Background() (*exec.Cmd, *bytes.Buffer, *bytes.Buffer, error) { + if c.verbose { + fmt.Printf("DEBUG: oc %s\n", c.printCmd()) + } + cmd := exec.Command(c.execPath, c.finalArgs...) + cmd.Stdin = c.stdin + var stdout, stderr bytes.Buffer + cmd.Stdout = bufio.NewWriter(&stdout) + cmd.Stderr = bufio.NewWriter(&stderr) + + e2e.Logf("Running '%s %s'", c.execPath, strings.Join(c.finalArgs, " ")) + + err := cmd.Start() + return cmd, &stdout, &stderr, err +} + +// BackgroundRC executes the command in the background and returns the Cmd +// object which may be killed later via cmd.Process.Kill(). It returns a +// ReadCloser for stdout. If in doubt, use Background(). Consult the os/exec +// documentation. +func (c *CLI) BackgroundRC() (*exec.Cmd, io.ReadCloser, error) { + if c.verbose { + fmt.Printf("DEBUG: oc %s\n", c.printCmd()) + } + cmd := exec.Command(c.execPath, c.finalArgs...) 
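+	// Expose stdout as a pipe; the caller is responsible for draining it and waiting on the command.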
+	cmd.Stdin = c.stdin
+	stdout, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	e2e.Logf("Running '%s %s'", c.execPath, strings.Join(c.finalArgs, " "))
+
+	err = cmd.Start()
+	return cmd, stdout, err
+}
+
+// OutputToFile executes the command and stores the output in a file
+func (c *CLI) OutputToFile(filename string) (string, error) {
+	content, err := c.Output()
+	if err != nil {
+		return "", err
+	}
+	path := filepath.Join(e2e.TestContext.OutputDir, c.Namespace()+"-"+filename)
+	return path, os.WriteFile(path, []byte(content), 0644)
+}
+
+// OutputsToFiles executes the command and stores stdout in one file and stderr in another
+// The stdout output will be written to fileName+'.stdout'
+// The stderr output will be written to fileName+'.stderr'
+func (c *CLI) OutputsToFiles(fileName string) (string, string, error) {
+	stdoutFilename := fileName + ".stdout"
+	stderrFilename := fileName + ".stderr"
+
+	stdout, stderr, err := c.Outputs()
+	if err != nil {
+		return "", "", err
+	}
+	stdoutPath := filepath.Join(e2e.TestContext.OutputDir, c.Namespace()+"-"+stdoutFilename)
+	stderrPath := filepath.Join(e2e.TestContext.OutputDir, c.Namespace()+"-"+stderrFilename)
+
+	if err := os.WriteFile(stdoutPath, []byte(stdout), 0644); err != nil {
+		return "", "", err
+	}
+
+	if err := os.WriteFile(stderrPath, []byte(stderr), 0644); err != nil {
+		return stdoutPath, "", err
+	}
+
+	return stdoutPath, stderrPath, nil
+}
+
+// Execute executes the current command and returns an error if the execution failed
+// This function sets the default output to the Ginkgo writer.
+func (c *CLI) Execute() error {
+	out, err := c.Output()
+	if _, err := io.Copy(g.GinkgoWriter, strings.NewReader(out+"\n")); err != nil {
+		fmt.Fprintln(os.Stderr, "ERROR: Unable to copy the output to ginkgo writer")
+	}
+	os.Stdout.Sync()
+	return err
+}
+
+// FatalErr exits the test in case a fatal error has occurred.
+func FatalErr(msg interface{}) {
+	// the path that leads to this being called isn't always clear...
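+	// ... so dump the current goroutine's stack to the Ginkgo writer to make the call site findable.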
+ fmt.Fprintln(g.GinkgoWriter, string(debug.Stack())) + e2e.Failf("%v", msg) +} + +// AddExplicitResourceToDelete method +func (c *CLI) AddExplicitResourceToDelete(resource schema.GroupVersionResource, namespace, name string) { + c.resourcesToDelete = append(c.resourcesToDelete, resourceRef{Resource: resource, Namespace: namespace, Name: name}) +} + +// AddResourceToDelete method +func (c *CLI) AddResourceToDelete(resource schema.GroupVersionResource, metadata metav1.Object) { + c.resourcesToDelete = append(c.resourcesToDelete, resourceRef{Resource: resource, Namespace: metadata.GetNamespace(), Name: metadata.GetName()}) +} + +// AddPathsToDelete method +func (c *CLI) AddPathsToDelete(dir string) { + c.pathsToDelete = append(c.pathsToDelete, dir) +} + +// CreateUser method +func (c *CLI) CreateUser(prefix string) *userv1.User { + user, err := c.AdminUserClient().UserV1().Users().Create(context.Background(), &userv1.User{ + ObjectMeta: metav1.ObjectMeta{GenerateName: prefix + c.Namespace()}, + }, metav1.CreateOptions{}) + if err != nil { + FatalErr(err) + } + c.AddResourceToDelete(userv1.GroupVersion.WithResource("users"), user) + + return user +} + +// GetClientConfigForUser method +func (c *CLI) GetClientConfigForUser(username string) *rest.Config { + userClient := c.AdminUserClient() + + user, err := userClient.UserV1().Users().Get(context.Background(), username, metav1.GetOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + FatalErr(err) + } + if err != nil { + user, err = userClient.UserV1().Users().Create(context.Background(), &userv1.User{ + ObjectMeta: metav1.ObjectMeta{Name: username}, + }, metav1.CreateOptions{}) + if err != nil { + FatalErr(err) + } + c.AddResourceToDelete(userv1.GroupVersion.WithResource("users"), user) + } + + oauthClient := c.AdminOauthClient() + oauthClientName := "e2e-client-" + c.Namespace() + oauthClientObj, err := oauthClient.OauthV1().OAuthClients().Create(context.Background(), &oauthv1.OAuthClient{ + ObjectMeta: metav1.ObjectMeta{Name: oauthClientName}, + GrantMethod: oauthv1.GrantHandlerAuto, + }, metav1.CreateOptions{}) + if err != nil && !apierrors.IsAlreadyExists(err) { + FatalErr(err) + } + if oauthClientObj != nil { + c.AddExplicitResourceToDelete(oauthv1.GroupVersion.WithResource("oauthclients"), "", oauthClientName) + } + + privToken, pubToken := GenerateOAuthTokenPair() + token, err := oauthClient.OauthV1().OAuthAccessTokens().Create(context.Background(), &oauthv1.OAuthAccessToken{ + ObjectMeta: metav1.ObjectMeta{Name: pubToken}, + ClientName: oauthClientName, + UserName: username, + UserUID: string(user.UID), + Scopes: []string{"user:full"}, + RedirectURI: "https://localhost:8443/oauth/token/implicit", + }, metav1.CreateOptions{}) + + if err != nil { + FatalErr(err) + } + c.AddResourceToDelete(oauthv1.GroupVersion.WithResource("oauthaccesstokens"), token) + + userClientConfig := rest.AnonymousClientConfig(turnOffRateLimiting(rest.CopyConfig(c.AdminConfig()))) + userClientConfig.BearerToken = privToken + + return userClientConfig +} + +// GetClientConfigForExtOIDCUser gets a client config for an external OIDC cluster +func (c *CLI) GetClientConfigForExtOIDCUser(tokenCacheDir string) *rest.Config { + userClientConfig := rest.AnonymousClientConfig(turnOffRateLimiting(rest.CopyConfig(c.AdminConfig()))) + var oidcIssuerURL, oidcClientID, oidcCertCAPath string + if IsKeycloakExtOIDCCluster() { + oidcIssuerURL = os.Getenv("KEYCLOAK_ISSUER") + oidcClientID = os.Getenv("KEYCLOAK_CLI_CLIENT_ID") + } else { + e2e.Failf("Currently the 
GetClientConfigForExtOIDCUser func only supports limited external OIDC providers.") + } + oidcCertCAPath = filepath.Join(os.Getenv("SHARED_DIR"), "oidcProviders-ca.crt") + args := []string{ + "get-token", + fmt.Sprintf("--issuer-url=%s", oidcIssuerURL), + fmt.Sprintf("--client-id=%s", oidcClientID), + "--extra-scopes=email,profile", + "--callback-address=127.0.0.1:8080", + } + if _, err := os.Stat(oidcCertCAPath); err == nil { + args = append(args, fmt.Sprintf("--certificate-authority=%s", oidcCertCAPath)) + } + userClientConfig.ExecProvider = &clientcmdapi.ExecConfig{ + APIVersion: "client.authentication.k8s.io/v1", + Command: "oc", + Args: args, + // We can't use os.Setenv("KUBECACHEDIR", tokenCacheDir), so we use "ExecEnvVar" that ensures each + // single user has unique cache path to avoid the parallel running users mess up the same cache path, + // because the cache file name is decided by the issuer URL & client ID provided in CLI + Env: []clientcmdapi.ExecEnvVar{ + {Name: "KUBECACHEDIR", Value: tokenCacheDir}, + }, + InstallHint: "Please be sure that oc is defined in $PATH to be executed as credentials exec plugin", + InteractiveMode: clientcmdapi.IfAvailableExecInteractiveMode, + ProvideClusterInfo: false, + } + + return userClientConfig +} + +// GenerateOAuthTokenPair returns two tokens to use with OpenShift OAuth-based authentication. +// The first token is a private token meant to be used as a Bearer token to send +// queries to the API, the second token is a hashed token meant to be stored in +// the database. +func GenerateOAuthTokenPair() (privToken, pubToken string) { + const sha256Prefix = "sha256~" + randomToken := base64.RawURLEncoding.EncodeToString(uuid.NewRandom()) + hashed := sha256.Sum256([]byte(randomToken)) + return sha256Prefix + string(randomToken), sha256Prefix + base64.RawURLEncoding.EncodeToString(hashed[:]) +} + +// turnOffRateLimiting reduces the chance that a flaky test can be written while using this package +func turnOffRateLimiting(config *rest.Config) *rest.Config { + configCopy := *config + configCopy.QPS = 10000 + configCopy.Burst = 10000 + configCopy.RateLimiter = flowcontrol.NewFakeAlwaysRateLimiter() + // We do not set a timeout because that will cause watches to fail + // Integration tests are already limited to 5 minutes + // configCopy.Timeout = time.Minute + return &configCopy +} + +// WaitForAccessAllowed method +func (c *CLI) WaitForAccessAllowed(review *kubeauthorizationv1.SelfSubjectAccessReview, user string) error { + if user == "system:anonymous" { + return waitForAccess(kubernetes.NewForConfigOrDie(rest.AnonymousClientConfig(c.AdminConfig())), true, review) + } + + kubeClient, err := kubernetes.NewForConfig(c.GetClientConfigForUser(user)) + if err != nil { + FatalErr(err) + } + return waitForAccess(kubeClient, true, review) +} + +// WaitForAccessDenied method +func (c *CLI) WaitForAccessDenied(review *kubeauthorizationv1.SelfSubjectAccessReview, user string) error { + if user == "system:anonymous" { + return waitForAccess(kubernetes.NewForConfigOrDie(rest.AnonymousClientConfig(c.AdminConfig())), false, review) + } + + kubeClient, err := kubernetes.NewForConfig(c.GetClientConfigForUser(user)) + if err != nil { + FatalErr(err) + } + return waitForAccess(kubeClient, false, review) +} + +func waitForAccess(c kubernetes.Interface, allowed bool, review *kubeauthorizationv1.SelfSubjectAccessReview) error { + return wait.Poll(time.Second, time.Minute, func() (bool, error) { + response, err := 
c.AuthorizationV1().SelfSubjectAccessReviews().Create(context.Background(), review, metav1.CreateOptions{}) + if err != nil { + return false, err + } + return response.Status.Allowed == allowed, nil + }) +} + +func getClientConfig(kubeConfigFile string) (*rest.Config, error) { + kubeConfigBytes, err := os.ReadFile(kubeConfigFile) + if err != nil { + return nil, err + } + kubeConfig, err := clientcmd.NewClientConfigFromBytes(kubeConfigBytes) + if err != nil { + return nil, err + } + clientConfig, err := kubeConfig.ClientConfig() + if err != nil { + return nil, err + } + clientConfig.WrapTransport = defaultClientTransport + + return clientConfig, nil +} + +// defaultClientTransport sets defaults for a client Transport that are suitable +// for use by infrastructure components. +func defaultClientTransport(rt http.RoundTripper) http.RoundTripper { + transport, ok := rt.(*http.Transport) + if !ok { + return rt + } + + // TODO: this should be configured by the caller, not in this method. + dialer := &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + } + transport.Dial = dialer.Dial + // Hold open more internal idle connections + // TODO: this should be configured by the caller, not in this method. + transport.MaxIdleConnsPerHost = 100 + return transport +} + +// SilentOutput executes the command and returns stdout/stderr combined into one string +func (c *CLI) SilentOutput() (string, error) { + if c.verbose { + fmt.Printf("DEBUG: oc %s\n", c.printCmd()) + } + cmd := exec.Command(c.execPath, c.finalArgs...) + cmd.Stdin = c.stdin + if c.showInfo { + e2e.Logf("Running '%s %s'", c.execPath, strings.Join(c.finalArgs, " ")) + } + out, err := cmd.CombinedOutput() + trimmed := strings.TrimSpace(string(out)) + switch err.(type) { + case nil: + c.stdout = bytes.NewBuffer(out) + return trimmed, nil + case *exec.ExitError: + e2e.Logf("Error running %v", cmd) + return trimmed, &ExitError{ExitError: err.(*exec.ExitError), Cmd: c.execPath + " " + strings.Join(c.finalArgs, " "), StdErr: trimmed} + default: + FatalErr(fmt.Errorf("unable to execute %q: %v", c.execPath, err)) + return "", nil + } +} diff --git a/test/util/cloud/cloud.go b/test/util/cloud/cloud.go new file mode 100644 index 000000000..182d5a71e --- /dev/null +++ b/test/util/cloud/cloud.go @@ -0,0 +1,107 @@ +package cloud + +import ( + "context" + "fmt" + "io/ioutil" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + e2e "k8s.io/kubernetes/test/e2e/framework" + + configv1 "github.com/openshift/api/config/v1" + configclient "github.com/openshift/client-go/config/clientset/versioned" + "github.com/openshift/openshift-tests-private/test/extended/util/azure" +) + +// LoadConfig uses the cluster to setup the cloud provider config. 
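+// The returned provider name (e.g. "aws", "gce", "azure") and *e2e.CloudConfig are meant to be wired
+// into the upstream e2e framework; a sketch of one possible call site (not part of this change):
+//
+//	provider, cfg, err := cloud.LoadConfig()
+//	if err == nil && cfg != nil {
+//		e2e.TestContext.Provider = provider
+//		e2e.TestContext.CloudConfig = *cfg
+//	}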
+func LoadConfig() (string, *e2e.CloudConfig, error) {
+	// LoadClientset, but don't set the UserAgent to include the current test name:
+	// no test is running yet and that call would panic
+	coreClient, err := e2e.LoadClientset(true)
+	if err != nil {
+		return "", nil, err
+	}
+	// LoadConfig, but don't set the UserAgent to include the current test name:
+	// no test is running yet and that call would panic
+	clientConfig, err := e2e.LoadConfig(true)
+	if err != nil {
+		return "", nil, err
+	}
+	client := configclient.NewForConfigOrDie(clientConfig)
+
+	infra, err := client.ConfigV1().Infrastructures().Get(context.Background(), "cluster", metav1.GetOptions{})
+	if err != nil {
+		return "", nil, err
+	}
+	p := infra.Status.PlatformStatus
+	if p == nil {
+		return "", nil, fmt.Errorf("status.platformStatus must be set")
+	}
+	if p.Type == configv1.NonePlatformType {
+		return "", nil, nil
+	}
+
+	masters, err := coreClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{
+		LabelSelector: "node-role.kubernetes.io/master=",
+	})
+	if err != nil {
+		return "", nil, err
+	}
+	zones := sets.NewString()
+	for _, node := range masters.Items {
+		zones.Insert(node.Labels["failure-domain.beta.kubernetes.io/zone"])
+	}
+	zones.Delete("")
+
+	nonMasters, err := coreClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{
+		LabelSelector: "!node-role.kubernetes.io/master",
+	})
+	if err != nil {
+		return "", nil, err
+	}
+
+	cloudConfig := &e2e.CloudConfig{
+		MultiMaster: len(masters.Items) > 1,
+		MultiZone:   zones.Len() > 1,
+	}
+	if zones.Len() > 0 {
+		cloudConfig.Zone = zones.List()[0]
+	}
+	if len(nonMasters.Items) == 0 {
+		// compact cluster: the masters are also the schedulable worker nodes
+		cloudConfig.NumNodes = len(masters.Items)
+	} else {
+		cloudConfig.NumNodes = len(nonMasters.Items)
+	}
+
+	var provider string
+	switch {
+	case p.AWS != nil:
+		provider = "aws"
+		cloudConfig.Region = p.AWS.Region
+
+	case p.GCP != nil:
+		provider = "gce"
+		cloudConfig.ProjectID = p.GCP.ProjectID
+		cloudConfig.Region = p.GCP.Region
+
+	case p.Azure != nil:
+		provider = "azure"
+
+		data, err := azure.LoadConfigFile()
+		if err != nil {
+			return "", nil, err
+		}
+		tmpFile, err := ioutil.TempFile("", "e2e-*")
+		if err != nil {
+			return "", nil, err
+		}
+		tmpFile.Close()
+		if err := ioutil.WriteFile(tmpFile.Name(), data, 0600); err != nil {
+			return "", nil, err
+		}
+		cloudConfig.ConfigFile = tmpFile.Name()
+	}
+
+	return provider, cloudConfig, nil
+}
diff --git a/test/util/clusterinfra/cluster_helpers.go b/test/util/clusterinfra/cluster_helpers.go
new file mode 100644
index 000000000..0678496cb
--- /dev/null
+++ b/test/util/clusterinfra/cluster_helpers.go
@@ -0,0 +1,280 @@
+package clusterinfra
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"os"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/ec2"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+
+	g "github.com/onsi/ginkgo/v2"
+	o "github.com/onsi/gomega"
+	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
+	"github.com/tidwall/gjson"
+)
+
+type PlatformType int
+
+const (
+	AWS PlatformType = iota
+	GCP
+	Azure
+	VSphere
+	Nutanix
+	OpenStack
+	IBMCloud
+	AlibabaCloud
+	None
+	BareMetal
+	Ovirt
+	PowerVS
+	KubeVirt
+	External
+)
+
+const (
+	//VsphereServer vSphere server hostname
+	VsphereServer = "vcenter.sddc-44-236-21-251.vmwarevmc.com"
+)
+
+// FromString returns the PlatformType value for the given string
+func FromString(platform string) PlatformType {
+	switch platform {
+	case "aws":
return AWS + case "gcp": + return GCP + case "azure": + return Azure + case "vsphere": + return VSphere + case "nutanix": + return Nutanix + case "openstack": + return OpenStack + case "ibmcloud": + return IBMCloud + case "alibabacloud": + return AlibabaCloud + case "none": + return None + case "baremetal": + return BareMetal + case "ovirt": + return Ovirt + case "powervs": + return PowerVS + case "kubevirt": + return KubeVirt + case "external": + return External + default: + e2e.Failf("Unknown platform %s", platform) + } + return AWS +} + +// String returns the string value for the given PlatformType +func (p PlatformType) String() string { + switch p { + case AWS: + return "aws" + case GCP: + return "gcp" + case Azure: + return "azure" + case VSphere: + return "vsphere" + case Nutanix: + return "nutanix" + case OpenStack: + return "openstack" + case IBMCloud: + return "ibmcloud" + case AlibabaCloud: + return "alibabacloud" + case None: + return "none" + case BareMetal: + return "baremetal" + case Ovirt: + return "ovirt" + case PowerVS: + return "powervs" + case KubeVirt: + return "kubevirt" + case External: + return "external" + default: + e2e.Failf("Unknown platform %d", p) + } + return "" +} + +// CheckPlatform check the cluster's platform, rewrite this function in util/machine_helpers but return PlatformType +func CheckPlatform(oc *exutil.CLI) PlatformType { + pstr := exutil.CheckPlatform(oc) + return FromString(pstr) +} + +// SkipTestIfNotSupportedPlatform skip the test if current platform matches one of the provided not supported platforms +func SkipTestIfNotSupportedPlatform(oc *exutil.CLI, notsupported ...PlatformType) { + p := CheckPlatform(oc) + for _, nsp := range notsupported { + if nsp == p { + g.Skip("Skip this test scenario because it is not supported on the " + p.String() + " platform") + } + } +} + +// SkipTestIfSupportedPlatformNotMatched skip the test if supported platforms are not matched +func SkipTestIfSupportedPlatformNotMatched(oc *exutil.CLI, supported ...PlatformType) { + var match bool + p := CheckPlatform(oc) + for _, sp := range supported { + if sp == p { + match = true + break + } + } + + if !match { + g.Skip("Skip this test scenario because it is not supported on the " + p.String() + " platform") + } +} + +// GetAwsVolumeInfoAttachedToInstanceID get detail info of the volume attached to the instance id +func GetAwsVolumeInfoAttachedToInstanceID(instanceID string) (string, error) { + mySession := session.Must(session.NewSession()) + svc := ec2.New(mySession, aws.NewConfig()) + input := &ec2.DescribeVolumesInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("attachment.instance-id"), + Values: []*string{ + aws.String(instanceID), + }, + }, + }, + } + volumeInfo, err := svc.DescribeVolumes(input) + newValue, _ := json.Marshal(volumeInfo) + return string(newValue), err +} + +// GetAwsCredentialFromCluster get aws credential from cluster +func GetAwsCredentialFromCluster(oc *exutil.CLI) { + credential, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/aws-creds", "-n", "kube-system", "-o", "json").Output() + // Skip for sts and c2s clusters. 
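+	// (STS/C2S clusters typically do not expose the root secret kube-system/aws-creds, so the read above fails.)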
+	if err != nil {
+		g.Skip("Did not get credential to access aws, skip the testing.")
+	}
+	o.Expect(err).NotTo(o.HaveOccurred())
+	accessKeyIDBase64, secureKeyBase64 := gjson.Get(credential, `data.aws_access_key_id`).String(), gjson.Get(credential, `data.aws_secret_access_key`).String()
+	accessKeyID, err1 := base64.StdEncoding.DecodeString(accessKeyIDBase64)
+	o.Expect(err1).NotTo(o.HaveOccurred())
+	secureKey, err2 := base64.StdEncoding.DecodeString(secureKeyBase64)
+	o.Expect(err2).NotTo(o.HaveOccurred())
+	clusterRegion, err3 := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output()
+	o.Expect(err3).NotTo(o.HaveOccurred())
+	os.Setenv("AWS_ACCESS_KEY_ID", string(accessKeyID))
+	os.Setenv("AWS_SECRET_ACCESS_KEY", string(secureKey))
+	os.Setenv("AWS_REGION", clusterRegion)
+}
+
+// GetVsphereCredentialFromCluster retrieves vSphere credentials as env variables
+func GetVsphereCredentialFromCluster(oc *exutil.CLI) {
+	credential, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/vsphere-creds", "-n", "kube-system", "-o", "json").Output()
+	// Skip for sts and c2s clusters.
+	if err != nil {
+		g.Skip("Did not get credential to access vSphere, skip the testing.")
+	}
+
+	// Escape the dots in the vSphere server hostname to access the json value
+	escapedVsphereName := strings.ReplaceAll(VsphereServer, ".", "\\.")
+	usernameBase64, passwordBase64 := gjson.Get(credential, `data.`+escapedVsphereName+`\.username`).String(), gjson.Get(credential, `data.`+escapedVsphereName+`\.password`).String()
+
+	username, err := base64.StdEncoding.DecodeString(usernameBase64)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	password, err := base64.StdEncoding.DecodeString(passwordBase64)
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+	os.Setenv("VSPHERE_USER", string(username))
+	os.Setenv("VSPHERE_PASSWORD", string(password))
+	os.Setenv("VSPHERE_SERVER", VsphereServer)
+}
+
+// GetGcpCredentialFromCluster retrieves GCP credentials as env variables
+func GetGcpCredentialFromCluster(oc *exutil.CLI) {
+	credential, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/gcp-credentials", "-n", "kube-system", "-o", "json").Output()
+	// Skip for sts and c2s clusters.
+ if err != nil { + g.Skip("Did not get credential to access GCP, skip the testing.") + + } + + serviceAccountBase64 := gjson.Get(credential, `data.service_account\.json`).String() + + serviceAccount, err := base64.StdEncoding.DecodeString(serviceAccountBase64) + o.Expect(err).NotTo(o.HaveOccurred()) + + os.Setenv("GOOGLE_CREDENTIALS", string(serviceAccount)) + +} + +// IsAwsOutpostCluster judges whether the aws test cluster has outpost workers +func IsAwsOutpostCluster(oc *exutil.CLI) bool { + if CheckPlatform(oc) != AWS { + return false + } + workersLabel, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", "node-role.kubernetes.io/worker", "--show-labels").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + return strings.Contains(workersLabel, `topology.ebs.csi.aws.com/outpost-id`) +} + +// SkipForAwsOutpostCluster skip for Aws Outpost cluster +func SkipForAwsOutpostCluster(oc *exutil.CLI) { + if IsAwsOutpostCluster(oc) { + g.Skip("Skip for Aws Outpost cluster.") + } +} + +// IsAwsOutpostMixedCluster check whether the cluster is aws outpost mixed workers cluster +func IsAwsOutpostMixedCluster(oc *exutil.CLI) bool { + return IsAwsOutpostCluster(oc) && len(ListNonOutpostWorkerNodes(oc)) > 0 +} + +// SkipForNotAwsOutpostMixedCluster skip for not Aws Outpost Mixed cluster +func SkipForNotAwsOutpostMixedCluster(oc *exutil.CLI) { + if !IsAwsOutpostMixedCluster(oc) { + g.Skip("Skip for not Aws Outpost Mixed cluster.") + } +} + +// CheckProxy checks whether the cluster is proxy kind +func CheckProxy(oc *exutil.CLI) bool { + httpProxy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy", "cluster", "-o=jsonpath={.status.httpProxy}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + httpsProxy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy", "cluster", "-o=jsonpath={.status.httpsProxy}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if httpProxy != "" || httpsProxy != "" { + return true + } + return false +} + +// GetInfrastructureName get infrastructure name +func GetInfrastructureName(oc *exutil.CLI) string { + infrastructureName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.infrastructureName}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + return infrastructureName +} diff --git a/test/util/clusterinfra/machine_arch.go b/test/util/clusterinfra/machine_arch.go new file mode 100644 index 000000000..066f2bece --- /dev/null +++ b/test/util/clusterinfra/machine_arch.go @@ -0,0 +1,163 @@ +package clusterinfra + +import ( + "fmt" + "io/ioutil" + "math/rand" + "strings" + + g "github.com/onsi/ginkgo/v2" + o "github.com/onsi/gomega" + exutil "github.com/openshift/openshift-tests-private/test/extended/util" + "github.com/openshift/openshift-tests-private/test/extended/util/architecture" + "github.com/tidwall/sjson" + e2e "k8s.io/kubernetes/test/e2e/framework" +) + +// CreateMachineSetByArch create a new machineset by arch +func (ms *MachineSetDescription) CreateMachineSetByArch(oc *exutil.CLI, arch architecture.Architecture) { + e2e.Logf("Creating a new MachineSets ...") + machinesetName := GetRandomMachineSetNameByArch(oc, arch) + machineSetJSON, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachineset, machinesetName, "-n", MachineAPINamespace, "-o=json").OutputToFile("machineset.json") + o.Expect(err).NotTo(o.HaveOccurred()) + + bytes, _ := ioutil.ReadFile(machineSetJSON) + machinesetjsonWithName, _ := sjson.Set(string(bytes), "metadata.name", ms.Name) + 
machinesetjsonWithSelector, _ := sjson.Set(machinesetjsonWithName, "spec.selector.matchLabels.machine\\.openshift\\.io/cluster-api-machineset", ms.Name)
+	machinesetjsonWithTemplateLabel, _ := sjson.Set(machinesetjsonWithSelector, "spec.template.metadata.labels.machine\\.openshift\\.io/cluster-api-machineset", ms.Name)
+	machinesetjsonWithReplicas, _ := sjson.Set(machinesetjsonWithTemplateLabel, "spec.replicas", ms.Replicas)
+	// Adding taints to the machineset so that pods without a toleration cannot be scheduled to the nodes we provision
+	machinesetjsonWithTaints, _ := sjson.Set(machinesetjsonWithReplicas, "spec.template.spec.taints.0", map[string]interface{}{"effect": "NoSchedule", "key": "mapi", "value": "mapi_test"})
+	err = ioutil.WriteFile(machineSetJSON, []byte(machinesetjsonWithTaints), 0644)
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+	if err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", machineSetJSON).Execute(); err != nil {
+		ms.DeleteMachineSet(oc)
+		o.Expect(err).NotTo(o.HaveOccurred())
+	} else {
+		WaitForMachinesRunning(oc, ms.Replicas, ms.Name)
+	}
+}
+
+// ListWorkerMachineSetNamesByArch lists all Linux worker machineSets by arch
+func ListWorkerMachineSetNamesByArch(oc *exutil.CLI, arch architecture.Architecture) []string {
+	e2e.Logf("Listing all MachineSets by arch ...")
+	machineSetNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachineset, "-o=jsonpath={.items[*].metadata.name}", "-n", MachineAPINamespace).Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	if machineSetNames == "" {
+		g.Skip("Skip this test scenario because there are no machinesets in this cluster")
+	}
+	workerMachineSetNames := strings.Split(machineSetNames, " ")
+	var linuxWorkerMachineSetNames []string
+	for _, workerMachineSetName := range workerMachineSetNames {
+		machineSetLabels, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachineset, workerMachineSetName, "-o=jsonpath={.spec.template.metadata.labels}", "-n", MachineAPINamespace).Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		machineSetAnnotation, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachineset, workerMachineSetName, "-o=jsonpath={.metadata.annotations.capacity\\.cluster-autoscaler\\.kubernetes\\.io/labels}", "-n", MachineAPINamespace).Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		if strings.Contains(machineSetAnnotation, architecture.NodeArchitectureLabel+"="+arch.String()) && !strings.Contains(machineSetLabels, `"machine.openshift.io/os-id":"Windows"`) {
+			linuxWorkerMachineSetNames = append(linuxWorkerMachineSetNames, workerMachineSetName)
+		}
+	}
+	e2e.Logf("linuxWorkerMachineSetNames: %s", linuxWorkerMachineSetNames)
+	return linuxWorkerMachineSetNames
+}
+
+// GetRandomMachineSetNameByArch gets a random MachineSet name by arch
+func GetRandomMachineSetNameByArch(oc *exutil.CLI, arch architecture.Architecture) string {
+	e2e.Logf("Getting a random MachineSet by arch ...")
+	machinesetNames := ListWorkerMachineSetNamesByArch(oc, arch)
+	if len(machinesetNames) == 0 {
+		g.Skip(fmt.Sprintf("Skip this test scenario because there are no linux/%s machinesets in this cluster", arch))
+	}
+	return machinesetNames[rand.Int31n(int32(len(machinesetNames)))]
+}
+
+// GetArchitectureFromMachineSet gets the architecture of a machineset
+func GetArchitectureFromMachineSet(oc *exutil.CLI, machineSetName string) (architecture.Architecture, error) {
+	nodeNames := GetNodeNamesFromMachineSet(oc, machineSetName)
+	if len(nodeNames) == 0 {
+		e2e.Logf("no nodes associated with %s. Using the capacity annotation", machineSetName)
+		machineSetAnnotationCapacity, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(
+			exutil.MapiMachineset, machineSetName,
+			"-o=jsonpath={.metadata.annotations.capacity\\.cluster-autoscaler\\.kubernetes\\.io/labels}",
+			"-n", exutil.MachineAPINamespace).Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		capacityLabels := mapFromCommaSeparatedKV(machineSetAnnotationCapacity)
+		e2e.Logf("capacityLabels: %s", capacityLabels)
+		for k, v := range capacityLabels {
+			if strings.Contains(k, architecture.NodeArchitectureLabel) {
+				return architecture.FromString(v), nil
+			}
+		}
+		return architecture.UNKNOWN, fmt.Errorf(
+			"error getting the machineSet's nodes and unable to infer the architecture from the %s's capacity annotations",
+			machineSetName)
+	}
+	arch, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeNames[0],
+		"-o=jsonpath={.status.nodeInfo.architecture}").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	return architecture.FromString(arch), nil
+}
+
+// mapFromCommaSeparatedKV converts a comma-separated string of key=value pairs into a map
+func mapFromCommaSeparatedKV(list string) map[string]string {
+	merged := make(map[string]string)
+	for _, kv := range strings.Split(list, ",") {
+		kv := strings.Split(kv, "=")
+		if len(kv) != 2 {
+			// ignore invalid key=value pairs
+			continue
+		}
+		merged[kv[0]] = kv[1]
+	}
+	return merged
+}
+
+// GetInstanceTypeValuesByProviderAndArch gets instance types for this provider and architecture
+func GetInstanceTypeValuesByProviderAndArch(cloudProvider PlatformType, arch architecture.Architecture) []string {
+	e2e.Logf("Getting instance type by provider and arch ...")
+	instanceTypesMap := map[PlatformType]map[architecture.Architecture][]string{
+		AWS: {
+			architecture.AMD64: {
+				"m5.xlarge",
+				"m6i.xlarge",
+			},
+			architecture.ARM64: {
+				"m6gd.xlarge",
+				"m6g.xlarge",
+			},
+		},
+		GCP: {
+			architecture.AMD64: {
+				"n2-standard-4",
+				"n2d-standard-4",
+			},
+			architecture.ARM64: {
+				"t2a-standard-4",
+				"t2a-standard-8",
+			},
+		},
+		Azure: {
+			architecture.AMD64: {
+				"Standard_D4s_v3",
+				"Standard_D8s_v3",
+			},
+			architecture.ARM64: {
+				"Standard_D4ps_v5",
+				"Standard_D8ps_v5",
+			},
+		},
+	}
+	return instanceTypesMap[cloudProvider][arch]
+}
+
+// GetInstanceTypeKeyByProvider gets the instance type key for this provider
+func GetInstanceTypeKeyByProvider(cloudProvider PlatformType) string {
+	e2e.Logf("Getting instance type key by provider ...")
+	instanceTypeKey := map[PlatformType]string{
+		AWS:   "instanceType",
+		GCP:   "machineType",
+		Azure: "vmSize",
+	}
+	return instanceTypeKey[cloudProvider]
+}
diff --git a/test/util/clusterinfra/machine_helpers.go b/test/util/clusterinfra/machine_helpers.go
new file mode 100644
index 000000000..24030e016
--- /dev/null
+++ b/test/util/clusterinfra/machine_helpers.go
@@ -0,0 +1,545 @@
+package clusterinfra
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"math/rand"
+	"strconv"
+	"strings"
+	"time"
+
+	g "github.com/onsi/ginkgo/v2"
+	o "github.com/onsi/gomega"
+	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
+	"github.com/tidwall/sjson"
+	"k8s.io/apimachinery/pkg/util/wait"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+const (
+	MachineAPINamespace = "openshift-machine-api"
+	//MapiMachineset means the fullname of mapi machineset
+	MapiMachineset = "machinesets.machine.openshift.io"
+	//MapiMachine means the fullname of mapi machine
+	MapiMachine = "machines.machine.openshift.io"
+	//MapiMHC means the fullname of mapi 
machinehealthcheck + MapiMHC = "machinehealthchecks.machine.openshift.io" +) + +// MachineSetDescription define fields to create machineset +type MachineSetDescription struct { + Name string + Replicas int +} + +// CreateMachineSet create a new machineset +func (ms *MachineSetDescription) CreateMachineSet(oc *exutil.CLI) { + e2e.Logf("Creating a new MachineSets ...") + machinesetName := GetRandomMachineSetName(oc) + machineSetJSON, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachineset, machinesetName, "-n", MachineAPINamespace, "-o=json").OutputToFile("machineset.json") + o.Expect(err).NotTo(o.HaveOccurred()) + + bytes, _ := ioutil.ReadFile(machineSetJSON) + value1, _ := sjson.Set(string(bytes), "metadata.name", ms.Name) + value2, _ := sjson.Set(value1, "spec.selector.matchLabels.machine\\.openshift\\.io/cluster-api-machineset", ms.Name) + value3, _ := sjson.Set(value2, "spec.template.metadata.labels.machine\\.openshift\\.io/cluster-api-machineset", ms.Name) + value4, _ := sjson.Set(value3, "spec.replicas", ms.Replicas) + // Adding taints to machineset so that pods without toleration can not schedule to the nodes we provision + value5, _ := sjson.Set(value4, "spec.template.spec.taints.0", map[string]interface{}{"effect": "NoSchedule", "key": "mapi", "value": "mapi_test"}) + err = ioutil.WriteFile(machineSetJSON, []byte(value5), 0644) + o.Expect(err).NotTo(o.HaveOccurred()) + + if err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", machineSetJSON).Execute(); err != nil { + ms.DeleteMachineSet(oc) + o.Expect(err).NotTo(o.HaveOccurred()) + } else { + WaitForMachinesRunning(oc, ms.Replicas, ms.Name) + } +} + +// DeleteMachineSet delete a machineset +func (ms *MachineSetDescription) DeleteMachineSet(oc *exutil.CLI) error { + e2e.Logf("Deleting a MachineSets ...") + return oc.AsAdmin().WithoutNamespace().Run("delete").Args(MapiMachineset, ms.Name, "-n", MachineAPINamespace).Execute() +} + +// ListAllMachineNames list all machines +func ListAllMachineNames(oc *exutil.CLI) []string { + e2e.Logf("Listing all Machines ...") + machineNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "-o=jsonpath={.items[*].metadata.name}", "-n", MachineAPINamespace).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + return strings.Split(machineNames, " ") +} + +// ListWorkerMachineSetNames list all linux worker machineSets +func ListWorkerMachineSetNames(oc *exutil.CLI) []string { + e2e.Logf("Listing all MachineSets ...") + machineSetNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachineset, "-o=jsonpath={.items[*].metadata.name}", "-n", MachineAPINamespace).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if machineSetNames == "" { + g.Skip("Skip this test scenario because there are no machinesets in this cluster") + } + workerMachineSetNames := strings.Split(machineSetNames, " ") + var linuxWorkerMachineSetNames []string + for _, workerMachineSetName := range workerMachineSetNames { + machineSetLabels, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachineset, workerMachineSetName, "-o=jsonpath={.spec.template.metadata.labels}", "-n", MachineAPINamespace).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if !strings.Contains(machineSetLabels, `"machine.openshift.io/os-id":"Windows"`) { + linuxWorkerMachineSetNames = append(linuxWorkerMachineSetNames, workerMachineSetName) + } + } + e2e.Logf("linuxWorkerMachineSetNames: %s", linuxWorkerMachineSetNames) + return linuxWorkerMachineSetNames +} + +// ListWorkerMachineNames list all 
worker machines +func ListWorkerMachineNames(oc *exutil.CLI) []string { + e2e.Logf("Listing all Machines ...") + machineNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "-o=jsonpath={.items[*].metadata.name}", "-l", "machine.openshift.io/cluster-api-machine-type=worker", "-n", MachineAPINamespace).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + return strings.Split(machineNames, " ") +} + +// ListMasterMachineNames list all master machines +func ListMasterMachineNames(oc *exutil.CLI) []string { + e2e.Logf("Listing all Machines ...") + machineNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "-o=jsonpath={.items[*].metadata.name}", "-l", "machine.openshift.io/cluster-api-machine-type=master", "-n", MachineAPINamespace).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + return strings.Split(machineNames, " ") +} + +// ListNonOutpostWorkerNodes lists all public nodes in the aws outposts mixed cluster +func ListNonOutpostWorkerNodes(oc *exutil.CLI) []string { + e2e.Logf("Listing all regular nodes ...") + var nodeNames []string + var regularNodes []string + nodes, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", "node-role.kubernetes.io/worker", "-o=jsonpath={.items[*].metadata.name}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if nodes == "" { + g.Skip("Skip this test scenario because there are no worker nodes in this cluster") + } + nodeNames = strings.Split(nodes, " ") + for _, node := range nodeNames { + nodeLabels, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", node, "-o=jsonpath={.metadata.labels}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if !strings.Contains(nodeLabels, "topology.ebs.csi.aws.com/outpost-id") { + regularNodes = append(regularNodes, node) + } + } + return regularNodes +} + +// GetMachineNamesFromMachineSet get all Machines in a Machineset +func GetMachineNamesFromMachineSet(oc *exutil.CLI, machineSetName string) []string { + e2e.Logf("Getting all Machines in a Machineset ...") + machineNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "-o=jsonpath={.items[*].metadata.name}", "-l", "machine.openshift.io/cluster-api-machineset="+machineSetName, "-n", MachineAPINamespace).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + return strings.Split(machineNames, " ") +} + +// GetNodeNamesFromMachineSet get all Nodes in a Machineset +func GetNodeNamesFromMachineSet(oc *exutil.CLI, machineSetName string) []string { + e2e.Logf("Getting all Nodes in a Machineset ...") + nodeNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "-o=jsonpath={.items[*].status.nodeRef.name}", "-l", "machine.openshift.io/cluster-api-machineset="+machineSetName, "-n", MachineAPINamespace).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if nodeNames == "" { + return []string{} + } + return strings.Split(nodeNames, " ") +} + +// GetNodeNameFromMachine get node name for a machine +func GetNodeNameFromMachine(oc *exutil.CLI, machineName string) string { + nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, machineName, "-o=jsonpath={.status.nodeRef.name}", "-n", MachineAPINamespace).Output() + o.Expect(err).NotTo(o.HaveOccurred()) + return nodeName +} + +// GetLatestMachineFromMachineSet returns the new created machine by a given machineSet. 
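+// Illustrative usage (the machineset name "worker-new" is hypothetical): after a
+// scale-up, grab the machine that was created last:
+//
+//	ScaleMachineSet(oc, "worker-new", 2)
+//	latest := GetLatestMachineFromMachineSet(oc, "worker-new")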
+func GetLatestMachineFromMachineSet(oc *exutil.CLI, machineSet string) string {
+	machines := GetMachineNamesFromMachineSet(oc, machineSet)
+	if len(machines) == 0 {
+		e2e.Logf("Unable to get the latest machine for machineset %s", machineSet)
+		return ""
+	}
+
+	var machine string
+	newest := time.Date(2020, time.January, 1, 12, 0, 0, 0, time.UTC)
+	for key := range machines {
+		machineCreationTime, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, machines[key], "-o=jsonpath={.metadata.creationTimestamp}", "-n", MachineAPINamespace).Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		parsedMachineCreationTime, err := time.Parse(time.RFC3339, machineCreationTime)
+		if err != nil {
+			e2e.Logf("Error parsing time: %v", err)
+			return ""
+		}
+		if parsedMachineCreationTime.After(newest) {
+			newest = parsedMachineCreationTime
+			machine = machines[key]
+		}
+	}
+	return machine
+}
+
+// GetRandomMachineSetName gets a random RHCOS MachineSet name; on an AWS Outposts cluster it returns an outpost machineset
+func GetRandomMachineSetName(oc *exutil.CLI) string {
+	e2e.Logf("Getting a random MachineSet ...")
+	if IsAwsOutpostCluster(oc) {
+		return GetOneOutpostMachineSet(oc)
+	}
+	allMachineSetNames := ListWorkerMachineSetNames(oc)
+	var filteredMachineSetNames []string
+
+	// Filter out MachineSet names containing 'rhel'
+	for _, name := range allMachineSetNames {
+		if !strings.Contains(name, "rhel") {
+			filteredMachineSetNames = append(filteredMachineSetNames, name)
+		}
+	}
+
+	// Check if there are any machine sets left after filtering
+	if len(filteredMachineSetNames) == 0 {
+		g.Skip("Skip this test scenario because there are no suitable machinesets in this cluster to copy")
+	}
+
+	// Return a random MachineSet name from the filtered list
+	return filteredMachineSetNames[rand.Int31n(int32(len(filteredMachineSetNames)))]
+}
+
+// GetMachineSetReplicas gets the MachineSet replicas
+func GetMachineSetReplicas(oc *exutil.CLI, machineSetName string) int {
+	e2e.Logf("Getting MachineSets replicas ...")
+	replicasVal, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachineset, machineSetName, "-o=jsonpath={.spec.replicas}", "-n", MachineAPINamespace).Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	replicas, _ := strconv.Atoi(replicasVal)
+	return replicas
+}
+
+// ScaleMachineSet scales a MachineSet to the given number of replicas
+func ScaleMachineSet(oc *exutil.CLI, machineSetName string, replicas int) {
+	e2e.Logf("Scaling MachineSets ...")
+	_, err := oc.AsAdmin().WithoutNamespace().Run("scale").Args("--replicas="+strconv.Itoa(replicas), MapiMachineset, machineSetName, "-n", MachineAPINamespace).Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	WaitForMachinesRunning(oc, replicas, machineSetName)
+}
+
+// DeleteMachine deletes a machine
+func DeleteMachine(oc *exutil.CLI, machineName string) error {
+	e2e.Logf("Deleting Machine ...")
+	return oc.AsAdmin().WithoutNamespace().Run("delete").Args(MapiMachine, machineName, "-n", MachineAPINamespace).Execute()
+}
+
+// WaitForMachinesRunning checks that all the machines in a MachineSet are Running
+func WaitForMachinesRunning(oc *exutil.CLI, machineNumber int, machineSetName string) {
+	e2e.Logf("Waiting for the machines Running ...")
+	if machineNumber >= 1 {
+		// Wait 180 seconds first: the 1200-second budget in wait.Poll may not be enough on some platforms
+		time.Sleep(180 * time.Second)
+	}
+	pollErr := wait.Poll(60*time.Second, 1200*time.Second, func() (bool, error) {
+		msg, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachineset, machineSetName, "-o=jsonpath={.status.readyReplicas}", "-n", MachineAPINamespace).Output()
+		machinesRunning, _ := strconv.Atoi(msg)
+		if machinesRunning != machineNumber {
+			phase, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machineSetName, "-o=jsonpath={.items[*].status.phase}").Output()
+			if strings.Contains(phase, "Failed") {
+				output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machineSetName, "-o=yaml").Output()
+				e2e.Logf("%v", output)
+				if strings.Contains(output, "error launching instance: Instances in the pgcluster Placement Group") {
+					e2e.Logf("%v", output)
+					return false, fmt.Errorf("error launching instance in the pgcluster Placement Group")
+				}
+				return false, fmt.Errorf("some machines went into the Failed phase")
+			}
+			if strings.Contains(phase, "Provisioning") {
+				output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machineSetName, "-o=yaml").Output()
+				if strings.Contains(output, "InsufficientInstanceCapacity") || strings.Contains(output, "InsufficientCapacityOnOutpost") {
+					e2e.Logf("%v", output)
+					return false, fmt.Errorf("InsufficientInstanceCapacity")
+				}
+				if strings.Contains(output, "InsufficientResources") {
+					e2e.Logf("%v", output)
+					return false, fmt.Errorf("InsufficientResources")
+				}
+			}
+			e2e.Logf("Expected %v machines are not Running yet; waiting up to 1 minute ...", machineNumber)
+			return false, nil
+		}
+		e2e.Logf("Expected %v machines are Running", machineNumber)
+		return true, nil
+	})
+	if pollErr != nil {
+		if pollErr.Error() == "InsufficientInstanceCapacity" {
+			g.Skip("InsufficientInstanceCapacity, skip this test")
+		}
+		if pollErr.Error() == "InsufficientResources" {
+			g.Skip("InsufficientResources, skip this test")
+		}
+		if pollErr.Error() == "error launching instance in the pgcluster Placement Group" {
+			g.Skip("launching instance in the pgcluster Placement Group Zone is not supported, skip this test")
+		}
+		output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machineSetName, "-o=yaml").Output()
+		e2e.Logf("%v", output)
+		e2e.Failf("Expected %v machines are still not Running after waiting up to 20 minutes ...", machineNumber)
+	}
+	e2e.Logf("All machines are Running ...")
+	// Call WaitForNodesReady here because sometimes the machine is Running while the node is still NotReady; it can take a little longer to become Ready
+	if machineNumber >= 1 {
+		WaitForNodesReady(oc, machineSetName)
+	}
+}
+
+// WaitForNodesReady checks that all the nodes in a MachineSet are Ready and then that no node carries the uninitialized taint, because a healthy node should not have it
+func WaitForNodesReady(oc *exutil.CLI, machineSetName string) {
+	machineNumber := GetMachineSetReplicas(oc, machineSetName)
+	if machineNumber >= 1 {
+		e2e.Logf("Wait for nodes to be ready, then check that they have no uninitialized taints...")
+		err := wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 60*time.Second, false, func(cxt context.Context) (bool, error) {
+			for _, nodeName := range GetNodeNamesFromMachineSet(oc, machineSetName) {
+				readyStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.status.conditions[?(@.type==\"Ready\")].status}").Output()
+				// If the node is NotFound, skip checking it
+				if strings.Contains(readyStatus, "NotFound") {
+					e2e.Logf("Node %s does not exist, skipping...", nodeName)
+					continue
+				}
+				o.Expect(err).NotTo(o.HaveOccurred())
+				e2e.Logf("node %s readyStatus: %s", nodeName, readyStatus)
+				if readyStatus != "True" {
+					return false, nil
+				}
+				taints, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.spec.taints}").Output()
+				o.Expect(err).NotTo(o.HaveOccurred())
+				o.Expect(taints).ShouldNot(o.ContainSubstring("uninitialized"))
+			}
+			e2e.Logf("All nodes are ready and have no uninitialized taints ...")
+			return true, nil
+		})
+		exutil.AssertWaitPollNoErr(err, "some nodes are not ready within 1 minute")
+	}
+}
+
+// WaitForMachineFailed checks that all the machines in a MachineSet are Failed
+func WaitForMachineFailed(oc *exutil.CLI, machineSetName string) {
+	e2e.Logf("Wait for machines to go into Failed phase")
+	err := wait.Poll(30*time.Second, 300*time.Second, func() (bool, error) {
+		machineNames := GetMachineNamesFromMachineSet(oc, machineSetName)
+		if len(machineNames) == 0 {
+			e2e.Logf("no machine for machineset %s", machineSetName)
+			return false, nil
+		}
+		for _, machine := range machineNames {
+			output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, machine, "-n", "openshift-machine-api", "-o=jsonpath={.status.phase}").Output()
+			if output != "Failed" {
+				e2e.Logf("machine %s is not in Failed phase; waiting up to 30 seconds ...", machine)
+				return false, nil
+			}
+		}
+		e2e.Logf("machines are in Failed phase")
+		return true, nil
+	})
+	exutil.AssertWaitPollNoErr(err, "Check machines phase failed")
+}
+
+// WaitForMachineProvisioned checks that all the machines in a MachineSet are Provisioned
+func WaitForMachineProvisioned(oc *exutil.CLI, machineSetName string) {
+	e2e.Logf("Wait for machine to go into Provisioned phase")
+	err := wait.Poll(60*time.Second, 300*time.Second, func() (bool, error) {
+		output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machineSetName, "-o=jsonpath={.items[0].status.phase}").Output()
+		if output != "Provisioned" {
+			e2e.Logf("machine is not in Provisioned phase; waiting up to 60 seconds ...")
+			return false, nil
+		}
+		e2e.Logf("machine is in Provisioned phase")
+		return true, nil
+	})
+	exutil.AssertWaitPollNoErr(err, "Check machine phase failed")
+}
+
+// WaitForMachinesDisapper checks that all the machines in a MachineSet have disappeared
+func WaitForMachinesDisapper(oc *exutil.CLI, machineSetName string) {
+	e2e.Logf("Waiting for the machines to disappear ...")
+	err := wait.Poll(60*time.Second, 1200*time.Second, func() (bool, error) {
+		machineNames, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "-o=jsonpath={.items[*].metadata.name}", "-l", "machine.openshift.io/cluster-api-machineset="+machineSetName, "-n", MachineAPINamespace).Output()
+		if machineNames != "" {
+			e2e.Logf("Some machines have not disappeared yet; waiting up to 1 minute ...")
+			return false, nil
+		}
+		e2e.Logf("All machines have disappeared")
+		return true, nil
+	})
+	exutil.AssertWaitPollNoErr(err, "Wait for machines to disappear failed.")
+}
+
+// WaitForMachinesRunningByLabel checks that all the machines with the specific labels are Running
+func WaitForMachinesRunningByLabel(oc *exutil.CLI, machineNumber int, labels string) []string {
+	e2e.Logf("Waiting for the
machines Running ...") + err := wait.Poll(60*time.Second, 960*time.Second, func() (bool, error) { + msg, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "-l", labels, "-o=jsonpath={.items[*].status.phase}", "-n", MachineAPINamespace).Output() + machinesRunning := strings.Count(msg, "Running") + if machinesRunning == machineNumber { + e2e.Logf("Expected %v machines are Running", machineNumber) + return true, nil + } + e2e.Logf("Expected %v machine are not Running yet and waiting up to 1 minutes ...", machineNumber) + return false, nil + }) + exutil.AssertWaitPollNoErr(err, "Wait machine running failed.") + msg, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "-l", labels, "-o=jsonpath={.items[*].metadata.name}", "-n", MachineAPINamespace).Output() + return strings.Split(msg, " ") +} + +// WaitForMachineRunningByField check if the machine is Running by field +func WaitForMachineRunningByField(oc *exutil.CLI, field string, fieldValue string, labels string) string { + e2e.Logf("Waiting for the machine Running ...") + var newMachineName string + err := wait.Poll(60*time.Second, 960*time.Second, func() (bool, error) { + msg, err2 := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "-l", labels, "-o=jsonpath={.items[*].metadata.name}", "-n", MachineAPINamespace).Output() + if err2 != nil { + e2e.Logf("The server was unable to return a response and waiting up to 1 minutes ...") + return false, nil + } + for _, machineName := range strings.Split(msg, " ") { + phase, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, machineName, "-o=jsonpath={.status.phase}", "-n", MachineAPINamespace).Output() + machineFieldValue, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, machineName, field, "-n", MachineAPINamespace).Output() + if phase == "Running" && machineFieldValue == fieldValue { + e2e.Logf("The machine with field %s = %s is Running %s", field, fieldValue, machineName) + newMachineName = machineName + return true, nil + } + } + e2e.Logf("The machine with field %s = %s is not Running and waiting up to 1 minutes ...", field, fieldValue) + return false, nil + }) + exutil.AssertWaitPollNoErr(err, "Wait machine Running failed.") + return newMachineName +} + +// WaitForMachineRunningBySuffix check if the machine is Running by suffix +func WaitForMachineRunningBySuffix(oc *exutil.CLI, machineNameSuffix string, labels string) string { + e2e.Logf("Waiting for the machine Running ...") + var newMachineName string + err := wait.Poll(60*time.Second, 960*time.Second, func() (bool, error) { + msg, err2 := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "-l", labels, "-o=jsonpath={.items[*].metadata.name}", "-n", MachineAPINamespace).Output() + if err2 != nil { + e2e.Logf("The server was unable to return a response and waiting up to 1 minutes ...") + return false, nil + } + for _, machineName := range strings.Split(msg, " ") { + phase, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, machineName, "-o=jsonpath={.status.phase}", "-n", MachineAPINamespace).Output() + if phase == "Running" && strings.HasSuffix(machineName, machineNameSuffix) { + e2e.Logf("The machine with suffix %s is Running %s", machineNameSuffix, machineName) + newMachineName = machineName + return true, nil + } + } + e2e.Logf("The machine with suffix %s is not Running and waiting up to 1 minutes ...", machineNameSuffix) + return false, nil + }) + exutil.AssertWaitPollNoErr(err, "Wait machine Running failed.") + return newMachineName +} + 
+// WaitForMachineRunningByName checks whether the machine with the given name is Running
+func WaitForMachineRunningByName(oc *exutil.CLI, machineName string) {
+	e2e.Logf("Waiting for %s machine Running ...", machineName)
+	err := wait.Poll(60*time.Second, 960*time.Second, func() (bool, error) {
+		phase, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, machineName, "-o=jsonpath={.status.phase}", "-n", MachineAPINamespace).Output()
+		if phase == "Running" {
+			e2e.Logf("The machine %s is Running", machineName)
+			return true, nil
+		}
+		e2e.Logf("The machine %s is not Running yet; waiting up to 1 minute ...", machineName)
+		return false, nil
+	})
+	exutil.AssertWaitPollNoErr(err, "Wait machine Running failed.")
+}
+
+// WaitForMachineDisappearBySuffix checks whether the machine with the given suffix has disappeared
+func WaitForMachineDisappearBySuffix(oc *exutil.CLI, machineNameSuffix string, labels string) {
+	e2e.Logf("Waiting for the machine to disappear by suffix ...")
+	err := wait.Poll(60*time.Second, 1800*time.Second, func() (bool, error) {
+		msg, err2 := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "-l", labels, "-o=jsonpath={.items[*].metadata.name}", "-n", MachineAPINamespace).Output()
+		if err2 != nil {
+			e2e.Logf("The server was unable to return a response; waiting up to 1 minute ...")
+			return false, nil
+		}
+		for _, machineName := range strings.Split(msg, " ") {
+			if strings.HasSuffix(machineName, machineNameSuffix) {
+				e2e.Logf("The machine %s has not disappeared yet; waiting up to 1 minute ...", machineName)
+				return false, nil
+			}
+		}
+		e2e.Logf("The machine with suffix %s has disappeared", machineNameSuffix)
+		return true, nil
+	})
+	exutil.AssertWaitPollNoErr(err, "Wait machine disappear by suffix failed.")
+}
+
+// WaitForMachineDisappearBySuffixAndField checks whether the machine with the given suffix and field value has disappeared
+func WaitForMachineDisappearBySuffixAndField(oc *exutil.CLI, machineNameSuffix string, field string, fieldValue string, labels string) {
+	e2e.Logf("Waiting for the machine to disappear by suffix and field ...")
+	err := wait.Poll(60*time.Second, 1800*time.Second, func() (bool, error) {
+		msg, err2 := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "-l", labels, "-o=jsonpath={.items[*].metadata.name}", "-n", MachineAPINamespace).Output()
+		if err2 != nil {
+			e2e.Logf("The server was unable to return a response; waiting up to 1 minute ...")
+			return false, nil
+		}
+		for _, machineName := range strings.Split(msg, " ") {
+			machineFieldValue, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, machineName, field, "-n", MachineAPINamespace).Output()
+			if strings.HasSuffix(machineName, machineNameSuffix) && machineFieldValue == fieldValue {
+				e2e.Logf("The machine %s has not disappeared yet; waiting up to 1 minute ...", machineName)
+				return false, nil
+			}
+		}
+		e2e.Logf("The machine with suffix %s and %s = %s has disappeared", machineNameSuffix, field, fieldValue)
+		return true, nil
+	})
+	exutil.AssertWaitPollNoErr(err, "Wait machine disappear by suffix and field failed.")
+}
+
+// WaitForMachineDisappearByName checks whether the machine with the given name has disappeared
+func WaitForMachineDisappearByName(oc *exutil.CLI, machineName string) {
+	e2e.Logf("Waiting for the machine to disappear by name ...")
+	err := wait.Poll(60*time.Second, 1800*time.Second, func() (bool, error) {
+		output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, machineName, "-n", MachineAPINamespace).Output()
+		if strings.Contains(output, "not found") {
+			e2e.Logf("machine %s has disappeared", machineName)
+			return true, nil
+		}
+		e2e.Logf("machine %s has not disappeared yet; waiting up to 1 minute ...", machineName)
+		return false, nil
+	})
+	exutil.AssertWaitPollNoErr(err, "Wait machine disappear by name failed.")
+}
+
+// SkipConditionally checks the total number of Running machines; if it is greater than zero, we assume machines are managed by the machine API operator
+func SkipConditionally(oc *exutil.CLI) {
+	msg, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "--no-headers", "-n", MachineAPINamespace).Output()
+	machinesRunning := strings.Count(msg, "Running")
+	if machinesRunning == 0 {
+		g.Skip("Expect at least one Running machine. Found none!!!")
+	}
+}
+
+// UseSpotInstanceWorkersCheck checks if the cluster uses spot instance workers
+func UseSpotInstanceWorkersCheck(oc *exutil.CLI) bool {
+	machines, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("machines.machine.openshift.io", "-o=jsonpath={.items[*].metadata.name}", "-n", "openshift-machine-api", "-l", "machine.openshift.io/interruptible-instance=").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	if machines != "" {
+		e2e.Logf("\nSpot instance workers are used\n")
+		return true
+	}
+	return false
+}
+
+// GetOneOutpostMachineSet returns one outpost machineset name
+func GetOneOutpostMachineSet(oc *exutil.CLI) string {
+	outpostMachines, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", "node-role.kubernetes.io/worker", "-l", "topology.ebs.csi.aws.com/outpost-id", "-o=jsonpath={.items[*].metadata.annotations.machine\\.openshift\\.io\\/machine}").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	oneOutpostMachine := strings.Split(outpostMachines, " ")[0]
+	start := strings.Index(oneOutpostMachine, "openshift-machine-api/")
+	suffix := strings.LastIndex(oneOutpostMachine, "-")
+	oneOutpostMachineSet := oneOutpostMachine[start+22 : suffix]
+	e2e.Logf("oneOutpostMachineSet: %s", oneOutpostMachineSet)
+	return oneOutpostMachineSet
+}
diff --git a/test/util/clusterinfra/machineset_helper.go b/test/util/clusterinfra/machineset_helper.go
new file mode 100644
index 000000000..1c4a6a1e4
--- /dev/null
+++ b/test/util/clusterinfra/machineset_helper.go
@@ -0,0 +1,108 @@
+package clusterinfra
+
+import (
+	"fmt"
+	"io/ioutil"
+	"strconv"
+	"time"
+
+	g "github.com/onsi/ginkgo/v2"
+	o "github.com/onsi/gomega"
+	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
+
+	"github.com/tidwall/sjson"
+	"k8s.io/apimachinery/pkg/util/wait"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+// MachineSetwithLabelDescription describes a machineset with labels used to place pods on specific machines
+type MachineSetwithLabelDescription struct {
+	Name           string
+	Replicas       int
+	Metadatalabels string
+	Diskparams     string
+}
+
+// CreateMachineSet creates a new machineset
+func (ms *MachineSetwithLabelDescription) CreateMachineSet(oc *exutil.CLI) {
+	e2e.Logf("Creating a new MachineSet with labels ...")
+	machinesetName := GetRandomMachineSetName(oc)
+	machineSetJSON, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachineset, machinesetName, "-n", MachineAPINamespace, "-o=json").OutputToFile("machineset.json")
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+	bytes, _ := ioutil.ReadFile(machineSetJSON)
+	machinesetjsonWithName, _ := sjson.Set(string(bytes), "metadata.name", ms.Name)
+	machinesetjsonWithSelector, _ := sjson.Set(machinesetjsonWithName, "spec.selector.matchLabels.machine\\.openshift\\.io/cluster-api-machineset", ms.Name)
+	machinesetjsonWithTemplateLabel, _ := sjson.Set(machinesetjsonWithSelector, "spec.template.metadata.labels.machine\\.openshift\\.io/cluster-api-machineset", ms.Name)
+	machinesetjsonWithReplicas, _ := sjson.Set(machinesetjsonWithTemplateLabel, "spec.replicas", ms.Replicas)
+	// Adding labels to the machineset so that pods can be scheduled to specific machines
+	machinesetjsonWithMetadataLabels, _ := sjson.Set(machinesetjsonWithReplicas, "spec.template.spec.metadata.labels.nodeName", ms.Metadatalabels)
+	machinesetjsonWithDiskParams, _ := sjson.Set(machinesetjsonWithMetadataLabels, "spec.template.spec.providerSpec.value.ultraSSDCapability", ms.Diskparams)
+	err = ioutil.WriteFile(machineSetJSON, []byte(machinesetjsonWithDiskParams), 0644)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	if err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", machineSetJSON).Execute(); err != nil {
+		ms.DeleteMachineSet(oc)
+		o.Expect(err).NotTo(o.HaveOccurred())
+	} else {
+		e2e.Logf("Checking machine status ...")
+		FailedStatus := WaitForMachineFailedToSkip(oc, ms.Name)
+		e2e.Logf("FailedStatus: %v\n", FailedStatus)
+		if FailedStatus == nil {
+			ms.DeleteMachineSet(oc)
+			g.Skip("Something is wrong: invalid configuration for machines, not worth continuing")
+		}
+		if FailedStatus.Error() != "timed out waiting for the condition" {
+			e2e.Logf("Check the machineset yaml; the machine is in Failed status ...")
+			ms.DeleteMachineSet(oc)
+			g.Skip("Failed due to invalid configuration for machines, not worth continuing")
+		}
+		ms.AssertLabelledMachinesRunningDeleteIfNot(oc, ms.Replicas, ms.Name)
+	}
+}
+
+// DeleteMachineSet deletes a machineset
+func (ms *MachineSetwithLabelDescription) DeleteMachineSet(oc *exutil.CLI) error {
+	e2e.Logf("Deleting a MachineSet ...")
+	return oc.AsAdmin().WithoutNamespace().Run("delete").Args(MapiMachineset, ms.Name, "-n", MachineAPINamespace).Execute()
+}
+
+// AssertLabelledMachinesRunningDeleteIfNot checks that the labeled machines are running; if not, it deletes the machineset
+func (ms *MachineSetwithLabelDescription) AssertLabelledMachinesRunningDeleteIfNot(oc *exutil.CLI, machineNumber int, machineSetName string) {
+	e2e.Logf("Waiting for the machines Running ...")
+	pollErr := wait.Poll(60*time.Second, 920*time.Second, func() (bool, error) {
+		msg, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachineset, machineSetName, "-o=jsonpath={.status.readyReplicas}", "-n", MachineAPINamespace).Output()
+		machinesRunning, _ := strconv.Atoi(msg)
+		if machinesRunning != machineNumber {
+			e2e.Logf("Expected %v machines are not Running yet; waiting up to 1 minute ...", machineNumber)
+			return false, nil
+		}
+		e2e.Logf("Expected %v machines are Running", machineNumber)
+		return true, nil
+	})
+	if pollErr != nil {
+		e2e.Logf("Deleting a MachineSet ...")
+		ms.DeleteMachineSet(oc)
+		exutil.AssertWaitPollNoErr(pollErr, fmt.Sprintf("Expected %v machines are still not Running after waiting up to 12 minutes ...", machineNumber))
+	}
+	e2e.Logf("All machines are Running ...")
+}
+
+// WaitForMachineFailedToSkip waits briefly for machines to enter the Failed phase so tests with invalid configurations can be skipped early
+func WaitForMachineFailedToSkip(oc *exutil.CLI, machineSetName string) error {
+	e2e.Logf("Wait for machine to go into Failed phase")
+	err := wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
+		output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machineSetName, "-o=jsonpath={.items[0].status.phase}").Output()
+		if output != "Failed" {
+			e2e.Logf("machine is not in Failed phase; waiting up to 10 seconds ...")
+			return false, nil
+		}
+		e2e.Logf("machine is in Failed phase")
+		return true, nil
+	})
+
+	return err
+}
diff --git a/test/util/clusterinfra/machineset_nonspot.go b/test/util/clusterinfra/machineset_nonspot.go
new file mode 100644
index 000000000..72cd1978f
--- /dev/null
+++ b/test/util/clusterinfra/machineset_nonspot.go
@@ -0,0 +1,153 @@
+package clusterinfra
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	g "github.com/onsi/ginkgo/v2"
+	o "github.com/onsi/gomega"
+	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
+	"github.com/tidwall/sjson"
+
+	"k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/wait"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+// MachineSetNonSpotDescription describes a machineset without spot machines
+type MachineSetNonSpotDescription struct {
+	Name     string
+	Replicas int
+}
+
+// CreateMachineSet creates a new machineset
+func (ms *MachineSetNonSpotDescription) CreateMachineSet(oc *exutil.CLI) {
+	g.By("Creating a new MachineSet with dedicated machines ...")
+	machinesetName := GetRandomMachineSetName(oc)
+	machineSetJSON, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachineset, machinesetName, "-n", MachineAPINamespace, "-o=json").OutputToFile("machineset.json")
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+	bytes, _ := ioutil.ReadFile(machineSetJSON)
+	machinesetjsonWithName, _ := sjson.Set(string(bytes), "metadata.name", ms.Name)
+	machinesetjsonWithSelector, _ := sjson.Set(machinesetjsonWithName, "spec.selector.matchLabels.machine\\.openshift\\.io/cluster-api-machineset", ms.Name)
+	machinesetjsonWithTemplateLabel, _ := sjson.Set(machinesetjsonWithSelector, "spec.template.metadata.labels.machine\\.openshift\\.io/cluster-api-machineset", ms.Name)
+	machinesetjsonWithReplicas, _ := sjson.Set(machinesetjsonWithTemplateLabel, "spec.replicas", ms.Replicas)
+	// Removing the spot option if present; nothing happens if it is not present
+	machinesetjsonNonSpot := strings.ReplaceAll(machinesetjsonWithReplicas, "\"spotVMOptions\": {},", "") //azure
+	machinesetjsonNonSpot = strings.ReplaceAll(machinesetjsonNonSpot, "\"spotMarketOptions\": {},", "")   //aws
+	machinesetjsonNonSpot = strings.ReplaceAll(machinesetjsonNonSpot, "\"preemptible: true\",", "")       //gcp
+
+	err = ioutil.WriteFile(machineSetJSON, []byte(machinesetjsonNonSpot), 0o644)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	if err := oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", machineSetJSON).Execute(); err != nil {
+		ms.DeleteMachineSet(oc)
+		o.Expect(err).NotTo(o.HaveOccurred())
+	}
+	g.By("Checking machine status ...")
+	FailedStatus := WaitForMachineFailedToSkip(oc, ms.Name)
+	e2e.Logf("FailedStatus: %v\n", FailedStatus)
+	if FailedStatus == nil {
+		ms.DeleteMachineSet(oc)
+		g.Skip("Something is wrong: invalid configuration for machines, not worth continuing")
+	}
+	if FailedStatus.Error() != "timed out waiting for the condition" {
+		e2e.Logf("Check the machineset yaml; the machine is in Failed status ...")
+		ms.DeleteMachineSet(oc)
+		g.Skip("Failed due to invalid configuration for machines, not worth continuing")
+	}
+	ms.DeleteMachinesetIfDedicatedMachinesAreNotRunning(oc, ms.Replicas, ms.Name)
+}
+
+// CreateMachineSetBasedOnExisting creates a non-spot MachineSet based on an existing one
+func (ms *MachineSetNonSpotDescription) CreateMachineSetBasedOnExisting(oc *exutil.CLI, existingMset string, waitForMachinesRunning bool) {
+	e2e.Logf("Creating MachineSet/%s based on
MachineSet/%s", ms.Name, existingMset) + existingMsetJson, err := oc. + AsAdmin(). + WithoutNamespace(). + Run("get"). + Args(MapiMachineset, existingMset, "-n", MachineAPINamespace, "-o=json"). + OutputToFile("machineset.json") + o.Expect(err).NotTo(o.HaveOccurred()) + defer func() { + _ = os.Remove(existingMsetJson) + }() + + existingMsetJsonBytes, err := os.ReadFile(existingMsetJson) + o.Expect(err).NotTo(o.HaveOccurred()) + existingMsetJsonStr, err := sjson.Set(string(existingMsetJsonBytes), "metadata.name", ms.Name) + o.Expect(err).NotTo(o.HaveOccurred()) + existingMsetJsonStr, err = sjson.Set(existingMsetJsonStr, "spec.selector.matchLabels.machine\\.openshift\\.io/cluster-api-machineset", ms.Name) + o.Expect(err).NotTo(o.HaveOccurred()) + existingMsetJsonStr, err = sjson.Set(existingMsetJsonStr, "spec.template.metadata.labels.machine\\.openshift\\.io/cluster-api-machineset", ms.Name) + o.Expect(err).NotTo(o.HaveOccurred()) + existingMsetJsonStr, err = sjson.Set(existingMsetJsonStr, "spec.replicas", ms.Replicas) + o.Expect(err).NotTo(o.HaveOccurred()) + // Disable spot options for Azure + existingMsetJsonStr = strings.ReplaceAll(existingMsetJsonStr, "\"spotVMOptions\": {},", "") + // Disable spot options for AWS + existingMsetJsonStr = strings.ReplaceAll(existingMsetJsonStr, "\"spotMarketOptions\": {},", "") + // Disable spot options for GCP + existingMsetJsonStr = strings.ReplaceAll(existingMsetJsonStr, "\"preemptible: true\",", "") + err = os.WriteFile(existingMsetJson, []byte(existingMsetJsonStr), 0644) + o.Expect(err).NotTo(o.HaveOccurred()) + + err = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", existingMsetJson).Execute() + if err != nil { + errDeleteMset := ms.DeleteMachineSet(oc) + e2e.Failf("Error creating/deleting machineset: %v", errors.NewAggregate([]error{err, errDeleteMset})) + } + if waitForMachinesRunning { + WaitForMachinesRunning(oc, ms.Replicas, ms.Name) + } + return +} + +// DeleteMachineSet delete a machineset +func (ms *MachineSetNonSpotDescription) DeleteMachineSet(oc *exutil.CLI) error { + exutil.By("Deleting a MachineSet ...") + return oc.AsAdmin().WithoutNamespace().Run("delete").Args(MapiMachineset, ms.Name, "-n", MachineAPINamespace).Execute() +} + +// DeleteMachinesetIfDedicatedMachinesAreNotRunning check labeled machines are running if not delete machineset +func (ms *MachineSetNonSpotDescription) DeleteMachinesetIfDedicatedMachinesAreNotRunning(oc *exutil.CLI, machineNumber int, machineSetName string) { + g.By("Waiting for the machines Running ...") + pollErr := wait.Poll(60*time.Second, 920*time.Second, func() (bool, error) { + msg, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachineset, machineSetName, "-o=jsonpath={.status.readyReplicas}", "-n", MachineAPINamespace).Output() + machinesRunning, _ := strconv.Atoi(msg) + if machinesRunning != machineNumber { + e2e.Logf("Expected %v machine are not Running yet and waiting up to 1 minutes ...", machineNumber) + return false, nil + } + e2e.Logf("Expected %v machines are Running", machineNumber) + return true, nil + }) + if pollErr != nil { + ms.DeleteMachineSet(oc) + exutil.AssertWaitPollNoErr(pollErr, fmt.Sprintf("Expected %v machines are not Running after waiting up to 12 minutes ...", machineNumber)) + } + g.By("All machines are Running ...") +} + +// WaitForDedicatedMachineFailedToSkip for machines if failed to help skip test early +func WaitForDedicatedMachineFailedToSkip(oc *exutil.CLI, machineSetName string) error { + g.By("Wait for machine to go into Failed phase") + err := 
wait.Poll(10*time.Second, 60*time.Second, func() (bool, error) {
+		output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+machineSetName, "-o=jsonpath={.items[0].status.phase}").Output()
+		if output != "Failed" {
+			g.By("machine is not in Failed phase and waiting up to 10 seconds ...")
+			return false, nil
+		}
+		g.By("machine is in Failed phase")
+		return true, nil
+	})
+
+	return err
+}
diff --git a/test/util/clusters.go b/test/util/clusters.go
new file mode 100644
index 000000000..ad8245a7b
--- /dev/null
+++ b/test/util/clusters.go
@@ -0,0 +1,411 @@
+package util
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+
+	g "github.com/onsi/ginkgo/v2"
+	o "github.com/onsi/gomega"
+	configv1 "github.com/openshift/api/config/v1"
+	"github.com/tidwall/gjson"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+const (
+	AKSNodeLabel = "kubernetes.azure.com/cluster"
+)
+
+// GetPullSec extracts the pull secret from the cluster
+func GetPullSec(oc *CLI, dirname string) (err error) {
+	if err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/pull-secret", "-n", "openshift-config", "--to="+dirname, "--confirm").Execute(); err != nil {
+		return fmt.Errorf("extract pull-secret failed: %v", err)
+	}
+	return
+}
+
+// GetMirrorRegistry returns the mirror registry from the ICSP
+func GetMirrorRegistry(oc *CLI) (registry string, err error) {
+	if registry, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("ImageContentSourcePolicy",
+		"-o", "jsonpath={.items[0].spec.repositoryDigestMirrors[0].mirrors[0]}").Output(); err == nil {
+		registry, _, _ = strings.Cut(registry, "/")
+	} else {
+		err = fmt.Errorf("failed to acquire mirror registry from ICSP: %v", err)
+	}
+	return
+}
+
+// GetUserCAToFile dumps the user certificate from the user-ca-bundle configmap to a file
+func GetUserCAToFile(oc *CLI, filename string) (err error) {
+	cert, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("configmap", "-n", "openshift-config",
+		"user-ca-bundle", "-o", "jsonpath={.data.ca-bundle\\.crt}").Output()
+	if err != nil {
+		return fmt.Errorf("failed to acquire user ca bundle from configmap: %v", err)
+	} else {
+		err = os.WriteFile(filename, []byte(cert), 0644)
+		if err != nil {
+			return fmt.Errorf("failed to dump cert to file: %v", err)
+		}
+		return
+	}
+}
+
+// GetClusterVersion returns the cluster version as string value (Ex: 4.8) and cluster build (Ex: 4.8.0-0.nightly-2021-09-28-165247)
+func GetClusterVersion(oc *CLI) (string, string, error) {
+	clusterBuild, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "-o", "jsonpath={..desired.version}").Output()
+	if err != nil {
+		return "", "", err
+	}
+	splitValues := strings.Split(clusterBuild, ".")
+	clusterVersion := splitValues[0] + "."
+ splitValues[1] + return clusterVersion, clusterBuild, err +} + +// GetReleaseImage returns the release image as string value (Ex: registry.ci.openshift.org/ocp/release@sha256:b13971e61312f5dddd6435ccf061ac1a8447285a85828456edcd4fc2504cfb8f) +func GetReleaseImage(oc *CLI) (string, error) { + releaseImage, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "-o", "jsonpath={..desired.image}").Output() + if err != nil { + return "", err + } + return releaseImage, nil +} + +// GetInfraID returns the infra id +func GetInfraID(oc *CLI) (string, error) { + infraID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o", "jsonpath='{.status.infrastructureName}'").Output() + if err != nil { + return "", err + } + return strings.Trim(infraID, "'"), err +} + +// GetGcpProjectID returns the gcp project id +func GetGcpProjectID(oc *CLI) (string, error) { + projectID, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o", "jsonpath='{.status.platformStatus.gcp.projectID}'").Output() + if err != nil { + return "", err + } + return strings.Trim(projectID, "'"), err +} + +// GetClusterPrefixName return Cluster Prefix Name +func GetClusterPrefixName(oc *CLI) string { + output, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("route", "console", "-n", "openshift-console", "-o=jsonpath={.spec.host}").Output() + if err != nil { + e2e.Logf("Get cluster console route failed with err %v .", err) + return "" + } + return strings.Split(output, ".")[2] +} + +// SkipBaselineCaps skip the test if cluster has no required resources. +// sets is comma separated list of baselineCapabilitySets to skip. +// for example: "None, v4.11" +func SkipBaselineCaps(oc *CLI, sets string) { + baselineCapabilitySet, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("clusterversion", "version", "-o=jsonpath={.spec.capabilities.baselineCapabilitySet}").Output() + if err != nil { + e2e.Failf("get baselineCapabilitySet failed err %v .", err) + } + sets = strings.ReplaceAll(sets, " ", "") + for _, s := range strings.Split(sets, ",") { + if strings.Contains(baselineCapabilitySet, s) { + g.Skip("Skip for cluster with baselineCapabilitySet = '" + baselineCapabilitySet + "' matching filter: " + s) + } + } +} + +// SkipNoCapabilities skip the test if the cluster has no one capability +func SkipNoCapabilities(oc *CLI, capability string) { + clusterVersion, err := oc.AdminConfigClient().ConfigV1().ClusterVersions().Get(context.Background(), "version", metav1.GetOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + hasCapability := func(capabilities []configv1.ClusterVersionCapability, checked string) bool { + cap := configv1.ClusterVersionCapability(checked) + for _, capability := range capabilities { + if capability == cap { + return true + } + } + return false + } + if clusterVersion.Status.Capabilities.KnownCapabilities != nil && + hasCapability(clusterVersion.Status.Capabilities.KnownCapabilities, capability) && + (clusterVersion.Status.Capabilities.EnabledCapabilities == nil || + !hasCapability(clusterVersion.Status.Capabilities.EnabledCapabilities, capability)) { + g.Skip(fmt.Sprintf("the cluster has no %v and skip it", capability)) + } +} + +// SkipIfCapEnabled skips the test if a capability is enabled +func SkipIfCapEnabled(oc *CLI, capability string) { + clusterversion, err := oc. + AdminConfigClient(). + ConfigV1(). + ClusterVersions(). 
+		Get(context.Background(), "version", metav1.GetOptions{})
+	o.Expect(err).NotTo(o.HaveOccurred())
+	var capKnown bool
+	for _, knownCap := range clusterversion.Status.Capabilities.KnownCapabilities {
+		if capability == string(knownCap) {
+			capKnown = true
+			break
+		}
+	}
+	if !capKnown {
+		g.Skip(fmt.Sprintf("Will skip as capability %s is unknown (i.e. cannot be disabled in the first place)", capability))
+	}
+	for _, enabledCap := range clusterversion.Status.Capabilities.EnabledCapabilities {
+		if capability == string(enabledCap) {
+			g.Skip(fmt.Sprintf("Will skip as capability %s is enabled", capability))
+		}
+	}
+}
+
+// SkipNoOLMCore skips the test if the cluster has no OLM component.
+// Since 4.15, OLM is an optional core component, which means some profiles ship without it,
+// so neither OLM cases nor optional-operator cases can run on such clusters.
+func SkipNoOLMCore(oc *CLI) {
+	SkipNoCapabilities(oc, "OperatorLifecycleManager")
+}
+
+func SkipNoOLMv1Core(oc *CLI) {
+	SkipNoCapabilities(oc, "OperatorLifecycleManagerV1")
+}
+
+// SkipNoBuild skips the test if the cluster has no Build component
+func SkipNoBuild(oc *CLI) {
+	SkipNoCapabilities(oc, "Build")
+}
+
+// SkipNoDeploymentConfig skips the test if the cluster has no DeploymentConfig component
+func SkipNoDeploymentConfig(oc *CLI) {
+	SkipNoCapabilities(oc, "DeploymentConfig")
+}
+
+// SkipNoImageRegistry skips the test if the cluster has no ImageRegistry component
+func SkipNoImageRegistry(oc *CLI) {
+	SkipNoCapabilities(oc, "ImageRegistry")
+}
+
+// IsTechPreviewNoUpgrade checks if a cluster is a TechPreviewNoUpgrade cluster
+func IsTechPreviewNoUpgrade(oc *CLI) bool {
+	featureGate, err := oc.AdminConfigClient().ConfigV1().FeatureGates().Get(context.Background(), "cluster", metav1.GetOptions{})
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			return false
+		}
+		o.Expect(err).NotTo(o.HaveOccurred(), "could not retrieve feature-gate: %v", err)
+	}
+
+	return featureGate.Spec.FeatureSet == configv1.TechPreviewNoUpgrade
+}
+
+// GetAWSClusterRegion returns the AWS region of the cluster
+func GetAWSClusterRegion(oc *CLI) (string, error) {
+	region, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.aws.region}").Output()
+	return region, err
+}
+
+// SkipNoDefaultSC skips the test if the cluster has no default storageclass or has more than one default storageclass
+func SkipNoDefaultSC(oc *CLI) {
+	allSCRes, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("sc", "-o", "json").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	defaultSCRes := gjson.Get(allSCRes, "items.#(metadata.annotations.storageclass\\.kubernetes\\.io\\/is-default-class=true)#.metadata.name")
+	e2e.Logf("The default storageclass list: %s", defaultSCRes)
+	defaultSCNub := len(defaultSCRes.Array())
+	if defaultSCNub != 1 {
+		e2e.Logf("oc get sc:\n%s", allSCRes)
+		g.Skip("Skip for unexpected default storageclass!")
+	}
+}
+
+// SkipIfPlatformTypeNot skips all platforms other than the supported ones
+// platforms is a comma-separated list of allowed platforms
+// for example: "gcp, aws"
+func SkipIfPlatformTypeNot(oc *CLI, platforms string) {
+	platformType, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.type}").Output()
+	if err != nil {
+		e2e.Failf("get infrastructure platformStatus type failed err %v .", err)
+	}
+	if !strings.Contains(strings.ToLower(platforms), strings.ToLower(platformType)) {
+		g.Skip("Skip for non-"
+ platforms + " cluster: " + platformType) + } +} + +// skip platform +func SkipIfPlatformType(oc *CLI, platforms string) { + platformType, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.type}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if strings.Contains(strings.ToLower(platforms), strings.ToLower(platformType)) { + g.Skip("Skip for " + platforms + " cluster: " + platformType) + } +} + +// IsHypershiftHostedCluster +func IsHypershiftHostedCluster(oc *CLI) bool { + topology, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("infrastructures.config.openshift.io", "cluster", "-o=jsonpath={.status.controlPlaneTopology}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + e2e.Logf("topology is %s", topology) + if topology == "" { + status, _ := oc.WithoutNamespace().AsAdmin().Run("get").Args("infrastructures.config.openshift.io", "cluster", "-o=jsonpath={.status}").Output() + e2e.Logf("cluster status %s", status) + e2e.Failf("failure: controlPlaneTopology returned empty") + } + return strings.Compare(topology, "External") == 0 +} + +// IsRosaCluster +func IsRosaCluster(oc *CLI) bool { + product, _ := oc.WithoutNamespace().AsAdmin().Run("get").Args("clusterclaims/product.open-cluster-management.io", "-o=jsonpath={.spec.value}").Output() + return strings.Compare(product, "ROSA") == 0 +} + +// IsSTSCluster determines if an AWS cluster is using STS +func IsSTSCluster(oc *CLI) bool { + return IsWorkloadIdentityCluster(oc) +} + +// IsWorkloadIdentityCluster judges whether the Azure/GCP cluster is using the Workload Identity +func IsWorkloadIdentityCluster(oc *CLI) bool { + serviceAccountIssuer, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("authentication", "cluster", "-o=jsonpath={.spec.serviceAccountIssuer}").Output() + o.Expect(err).ShouldNot(o.HaveOccurred(), "Failed to get serviceAccountIssuer") + return len(serviceAccountIssuer) > 0 +} + +// GetOIDCProvider returns the OIDC provider for current cluster +func GetOIDCProvider(oc *CLI) (string, error) { + oidc, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("authentication.config", "cluster", "-o=jsonpath={.spec.serviceAccountIssuer}").Output() + if err != nil { + return "", err + } + return strings.TrimPrefix(oidc, "https://"), nil +} + +// Skip the test if there is not catalogsource/qe-app-registry in the cluster +func SkipMissingQECatalogsource(oc *CLI) { + output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("-n", "openshift-marketplace", "catalogsource", "qe-app-registry").Output() + if strings.Contains(output, "NotFound") || err != nil { + g.Skip("Skip the test since no catalogsource/qe-app-registry in the cluster") + } +} + +// Skip the test if default catsrc is disable +func SkipIfDisableDefaultCatalogsource(oc *CLI) { + output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("operatorhubs", "cluster", "-o=jsonpath={.spec.disableAllDefaultSources}").Output() + if output == "true" || err != nil { + g.Skip("Skip the test, the default catsrc is disable or don't have operatorhub resource") + } +} + +// IsInfrastructuresHighlyAvailable check if it is HighlyAvailable for infrastructures. Available for both classic OCP and the hosted cluster. 
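+// Illustrative usage:
+//
+//	if !IsInfrastructuresHighlyAvailable(oc) {
+//		g.Skip("single-replica infrastructure topology, skipping the disruption check")
+//	}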
+func IsInfrastructuresHighlyAvailable(oc *CLI) bool {
+	topology, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructures.config.openshift.io", "cluster", `-o=jsonpath={.status.infrastructureTopology}`).Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	e2e.Logf("infrastructures topology is %s", topology)
+	if topology == "" {
+		status, _ := oc.WithoutNamespace().AsAdmin().Run("get").Args("infrastructures.config.openshift.io", "cluster", "-o=jsonpath={.status}").Output()
+		e2e.Logf("cluster status %s", status)
+		e2e.Failf("failure: infrastructureTopology returned empty")
+	}
+	return strings.Compare(topology, "HighlyAvailable") == 0
+}
+
+// IsExternalOIDCCluster checks if the cluster is using external OIDC.
+func IsExternalOIDCCluster(oc *CLI) (bool, error) {
+	switch IsExternalOIDCClusterFlag {
+	case "yes":
+		e2e.Logf("it is an external oidc cluster")
+		return true, nil
+	case "no":
+		e2e.Logf("it is not an external oidc cluster")
+		return false, nil
+	default:
+		e2e.Logf("unknown whether it is an external oidc cluster or not; checking again")
+		authType, stdErr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("authentication/cluster", "-o=jsonpath={.spec.type}").Outputs()
+		if err != nil {
+			return false, fmt.Errorf("error checking if the cluster is using external OIDC: %v", stdErr)
+		}
+		e2e.Logf("Found authentication type used: %v", authType)
+		return authType == string(configv1.AuthenticationTypeOIDC), nil
+	}
+}
+
+// IsKeycloakExtOIDCCluster assumes the cluster uses external oidc auth but checks if the oidc issuer is Keycloak.
+func IsKeycloakExtOIDCCluster() bool {
+	if os.Getenv("KEYCLOAK_ISSUER") != "" && os.Getenv("KEYCLOAK_TEST_USERS") != "" && os.Getenv("KEYCLOAK_CLI_CLIENT_ID") != "" {
+		return true
+	}
+	return false
+}
+
+// IsOpenShiftCluster checks if the active cluster is OpenShift or a derivative
+func IsOpenShiftCluster(ctx context.Context, c corev1client.NamespaceInterface) (bool, error) {
+	switch _, err := c.Get(ctx, "openshift-controller-manager", metav1.GetOptions{}); {
+	case err == nil:
+		return true, nil
+	case apierrors.IsNotFound(err):
+		return false, nil
+	default:
+		return false, fmt.Errorf("unable to determine if we are running against an OpenShift cluster: %v", err)
+	}
+}
+
+// SkipOnOpenShiftNess skips the test if the cluster type doesn't match the expected type.
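+// Illustrative usage: a check that should only run on plain Kubernetes clusters:
+//
+//	SkipOnOpenShiftNess(false)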
+func SkipOnOpenShiftNess(expectOpenShift bool) { + switch IsKubernetesClusterFlag { + case "yes": + if expectOpenShift { + g.Skip("Expecting OpenShift but the active cluster is not, skipping the test") + } + // Treat both "no" and "unknown" as OpenShift + default: + if !expectOpenShift { + g.Skip("Expecting non-OpenShift but the active cluster is OpenShift, skipping the test") + } + } +} + +// IsAKSCluster checks if the active cluster is an AKS (Azure Kubernetes Service) cluster or not +func IsAKSCluster(ctx context.Context, oc *CLI) (bool, error) { + nodeList, err := oc.AdminKubeClient().CoreV1().Nodes().List(ctx, metav1.ListOptions{}) + if err != nil { + return false, fmt.Errorf("failed to list nodes: %w", err) + } + _, labelFound := nodeList.Items[0].Labels[AKSNodeLabel] + return labelFound, nil +} + +func CheckAKSCluster(ctx context.Context, oc *CLI) bool { + isAKS, err := IsAKSCluster(ctx, oc) + if err != nil { + e2e.Logf("failed to determine if the active cluster is AKS or not: %v, defaulting to non-AKS", err) + return false + } + return isAKS +} + +func SkipOnAKSNess(ctx context.Context, oc *CLI, expectAKS bool) { + isAKS := CheckAKSCluster(ctx, oc) + if isAKS && !expectAKS { + g.Skip("Expecting non-AKS but the active cluster is AKS, skip the test") + } + if !isAKS && expectAKS { + g.Skip("Expecting AKS but the active cluster is not, skip the test") + } +} + +// Skip for proxy platform +func SkipOnProxyCluster(oc *CLI) { + g.By("Check if cluster is a proxy platform") + httpProxy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy/cluster", "-o=jsonpath={.spec.httpProxy}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + httpsProxy, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("proxy/cluster", "-o=jsonpath={.spec.httpsProxy}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if len(httpProxy) != 0 || len(httpsProxy) != 0 { + g.Skip("Skip for proxy platform") + } +} diff --git a/test/util/container/docker_client.go b/test/util/container/docker_client.go new file mode 100644 index 000000000..e22a3a5fd --- /dev/null +++ b/test/util/container/docker_client.go @@ -0,0 +1,265 @@ +package container + +import ( + "bytes" + "context" + "fmt" + "io" + "os/exec" + "strings" + + e2e "k8s.io/kubernetes/test/e2e/framework" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/stdcopy" +) + +// contains check list contain one string +func contains(s []string, e string) bool { + for _, a := range s { + if strings.Contains(a, e) { + return true + } + } + return false +} + +// DockerCLI provides function to run the docker command +type DockerCLI struct { + CLI *client.Client + execPath string + ExecCommandPath string + globalArgs []string + commandArgs []string + finalArgs []string + verbose bool + stdin *bytes.Buffer + stdout io.Writer + stderr io.Writer + showInfo bool +} + +// NewDockerCLI initialize the docker cli framework +func NewDockerCLI() *DockerCLI { + newclient := &DockerCLI{} + cli, err := client.NewClientWithOpts(client.FromEnv) + if err != nil { + e2e.Failf("get docker client failed") + } + newclient.CLI = cli + newclient.execPath = "docker" + newclient.showInfo = true + return newclient +} + +// Run executes given docker command +func (c *DockerCLI) Run(commands ...string) *DockerCLI { + in, out, errout := &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{} + docker := &DockerCLI{ + execPath: c.execPath, + ExecCommandPath: c.ExecCommandPath, + } + 
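+	// Note: each Run call builds a fresh DockerCLI carrying over only the exec path and
+	// working directory, so arguments from a previous command never leak into the next.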
docker.globalArgs = commands
+	docker.stdin, docker.stdout, docker.stderr = in, out, errout
+	return docker.setOutput(c.stdout)
+}
+
+// setOutput overrides the default command output
+func (c *DockerCLI) setOutput(out io.Writer) *DockerCLI {
+	c.stdout = out
+	return c
+}
+
+// Args sets the additional arguments for the docker CLI command
+func (c *DockerCLI) Args(args ...string) *DockerCLI {
+	c.commandArgs = args
+	c.finalArgs = append(c.globalArgs, c.commandArgs...)
+	return c
+}
+
+func (c *DockerCLI) printCmd() string {
+	return strings.Join(c.finalArgs, " ")
+}
+
+// Output executes the command and returns stdout/stderr combined into one string
+func (c *DockerCLI) Output() (string, error) {
+	if c.verbose {
+		e2e.Logf("DEBUG: docker %s\n", c.printCmd())
+	}
+	cmd := exec.Command(c.execPath, c.finalArgs...)
+	if c.ExecCommandPath != "" {
+		e2e.Logf("using exec command path %s\n", c.ExecCommandPath)
+		cmd.Dir = c.ExecCommandPath
+	}
+	cmd.Stdin = c.stdin
+	if c.showInfo {
+		e2e.Logf("Running '%s %s'", c.execPath, strings.Join(c.finalArgs, " "))
+	}
+	out, err := cmd.CombinedOutput()
+	trimmed := strings.TrimSpace(string(out))
+	switch err.(type) {
+	case nil:
+		c.stdout = bytes.NewBuffer(out)
+		return trimmed, nil
+	case *exec.ExitError:
+		e2e.Logf("Error running %v:\n%s", cmd, trimmed)
+		return trimmed, &ExitError{ExitError: err.(*exec.ExitError), Cmd: c.execPath + " " + strings.Join(c.finalArgs, " "), StdErr: trimmed}
+	default:
+		FatalErr(fmt.Errorf("unable to execute %q: %v", c.execPath, err))
+		// unreachable code
+		return "", nil
+	}
+}
+
+// GetImageID returns the image ID for the given image tag
+func (c *DockerCLI) GetImageID(imageTag string) (string, error) {
+	imageID := ""
+	ctx := context.Background()
+	images, err := c.CLI.ImageList(ctx, types.ImageListOptions{})
+	if err != nil {
+		e2e.Logf("failed to get the docker image list")
+		return imageID, err
+	}
+	for _, image := range images {
+		if strings.Contains(strings.Join(image.RepoTags, ","), imageTag) {
+			e2e.Logf("image ID is %s\n", image.ID)
+			return image.ID, nil
+		}
+	}
+	return imageID, nil
+}
+
+// RemoveImage removes the image matching imageIndex
+func (c *DockerCLI) RemoveImage(imageIndex string) (bool, error) {
+	imageID, err := c.GetImageID(imageIndex)
+	if err != nil {
+		return false, err
+	}
+	e2e.Logf("%s imageID is %s\n", imageIndex, imageID)
+	ctx := context.Background()
+	if imageID == "" {
+		e2e.Logf("no image found with tag %s", imageIndex)
+		return true, nil
+	}
+	e2e.Logf("delete image %s\n", imageID)
+	_, err = c.CLI.ImageRemove(ctx, imageID, types.ImageRemoveOptions{Force: true})
+	if err != nil {
+		e2e.Logf("removing docker image %s failed", imageID)
+		return false, err
+	}
+	e2e.Logf("removed image %s successfully\n", imageID)
+	return true, nil
+}
+
+// GetImageList returns the list of local image tags
+func (c *DockerCLI) GetImageList() ([]string, error) {
+	var imageList []string
+	ctx := context.Background()
+
+	images, err := c.CLI.ImageList(ctx, types.ImageListOptions{})
+	if err != nil {
+		e2e.Logf("failed to get the docker image list")
+		return imageList, err
+	}
+	for _, image := range images {
+		e2e.Logf("image: %s\n", strings.Join(image.RepoTags, ","))
+		imageList = append(imageList, strings.Join(image.RepoTags, ","))
+	}
+	return imageList, nil
+}
+
+// CheckImageExist checks whether the image exists
+func (c *DockerCLI) CheckImageExist(imageIndex string) (bool, error) {
+	imageList, err := c.GetImageList()
+	if err != nil {
+		return false, err
+	}
+	return contains(imageList, imageIndex), nil
+}
+
+func (c *DockerCLI) 
ContainerCreate(imageName string, containerName string, entrypoint string, openStdin bool) (string, error) { + cli := c.CLI + ctx := context.Background() + resp, err := cli.ContainerCreate(ctx, &container.Config{ + Image: imageName, + OpenStdin: openStdin, + Tty: true, + Entrypoint: []string{entrypoint}, + }, nil, nil, nil, containerName) + return resp.ID, err +} + +func (c *DockerCLI) ContainerStop(id string) error { + cli := c.CLI + ctx := context.Background() + err := cli.ContainerStop(ctx, id, container.StopOptions{}) + return err +} + +func (c *DockerCLI) ContainerRemove(id string) error { + cli := c.CLI + ctx := context.Background() + err := cli.ContainerRemove(ctx, id, types.ContainerRemoveOptions{Force: true}) + return err +} + +func (c *DockerCLI) ContainerStart(id string) error { + cli := c.CLI + ctx := context.Background() + err := cli.ContainerStart(ctx, id, types.ContainerStartOptions{}) + return err +} + +func (c *DockerCLI) Exec(id string, cmd []string) (int, string, string, error) { + // prepare exec + cli := c.CLI + ctx := context.Background() + execConfig := types.ExecConfig{ + AttachStdout: true, + AttachStderr: true, + Cmd: cmd, + } + cresp, err := cli.ContainerExecCreate(ctx, id, execConfig) + if err != nil { + return 1, "", "", err + } + execID := cresp.ID + + // run it, with stdout/stderr attached + aresp, err := cli.ContainerExecAttach(ctx, execID, types.ExecStartCheck{}) + if err != nil { + return 1, "", "", err + } + defer aresp.Close() + + // read the output + var outBuf, errBuf bytes.Buffer + outputDone := make(chan error) + + go func() { + // StdCopy demultiplexes the stream into two buffers + _, err = stdcopy.StdCopy(&outBuf, &errBuf, aresp.Reader) + outputDone <- err + }() + + select { + case err := <-outputDone: + if err != nil { + return 1, "", "", err + } + break + + case <-ctx.Done(): + return 1, "", "", ctx.Err() + } + + // get the exit code + iresp, err := cli.ContainerExecInspect(ctx, execID) + if err != nil { + return 1, "", "", err + } + + return iresp.ExitCode, outBuf.String(), errBuf.String(), nil +} diff --git a/test/util/container/podman_client.go b/test/util/container/podman_client.go new file mode 100644 index 000000000..98ab9166e --- /dev/null +++ b/test/util/container/podman_client.go @@ -0,0 +1,287 @@ +package container + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "os/exec" + "runtime/debug" + "strings" + + g "github.com/onsi/ginkgo/v2" + e2e "k8s.io/kubernetes/test/e2e/framework" +) + +// ExitError returns the error info +type ExitError struct { + Cmd string + StdErr string + *exec.ExitError +} + +// FatalErr exits the test in case a fatal error has occurred. +func FatalErr(msg interface{}) { + // the path that leads to this being called isn't always clear... 
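+	// so log a stack trace first to make the failure easier to trace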
+	fmt.Fprintln(g.GinkgoWriter, string(debug.Stack()))
+	e2e.Failf("%v", msg)
+}
+
+// PodmanImage represents a podman image
+type PodmanImage struct {
+	ID         string            `json:"Id"`
+	Size       int64             `json:"Size"`
+	Labels     map[string]string `json:"Labels"`
+	Names      []string          `json:"Names"`
+	Digest     string            `json:"Digest"`
+	Digests    []string          `json:"Digests"`
+	Dangling   bool              `json:"Dangling"`
+	History    []string          `json:"History"`
+	Containers int64             `json:"Containers"`
+}
+
+// PodmanCLI provides functions to run the podman command
+type PodmanCLI struct {
+	execPath        string
+	ExecCommandPath string
+	globalArgs      []string
+	commandArgs     []string
+	finalArgs       []string
+	verbose         bool
+	stdin           *bytes.Buffer
+	stdout          io.Writer
+	stderr          io.Writer
+	showInfo        bool
+	UnsetProxy      bool
+	env             []string
+}
+
+// NewPodmanCLI initializes the podman CLI framework
+func NewPodmanCLI() *PodmanCLI {
+	newclient := &PodmanCLI{}
+	newclient.execPath = "podman"
+	newclient.showInfo = true
+	newclient.UnsetProxy = false
+	return newclient
+}
+
+// Run executes the given podman command
+func (c *PodmanCLI) Run(commands ...string) *PodmanCLI {
+	in, out, errout := &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}
+	podman := &PodmanCLI{
+		execPath:        c.execPath,
+		ExecCommandPath: c.ExecCommandPath,
+		UnsetProxy:      c.UnsetProxy,
+		showInfo:        c.showInfo,
+		env:             c.env,
+	}
+	podman.globalArgs = commands
+	podman.stdin, podman.stdout, podman.stderr = in, out, errout
+	return podman.setOutput(c.stdout)
+}
+
+// setOutput overrides the default command output
+func (c *PodmanCLI) setOutput(out io.Writer) *PodmanCLI {
+	c.stdout = out
+	return c
+}
+
+// Args sets the additional arguments for the podman CLI command
+func (c *PodmanCLI) Args(args ...string) *PodmanCLI {
+	c.commandArgs = args
+	c.finalArgs = append(c.globalArgs, c.commandArgs...)
+	return c
+}
+
+func (c *PodmanCLI) printCmd() string {
+	return strings.Join(c.finalArgs, " ")
+}
+
+// Output executes the command and returns stdout/stderr combined into one string
+func (c *PodmanCLI) Output() (string, error) {
+	if c.verbose {
+		e2e.Logf("DEBUG: podman %s\n", c.printCmd())
+	}
+	cmd := exec.Command(c.execPath, c.finalArgs...)
+	cmd.Env = os.Environ()
+	if c.UnsetProxy {
+		var envCmd []string
+		for _, envIndex := range cmd.Env {
+			if !(strings.Contains(strings.ToUpper(envIndex), "HTTP_PROXY") || strings.Contains(strings.ToUpper(envIndex), "HTTPS_PROXY") || strings.Contains(strings.ToUpper(envIndex), "NO_PROXY")) {
+				envCmd = append(envCmd, envIndex)
+			}
+		}
+		cmd.Env = envCmd
+	}
+	if c.env != nil {
+		cmd.Env = append(cmd.Env, c.env...)
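+		// entries appended here come after os.Environ(), and os/exec keeps the
+		// last value for duplicate keys, so c.env takes precedence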
+	}
+	if c.ExecCommandPath != "" {
+		e2e.Logf("using exec command path %s\n", c.ExecCommandPath)
+		cmd.Dir = c.ExecCommandPath
+	}
+	cmd.Stdin = c.stdin
+	if c.showInfo {
+		e2e.Logf("Running '%s %s'", c.execPath, strings.Join(c.finalArgs, " "))
+	}
+	out, err := cmd.CombinedOutput()
+	trimmed := strings.TrimSpace(string(out))
+	switch err.(type) {
+	case nil:
+		c.stdout = bytes.NewBuffer(out)
+		return trimmed, nil
+	case *exec.ExitError:
+		e2e.Logf("Error running %v:\n%s", cmd, trimmed)
+		return trimmed, &ExitError{ExitError: err.(*exec.ExitError), Cmd: c.execPath + " " + strings.Join(c.finalArgs, " "), StdErr: trimmed}
+	default:
+		FatalErr(fmt.Errorf("unable to execute %q: %v", c.execPath, err))
+		// unreachable code
+		return "", nil
+	}
+}
+
+// GetImageList returns the list of local image names
+func (c *PodmanCLI) GetImageList() ([]string, error) {
+	var imageList []string
+	images, err := c.GetImages()
+	if err != nil {
+		return imageList, err
+	}
+	for _, imageIndex := range images {
+		e2e.Logf("ID %s, name: %s", imageIndex.ID, strings.Join(imageIndex.Names, ","))
+		imageList = append(imageList, strings.Join(imageIndex.Names, ","))
+	}
+	return imageList, nil
+}
+
+func (c *PodmanCLI) GetImages() ([]PodmanImage, error) {
+	output, err := c.Run("images").Args("--format", "json").Output()
+	if err != nil {
+		e2e.Logf("Failed to run 'podman images --format json'")
+		return nil, err
+	}
+
+	images, err := c.GetImagesByJSON(output)
+	if err != nil {
+		return nil, err
+	}
+	return images, nil
+}
+
+func (c *PodmanCLI) GetImagesByJSON(jsonStr string) ([]PodmanImage, error) {
+	var images []PodmanImage
+
+	if err := json.Unmarshal([]byte(jsonStr), &images); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal JSON output: %v", err)
+	}
+
+	return images, nil
+}
+
+// CheckImageExist checks whether the image exists
+func (c *PodmanCLI) CheckImageExist(imageIndex string) (bool, error) {
+	e2e.Logf("checking whether image %s exists", imageIndex)
+	imageList, err := c.GetImageList()
+	if err != nil {
+		return false, err
+	}
+	return contains(imageList, imageIndex), nil
+}
+
+// GetImageID returns the image ID for the given image tag
+func (c *PodmanCLI) GetImageID(imageTag string) (string, error) {
+	imageID, err := c.Run("images").Args(imageTag, "--format", "{{.ID}}").Output()
+	if err != nil {
+		e2e.Logf("Failed to run 'podman images --format {{.ID}}'")
+		return "", err
+	}
+	return imageID, nil
+}
+
+// RemoveImage removes the image matching imageIndex
+func (c *PodmanCLI) RemoveImage(imageIndex string) (bool, error) {
+	imageID, err := c.GetImageID(imageIndex)
+	if err != nil {
+		return false, err
+	}
+	if imageID == "" {
+		return true, nil
+	}
+	e2e.Logf("imageID is %s\n", imageID)
+	_, err = c.Run("image").Args("rm", "-f", imageID).Output()
+	if err != nil {
+		e2e.Logf("removing image %s failed", imageID)
+		return false, err
+	}
+	e2e.Logf("removed image %s successfully\n", imageID)
+
+	return true, nil
+}
+
+func (c *PodmanCLI) ContainerCreate(imageName string, containerName string, entrypoint string, openStdin bool) (string, error) {
+	interactiveStr := "--interactive=false"
+	if openStdin {
+		interactiveStr = "--interactive=true"
+	}
+	output, err := c.Run("create").Args(interactiveStr, "--entrypoint="+entrypoint, "--name="+containerName, imageName).Output()
+	if err != nil {
+		e2e.Logf("run podman create failed")
+		return "", err
+	}
+	outputLines := strings.Split(strings.Trim(output, "\n"), "\n")
+	containerID := outputLines[len(outputLines)-1]
+	return containerID, nil
+}
+
+func (c *PodmanCLI) ContainerStart(id string) error {
+	_, err := 
c.Run("start").Args(id).Output() + if err != nil { + e2e.Logf("run podman start %s failed", id) + } + return err +} + +func (c *PodmanCLI) ContainerStop(id string) error { + _, err := c.Run("stop").Args(id).Output() + if err != nil { + e2e.Logf("run podman stop %s failed", id) + } + return err +} + +func (c *PodmanCLI) ContainerRemove(id string) error { + _, err := c.Run("rm").Args(id, "-f").Output() + if err != nil { + e2e.Logf("run podman rm %s failed", id) + } + return err +} + +func (c *PodmanCLI) Exec(id string, commands []string) (string, error) { + commands = append([]string{id}, commands...) + output, err := c.Run("exec").Args(commands...).Output() + if err != nil { + e2e.Logf("run podman exec %s faild", commands) + return "", err + } + return output, nil +} + +func (c *PodmanCLI) ExecBackgroud(id string, commands []string) (string, error) { + commands = append([]string{"--detach", id}, commands...) + output, err := c.Run("exec").Args(commands...).Output() + if err != nil { + e2e.Logf("run podman exec %s faild", commands) + return "", err + } + return output, nil +} + +func (c *PodmanCLI) CopyFile(id string, src string, target string) error { + _, err := c.Run("cp").Args(src, id+":"+target).Output() + if err != nil { + e2e.Logf("run podman cp failed") + } + return err +} diff --git a/test/util/container/quay_client.go b/test/util/container/quay_client.go new file mode 100644 index 000000000..900c0d186 --- /dev/null +++ b/test/util/container/quay_client.go @@ -0,0 +1,288 @@ +package container + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "strings" + + e2e "k8s.io/kubernetes/test/e2e/framework" +) + +// AuthInfo returns the error info +type AuthInfo struct { + Authorization string `json:"authorization"` +} + +// TagInfo returns the images tag info +type TagInfo struct { + Name string `json:"name"` + Reversion bool `json:"reversion"` + StartTs int64 `json:"start_ts"` + EndTs int64 `json:"end_ts"` + ManifestDigest string `json:"manifest_digest"` + ImageID string `json:"image_id"` + LastModified string `json:"last_modified"` + Expiration string `json:"expiration"` + DockerImageID string `json:"docker_image_id"` + IsManifestList bool `json:"is_manifest_list"` + Size int64 `json:"size"` +} + +// TagsResult returns the images tag info +type TagsResult struct { + HasAdditional bool `json:"has_additional"` + Page int `json:"page"` + Tags []TagInfo `json:"tags"` +} + +// QuayCLI provides function to run the quay command +type QuayCLI struct { + EndPointPre string + Authorization string +} + +// NewQuayCLI initialize the quay api +func NewQuayCLI() *QuayCLI { + newclient := &QuayCLI{} + newclient.EndPointPre = "https://quay.io/api/v1/repository/" + authString := "" + authFilepath := "" + if strings.Compare(os.Getenv("QUAY_AUTH_FILE"), "") != 0 { + authFilepath = os.Getenv("QUAY_AUTH_FILE") + } else { + authFilepath = "/home/cloud-user/.docker/auto/quay_auth.json" + } + if _, err := os.Stat(authFilepath); os.IsNotExist(err) { + e2e.Logf("Quay auth file does not exist") + } else { + content, err := ioutil.ReadFile(authFilepath) + if err != nil { + e2e.Logf("File reading error") + } else { + var authJSON AuthInfo + if err := json.Unmarshal(content, &authJSON); err != nil { + e2e.Logf("parser json error") + } else { + authString = "Bearer " + authJSON.Authorization + } + } + } + if strings.Compare(os.Getenv("QUAY_AUTH"), "") != 0 { + e2e.Logf("get quay auth from env QUAY_AUTH") + authString = "Bearer " + os.Getenv("QUAY_AUTH") + } + if 
strings.Compare(authString, "Bearer ") == 0 {
+		e2e.Failf("get quay auth failed!")
+	}
+	newclient.Authorization = authString
+	return newclient
+}
+
+// TryDeleteTag will delete the image tag
+func (c *QuayCLI) TryDeleteTag(imageIndex string) (bool, error) {
+	if strings.Contains(imageIndex, ":") {
+		imageIndex = strings.Replace(imageIndex, ":", "/tag/", 1)
+	}
+	endpoint := c.EndPointPre + imageIndex
+	e2e.Logf("endpoint is %s", endpoint)
+
+	client := &http.Client{}
+	request, err := http.NewRequest("DELETE", endpoint, nil)
+	if err != nil {
+		return false, err
+	}
+	if strings.Compare(c.Authorization, "") != 0 {
+		request.Header.Add("Authorization", c.Authorization)
+	}
+	response, err := client.Do(request)
+	if err != nil {
+		return false, err
+	}
+	defer response.Body.Close()
+	if response.StatusCode != 204 {
+		e2e.Logf("delete %s failed, response code is %d", imageIndex, response.StatusCode)
+		return false, nil
+	}
+	return true, nil
+}
+
+// DeleteTag will delete the image tag, retrying once on failure
+func (c *QuayCLI) DeleteTag(imageIndex string) (bool, error) {
+	rc, err := c.TryDeleteTag(imageIndex)
+	if !rc {
+		e2e.Logf("try to delete %s again", imageIndex)
+		rc, err = c.TryDeleteTag(imageIndex)
+		if !rc {
+			e2e.Failf("delete tag failed on quay.io")
+		}
+	}
+	return rc, err
+}
+
+// CheckTagNotExist checks that the image tag does not exist
+func (c *QuayCLI) CheckTagNotExist(imageIndex string) (bool, error) {
+	if strings.Contains(imageIndex, ":") {
+		imageIndex = strings.Replace(imageIndex, ":", "/tag/", 1)
+	}
+	endpoint := c.EndPointPre + imageIndex + "/images"
+	e2e.Logf("endpoint is %s", endpoint)
+
+	client := &http.Client{}
+	request, err := http.NewRequest("GET", endpoint, nil)
+	if err != nil {
+		return false, err
+	}
+	request.Header.Add("Authorization", c.Authorization)
+	response, err := client.Do(request)
+	if err != nil {
+		return false, err
+	}
+	defer response.Body.Close()
+	if response.StatusCode == 404 {
+		e2e.Logf("tag %s does not exist", imageIndex)
+		return true, nil
+	}
+	contents, _ := ioutil.ReadAll(response.Body)
+	e2e.Logf("response is %s", string(contents))
+	return false, nil
+
+}
+
+// GetTagNameList returns the tag name list in quay
+func (c *QuayCLI) GetTagNameList(imageIndex string) ([]string, error) {
+	var TagNameList []string
+	tags, err := c.GetTags(imageIndex)
+	if err != nil {
+		return TagNameList, err
+	}
+	for _, tagIndex := range tags {
+		TagNameList = append(TagNameList, tagIndex.Name)
+	}
+	return TagNameList, nil
+}
+
+// GetTags lists the tags of the repository, optionally filtered to a specific tag
+func (c *QuayCLI) GetTags(imageIndex string) ([]TagInfo, error) {
+
+	var result []TagInfo
+	var specificTag, indexRepository, endpoint string
+
+	if strings.Contains(imageIndex, ":") {
+		indexRepository = strings.Split(imageIndex, ":")[0] + "/tag"
+		specificTag = strings.Split(imageIndex, ":")[1]
+		// GET /api/v1/repository/{repository}/tag?specificTag={tag} #Filters the tags to the specific tag.
+		endpoint = c.EndPointPre + indexRepository + "?specificTag=" + specificTag
+		if specificTag == "" {
+			// GET /api/v1/repository/{repository}/tag?onlyActiveTags=true #Filter to all active tags.
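+			// e.g. https://quay.io/api/v1/repository/<repo>/tag?onlyActiveTags=true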
+			endpoint = c.EndPointPre + indexRepository + "?onlyActiveTags=true"
+		}
+	} else if strings.Contains(imageIndex, "/tag/") {
+		imageIndex = strings.Split(imageIndex, "tag/")[0] + "tag/"
+		endpoint = c.EndPointPre + imageIndex
+	}
+
+	e2e.Logf("endpoint is %s", endpoint)
+
+	client := &http.Client{}
+	request, err := http.NewRequest("GET", endpoint, nil)
+	if err != nil {
+		return result, err
+	}
+	if strings.Compare(c.Authorization, "") != 0 {
+		request.Header.Add("Authorization", c.Authorization)
+	}
+	response, err := client.Do(request)
+	if err != nil {
+		return result, err
+	}
+	defer response.Body.Close()
+	e2e.Logf("%s", response.Status)
+	if response.StatusCode != 200 {
+		e2e.Logf("get %s failed, response code is %d", imageIndex, response.StatusCode)
+		return result, fmt.Errorf("return code is %d, not 200", response.StatusCode)
+	}
+	contents, err := ioutil.ReadAll(response.Body)
+	if err != nil {
+		return result, err
+	}
+	//e2e.Logf(string(contents))
+	// unmarshal the JSON response
+	var TagsResultOut TagsResult
+	if err := json.Unmarshal(contents, &TagsResultOut); err != nil {
+		return result, err
+	}
+	result = TagsResultOut.Tags
+	return result, nil
+
+}
+
+// GetImageDigest gets the manifest digest of the specified image tag
+func (c *QuayCLI) GetImageDigest(imageIndex string) (string, error) {
+
+	var result string
+	tags, err := c.GetTags(imageIndex)
+	if err != nil {
+		e2e.Logf("Can't get the digest, GetTags failed.")
+		return result, err
+	}
+	imageTag := strings.Split(imageIndex, ":")[1]
+	for image := range tags {
+		if tags[image].Name == imageTag {
+			return tags[image].ManifestDigest, nil
+		}
+	}
+	e2e.Logf("Can't get the digest, Manifest_digest not found.")
+	return result, nil
+
+}
+
+func (c *QuayCLI) TryChangeTag(imageTag, manifestDigest string) (bool, error) {
+	if strings.Contains(imageTag, ":") {
+		imageTag = strings.Replace(imageTag, ":", "/tag/", 1)
+	}
+	endpoint := c.EndPointPre + imageTag
+	e2e.Logf("endpoint is %s", endpoint)
+
+	payload := ("{\"manifest_digest\": \"" + manifestDigest + "\"}")
+
+	client := &http.Client{}
+	request, err := http.NewRequest("PUT", endpoint, bytes.NewBuffer([]byte(payload)))
+	if err != nil {
+		return false, err
+	}
+	if strings.Compare(c.Authorization, "") != 0 {
+		request.Header.Add("Authorization", c.Authorization)
+	}
+	request.Header.Set("Content-Type", "application/json")
+	response, err := client.Do(request)
+	if err != nil {
+		return false, err
+	}
+	defer response.Body.Close()
+	if response.StatusCode != 201 {
+		e2e.Logf("change %s failed, response code is %d", imageTag, response.StatusCode)
+		return false, nil
+	}
+	return true, nil
+}
+
+// ChangeTag will change the image tag, retrying once on failure
+func (c *QuayCLI) ChangeTag(imageTag, manifestDigest string) (bool, error) {
+	rc, err := c.TryChangeTag(imageTag, manifestDigest)
+	if !rc {
+		e2e.Logf("try to tag %s again", manifestDigest)
+		rc, err = c.TryChangeTag(imageTag, manifestDigest)
+		if !rc {
+			e2e.Logf("Change tag failed on quay.io")
+		}
+	}
+	return rc, err
+}
diff --git a/test/util/db/common.go b/test/util/db/common.go
new file mode 100644
index 000000000..265f4289a
--- /dev/null
+++ b/test/util/db/common.go
@@ -0,0 +1,64 @@
+package db
+
+import (
+	"context"
+	"fmt"
+	"os/exec"
+	"strings"
+
+	"github.com/openshift/openshift-tests-private/test/extended/util"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kcoreclient "k8s.io/client-go/kubernetes/typed/core/v1"
+)
+
+// PodConfig holds configuration for a pod.
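+// Container is the name of the pod's first container; Env aggregates the
+// environment variables of all containers (later containers win on duplicates).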
+type PodConfig struct {
+	Container string
+	Env       map[string]string
+}
+
+func getPodConfig(c kcoreclient.PodInterface, podName string) (conf *PodConfig, err error) {
+	pod, err := c.Get(context.Background(), podName, metav1.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+	env := make(map[string]string)
+	for _, container := range pod.Spec.Containers {
+		for _, e := range container.Env {
+			env[e.Name] = e.Value
+		}
+	}
+	return &PodConfig{pod.Spec.Containers[0].Name, env}, nil
+}
+
+func firstContainerName(c kcoreclient.PodInterface, podName string) (string, error) {
+	pod, err := c.Get(context.Background(), podName, metav1.GetOptions{})
+	if err != nil {
+		return "", err
+	}
+
+	return pod.Spec.Containers[0].Name, nil
+}
+
+func isReady(oc *util.CLI, podName string, pingCommand, expectedOutput string) (bool, error) {
+	out, err := executeShellCommand(oc, podName, pingCommand)
+	ok := strings.Contains(out, expectedOutput)
+	if !ok {
+		err = fmt.Errorf("expected output %q, but got %q", expectedOutput, out)
+	}
+	return ok, err
+}
+
+func executeShellCommand(oc *util.CLI, podName string, command string) (string, error) {
+	out, err := oc.Run("exec").Args(podName, "--", "bash", "-c", command).Output()
+	if err != nil {
+		switch err.(type) {
+		case *util.ExitError, *exec.ExitError:
+			return "", nil
+		default:
+			return "", err
+		}
+	}
+
+	return out, nil
+}
diff --git a/test/util/db/mongodb.go b/test/util/db/mongodb.go
new file mode 100644
index 000000000..9fb51c0fb
--- /dev/null
+++ b/test/util/db/mongodb.go
@@ -0,0 +1,66 @@
+package db
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/openshift/openshift-tests-private/test/extended/util"
+)
+
+// MongoDB is a MongoDB helper for executing commands.
+type MongoDB struct {
+	podName string
+}
+
+// NewMongoDB creates a new util.Database instance.
+func NewMongoDB(podName string) util.Database {
+	return &MongoDB{
+		podName: podName,
+	}
+}
+
+// PodName implements Database.
+func (m MongoDB) PodName() string {
+	return m.podName
+}
+
+// IsReady pings the MongoDB server.
+func (m MongoDB) IsReady(oc *util.CLI) (bool, error) {
+	return isReady(
+		oc,
+		m.podName,
+		`mongo --quiet --eval '{"ping", 1}'`,
+		"1",
+	)
+}
+
+// Query executes a query as an ordinary user and returns the result.
+func (m MongoDB) Query(oc *util.CLI, query string) (string, error) {
+	return executeShellCommand(
+		oc,
+		m.podName,
+		fmt.Sprintf(`mongo --quiet "$MONGODB_DATABASE" --username "$MONGODB_USER" --password "$MONGODB_PASSWORD" --eval '%s'`, query),
+	)
+}
+
+// QueryPrivileged queries the database as a privileged user.
+func (m MongoDB) QueryPrivileged(oc *util.CLI, query string) (string, error) {
+	return "", errors.New("not implemented")
+}
+
+// TestRemoteLogin tests whether it is possible to remote login to hostAddress.
+func (m MongoDB) TestRemoteLogin(oc *util.CLI, hostAddress string) error {
+	return errors.New("not implemented")
+}
+
+// QueryPrimary queries the database on the primary node as a regular user.
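+//
+// A hypothetical usage sketch (pod name and query assumed, not part of this change):
+//
+//	rs := NewMongoDB("mongodb-0").(util.ReplicaSet)
+//	out, err := rs.QueryPrimary(oc, "db.version()")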
+func (m MongoDB) QueryPrimary(oc *util.CLI, query string) (string, error) { + return executeShellCommand( + oc, + m.podName, + fmt.Sprintf( + `mongo --quiet "$MONGODB_DATABASE" --username "$MONGODB_USER" --password "$MONGODB_PASSWORD" --host "$MONGODB_REPLICA_NAME/localhost" --eval '%s'`, + query, + ), + ) +} diff --git a/test/util/db/mysql.go b/test/util/db/mysql.go new file mode 100644 index 000000000..78d3d492a --- /dev/null +++ b/test/util/db/mysql.go @@ -0,0 +1,103 @@ +package db + +import ( + "fmt" + "os/exec" + "strings" + + "github.com/openshift/openshift-tests-private/test/extended/util" +) + +// MySQL is a MySQL helper for executing commands. +type MySQL struct { + podName string + masterPodName string +} + +// NewMysql creates a new util.Database instance. +func NewMysql(podName, masterPodName string) util.Database { + if masterPodName == "" { + masterPodName = podName + } + return &MySQL{ + podName: podName, + masterPodName: masterPodName, + } +} + +// PodName implements Database. +func (m MySQL) PodName() string { + return m.podName +} + +// IsReady pings the MySQL server. +func (m MySQL) IsReady(oc *util.CLI) (bool, error) { + conf, err := getPodConfig(oc.KubeClient().CoreV1().Pods(oc.Namespace()), m.podName) + if err != nil { + return false, err + } + masterConf, err := getPodConfig(oc.KubeClient().CoreV1().Pods(oc.Namespace()), m.masterPodName) + if err != nil { + return false, err + } + + out, err := oc.Run("exec").Args(m.podName, "-c", conf.Container, "--", "bash", "-c", + fmt.Sprintf("mysqladmin -h localhost -u%s -p%s ping", masterConf.Env["MYSQL_USER"], masterConf.Env["MYSQL_PASSWORD"])).Output() + if err != nil { + switch err.(type) { + case *util.ExitError, *exec.ExitError: + return false, nil + default: + return false, err + } + } + return strings.Contains(out, "mysqld is alive"), nil +} + +// Query executes an SQL query as an ordinary user and returns the result. +func (m MySQL) Query(oc *util.CLI, query string) (string, error) { + container, err := firstContainerName(oc.KubeClient().CoreV1().Pods(oc.Namespace()), m.podName) + if err != nil { + return "", err + } + masterConf, err := getPodConfig(oc.KubeClient().CoreV1().Pods(oc.Namespace()), m.masterPodName) + if err != nil { + return "", err + } + return oc.Run("exec").Args(m.podName, "-c", container, "--", "bash", "-c", + fmt.Sprintf("mysql -h 127.0.0.1 -u%s -p%s -e \"%s\" %s", + masterConf.Env["MYSQL_USER"], masterConf.Env["MYSQL_PASSWORD"], query, + masterConf.Env["MYSQL_DATABASE"])).Output() +} + +// QueryPrivileged executes an SQL query as a root user and returns the result. +func (m MySQL) QueryPrivileged(oc *util.CLI, query string) (string, error) { + container, err := firstContainerName(oc.KubeClient().CoreV1().Pods(oc.Namespace()), m.podName) + if err != nil { + return "", err + } + masterConf, err := getPodConfig(oc.KubeClient().CoreV1().Pods(oc.Namespace()), m.masterPodName) + if err != nil { + return "", err + } + return oc.Run("exec").Args(m.podName, "-c", container, "--", "bash", "-c", + fmt.Sprintf("mysql -h 127.0.0.1 -uroot -e \"%s\" %s", + query, masterConf.Env["MYSQL_DATABASE"])).Output() +} + +// TestRemoteLogin will test whether we can login through to a remote database. 
+func (m MySQL) TestRemoteLogin(oc *util.CLI, hostAddress string) error { + container, err := firstContainerName(oc.KubeClient().CoreV1().Pods(oc.Namespace()), m.podName) + if err != nil { + return err + } + masterConf, err := getPodConfig(oc.KubeClient().CoreV1().Pods(oc.Namespace()), m.masterPodName) + if err != nil { + return err + } + err = oc.Run("exec").Args(m.podName, "-c", container, "--", "bash", "-c", + fmt.Sprintf("mysql -h %s -u%s -p%s -e \"SELECT 1;\" %s", + hostAddress, masterConf.Env["MYSQL_USER"], masterConf.Env["MYSQL_PASSWORD"], + masterConf.Env["MYSQL_DATABASE"])).Execute() + return err +} diff --git a/test/util/db/postgresql.go b/test/util/db/postgresql.go new file mode 100644 index 000000000..655759143 --- /dev/null +++ b/test/util/db/postgresql.go @@ -0,0 +1,99 @@ +package db + +import ( + "fmt" + "os/exec" + "strings" + + "github.com/openshift/openshift-tests-private/test/extended/util" +) + +// PostgreSQL is a PostgreSQL helper for executing commands. +type PostgreSQL struct { + podName string + masterPodName string +} + +// NewPostgreSQL creates a new util.Database instance. +func NewPostgreSQL(podName, masterPodName string) util.Database { + if masterPodName == "" { + masterPodName = podName + } + return &PostgreSQL{ + podName: podName, + masterPodName: masterPodName, + } +} + +// PodName implements Database. +func (m PostgreSQL) PodName() string { + return m.podName +} + +// IsReady pings the PostgreSQL server. +func (m PostgreSQL) IsReady(oc *util.CLI) (bool, error) { + conf, err := getPodConfig(oc.KubeClient().CoreV1().Pods(oc.Namespace()), m.podName) + if err != nil { + return false, err + } + out, err := oc.Run("exec").Args(m.podName, "-c", conf.Container, "--", "bash", "-c", + "psql postgresql://postgres@127.0.0.1 -x -c \"SELECT 1;\"").Output() + if err != nil { + switch err.(type) { + case *util.ExitError, *exec.ExitError: + return false, nil + default: + return false, err + } + } + return strings.Contains(out, "-[ RECORD 1 ]\n?column? | 1"), nil +} + +// Query executes an SQL query as an ordinary user and returns the result. +func (m PostgreSQL) Query(oc *util.CLI, query string) (string, error) { + container, err := firstContainerName(oc.KubeClient().CoreV1().Pods(oc.Namespace()), m.podName) + if err != nil { + return "", err + } + masterConf, err := getPodConfig(oc.KubeClient().CoreV1().Pods(oc.Namespace()), m.masterPodName) + if err != nil { + return "", err + } + return oc.Run("exec").Args(m.podName, "-c", container, "--", "bash", "-c", + fmt.Sprintf("psql postgres://%s:%s@127.0.0.1/%s -x -c \"%s\"", + masterConf.Env["POSTGRESQL_USER"], masterConf.Env["POSTGRESQL_PASSWORD"], + masterConf.Env["POSTGRESQL_DATABASE"], query)).Output() +} + +// QueryPrivileged executes an SQL query as a root user and returns the result. +func (m PostgreSQL) QueryPrivileged(oc *util.CLI, query string) (string, error) { + container, err := firstContainerName(oc.KubeClient().CoreV1().Pods(oc.Namespace()), m.podName) + if err != nil { + return "", err + } + masterConf, err := getPodConfig(oc.KubeClient().CoreV1().Pods(oc.Namespace()), m.masterPodName) + if err != nil { + return "", err + } + return oc.Run("exec").Args(m.podName, "-c", container, "--", "bash", "-c", + fmt.Sprintf("psql postgres://postgres:%s@127.0.0.1/%s -x -c \"%s\"", + masterConf.Env["POSTGRESQL_ADMIN_PASSWORD"], + masterConf.Env["POSTGRESQL_DATABASE"], query)).Output() +} + +// TestRemoteLogin will test whether we can login through to a remote database. 
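+//
+// A hypothetical sketch (pod and service host assumed):
+//
+//	db := NewMysql("mysql-1", "mysql-0")
+//	err := db.TestRemoteLogin(oc, "mysql.e2e-test.svc")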
+func (m PostgreSQL) TestRemoteLogin(oc *util.CLI, hostAddress string) error {
+	container, err := firstContainerName(oc.KubeClient().CoreV1().Pods(oc.Namespace()), m.podName)
+	if err != nil {
+		return err
+	}
+	masterConf, err := getPodConfig(oc.KubeClient().CoreV1().Pods(oc.Namespace()), m.masterPodName)
+	if err != nil {
+		return err
+	}
+	err = oc.Run("exec").Args(m.podName, "-c", container, "--", "bash", "-c",
+		fmt.Sprintf("psql postgres://%s:%s@%s/%s -x -c \"SELECT 1;\"",
+			masterConf.Env["POSTGRESQL_USER"], masterConf.Env["POSTGRESQL_PASSWORD"],
+			hostAddress, masterConf.Env["POSTGRESQL_DATABASE"])).Execute()
+	return err
+}
diff --git a/test/util/db/sqlit.go b/test/util/db/sqlit.go
new file mode 100644
index 000000000..fe6b33ed3
--- /dev/null
+++ b/test/util/db/sqlit.go
@@ -0,0 +1,304 @@
+package db
+
+import (
+	"database/sql"
+	"fmt"
+	"os"
+	"reflect"
+	"strings"
+
+	_ "github.com/mattn/go-sqlite3"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+// Sqlit is a SQLite helper for executing queries.
+type Sqlit struct {
+	driverName string
+}
+
+type OperatorBundle struct {
+	name       string
+	bundlepath string
+	version    string
+}
+
+type Channel struct {
+	entry_id            int64
+	channel_name        string
+	package_name        string
+	operatorbundle_name string
+	replaces            string
+	depth               int
+}
+
+type Package struct {
+	name            string
+	default_channel string
+}
+
+type Image struct {
+	image               string
+	operatorbundle_name string
+}
+
+// NewSqlit creates a new SQLite helper instance.
+func NewSqlit() *Sqlit {
+	return &Sqlit{
+		driverName: "sqlite3",
+	}
+}
+
+// QueryDB runs a query against the SQLite database at dbFilePath and returns the rows.
+func (c *Sqlit) QueryDB(dbFilePath string, query string) (*sql.Rows, error) {
+	if _, err := os.Stat(dbFilePath); os.IsNotExist(err) {
+		e2e.Logf("file %s does not exist", dbFilePath)
+		return nil, err
+	}
+	database, err := sql.Open(c.driverName, dbFilePath)
+	if err != nil {
+		return nil, err
+	}
+	defer database.Close()
+	rows, err := database.Query(query)
+	if err != nil {
+		return nil, err
+	}
+	return rows, nil
+}
+
+// QueryOperatorBundle lists the bundles recorded in the operatorbundle table.
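+//
+// e.g., a hypothetical extracted index database:
+//
+//	bundles, err := NewSqlit().QueryOperatorBundle("/tmp/index.db")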
+func (c *Sqlit) QueryOperatorBundle(dbFilePath string) ([]OperatorBundle, error) {
+	rows, err := c.QueryDB(dbFilePath, "SELECT name,bundlepath,version FROM operatorbundle")
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+	var OperatorBundles []OperatorBundle
+	var name string
+	var bundlepath string
+	var version string
+	for rows.Next() {
+		rows.Scan(&name, &bundlepath, &version)
+		OperatorBundles = append(OperatorBundles, OperatorBundle{name: name, bundlepath: bundlepath, version: version})
+		e2e.Logf("OperatorBundles: name: %s, bundlepath: %s, version: %s", name, bundlepath, version)
+	}
+	return OperatorBundles, nil
+}
+
+// CheckOperatorBundlePathExist checks whether the given bundle path exists in the operatorbundle table
+func (c *Sqlit) CheckOperatorBundlePathExist(dbFilePath string, bundlepath string) (bool, error) {
+	OperatorBundles, err := c.QueryOperatorBundle(dbFilePath)
+	if err != nil {
+		return false, err
+	}
+	for _, OperatorBundle := range OperatorBundles {
+		if strings.Compare(OperatorBundle.bundlepath, bundlepath) == 0 {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// CheckOperatorBundleNameExist checks whether the given bundle name exists in the operatorbundle table
+func (c *Sqlit) CheckOperatorBundleNameExist(dbFilePath string, bundleName string) (bool, error) {
+	OperatorBundles, err := c.QueryOperatorBundle(dbFilePath)
+	if err != nil {
+		return false, err
+	}
+	for _, OperatorBundle := range OperatorBundles {
+		if strings.Compare(OperatorBundle.name, bundleName) == 0 {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// QueryOperatorChannel lists the rows of the channel_entry table.
+func (c *Sqlit) QueryOperatorChannel(dbFilePath string) ([]Channel, error) {
+	rows, err := c.QueryDB(dbFilePath, "select * from channel_entry;")
+	var (
+		Channels            []Channel
+		entry_id            int64
+		channel_name        string
+		package_name        string
+		operatorbundle_name string
+		replaces            string
+		depth               int
+	)
+	if err != nil {
+		return Channels, err
+	}
+	defer rows.Close()
+
+	for rows.Next() {
+		rows.Scan(&entry_id, &channel_name, &package_name, &operatorbundle_name, &replaces, &depth)
+		Channels = append(Channels, Channel{entry_id: entry_id,
+			channel_name:        channel_name,
+			package_name:        package_name,
+			operatorbundle_name: operatorbundle_name,
+			replaces:            replaces,
+			depth:               depth})
+	}
+	return Channels, nil
+}
+
+// QueryPackge lists the rows of the package table.
+func (c *Sqlit) QueryPackge(dbFilePath string) ([]Package, error) {
+	rows, err := c.QueryDB(dbFilePath, "select * from package;")
+	var (
+		Packages        []Package
+		name            string
+		default_channel string
+	)
+	if err != nil {
+		return Packages, err
+	}
+	defer rows.Close()
+
+	for rows.Next() {
+		rows.Scan(&name, &default_channel)
+		Packages = append(Packages, Package{name: name,
+			default_channel: default_channel})
+	}
+	return Packages, nil
+}
+
+// QueryRelatedImage lists the rows of the related_image table.
+func (c *Sqlit) QueryRelatedImage(dbFilePath string) ([]Image, error) {
+	rows, err := c.QueryDB(dbFilePath, "select * from related_image;")
+	var (
+		relatedImages       []Image
+		image               string
+		operatorbundle_name string
+	)
+	if err != nil {
+		return relatedImages, err
+	}
+	defer rows.Close()
+
+	for rows.Next() {
+		rows.Scan(&image, &operatorbundle_name)
+		relatedImages = append(relatedImages, Image{image: image,
+			operatorbundle_name: operatorbundle_name})
+	}
+	return relatedImages, nil
+}
+
+// GetOperatorChannelByColumn returns the given column of every channel_entry row
+func (c *Sqlit) GetOperatorChannelByColumn(dbFilePath string, column string) ([]string, error) {
+	channels, err := c.QueryOperatorChannel(dbFilePath)
+	if err != nil {
+		return nil, err
+	}
+	var channelList []string
+	for _, channel := range channels {
+		//valueDB := reflections.GetField(channel, column)
+		value := reflect.Indirect(reflect.ValueOf(&channel)).FieldByName(column)
+		channelList = append(channelList, value.String())
+	}
+	return channelList, nil
+}
+
+func (c *Sqlit) Query(dbFilePath string, table string, column string) ([]string, error) {
+	var valueList []string
+	switch table {
+	case "operatorbundle":
+		result, err := c.QueryOperatorBundle(dbFilePath)
+		if err != nil {
+			return nil, err
+		}
+		for _, channel := range result {
+			value := reflect.Indirect(reflect.ValueOf(&channel)).FieldByName(column)
+			valueList = append(valueList, value.String())
+		}
+		return valueList, nil
+	case "channel_entry":
+		result, err := c.QueryOperatorChannel(dbFilePath)
+		if err != nil {
+			return nil, err
+		}
+		for _, channel := range result {
+			value := reflect.Indirect(reflect.ValueOf(&channel)).FieldByName(column)
+			valueList = append(valueList, value.String())
+		}
+		return valueList, nil
+	case "package":
+		result, err := c.QueryPackge(dbFilePath)
+		if err != nil {
+			return nil, err
+		}
+		for _, packageIndex := range result {
+			value := reflect.Indirect(reflect.ValueOf(&packageIndex)).FieldByName(column)
+			valueList = append(valueList, value.String())
+		}
+		return valueList, nil
+	case "related_image":
+		result, err := c.QueryRelatedImage(dbFilePath)
+		if err != nil {
+			return nil, err
+		}
+		for _, imageIndex := range result {
+			value := reflect.Indirect(reflect.ValueOf(&imageIndex)).FieldByName(column)
+			valueList = append(valueList, value.String())
+		}
+		return valueList, nil
+	default:
+		return nil, fmt.Errorf("unsupported table %q", table)
+	}
+}
+
+// DBHas reports whether the given column of the table contains every value in valueList
+func (c *Sqlit) DBHas(dbFilePath string, table string, column string, valueList []string) (bool, error) {
+	valueListDB, err := c.Query(dbFilePath, table, column)
+	if err != nil {
+		return false, err
+	}
+	return contains(valueListDB, valueList), nil
+}
+
+// DBMatch reports whether the given column of the table matches valueList exactly
+func (c *Sqlit) DBMatch(dbFilePath string, table string, column string, valueList []string) (bool, error) {
+	valueListDB, err := c.Query(dbFilePath, table, column)
+	if err != nil {
+		return false, err
+	}
+	return match(valueListDB, valueList), nil
+}
+
+func contains(stringList1 []string, stringList2 []string) bool {
+	for _, stringIndex2 := range stringList2 {
+		containFlag := false
+		for _, stringIndex1 := range stringList1 {
+			if strings.Compare(stringIndex1, stringIndex2) == 0 {
+				containFlag = true
+				break
+			}
+		}
+		if !containFlag {
+			e2e.Logf("[%s] does not contain [%s]", strings.Join(stringList1, ","), strings.Join(stringList2, ","))
+			return false
+		}
+	}
+	return true
+}
+
+func match(stringList1 []string, stringList2 []string) bool {
+	if len(stringList1) != len(stringList2) {
+		return false
+	}
+	for _, 
stringIndex2 := range stringList2 {
+		containFlag := false
+		for _, stringIndex1 := range stringList1 {
+			if strings.Compare(stringIndex1, stringIndex2) == 0 {
+				containFlag = true
+				break
+			}
+		}
+		if !containFlag {
+			e2e.Logf("[%s] does not equal [%s]", strings.Join(stringList1, ","), strings.Join(stringList2, ","))
+			return false
+		}
+	}
+	return true
+}
diff --git a/test/util/db_image_helpers.go b/test/util/db_image_helpers.go
new file mode 100644
index 000000000..0a08c9832
--- /dev/null
+++ b/test/util/db_image_helpers.go
@@ -0,0 +1,107 @@
+package util
+
+import (
+	"fmt"
+	"os/exec"
+	"reflect"
+	"strings"
+	"time"
+
+	g "github.com/onsi/ginkgo/v2"
+
+	"k8s.io/apimachinery/pkg/util/wait"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+// Database interface allows testing database images.
+type Database interface {
+	// PodName returns the name of the Pod this helper is bound to.
+	PodName() string
+
+	// IsReady indicates whether the underlying Pod is ready for queries.
+	IsReady(oc *CLI) (bool, error)
+
+	// Query queries the database as a regular user.
+	Query(oc *CLI, query string) (string, error)
+
+	// QueryPrivileged queries the database as a privileged user.
+	QueryPrivileged(oc *CLI, query string) (string, error)
+
+	// TestRemoteLogin tests whether it is possible to remote login to hostAddress.
+	TestRemoteLogin(oc *CLI, hostAddress string) error
+}
+
+// ReplicaSet allows interacting with a database replicated across multiple nodes.
+type ReplicaSet interface {
+	// QueryPrimary queries the database on the primary node as a regular user.
+	QueryPrimary(oc *CLI, query string) (string, error)
+}
+
+// WaitForQueryOutputSatisfies will execute the query multiple times, until the
+// specified predicate function returns true.
+func WaitForQueryOutputSatisfies(oc *CLI, d Database, timeout time.Duration, admin bool, query string, predicate func(string) bool) error {
+	err := wait.Poll(5*time.Second, timeout, func() (bool, error) {
+		var (
+			out string
+			err error
+		)
+
+		if admin {
+			out, err = d.QueryPrivileged(oc, query)
+		} else {
+			out, err = d.Query(oc, query)
+		}
+		fmt.Fprintf(g.GinkgoWriter, "Query %s result: %s\n", query, out)
+		if _, ok := err.(*ExitError); ok {
+			// Ignore exit errors
+			return false, nil
+		}
+		if _, ok := err.(*exec.ExitError); ok {
+			// Ignore exit errors
+			return false, nil
+		}
+		if err != nil {
+			e2e.Logf("failing immediately with error: %v, type=%v", err, reflect.TypeOf(err))
+			return false, err
+		}
+		if predicate(out) {
+			return true, nil
+		}
+		return false, nil
+	})
+	if err == wait.ErrWaitTimeout {
+		return fmt.Errorf("timed out waiting for query: %q", query)
+	}
+	return err
+}
+
+// WaitForQueryOutputContains will execute the query multiple times, until the
+// specified substring is found in the results. This function should be used for
+// testing replication, since it might take some time until the data is propagated
+// to replicas.
+func WaitForQueryOutputContains(oc *CLI, d Database, timeout time.Duration, admin bool, query, resultSubstr string) error {
+	return WaitForQueryOutputSatisfies(oc, d, timeout, admin, query, func(resultOutput string) bool {
+		return strings.Contains(resultOutput, resultSubstr)
+	})
+}
+
+// WaitUntilUp continuously waits for the server to become ready, up until timeout.
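+//
+// A minimal sketch, given any Database helper d:
+//
+//	err := WaitUntilUp(oc, d, 3*time.Minute)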
+func WaitUntilUp(oc *CLI, d Database, timeout time.Duration) error {
+	err := wait.Poll(2*time.Second, timeout, func() (bool, error) {
+		return d.IsReady(oc)
+	})
+	if err == wait.ErrWaitTimeout {
+		return fmt.Errorf("timed out waiting for pod %s to come up", d.PodName())
+	}
+	return err
+}
+
+// WaitUntilAllHelpersAreUp waits until all helpers are ready to serve requests.
+func WaitUntilAllHelpersAreUp(oc *CLI, helpers []Database) error {
+	for _, m := range helpers {
+		if err := WaitUntilUp(oc, m, 3*time.Minute); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/test/util/deployment.go b/test/util/deployment.go
new file mode 100644
index 000000000..44bee8ac2
--- /dev/null
+++ b/test/util/deployment.go
@@ -0,0 +1,55 @@
+package util
+
+import (
+	"context"
+	"time"
+
+	g "github.com/onsi/ginkgo/v2"
+	o "github.com/onsi/gomega"
+
+	appsv1 "k8s.io/api/apps/v1"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/utils/format"
+	"k8s.io/utils/ptr"
+)
+
+// WaitForDeploymentsReady polls listDeployments() until the deployments obtained are ready
+func WaitForDeploymentsReady(ctx context.Context, listDeployments func(ctx context.Context) (*appsv1.DeploymentList, error),
+	isDeploymentReady func(*appsv1.Deployment) bool, timeout, interval time.Duration, printDebugInfo bool) {
+	g.GinkgoHelper()
+	e2e.Logf("Waiting for deployments to be ready")
+	o.Eventually(func() bool {
+		deployList, err := listDeployments(ctx)
+		if err != nil {
+			e2e.Logf("Error listing deployments: %v, keep polling", err)
+			return false
+		}
+		if len(deployList.Items) == 0 {
+			e2e.Logf("No deployments found, keep polling")
+			return false
+		}
+		for _, deploy := range deployList.Items {
+			e2e.Logf("Waiting for deployment %s", deploy.Name)
+			if isDeploymentReady(&deploy) {
+				continue
+			}
+			e2e.Logf("Deployment/%v is not ready, keep polling", deploy.Name)
+			if printDebugInfo {
+				e2e.Logf("Deployment status:\n%s", format.Object(deploy.Status, 0))
+			}
+			return false
+		}
+		return true
+	}).WithTimeout(timeout).WithPolling(interval).WithContext(ctx).Should(o.BeTrue(), "Failed waiting for deployments to be ready")
+	e2e.Logf("Deployments are ready")
+}
+
+// IsDeploymentReady checks if an *appsv1.Deployment is ready
+func IsDeploymentReady(deploy *appsv1.Deployment) bool {
+	expectedReplicas := ptr.Deref[int32](deploy.Spec.Replicas, -1)
+	return expectedReplicas == deploy.Status.AvailableReplicas &&
+		expectedReplicas == deploy.Status.UpdatedReplicas &&
+		expectedReplicas == deploy.Status.ReadyReplicas &&
+		deploy.Generation <= deploy.Status.ObservedGeneration &&
+		deploy.Status.UnavailableReplicas == 0
+}
diff --git a/test/util/deploymentconfigs.go b/test/util/deploymentconfigs.go
new file mode 100644
index 000000000..800f22b13
--- /dev/null
+++ b/test/util/deploymentconfigs.go
@@ -0,0 +1,48 @@
+package util
+
+import (
+	"context"
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kutilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/wait"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+// RemoveDeploymentConfigs deletes the given DeploymentConfigs in a namespace
+func RemoveDeploymentConfigs(oc *CLI, dcs ...string) error {
+	errs := []error{}
+	for _, dc := range dcs {
+		e2e.Logf("Removing deployment config %s/%s", oc.Namespace(), dc)
+		if err := oc.AdminAppsClient().AppsV1().DeploymentConfigs(oc.Namespace()).Delete(context.Background(), dc, metav1.DeleteOptions{}); err != nil {
+			e2e.Logf("Error occurred removing deployment config: %v", err)
+			errs = append(errs, 
err) + } + + err := wait.PollImmediate(5*time.Second, 5*time.Minute, func() (bool, error) { + pods, err := GetApplicationPods(oc, dc) + if err != nil { + e2e.Logf("Unable to get pods for dc/%s: %v", dc, err) + return false, err + } + if len(pods.Items) > 0 { + e2e.Logf("Waiting for pods for dc/%s to terminate", dc) + return false, nil + } + e2e.Logf("Pods for dc/%s have terminated", dc) + return true, nil + }) + + if err != nil { + e2e.Logf("Error occurred waiting for pods to terminate for dc/%s: %v", dc, err) + errs = append(errs, err) + } + } + + if len(errs) != 0 { + return kutilerrors.NewAggregate(errs) + } + + return nil +} diff --git a/test/util/disruption/disruption.go b/test/util/disruption/disruption.go new file mode 100644 index 000000000..d639d77e6 --- /dev/null +++ b/test/util/disruption/disruption.go @@ -0,0 +1,161 @@ +package disruption + +import ( + "context" + "encoding/xml" + "fmt" + "os" + "path/filepath" + "reflect" + "regexp" + "runtime/debug" + "strings" + "sync" + "time" + + g "github.com/onsi/ginkgo/v2" + + "k8s.io/kubernetes/test/e2e/chaosmonkey" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/upgrades" + "k8s.io/kubernetes/test/utils/junit" +) + +// TestData is passed to the invariant tests executed during the upgrade. The default UpgradeType +// is MasterUpgrade. +type TestData struct { + UpgradeType upgrades.UpgradeType + UpgradeContext upgrades.UpgradeContext +} + +// Run executes the provided fn in a test context, ensuring that invariants are preserved while the +// test is being executed. Description is used to populate the JUnit suite name, and testname is +// used to define the overall test that will be run. +func Run(description, testname string, adapter TestData, invariants []upgrades.Test, fn func()) { + testSuite := &junit.TestSuite{Name: description, Package: testname} + test := &junit.TestCase{Name: testname, Classname: testname} + testSuite.TestCases = append(testSuite.TestCases, test) + cm := chaosmonkey.New(func(ctx context.Context) { + start := time.Now() + defer finalizeTest(start, test) + fn() + }) + runChaosmonkey(cm, adapter, invariants, testSuite) +} + +func runChaosmonkey( + cm *chaosmonkey.Chaosmonkey, + testData TestData, + tests []upgrades.Test, + testSuite *junit.TestSuite, +) { + testFrameworks := createTestFrameworks(tests) + for _, t := range tests { + testCase := &junit.TestCase{ + Name: t.Name(), + Classname: "disruption_tests", + } + testSuite.TestCases = append(testSuite.TestCases, testCase) + + f, ok := testFrameworks[t.Name()] + if !ok { + panic(fmt.Sprintf("can't find test framework for %q", t.Name())) + } + cma := chaosMonkeyAdapter{ + TestData: testData, + framework: f, + test: t, + testReport: testCase, + } + cm.Register(cma.Test) + } + + start := time.Now() + defer func() { + testSuite.Update() + testSuite.Time = time.Since(start).Seconds() + if framework.TestContext.ReportDir != "" { + fname := filepath.Join(framework.TestContext.ReportDir, fmt.Sprintf("junit_%s_%d.xml", testSuite.Package, time.Now().Unix())) + f, err := os.Create(fname) + if err != nil { + return + } + defer f.Close() + xml.NewEncoder(f).Encode(testSuite) + } + }() + cm.Do(context.Background()) +} + +type chaosMonkeyAdapter struct { + TestData + + test upgrades.Test + testReport *junit.TestCase + framework *framework.Framework +} + +func (cma *chaosMonkeyAdapter) Test(ctx context.Context, sem *chaosmonkey.Semaphore) { + start := time.Now() + var once sync.Once + ready := func() { + once.Do(func() { + sem.Ready() + }) + } + defer 
finalizeTest(start, cma.testReport)
+	defer g.GinkgoRecover()
+	defer ready()
+	if skippable, ok := cma.test.(upgrades.Skippable); ok && skippable.Skip(cma.UpgradeContext) {
+		g.By("skipping test " + cma.test.Name())
+		cma.testReport.Skipped = "skipping test " + cma.test.Name()
+		return
+	}
+
+	cma.framework.BeforeEach(ctx)
+	cma.test.Setup(ctx, cma.framework)
+	defer cma.test.Teardown(ctx, cma.framework)
+	ready()
+	cma.test.Test(ctx, cma.framework, sem.StopCh, cma.UpgradeType)
+}
+
+func finalizeTest(start time.Time, tc *junit.TestCase) {
+	tc.Time = time.Since(start).Seconds()
+	r := recover()
+	if r == nil {
+		return
+	}
+
+	switch r := r.(type) {
+	default:
+		tc.Errors = []*junit.Error{
+			{
+				Message: fmt.Sprintf("%v", r),
+				Type:    "Failure",
+				Value:   fmt.Sprintf("%v\n\n%s", r, debug.Stack()),
+			},
+		}
+	}
+}
+
+// TODO: accept a default framework
+func createTestFrameworks(tests []upgrades.Test) map[string]*framework.Framework {
+	nsFilter := regexp.MustCompile("[^[:word:]-]+") // match anything that's not a word character or hyphen
+	testFrameworks := map[string]*framework.Framework{}
+	for _, t := range tests {
+		ns := nsFilter.ReplaceAllString(t.Name(), "-") // and replace with a single hyphen
+		ns = strings.Trim(ns, "-")
+		// identify tests that come from kube as strictly e2e tests so they get the correct semantics
+		if strings.Contains(reflect.ValueOf(t).Elem().Type().PkgPath(), "/kubernetes/test/e2e/") {
+			ns = "e2e-k8s-" + ns
+		}
+		testFrameworks[t.Name()] = &framework.Framework{
+			BaseName: ns,
+			Options: framework.Options{
+				ClientQPS:   20,
+				ClientBurst: 50,
+			},
+		}
+	}
+	return testFrameworks
+}
diff --git a/test/util/docker.go b/test/util/docker.go
new file mode 100644
index 000000000..4818bbf6d
--- /dev/null
+++ b/test/util/docker.go
@@ -0,0 +1,78 @@
+package util
+
+import (
+	"fmt"
+
+	dockerClient "github.com/fsouza/go-dockerclient"
+)
+
+// ListImages initiates the equivalent of a `docker images`
+func ListImages() ([]string, error) {
+	client, err := dockerClient.NewClientFromEnv()
+	if err != nil {
+		return nil, err
+	}
+	imageList, err := client.ListImages(dockerClient.ListImagesOptions{})
+	if err != nil {
+		return nil, err
+	}
+	returnIds := make([]string, 0)
+	for _, image := range imageList {
+		for _, tag := range image.RepoTags {
+			returnIds = append(returnIds, tag)
+		}
+	}
+	return returnIds, nil
+}
+
+type MissingTagError struct {
+	Tags []string
+}
+
+func (mte MissingTagError) Error() string {
+	return fmt.Sprintf("the tag %s passed in was invalid, and not found in the list of images returned from docker", mte.Tags)
+}
+
+// GetImageIDForTags obtains the hexadecimal image IDs for the provided human-readable image tags
+func GetImageIDForTags(comps []string) ([]string, error) {
+	client, dcerr := dockerClient.NewClientFromEnv()
+	if dcerr != nil {
+		return nil, dcerr
+	}
+	imageList, serr := client.ListImages(dockerClient.ListImagesOptions{})
+	if serr != nil {
+		return nil, serr
+	}
+
+	returnTags := make([]string, 0)
+	missingTags := make([]string, 0)
+	for _, comp := range comps {
+		var found bool
+		for _, image := range imageList {
+			for _, repTag := range image.RepoTags {
+				if repTag == comp {
+					found = true
+					returnTags = append(returnTags, image.ID)
+					break
+				}
+			}
+			if found {
+				break
+			}
+		}
+
+		if !found {
+			returnTags = append(returnTags, "")
+			missingTags = append(missingTags, comp)
+		}
+	}
+
+	if len(missingTags) == 0 {
+		return returnTags, nil
+	} else {
+		mte := MissingTagError{
+			Tags: missingTags,
+		}
+		return returnTags, mte
+	}
+}
diff --git a/test/util/endpoints.go b/test/util/endpoints.go
new file mode 100644
index 000000000..3e026d39f
--- /dev/null
+++ b/test/util/endpoints.go
@@ -0,0 +1,21 @@
+package util
+
+import (
+	"context"
+	"time"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+)
+
+func WaitForEndpointsAvailable(oc *CLI, serviceName string) error {
+	return wait.Poll(200*time.Millisecond, 3*time.Minute, func() (bool, error) {
+		ep, err := oc.KubeClient().CoreV1().Endpoints(oc.Namespace()).Get(context.Background(), serviceName, metav1.GetOptions{})
+		if err != nil && !errors.IsNotFound(err) {
+			return false, err
+		}
+
+		return (len(ep.Subsets) > 0) && (len(ep.Subsets[0].Addresses) > 0), nil
+	})
+}
diff --git a/test/util/file.go b/test/util/file.go
new file mode 100644
index 000000000..12b295a7e
--- /dev/null
+++ b/test/util/file.go
@@ -0,0 +1,59 @@
+package util
+
+import (
+	"io"
+	"os"
+	"strings"
+
+	o "github.com/onsi/gomega"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+// DuplicateFileToPath copies the file at srcPath to destPath.
+func DuplicateFileToPath(srcPath string, destPath string) {
+	var destFile, srcFile *os.File
+	var err error
+
+	srcFile, err = os.Open(srcPath)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	defer func() {
+		o.Expect(srcFile.Close()).NotTo(o.HaveOccurred())
+	}()
+
+	// If the file already exists, it is truncated. If the file does not exist, it is created with mode 0666.
+	destFile, err = os.Create(destPath)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	defer func() {
+		o.Expect(destFile.Close()).NotTo(o.HaveOccurred())
+	}()
+
+	_, err = io.Copy(destFile, srcFile)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	o.Expect(destFile.Sync()).NotTo(o.HaveOccurred())
+}
+
+// DuplicateFileToTemp creates a temporary duplicate of the file at srcPath using destPrefix for naming,
+// returning the path of the duplicate.
+func DuplicateFileToTemp(srcPath string, destPrefix string) string {
+	destFile, err := os.CreateTemp(os.TempDir(), destPrefix)
+	o.Expect(err).NotTo(o.HaveOccurred(), "Failed to create temporary file")
+	o.Expect(destFile.Close()).NotTo(o.HaveOccurred(), "Failed to close temporary file")
+
+	destPath := destFile.Name()
+	DuplicateFileToPath(srcPath, destPath)
+	return destPath
+}
+
+// MoveFileToPath attempts to move a file from srcPath to destPath.
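+// os.Rename cannot cross filesystem boundaries; when it fails with an
+// "invalid cross-device link" error we fall back to copying the file and
+// removing the source.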
+func MoveFileToPath(srcPath string, destPath string) { + switch err := os.Rename(srcPath, destPath); { + case err == nil: + return + case strings.Contains(err.Error(), "invalid cross-device link"): + e2e.Logf("Failed to rename file from %s to %s: %v, attempting an alternative", srcPath, destPath, err) + DuplicateFileToPath(srcPath, destPath) + o.Expect(os.Remove(srcPath)).NotTo(o.HaveOccurred(), "Failed to remove source file") + default: + o.Expect(err).NotTo(o.HaveOccurred(), "Failed to rename source file") + } +} diff --git a/test/util/framework.go b/test/util/framework.go new file mode 100644 index 000000000..991cec582 --- /dev/null +++ b/test/util/framework.go @@ -0,0 +1,1928 @@ +package util + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "os" + "path" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "time" + + g "github.com/onsi/ginkgo/v2" + o "github.com/onsi/gomega" + + authorizationapi "k8s.io/api/authorization/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + kapiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/apitesting" + "k8s.io/apimachinery/pkg/api/errors" + kapierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/apimachinery/pkg/util/wait" + quota "k8s.io/apiserver/pkg/quota/v1" + "k8s.io/client-go/kubernetes" + kclientset "k8s.io/client-go/kubernetes" + batchv1client "k8s.io/client-go/kubernetes/typed/batch/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + e2e "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/pod" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" + "k8s.io/kubernetes/test/e2e/framework/statefulset" + "k8s.io/kubernetes/test/utils/image" + + appsv1 "github.com/openshift/api/apps/v1" + buildv1 "github.com/openshift/api/build/v1" + configv1 "github.com/openshift/api/config/v1" + imagev1 "github.com/openshift/api/image/v1" + operatorv1 "github.com/openshift/api/operator/v1" + appsv1clienttyped "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1" + buildv1clienttyped "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1" + imagev1typedclient "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + "github.com/openshift/library-go/pkg/apps/appsutil" + "github.com/openshift/library-go/pkg/build/naming" + "github.com/openshift/library-go/pkg/git" + "github.com/openshift/library-go/pkg/image/imageutil" + "github.com/openshift/openshift-tests-private/test/extended/testdata" + + . "github.com/onsi/gomega" +) + +func init() { + if KubeConfigPath() == "" { + fmt.Fprintf(os.Stderr, "Please set KUBECONFIG first!\n") + os.Exit(0) + } +} + +// WaitForInternalRegistryHostname waits for the internal registry hostname to be made available to the cluster. 
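+//
+// Expected call pattern (a sketch; the hostname shown is only an example):
+//
+//	host, err := WaitForInternalRegistryHostname(oc)
+//	o.Expect(err).NotTo(o.HaveOccurred())
+//	e2e.Logf("internal registry hostname: %s", host) // e.g. image-registry.openshift-image-registry.svc:5000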
+func WaitForInternalRegistryHostname(oc *CLI) (string, error) { + e2e.Logf("Waiting up to 2 minutes for the internal registry hostname to be published") + var registryHostname string + foundOCMLogs := false + isOCMProgressing := true + podLogs := map[string]string{} + err := wait.Poll(2*time.Second, 2*time.Minute, func() (bool, error) { + imageConfig, err := oc.AsAdmin().AdminConfigClient().ConfigV1().Images().Get(context.Background(), "cluster", metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + e2e.Logf("Image config object not found") + return false, nil + } + e2e.Logf("Error accessing image config object: %#v", err) + return false, err + } + if imageConfig == nil { + e2e.Logf("Image config object nil") + return false, nil + } + registryHostname = imageConfig.Status.InternalRegistryHostname + if len(registryHostname) == 0 { + e2e.Logf("Internal Registry Hostname is not set in image config object") + return false, nil + } + + // verify that the OCM config's internal registry hostname matches + // the image config's internal registry hostname + ocm, err := oc.AdminOperatorClient().OperatorV1().OpenShiftControllerManagers().Get(context.Background(), "cluster", metav1.GetOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return false, nil + } + return false, err + } + observedConfig := map[string]interface{}{} + err = json.Unmarshal(ocm.Spec.ObservedConfig.Raw, &observedConfig) + if err != nil { + return false, nil + } + internalRegistryHostnamePath := []string{"dockerPullSecret", "internalRegistryHostname"} + currentRegistryHostname, _, err := unstructured.NestedString(observedConfig, internalRegistryHostnamePath...) + if err != nil { + e2e.Logf("error procesing observed config %#v", err) + return false, nil + } + if currentRegistryHostname != registryHostname { + e2e.Logf("OCM observed config hostname %s does not match image config hostname %s", currentRegistryHostname, registryHostname) + return false, nil + } + // check pod logs for messages around image config's internal registry hostname has been observed and + // and that the build controller was started after that observation + pods, err := oc.AdminKubeClient().CoreV1().Pods("openshift-controller-manager").List(context.Background(), metav1.ListOptions{}) + if err != nil { + if errors.IsNotFound(err) { + return false, nil + } + return false, err + } + for _, pod := range pods.Items { + req := oc.AdminKubeClient().CoreV1().Pods("openshift-controller-manager").GetLogs(pod.Name, &corev1.PodLogOptions{}) + readCloser, err := req.Stream(context.Background()) + if err == nil { + b, err := ioutil.ReadAll(readCloser) + if err == nil { + podLog := string(b) + podLogs[pod.Name] = podLog + scanner := bufio.NewScanner(strings.NewReader(podLog)) + firstLog := false + for scanner.Scan() { + line := scanner.Text() + if strings.Contains(line, "build_controller.go") && strings.Contains(line, "Starting build controller") { + firstLog = true + continue + } + if firstLog && strings.Contains(line, "build_controller.go") && strings.Contains(line, registryHostname) { + e2e.Logf("the OCM pod logs indicate the build controller was started after the internal registry hostname has been set in the OCM config") + foundOCMLogs = true + break + } + } + } + } else { + e2e.Logf("error getting pod logs: %#v", err) + } + } + if !foundOCMLogs { + e2e.Logf("did not find the sequence in the OCM pod logs around the build controller getting started after the internal registry hostname has been set in the OCM config") + return false, nil + } + 
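+		// Beyond the log check, require the OCM Progressing condition to have settled
+		// (reported False) before treating the hostname as published.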
+ if !isOCMProgressing { + return true, nil + } + // now cycle through the OCM operator conditions and make sure the Progressing condition is done + for _, condition := range ocm.Status.Conditions { + if condition.Type != operatorv1.OperatorStatusTypeProgressing { + continue + } + if condition.Status != operatorv1.ConditionFalse { + e2e.Logf("OCM rollout still progressing or in error: %v", condition.Status) + return false, nil + } + e2e.Logf("OCM rollout progressing status reports complete") + isOCMProgressing = true + return true, nil + } + e2e.Logf("OCM operator progressing condition not present yet") + return false, nil + }) + + if !foundOCMLogs { + e2e.Logf("dumping OCM pod logs since we never found the internal registry hostname and start build controller sequence") + for podName, podLog := range podLogs { + e2e.Logf("pod %s logs:\n%s", podName, podLog) + } + } + if err == wait.ErrWaitTimeout { + return "", fmt.Errorf("Timed out waiting for internal registry hostname to be published") + } + if err != nil { + return "", err + } + return registryHostname, nil +} + +// WaitForOpenShiftNamespaceImageStreams waits for the standard set of imagestreams to be imported +func WaitForOpenShiftNamespaceImageStreams(oc *CLI) error { + // First wait for the internal registry hostname to be published + registryHostname, err := WaitForInternalRegistryHostname(oc) + if err != nil { + return err + } + langs := []string{"ruby", "nodejs", "perl", "php", "python", "mysql", "postgresql", "mongodb", "jenkins"} + scan := func() bool { + // check the samples operator to see about imagestream import status + samplesOperatorConfig, err := oc.AdminConfigClient().ConfigV1().ClusterOperators().Get(context.Background(), "openshift-samples", metav1.GetOptions{}) + if err != nil { + e2e.Logf("Samples Operator ClusterOperator Error: %#v", err) + return false + } + for _, condition := range samplesOperatorConfig.Status.Conditions { + switch { + case condition.Type == configv1.OperatorDegraded && condition.Status == configv1.ConditionTrue: + // if degraded, bail ... 
unexpected results can ensue + e2e.Logf("SamplesOperator degraded!!!") + return false + case condition.Type == configv1.OperatorProgressing: + // if the imagestreams for one of our langs above failed, we abort, + // but if it is for say only EAP streams, we allow + if condition.Reason == "FailedImageImports" { + msg := condition.Message + for _, lang := range langs { + if strings.Contains(msg, " "+lang+" ") || strings.HasSuffix(msg, " "+lang) { + e2e.Logf("SamplesOperator detected error during imagestream import: %s with details %s", condition.Reason, condition.Message) + stream, err := oc.AsAdmin().ImageClient().ImageV1().ImageStreams("openshift").Get(context.Background(), lang, metav1.GetOptions{}) + if err != nil { + e2e.Logf("after seeing FailedImageImports for %s retrieval failed with %s", lang, err.Error()) + return false + } + isi := &imagev1.ImageStreamImport{} + isi.Name = lang + isi.Namespace = "openshift" + isi.ResourceVersion = stream.ResourceVersion + isi.Spec = imagev1.ImageStreamImportSpec{ + Import: true, + Images: []imagev1.ImageImportSpec{}, + } + for _, tag := range stream.Spec.Tags { + if tag.From != nil && tag.From.Kind == "DockerImage" { + iis := imagev1.ImageImportSpec{} + iis.From = *tag.From + iis.To = &corev1.LocalObjectReference{Name: tag.Name} + isi.Spec.Images = append(isi.Spec.Images, iis) + } + } + _, err = oc.AsAdmin().ImageClient().ImageV1().ImageStreamImports("openshift").Create(context.Background(), isi, metav1.CreateOptions{}) + if err != nil { + e2e.Logf("after seeing FailedImageImports for %s the manual image import failed with %s", lang, err.Error()) + } + e2e.Logf("after seeing FailedImageImports for %s a manual image-import was submitted", lang) + return false + } + } + } + if condition.Status == configv1.ConditionTrue { + // updates still in progress ... not "ready" + e2e.Logf("SamplesOperator still in progress") + return false + } + case condition.Type == configv1.OperatorAvailable && condition.Status == configv1.ConditionFalse: + e2e.Logf("SamplesOperator not available") + return false + default: + e2e.Logf("SamplesOperator at steady state") + } + } + for _, lang := range langs { + e2e.Logf("Checking language %v \n", lang) + is, err := oc.ImageClient().ImageV1().ImageStreams("openshift").Get(context.Background(), lang, metav1.GetOptions{}) + if err != nil { + e2e.Logf("ImageStream Error: %#v \n", err) + return false + } + if !strings.HasPrefix(is.Status.DockerImageRepository, registryHostname) { + e2e.Logf("ImageStream repository %s does not match expected host %s \n", is.Status.DockerImageRepository, registryHostname) + return false + } + for _, tag := range is.Spec.Tags { + e2e.Logf("Checking tag %v \n", tag) + if _, found := imageutil.StatusHasTag(is, tag.Name); !found { + e2e.Logf("Tag Error: %#v \n", tag) + return false + } + } + } + return true + } + + // with the move to ocp/rhel as the default for the samples in 4.0, there are alot more imagestreams; + // if by some chance this path runs very soon after the cluster has come up, the original time out would + // not be sufficient; + // so we've bumped what was 30 seconds to 2 min 30 seconds or 150 seconds (manual perf testing shows typical times of + // 1 to 2 minutes, assuming registry.access.redhat.com / registry.redhat.io are behaving ... 
they + // have proven less reliable that docker.io) + // we've also determined that e2e-aws-image-ecosystem can be started before all the operators have completed; while + // that is getting sorted out, the longer time will help there as well + e2e.Logf("Scanning openshift ImageStreams \n") + success := false + wait.Poll(10*time.Second, 150*time.Second, func() (bool, error) { + success = scan() + return success, nil + }) + if success { + e2e.Logf("Success! \n") + return nil + } + DumpImageStreams(oc) + DumpSampleOperator(oc) + return fmt.Errorf("Failed to import expected imagestreams") +} + +// DumpImageStreams will dump both the openshift namespace and local namespace imagestreams +// as part of debugging when the language imagestreams in the openshift namespace seem to disappear +func DumpImageStreams(oc *CLI) { + out, err := oc.AsAdmin().Run("get").Args("is", "-n", "openshift", "-o", "yaml", "--config", KubeConfigPath()).Output() + if err == nil { + e2e.Logf("\n imagestreams in openshift namespace: \n%s\n", out) + } else { + e2e.Logf("\n error on getting imagestreams in openshift namespace: %+v\n%#v\n", err, out) + } + out, err = oc.AsAdmin().Run("get").Args("is", "-o", "yaml").Output() + if err == nil { + e2e.Logf("\n imagestreams in dynamic test namespace: \n%s\n", out) + } else { + e2e.Logf("\n error on getting imagestreams in dynamic test namespace: %+v\n%#v\n", err, out) + } + ids, err := ListImages() + if err != nil { + e2e.Logf("\n got error on container images %+v\n", err) + } else { + for _, id := range ids { + e2e.Logf(" found local image %s\n", id) + } + } +} + +func DumpSampleOperator(oc *CLI) { + out, err := oc.AsAdmin().Run("get").Args("configs.samples.operator.openshift.io", "cluster", "-o", "yaml", "--config", KubeConfigPath()).Output() + if err == nil { + e2e.Logf("\n samples operator CR: \n%s\n", out) + } else { + e2e.Logf("\n error on getting samples operator CR: %+v\n%#v\n", err, out) + } + DumpPodLogsStartingWithInNamespace("cluster-samples-operator", "openshift-cluster-samples-operator", oc) + +} + +// DumpBuildLogs will dump the latest build logs for a BuildConfig for debug purposes +func DumpBuildLogs(bc string, oc *CLI) { + buildOutput, err := oc.AsAdmin().Run("logs").Args("-f", "bc/"+bc, "--timestamps").Output() + if err == nil { + e2e.Logf("\n\n build logs : %s\n\n", buildOutput) + } else { + e2e.Logf("\n\n got error on build logs %+v\n\n", err) + } + + // if we suspect that we are filling up the registry file system, call ExamineDiskUsage / ExaminePodDiskUsage + // also see if manipulations of the quota around /mnt/openshift-xfs-vol-dir exist in the extended test set up scripts + ExamineDiskUsage() + ExaminePodDiskUsage(oc) +} + +// DumpBuilds will dump the yaml for every build in the test namespace; remember, pipeline builds +// don't have build pods so a generic framework dump won't cat our pipeline builds objs in openshift +func DumpBuilds(oc *CLI) { + buildOutput, err := oc.AsAdmin().Run("get").Args("builds", "-o", "yaml").Output() + if err == nil { + e2e.Logf("\n\n builds yaml:\n%s\n\n", buildOutput) + } else { + e2e.Logf("\n\n got error on build yaml dump: %#v\n\n", err) + } +} + +func GetDeploymentConfigPods(oc *CLI, dcName string, version int64) (*kapiv1.PodList, error) { + return oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()).List(context.Background(), metav1.ListOptions{LabelSelector: ParseLabelsOrDie(fmt.Sprintf("%s=%s-%d", + appsv1.DeployerPodForDeploymentLabel, dcName, version)).String()}) +} + +func GetApplicationPods(oc *CLI, dcName string) 
(*kapiv1.PodList, error) { + return oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()).List(context.Background(), metav1.ListOptions{LabelSelector: ParseLabelsOrDie(fmt.Sprintf("deploymentconfig=%s", dcName)).String()}) +} + +func GetStatefulSetPods(oc *CLI, setName string) (*kapiv1.PodList, error) { + return oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()).List(context.Background(), metav1.ListOptions{LabelSelector: ParseLabelsOrDie(fmt.Sprintf("name=%s", setName)).String()}) +} + +// DumpDeploymentLogs will dump the latest deployment logs for a DeploymentConfig for debug purposes +func DumpDeploymentLogs(dcName string, version int64, oc *CLI) { + e2e.Logf("Dumping deployment logs for deploymentconfig %q\n", dcName) + + pods, err := GetDeploymentConfigPods(oc, dcName, version) + if err != nil { + e2e.Logf("Unable to retrieve pods for deploymentconfig %q: %v\n", dcName, err) + return + } + + DumpPodLogs(pods.Items, oc) +} + +// DumpApplicationPodLogs will dump the latest application logs for a DeploymentConfig for debug purposes +func DumpApplicationPodLogs(dcName string, oc *CLI) { + e2e.Logf("Dumping application logs for deploymentconfig %q\n", dcName) + + pods, err := GetApplicationPods(oc, dcName) + if err != nil { + e2e.Logf("Unable to retrieve pods for deploymentconfig %q: %v\n", dcName, err) + return + } + + DumpPodLogs(pods.Items, oc) +} + +// DumpPodStates dumps the state of all pods in the CLI's current namespace. +func DumpPodStates(oc *CLI) { + e2e.Logf("Dumping pod state for namespace %s", oc.Namespace()) + out, err := oc.AsAdmin().Run("get").Args("pods", "-o", "yaml").Output() + if err != nil { + e2e.Logf("Error dumping pod states: %v", err) + return + } + e2e.Logf(out) +} + +// DumpPodStatesInNamespace dumps the state of all pods in the provided namespace. 
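+// It shells out to `oc get pods -n <namespace> -o yaml`, so the full manifests land
+// in the test log. Illustration (hypothetical namespace; a sketch only):
+//
+//	DumpPodStatesInNamespace("openshift-image-registry", oc)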
+func DumpPodStatesInNamespace(namespace string, oc *CLI) {
+	e2e.Logf("Dumping pod state for namespace %s", namespace)
+	out, err := oc.AsAdmin().Run("get").Args("pods", "-n", namespace, "-o", "yaml").Output()
+	if err != nil {
+		e2e.Logf("Error dumping pod states: %v", err)
+		return
+	}
+	e2e.Logf(out)
+}
+
+// DumpPodLogsStartingWith will dump any pod starting with the name prefix provided
+func DumpPodLogsStartingWith(prefix string, oc *CLI) {
+	podsToDump := []kapiv1.Pod{}
+	podList, err := oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()).List(context.Background(), metav1.ListOptions{})
+	if err != nil {
+		e2e.Logf("Error listing pods: %v", err)
+		return
+	}
+	for _, pod := range podList.Items {
+		if strings.HasPrefix(pod.Name, prefix) {
+			podsToDump = append(podsToDump, pod)
+		}
+	}
+	if len(podsToDump) > 0 {
+		DumpPodLogs(podsToDump, oc)
+	}
+}
+
+// DumpPodLogsStartingWithInNamespace will dump any pod in the provided namespace whose name starts with the given prefix
+func DumpPodLogsStartingWithInNamespace(prefix, namespace string, oc *CLI) {
+	podsToDump := []kapiv1.Pod{}
+	podList, err := oc.AdminKubeClient().CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{})
+	if err != nil {
+		e2e.Logf("Error listing pods: %v", err)
+		return
+	}
+	for _, pod := range podList.Items {
+		if strings.HasPrefix(pod.Name, prefix) {
+			podsToDump = append(podsToDump, pod)
+		}
+	}
+	if len(podsToDump) > 0 {
+		DumpPodLogs(podsToDump, oc)
+	}
+}
+
+func DumpPodLogs(pods []kapiv1.Pod, oc *CLI) {
+	for _, pod := range pods {
+		descOutput, err := oc.AsAdmin().Run("describe").WithoutNamespace().Args("pod/"+pod.Name, "-n", pod.Namespace).Output()
+		if err == nil {
+			e2e.Logf("Describing pod %q\n%s\n\n", pod.Name, descOutput)
+		} else {
+			e2e.Logf("Error retrieving description for pod %q: %v\n\n", pod.Name, err)
+		}
+
+		dumpContainer := func(container *kapiv1.Container) {
+			depOutput, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args("pod/"+pod.Name, "-c", container.Name, "-n", pod.Namespace).Output()
+			if err == nil {
+				e2e.Logf("Log for pod %q/%q\n---->\n%s\n<----end of log for %[1]q/%[2]q\n", pod.Name, container.Name, depOutput)
+			} else {
+				e2e.Logf("Error retrieving logs for pod %q/%q: %v\n\n", pod.Name, container.Name, err)
+			}
+		}
+
+		for _, c := range pod.Spec.InitContainers {
+			dumpContainer(&c)
+		}
+		for _, c := range pod.Spec.Containers {
+			dumpContainer(&c)
+		}
+	}
+}
+
+// DumpPodsCommand runs the provided command in every pod identified by selector in the provided namespace.
+func DumpPodsCommand(c kubernetes.Interface, ns string, selector labels.Selector, cmd string) {
+	podList, err := c.CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{LabelSelector: selector.String()})
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+	values := make(map[string]string)
+	for _, pod := range podList.Items {
+		stdout, err := e2eoutput.RunHostCmdWithRetries(pod.Namespace, pod.Name, cmd, statefulset.StatefulSetPoll, statefulset.StatefulPodTimeout)
+		o.Expect(err).NotTo(o.HaveOccurred())
+		values[pod.Name] = stdout
+	}
+	for name, stdout := range values {
+		stdout = strings.TrimSuffix(stdout, "\n")
+		e2e.Logf(name + ": " + strings.Join(strings.Split(stdout, "\n"), fmt.Sprintf("\n%s: ", name)))
+	}
+}
+
+// DumpConfigMapStates dumps the state of all ConfigMaps in the CLI's current namespace.
+func DumpConfigMapStates(oc *CLI) { + e2e.Logf("Dumping configMap state for namespace %s", oc.Namespace()) + out, err := oc.AsAdmin().Run("get").Args("configmaps", "-o", "yaml").Output() + if err != nil { + e2e.Logf("Error dumping configMap states: %v", err) + return + } + e2e.Logf(out) +} + +// GetMasterThreadDump will get a golang thread stack dump +func GetMasterThreadDump(oc *CLI) { + out, err := oc.AsAdmin().Run("get").Args("--raw", "/debug/pprof/goroutine?debug=2").Output() + if err == nil { + e2e.Logf("\n\n Master thread stack dump:\n\n%s\n\n", string(out)) + return + } + e2e.Logf("\n\n got error on oc get --raw /debug/pprof/goroutine?godebug=2: %v\n\n", err) +} + +func PreTestDump() { + // dump any state we want to know prior to running tests +} + +// ExamineDiskUsage will dump df output on the testing system; leveraging this as part of diagnosing +// the registry's disk filling up during external tests on jenkins +func ExamineDiskUsage() { + // disabling this for now, easier to do it here than everywhere that's calling it. + return + /* + out, err := exec.Command("/bin/df", "-m").Output() + if err == nil { + e2e.Logf("\n\n df -m output: %s\n\n", string(out)) + } else { + e2e.Logf("\n\n got error on df %v\n\n", err) + } + DumpDockerInfo() + */ +} + +// ExaminePodDiskUsage will dump df/du output on registry pod; leveraging this as part of diagnosing +// the registry's disk filling up during external tests on jenkins +func ExaminePodDiskUsage(oc *CLI) { + // disabling this for now, easier to do it here than everywhere that's calling it. + return + /* + out, err := oc.Run("get").Args("pods", "-o", "json", "-n", "default", "--config", KubeConfigPath()).Output() + var podName string + if err == nil { + b := []byte(out) + var list kapiv1.PodList + err = json.Unmarshal(b, &list) + if err == nil { + for _, pod := range list.Items { + e2e.Logf("\n\n looking at pod %s \n\n", pod.ObjectMeta.Name) + if strings.Contains(pod.ObjectMeta.Name, "docker-registry-") && !strings.Contains(pod.ObjectMeta.Name, "deploy") { + podName = pod.ObjectMeta.Name + break + } + } + } else { + e2e.Logf("\n\n got json unmarshal err: %v\n\n", err) + } + } else { + e2e.Logf("\n\n got error on get pods: %v\n\n", err) + } + if len(podName) == 0 { + e2e.Logf("Unable to determine registry pod name, so we can't examine its disk usage.") + return + } + + out, err = oc.Run("exec").Args("-n", "default", podName, "df", "--config", KubeConfigPath()).Output() + if err == nil { + e2e.Logf("\n\n df from registry pod: \n%s\n\n", out) + } else { + e2e.Logf("\n\n got error on reg pod df: %v\n", err) + } + out, err = oc.Run("exec").Args("-n", "default", podName, "du", "/registry", "--config", KubeConfigPath()).Output() + if err == nil { + e2e.Logf("\n\n du from registry pod: \n%s\n\n", out) + } else { + e2e.Logf("\n\n got error on reg pod du: %v\n", err) + } + */ +} + +// VarSubOnFile reads in srcFile, finds instances of ${key} from the map +// and replaces them with their associated values. +func VarSubOnFile(srcFile string, destFile string, vars map[string]string) error { + srcData, err := ioutil.ReadFile(srcFile) + if err == nil { + srcString := string(srcData) + for k, v := range vars { + k = "${" + k + "}" + srcString = strings.Replace(srcString, k, v, -1) // -1 means unlimited replacements + } + err = ioutil.WriteFile(destFile, []byte(srcString), 0644) + } + return err +} + +// StartBuild executes OC start-build with the specified arguments. StdOut and StdErr from the process +// are returned as separate strings. 
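+//
+// A sketch (hypothetical buildconfig name; illustration only):
+//
+//	stdout, stderr, err := StartBuild(oc, "sample-build", "--wait")
+//	o.Expect(err).NotTo(o.HaveOccurred(), stderr)
+//	o.Expect(stdout).To(o.ContainSubstring("sample-build"))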
+func StartBuild(oc *CLI, args ...string) (stdout, stderr string, err error) {
+	stdout, stderr, err = oc.Run("start-build").Args(args...).Outputs()
+	e2e.Logf("\n\nstart-build output with args %v:\nError>%v\nStdOut>\n%s\nStdErr>\n%s\n\n", args, err, stdout, stderr)
+	return stdout, stderr, err
+}
+
+var buildPathPattern = regexp.MustCompile(`^build\.build\.openshift\.io/([\w\-\._]+)$`)
+
+type LogDumperFunc func(oc *CLI, br *BuildResult) (string, error)
+
+func NewBuildResult(oc *CLI, build *buildv1.Build) *BuildResult {
+	return &BuildResult{
+		Oc:        oc,
+		BuildName: build.Name,
+		BuildPath: "builds/" + build.Name,
+	}
+}
+
+type BuildResult struct {
+	// BuildPath is a resource qualified name (e.g. "build/test-1").
+	BuildPath string
+	// BuildName is the non-resource qualified name.
+	BuildName string
+	// StartBuildStdErr is the StdErr output generated by oc start-build.
+	StartBuildStdErr string
+	// StartBuildStdOut is the StdOut output generated by oc start-build.
+	StartBuildStdOut string
+	// StartBuildErr is the error, if any, returned by the direct invocation of the start-build command.
+	StartBuildErr error
+	// BuildConfigName is the buildconfig which generated this build.
+	BuildConfigName string
+	// Build is the resource created. May be nil if there was a timeout.
+	Build *buildv1.Build
+	// BuildAttempt represents that a Build resource was created.
+	// false indicates a severe error unrelated to Build success or failure.
+	BuildAttempt bool
+	// BuildSuccess is true if the build was finished successfully.
+	BuildSuccess bool
+	// BuildFailure is true if the build was finished with an error.
+	BuildFailure bool
+	// BuildCancelled is true if the build was canceled.
+	BuildCancelled bool
+	// BuildTimeout is true if there was a timeout waiting for the build to finish.
+	BuildTimeout bool
+	// LogDumper is an alternate log dumper function. If set, this is called instead of 'oc logs'.
+	LogDumper LogDumperFunc
+	// Oc is the openshift client which created this build.
+	Oc *CLI
+}
+
+// DumpLogs sends logs associated with this BuildResult to the GinkgoWriter.
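+// Typically called on failure paths, e.g. (a sketch; hypothetical buildconfig name):
+//
+//	br, err := StartBuildAndWait(oc, "sample-build")
+//	o.Expect(err).NotTo(o.HaveOccurred())
+//	if !br.BuildSuccess {
+//		br.DumpLogs()
+//	}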
+func (t *BuildResult) DumpLogs() {
+	e2e.Logf("\n\n*****************************************\n")
+	// Check for nil before dereferencing the receiver below.
+	if t == nil {
+		e2e.Logf("No build result available!\n\n")
+		return
+	}
+	e2e.Logf("Dumping Build Result: %#v\n", *t)
+
+	desc, err := t.Oc.Run("describe").Args(t.BuildPath).Output()
+
+	e2e.Logf("\n** Build Description:\n")
+	if err != nil {
+		e2e.Logf("Error during description retrieval: %+v\n", err)
+	} else {
+		e2e.Logf("%s\n", desc)
+	}
+
+	e2e.Logf("\n** Build Logs:\n")
+
+	buildOutput, err := t.Logs()
+	if err != nil {
+		e2e.Logf("Error during log retrieval: %+v\n", err)
+	} else {
+		e2e.Logf("%s\n", buildOutput)
+	}
+
+	e2e.Logf("\n\n")
+
+	t.dumpRegistryLogs()
+
+	// if we suspect that we are filling up the registry file system, call ExamineDiskUsage / ExaminePodDiskUsage
+	// also see if manipulations of the quota around /mnt/openshift-xfs-vol-dir exist in the extended test set up scripts
+	/*
+		ExamineDiskUsage()
+		ExaminePodDiskUsage(t.oc)
+		e2e.Logf( "\n\n")
+	*/
+}
+
+func (t *BuildResult) dumpRegistryLogs() {
+	var buildStarted *time.Time
+	oc := t.Oc
+	e2e.Logf("\n** Registry Logs:\n")
+
+	if t.Build != nil && !t.Build.CreationTimestamp.IsZero() {
+		buildStarted = &t.Build.CreationTimestamp.Time
+	} else {
+		proj, err := oc.ProjectClient().ProjectV1().Projects().Get(context.Background(), oc.Namespace(), metav1.GetOptions{})
+		if err != nil {
+			e2e.Logf("Failed to get project %s: %v\n", oc.Namespace(), err)
+		} else {
+			buildStarted = &proj.CreationTimestamp.Time
+		}
+	}
+
+	if buildStarted == nil {
+		e2e.Logf("Could not determine the test's start time\n\n\n")
+		return
+	}
+
+	since := time.Now().Sub(*buildStarted)
+
+	// Changing the namespace on the derived client still changes it on the original client
+	// because the kubeFramework field is only copied by reference. Saving the original namespace
+	// here so we can restore it when done with registry logs
+	// TODO remove the default/docker-registry log retrieval when we are fully migrated to 4.0 for our test env.
+	savedNamespace := t.Oc.Namespace()
+	oadm := t.Oc.AsAdmin().SetNamespace("default")
+	out, err := oadm.Run("logs").Args("dc/docker-registry", "--since="+since.String()).Output()
+	if err != nil {
+		e2e.Logf("Error during log retrieval: %+v\n", err)
+	} else {
+		e2e.Logf("%s\n", out)
+	}
+	oadm = t.Oc.AsAdmin().SetNamespace("openshift-image-registry")
+	out, err = oadm.Run("logs").Args("deployment/image-registry", "--since="+since.String()).Output()
+	if err != nil {
+		e2e.Logf("Error during log retrieval: %+v\n", err)
+	} else {
+		e2e.Logf("%s\n", out)
+	}
+	t.Oc.SetNamespace(savedNamespace)
+
+	e2e.Logf("\n\n")
+}
+
+// Logs returns the logs associated with this build.
+func (t *BuildResult) Logs() (string, error) {
+	if t == nil || t.BuildPath == "" {
+		return "", fmt.Errorf("Not enough information to retrieve logs for %#v", t)
+	}
+
+	if t.LogDumper != nil {
+		return t.LogDumper(t.Oc, t)
+	}
+
+	buildOutput, err := t.Oc.Run("logs").Args("-f", t.BuildPath, "--timestamps").Output()
+	if err != nil {
+		return "", fmt.Errorf("Error retrieving logs for %#v: %v", *t, err)
+	}
+
+	return buildOutput, nil
+}
+
+// LogsNoTimestamp returns the logs associated with this build.
+func (t *BuildResult) LogsNoTimestamp() (string, error) {
+	if t == nil || t.BuildPath == "" {
+		return "", fmt.Errorf("Not enough information to retrieve logs for %#v", t)
+	}
+
+	if t.LogDumper != nil {
+		return t.LogDumper(t.Oc, t)
+	}
+
+	buildOutput, err := t.Oc.Run("logs").Args("-f", t.BuildPath).Output()
+	if err != nil {
+		return "", fmt.Errorf("Error retrieving logs for %#v: %v", *t, err)
+	}
+
+	return buildOutput, nil
+}
+
+// AssertSuccess dumps logs and triggers a Ginkgo assertion if the build did NOT succeed.
+func (t *BuildResult) AssertSuccess() *BuildResult {
+	if !t.BuildSuccess {
+		t.DumpLogs()
+	}
+	o.ExpectWithOffset(1, t.BuildSuccess).To(o.BeTrue())
+	return t
+}
+
+// AssertFailure dumps logs and triggers a Ginkgo assertion if the build did NOT have an error (this will not assert on timeouts)
+func (t *BuildResult) AssertFailure() *BuildResult {
+	if !t.BuildFailure {
+		t.DumpLogs()
+	}
+	o.ExpectWithOffset(1, t.BuildFailure).To(o.BeTrue())
+	return t
+}
+
+func StartBuildResult(oc *CLI, args ...string) (result *BuildResult, err error) {
+	args = append(args, "-o=name") // ensure that the build name is the only thing sent to stdout
+	stdout, stderr, err := StartBuild(oc, args...)
+
+	// Usually, with -o=name, we only expect the build path.
+	// However, the caller may have added --follow which can add
+	// content to stdout. So just grab the first line.
+	buildPath := strings.TrimSpace(strings.Split(stdout, "\n")[0])
+
+	result = &BuildResult{
+		Build:            nil,
+		BuildPath:        buildPath,
+		StartBuildStdOut: stdout,
+		StartBuildStdErr: stderr,
+		StartBuildErr:    nil,
+		BuildAttempt:     false,
+		BuildSuccess:     false,
+		BuildFailure:     false,
+		BuildCancelled:   false,
+		BuildTimeout:     false,
+		Oc:               oc,
+	}
+
+	// An error here does not necessarily mean we could not run start-build. For example
+	// when --wait is specified, start-build returns an error if the build fails. Therefore,
+	// we continue to collect build information even if we see an error.
+	result.StartBuildErr = err
+
+	matches := buildPathPattern.FindStringSubmatch(buildPath)
+	if len(matches) != 2 {
+		return result, fmt.Errorf("Build path output did not match expected format 'build/name' : %q", buildPath)
+	}
+
+	result.BuildName = matches[1]
+
+	return result, nil
+}
+
+// StartBuildAndWait executes OC start-build with the specified arguments on an existing buildconfig.
+// Note that start-build will be run with "-o=name" as a parameter when using this method.
+// If no error is returned from this method, it means that the build attempted successfully, NOT that
+// the build completed. For completion information, check the BuildResult object.
+func StartBuildAndWait(oc *CLI, args ...string) (result *BuildResult, err error) {
+	result, err = StartBuildResult(oc, args...)
+ if err != nil { + return result, err + } + return result, WaitForBuildResult(oc.BuildClient().BuildV1().Builds(oc.Namespace()), result) +} + +// WaitForBuildResult updates result wit the state of the build +func WaitForBuildResult(c buildv1clienttyped.BuildInterface, result *BuildResult) error { + e2e.Logf("Waiting for %s to complete\n", result.BuildName) + err := WaitForABuild(c, result.BuildName, + func(b *buildv1.Build) bool { + result.Build = b + result.BuildSuccess = CheckBuildSuccess(b) + return result.BuildSuccess + }, + func(b *buildv1.Build) bool { + result.Build = b + result.BuildFailure = CheckBuildFailed(b) + return result.BuildFailure + }, + func(b *buildv1.Build) bool { + result.Build = b + result.BuildCancelled = CheckBuildCancelled(b) + return result.BuildCancelled + }, + ) + + if result.Build == nil { + // We only abort here if the build progress was unobservable. Only known cause would be severe, non-build related error in WaitForABuild. + return fmt.Errorf("Severe error waiting for build: %v", err) + } + + result.BuildAttempt = true + result.BuildTimeout = !(result.BuildFailure || result.BuildSuccess || result.BuildCancelled) + + e2e.Logf("Done waiting for %s: %#v\n with error: %v\n", result.BuildName, *result, err) + return nil +} + +// WaitForABuild waits for a Build object to match either isOK or isFailed conditions. +func WaitForABuild(c buildv1clienttyped.BuildInterface, name string, isOK, isFailed, isCanceled func(*buildv1.Build) bool) error { + if isOK == nil { + isOK = CheckBuildSuccess + } + if isFailed == nil { + isFailed = CheckBuildFailed + } + if isCanceled == nil { + isCanceled = CheckBuildCancelled + } + + // wait 2 minutes for build to exist + err := wait.Poll(1*time.Second, 2*time.Minute, func() (bool, error) { + if _, err := c.Get(context.Background(), name, metav1.GetOptions{}); err != nil { + return false, nil + } + return true, nil + }) + if err == wait.ErrWaitTimeout { + return fmt.Errorf("Timed out waiting for build %q to be created", name) + } + if err != nil { + return err + } + // wait longer for the build to run to completion + err = wait.Poll(5*time.Second, 10*time.Minute, func() (bool, error) { + list, err := c.List(context.Background(), metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": name}.AsSelector().String()}) + if err != nil { + e2e.Logf("error listing builds: %v", err) + return false, err + } + for i := range list.Items { + if name == list.Items[i].Name && (isOK(&list.Items[i]) || isCanceled(&list.Items[i])) { + return true, nil + } + if name != list.Items[i].Name { + return false, fmt.Errorf("While listing builds named %s, found unexpected build %#v", name, list.Items[i]) + } + if isFailed(&list.Items[i]) { + return false, fmt.Errorf("The build %q status is %q", name, list.Items[i].Status.Phase) + } + } + return false, nil + }) + if err != nil { + e2e.Logf("WaitForABuild returning with error: %v", err) + } + if err == wait.ErrWaitTimeout { + return fmt.Errorf("Timed out waiting for build %q to complete", name) + } + return err +} + +// CheckBuildSuccess returns true if the build succeeded +func CheckBuildSuccess(b *buildv1.Build) bool { + return b.Status.Phase == buildv1.BuildPhaseComplete +} + +// CheckBuildFailed return true if the build failed +func CheckBuildFailed(b *buildv1.Build) bool { + return b.Status.Phase == buildv1.BuildPhaseFailed || b.Status.Phase == buildv1.BuildPhaseError +} + +// CheckBuildCancelled return true if the build was canceled +func CheckBuildCancelled(b *buildv1.Build) bool { + return 
b.Status.Phase == buildv1.BuildPhaseCancelled +} + +// WaitForServiceAccount waits until the named service account gets fully +// provisioned +func WaitForServiceAccount(c corev1client.ServiceAccountInterface, name string, checkSecret bool) error { + countOutput := -1 + // add Logf for better debug, but it will possible generate many logs because of 100 millisecond + // so, add countOutput so that it output log every 100 times (10s) + waitFn := func() (bool, error) { + countOutput++ + sc, err := c.Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + // If we can't access the service accounts, let's wait till the controller + // create it. + if errors.IsNotFound(err) || errors.IsForbidden(err) { + if countOutput%100 == 0 { + e2e.Logf("Waiting for service account %q to be available: %v (will retry) ...", name, err) + } + return false, nil + } + return false, fmt.Errorf("failed to get service account %q: %v", name, err) + } + secretNames := []string{} + var hasDockercfg bool + for _, s := range sc.Secrets { + if strings.Contains(s.Name, "dockercfg") { + hasDockercfg = true + } + secretNames = append(secretNames, s.Name) + } + if hasDockercfg || !checkSecret { + return true, nil + } + if countOutput%100 == 0 { + e2e.Logf("Waiting for service account %q secrets (%s) to include dockercfg ...", name, strings.Join(secretNames, ",")) + } + return false, nil + } + return wait.Poll(time.Duration(100*time.Millisecond), 3*time.Minute, waitFn) +} + +// WaitForAnImageStream waits for an ImageStream to fulfill the isOK function +func WaitForAnImageStream(client imagev1typedclient.ImageStreamInterface, + name string, + isOK, isFailed func(*imagev1.ImageStream) bool) error { + for { + list, err := client.List(context.Background(), metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": name}.AsSelector().String()}) + if err != nil { + return err + } + for i := range list.Items { + if isOK(&list.Items[i]) { + return nil + } + if isFailed(&list.Items[i]) { + return fmt.Errorf("The image stream %q status is %q", + name, list.Items[i].Annotations[imagev1.DockerImageRepositoryCheckAnnotation]) + } + } + + rv := list.ResourceVersion + w, err := client.Watch(context.Background(), metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": name}.AsSelector().String(), ResourceVersion: rv}) + if err != nil { + return err + } + defer w.Stop() + + for { + val, ok := <-w.ResultChan() + if !ok { + // reget and re-watch + break + } + if e, ok := val.Object.(*imagev1.ImageStream); ok { + if isOK(e) { + return nil + } + if isFailed(e) { + return fmt.Errorf("The image stream %q status is %q", + name, e.Annotations[imagev1.DockerImageRepositoryCheckAnnotation]) + } + } + } + } +} + +// WaitForAnImageStreamTag waits until an image stream with given name has non-empty history for given tag. +// Defaults to waiting for 300 seconds +func WaitForAnImageStreamTag(oc *CLI, namespace, name, tag string) error { + return TimedWaitForAnImageStreamTag(oc, namespace, name, tag, time.Second*300) +} + +// TimedWaitForAnImageStreamTag waits until an image stream with given name has non-empty history for given tag. 
+// Gives up waiting after the specified waitTimeout +func TimedWaitForAnImageStreamTag(oc *CLI, namespace, name, tag string, waitTimeout time.Duration) error { + g.By(fmt.Sprintf("waiting for an is importer to import a tag %s into a stream %s", tag, name)) + start := time.Now() + c := make(chan error) + go func() { + err := WaitForAnImageStream( + oc.ImageClient().ImageV1().ImageStreams(namespace), + name, + func(is *imagev1.ImageStream) bool { + statusTag, exists := imageutil.StatusHasTag(is, tag) + if !exists || len(statusTag.Items) == 0 { + return false + } + return true + }, + func(is *imagev1.ImageStream) bool { + return time.Now().After(start.Add(waitTimeout)) + }) + c <- err + }() + + select { + case e := <-c: + return e + case <-time.After(waitTimeout): + return fmt.Errorf("timed out while waiting of an image stream tag %s/%s:%s", namespace, name, tag) + } +} + +// CheckImageStreamLatestTagPopulated returns true if the imagestream has a ':latest' tag filed +func CheckImageStreamLatestTagPopulated(i *imagev1.ImageStream) bool { + _, ok := imageutil.StatusHasTag(i, "latest") + return ok +} + +// CheckImageStreamTagNotFound return true if the imagestream update was not successful +func CheckImageStreamTagNotFound(i *imagev1.ImageStream) bool { + return strings.Contains(i.Annotations[imagev1.DockerImageRepositoryCheckAnnotation], "not") || + strings.Contains(i.Annotations[imagev1.DockerImageRepositoryCheckAnnotation], "error") +} + +// WaitForDeploymentConfig waits for a DeploymentConfig to complete transition +// to a given version and report minimum availability. +func WaitForDeploymentConfig(kc kubernetes.Interface, dcClient appsv1clienttyped.DeploymentConfigsGetter, namespace, name string, version int64, enforceNotProgressing bool, cli *CLI) error { + e2e.Logf("waiting for deploymentconfig %s/%s to be available with version %d\n", namespace, name, version) + var dc *appsv1.DeploymentConfig + + start := time.Now() + err := wait.Poll(time.Second, 15*time.Minute, func() (done bool, err error) { + dc, err = dcClient.DeploymentConfigs(namespace).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return false, err + } + + // TODO re-enable this check once @mfojtik introduces a test that ensures we'll only ever get + // exactly one deployment triggered. 
+ /* + if dc.Status.LatestVersion > version { + return false, fmt.Errorf("latestVersion %d passed %d", dc.Status.LatestVersion, version) + } + */ + if dc.Status.LatestVersion < version { + return false, nil + } + + var progressing, available *appsv1.DeploymentCondition + for i, condition := range dc.Status.Conditions { + switch condition.Type { + case appsv1.DeploymentProgressing: + progressing = &dc.Status.Conditions[i] + + case appsv1.DeploymentAvailable: + available = &dc.Status.Conditions[i] + } + } + + if enforceNotProgressing { + if progressing != nil && progressing.Status == corev1.ConditionFalse { + return false, fmt.Errorf("not progressing") + } + } + + if progressing != nil && + progressing.Status == corev1.ConditionTrue && + progressing.Reason == appsutil.NewRcAvailableReason && + available != nil && + available.Status == corev1.ConditionTrue { + return true, nil + } + + return false, nil + }) + + if err != nil { + e2e.Logf("got error %q when waiting for deploymentconfig %s/%s to be available with version %d\n", err, namespace, name, version) + cli.Run("get").Args("dc", dc.Name, "-o", "yaml").Execute() + + DumpDeploymentLogs(name, version, cli) + DumpApplicationPodLogs(name, cli) + + return err + } + + requirement, err := labels.NewRequirement(appsutil.DeploymentLabel, selection.Equals, []string{appsutil.LatestDeploymentNameForConfigAndVersion( + dc.Name, dc.Status.LatestVersion)}) + if err != nil { + return err + } + + podnames, err := GetPodNamesByFilter(kc.CoreV1().Pods(namespace), labels.NewSelector().Add(*requirement), func(kapiv1.Pod) bool { return true }) + if err != nil { + return err + } + + e2e.Logf("deploymentconfig %s/%s available after %s\npods: %s\n", namespace, name, time.Now().Sub(start), strings.Join(podnames, ", ")) + + return nil +} + +func isUsageSynced(received, expected corev1.ResourceList, expectedIsUpperLimit bool) bool { + resourceNames := quota.ResourceNames(expected) + masked := quota.Mask(received, resourceNames) + if len(masked) != len(expected) { + return false + } + if expectedIsUpperLimit { + if le, _ := quota.LessThanOrEqual(masked, expected); !le { + return false + } + } else { + if le, _ := quota.LessThanOrEqual(expected, masked); !le { + return false + } + } + return true +} + +// WaitForResourceQuotaSync watches given resource quota until its usage is updated to desired level or a +// timeout occurs. If successful, used quota values will be returned for expected resources. Otherwise an +// ErrWaitTimeout will be returned. If expectedIsUpperLimit is true, given expected usage must compare greater +// or equal to quota's usage, which is useful for expected usage increment. Otherwise expected usage must +// compare lower or equal to quota's usage, which is useful for expected usage decrement. 
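+//
+// A sketch of waiting for a usage increment (hypothetical quota name and values;
+// resource.MustParse comes from k8s.io/apimachinery/pkg/api/resource):
+//
+//	expected := corev1.ResourceList{corev1.ResourcePods: resource.MustParse("3")}
+//	used, err := WaitForResourceQuotaSync(
+//		oc.KubeClient().CoreV1().ResourceQuotas(oc.Namespace()),
+//		"test-quota", expected, true, 2*time.Minute)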
+func WaitForResourceQuotaSync( + client corev1client.ResourceQuotaInterface, + name string, + expectedUsage corev1.ResourceList, + expectedIsUpperLimit bool, + timeout time.Duration, +) (corev1.ResourceList, error) { + + startTime := time.Now() + endTime := startTime.Add(timeout) + + expectedResourceNames := quota.ResourceNames(expectedUsage) + + list, err := client.List(context.Background(), metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": name}.AsSelector().String()}) + if err != nil { + return nil, err + } + + for i := range list.Items { + used := quota.Mask(list.Items[i].Status.Used, expectedResourceNames) + if isUsageSynced(used, expectedUsage, expectedIsUpperLimit) { + return used, nil + } + } + + rv := list.ResourceVersion + w, err := client.Watch(context.Background(), metav1.ListOptions{FieldSelector: fields.Set{"metadata.name": name}.AsSelector().String(), ResourceVersion: rv}) + if err != nil { + return nil, err + } + defer w.Stop() + + for time.Now().Before(endTime) { + select { + case val, ok := <-w.ResultChan(): + if !ok { + // reget and re-watch + continue + } + if rq, ok := val.Object.(*corev1.ResourceQuota); ok { + used := quota.Mask(rq.Status.Used, expectedResourceNames) + if isUsageSynced(used, expectedUsage, expectedIsUpperLimit) { + return used, nil + } + } + case <-time.After(endTime.Sub(time.Now())): + return nil, wait.ErrWaitTimeout + } + } + return nil, wait.ErrWaitTimeout +} + +// GetPodNamesByFilter looks up pods that satisfy the predicate and returns their names. +func GetPodNamesByFilter(c corev1client.PodInterface, label labels.Selector, predicate func(kapiv1.Pod) bool) (podNames []string, err error) { + podList, err := c.List(context.Background(), metav1.ListOptions{LabelSelector: label.String()}) + if err != nil { + return nil, err + } + for _, pod := range podList.Items { + if predicate(pod) { + podNames = append(podNames, pod.Name) + } + } + return podNames, nil +} + +func WaitForAJob(c batchv1client.JobInterface, name string, timeout time.Duration) error { + return wait.Poll(1*time.Second, timeout, func() (bool, error) { + j, e := c.Get(context.Background(), name, metav1.GetOptions{}) + if e != nil { + return true, e + } + // TODO soltysh: replace this with a function once such exist, currently + // it's private in the controller + for _, c := range j.Status.Conditions { + if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == kapiv1.ConditionTrue { + return true, nil + } + } + return false, nil + }) +} + +// WaitForPods waits until given number of pods that match the label selector and +// satisfy the predicate are found +func WaitForPods(c corev1client.PodInterface, label labels.Selector, predicate func(kapiv1.Pod) bool, count int, timeout time.Duration) ([]string, error) { + var podNames []string + err := wait.Poll(1*time.Second, timeout, func() (bool, error) { + p, e := GetPodNamesByFilter(c, label, predicate) + if e != nil { + return true, e + } + if len(p) != count { + return false, nil + } + podNames = p + return true, nil + }) + return podNames, err +} + +// CheckPodIsRunning returns true if the pod is running +func CheckPodIsRunning(pod kapiv1.Pod) bool { + return pod.Status.Phase == kapiv1.PodRunning +} + +// CheckPodIsSucceeded returns true if the pod status is "Succdeded" +func CheckPodIsSucceeded(pod kapiv1.Pod) bool { + return pod.Status.Phase == kapiv1.PodSucceeded +} + +// CheckPodIsReady returns true if the pod's ready probe determined that the pod is ready. 
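+//
+// Like the other predicates above, it composes with WaitForPods, e.g. (a sketch
+// with a hypothetical label selector):
+//
+//	names, err := WaitForPods(oc.KubeClient().CoreV1().Pods(oc.Namespace()),
+//		ParseLabelsOrDie("name=frontend"), CheckPodIsReady, 2, 3*time.Minute)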
+func CheckPodIsReady(pod kapiv1.Pod) bool { + if pod.Status.Phase != kapiv1.PodRunning { + return false + } + for _, cond := range pod.Status.Conditions { + if cond.Type != kapiv1.PodReady { + continue + } + return cond.Status == kapiv1.ConditionTrue + } + return false +} + +// CheckPodNoOp always returns true +func CheckPodNoOp(pod kapiv1.Pod) bool { + return true +} + +// WaitUntilPodIsGone waits until the named Pod will disappear +func WaitUntilPodIsGone(c corev1client.PodInterface, podName string, timeout time.Duration) error { + return wait.Poll(1*time.Second, timeout, func() (bool, error) { + _, err := c.Get(context.Background(), podName, metav1.GetOptions{}) + if err != nil { + if strings.Contains(err.Error(), "not found") { + return true, nil + } + return true, err + } + return false, nil + }) +} + +// GetDockerImageReference retrieves the full Docker pull spec from the given ImageStream +// and tag +func GetDockerImageReference(c imagev1typedclient.ImageStreamInterface, name, tag string) (string, error) { + imageStream, err := c.Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + isTag, ok := imageutil.StatusHasTag(imageStream, tag) + if !ok { + return "", fmt.Errorf("ImageStream %q does not have tag %q", name, tag) + } + return isTag.Items[0].DockerImageReference, nil +} + +// GetPodForContainer creates a new Pod that runs specified container +func GetPodForContainer(container kapiv1.Container) *kapiv1.Pod { + name := naming.GetPodName("test-pod", string(uuid.NewUUID())) + return &kapiv1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{"name": name}, + }, + Spec: kapiv1.PodSpec{ + Containers: []kapiv1.Container{container}, + RestartPolicy: kapiv1.RestartPolicyNever, + }, + } +} + +// KubeConfigPath returns the value of KUBECONFIG environment variable +func KubeConfigPath() string { + // can't use gomega in this method since it is used outside of It() + return os.Getenv("KUBECONFIG") +} + +// ArtifactDirPath returns the value of ARTIFACT_DIR environment variable +func ArtifactDirPath() string { + path := os.Getenv("ARTIFACT_DIR") + o.Expect(path).NotTo(o.BeNil()) + o.Expect(path).NotTo(o.BeEmpty()) + return path +} + +// ArtifactPath returns the absolute path to the fix artifact file +// The path is relative to ARTIFACT_DIR +func ArtifactPath(elem ...string) string { + return filepath.Join(append([]string{ArtifactDirPath()}, elem...)...) +} + +var ( + fixtureDirLock sync.Once + fixtureDir string +) + +// FixturePath returns an absolute path to a fixture file in test/extended/testdata/, +// test/integration/, or examples/. +func FixturePath(elem ...string) string { + switch { + case len(elem) == 0: + panic("must specify path") + case len(elem) > 3 && elem[0] == ".." && elem[1] == ".." && elem[2] == "examples": + elem = elem[2:] + case len(elem) > 3 && elem[0] == ".." && elem[1] == ".." && elem[2] == "install": + elem = elem[2:] + case len(elem) > 3 && elem[0] == ".." && elem[1] == "integration": + elem = append([]string{"test"}, elem[1:]...) + case elem[0] == "testdata": + elem = append([]string{"test", "extended"}, elem...) + default: + panic(fmt.Sprintf("Fixtures must be in test/extended/testdata or examples not %s", path.Join(elem...))) + } + fixtureDirLock.Do(func() { + dir, err := ioutil.TempDir("", "fixture-testdata-dir") + if err != nil { + panic(err) + } + fixtureDir = dir + }) + relativePath := path.Join(elem...) 
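+	// Materialize the fixture from the embedded testdata: RestoreAsset extracts a
+	// single file; if that fails, RestoreAssets restores the path recursively as a
+	// directory tree, and permissions are normalized afterwards.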
+ fullPath := path.Join(fixtureDir, relativePath) + if err := testdata.RestoreAsset(fixtureDir, relativePath); err != nil { + if err := testdata.RestoreAssets(fixtureDir, relativePath); err != nil { + panic(err) + } + if err := filepath.Walk(fullPath, func(path string, info os.FileInfo, err error) error { + if err := os.Chmod(path, 0640); err != nil { + return err + } + if stat, err := os.Lstat(path); err == nil && stat.IsDir() { + return os.Chmod(path, 0755) + } + return nil + }); err != nil { + panic(err) + } + } else { + if err := os.Chmod(fullPath, 0640); err != nil { + panic(err) + } + } + + p, err := filepath.Abs(fullPath) + if err != nil { + panic(err) + } + return p +} + +// FetchURL grabs the output from the specified url and returns it. +// It will retry once per second for duration retryTimeout if an error occurs during the request. +func FetchURL(oc *CLI, url string, retryTimeout time.Duration) (string, error) { + + ns := oc.KubeFramework().Namespace.Name + execPodName := CreateExecPodOrFail(oc.AdminKubeClient().CoreV1(), ns, string(uuid.NewUUID())) + defer func() { + oc.AdminKubeClient().CoreV1().Pods(ns).Delete(context.Background(), execPodName, *metav1.NewDeleteOptions(1)) + }() + + execPod, err := oc.AdminKubeClient().CoreV1().Pods(ns).Get(context.Background(), execPodName, metav1.GetOptions{}) + if err != nil { + return "", err + } + + var response string + waitFn := func() (bool, error) { + e2e.Logf("Waiting up to %v to wget %s", retryTimeout, url) + //cmd := fmt.Sprintf("wget -T 30 -O- %s", url) + cmd := fmt.Sprintf("curl -vvv %s", url) + response, err = e2eoutput.RunHostCmd(execPod.Namespace, execPod.Name, cmd) + if err != nil { + e2e.Logf("got err: %v, retry until timeout", err) + return false, nil + } + // Need to check output because wget -q might omit the error. + if strings.TrimSpace(response) == "" { + e2e.Logf("got empty stdout, retry until timeout") + return false, nil + } + return true, nil + } + pollErr := wait.Poll(time.Duration(1*time.Second), retryTimeout, waitFn) + if pollErr == wait.ErrWaitTimeout { + return "", fmt.Errorf("Timed out while fetching url %q", url) + } + if pollErr != nil { + return "", pollErr + } + return response, nil +} + +// ParseLabelsOrDie turns the given string into a label selector or +// panics; for tests or other cases where you know the string is valid. +// TODO: Move this to the upstream labels package. +func ParseLabelsOrDie(str string) labels.Selector { + ret, err := labels.Parse(str) + if err != nil { + panic(fmt.Sprintf("cannot parse '%v': %v", str, err)) + } + return ret +} + +// LaunchWebserverPod launches a pod serving http on port 8080 to act +// as the target for networking connectivity checks. The ip address +// of the created pod will be returned if the pod is launched +// successfully. 
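+//
+// A connectivity-check sketch (hypothetical node name; assumes agnhost's netexec
+// /hostname endpoint):
+//
+//	ipPort := LaunchWebserverPod(oc.KubeFramework(), "webserver", "worker-0")
+//	out, err := FetchURL(oc, "http://"+ipPort+"/hostname", 2*time.Minute)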
+func LaunchWebserverPod(f *e2e.Framework, podName, nodeName string) (ip string) { + containerName := fmt.Sprintf("%s-container", podName) + port := 8080 + pod := &kapiv1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + }, + Spec: kapiv1.PodSpec{ + Containers: []kapiv1.Container{ + { + Name: containerName, + Image: image.GetE2EImage(image.Agnhost), + Args: []string{"netexec", "--http-port", fmt.Sprintf("%d", port)}, + Ports: []kapiv1.ContainerPort{{ContainerPort: int32(port)}}, + }, + }, + NodeName: nodeName, + RestartPolicy: kapiv1.RestartPolicyNever, + }, + } + podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) + _, err := podClient.Create(context.Background(), pod, metav1.CreateOptions{}) + e2e.ExpectNoError(err) + createdPod, err := podClient.Get(context.Background(), podName, metav1.GetOptions{}) + e2e.ExpectNoError(err) + ip = net.JoinHostPort(createdPod.Status.PodIP, strconv.Itoa(port)) + e2e.Logf("Target pod IP:port is %s", ip) + return +} + +func WaitForEndpoint(c kclientset.Interface, ns, name string) error { + for t := time.Now(); time.Since(t) < 3*time.Minute; time.Sleep(5 * time.Second) { + endpoint, err := c.CoreV1().Endpoints(ns).Get(context.Background(), name, metav1.GetOptions{}) + if kapierrs.IsNotFound(err) { + e2e.Logf("Endpoint %s/%s is not ready yet", ns, name) + continue + } + Expect(err).NotTo(HaveOccurred()) + if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 { + e2e.Logf("Endpoint %s/%s is not ready yet", ns, name) + continue + } else { + return nil + } + } + return fmt.Errorf("Failed to get endpoints for %s/%s", ns, name) +} + +// GetEndpointAddress will return an "ip:port" string for the endpoint. +func GetEndpointAddress(oc *CLI, name string) (string, error) { + err := WaitForEndpoint(oc.KubeFramework().ClientSet, oc.Namespace(), name) + if err != nil { + return "", err + } + endpoint, err := oc.KubeClient().CoreV1().Endpoints(oc.Namespace()).Get(context.Background(), name, metav1.GetOptions{}) + if err != nil { + return "", err + } + return fmt.Sprintf("%s:%d", endpoint.Subsets[0].Addresses[0].IP, endpoint.Subsets[0].Ports[0].Port), nil +} + +// CreateExecPodOrFail creates a simple busybox pod in a sleep loop used as a +// vessel for kubectl exec commands. +// Returns the name of the created pod. +// TODO: expose upstream +func CreateExecPodOrFail(client corev1client.CoreV1Interface, ns, name string) string { + e2e.Logf("Creating new exec pod") + execPod := pod.NewExecPodSpec(ns, name, false) + created, err := client.Pods(ns).Create(context.Background(), execPod, metav1.CreateOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + err = wait.PollImmediate(e2e.Poll, 5*time.Minute, func() (bool, error) { + retrievedPod, err := client.Pods(execPod.Namespace).Get(context.Background(), created.Name, metav1.GetOptions{}) + if err != nil { + return false, nil + } + return retrievedPod.Status.Phase == kapiv1.PodRunning, nil + }) + o.Expect(err).NotTo(o.HaveOccurred()) + return created.Name +} + +// CheckForBuildEvent will poll a build for up to 1 minute looking for an event with +// the specified reason and message template. 
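+//
+// The message parameter is a format string whose verbs receive the build's
+// namespace and name. A sketch with hypothetical reason/message values:
+//
+//	CheckForBuildEvent(oc.AdminKubeClient().CoreV1(), br.Build, "BuildStarted",
+//		"Build %s/%s is now running")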
+func CheckForBuildEvent(client corev1client.CoreV1Interface, build *buildv1.Build, reason, message string) {
+	scheme, _ := apitesting.SchemeForOrDie(buildv1.Install)
+	var expectedEvent *kapiv1.Event
+	err := wait.PollImmediate(e2e.Poll, 1*time.Minute, func() (bool, error) {
+		events, err := client.Events(build.Namespace).Search(scheme, build)
+		if err != nil {
+			return false, err
+		}
+		for _, event := range events.Items {
+			e2e.Logf("Found event %#v", event)
+			if reason == event.Reason {
+				// taking the loop variable's address is safe here because we
+				// return immediately and never advance the loop again
+				expectedEvent = &event
+				return true, nil
+			}
+		}
+		return false, nil
+	})
+	o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred(), "Should be able to get events from the build")
+	o.ExpectWithOffset(1, expectedEvent).NotTo(o.BeNil(), "Did not find a %q event on build %s/%s", reason, build.Namespace, build.Name)
+	o.ExpectWithOffset(1, expectedEvent.Message).To(o.Equal(fmt.Sprintf(message, build.Namespace, build.Name)))
+}
+
+type podExecutor struct {
+	client  *CLI
+	podName string
+}
+
+// NewPodExecutor returns an executor capable of running commands in a Pod.
+func NewPodExecutor(oc *CLI, name, image string) (*podExecutor, error) {
+	out, err := oc.Run("run").Args(name, "--labels", "name="+name, "--image", image, "--restart", "Never", "--command", "--", "/bin/bash", "-c", "sleep infinity").Output()
+	if err != nil {
+		return nil, fmt.Errorf("error: %v\n(%s)", err, out)
+	}
+	_, err = WaitForPods(oc.KubeClient().CoreV1().Pods(oc.Namespace()), ParseLabelsOrDie("name="+name), CheckPodIsReady, 1, 3*time.Minute)
+	if err != nil {
+		return nil, err
+	}
+	return &podExecutor{client: oc, podName: name}, nil
+}
+
+// Exec executes a single command or a bash script in the running pod. It returns the
+// command output, and an error if the command finished with a non-zero status code or
+// took longer than 3 minutes to run.
+func (r *podExecutor) Exec(script string) (string, error) {
+	var out string
+	waitErr := wait.PollImmediate(1*time.Second, 3*time.Minute, func() (bool, error) {
+		var err error
+		out, err = r.client.Run("exec").Args(r.podName, "--", "/bin/bash", "-c", script).Output()
+		return true, err
+	})
+	return out, waitErr
+}
+
+// CopyFromHost copies a local file into the pod via `oc cp`.
+func (r *podExecutor) CopyFromHost(local, remote string) error {
+	_, err := r.client.Run("cp").Args(local, fmt.Sprintf("%s:%s", r.podName, remote)).Output()
+	return err
+}
+
+// RunOneShotCommandPod runs the given command in a pod and waits for completion and log output for the given timeout
+// duration, returning the command output or an error.
+// TODO: merge with the PodExecutor above
+func RunOneShotCommandPod(
+	oc *CLI,
+	name, image, command string,
+	volumeMounts []corev1.VolumeMount,
+	volumes []corev1.Volume,
+	env []corev1.EnvVar,
+	timeout time.Duration,
+) (string, []error) {
+	errs := []error{}
+	cmd := strings.Split(command, " ")
+	args := cmd[1:]
+	var output string
+
+	pod, err := oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()).Create(context.Background(), newCommandPod(name, image, cmd[0], args,
+		volumeMounts, volumes, env), metav1.CreateOptions{})
+	if err != nil {
+		return "", []error{err}
+	}
+
+	// Wait for command completion.
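+	// (The poll below reports failure as soon as the first container's
+	// Terminated reason is "Error" and success once it is "Completed";
+	// see podHasErrored and podHasCompleted.)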
+	err = wait.PollImmediate(1*time.Second, timeout, func() (done bool, err error) {
+		cmdPod, getErr := oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()).Get(context.Background(), pod.Name, v1.GetOptions{})
+		if getErr != nil {
+			e2e.Logf("failed to get pod %q: %v", pod.Name, getErr)
+			return false, nil
+		}
+
+		if podHasErrored(cmdPod) {
+			return true, fmt.Errorf("the pod errored trying to run the command")
+		}
+		return podHasCompleted(cmdPod), nil
+	})
+	if err != nil {
+		errs = append(errs, fmt.Errorf("error waiting for the pod '%s' to complete: %v", pod.Name, err))
+	}
+
+	// Gather pod log output
+	err = wait.PollImmediate(1*time.Second, timeout, func() (done bool, err error) {
+		logs, logErr := getPodLogs(oc, pod)
+		if logErr != nil {
+			return false, logErr
+		}
+		if len(logs) == 0 {
+			return false, nil
+		}
+		output = logs
+		return true, nil
+	})
+	if err != nil {
+		errs = append(errs, fmt.Errorf("command pod %s did not complete: %v", pod.Name, err))
+	}
+
+	return output, errs
+}
+
+func podHasCompleted(pod *corev1.Pod) bool {
+	return len(pod.Status.ContainerStatuses) > 0 &&
+		pod.Status.ContainerStatuses[0].State.Terminated != nil &&
+		pod.Status.ContainerStatuses[0].State.Terminated.Reason == "Completed"
+}
+
+func podHasErrored(pod *corev1.Pod) bool {
+	return len(pod.Status.ContainerStatuses) > 0 &&
+		pod.Status.ContainerStatuses[0].State.Terminated != nil &&
+		pod.Status.ContainerStatuses[0].State.Terminated.Reason == "Error"
+}
+
+func getPodLogs(oc *CLI, pod *corev1.Pod) (string, error) {
+	reader, err := oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()).GetLogs(pod.Name, &corev1.PodLogOptions{}).Stream(context.Background())
+	if err != nil {
+		return "", err
+	}
+	logs, err := ioutil.ReadAll(reader)
+	if err != nil {
+		return "", err
+	}
+	return string(logs), nil
+}
+
+func newCommandPod(name, image, command string, args []string, volumeMounts []corev1.VolumeMount,
+	volumes []corev1.Volume, env []corev1.EnvVar) *corev1.Pod {
+	return &corev1.Pod{
+		ObjectMeta: v1.ObjectMeta{
+			Name: name,
+		},
+		Spec: corev1.PodSpec{
+			Volumes:       volumes,
+			RestartPolicy: corev1.RestartPolicyNever,
+			Containers: []corev1.Container{
+				{
+					Name:            name,
+					Image:           image,
+					Command:         []string{command},
+					Args:            args,
+					VolumeMounts:    volumeMounts,
+					ImagePullPolicy: "Always",
+					Env:             env,
+				},
+			},
+		},
+	}
+}
+
+type GitRepo struct {
+	baseTempDir  string
+	upstream     git.Repository
+	upstreamPath string
+	repo         git.Repository
+	RepoPath     string
+}
+
+// AddAndCommit writes the given content to the file and commits it to the local repo
+func (r GitRepo) AddAndCommit(file, content string) error {
+	dir := filepath.Dir(file)
+	if err := os.MkdirAll(filepath.Join(r.RepoPath, dir), 0777); err != nil {
+		return err
+	}
+	if err := ioutil.WriteFile(filepath.Join(r.RepoPath, file), []byte(content), 0666); err != nil {
+		return err
+	}
+	if err := r.repo.Add(r.RepoPath, file); err != nil {
+		return err
+	}
+	if err := r.repo.Commit(r.RepoPath, "added file "+file); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Remove cleans up the no-longer-needed temporary directories holding the local and "remote" git repos
+func (r GitRepo) Remove() {
+	if r.baseTempDir != "" {
+		os.RemoveAll(r.baseTempDir)
+	}
+}
+
+// NewGitRepo creates temporary test directories with a local and a "remote" git repo
+func NewGitRepo(repoName string) (GitRepo, error) {
+	testDir, err := ioutil.TempDir(os.TempDir(), repoName)
+	if err != nil {
+		return GitRepo{}, err
+	}
+	repoPath := filepath.Join(testDir, repoName)
+	upstreamPath := repoPath + `.git`
+	upstream :=
git.NewRepository() + if err = upstream.Init(upstreamPath, true); err != nil { + return GitRepo{baseTempDir: testDir}, err + } + repo := git.NewRepository() + if err = repo.Clone(repoPath, upstreamPath); err != nil { + return GitRepo{baseTempDir: testDir}, err + } + + return GitRepo{testDir, upstream, upstreamPath, repo, repoPath}, nil +} + +// WaitForUserBeAuthorized waits a minute until the cluster bootstrap roles are available +// and the provided user is authorized to perform the action on the resource. +func WaitForUserBeAuthorized(oc *CLI, user, verb, resource string) error { + sar := &authorizationapi.SubjectAccessReview{ + Spec: authorizationapi.SubjectAccessReviewSpec{ + ResourceAttributes: &authorizationapi.ResourceAttributes{ + Namespace: oc.Namespace(), + Verb: verb, + Resource: resource, + }, + User: user, + }, + } + return wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { + e2e.Logf("Waiting for user '%v' to be authorized to %v the %v resource", user, verb, resource) + resp, err := oc.AdminKubeClient().AuthorizationV1().SubjectAccessReviews().Create(context.Background(), sar, metav1.CreateOptions{}) + if err == nil && resp != nil && resp.Status.Allowed { + return true, nil + } + if err != nil { + e2e.Logf("Error creating SubjectAccessReview: %v", err) + } + if resp != nil { + e2e.Logf("SubjectAccessReview.Status: %#v", resp.Status) + } + return false, err + }) +} + +// GetRouterPodTemplate finds the router pod template across different namespaces, +// helping to mitigate the transition from the default namespace to an operator +// namespace. +func GetRouterPodTemplate(oc *CLI) (*corev1.PodTemplateSpec, string, error) { + appsclient := oc.AdminAppsClient().AppsV1() + k8sappsclient := oc.AdminKubeClient().AppsV1() + for _, ns := range []string{"default", "openshift-ingress", "tectonic-ingress"} { + dc, err := appsclient.DeploymentConfigs(ns).Get(context.Background(), "router", metav1.GetOptions{}) + if err == nil { + return dc.Spec.Template, ns, nil + } + if !errors.IsNotFound(err) { + return nil, "", err + } + deploy, err := k8sappsclient.Deployments(ns).Get(context.Background(), "router", metav1.GetOptions{}) + if err == nil { + return &deploy.Spec.Template, ns, nil + } + if !errors.IsNotFound(err) { + return nil, "", err + } + deploy, err = k8sappsclient.Deployments(ns).Get(context.Background(), "router-default", metav1.GetOptions{}) + if err == nil { + return &deploy.Spec.Template, ns, nil + } + if !errors.IsNotFound(err) { + return nil, "", err + } + } + return nil, "", errors.NewNotFound(schema.GroupResource{Group: "apps.openshift.io", Resource: "deploymentconfigs"}, "router") +} + +// FindImageFormatString returns a format string for components on the cluster. It returns false +// if no format string could be inferred from the cluster. OpenShift 4.0 clusters will not be able +// to infer an image format string, so you must wrap this method in one that can locate your specific +// image. 
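+//
+// A hypothetical sketch of such a wrapper (FindCLIImage below does the same
+// for the cli image):
+//
+//	format, ok := FindImageFormatString(oc)
+//	image := strings.Replace(format, "${component}", "mycomponent", -1)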
+func FindImageFormatString(oc *CLI) (string, bool) {
+	// legacy support for 3.x clusters
+	template, _, err := GetRouterPodTemplate(oc)
+	if err == nil {
+		if strings.Contains(template.Spec.Containers[0].Image, "haproxy-router") {
+			return strings.Replace(template.Spec.Containers[0].Image, "haproxy-router", "${component}", -1), true
+		}
+	}
+	// in openshift 4.0, no image format can be calculated on cluster
+	return "openshift/origin-${component}:latest", false
+}
+
+// FindCLIImage returns the pull spec for the cli image, preferring the
+// openshift/cli image stream and falling back to the cluster image format string.
+func FindCLIImage(oc *CLI) (string, bool) {
+	// look up image stream
+	is, err := oc.AdminImageClient().ImageV1().ImageStreams("openshift").Get(context.Background(), "cli", metav1.GetOptions{})
+	if err == nil {
+		for _, tag := range is.Spec.Tags {
+			if tag.Name == "latest" && tag.From != nil && tag.From.Kind == "DockerImage" {
+				return tag.From.Name, true
+			}
+		}
+	}
+
+	format, ok := FindImageFormatString(oc)
+	return strings.Replace(format, "${component}", "cli", -1), ok
+}
+
+// FindRouterImage returns the ingress-controller version reported on clusteroperators/ingress.
+func FindRouterImage(oc *CLI) (string, error) {
+	configclient := oc.AdminConfigClient().ConfigV1()
+	o, err := configclient.ClusterOperators().Get(context.Background(), "ingress", metav1.GetOptions{})
+	if err != nil {
+		return "", err
+	}
+	for _, v := range o.Status.Versions {
+		if v.Name == "ingress-controller" {
+			return v.Version, nil
+		}
+	}
+	return "", fmt.Errorf("expected to find ingress-controller version on clusteroperators/ingress")
+}
+
+// IsClusterOperated reports whether the cluster has an image config object,
+// i.e. whether it is an operator-managed 4.x cluster.
+func IsClusterOperated(oc *CLI) bool {
+	configclient := oc.AdminConfigClient().ConfigV1()
+	o, err := configclient.Images().Get(context.Background(), "cluster", metav1.GetOptions{})
+	if o == nil || err != nil {
+		e2e.Logf("Could not find image config object, assuming non-4.0 installed cluster: %v", err)
+		return false
+	}
+	return true
+}
diff --git a/test/util/gcloud_client.go b/test/util/gcloud_client.go
new file mode 100644
index 000000000..92b29edcc
--- /dev/null
+++ b/test/util/gcloud_client.go
@@ -0,0 +1,339 @@
+package util
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"os/exec"
+	"strings"
+
+	"cloud.google.com/go/storage"
+	o "github.com/onsi/gomega"
+	"google.golang.org/api/iterator"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+// Gcloud wraps gcloud CLI operations for a GCP project
+type Gcloud struct {
+	ProjectID string
+}
+
+// Login logs in to gcloud. This function needs to be called only once to log in to GCP;
+// the gcloud client is only used for clusters on the GCP platform.
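+//
+// Illustrative use (assumes GOOGLE_APPLICATION_CREDENTIALS is exported; the
+// project ID is hypothetical):
+//
+//	gcloud := (&Gcloud{ProjectID: "my-project"}).Login()
+//	externalIP, err := gcloud.GetIntSvcExternalIP(infraID)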
+func (gcloud *Gcloud) Login() *Gcloud {
+	checkCred, err := exec.Command("bash", "-c", `gcloud auth list --format="value(account)"`).Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	if string(checkCred) != "" {
+		return gcloud
+	}
+	credErr := exec.Command("bash", "-c", "gcloud auth login --cred-file=$GOOGLE_APPLICATION_CREDENTIALS").Run()
+	o.Expect(credErr).NotTo(o.HaveOccurred())
+	projectErr := exec.Command("bash", "-c", fmt.Sprintf("gcloud config set project %s", gcloud.ProjectID)).Run()
+	o.Expect(projectErr).NotTo(o.HaveOccurred())
+	return gcloud
+}
+
+// GetIntSvcExternalIP returns the external IP of the int svc instance
+func (gcloud *Gcloud) GetIntSvcExternalIP(infraID string) (string, error) {
+	externalIP, err := exec.Command("bash", "-c", fmt.Sprintf(`gcloud compute instances list --filter="%s-int-svc" --format="value(EXTERNAL_IP)"`, infraID)).Output()
+	if string(externalIP) == "" {
+		return "", errors.New("additional VM is not found")
+	}
+	return strings.Trim(string(externalIP), "\n"), err
+}
+
+// GetIntSvcInternalIP returns the internal IP of the int svc instance
+func (gcloud *Gcloud) GetIntSvcInternalIP(infraID string) (string, error) {
+	internalIP, err := exec.Command("bash", "-c", fmt.Sprintf(`gcloud compute instances list --filter="%s-int-svc" --format="value(networkInterfaces.networkIP)"`, infraID)).Output()
+	if string(internalIP) == "" {
+		return "", errors.New("additional VM is not found")
+	}
+	return strings.Trim(string(internalIP), "\n"), err
+}
+
+// GetFirewallAllowPorts returns the allowed ports of a firewall rule
+func (gcloud *Gcloud) GetFirewallAllowPorts(ruleName string) (string, error) {
+	ports, err := exec.Command("bash", "-c", fmt.Sprintf(`gcloud compute firewall-rules list --filter="name=(%s)" --format="value(ALLOW)"`, ruleName)).Output()
+	return strings.Trim(string(ports), "\n"), err
+}
+
+// UpdateFirewallAllowPorts updates the allowed ports of a firewall rule
+func (gcloud *Gcloud) UpdateFirewallAllowPorts(ruleName string, ports string) error {
+	return exec.Command("bash", "-c", fmt.Sprintf(`gcloud compute firewall-rules update %s --allow %s`, ruleName, ports)).Run()
+}
+
+// GetZone gets the zone of an instance (infraID is accepted for symmetry but currently unused)
+func (gcloud *Gcloud) GetZone(infraID string, workerName string) (string, error) {
+	output, err := exec.Command("bash", "-c", fmt.Sprintf(`gcloud compute instances list --filter="%s" --format="value(ZONE)"`, workerName)).Output()
+	if string(output) == "" {
+		return "", errors.New("zone info for the instance is not found")
+	}
+	return string(output), err
+}
+
+// StartInstance brings a GCP node/instance back up
+func (gcloud *Gcloud) StartInstance(nodeName string, zoneName string) error {
+	return exec.Command("bash", "-c", fmt.Sprintf(`gcloud compute instances start %s --zone=%s`, nodeName, zoneName)).Run()
+}
+
+// StopInstance shuts down a GCP node/instance
+func (gcloud *Gcloud) StopInstance(nodeName string, zoneName string) error {
+	return exec.Command("bash", "-c", fmt.Sprintf(`gcloud compute instances stop %s --zone=%s`, nodeName, zoneName)).Run()
+}
+
+// GetGcpInstanceByNode returns the instance name for a node
+func (gcloud *Gcloud) GetGcpInstanceByNode(nodeIdentity string) (string, error) {
+	instanceID, err := exec.Command("bash", "-c", fmt.Sprintf(`gcloud compute instances list --filter="%s" --format="value(name)"`, nodeIdentity)).Output()
+	if string(instanceID) == "" {
+		return "", fmt.Errorf("VM is not found")
+	}
+	return strings.Trim(string(instanceID), "\n"), err
+}
+
+// GetGcpInstanceStateByNode returns the instance state
+func (gcloud *Gcloud) GetGcpInstanceStateByNode(nodeIdentity string) (string, error) {
+	instanceState, err := exec.Command("bash", "-c", fmt.Sprintf(`gcloud compute instances list --filter="%s" --format="value(status)"`, nodeIdentity)).Output()
+	if string(instanceState) == "" {
+		return "", fmt.Errorf("not able to get instance state")
+	}
+	return strings.Trim(string(instanceState), "\n"), err
+}
+
+// StopInstanceAsync shuts down a GCP node/instance asynchronously
+func (gcloud *Gcloud) StopInstanceAsync(nodeName string, zoneName string) error {
+	return exec.Command("bash", "-c", fmt.Sprintf(`gcloud compute instances stop %s --async --zone=%s`, nodeName, zoneName)).Run()
+}
+
+// CreateGCSBucket creates a GCS bucket in a project
+func CreateGCSBucket(projectID, bucketName string) error {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	// initialize the GCS client; the credentials are read from the env var GOOGLE_APPLICATION_CREDENTIALS
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		return fmt.Errorf("storage.NewClient: %v", err)
+	}
+	defer client.Close()
+
+	// If the bucket already exists, clear all the objects in it;
+	// otherwise, create the bucket.
+	exist := false
+	buckets, err := ListGCSBuckets(*client, projectID)
+	if err != nil {
+		return err
+	}
+	for _, bu := range buckets {
+		if bu == bucketName {
+			exist = true
+			break
+		}
+	}
+	if exist {
+		return EmptyGCSBucket(*client, bucketName)
+	}
+
+	bucket := client.Bucket(bucketName)
+	if err := bucket.Create(ctx, projectID, &storage.BucketAttrs{}); err != nil {
+		return fmt.Errorf("Bucket(%q).Create: %v", bucketName, err)
+	}
+	fmt.Printf("Created bucket %v\n", bucketName)
+	return nil
+}
+
+// ListGCSBuckets gets all the bucket names under the projectID
+func ListGCSBuckets(client storage.Client, projectID string) ([]string, error) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	var buckets []string
+	it := client.Buckets(ctx, projectID)
+	for {
+		battrs, err := it.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+		buckets = append(buckets, battrs.Name)
+	}
+	return buckets, nil
+}
+
+// EmptyGCSBucket removes all the objects in the bucket
+func EmptyGCSBucket(client storage.Client, bucketName string) error {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	bucket := client.Bucket(bucketName)
+	it := bucket.Objects(ctx, nil)
+	for {
+		objAttrs, err := it.Next()
+		if err != nil && err != iterator.Done {
+			return fmt.Errorf("can't get objects in bucket %s: %v", bucketName, err)
+		}
+		if err == iterator.Done {
+			break
+		}
+		if err := bucket.Object(objAttrs.Name).Delete(ctx); err != nil {
+			return fmt.Errorf("Object(%q).Delete: %v", objAttrs.Name, err)
+		}
+	}
+	e2e.Logf("deleted all object items in the bucket %s.", bucketName)
+	return nil
+}
+
+// DeleteGCSBucket deletes the GCS bucket
+func DeleteGCSBucket(bucketName string) error {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		return fmt.Errorf("storage.NewClient: %v", err)
+	}
+	defer client.Close()
+
+	// remove objects
+	err = EmptyGCSBucket(*client, bucketName)
+	if err != nil {
+		return err
+	}
+	bucket := client.Bucket(bucketName)
+	if err := bucket.Delete(ctx); err != nil {
+		return fmt.Errorf("Bucket(%q).Delete: %v", bucketName, err)
+	}
+	e2e.Logf("Bucket %v is deleted\n", bucketName)
+	return nil
+}
+
+// GetFilestoreInstanceInfo returns detailed filestore instance info from the backend
+func (gcloud *Gcloud) GetFilestoreInstanceInfo(pvName string, filterArgs ...string) ([]byte, error) {
+	filestoreInfo, err := exec.Command("bash", "-c", fmt.Sprintf(`gcloud filestore instances describe %s %s --format=json`, pvName, strings.Join(filterArgs, " "))).Output()
+	if len(filestoreInfo) == 0 {
+		return filestoreInfo, errors.New("gcloud filestore instance not found")
+	}
+	return filestoreInfo, err
+}
+
+// GetPdVolumeInfo returns detailed pd volume info from the backend
+func (gcloud *Gcloud) GetPdVolumeInfo(pvName string, filterArgs ...string) ([]byte, error) {
+	pdVolumeInfo, err := exec.Command("bash", "-c", fmt.Sprintf(`gcloud compute disks describe %s %s --format=json`, pvName, strings.Join(filterArgs, " "))).Output()
+	if len(pdVolumeInfo) == 0 {
+		err = fmt.Errorf(`Couldn't find the pd volume "%s" info`, pvName)
+	}
+	return pdVolumeInfo, err
+}
+
+// GetResourceTags lists the resource-manager tag bindings of a GCS bucket
+func (gcloud *Gcloud) GetResourceTags(bucketName string, zone string) ([]byte, error) {
+	ResourceTags, err := exec.Command("bash", "-c", fmt.Sprintf(`gcloud resource-manager tags bindings list --parent=//storage.googleapis.com/projects/_/buckets/%s --location=%s`, bucketName, zone)).Output()
+	if len(ResourceTags) == 0 {
+		err = fmt.Errorf("couldn't find resource tags")
+	}
+	return ResourceTags, err
+}
+
+// CreateDeploymentManager creates a deployment-manager deployment from a config file
+func (gcloud *Gcloud) CreateDeploymentManager(deploymentName string, config string) (deploymentManagerInfo []byte, err error) {
+	deploymentManagerInfo, err = exec.Command("bash", "-c", fmt.Sprintf(`gcloud deployment-manager deployments create %s --config %s`, deploymentName, config)).CombinedOutput()
+	if err != nil {
+		return deploymentManagerInfo, fmt.Errorf("couldn't create deployment manager: %v, output: %s", err, string(deploymentManagerInfo))
+	}
+	return deploymentManagerInfo, nil
+}
+
+// DeleteDeploymentManager deletes a deployment-manager deployment
+func (gcloud *Gcloud) DeleteDeploymentManager(deploymentName string) (deploymentManagerInfo []byte, err error) {
+	deploymentManagerInfo, err = exec.Command("bash", "-c", fmt.Sprintf(`gcloud deployment-manager deployments delete %s -q`, deploymentName)).CombinedOutput()
+	if err != nil {
+		return deploymentManagerInfo, fmt.Errorf("couldn't delete deployment manager: %v, output: %s", err, string(deploymentManagerInfo))
+	}
+	return deploymentManagerInfo, nil
+}
+
+// CreateVPNGateway creates a VPN gateway on the given network
+func (gcloud *Gcloud) CreateVPNGateway(gatewayName string, networkName string, region string) (createVPNGateway []byte, err error) {
+	createVPNGateway, err = exec.Command("bash", "-c", fmt.Sprintf(`gcloud compute vpn-gateways create %s --network %s --region %s`, gatewayName, networkName, region)).CombinedOutput()
+	if err != nil {
+		return createVPNGateway, fmt.Errorf("couldn't create vpn gateway: %v, output: %s", err, string(createVPNGateway))
+	}
+	return createVPNGateway, nil
+}
+
+// DeleteVPNGateway deletes a VPN gateway
+func (gcloud *Gcloud) DeleteVPNGateway(gatewayName string, region string) (deleteVPNGateway []byte, err error) {
+	deleteVPNGateway, err = exec.Command("bash", "-c", fmt.Sprintf(`gcloud compute vpn-gateways delete %s --region %s -q`, gatewayName, region)).CombinedOutput()
+	if err != nil {
+		return deleteVPNGateway, fmt.Errorf("couldn't delete vpn gateway: %v, output: %s", err, string(deleteVPNGateway))
+	}
+	return deleteVPNGateway, nil
+}
+
+// GetVPNGatewayIP returns the IP address of the given VPN gateway interface
+func (gcloud *Gcloud) GetVPNGatewayIP(gatewayName, region string, interfaceIndex int) (string, error) {
+	output, err := exec.Command("bash", "-c", fmt.Sprintf(`gcloud compute vpn-gateways describe %s --region %s --format="get(vpnInterfaces[%d].ipAddress)"`, gatewayName, region, interfaceIndex)).CombinedOutput()
e2e.Logf("CMD: %s", fmt.Sprintf(`gcloud compute vpn-gateways describe %s --region %s --format="get(vpnInterfaces[%d].ipAddress)"`, gatewayName, region, interfaceIndex)) + if err != nil { + return "", fmt.Errorf("couldn't get vpn gateway ip: %v, output: %s", err, string(output)) + } + gatewayIP := strings.TrimSpace(string(output)) + return gatewayIP, nil +} + +func (gcloud *Gcloud) CreateVpnRouter(routerName string, networkName string, region string, asn int32) (createVpnRouter []byte, err error) { + createVpnRouter, err = exec.Command("bash", "-c", fmt.Sprintf(`gcloud compute routers create %s --network %s --region %s --asn %d --advertisement-mode custom --set-advertisement-groups all_subnets`, routerName, networkName, region, asn)).CombinedOutput() + if err != nil { + return createVpnRouter, fmt.Errorf("couldn't create vpn router: %v, output: %s", err, string(createVpnRouter)) + } + return createVpnRouter, nil +} + +func (gcloud *Gcloud) DeleteVpnRouter(routerName string, region string) (deleteVpnRouter []byte, err error) { + deleteVpnRouter, err = exec.Command("bash", "-c", fmt.Sprintf(`gcloud compute routers delete %s --region %s -q`, routerName, region)).CombinedOutput() + if err != nil { + return deleteVpnRouter, fmt.Errorf("couldn't delete vpn router: %v, output: %s", err, string(deleteVpnRouter)) + } + return deleteVpnRouter, nil +} + +func (gcloud *Gcloud) CreateExternalVPNGateway(gatewayName string, vpnAddress []string) (createExternalVPNGateway []byte, err error) { + createExternalVPNGateway, err = exec.Command("bash", "-c", fmt.Sprintf(`gcloud compute external-vpn-gateways create %s --interfaces 0=%s,1=%s,2=%s,3=%s`, gatewayName, vpnAddress[0], vpnAddress[1], vpnAddress[2], vpnAddress[3])).CombinedOutput() + if err != nil { + return createExternalVPNGateway, fmt.Errorf("couldn't create external vpn gateway: %v, output: %s", err, string(createExternalVPNGateway)) + } + return createExternalVPNGateway, nil +} + +func (gcloud *Gcloud) DeleteExternalVPNGateway(gatewayName string) (deleteExternalVPNGateway []byte, err error) { + deleteExternalVPNGateway, err = exec.Command("bash", "-c", fmt.Sprintf(`gcloud compute external-vpn-gateways delete %s -q`, gatewayName)).CombinedOutput() + if err != nil { + return deleteExternalVPNGateway, fmt.Errorf("couldn't delete external vpn gateway: %v, output: %s", err, string(deleteExternalVPNGateway)) + } + return deleteExternalVPNGateway, nil +} + +func (gcloud *Gcloud) CreateVPNTunnel(tunnelName string, peerGateway string, peerGatewayInterface int, region string, sharedSecret string, routerName string, vpnGateway string, interfaceIndex int) (createVPNTunnel []byte, err error) { + createVPNTunnel, err = exec.Command("bash", "-c", fmt.Sprintf(`gcloud compute vpn-tunnels create %s --peer-external-gateway %s --peer-external-gateway-interface %d --region %s --ike-version 2 --shared-secret %s --router %s --vpn-gateway %s --interface %d`, tunnelName, peerGateway, peerGatewayInterface, region, sharedSecret, routerName, vpnGateway, interfaceIndex)).CombinedOutput() + if err != nil { + return createVPNTunnel, fmt.Errorf("couldn't create vpn tunnel: %v, output: %s", err, string(createVPNTunnel)) + } + return createVPNTunnel, nil +} + +func (gcloud *Gcloud) DeleteVPNTunnel(tunnelName string, region string) (deleteVPNTunnel []byte, err error) { + deleteVPNTunnel, err = exec.Command("bash", "-c", fmt.Sprintf(`gcloud compute vpn-tunnels delete %s --region %s`, tunnelName, region)).CombinedOutput() + if err != nil { + return deleteVPNTunnel, fmt.Errorf("couldn't 
delete vpn tunnel: %v, output: %s", err, string(deleteVPNTunnel))
+	}
+	return deleteVPNTunnel, nil
+}
+
+// AddInterfaceToRouter adds a VPN tunnel interface to a router
+func (gcloud *Gcloud) AddInterfaceToRouter(routerName string, interfaceName string, tunnelName string, ipAddress string, maskLength int, region string) (addInterfaceToRouter []byte, err error) {
+	addInterfaceToRouter, err = exec.Command("bash", "-c", fmt.Sprintf(`gcloud compute routers add-interface %s --interface-name %s --vpn-tunnel %s --ip-address %s --mask-length %d --region %s`, routerName, interfaceName, tunnelName, ipAddress, maskLength, region)).CombinedOutput()
+	if err != nil {
+		return addInterfaceToRouter, fmt.Errorf("couldn't add interface to router: %v, output: %s", err, string(addInterfaceToRouter))
+	}
+	return addInterfaceToRouter, nil
+}
+
+// AddBGPPeerToRouter adds a BGP peer to a router
+func (gcloud *Gcloud) AddBGPPeerToRouter(routerName string, peerName string, peerASN int64, interfaceName string, peerIPAddress string, region string) (addBGPPeerToRouter []byte, err error) {
+	addBGPPeerToRouter, err = exec.Command("bash", "-c", fmt.Sprintf(`gcloud compute routers add-bgp-peer %s --peer-name %s --peer-asn %d --interface %s --peer-ip-address %s --region %s`, routerName, peerName, peerASN, interfaceName, peerIPAddress, region)).CombinedOutput()
+	if err != nil {
+		return addBGPPeerToRouter, fmt.Errorf("couldn't add bgp peer to router: %v, output: %s", err, string(addBGPPeerToRouter))
+	}
+	return addBGPPeerToRouter, nil
+}
diff --git a/test/util/gomega_helpers.go b/test/util/gomega_helpers.go
new file mode 100644
index 000000000..9c56e7d29
--- /dev/null
+++ b/test/util/gomega_helpers.go
@@ -0,0 +1,53 @@
+package util
+
+import (
+	"fmt"
+	"os"
+	"reflect"
+
+	"github.com/onsi/gomega/types"
+	logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
+)
+
+var secureMatchesMessage = fmt.Sprintf(
+	"For security reasons we cannot print the compared values. If you need to debug this error you can `export %s=yes` and then the values will be printed",
+	logger.EnableDebugLog,
+)
+
+// SecureMatcher wraps a GomegaMatcher and avoids printing the compared values when the match fails
type SecureMatcher struct {
+	securedMatcher types.GomegaMatcher
+}
+
+// Match delegates the check to the wrapped matcher.
+func (matcher *SecureMatcher) Match(actual interface{}) (success bool, err error) {
+	return matcher.securedMatcher.Match(actual)
+}
+
+// FailureMessage returns the failure message; the compared values are only revealed when debug logging is enabled
+func (matcher *SecureMatcher) FailureMessage(actual interface{}) (message string) {
+	if _, enabled := os.LookupEnv(logger.EnableDebugLog); enabled {
+		return matcher.securedMatcher.FailureMessage(actual)
+	}
+
+	matcherType := reflect.TypeOf(matcher.securedMatcher).String()
+
+	return fmt.Sprintf("%s did NOT match!! ", matcherType) + secureMatchesMessage
+}
+
+// NegatedFailureMessage returns the negated failure message; the compared values are only revealed when debug logging is enabled
+func (matcher *SecureMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	if _, enabled := os.LookupEnv(logger.EnableDebugLog); enabled {
+		return matcher.securedMatcher.NegatedFailureMessage(actual)
+	}
+
+	matcherType := reflect.TypeOf(matcher.securedMatcher).String()
+
+	return fmt.Sprintf("%s matched, but should NOT match!! ", matcherType) + secureMatchesMessage
+}
+
+// Secure wraps the given matcher in a SecureMatcher.
+func Secure(securedMatcher types.GomegaMatcher) types.GomegaMatcher {
+	return &SecureMatcher{
+		securedMatcher: securedMatcher,
+	}
+}
diff --git a/test/util/gwrapper.go b/test/util/gwrapper.go
new file mode 100644
index 000000000..b64c4b307
--- /dev/null
+++ b/test/util/gwrapper.go
@@ -0,0 +1,22 @@
+package util
+
+import (
+	"fmt"
+	"time"
+
+	g "github.com/onsi/ginkgo/v2"
+	"github.com/onsi/ginkgo/v2/formatter"
+	"github.com/onsi/ginkgo/v2/types"
+)
+
+// By prints a "STEP:" line and then delegates to g.By.
+// When 4.14 synced with Kubernetes 1.27, Ginkgo was upgraded from 2.4 to 2.6;
+// the By method changed and no longer prints "STEP:" information, which some
+// testers still want, so this wrapper restores that output. To get "STEP:"
+// lines, change g.By to exutil.By.
+// text is the string describing the step.
+func By(text string) {
+
+	formatter := formatter.NewWithNoColorBool(true)
+	fmt.Println(formatter.F("{{bold}} STEP:{{/}} %s {{gray}}%s{{/}}", text, time.Now().Format(types.GINKGO_TIME_FORMAT)))
+	g.By(text)
+
+}
diff --git a/test/util/hypeshift_cluster.go b/test/util/hypeshift_cluster.go
new file mode 100644
index 000000000..0b9947095
--- /dev/null
+++ b/test/util/hypeshift_cluster.go
@@ -0,0 +1,308 @@
+package util
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"strings"
+	"time"
+
+	"github.com/blang/semver/v4"
+	g "github.com/onsi/ginkgo/v2"
+	o "github.com/onsi/gomega"
+	"k8s.io/apimachinery/pkg/util/wait"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+type HostedClusterPlatformType = string
+
+const (
+	// AWSPlatform represents Amazon Web Services infrastructure.
+	AWSPlatform HostedClusterPlatformType = "AWS"
+
+	// NonePlatform represents user supplied (e.g. bare metal) infrastructure.
+	NonePlatform HostedClusterPlatformType = "None"
+
+	// IBMCloudPlatform represents IBM Cloud infrastructure.
+	IBMCloudPlatform HostedClusterPlatformType = "IBMCloud"
+
+	// AgentPlatform represents user supplied infrastructure booted with agents.
+	AgentPlatform HostedClusterPlatformType = "Agent"
+
+	// KubevirtPlatform represents Kubevirt infrastructure.
+	KubevirtPlatform HostedClusterPlatformType = "KubeVirt"
+
+	// AzurePlatform represents Azure infrastructure.
+	AzurePlatform HostedClusterPlatformType = "Azure"
+
+	// PowerVSPlatform represents PowerVS infrastructure.
+	PowerVSPlatform HostedClusterPlatformType = "PowerVS"
+)
+
+// ValidHypershiftAndGetGuestKubeConf checks whether this is a hypershift env and gets the kubeconfig of the hosted cluster.
+// The first return value is the hosted cluster name,
+// the second is the path of the hosted cluster kubeconfig file,
+// and the third is the hostedcluster namespace in the mgmt cluster which contains the generated resources.
+// If it is not a hypershift env, the test is skipped.
+func ValidHypershiftAndGetGuestKubeConf(oc *CLI) (string, string, string) {
+	if IsROSA() {
+		e2e.Logf("this is a ROSA env")
+		hostedClusterName, hostedclusterKubeconfig, hostedClusterNs := ROSAValidHypershiftAndGetGuestKubeConf(oc)
+		if len(hostedClusterName) == 0 || len(hostedclusterKubeconfig) == 0 || len(hostedClusterNs) == 0 {
+			g.Skip("this is a ROSA env, but the env is problematic, skip test run")
+		}
+		return hostedClusterName, hostedclusterKubeconfig, hostedClusterNs
+	}
+	operatorNS := GetHyperShiftOperatorNameSpace(oc)
+	if len(operatorNS) <= 0 {
+		g.Skip("there is no hypershift operator on host cluster, skip test run")
+	}
+
+	hostedclusterNS := GetHyperShiftHostedClusterNameSpace(oc)
+	if len(hostedclusterNS) <= 0 {
+		g.Skip("there is no hosted cluster NS in mgmt cluster, skip test run")
+	}
+
+	clusterNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(
+		"-n", hostedclusterNS, "hostedclusters", "-o=jsonpath={.items[*].metadata.name}").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	if len(clusterNames) <= 0 {
+		g.Skip("there is no hosted cluster, skip test run")
+	}
+
+	hypershiftPodStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(
+		"-n", operatorNS, "pod", "-l", "hypershift.openshift.io/operator-component=operator", "-l", "app=operator", "-o=jsonpath={.items[*].status.phase}").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	o.Expect(hypershiftPodStatus).To(o.ContainSubstring("Running"))
+
+	// get the first hosted cluster to run the test against
+	e2e.Logf("the hosted cluster names: %s, and will select the first", clusterNames)
+	clusterName := strings.Split(clusterNames, " ")[0]
+
+	var hostedClusterKubeconfigFile string
+	if os.Getenv("GUEST_KUBECONFIG") != "" {
+		e2e.Logf("the kubeconfig you set GUEST_KUBECONFIG must be that of the hosted cluster %s in namespace %s", clusterName, hostedclusterNS)
+		hostedClusterKubeconfigFile = os.Getenv("GUEST_KUBECONFIG")
+		e2e.Logf("use a known hosted cluster kubeconfig: %v", hostedClusterKubeconfigFile)
+	} else {
+		hostedClusterKubeconfigFile = "/tmp/guestcluster-kubeconfig-" + clusterName + "-" + GetRandomString()
+		output, err := exec.Command("bash", "-c", fmt.Sprintf("hypershift create kubeconfig --name %s --namespace %s > %s",
+			clusterName, hostedclusterNS, hostedClusterKubeconfigFile)).Output()
+		e2e.Logf("the cmd output: %s", string(output))
+		o.Expect(err).NotTo(o.HaveOccurred())
+		e2e.Logf("create a new hosted cluster kubeconfig: %v", hostedClusterKubeconfigFile)
+	}
+	e2e.Logf("if you want the hostedcluster controlplane namespace, you can get it by combining %s and %s with -", hostedclusterNS, clusterName)
+	return clusterName, hostedClusterKubeconfigFile, hostedclusterNS
+}
+
+// ValidHypershiftAndGetGuestKubeConfWithNoSkip checks whether this is a hypershift env and gets the kubeconfig of the hosted cluster.
+// The first return value is the hosted cluster name,
+// the second is the path of the hosted cluster kubeconfig file,
+// and the third is the hostedcluster namespace in the mgmt cluster which contains the generated resources.
+// If it is not a hypershift env, it does not skip the testcase; it returns empty strings instead.
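+//
+// Typical call site (sketch; variable names are hypothetical):
+//
+//	name, kubeconfig, hcNS := ValidHypershiftAndGetGuestKubeConfWithNoSkip(oc)
+//	// the hosted control-plane namespace is hcNS + "-" + name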
+func ValidHypershiftAndGetGuestKubeConfWithNoSkip(oc *CLI) (string, string, string) {
+	if IsROSA() {
+		e2e.Logf("this is a ROSA env")
+		return ROSAValidHypershiftAndGetGuestKubeConf(oc)
+	}
+	operatorNS := GetHyperShiftOperatorNameSpace(oc)
+	if len(operatorNS) <= 0 {
+		return "", "", ""
+	}
+
+	hostedclusterNS := GetHyperShiftHostedClusterNameSpace(oc)
+	if len(hostedclusterNS) <= 0 {
+		return "", "", ""
+	}
+
+	clusterNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(
+		"-n", hostedclusterNS, "hostedclusters", "-o=jsonpath={.items[*].metadata.name}").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	if len(clusterNames) <= 0 {
+		return "", "", ""
+	}
+
+	hypershiftPodStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(
+		"-n", operatorNS, "pod", "-l", "hypershift.openshift.io/operator-component=operator", "-l", "app=operator", "-o=jsonpath={.items[*].status.phase}").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	o.Expect(hypershiftPodStatus).To(o.ContainSubstring("Running"))
+
+	// get the first hosted cluster to run the test against
+	e2e.Logf("the hosted cluster names: %s, and will select the first", clusterNames)
+	clusterName := strings.Split(clusterNames, " ")[0]
+
+	var hostedClusterKubeconfigFile string
+	if os.Getenv("GUEST_KUBECONFIG") != "" {
+		e2e.Logf("the kubeconfig you set GUEST_KUBECONFIG must be that of the guestcluster %s in namespace %s", clusterName, hostedclusterNS)
+		hostedClusterKubeconfigFile = os.Getenv("GUEST_KUBECONFIG")
+		e2e.Logf("use a known hosted cluster kubeconfig: %v", hostedClusterKubeconfigFile)
+	} else {
+		hostedClusterKubeconfigFile = "/tmp/guestcluster-kubeconfig-" + clusterName + "-" + GetRandomString()
+		output, err := exec.Command("bash", "-c", fmt.Sprintf("hypershift create kubeconfig --name %s --namespace %s > %s",
+			clusterName, hostedclusterNS, hostedClusterKubeconfigFile)).Output()
+		e2e.Logf("the cmd output: %s", string(output))
+		o.Expect(err).NotTo(o.HaveOccurred())
+		e2e.Logf("create a new hosted cluster kubeconfig: %v", hostedClusterKubeconfigFile)
+	}
+	e2e.Logf("if you want the hostedcluster controlplane namespace, you can get it by combining %s and %s with -", hostedclusterNS, clusterName)
+	return clusterName, hostedClusterKubeconfigFile, hostedclusterNS
+}
+
+// GetHyperShiftOperatorNameSpace gets the hypershift operator namespace;
+// if none exists, it returns an empty string.
+func GetHyperShiftOperatorNameSpace(oc *CLI) string {
+	args := []string{
+		"pods", "-A",
+		"-l", "hypershift.openshift.io/operator-component=operator",
+		"-l", "app=operator",
+		"--ignore-not-found",
+		"-ojsonpath={.items[0].metadata.namespace}",
+	}
+	namespace, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(args...).Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	return namespace
+}
+
+// GetHyperShiftHostedClusterNameSpace gets the hypershift hostedcluster namespace;
+// if none exists, it returns an empty string. If more than one exists, it returns
+// the first one that is not "clusters".
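+// For example, given the namespaces "clusters local-cluster", it returns
+// "local-cluster".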
+func GetHyperShiftHostedClusterNameSpace(oc *CLI) string {
+	namespace, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(
+		"hostedcluster", "-A", "--ignore-not-found", "-ojsonpath={.items[*].metadata.namespace}").Output()
+
+	if err != nil && !strings.Contains(namespace, "the server doesn't have a resource type") {
+		o.Expect(err).NotTo(o.HaveOccurred(), "get hostedcluster fail: %v", err)
+	}
+
+	if len(namespace) <= 0 {
+		return namespace
+	}
+	namespaces := strings.Fields(namespace)
+	if len(namespaces) == 1 {
+		return namespaces[0]
+	}
+	// prefer the first namespace that is not the default "clusters" namespace
+	ns := ""
+	for _, ns = range namespaces {
+		if ns != "clusters" {
+			break
+		}
+	}
+	return ns
+}
+
+// ROSAValidHypershiftAndGetGuestKubeConf checks whether this is a ROSA-hypershift env and gets the kubeconfig of the hosted cluster; only supported in Prow.
+// The first return value is the hosted cluster name,
+// the second is the path of the hosted cluster kubeconfig file,
+// and the third is the hostedcluster namespace in the mgmt cluster which contains the generated resources.
+// If it is not a hypershift env, it returns empty strings.
+func ROSAValidHypershiftAndGetGuestKubeConf(oc *CLI) (string, string, string) {
+	operatorNS := GetHyperShiftOperatorNameSpace(oc)
+	if len(operatorNS) <= 0 {
+		e2e.Logf("there is no hypershift operator on host cluster")
+		return "", "", ""
+	}
+
+	data, err := ioutil.ReadFile(os.Getenv("SHARED_DIR") + "/cluster-name")
+	if err != nil {
+		e2e.Logf("can't get hostedcluster name %s SHARED_DIR: %s", err.Error(), os.Getenv("SHARED_DIR"))
+		return "", "", ""
+	}
+	clusterName := strings.ReplaceAll(string(data), "\n", "")
+	hostedclusterNS, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("-A", "hostedclusters", `-o=jsonpath={.items[?(@.metadata.name=="`+clusterName+`")].metadata.namespace}`).Output()
+	if len(hostedclusterNS) <= 0 {
+		e2e.Logf("there is no hosted cluster NS in mgmt cluster")
+	}
+
+	hostedClusterKubeconfigFile := os.Getenv("SHARED_DIR") + "/nested_kubeconfig"
+	return clusterName, hostedClusterKubeconfigFile, hostedclusterNS
+}
+
+// GetHostedClusterPlatformType returns a hosted cluster's platform type.
+// oc is the management cluster client used to query the platform type from the hostedcluster CR.
+func GetHostedClusterPlatformType(oc *CLI, clusterName, clusterNamespace string) (HostedClusterPlatformType, error) {
+	if IsHypershiftHostedCluster(oc) {
+		return "", fmt.Errorf("this is a hosted cluster env. You should use oc of the management cluster")
+	}
+	return oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedcluster", clusterName, "-n", clusterNamespace, `-ojsonpath={.spec.platform.type}`).Output()
+}
+
+// GetNodePoolNamesbyHostedClusterName gets the nodepool names in the hosted cluster's namespace
+func GetNodePoolNamesbyHostedClusterName(oc *CLI, hostedClusterName, hostedClusterNS string) []string {
+	var nodePoolName []string
+	nodePoolNameList, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodepool", "-n", hostedClusterNS, "-ojsonpath={.items[*].metadata.name}").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	o.Expect(nodePoolNameList).NotTo(o.BeEmpty())
+
+	nodePoolName = strings.Fields(nodePoolNameList)
+	e2e.Logf("\n\nGot nodepool(s) for the hosted cluster %s: %v\n", hostedClusterName, nodePoolName)
+	return nodePoolName
+}
+
+// GetHostedClusterVersion gets a HostedCluster's version from the management cluster.
+func GetHostedClusterVersion(mgmtOc *CLI, hostedClusterName, hostedClusterNs string) semver.Version {
+	hcVersionStr, _, err := mgmtOc.
+		AsAdmin().
+		WithoutNamespace().
+		Run("get").
+ Args("hostedcluster", hostedClusterName, "-n", hostedClusterNs, `-o=jsonpath={.status.version.history[?(@.state!="")].version}`). + Outputs() + o.Expect(err).NotTo(o.HaveOccurred()) + + hcVersion := semver.MustParse(hcVersionStr) + e2e.Logf("Found hosted cluster %s version = %q", hostedClusterName, hcVersion) + return hcVersion +} + +func CheckHypershiftOperatorExistence(mgmtOC *CLI) (bool, error) { + stdout, _, err := mgmtOC.AsAdmin().WithoutNamespace().Run("get"). + Args("pods", "-n", "hypershift", "-o=jsonpath={.items[*].metadata.name}").Outputs() + if err != nil { + return false, fmt.Errorf("failed to get HO Pods: %v", err) + } + return len(stdout) > 0, nil +} + +func SkipOnHypershiftOperatorExistence(mgmtOC *CLI, expectHO bool) { + HOExist, err := CheckHypershiftOperatorExistence(mgmtOC) + if err != nil { + e2e.Logf("failed to check Hypershift Operator existence: %v, defaulting to not found", err) + } + + if HOExist && !expectHO { + g.Skip("Not expecting Hypershift Operator but it is found, skip the test") + } + if !HOExist && expectHO { + g.Skip("Expecting Hypershift Operator but it is not found, skip the test") + } +} + +// WaitForHypershiftHostedClusterReady waits for the hostedCluster ready +func WaitForHypershiftHostedClusterReady(oc *CLI, hostedClusterName, hostedClusterNS string) { + pollWaitErr := wait.PollUntilContextTimeout(context.Background(), 20*time.Second, 10*time.Minute, false, func(cxt context.Context) (bool, error) { + hostedClusterAvailable, getStatusErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedclusters", "-n", hostedClusterNS, "--ignore-not-found", hostedClusterName, `-ojsonpath='{.status.conditions[?(@.type=="Available")].status}'`).Output() + if getStatusErr != nil { + e2e.Logf("Failed to get hosted cluster %q status: %v, try next round", hostedClusterName, getStatusErr) + return false, nil + } + if !strings.Contains(hostedClusterAvailable, "True") { + e2e.Logf("Hosted cluster %q status: Available=%s, try next round", hostedClusterName, hostedClusterAvailable) + return false, nil + } + + hostedClusterProgressState, getStateErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("hostedclusters", "-n", hostedClusterNS, "--ignore-not-found", hostedClusterName, `-ojsonpath={.status.version.history[?(@.state!="")].state}`).Output() + if getStateErr != nil { + e2e.Logf("Failed to get hosted cluster %q progress state: %v, try next round", hostedClusterName, getStateErr) + return false, nil + } + if !strings.Contains(hostedClusterProgressState, "Completed") { + e2e.Logf("Hosted cluster %q progress state: %q, try next round", hostedClusterName, hostedClusterProgressState) + return false, nil + } + e2e.Logf("Hosted cluster %q is ready now", hostedClusterName) + return true, nil + }) + AssertWaitPollNoErr(pollWaitErr, fmt.Sprintf("Hosted cluster %q still not ready", hostedClusterName)) + +} diff --git a/test/util/ibmcloud_client.go b/test/util/ibmcloud_client.go new file mode 100644 index 000000000..e8f51aa65 --- /dev/null +++ b/test/util/ibmcloud_client.go @@ -0,0 +1,365 @@ +package util + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "os" + "regexp" + "strings" + "unicode" + + v "github.com/IBM-Cloud/power-go-client/clients/instance" + ps "github.com/IBM-Cloud/power-go-client/ibmpisession" + ac "github.com/IBM-Cloud/power-go-client/power/models" + "github.com/IBM/go-sdk-core/v5/core" + "github.com/IBM/vpc-go-sdk/vpcv1" + g "github.com/onsi/ginkgo/v2" + o "github.com/onsi/gomega" + e2e "k8s.io/kubernetes/test/e2e/framework" +) + 
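+// Illustrative flow for the VPC helpers below (hypothetical region, VPC and
+// instance names; error handling elided):
+//
+//	session, _ := NewIBMSessionFromEnv(apiKey)
+//	_ = SetVPCServiceURLForRegion(session, "us-south")
+//	id, _ := GetIBMInstanceID(session, oc, "us-south", "my-vpc", "worker-0", baseDomain)
+//	_ = StopIBMInstance(session, id)
+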
+// IBMSession is an object representing an IBM session +type IBMSession struct { + vpcv1 *vpcv1.VpcV1 +} + +type IBMPowerVsSession struct { + powerVsSession *v.IBMPIInstanceClient +} + +// NewIBMSessionFromEnv creates a new IBM session from environment credentials +func NewIBMSessionFromEnv(ibmApiKey string) (*IBMSession, error) { + // Create an IAM authenticator + authenticator := &core.IamAuthenticator{ + ApiKey: ibmApiKey, + } + + // Create a VPC service client + vpcService, err := vpcv1.NewVpcV1(&vpcv1.VpcV1Options{ + Authenticator: authenticator, + }) + if err != nil { + return nil, fmt.Errorf("Error creating VPC service client: %v", err) + } + + session := &IBMSession{ + vpcv1: vpcService, + } + + return session, nil +} + +// IsBase64Encoded checks if the input string is likely base64-encoded. +func IsBase64Encoded(s string) bool { + // Check if the length is a multiple of 4 + if len(s)%4 != 0 { + return false + } + + // Check if the string contains only valid base64 characters + for _, c := range s { + if !unicode.IsLetter(c) && !unicode.IsDigit(c) && c != '+' && c != '/' && c != '=' { + return false + } + } + + // Attempt to decode the string + _, err := base64.StdEncoding.DecodeString(s) + return err == nil +} + +// GetIBMCredentialFromCluster gets IBM credentials like ibmapikey, ibmvpc, and ibmregion from the cluster +func GetIBMCredentialFromCluster(oc *CLI) (string, string, string, error) { + var ( + credential string + credentialAPIKey []byte + credErr error + ) + + platform := CheckPlatform(oc) + if platform == "powervs" { + credential, credErr = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/ibm-cloud-credentials", "-n", "openshift-cloud-controller-manager", "-o=jsonpath={.data.ibmcloud_api_key}").Output() + } else { + credential, credErr = oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/qe-ibmcloud-creds", "-n", "kube-system", "-o=jsonpath={.data.apiKey}").Output() + } + + if credErr != nil || len(credential) == 0 { + // Fallback to reading from the CLUSTER_PROFILE_DIR + clusterProfileDir := os.Getenv("CLUSTER_PROFILE_DIR") + if clusterProfileDir == "" { + return "", "", "", fmt.Errorf("error getting environment variable CLUSTER_PROFILE_DIR") + } + credentialAPIKey, credErr = os.ReadFile(clusterProfileDir + "/ibmcloud-api-key") + if credErr != nil || len(credentialAPIKey) == 0 { + g.Skip("Failed to get credential to access IBM, skip the testing.") + } + credential = string(credentialAPIKey) + } + + credential = strings.TrimSpace(credential) + + if IsBase64Encoded(credential) { + credDecode, err := base64.StdEncoding.DecodeString(credential) + if err != nil || string(credDecode) == "" { + return "", "", "", fmt.Errorf("Error decoding IBM credentials: %s", err) + } + credential = string(credDecode) + credential = strings.TrimSpace(credential) + } + + ibmRegion, regionErr := GetIBMRegion(oc) + if regionErr != nil { + return "", "", "", regionErr + } + + ibmResourceGrpName, ibmResourceGrpNameErr := GetIBMResourceGrpName(oc) + if ibmResourceGrpNameErr != nil { + return "", "", "", ibmResourceGrpNameErr + } + + if platform == "powervs" { + return credential, ibmRegion, ibmResourceGrpName, nil + } + return credential, ibmRegion, ibmResourceGrpName + "-vpc", nil +} + +// StopIBMInstance stop the IBM instance +func StopIBMInstance(session *IBMSession, instanceID string) error { + stopInstanceOptions := session.vpcv1.NewCreateInstanceActionOptions(instanceID, "stop") + _, _, err := session.vpcv1.CreateInstanceAction(stopInstanceOptions) + if err != nil { + return 
fmt.Errorf("Unable to stop IBM instance: %v", err) + } + return nil +} + +// StartIBMInstance start the IBM instance +func StartIBMInstance(session *IBMSession, instanceID string) error { + startInstanceOptions := session.vpcv1.NewCreateInstanceActionOptions(instanceID, "start") + _, _, err := session.vpcv1.CreateInstanceAction(startInstanceOptions) + if err != nil { + return fmt.Errorf("Unable to start IBM instance: %v", err) + } + return nil +} + +// GetIBMInstanceID get IBM instance id +func GetIBMInstanceID(session *IBMSession, oc *CLI, region string, vpcName string, instanceID string, baseDomain string) (string, error) { + err := SetVPCServiceURLForRegion(session, region) + if err != nil { + return "", fmt.Errorf("Failed to set vpc api service url :: %v", err) + } + + // Retrieve the VPC ID based on the VPC name + listVpcsOptions := session.vpcv1.NewListVpcsOptions() + vpcs, _, err := session.vpcv1.ListVpcs(listVpcsOptions) + if err != nil { + return "", fmt.Errorf("Error listing VPCs: %v", err) + } + + var vpcID string + for _, vpc := range vpcs.Vpcs { + if *vpc.Name == vpcName { + vpcID = *vpc.ID + e2e.Logf("VpcID found of VpcName %s :: %s", vpcName, vpcID) + break + } + } + + if vpcID == "" { + // Attempt to extract VPC ID using the DNS base domain + vpcID, err = ExtractVPCIDFromBaseDomain(oc, vpcs.Vpcs, baseDomain) + if err != nil { + return "", fmt.Errorf("VPC not found: %s", vpcName) + } + } + + // Set the VPC ID in the listInstancesOptions + listInstancesOptions := session.vpcv1.NewListInstancesOptions() + listInstancesOptions.SetVPCID(vpcID) + + // Retrieve the list of instances in the specified VPC + instances, _, err := session.vpcv1.ListInstances(listInstancesOptions) + if err != nil { + return "", fmt.Errorf("Error listing instances: %v", err) + } + + // Search for the instance by name + for _, instance := range instances.Instances { + if *instance.Name == instanceID { + return *instance.ID, nil + } + } + + return "", fmt.Errorf("Instance not found for name: %s", instanceID) +} + +// GetIBMInstanceStatus check IBM instance running status +func GetIBMInstanceStatus(session *IBMSession, instanceID string) (string, error) { + getInstanceOptions := session.vpcv1.NewGetInstanceOptions(instanceID) + instance, _, err := session.vpcv1.GetInstance(getInstanceOptions) + if err != nil { + return "", err + } + return *instance.Status, nil +} + +// SetVPCServiceURLForRegion will set the VPC Service URL to a specific IBM Cloud Region, in order to access Region scoped resources +func SetVPCServiceURLForRegion(session *IBMSession, region string) error { + regionOptions := session.vpcv1.NewGetRegionOptions(region) + vpcRegion, _, err := session.vpcv1.GetRegion(regionOptions) + if err != nil { + return err + } + err = session.vpcv1.SetServiceURL(fmt.Sprintf("%s/v1", *vpcRegion.Endpoint)) + if err != nil { + return err + } + return nil +} + +// GetIBMRegion gets IBM cluster region +func GetIBMRegion(oc *CLI) (string, error) { + platformType := CheckPlatform(oc) + var ibmRegion string + var regionErr error + + switch platformType { + case "ibmcloud": + ibmRegion, regionErr = oc.AsAdmin().WithoutNamespace().Run("get").Args("Infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.ibmcloud.location}").Output() + case "powervs": + ibmRegion, regionErr = oc.AsAdmin().WithoutNamespace().Run("get").Args("Infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.powervs.zone}").Output() + default: + return "", fmt.Errorf("Unsupported platform type: %s", platformType) + } + + if 
regionErr != nil || ibmRegion == "" {
+		return "", regionErr
+	}
+
+	return ibmRegion, nil
+}
+
+// GetIBMResourceGrpName gets the IBM cluster resource group name
+func GetIBMResourceGrpName(oc *CLI) (string, error) {
+	platformType := CheckPlatform(oc)
+	var ibmResourceGrpName string
+	var ibmResourceGrpNameErr error
+	switch platformType {
+	case "ibmcloud":
+		ibmResourceGrpName, ibmResourceGrpNameErr = oc.AsAdmin().WithoutNamespace().Run("get").Args("Infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.ibmcloud.resourceGroupName}").Output()
+	case "powervs":
+		ibmResourceGrpName, ibmResourceGrpNameErr = oc.AsAdmin().WithoutNamespace().Run("get").Args("Infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.powervs}").Output()
+		if ibmResourceGrpNameErr == nil && ibmResourceGrpName != "" {
+			re := regexp.MustCompile(`a/([a-f0-9]+):`)
+			match := re.FindStringSubmatch(ibmResourceGrpName)
+			if len(match) < 2 {
+				e2e.Failf("Not able to get ResourceGrp name")
+			}
+			ibmResourceGrpName = match[1]
+		}
+	default:
+		return "", fmt.Errorf("Unsupported platform type: %s", platformType)
+	}
+
+	if ibmResourceGrpNameErr != nil || ibmResourceGrpName == "" {
+		return "", ibmResourceGrpNameErr
+	}
+
+	return ibmResourceGrpName, nil
+}
+
+// LoginIBMPowerVsCloud authenticates and returns a session for the PowerVS cloud
+func LoginIBMPowerVsCloud(apiKey, zone, userAccount string, cloudId string) (*IBMPowerVsSession, error) {
+	// Authenticator
+	authenticator := &core.IamAuthenticator{
+		ApiKey: apiKey,
+	}
+
+	// Create the session
+	options := &ps.IBMPIOptions{
+		Authenticator: authenticator,
+		Zone:          zone,
+		UserAccount:   userAccount,
+	}
+	session, err := ps.NewIBMPISession(options)
+	if err != nil {
+		return nil, err
+	}
+	// Create the instance client
+	powerClient := v.NewIBMPIInstanceClient(context.Background(), session, cloudId)
+	return &IBMPowerVsSession{powerVsSession: powerClient}, nil
+}
+
+// PerformInstanceActionOnPowerVs performs a start or stop action on the instance
+func PerformInstanceActionOnPowerVs(powerClient *IBMPowerVsSession, instanceID, action string) error {
+	powerAction := &ac.PVMInstanceAction{
+		Action: core.StringPtr(action),
+	}
+	return powerClient.powerVsSession.Action(instanceID, powerAction)
+}
+
+// GetIBMPowerVsCloudID gets the PowerVS cloud instance ID from a node's providerID
+func GetIBMPowerVsCloudID(oc *CLI, nodeName string) string {
+	jsonString, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", nodeName, "-o=jsonpath={.spec}").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	type Data struct {
+		ProviderID string `json:"providerID"`
+	}
+
+	// Parse the JSON string into the defined struct
+	var data Data
+	err = json.Unmarshal([]byte(jsonString), &data)
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+	// Extract the ID part from the providerID field; accessing parts[4]
+	// requires at least five path segments
+	parts := strings.Split(data.ProviderID, "/")
+	if len(parts) < 5 {
+		e2e.Failf("Invalid providerID format")
+	}
+	instanceID := parts[4]
+	return instanceID
+}
+
+// GetIBMPowerVsInstanceInfo retrieves the instance ID and status for the specified instance name
+func GetIBMPowerVsInstanceInfo(powerClient *IBMPowerVsSession, instanceName string) (string, string, error) {
+	// Get all instances
+	getAllResp, err := powerClient.powerVsSession.GetAll()
+	if err != nil {
+		return "", "", err
+	}
+
+	// Search the instances for a name match and log it
+	for _, inst := range getAllResp.PvmInstances {
+		if *inst.ServerName == instanceName {
+			e2e.Logf("ID: %s, Name: %s, Status: %s\n", *inst.PvmInstanceID, *inst.ServerName, *inst.Status)
+			return *inst.PvmInstanceID,
strings.ToLower(*inst.Status), nil
+		}
+	}
+
+	return "", "", nil
+}
+
+// ExtractVPCIDFromBaseDomain extracts the VPC ID based on the DNS base domain.
+func ExtractVPCIDFromBaseDomain(oc *CLI, vpcs []vpcv1.VPC, baseDomain string) (string, error) {
+	baseDomain = strings.TrimSpace(baseDomain)
+	parts := strings.Split(baseDomain, ".")
+	if len(parts) == 0 {
+		return "", fmt.Errorf("invalid base domain format")
+	}
+	resourceGroupName := parts[0]
+	e2e.Logf("Extracted resource group name from DNS base domain: %s", resourceGroupName)
+	expectedVpcName := resourceGroupName + "-vpc"
+	// Find the VPC with the matching name
+	for _, vpc := range vpcs {
+		if *vpc.Name == expectedVpcName {
+			vpcID := *vpc.ID
+			e2e.Logf("VpcID found for VpcName %s: %s", *vpc.Name, vpcID)
+			return vpcID, nil
+		}
+	}
+	return "", fmt.Errorf("VPC not found: %s", expectedVpcName)
+}
diff --git a/test/util/image_helpers.go b/test/util/image_helpers.go
new file mode 100644
index 000000000..ac043a45a
--- /dev/null
+++ b/test/util/image_helpers.go
@@ -0,0 +1,25 @@
+package util
+
+import (
+	"fmt"
+
+	g "github.com/onsi/ginkgo/v2"
+)
+
+// DumpAndReturnTagging takes an array of tags and obtains the hex image IDs, dumps them to ginkgo for printing, and then returns them
+func DumpAndReturnTagging(tags []string) ([]string, error) {
+	hexIDs, err := GetImageIDForTags(tags)
+	if err != nil {
+		return nil, err
+	}
+	for i, hexID := range hexIDs {
+		fmt.Fprintf(g.GinkgoWriter, "tag %s hex id %s ", tags[i], hexID)
+	}
+	return hexIDs, nil
+}
+
+// CreateResource creates the resources from the supplied json file (not a template);
+// the error, if any, is returned for the caller to check
+func CreateResource(jsonFilePath string, oc *CLI) error {
+	err := oc.Run("create").Args("-f", jsonFilePath).Execute()
+	return err
+}
diff --git a/test/util/jenkins/monitor.go b/test/util/jenkins/monitor.go
new file mode 100644
index 000000000..a75079e1f
--- /dev/null
+++ b/test/util/jenkins/monitor.go
@@ -0,0 +1,135 @@
+package jenkins
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+	"time"
+
+	g "github.com/onsi/ginkgo/v2"
+	o "github.com/onsi/gomega"
+
+	"k8s.io/apimachinery/pkg/util/wait"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+
+	exutil "github.com/openshift/openshift-tests-private/test/extended/util"
+)
+
+// JobMon is a Jenkins job monitor
+type JobMon struct {
+	j               *JenkinsRef
+	lastBuildNumber string
+	buildNumber     string
+	jobName         string
+}
+
+const (
+	EnableJenkinsMemoryStats = "ENABLE_JENKINS_MEMORY_MONITORING"
+	EnableJenkinsGCStats     = "ENABLE_JENKINS_GC_MONITORING"
+)
+
+// Designed to match if RSS memory is at least 500000000 bytes (i.e. > 476MiB)
+var memoryOverragePattern = regexp.MustCompile(`\s+rss\s+5\d\d\d\d\d\d\d\d`)
+
+// Await waits for the build number on the Jenkins job to change and the build
+// to complete. Returns an error if the timeout expires.
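+//
+// Hypothetical usage, assuming jmon was obtained from a JenkinsRef helper
+// that started the job:
+//
+//	o.Expect(jmon.Await(10 * time.Minute)).NotTo(o.HaveOccurred())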
+func (jmon *JobMon) Await(timeout time.Duration) error { + err := wait.Poll(10*time.Second, timeout, func() (bool, error) { + + buildNumber, err := jmon.j.GetJobBuildNumber(jmon.jobName, time.Minute) + o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred()) + + e2e.Logf("Checking build number for job %q current[%v] vs last[%v]", jmon.jobName, buildNumber, jmon.lastBuildNumber) + if buildNumber == jmon.lastBuildNumber { + return false, nil + } + + if jmon.buildNumber == "" { + jmon.buildNumber = buildNumber + } + body, status, err := jmon.j.GetResource("job/%s/%s/api/json?depth=1", jmon.jobName, jmon.buildNumber) + o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred()) + o.ExpectWithOffset(1, status).To(o.Equal(200)) + + body = strings.ToLower(body) + if strings.Contains(body, "\"building\":true") { + e2e.Logf("Jenkins job %q still building:\n%s\n\n", jmon.jobName, body) + return false, nil + } + + if strings.Contains(body, "\"result\":null") { + e2e.Logf("Jenkins job %q still building result:\n%s\n\n", jmon.jobName, body) + return false, nil + } + + e2e.Logf("Jenkins job %q build complete:\n%s\n\n", jmon.jobName, body) + // If Jenkins job has completed, output its log + body, status, err = jmon.j.GetResource("job/%s/%s/consoleText", jmon.jobName, jmon.buildNumber) + if err != nil || status != 200 { + e2e.Logf("Unable to retrieve job log from Jenkins.\nStatus code: %d\nError: %v\nResponse Text: %s\n", status, err, body) + return true, nil + } + e2e.Logf("Jenkins job %q log:\n%s\n\n", jmon.jobName, body) + return true, nil + }) + return err +} + +func StartJenkinsGCTracking(oc *exutil.CLI, jenkinsNamespace string) *time.Ticker { + jenkinsPod := FindJenkinsPod(oc) + ticker := time.NewTicker(10 * time.Second) + go func() { + for t := range ticker.C { + stats, err := oc.Run("rsh").Args("--namespace", jenkinsNamespace, jenkinsPod.Name, "jstat", "-gcutil", "1").Output() + if err == nil { + fmt.Fprintf(g.GinkgoWriter, "\n\nJenkins gc stats %v\n%s\n\n", t, stats) + } else { + fmt.Fprintf(g.GinkgoWriter, "Unable to acquire Jenkins gc stats: %v", err) + } + } + }() + return ticker +} + +func StartJenkinsMemoryTracking(oc *exutil.CLI, jenkinsNamespace string) *time.Ticker { + jenkinsPod := FindJenkinsPod(oc) + ticker := time.NewTicker(10 * time.Second) + go func() { + for t := range ticker.C { + memstats, err := oc.Run("exec").Args("--namespace", jenkinsNamespace, jenkinsPod.Name, "--", "cat", "/sys/fs/cgroup/memory/memory.stat").Output() + if err != nil { + fmt.Fprintf(g.GinkgoWriter, "\nUnable to acquire Jenkins cgroup memory.stat") + } + ps, err := oc.Run("exec").Args("--namespace", jenkinsNamespace, jenkinsPod.Name, "--", "ps", "faux").Output() + if err != nil { + fmt.Fprintf(g.GinkgoWriter, "\nUnable to acquire Jenkins ps information") + } + fmt.Fprintf(g.GinkgoWriter, "\nJenkins memory statistics at %v\n%s\n%s\n\n", t, ps, memstats) + gcstats, err := oc.Run("rsh").Args("--namespace", jenkinsNamespace, jenkinsPod.Name, "jstat", "-gcutil", "1").Output() + if err != nil { + fmt.Fprintf(g.GinkgoWriter, "Unable to acquire Jenkins gc stats: %v", err) + } + fmt.Fprintf(g.GinkgoWriter, "\n\nJenkins gc stats %v\n%s\n\n", t, gcstats) + + // This is likely a temporary measure in place to extract diagnostic information during unexpectedly + // high memory utilization within the Jenkins image. If Jenkins is using + // a large amount of RSS, extract JVM information from the pod. 
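+ // For instance, a memory.stat line such as "rss 523456789" (about 499MiB)
+ // matches memoryOverragePattern and would trigger the jmap/jstack dumps below.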
+ if memoryOverragePattern.MatchString(memstats) { + histogram, err := oc.Run("rsh").Args("--namespace", jenkinsNamespace, jenkinsPod.Name, "jmap", "-histo", "1").Output() + if err == nil { + fmt.Fprintf(g.GinkgoWriter, "\n\nJenkins histogram:\n%s\n\n", histogram) + } else { + fmt.Fprintf(g.GinkgoWriter, "Unable to acquire Jenkins histogram: %v", err) + } + stack, err := oc.Run("exec").Args("--namespace", jenkinsNamespace, jenkinsPod.Name, "--", "jstack", "1").Output() + if err == nil { + fmt.Fprintf(g.GinkgoWriter, "\n\nJenkins thread dump:\n%s\n\n", stack) + } else { + fmt.Fprintf(g.GinkgoWriter, "Unable to acquire Jenkins thread dump: %v", err) + } + } + + } + }() + return ticker +} diff --git a/test/util/jenkins/ref.go b/test/util/jenkins/ref.go new file mode 100644 index 000000000..9a49a5f49 --- /dev/null +++ b/test/util/jenkins/ref.go @@ -0,0 +1,501 @@ +package jenkins + +import ( + "context" + "encoding/xml" + "fmt" + "io/ioutil" + "net/url" + "os" + "regexp" + "strings" + "time" + + g "github.com/onsi/ginkgo/v2" + o "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + e2e "k8s.io/kubernetes/test/e2e/framework" + + buildv1 "github.com/openshift/api/build/v1" + exutil "github.com/openshift/openshift-tests-private/test/extended/util" + exurl "github.com/openshift/openshift-tests-private/test/extended/util/url" +) + +const ( + UseLocalPluginSnapshotEnvVarName = "USE_SNAPSHOT_JENKINS_IMAGE" + UseLocalClientPluginSnapshotEnvVarName = "USE_SNAPSHOT_JENKINS_CLIENT_IMAGE" + UseLocalSyncPluginSnapshotEnvVarName = "USE_SNAPSHOT_JENKINS_SYNC_IMAGE" + UseLocalLoginPluginSnapshotEnvVarName = "USE_SNAPSHOT_JENKINS_LOGIN_IMAGE" +) + +// JenkinsRef represents a Jenkins instance running on an OpenShift server +type JenkinsRef struct { + oc *exutil.CLI + host string + port string + // The namespace in which the Jenkins server is running + namespace string + token string + uri_tester *exurl.Tester +} + +// FlowDefinition can be marshalled into XML to represent a Jenkins workflow job definition. +type FlowDefinition struct { + XMLName xml.Name `xml:"flow-definition"` + Plugin string `xml:"plugin,attr"` + KeepDependencies bool `xml:"keepDependencies"` + Definition Definition +} + +// Definition is part of a FlowDefinition +type Definition struct { + XMLName xml.Name `xml:"definition"` + Class string `xml:"class,attr"` + Plugin string `xml:"plugin,attr"` + Script string `xml:"script"` +} + +// NewRef creates a jenkins reference from an OC client +func NewRef(oc *exutil.CLI) *JenkinsRef { + g.By("get ip and port for jenkins service") + serviceIP, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{.spec.clusterIP}}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + port, err := oc.Run("get").Args("svc", "jenkins", "--config", exutil.KubeConfigPath()).Template("{{ $x := index .spec.ports 0}}{{$x.port}}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("get token via whoami") + token, err := oc.Run("whoami").Args("-t").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + + j := &JenkinsRef{ + oc: oc, + host: serviceIP, + port: port, + namespace: oc.Namespace(), + token: token, + uri_tester: exurl.NewTester(oc.AdminKubeClient(), oc.Namespace()), + } + return j +} + +// Namespace returns the Jenkins namespace +func (j *JenkinsRef) Namespace() string { + return j.namespace +} + +// BuildURI builds a URI for the Jenkins server. 
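+// For example (illustrative), BuildURI("job/%s/api/json", "demo") yields
+// "http://<service-ip>:<port>/job/demo/api/json".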
+func (j *JenkinsRef) BuildURI(resourcePathFormat string, a ...interface{}) string { + resourcePath := fmt.Sprintf(resourcePathFormat, a...) + return fmt.Sprintf("http://%s:%v/%s", j.host, j.port, resourcePath) +} + +// GetResource submits a GET request to this Jenkins server. +// Returns a response body and status code or an error. +func (j *JenkinsRef) GetResource(resourcePathFormat string, a ...interface{}) (string, int, error) { + uri := j.BuildURI(resourcePathFormat, a...) + e2e.Logf("Retrieving Jenkins resource: %q", uri) + response := j.uri_tester.WithErrorPassthrough(true).Response( + exurl.Expect("GET", uri).WithToken(j.token), + ) + var err error + if len(response.Error) > 0 { + err = fmt.Errorf("%s", response.Error) + } + rc := -1 + if response.Response != nil { + rc = response.Response.StatusCode + } + return string(response.Body), rc, err +} + +// Post sends a POST to the Jenkins server. Returns response body and status code or an error. +func (j *JenkinsRef) Post(reqBodyFile, resourcePathFormat, contentType string, a ...interface{}) (string, int, error) { + uri := j.BuildURI(resourcePathFormat, a...) + response := j.uri_tester.WithErrorPassthrough(true).Response( + exurl.Expect("POST", uri).WithBodyToUpload(reqBodyFile, j.uri_tester.Podname(), j.oc).WithToken(j.token).WithHeader("Content-Type", contentType), + ) + var err error + if len(response.Error) > 0 { + err = fmt.Errorf("%s", response.Error) + } + rc := -1 + if response.Response != nil { + rc = response.Response.StatusCode + } + return string(response.Body), rc, err +} + +// PostXML sends a POST to the Jenkins server. If a body is specified, it should be XML. +// Returns response body and status code or an error. +func (j *JenkinsRef) PostXML(reqBodyFile, resourcePathFormat string, a ...interface{}) (string, int, error) { + return j.Post(reqBodyFile, resourcePathFormat, "application/xml", a...) +} + +// GetResourceWithStatus repeatedly tries to GET a jenkins resource with an acceptable +// HTTP status. Retries for the specified duration. +func (j *JenkinsRef) GetResourceWithStatus(validStatusList []int, timeout time.Duration, resourcePathFormat string, a ...interface{}) (string, int, error) { + var retBody string + var retStatus int + err := wait.Poll(10*time.Second, timeout, func() (bool, error) { + body, status, err := j.GetResource(resourcePathFormat, a...) + if err != nil { + e2e.Logf("Error accessing resource: %v", err) + return false, nil + } + var found bool + for _, s := range validStatusList { + if status == s { + found = true + break + } + } + if !found { + e2e.Logf("Expected http status [%v] during GET but received [%v] for %s with body %s", validStatusList, status, resourcePathFormat, body) + return false, nil + } + retBody = body + retStatus = status + return true, nil + }) + if err != nil { + uri := j.BuildURI(resourcePathFormat, a...) + return "", retStatus, fmt.Errorf("Error waiting for status %v from resource path %s: %v", validStatusList, uri, err) + } + return retBody, retStatus, nil +} + +// WaitForContent waits for a particular HTTP status and HTML matching a particular +// pattern to be returned by this Jenkins server. An error will be returned +// if the condition is not matched within the timeout period. 
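+// A hypothetical call: WaitForContent("(?i)finished: success", 200, 10*time.Minute,
+// "job/%s/%s/consoleText", jobName, buildNumber) polls every 10 seconds until the
+// console log matches the regex or the timeout expires.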
+func (j *JenkinsRef) WaitForContent(verificationRegEx string, verificationStatus int, timeout time.Duration, resourcePathFormat string, a ...interface{}) (string, error) { + var matchingContent = "" + err := wait.Poll(10*time.Second, timeout, func() (bool, error) { + + content, _, err := j.GetResourceWithStatus([]int{verificationStatus}, timeout, resourcePathFormat, a...) + if err != nil { + return false, nil + } + + if len(verificationRegEx) > 0 { + re := regexp.MustCompile(verificationRegEx) + if re.MatchString(content) { + matchingContent = content + return true, nil + } else { + e2e.Logf("Content did not match verification regex %q:\n %v", verificationRegEx, content) + return false, nil + } + } else { + matchingContent = content + return true, nil + } + }) + + if err != nil { + uri := j.BuildURI(resourcePathFormat, a...) + return "", fmt.Errorf("Error waiting for status %v and verification regex %q from resource path %s: %v", verificationStatus, verificationRegEx, uri, err) + } else { + return matchingContent, nil + } +} + +// CreateItem submits XML to create a named item on the Jenkins server. +func (j *JenkinsRef) CreateItem(name string, itemDefXML string) { + g.By(fmt.Sprintf("Creating new jenkins item: %s", name)) + _, status, err := j.PostXML(itemDefXML, "createItem?name=%s", name) + o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred()) + o.ExpectWithOffset(1, status).To(o.Equal(200)) +} + +// GetJobBuildNumber returns the current buildNumber on the named project OR "new" if +// there are no builds against a job yet. +func (j *JenkinsRef) GetJobBuildNumber(name string, timeout time.Duration) (string, error) { + body, status, err := j.GetResourceWithStatus([]int{200, 404}, timeout, "job/%s/lastBuild/buildNumber", name) + if err != nil { + return "", err + } + if status != 200 { + return "new", nil + } + return body, nil +} + +// StartJob triggers a named Jenkins job. The job can be monitored with the +// returned object. +func (j *JenkinsRef) StartJob(jobName string) *JobMon { + lastBuildNumber, err := j.GetJobBuildNumber(jobName, time.Minute) + o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred()) + + jmon := &JobMon{ + j: j, + lastBuildNumber: lastBuildNumber, + buildNumber: "", + jobName: jobName, + } + + e2e.Logf("Current timestamp for [%s]: %q", jobName, jmon.lastBuildNumber) + g.By(fmt.Sprintf("Starting jenkins job: %s", jobName)) + _, status, err := j.PostXML("", "job/%s/build?delay=0sec", jobName) + o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred()) + o.ExpectWithOffset(1, status).To(o.Equal(201)) + + return jmon +} + +// ProcessJenkinsJobUsingVars returns the path of the modified Jenkins job XML file. Instances of the +// string "PROJECT_NAME" are replaced with the specified namespace. +// Variables named in the vars map will also be replaced with their +// corresponding value. 
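+// For example (names assumed), ProcessJenkinsJobUsingVars("job.xml", ns,
+// map[string]string{"GIT_URL": repo}) substitutes both PROJECT_NAME and GIT_URL.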
+func (j *JenkinsRef) ProcessJenkinsJobUsingVars(filename, namespace string, vars map[string]string) string { + pre := exutil.FixturePath("testdata", "jenkins-plugin", filename) + post := exutil.ArtifactPath(filename) + + if vars == nil { + vars = map[string]string{} + } + vars["PROJECT_NAME"] = namespace + err := exutil.VarSubOnFile(pre, post, vars) + o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred()) + + data, err := ioutil.ReadFile(post) + o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred()) + + newfile, err := CreateTempFile(string(data)) + e2e.Logf("new temp file %s err %v", newfile, err) + if err != nil { + files, dbgerr := ioutil.ReadDir("/tmp") + if dbgerr != nil { + e2e.Logf("problem diagnosing /tmp: %v", dbgerr) + } else { + for _, file := range files { + e2e.Logf("found file %s under temp isdir %q mode %s", file.Name(), file.IsDir(), file.Mode().String()) + } + } + } + o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred()) + return newfile + +} + +// ProcessJenkinsJob returns the path of the modified Jenkins job XML file. Instances of the +// string "PROJECT_NAME" are replaced with the specified namespace. +func (j *JenkinsRef) ProcessJenkinsJob(filename, namespace string) string { + return j.ProcessJenkinsJobUsingVars(filename, namespace, nil) +} + +// BuildDSLJob returns an XML string defining a Jenkins workflow/pipeline DSL job. Instances of the +// string "PROJECT_NAME" are replaced with the specified namespace. +func (j *JenkinsRef) BuildDSLJob(namespace string, scriptLines ...string) (string, error) { + script := strings.Join(scriptLines, "\n") + script = strings.Replace(script, "PROJECT_NAME", namespace, -1) + fd := FlowDefinition{ + Plugin: "workflow-job@2.7", + Definition: Definition{ + Class: "org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition", + Plugin: "workflow-cps@2.18", + Script: script, + }, + } + output, err := xml.MarshalIndent(fd, " ", " ") + e2e.Logf("Formulated DSL Project XML:\n%s\n\n", output) + return string(output), err +} + +// GetJobConsoleLogs returns the console logs of a particular buildNumber. 
+func (j *JenkinsRef) GetJobConsoleLogs(jobName, buildNumber string) (string, error) { + return j.WaitForContent("", 200, 10*time.Minute, "job/%s/%s/consoleText", jobName, buildNumber) +} + +// GetJobConsoleLogsAndMatchViaBuildResult leverages various information in the BuildResult and +// returns the corresponding console logs, as well as look for matching string +func (j *JenkinsRef) GetJobConsoleLogsAndMatchViaBuildResult(br *exutil.BuildResult, match string) (string, error) { + if br == nil { + return "", fmt.Errorf("passed in nil BuildResult") + } + if br.Build == nil { + if br.Oc == nil { + return "", fmt.Errorf("BuildResult oc should have been set up during BuildResult construction") + } + var err error // interestingly, removing this line and using := on the next got a compile error + br.Build, err = br.Oc.BuildClient().BuildV1().Builds(br.Oc.Namespace()).Get(context.Background(), br.BuildName, metav1.GetOptions{}) + if err != nil { + return "", err + } + } + bldURI := br.Build.Annotations[buildv1.BuildJenkinsLogURLAnnotation] + if len(bldURI) > 0 { + // need to strip the route host...WaitForContent will prepend the svc ip:port we need to use in ext tests + url, err := url.Parse(bldURI) + if err != nil { + return "", err + } + bldURI = strings.Trim(url.Path, "/") + return j.WaitForContent(match, 200, 10*time.Minute, bldURI) + } + return "", fmt.Errorf("build %#v is missing the build uri annontation", br.Build) +} + +// GetLastJobConsoleLogs returns the last build associated with a Jenkins job. +func (j *JenkinsRef) GetLastJobConsoleLogs(jobName string) (string, error) { + return j.GetJobConsoleLogs(jobName, "lastBuild") +} + +// Finds the pod running Jenkins +func FindJenkinsPod(oc *exutil.CLI) *corev1.Pod { + pods, err := exutil.GetApplicationPods(oc, "jenkins") + o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred()) + + if pods == nil || pods.Items == nil { + g.Fail("No pods matching jenkins deploymentconfig in namespace " + oc.Namespace()) + } + + o.ExpectWithOffset(1, len(pods.Items)).To(o.Equal(1)) + return &pods.Items[0] +} + +// OverridePodTemplateImages sees if this is a prow-gcp e2e test invocation, and we want to override the agent image for the default pod templates; +// the jenkins image will pick up the env vars passed to new-app and update the image field of the pod templates with the values +func OverridePodTemplateImages(newAppArgs []string) []string { + nodejsAgent := os.Getenv("IMAGE_NODEJS_AGENT") + if len(strings.TrimSpace(nodejsAgent)) > 0 { + newAppArgs = append(newAppArgs, "-e", fmt.Sprintf("NODEJS_SLAVE_IMAGE=%s", nodejsAgent)) + } + mavenAgent := os.Getenv("IMAGE_MAVEN_AGENT") + if len(strings.TrimSpace(mavenAgent)) > 0 { + newAppArgs = append(newAppArgs, "-e", fmt.Sprintf("MAVEN_SLAVE_IMAGE=%s", mavenAgent)) + } + return newAppArgs +} + +// SetupDockerhubImage pull in a jenkins image from docker.io for aws-build testing; +// at some point during 4.0 dev, the jenkins imagestream in the openshift namespace +// will leverage the rhel images from the terms based registry at registry.redhat.io +// where credentials will be needed; we want to test against pre-release images +func SetupDockerhubImage(localImageName, snapshotImageStream string, newAppArgs []string, oc *exutil.CLI) []string { + g.By("Creating a Jenkins imagestream for overridding the default Jenkins imagestream in the openshift namespace") + + // Create an imagestream based on the Jenkins' plugin PR-Testing image (https://github.com/openshift/jenkins-plugin/blob/master/PR-Testing/README). 
+ err := oc.Run("new-build").Args("-D", fmt.Sprintf("FROM %s", localImageName), "--to", snapshotImageStream).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("waiting for build to finish") + err = exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), snapshotImageStream+"-1", exutil.CheckBuildSuccess, exutil.CheckBuildFailed, exutil.CheckBuildCancelled) + if err != nil { + exutil.DumpBuildLogs(snapshotImageStream, oc) + } + o.Expect(err).NotTo(o.HaveOccurred()) + + // Supplant the normal imagestream with the local imagestream using template parameters + newAppArgs = append(newAppArgs, "-p", fmt.Sprintf("NAMESPACE=%s", oc.Namespace())) + newAppArgs = append(newAppArgs, "-p", fmt.Sprintf("JENKINS_IMAGE_STREAM_TAG=%s:latest", snapshotImageStream)) + + return newAppArgs +} + +// pulls in a jenkins image built from a PR change for one of our plugins +func SetupSnapshotImage(envVarName, localImageName, snapshotImageStream string, newAppArgs []string, oc *exutil.CLI) ([]string, bool) { + tag := []string{localImageName} + hexIDs, err := exutil.DumpAndReturnTagging(tag) + + // If the user has expressed an interest in local plugin testing by setting the + // SNAPSHOT_JENKINS_IMAGE environment variable, try to use the local image. Inform them + // either about which image is being used in case their test fails. + snapshotImagePresent := len(hexIDs) > 0 && err == nil + useSnapshotImage := os.Getenv(envVarName) != "" + + if useSnapshotImage { + g.By("Creating a snapshot Jenkins imagestream and overridding the default Jenkins imagestream") + o.Expect(snapshotImagePresent).To(o.BeTrue()) + + e2e.Logf("\n\nIMPORTANT: You are testing a local jenkins snapshot image.") + e2e.Logf("In order to target the official image stream, you must unset %s before running extended tests.\n\n", envVarName) + + // Create an imagestream based on the Jenkins' plugin PR-Testing image (https://github.com/openshift/jenkins-plugin/blob/master/PR-Testing/README). 
+ err = oc.Run("new-build").Args("-D", fmt.Sprintf("FROM %s", localImageName), "--to", snapshotImageStream).Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + + g.By("waiting for build to finish") + err = exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), snapshotImageStream+"-1", exutil.CheckBuildSuccess, exutil.CheckBuildFailed, exutil.CheckBuildCancelled) + if err != nil { + exutil.DumpBuildLogs(snapshotImageStream, oc) + } + o.Expect(err).NotTo(o.HaveOccurred()) + + // Supplant the normal imagestream with the local imagestream using template parameters + newAppArgs = append(newAppArgs, "-p", fmt.Sprintf("NAMESPACE=%s", oc.Namespace())) + newAppArgs = append(newAppArgs, "-p", fmt.Sprintf("JENKINS_IMAGE_STREAM_TAG=%s:latest", snapshotImageStream)) + + } else { + if snapshotImagePresent { + e2e.Logf("\n\nIMPORTANT: You have a local OpenShift jenkins snapshot image, but it is not being used for testing.") + e2e.Logf("In order to target your local image, you must set %s to some value before running extended tests.\n\n", envVarName) + } + } + + return newAppArgs, useSnapshotImage +} + +func ProcessLogURLAnnotations(oc *exutil.CLI, t *exutil.BuildResult) (*url.URL, error) { + if len(t.Build.Annotations[buildv1.BuildJenkinsLogURLAnnotation]) == 0 { + return nil, fmt.Errorf("build %s does not contain a Jenkins URL annotation", t.BuildName) + } + jenkinsLogURL, err := url.Parse(t.Build.Annotations[buildv1.BuildJenkinsLogURLAnnotation]) + if err != nil { + return nil, fmt.Errorf("cannot parse jenkins log URL (%s): %v", t.Build.Annotations[buildv1.BuildJenkinsLogURLAnnotation], err) + } + if len(t.Build.Annotations[buildv1.BuildJenkinsConsoleLogURLAnnotation]) == 0 { + return nil, fmt.Errorf("build %s does not contain a Jenkins Console URL annotation", t.BuildName) + } + _, err = url.Parse(t.Build.Annotations[buildv1.BuildJenkinsConsoleLogURLAnnotation]) + if err != nil { + return nil, fmt.Errorf("cannot parse jenkins console log URL (%s): %v", t.Build.Annotations[buildv1.BuildJenkinsConsoleLogURLAnnotation], err) + } + if len(t.Build.Annotations[buildv1.BuildJenkinsBlueOceanLogURLAnnotation]) == 0 { + return nil, fmt.Errorf("build %s does not contain a Jenkins BlueOcean URL annotation", t.BuildName) + } + _, err = url.Parse(t.Build.Annotations[buildv1.BuildJenkinsBlueOceanLogURLAnnotation]) + if err != nil { + return nil, fmt.Errorf("cannot parse jenkins log blueocean URL (%s): %v", t.Build.Annotations[buildv1.BuildJenkinsBlueOceanLogURLAnnotation], err) + } + return jenkinsLogURL, nil +} + +func DumpLogs(oc *exutil.CLI, t *exutil.BuildResult) (string, error) { + var err error + if t.Build == nil { + t.Build, err = oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(context.Background(), t.BuildName, metav1.GetOptions{}) + if err != nil { + return "", fmt.Errorf("cannot retrieve build %s: %v", t.BuildName, err) + } + } + jenkinsLogURL, err := ProcessLogURLAnnotations(oc, t) + if err != nil { + return "", err + } + jenkinsRef := NewRef(oc) + log, _, err := jenkinsRef.GetResource(jenkinsLogURL.Path) + if err != nil { + return "", fmt.Errorf("cannot get jenkins log: %v", err) + } + return log, nil +} + +// CreateTempFile stores the specified data in a temp dir/temp file +// for the test who calls it +func CreateTempFile(data string) (string, error) { + testDir, err := ioutil.TempDir(os.TempDir(), "test-files") + if err != nil { + return "", err + } + testFile, err := ioutil.TempFile(testDir, "test-file") + if err != nil { + return "", err + } + if err := 
ioutil.WriteFile(testFile.Name(), []byte(data), 0666); err != nil { + return "", err + } + return testFile.Name(), nil +} diff --git a/test/util/ldap.go b/test/util/ldap.go new file mode 100644 index 000000000..732c8d66c --- /dev/null +++ b/test/util/ldap.go @@ -0,0 +1,229 @@ +package util + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path" + "strings" + "time" + + app "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + + "github.com/openshift/library-go/pkg/crypto" + "github.com/openshift/library-go/pkg/operator/resource/resourceread" + "github.com/openshift/openshift-tests-private/test/extended/testdata" +) + +const ( + // This image is used for both client and server pods. Temporary repo location. + OpenLDAPTestImage = "docker.io/mrogers950/origin-openldap-test:fedora29" + caCertFilename = "ca.crt" + caKeyFilename = "ca.key" + caName = "ldap CA" + saName = "ldap" + // These names are in sync with those in ldapserver-deployment.yaml + configMountName = "ldap-config" + certMountName = "ldap-cert" + // Used for telling the ldap client where to mount. + configMountPath = "/etc/openldap" + certMountPath = "/usr/local/etc/ldapcert" + // Confirms slapd operation + ldapSearchCommandFormat = "ldapsearch -x -H ldap://%s -Z -b dc=example,dc=com cn -LLL" + expectedLDAPClientResponse = "cn: Manager" +) + +// CreateLDAPTestServer deploys an LDAP server on the service network and then confirms StartTLS connectivity with an +// ldapsearch against it. It returns the ldapserver host and the ldap CA, or an error. +func CreateLDAPTestServer(oc *CLI) (string, []byte, error) { + deploy, ldapService, ldif, scripts := ReadLDAPServerTestData() + certDir, err := ioutil.TempDir("", "testca") + if err != nil { + return "", nil, err + } + defer os.RemoveAll(certDir) + + if _, err := oc.AdminKubeClient().CoreV1().ConfigMaps(oc.Namespace()).Create(context.Background(), ldif, metav1.CreateOptions{}); err != nil { + return "", nil, err + } + if _, err := oc.AdminKubeClient().CoreV1().ConfigMaps(oc.Namespace()).Create(context.Background(), scripts, metav1.CreateOptions{}); err != nil { + return "", nil, err + } + if _, err := oc.AdminKubeClient().CoreV1().Services(oc.Namespace()).Create(context.Background(), ldapService, metav1.CreateOptions{}); err != nil { + return "", nil, err + } + + // Create SA. + if _, err := oc.AdminKubeClient().CoreV1().ServiceAccounts(oc.Namespace()).Create(context.Background(), &corev1.ServiceAccount{ + ObjectMeta: v1.ObjectMeta{ + Name: saName, + }, + }, metav1.CreateOptions{}); err != nil { + return "", nil, err + } + + // Create CA. + ca, err := crypto.MakeSelfSignedCA(path.Join(certDir, caCertFilename), path.Join(certDir, caKeyFilename), + path.Join(certDir, "serial"), caName, 100) + if err != nil { + return "", nil, err + } + + // Ensure that the server cert is valid for localhost and the service network hostname. + serviceHost := ldapService.Name + "." 
+ oc.Namespace() + ".svc" + serverCertConfig, err := ca.MakeServerCert(sets.New("localhost", "127.0.0.1", serviceHost), 100) + if err != nil { + return "", nil, err + } + + caPEM, _, err := ca.Config.GetPEMBytes() + if err != nil { + return "", nil, err + } + + serverCertPEM, serverCertKeyPEM, err := serverCertConfig.GetPEMBytes() + if err != nil { + return "", nil, err + } + + _, err = oc.AdminKubeClient().CoreV1().Secrets(oc.Namespace()).Create(context.Background(), &corev1.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: certMountName, + }, + Data: map[string][]byte{ + corev1.TLSCertKey: []byte(serverCertPEM), + corev1.TLSPrivateKeyKey: serverCertKeyPEM, + caCertFilename: caPEM, + }, + Type: corev1.SecretTypeTLS, + }, metav1.CreateOptions{}) + if err != nil { + return "", nil, err + } + + // Allow the openldap container to run as root and privileged. This lets us use the existing openldap server + // container startup scripts to poplate its database using ldapi:///. + // TODO: Turn these, (and other resources in this function) into yamls. + err = oc.AsAdmin().Run("create").Args("role", "scc-anyuid", "--verb=use", "--resource=scc", + "--resource-name=anyuid").Execute() + if err != nil { + return "", nil, err + } + err = oc.AsAdmin().Run("adm").Args("policy", "add-role-to-user", "scc-anyuid", "-z", "ldap", + "--role-namespace", oc.Namespace()).Execute() + if err != nil { + return "", nil, err + } + + err = oc.AsAdmin().Run("create").Args("role", "scc-priv", "--verb=use", "--resource=scc", + "--resource-name=privileged").Execute() + if err != nil { + return "", nil, err + } + err = oc.AsAdmin().Run("adm").Args("policy", "add-role-to-user", "scc-priv", "-z", "ldap", + "--role-namespace", oc.Namespace()).Execute() + if err != nil { + return "", nil, err + } + + serverDeployment, err := oc.AdminKubeClient().AppsV1().Deployments(oc.Namespace()).Create(context.Background(), deploy, metav1.CreateOptions{}) + if err != nil { + return "", nil, err + } + + // Wait for an available replica. + err = wait.PollImmediate(1*time.Second, 5*time.Minute, func() (done bool, err error) { + dep, getErr := oc.AdminKubeClient().AppsV1().Deployments(oc.Namespace()).Get(context.Background(), serverDeployment.Name, + v1.GetOptions{}) + if getErr != nil { + return false, getErr + } + if dep.Status.AvailableReplicas == 0 { + return false, nil + } + return true, nil + }) + if err != nil { + return "", nil, fmt.Errorf("replica for %s not avaiable: %v", serverDeployment.Name, err) + } + + // Confirm ldap server availability. Since the ldap client does not support SNI, a TLS passthrough route will not + // work, so we need to talk to the server over the service network. + if err := checkLDAPConn(oc, serviceHost); err != nil { + return "", nil, err + } + + return serviceHost, caPEM, nil +} + +// Confirm that the ldapserver host is responding to ldapsearch. +func checkLDAPConn(oc *CLI, host string) error { + compareString := expectedLDAPClientResponse + output, err := runLDAPSearchInPod(oc, host) + if err != nil { + return err + } + if !strings.Contains(output, compareString) { + return fmt.Errorf("ldapsearch output does not contain %s\n Output: \n%s", compareString, output) + } + return nil +} + +// Run an ldapsearch in a pod against host. 
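+// With ldapSearchCommandFormat above, a host of "ldap.test.svc" (illustrative) runs:
+// ldapsearch -x -H ldap://ldap.test.svc -Z -b dc=example,dc=com cn -LLL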
+func runLDAPSearchInPod(oc *CLI, host string) (string, error) { + mounts, volumes := LDAPClientMounts() + output, errs := RunOneShotCommandPod(oc, "runonce-ldapsearch-pod", OpenLDAPTestImage, fmt.Sprintf(ldapSearchCommandFormat, host), mounts, volumes, nil, 8*time.Minute) + if len(errs) != 0 { + return output, fmt.Errorf("errours encountered trying to run ldapsearch pod: %v", errs) + } + return output, nil +} + +func ReadLDAPServerTestData() (*app.Deployment, *corev1.Service, *corev1.ConfigMap, *corev1.ConfigMap) { + return resourceread.ReadDeploymentV1OrDie(testdata.MustAsset( + "test/extended/testdata/ldap/ldapserver-deployment.yaml")), + resourceread.ReadServiceV1OrDie(testdata.MustAsset( + "test/extended/testdata/ldap/ldapserver-service.yaml")), + resourceread.ReadConfigMapV1OrDie(testdata.MustAsset( + "test/extended/testdata/ldap/ldapserver-config-cm.yaml")), + resourceread.ReadConfigMapV1OrDie(testdata.MustAsset( + "test/extended/testdata/ldap/ldapserver-scripts-cm.yaml")) +} + +func LDAPClientMounts() ([]corev1.VolumeMount, []corev1.Volume) { + return []corev1.VolumeMount{ + { + Name: configMountName, + MountPath: configMountPath, + }, + { + Name: certMountName, + MountPath: certMountPath, + }, + }, []corev1.Volume{ + { + Name: certMountName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: certMountName, + }, + }, + }, + { + Name: configMountName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: configMountName, + }, + }, + }, + }, + } +} diff --git a/test/util/logext/log.go b/test/util/logext/log.go new file mode 100644 index 000000000..26d98cbeb --- /dev/null +++ b/test/util/logext/log.go @@ -0,0 +1,74 @@ +package logext + +/* + author: rioliu@redhat.com +*/ + +import ( + "fmt" + "os" + "strings" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/rs/zerolog" +) + +const ( + // EnableDebugLog env variable to enable debug logging + EnableDebugLog = "GINKGO_TEST_ENABLE_DEBUG_LOG" +) + +// logWrapper wrapper interface for zerolog +type logWrapper struct { + log zerolog.Logger +} + +var logger = newLogger() + +// NewLogger initialize log wrapper with zerolog logger +// default log level is INFO, user can enable debug logging by env variable GINKGO_TEST_ENABLE_DEBUG_LOG +func newLogger() *logWrapper { + + // customize time field format to sync with e2e framework + zerolog.TimeFieldFormat = time.StampMilli + // initialize customized output to integrate with GinkgoWriter + output := zerolog.ConsoleWriter{Out: ginkgo.GinkgoWriter, TimeFormat: time.StampMilli} + // customize level format e.g. INFO, DEBUG, ERROR + output.FormatLevel = func(i interface{}) string { + return strings.ToUpper(fmt.Sprintf("%s:", i)) + } + // disable colorful output for timestamp field + output.FormatTimestamp = func(i interface{}) string { + return fmt.Sprintf("%s:", i) + } + logger := &logWrapper{log: zerolog.New(output).With().Timestamp().Logger()} + // set default log level to INFO + zerolog.SetGlobalLevel(zerolog.InfoLevel) + // if system env var is defined, enable debug logging + if _, enabled := os.LookupEnv(EnableDebugLog); enabled { + zerolog.SetGlobalLevel(zerolog.DebugLevel) + } + + return logger +} + +// Infof log info level message +func Infof(format string, v ...interface{}) { + logger.log.Info().Msgf(format, v...) +} + +// Debugf log debug level message +func Debugf(format string, v ...interface{}) { + logger.log.Debug().Msgf(format, v...) 
+} + +// Errorf log error level message +func Errorf(format string, v ...interface{}) { + logger.log.Error().Msgf(format, v...) +} + +// Warnf log warning level message +func Warnf(format string, v ...interface{}) { + logger.log.Warn().Msgf(format, v...) +} diff --git a/test/util/machine_helpers.go b/test/util/machine_helpers.go new file mode 100644 index 000000000..4ab7b939a --- /dev/null +++ b/test/util/machine_helpers.go @@ -0,0 +1,54 @@ +package util + +import ( + "context" + "strings" + "time" + + g "github.com/onsi/ginkgo/v2" + o "github.com/onsi/gomega" +) + +// We are no longer updating this file because we deprecated it, +// the new file is test/extended/util/clusterinfra/machine_helpers.go +// This file is not deleted because there are some old dependencies +const ( + MachineAPINamespace = "openshift-machine-api" + //MapiMachineset means the fullname of mapi machineset + MapiMachineset = "machinesets.machine.openshift.io" + //MapiMachine means the fullname of mapi machine + MapiMachine = "machines.machine.openshift.io" +) + +func ExtendedCheckPlatform(ctx context.Context, oc *CLI) string { + if CheckAKSCluster(ctx, oc) { + return "azure" + } + return CheckPlatform(oc) +} + +// CheckPlatform check the cluster's platform +func CheckPlatform(oc *CLI) string { + output, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.status.platformStatus.type}").Output() + return strings.ToLower(output) +} + +// SkipForSNOCluster skip for SNO cluster +func SkipForSNOCluster(oc *CLI) { + //Only 1 master, 1 worker node and with the same hostname. + masterNodes, _ := GetClusterNodesBy(oc, "master") + workerNodes, _ := GetClusterNodesBy(oc, "worker") + if len(masterNodes) == 1 && len(workerNodes) == 1 && masterNodes[0] == workerNodes[0] { + g.Skip("Skip for SNO cluster.") + } +} + +func CompareMachineCreationTime(oc *CLI, ms1 string, ms2 string) bool { + p10CreationTime, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+ms1, "-o=jsonpath={.items[0].metadata.creationTimestamp}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + p20CreationTime, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "-n", "openshift-machine-api", "-l", "machine.openshift.io/cluster-api-machineset="+ms2, "-o=jsonpath={.items[0].metadata.creationTimestamp}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + t1, _ := time.Parse(time.RFC3339, p10CreationTime) + t2, _ := time.Parse(time.RFC3339, p20CreationTime) + return !(t1.Before(t2) || t1.Equal(t2)) +} diff --git a/test/util/manifest.go b/test/util/manifest.go new file mode 100644 index 000000000..dd03a6f4c --- /dev/null +++ b/test/util/manifest.go @@ -0,0 +1,74 @@ +package util + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" + + o "github.com/onsi/gomega" + + "github.com/openshift/openshift-tests-private/test/extended/scheme" + + "k8s.io/apimachinery/pkg/runtime" + e2e "k8s.io/kubernetes/test/e2e/framework" +) + +func ReadFixture(path string) (runtime.Object, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to read file %q: %v", path, err) + } + + obj, _, err := scheme.Codecs.UniversalDeserializer().Decode(data, nil, nil) + if err != nil { + return nil, err + } + + return obj, nil +} + +func ReadFixtureOrFail(path string) runtime.Object { + obj, err := ReadFixture(path) + + o.Expect(err).NotTo(o.HaveOccurred()) + + return 
obj +}
+
+// GetFileContent gets the file content in test/extended/testdata/<baseDir>/<name>
+func GetFileContent(baseDir string, name string) (fileContent string) { + filePath := filepath.Join(FixturePath("testdata", baseDir), name) + fileOpen, err := os.Open(filePath) + if err != nil { + e2e.Failf("Failed to open file: %s", filePath) + }
+ defer fileOpen.Close() + fileRead, err := io.ReadAll(fileOpen) + if err != nil { + e2e.Failf("Failed to read file: %s", filePath) + } + return string(fileRead) +}
+
+/*
+This function accepts key/value replacements in multiple formats, e.g.:
+manifestFile, err := GenerateManifestFile(oc, "myDir", "config-map.yaml", map[string]string{"<address>": address, "<user>": user})
+manifestFile, err := GenerateManifestFile(oc, "myDir", "namespace.yaml", map[string]string{"<namespace>": namespace})
+*/
+func GenerateManifestFile(oc *CLI, baseDir string, manifestFile string, replacement ...map[string]string) (string, error) { + manifest := GetFileContent(baseDir, manifestFile)
+
+ for _, m := range replacement { + for key, value := range m { + manifest = strings.ReplaceAll(manifest, key, value) + } + }
+ ts := time.Now().UTC().Format(time.RFC3339Nano) + splitFileName := strings.Split(manifestFile, ".") + manifestFileName := splitFileName[0] + strings.Replace(ts, ":", "", -1) + "." + splitFileName[1] // strip colons, which are not valid in file names on some filesystems
+ err := os.WriteFile(manifestFileName, []byte(manifest), 0644) + return manifestFileName, err +}
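+
+// Illustrative usage sketch (directory, file, and placeholder names assumed):
+// render testdata/mydir/namespace.yaml with <namespace> substituted, apply it,
+// and clean up the generated file afterwards.
+// manifestFile, err := GenerateManifestFile(oc, "mydir", "namespace.yaml", map[string]string{"<namespace>": oc.Namespace()})
+// o.Expect(err).NotTo(o.HaveOccurred())
+// defer os.Remove(manifestFile)
+// err = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", manifestFile).Execute()
+// o.Expect(err).NotTo(o.HaveOccurred())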
": address, "": user}) +manifestFile, err := GenerateManifestFile(oc, "namespace.yaml", "myDir", map[string]string{"": namespace}) +*/ +func GenerateManifestFile(oc *CLI, baseDir string, manifestFile string, replacement ...map[string]string) (string, error) { + manifest := GetFileContent(baseDir, manifestFile) + + for _, m := range replacement { + for key, value := range m { + manifest = strings.ReplaceAll(manifest, key, value) + } + } + ts := time.Now().UTC().Format(time.RFC3339Nano) + splitFileName := strings.Split(manifestFile, ".") + manifestFileName := splitFileName[0] + strings.Replace(ts, ":", "", -1) + "." + splitFileName[1] // get rid of offensive colons + err := os.WriteFile(manifestFileName, []byte(manifest), 0644) + return manifestFileName, err +} diff --git a/test/util/metrics.go b/test/util/metrics.go new file mode 100644 index 000000000..7cf4eee9c --- /dev/null +++ b/test/util/metrics.go @@ -0,0 +1,339 @@ +package util + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/portforward" + "k8s.io/client-go/transport/spdy" + "k8s.io/kubernetes/test/utils" +) + +// pods whose metrics show a larger ratio of requests per +// second than maxQPSAllowed are considered "unhealthy". +const ( + maxQPSAllowed = 1.5 +) + +var ( + // TODO: these exceptions should not exist. Update operators to have a better request-rate per second + perComponentNamespaceMaxQPSAllowed = map[string]float64{ + "openshift-apiserver-operator": 7.2, + "openshift-kube-apiserver-operator": 7.2, + "openshift-kube-controller-manager-operator": 2.0, + "openshift-cluster-kube-scheduler-operator": 1.8, + "openshift-cluster-openshift-controller-manager-operator": 1.7, + "openshift-kube-scheduler-operator": 1.7, + } +) + +type podInfo struct { + name string + qps float64 + status string + namespace string + result string + failed bool + skipped bool +} + +// CalculatePodMetrics receives an admin client and an admin.kubeconfig, and traverses a list +// of operator namespaces, measuring requests-per-second for each operator pod, using the +// overall long-running time of each pod as a base metric. 
+func CalculatePodMetrics(adminClient kubernetes.Interface, adminConfig *restclient.Config) error { + podURLGetter := &portForwardURLGetter{ + Protocol: "https", + Host: "localhost", + RemotePort: "8443", + LocalPort: "37587", + } + + namespaces, err := adminClient.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{}) + if err != nil { + return err + } + + failures := []error{} + for _, ns := range namespaces.Items { + // skip namespaces which do not meet "operator namespace" criteria + if !strings.HasPrefix(ns.Name, "openshift-") || !strings.HasSuffix(ns.Name, "-operator") { + continue + } + + infos, err := getPodInfoForNamespace(adminClient, adminConfig, podURLGetter, ns.Name) + if err != nil { + return err + } + + for _, info := range infos { + if info.failed { + failures = append(failures, fmt.Errorf("failed to fetch operator pod metrics for pod %q: %s", info.name, info.result)) + continue + } + if info.skipped { + continue + } + + qpsLimit := maxQPSAllowed + if customLimit, ok := perComponentNamespaceMaxQPSAllowed[info.namespace]; ok { + qpsLimit = customLimit + } + + if info.qps > qpsLimit { + failures = append(failures, fmt.Errorf("operator pod %q in namespace %q is making %v requests per second. Maximum allowed is %v requests per second", info.name, info.namespace, info.qps, qpsLimit)) + continue + } + } + } + + if len(failures) > 0 { + return errors.NewAggregate(failures) + } + return nil +} + +func getPodInfoForNamespace(adminClient kubernetes.Interface, adminConfig *restclient.Config, podURLGetter *portForwardURLGetter, namespace string) ([]*podInfo, error) { + pods, err := adminClient.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{}) + if err != nil { + return nil, err + } + + podInfos := []*podInfo{} + for _, pod := range pods.Items { + info := &podInfo{ + name: pod.Name, + namespace: pod.Namespace, + status: string(pod.Status.Phase), + } + + podReady, err := utils.PodRunningReady(&pod) + if !podReady || err != nil { + result := "skipped, pod is not Running" + if err != nil { + result = fmt.Sprintf("%s: %v", result, err) + } + + info.result = result + info.skipped = true + podInfos = append(podInfos, info) + continue + } + + if len(pod.Spec.Containers) == 0 { + info.result = "skipped, no containers found" + info.skipped = true + podInfos = append(podInfos, info) + continue + } + + metrics, err := getPodMetrics(adminConfig, &pod, podURLGetter) + if err != nil { + info.result = fmt.Sprintf("error: %s", err) + info.failed = true + + // ignore errors from pods with no /metrics endpoint available + switch err.(type) { + case *url.Error: + if strings.Contains(err.Error(), "EOF") { + info.skipped = true + info.failed = false + info.result = fmt.Sprintf("/metrics endpoint not available") + } + } + + podInfos = append(podInfos, info) + continue + } + + metricGroup, ok := metrics["rest_client_requests_total"] + if !ok { + info.result = fmt.Sprintf("error: failed to find counter: %q", "rest_client_requests_total") + info.failed = true + podInfos = append(podInfos, info) + continue + } + + procStartTime, ok := metrics["process_start_time_seconds"] + if !ok || len(procStartTime.Metric) == 0 { + info.result = fmt.Sprintf("error: failed to find metric: %q", "process_start_time_seconds") + info.failed = true + podInfos = append(podInfos, info) + continue + } + procStartTimeSeconds := procStartTime.Metric[0].GetGauge().GetValue() + totalProcTimeSeconds := time.Now().Unix() - int64(procStartTimeSeconds) + + totalRequestCount := float64(0) + for _, metric 
:= range metricGroup.Metric { + totalRequestCount += metric.Counter.GetValue() + } + + comment := "within QPS bounds" + qps := totalRequestCount / float64(totalProcTimeSeconds) + if qps > maxQPSAllowed { + comment = "exceeds QPS bounds" + } + info.status = fmt.Sprintf("%s (%s)", info.status, comment) + info.qps = qps + info.result = fmt.Sprintf("%v requests over a span of %v seconds", totalRequestCount, totalProcTimeSeconds) + podInfos = append(podInfos, info) + } + + return podInfos, nil +} + +func getPodMetrics(adminConfig *restclient.Config, pod *v1.Pod, podURLGetter *portForwardURLGetter) (map[string]*dto.MetricFamily, error) { + result, err := podURLGetter.Get("/metrics", pod, adminConfig) + if err != nil { + return nil, err + } + + return parseRawMetrics(result) +} + +func parseRawMetrics(rawMetrics string) (map[string]*dto.MetricFamily, error) { + p := expfmt.TextParser{} + return p.TextToMetricFamilies(bytes.NewBufferString(rawMetrics)) +} + +type defaultPortForwarder struct { + restConfig *rest.Config + + StopChannel chan struct{} + ReadyChannel chan struct{} +} + +func newDefaultPortForwarder(adminConfig *rest.Config) *defaultPortForwarder { + return &defaultPortForwarder{ + restConfig: adminConfig, + StopChannel: make(chan struct{}, 1), + ReadyChannel: make(chan struct{}, 1), + } +} + +func (f *defaultPortForwarder) forwardPortsAndExecute(pod *v1.Pod, ports []string, toExecute func()) error { + if len(ports) < 1 { + return fmt.Errorf("at least 1 PORT is required for port-forward") + } + + restClient, err := rest.RESTClientFor(setRESTConfigDefaults(*f.restConfig)) + if err != nil { + return err + } + + if pod.Status.Phase != v1.PodRunning { + return fmt.Errorf("unable to forward port because pod is not running. Current status=%v", pod.Status.Phase) + } + + stdout := bytes.NewBuffer(nil) + req := restClient.Post(). + Resource("pods"). + Namespace(pod.Namespace). + Name(pod.Name). + SubResource("portforward") + + transport, upgrader, err := spdy.RoundTripperFor(f.restConfig) + if err != nil { + return err + } + dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL()) + fw, err := portforward.New(dialer, ports, f.StopChannel, f.ReadyChannel, stdout, stdout) + if err != nil { + return err + } + + go func() { + if f.StopChannel != nil { + defer close(f.StopChannel) + } + + <-f.ReadyChannel + toExecute() + }() + + return fw.ForwardPorts() +} + +func setRESTConfigDefaults(config rest.Config) *rest.Config { + if config.GroupVersion == nil { + config.GroupVersion = &schema.GroupVersion{Group: "", Version: "v1"} + } + if config.NegotiatedSerializer == nil { + config.NegotiatedSerializer = scheme.Codecs + } + if len(config.UserAgent) == 0 { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + config.APIPath = "/api" + return &config +} + +func newInsecureRESTClientForHost(host string) (rest.Interface, error) { + insecure := true + + configFlags := &genericclioptions.ConfigFlags{} + configFlags.Insecure = &insecure + configFlags.APIServer = &host + + newConfig, err := configFlags.ToRESTConfig() + if err != nil { + return nil, err + } + + return rest.RESTClientFor(setRESTConfigDefaults(*newConfig)) +} + +type portForwardURLGetter struct { + Protocol string + Host string + RemotePort string + LocalPort string +} + +// Get receives a url path (i.e. /metrics), a pod, and a rest config, and forwards a set remote port on the pod +// to a specified local port. It then executes a GET request using an insecure REST client against the given urlPath. 
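+// e.g. (illustrative): rawMetrics, err := podURLGetter.Get("/metrics", &pod, adminConfig)
+// returns the plaintext metrics exposition scraped through the forwarded port.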
+func (c *portForwardURLGetter) Get(urlPath string, pod *v1.Pod, config *rest.Config) (string, error) { + var result string + var lastErr error + forwarder := newDefaultPortForwarder(config)
+
+ if err := forwarder.forwardPortsAndExecute(pod, []string{c.LocalPort + ":" + c.RemotePort}, func() { + restClient, err := newInsecureRESTClientForHost(fmt.Sprintf("https://localhost:%s/", c.LocalPort)) + if err != nil { + lastErr = err + return + }
+
+ ioCloser, err := restClient.Get().RequestURI(urlPath).Stream(context.Background()) + if err != nil { + lastErr = err + return + } + defer ioCloser.Close()
+
+ data := bytes.NewBuffer(nil) + _, lastErr = io.Copy(data, ioCloser) + result = data.String() + }); err != nil { + return "", err + }
+ return result, lastErr +} diff --git a/test/util/must-gather.go b/test/util/must-gather.go new file mode 100644 index 000000000..49a5426f2 --- /dev/null +++ b/test/util/must-gather.go @@ -0,0 +1,313 @@ +package util
+
+import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "regexp" + + g "github.com/onsi/ginkgo/v2" + logger "github.com/openshift/openshift-tests-private/test/extended/util/logext" + e2e "k8s.io/kubernetes/test/e2e/framework" +)
+
+const ( + maxSizeMiB = 500.0 + maxFiles = 2 + artifactDirEnvVar = "QE_MUST_GATHER_DIR" + mustGatherPrefix = "must-gather-" +)
+
+// GetCurrentTestPolarionIDNumber inspects the name of the test case and returns the number of the polarion ID linked to this automated test case. It returns an empty string if no ID is found.
+func GetCurrentTestPolarionIDNumber() string { + name := g.CurrentSpecReport().FullText()
+
+ r := regexp.MustCompile(`-(?P<id>\d+)-`)
+
+ matches := r.FindStringSubmatch(name) + number := r.SubexpIndex("id") + if len(matches) < number+1 { + logger.Errorf("Could not get the test case ID") + return "" + }
+
+ return matches[number] +}
+
+// CanArchiveMustGather returns the number of MiB still available in the artifacts directory, and an error if the must-gather file cannot be created
+func CanArchiveMustGather() (float64, error) { + var ( + mustGatherFileName = GetMustGatherFileName() + )
+
+ artifactDestDir, ok := os.LookupEnv(artifactDirEnvVar) + if !ok || artifactDestDir == "" { + err := fmt.Errorf("Environment variable QE_MUST_GATHER_DIR is not set. Refuse to create must-gather files") + logger.Errorf("%s", err) + return 0.0, err + }
+
+ dirInfo, err := os.Stat(artifactDestDir) + if err != nil { + logger.Errorf("Error checking directory %s: %s", artifactDestDir, err) + return 0.0, err + }
+
+ // Check if it's actually a directory + if !dirInfo.IsDir() { + err := fmt.Errorf("%s exists but is not a directory", artifactDestDir) + logger.Errorf("%s", err) + return 0.0, err + }
+
+ _, err = os.Stat(path.Join(artifactDestDir, mustGatherFileName)) + if err == nil { + err := fmt.Errorf("A must-gather file has already been generated for this test case. Refuse to create a new must-gather file") + logger.Errorf("%s", err) + return 0.0, err + }
+ if err != nil && !os.IsNotExist(err) { + logger.Errorf("Error happened while checking if a previous must-gather file exists: %s", err) + return 0.0, err + }
+
+ dirSizeMiB, err := getDirSizeMiB(artifactDestDir) + if err != nil { + return 0.0, err + }
+
+ logger.Infof("Current size used in the artifacts dir: %.2fMiB", dirSizeMiB) + if dirSizeMiB > maxSizeMiB { + err := fmt.Errorf("Maximum size [%.2fMiB] already reached in the artifacts directory. Current size [%.2fMiB]. Refuse to create new must-gather files", maxSizeMiB, dirSizeMiB) + logger.Errorf("%s", err) + return 0.0, err + }
+
+ matches, err := filepath.Glob(path.Join(artifactDestDir, mustGatherPrefix+"*")) + if err != nil { + logger.Errorf("Cannot list the existing must-gather files") + return 0.0, err + }
+
+ if len(matches) >= maxFiles { + logger.Infof("The maximum number of must-gather files [%d] has already been created. Refuse to create more must-gather files", maxFiles) + return 0.0, fmt.Errorf("Refuse to create new must-gather files. Max number of must-gather files reached") + }
+
+ return maxSizeMiB - dirSizeMiB, nil +}
+
+// GetMustGatherFileName returns the name of the must-gather file for the current test case
+func GetMustGatherFileName() string { + return mustGatherPrefix + "ocp-" + GetCurrentTestPolarionIDNumber() + ".tgz" +}
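+
+// For a spec title containing "-12345-" (illustrative), GetCurrentTestPolarionIDNumber
+// returns "12345" and GetMustGatherFileName returns "must-gather-ocp-12345.tgz".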
+
+// ArchiveMustGatherFile creates a must-gather file to be archived by the CI prow job.
+// The addExtraContent function can be provided to add extra content to the must-gather file. Set it to nil if no extra content is needed.
+// Conditions to generate a must-gather file:
+// - The QE_MUST_GATHER_DIR environment variable must be defined and pointing to a valid directory.
+// - The directory defined in the QE_MUST_GATHER_DIR env var must not contain more than 500MiB, including the must-gather file that is being generated.
+// - A maximum of 2 must-gather files are allowed per prow job execution. If more than 2 test cases try to create a must-gather file in the same prow job execution, only 2 must-gather files will be created and the rest will be ignored.
+// - One test case can only create one must-gather file
+func ArchiveMustGatherFile(oc *CLI, addExtraContent func(*CLI, string) error) error { + var ( + mustGatherFileName = GetMustGatherFileName() + tmpBaseDir = e2e.TestContext.OutputDir + tmpSubdir = "must-gather" + mustGatherPrefix = "must-gather-" + )
+ logger.Infof("Creating must-gather file: %s", mustGatherFileName)
+
+ availableSizeMiB, err := CanArchiveMustGather() + if err != nil { + return err + } + logger.Infof("Available size in artifacts directory: %.2fMiB", availableSizeMiB)
+
+ artifactDestDir, ok := os.LookupEnv(artifactDirEnvVar) + if !ok || artifactDestDir == "" { + logger.Errorf("Environment variable QE_MUST_GATHER_DIR is not set. Refuse to create must-gather files") + return nil + }
+
+ tmpMustGatherDir, err := ioutil.TempDir(tmpBaseDir, mustGatherPrefix) + if err != nil { + logger.Errorf("Error creating the tmp directory to create the must-gather file: %s", err) + return err + } + defer os.RemoveAll(tmpMustGatherDir)
+
+ tmpMustGatherTarFile := path.Join(tmpMustGatherDir, mustGatherFileName) + tmpMustGatherGenDir := path.Join(tmpMustGatherDir, tmpSubdir)
+
+ mgStd, mgErr := oc.AsAdmin().WithoutNamespace().Run("adm").Args("must-gather", "--dest-dir", tmpMustGatherGenDir).Output() + if mgErr != nil { + logger.Errorf("Error creating must-gather file: %s\n\n%s", mgErr, mgStd) + // We don't return the error here, we want to always compress the directory in case it exists + }
+
+ mgInternalDir, err := getMustGatherInternalDir(tmpMustGatherGenDir) + if err != nil { + logger.Errorf("Cannot find the directory generated by the `oc adm must-gather` command. Err: %s", err) + return err + }
Err: %s", err) + return err + } + + editErr := editMCOMustGatherInfo(path.Join(tmpMustGatherGenDir, mgInternalDir)) + if editErr != nil { + return err + } + + var eErr error + if addExtraContent != nil { + logger.Infof("Adding extra content to the must-gather file") + eErr = addExtraContent(oc, tmpMustGatherGenDir) + if eErr != nil { + logger.Errorf("Error adding extra content to the must-gather file: %s", eErr) + // We don't return the error here, we want to always compress the directory in case it exists + } + } + + tarCmd := exec.Command("tar", "-czf", tmpMustGatherTarFile, ".") + tarCmd.Dir = tmpMustGatherGenDir + tarStd, err := tarCmd.CombinedOutput() + if err != nil { + logger.Errorf("Error compressing the must-gather directory: err: %s\n\n%s", err, string(tarStd)) + return err + } + + fileSizeMiB, err := getFileSizeMiB(tmpMustGatherTarFile) + if err != nil { + return err + } + logger.Infof("Size of the currently generated must-gather file: %.2fMiB", fileSizeMiB) + + if fileSizeMiB > availableSizeMiB { + err := fmt.Errorf("Max size reached: %.2fMiB. Available size: %.2fMiB. File size: %.2fMiB, Arch Dir: %2.fMiB. Refuse to archive the new must-gather file", + maxSizeMiB, availableSizeMiB, fileSizeMiB, maxSizeMiB-availableSizeMiB) + logger.Errorf("%s", err) + return err + } + + // We don't use os.Rename because directories will likely use different disks + mvStd, err := exec.Command("mv", tmpMustGatherTarFile, artifactDestDir).CombinedOutput() + if err != nil { + logger.Infof("Error moving the file to the final directory: %s\n\n%s", err, string(mvStd)) + return err + } + + if mgErr != nil { + logger.Infof("Must-gather file %s created with errors. Check the logs", mustGatherFileName) + return mgErr + } + if eErr != nil { + logger.Infof("Must-gather file %s created with errors. 
+
+// getFileSizeMiB returns the size of a file in MiB
+func getFileSizeMiB(path string) (float64, error) { + // Check if the file exists + info, err := os.Stat(path) + if err != nil { + logger.Infof("Error getting the size of file %s: %s", path, err) + return 0.0, err + }
+
+ return float64(info.Size()) / 1024.0 / 1024.0, nil +}
+
+// getDirSizeMiB returns the size of all files in a directory in MiB
+func getDirSizeMiB(dirPath string) (float64, error) { + var bytes int64
+
+ // Check if the directory exists + dirInfo, err := os.Stat(dirPath) + if err != nil { + logger.Infof("Error checking directory %s: %s", dirPath, err) + return 0.0, err + }
+
+ // Check if it's actually a directory + if !dirInfo.IsDir() { + return 0.0, fmt.Errorf("%s exists but is not a directory", dirPath) + }
+
+ err = filepath.Walk(dirPath, func(_ string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + bytes += info.Size() + } + return err + })
+ return float64(bytes) / 1024.0 / 1024.0, err +}
+
+// getMustGatherInternalDir returns the directory generated by the `oc adm must-gather` command inside our tmp directory
+func getMustGatherInternalDir(tmpMustGatherGenDir string) (string, error) { + files, err := ioutil.ReadDir(tmpMustGatherGenDir) + if err != nil { + logger.Errorf("Error listing directories in %s: %s", tmpMustGatherGenDir, err) + return "", err + }
+
+ innerMustGatherDir := "" + for _, file := range files { + if file.IsDir() { + innerMustGatherDir = file.Name() + break + } + }
+
+ if innerMustGatherDir == "" { + return "", fmt.Errorf("No directory generated by the `oc adm must-gather` command") + }
+
+ return innerMustGatherDir, nil +}
+
+// editMustGatherInfo executes a sed command to replace content in must-gather files
+func editMustGatherInfo(mgPath, dirPath, fileNamePattern, sedExp string) error { + logger.Infof("Editing info in must-gather. must-gather path: %s, dirPath: %s, fileNamePattern: %s, sedExp: %s", mgPath, dirPath, fileNamePattern, sedExp) + if mgPath == "" || fileNamePattern == "" || sedExp == "" { + return fmt.Errorf("Path, fileNamePattern and sedExp cannot be empty. Provide a path, a fileNamePattern and a sedExp") + }
+
+ path := path.Join(mgPath, dirPath)
+
+ cmdList := []string{path, "-type", "f", "-iname", fileNamePattern, "-exec", "sed", "-i", sedExp, "{}", "+"} + logger.Infof("find %s", cmdList)
+
+ editCmd := exec.Command("find", cmdList...) + editStd, err := editCmd.CombinedOutput()
+
+ if err != nil { + logger.Errorf("Error editing the information in the must-gather file: err: %s\n\n%s", err, string(editStd)) + return err + }
+
+ return nil +}
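+
+// For example (illustrative), editMustGatherInfo(mg, "namespaces/openshift-config", "*.yaml", "s/password: .*/password: REDACTED/")
+// expands to: find <mg>/namespaces/openshift-config -type f -iname "*.yaml" -exec sed -i "s/password: .*/password: REDACTED/" {} +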
diff --git a/test/util/networking.go b/test/util/networking.go
new file mode 100644
index 000000000..a875eac82
--- /dev/null
+++ b/test/util/networking.go
@@ -0,0 +1,72 @@
+package util
+
+import (
+	"context"
+	"fmt"
+	"regexp"
+	"strings"
+	"time"
+
+	g "github.com/onsi/ginkgo/v2"
+	o "github.com/onsi/gomega"
+	"k8s.io/apimachinery/pkg/util/wait"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+// CheckNetworkType returns the cluster's default network type in lower case, e.g. "ovnkubernetes"
+func CheckNetworkType(oc *CLI) string {
+	output, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("network.operator", "cluster", "-o=jsonpath={.spec.defaultNetwork.type}").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	return strings.ToLower(output)
+}
+
+// CheckNetworkOperatorStatus polls until the network clusteroperator reports True, False, False
+// for its Available, Progressing and Degraded conditions
+func CheckNetworkOperatorStatus(oc *CLI) error {
+	err := wait.Poll(10*time.Second, 120*time.Second, func() (bool, error) {
+		output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("co", "network").Output()
+		if err != nil {
+			e2e.Logf("Failed to get clusteroperator network, error: %s. Trying again", err)
+			return false, nil
+		}
+		matched, _ := regexp.MatchString("True.*False.*False", output)
+		if matched {
+			return true, nil
+		}
+		e2e.Logf("Network operator state is: %s", output)
+		return false, nil
+	})
+	return err
+}
+
+// GetIPVersionStackType gets the IP stack type of the cluster: "dualstack", "ipv6single" or "ipv4single"
+func GetIPVersionStackType(oc *CLI) (ipvStackType string) {
+	svcNetwork, err := oc.WithoutNamespace().AsAdmin().Run("get").Args("network.operator", "cluster", "-o=jsonpath={.spec.serviceNetwork}").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	if strings.Count(svcNetwork, ":") >= 2 && strings.Count(svcNetwork, ".") >= 2 {
+		ipvStackType = "dualstack"
+	} else if strings.Count(svcNetwork, ":") >= 2 {
+		ipvStackType = "ipv6single"
+	} else if strings.Count(svcNetwork, ".") >= 2 {
+		ipvStackType = "ipv4single"
+	}
+	e2e.Logf("The test cluster IP stack type is \"%s\".", ipvStackType)
+	return ipvStackType
+}
+
+// AssertOrCheckMCP waits until the given MachineConfigPool has all machines ready and none
+// unavailable or degraded; on timeout it either skips the test (skip=true) or returns an error
+func AssertOrCheckMCP(oc *CLI, mcp string, interval, timeout time.Duration, skip bool) error {
+	var machineCount string
+	err := wait.PollUntilContextTimeout(context.TODO(), interval, timeout, false, func(ctx context.Context) (bool, error) {
+		machineCount, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", mcp, "-o=jsonpath={.status.machineCount}{\" \"}{.status.readyMachineCount}{\" \"}{.status.unavailableMachineCount}{\" \"}{.status.degradedMachineCount}").Output()
+		indexCount := strings.Fields(machineCount)
+		// guard against empty or partial output from a failed oc call
+		if len(indexCount) < 4 {
+			return false, nil
+		}
+		if strings.Compare(indexCount[0], indexCount[1]) == 0 && strings.Compare(indexCount[2], "0") == 0 && strings.Compare(indexCount[3], "0") == 0 {
+			return true, nil
+		}
+		return false, nil
+	})
+	e2e.Logf("machineCount readyMachineCount unavailableMachineCount degradedMachineCount: %v", machineCount)
+	if err != nil {
+		if skip {
+			g.Skip(fmt.Sprintf("the mcp %v is not in the expected status, skipping the case", machineCount))
+		}
+		return fmt.Errorf("case: %v\nerror: %s", g.CurrentSpecReport().FullText(), fmt.Sprintf("machineconfigpool %v update failed", mcp))
+	}
+	return nil
+}
diff --git a/test/util/nfs.go b/test/util/nfs.go
new file mode 100644
index 000000000..9df37d6f0
--- /dev/null
+++ b/test/util/nfs.go
@@ -0,0 +1,74 @@
+package util
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	kapiv1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/wait"
+	
e2e "k8s.io/kubernetes/test/e2e/framework" + e2epv "k8s.io/kubernetes/test/e2e/framework/pv" + "k8s.io/kubernetes/test/e2e/framework/volume" +) + +// SetupK8SNFSServerAndVolume sets up an nfs server pod with count number of persistent volumes +func SetupK8SNFSServerAndVolume(oc *CLI, count int) (*kapiv1.Pod, []*kapiv1.PersistentVolume, error) { + e2e.Logf("Adding privileged scc from system:serviceaccount:%s:default", oc.Namespace()) + _, err := oc.AsAdmin().Run("adm").Args("policy", "add-scc-to-user", "privileged", fmt.Sprintf("system:serviceaccount:%s:default", oc.Namespace())).Output() + if err != nil { + return nil, nil, err + } + + e2e.Logf("Creating NFS server") + config := volume.TestConfig{ + Namespace: oc.Namespace(), + Prefix: "nfs", + // this image is an extension of k8s.gcr.io/volume-nfs:0.8 that adds + // additional nfs mounts to allow for openshift extended tests with + // replicas and shared state (formerly mongo, postgresql, mysql, etc., now only jenkins); defined + // in repo https://github.com/redhat-developer/nfs-server + ServerImage: "quay.io/redhat-developer/nfs-server:1.1", + ServerPorts: []int{2049}, + ServerVolumes: map[string]string{"": "/exports"}, + } + pod, ip := volume.CreateStorageServer(context.TODO(), oc.AsAdmin().KubeFramework().ClientSet, config) + e2e.Logf("Waiting for pod running") + err = wait.PollImmediate(5*time.Second, 1*time.Minute, func() (bool, error) { + phase, err := oc.AsAdmin().Run("get").Args("pods", pod.Name, "--template", "{{.status.phase}}").Output() + if err != nil { + return false, nil + } + if phase != "Running" { + return false, nil + } + return true, nil + }) + + pvs := []*kapiv1.PersistentVolume{} + volLabel := labels.Set{e2epv.VolumeSelectorKey: oc.Namespace()} + for i := 0; i < count; i++ { + e2e.Logf(fmt.Sprintf("Creating persistent volume %d", i)) + pvConfig := e2epv.PersistentVolumeConfig{ + NamePrefix: "nfs-", + Labels: volLabel, + PVSource: kapiv1.PersistentVolumeSource{ + NFS: &kapiv1.NFSVolumeSource{ + Server: ip, + Path: fmt.Sprintf("/exports/data-%d", i), + ReadOnly: false, + }, + }, + } + pvTemplate := e2epv.MakePersistentVolume(pvConfig) + pv, err := oc.AdminKubeClient().CoreV1().PersistentVolumes().Create(context.Background(), pvTemplate, metav1.CreateOptions{}) + if err != nil { + e2e.Logf("error creating persistent volume %#v", err) + } + e2e.Logf("Created persistent volume %#v", pv) + pvs = append(pvs, pv) + } + return pod, pvs, err +} diff --git a/test/util/nodes.go b/test/util/nodes.go new file mode 100644 index 000000000..1ef4969c9 --- /dev/null +++ b/test/util/nodes.go @@ -0,0 +1,444 @@ +package util + +import ( + "context" + "fmt" + "strings" + "time" + + o "github.com/onsi/gomega" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + e2e "k8s.io/kubernetes/test/e2e/framework" +) + +// GetFirstLinuxWorkerNode returns the first linux worker node in the cluster +func GetFirstLinuxWorkerNode(oc *CLI) (string, error) { + var ( + workerNode string + err error + ) + workerNode, err = getFirstNodeByOsID(oc, "worker", "rhcos") + if len(workerNode) == 0 { + workerNode, err = getFirstNodeByOsID(oc, "worker", "rhel") + } + return workerNode, err +} + +// GetAllNodesbyOSType returns a list of the names of all linux/windows nodes in the cluster have both linux and windows node +func GetAllNodesbyOSType(oc *CLI, ostype string) ([]string, error) { + var nodesArray []string + nodes, err := 
oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", "kubernetes.io/os="+ostype, "-o", "jsonpath='{.items[*].metadata.name}'").Output() + nodesStr := strings.Trim(nodes, "'") + //If split an empty string to string array, the default length string array is 1 + //So need to check if string is empty. + if len(nodesStr) == 0 { + return nodesArray, err + } + nodesArray = strings.Split(nodesStr, " ") + return nodesArray, err +} + +// GetAllNodes returns a list of the names of all nodes in the cluster +func GetAllNodes(oc *CLI) ([]string, error) { + nodes, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-o", "jsonpath='{.items[*].metadata.name}'").Output() + return strings.Split(strings.Trim(nodes, "'"), " "), err +} + +// GetFirstWorkerNode returns a first worker node +func GetFirstWorkerNode(oc *CLI) (string, error) { + workerNodes, err := GetClusterNodesBy(oc, "worker") + return workerNodes[0], err +} + +// GetFirstMasterNode returns a first master node +func GetFirstMasterNode(oc *CLI) (string, error) { + masterNodes, err := GetClusterNodesBy(oc, "master") + return masterNodes[0], err +} + +// GetClusterNodesBy returns the cluster nodes by role +func GetClusterNodesBy(oc *CLI, role string) ([]string, error) { + nodes, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", "node-role.kubernetes.io/"+role, "-o", "jsonpath='{.items[*].metadata.name}'").Output() + return strings.Split(strings.Trim(nodes, "'"), " "), err +} + +// DebugNodeWithChroot creates a debugging session of the node with chroot +func DebugNodeWithChroot(oc *CLI, nodeName string, cmd ...string) (string, error) { + stdOut, stdErr, err := debugNode(oc, nodeName, []string{}, true, true, cmd...) + return strings.Join([]string{stdOut, stdErr}, "\n"), err +} + +// DebugNodeWithOptions launches debug container with options e.g. --image +func DebugNodeWithOptions(oc *CLI, nodeName string, options []string, cmd ...string) (string, error) { + stdOut, stdErr, err := debugNode(oc, nodeName, options, false, true, cmd...) + return strings.Join([]string{stdOut, stdErr}, "\n"), err +} + +// DebugNodeWithOptionsAndChroot launches debug container using chroot and with options e.g. --image +func DebugNodeWithOptionsAndChroot(oc *CLI, nodeName string, options []string, cmd ...string) (string, error) { + stdOut, stdErr, err := debugNode(oc, nodeName, options, true, true, cmd...) + return strings.Join([]string{stdOut, stdErr}, "\n"), err +} + +// DebugNodeRetryWithOptionsAndChroot launches debug container using chroot and with options +// And waitPoll to avoid "error: unable to create the debug pod" and do retry +func DebugNodeRetryWithOptionsAndChroot(oc *CLI, nodeName string, options []string, cmd ...string) (string, error) { + var stdErr string + var stdOut string + var err error + errWait := wait.Poll(3*time.Second, 30*time.Second, func() (bool, error) { + stdOut, stdErr, err = debugNode(oc, nodeName, options, true, true, cmd...) + if err != nil { + return false, nil + } + return true, nil + }) + AssertWaitPollNoErr(errWait, fmt.Sprintf("Failed to debug node : %v", errWait)) + return strings.Join([]string{stdOut, stdErr}, "\n"), err +} + +// DebugNodeWithOptionsAndChrootWithoutRecoverNsLabel launches debug container using chroot and with options e.g. 
--image +// WithoutRecoverNsLabel which will not recover the labels that added for debug node container adapt the podSecurity changed on 4.12+ test clusters +// "security.openshift.io/scc.podSecurityLabelSync=false" And "pod-security.kubernetes.io/enforce=privileged" +func DebugNodeWithOptionsAndChrootWithoutRecoverNsLabel(oc *CLI, nodeName string, options []string, cmd ...string) (stdOut string, stdErr string, err error) { + return debugNode(oc, nodeName, options, true, false, cmd...) +} + +// DebugNode creates a debugging session of the node +func DebugNode(oc *CLI, nodeName string, cmd ...string) (string, error) { + stdOut, stdErr, err := debugNode(oc, nodeName, []string{}, false, true, cmd...) + return strings.Join([]string{stdOut, stdErr}, "\n"), err +} + +func debugNode(oc *CLI, nodeName string, cmdOptions []string, needChroot bool, recoverNsLabels bool, cmd ...string) (stdOut string, stdErr string, err error) { + var ( + debugNodeNamespace string + isNsPrivileged bool + cargs []string + outputError error + ) + cargs = []string{"node/" + nodeName} + // Enhance for debug node namespace used logic + // if "--to-namespace=" option is used, then uses the input options' namespace, otherwise use oc.Namespace() + // if oc.Namespace() is empty, uses "default" namespace instead + hasToNamespaceInCmdOptions, index := StringsSliceElementsHasPrefix(cmdOptions, "--to-namespace=", false) + if hasToNamespaceInCmdOptions { + debugNodeNamespace = strings.TrimPrefix(cmdOptions[index], "--to-namespace=") + } else { + debugNodeNamespace = oc.Namespace() + if debugNodeNamespace == "" { + debugNodeNamespace = "default" + } + } + // Running oc debug node command in normal projects + // (normal projects mean projects that are not clusters default projects like: "openshift-xxx" et al) + // need extra configuration on 4.12+ ocp test clusters + // https://github.com/openshift/oc/blob/master/pkg/helpers/cmd/errors.go#L24-L29 + if !strings.HasPrefix(debugNodeNamespace, "openshift-") { + isNsPrivileged, outputError = IsNamespacePrivileged(oc, debugNodeNamespace) + if outputError != nil { + return "", "", outputError + } + if !isNsPrivileged { + if recoverNsLabels { + defer RecoverNamespaceRestricted(oc, debugNodeNamespace) + } + outputError = SetNamespacePrivileged(oc, debugNodeNamespace) + if outputError != nil { + return "", "", outputError + } + } + } + + // For default nodeSelector enabled test clusters we need to add the extra annotation to avoid the debug pod's + // nodeSelector overwritten by the scheduler + if IsDefaultNodeSelectorEnabled(oc) && !IsWorkerNode(oc, nodeName) && !IsSpecifiedAnnotationKeyExist(oc, "ns/"+debugNodeNamespace, "", `openshift.io/node-selector`) { + AddAnnotationsToSpecificResource(oc, "ns/"+debugNodeNamespace, "", `openshift.io/node-selector=`) + defer RemoveAnnotationFromSpecificResource(oc, "ns/"+debugNodeNamespace, "", `openshift.io/node-selector`) + } + + if len(cmdOptions) > 0 { + cargs = append(cargs, cmdOptions...) + } + if !hasToNamespaceInCmdOptions { + cargs = append(cargs, "--to-namespace="+debugNodeNamespace) + } + if needChroot { + cargs = append(cargs, "--", "chroot", "/host") + } else { + cargs = append(cargs, "--") + } + cargs = append(cargs, cmd...) 
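+	// Illustrative example (hypothetical node name and command): for
+	//   debugNode(oc, "worker-0", nil, true, true, "cat", "/etc/os-release")
+	// the assembled invocation is roughly:
+	//   oc debug node/worker-0 --to-namespace=<namespace> -- chroot /host cat /etc/os-release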
+ return oc.AsAdmin().WithoutNamespace().Run("debug").Args(cargs...).Outputs() +} + +// DeleteLabelFromNode delete the custom label from the node +func DeleteLabelFromNode(oc *CLI, node string, label string) (string, error) { + return oc.AsAdmin().WithoutNamespace().Run("label").Args("node", node, label+"-").Output() +} + +// AddLabelToNode add the custom label to the node +func AddLabelToNode(oc *CLI, node string, label string, value string) (string, error) { + return oc.AsAdmin().WithoutNamespace().Run("label").Args("node", node, label+"="+value).Output() +} + +// GetFirstCoreOsWorkerNode returns the first CoreOS worker node +func GetFirstCoreOsWorkerNode(oc *CLI) (string, error) { + return getFirstNodeByOsID(oc, "worker", "rhcos") +} + +// GetFirstRhelWorkerNode returns the first rhel worker node +func GetFirstRhelWorkerNode(oc *CLI) (string, error) { + return getFirstNodeByOsID(oc, "worker", "rhel") +} + +// getFirstNodeByOsID returns the cluster node by role and os id +func getFirstNodeByOsID(oc *CLI, role string, osID string) (string, error) { + nodes, err := GetClusterNodesBy(oc, role) + for _, node := range nodes { + stdout, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node/"+node, "-o", "jsonpath=\"{.metadata.labels.node\\.openshift\\.io/os_id}\"").Output() + if strings.Trim(stdout, "\"") == osID { + return node, err + } + } + return "", err +} + +// GetNodeHostname returns the cluster node hostname +func GetNodeHostname(oc *CLI, node string) (string, error) { + hostname, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", node, "-o", "jsonpath='{..kubernetes\\.io/hostname}'").Output() + return strings.Trim(hostname, "'"), err +} + +// GetClusterNodesByRoleInHostedCluster returns the cluster nodes by role +func GetClusterNodesByRoleInHostedCluster(oc *CLI, role string) ([]string, error) { + nodes, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("node", "-l", "node-role.kubernetes.io/"+role, "-o", "jsonpath='{.items[*].metadata.name}'").Output() + return strings.Split(strings.Trim(nodes, "'"), " "), err +} + +// getFirstNodeByOsIDInHostedCluster returns the cluster node by role and os id +func getFirstNodeByOsIDInHostedCluster(oc *CLI, role string, osID string) (string, error) { + nodes, err := GetClusterNodesByRoleInHostedCluster(oc, role) + for _, node := range nodes { + stdout, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("node/"+node, "-o", "jsonpath=\"{.metadata.labels.node\\.openshift\\.io/os_id}\"").Output() + if strings.Trim(stdout, "\"") == osID { + return node, err + } + } + return "", err +} + +// GetFirstLinuxWorkerNodeInHostedCluster returns the first linux worker node in the cluster +func GetFirstLinuxWorkerNodeInHostedCluster(oc *CLI) (string, error) { + var ( + workerNode string + err error + ) + workerNode, err = getFirstNodeByOsIDInHostedCluster(oc, "worker", "rhcos") + if len(workerNode) == 0 { + workerNode, err = getFirstNodeByOsIDInHostedCluster(oc, "worker", "rhel") + } + return workerNode, err +} + +// GetAllNodesByNodePoolNameInHostedCluster return all node names of specified nodepool in hosted cluster. 
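+// Example (illustrative; assumes a nodepool named "np-1" exists):
+//   names, err := GetAllNodesByNodePoolNameInHostedCluster(oc, "np-1")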
+func GetAllNodesByNodePoolNameInHostedCluster(oc *CLI, nodePoolName string) ([]string, error) { + nodes, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args("node", "-l", "hypershift.openshift.io/nodePool="+nodePoolName, "-ojsonpath='{.items[*].metadata.name}'").Output() + return strings.Split(strings.Trim(nodes, "'"), " "), err +} + +// GetFirstWorkerNodeByNodePoolNameInHostedCluster returns the first linux worker node in the cluster +func GetFirstWorkerNodeByNodePoolNameInHostedCluster(oc *CLI, nodePoolName string) (string, error) { + workerNodes, err := GetAllNodesByNodePoolNameInHostedCluster(oc, nodePoolName) + o.Expect(err).NotTo(o.HaveOccurred()) + return workerNodes[0], err +} + +// GetSchedulableLinuxWorkerNodes returns a group of nodes that match the requirements: +// os: linux, role: worker, status: ready, schedulable +func GetSchedulableLinuxWorkerNodes(oc *CLI) ([]v1.Node, error) { + var nodes, workers []v1.Node + linuxNodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{LabelSelector: "kubernetes.io/os=linux"}) + // get schedulable linux worker nodes + for _, node := range linuxNodes.Items { + if _, ok := node.Labels["node-role.kubernetes.io/worker"]; ok && !node.Spec.Unschedulable { + workers = append(workers, node) + } + } + // get ready nodes + for _, worker := range workers { + for _, con := range worker.Status.Conditions { + if con.Type == "Ready" && con.Status == "True" { + nodes = append(nodes, worker) + break + } + } + } + return nodes, err +} + +// GetPodsNodesMap returns all the running pods in each node +func GetPodsNodesMap(oc *CLI, nodes []v1.Node) map[string][]v1.Pod { + podsMap := make(map[string][]v1.Pod) + projects, err := oc.AdminKubeClient().CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + + // get pod list in each node + for _, project := range projects.Items { + pods, err := oc.AdminKubeClient().CoreV1().Pods(project.Name).List(context.Background(), metav1.ListOptions{}) + o.Expect(err).NotTo(o.HaveOccurred()) + for _, pod := range pods.Items { + if pod.Status.Phase != "Failed" && pod.Status.Phase != "Succeeded" { + podsMap[pod.Spec.NodeName] = append(podsMap[pod.Spec.NodeName], pod) + } + } + } + + var nodeNames []string + for _, node := range nodes { + nodeNames = append(nodeNames, node.Name) + } + contain := func(a []string, b string) bool { + for _, c := range a { + if c == b { + return true + } + } + return false + } + // if the key is not in nodes list, remove the element from the map + for podmap := range podsMap { + if !contain(nodeNames, podmap) { + delete(podsMap, podmap) + } + } + return podsMap +} + +// NodeResources contains the resources of CPU and Memory in a node +type NodeResources struct { + CPU int64 + Memory int64 +} + +// GetRequestedResourcesNodesMap returns the total requested CPU and Memory in each node +func GetRequestedResourcesNodesMap(oc *CLI, nodes []v1.Node) map[string]NodeResources { + rmap := make(map[string]NodeResources) + podsMap := GetPodsNodesMap(oc, nodes) + for nodeName := range podsMap { + var totalRequestedCPU, totalRequestedMemory int64 + for _, pod := range podsMap[nodeName] { + for _, container := range pod.Spec.Containers { + totalRequestedCPU += container.Resources.Requests.Cpu().MilliValue() + totalRequestedMemory += container.Resources.Requests.Memory().MilliValue() + } + } + rmap[nodeName] = NodeResources{totalRequestedCPU, totalRequestedMemory} + } + return rmap +} + +// 
GetAllocatableResourcesNodesMap returns the total allocatable CPU and Memory in each node +func GetAllocatableResourcesNodesMap(nodes []v1.Node) map[string]NodeResources { + rmap := make(map[string]NodeResources) + for _, node := range nodes { + rmap[node.Name] = NodeResources{node.Status.Allocatable.Cpu().MilliValue(), node.Status.Allocatable.Memory().MilliValue()} + } + return rmap +} + +// GetRemainingResourcesNodesMap returns the total remaning CPU and Memory in each node +func GetRemainingResourcesNodesMap(oc *CLI, nodes []v1.Node) map[string]NodeResources { + rmap := make(map[string]NodeResources) + requested := GetRequestedResourcesNodesMap(oc, nodes) + allocatable := GetAllocatableResourcesNodesMap(nodes) + + for _, node := range nodes { + rmap[node.Name] = NodeResources{allocatable[node.Name].CPU - requested[node.Name].CPU, allocatable[node.Name].Memory - requested[node.Name].Memory} + } + return rmap +} + +// getNodesByRoleAndOsID returns list of nodes by role and OS ID +func getNodesByRoleAndOsID(oc *CLI, role string, osID string) ([]string, error) { + var nodesList []string + nodes, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", "node-role.kubernetes.io/"+role+",node.openshift.io/os_id="+osID, "-o", "jsonpath='{.items[*].metadata.name}'").Output() + nodes = strings.Trim(nodes, "'") + if len(nodes) != 0 { + nodesList = strings.Split(nodes, " ") + } + return nodesList, err +} + +// GetAllWorkerNodesByOSID returns list of worker nodes by OS ID +func GetAllWorkerNodesByOSID(oc *CLI, osID string) ([]string, error) { + return getNodesByRoleAndOsID(oc, "worker", osID) +} + +// GetNodeArchByName gets the node arch by its name +func GetNodeArchByName(oc *CLI, nodeName string) string { + nodeArch, err := GetResourceSpecificLabelValue(oc, "node/"+nodeName, "", "kubernetes\\.io/arch") + o.Expect(err).NotTo(o.HaveOccurred(), "Fail to get node/%s arch: %v\n", nodeName, err) + e2e.Logf(`The node/%s arch is "%s"`, nodeName, nodeArch) + return nodeArch +} + +// GetNodeListByLabel gets the node list by label +func GetNodeListByLabel(oc *CLI, labelKey string) []string { + output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", "-l", labelKey, "-o=jsonpath={.items[*].metadata.name}").Output() + o.Expect(err).NotTo(o.HaveOccurred(), "Fail to get node with label %v, got error: %v\n", labelKey, err) + nodeNameList := strings.Fields(output) + return nodeNameList +} + +// IsDefaultNodeSelectorEnabled judges whether the test cluster enabled the defaultNodeSelector +func IsDefaultNodeSelectorEnabled(oc *CLI) bool { + defaultNodeSelector, getNodeSelectorErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("scheduler", "cluster", "-o=jsonpath={.spec.defaultNodeSelector}").Output() + if getNodeSelectorErr != nil && strings.Contains(defaultNodeSelector, `the server doesn't have a resource type`) { + e2e.Logf("WARNING: The scheduler API is not supported on the test cluster") + return false + } + o.Expect(getNodeSelectorErr).NotTo(o.HaveOccurred(), "Fail to get cluster scheduler defaultNodeSelector got error: %v\n", getNodeSelectorErr) + return !strings.EqualFold(defaultNodeSelector, "") +} + +// IsWorkerNode judges whether the node has the worker role +func IsWorkerNode(oc *CLI, nodeName string) bool { + isWorker, _ := StringsSliceContains(GetNodeListByLabel(oc, `node-role.kubernetes.io/worker`), nodeName) + return isWorker +} + +func WaitForNodeToDisappear(oc *CLI, nodeName string, timeout, interval time.Duration) { + o.Eventually(func() bool { + _, err := 
oc.AdminKubeClient().CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{}) + if errors.IsNotFound(err) { + return true + } + o.Expect(err).ShouldNot(o.HaveOccurred(), fmt.Sprintf("Unexpected error: %s", errors.ReasonForError(err))) + e2e.Logf("Still waiting for node %s to disappear", nodeName) + return false + }).WithTimeout(timeout).WithPolling(interval).Should(o.BeTrue()) +} + +// DebugNodeRetryWithOptionsAndChroot launches debug container using chroot and with options +// And waitPoll to avoid "error: unable to create the debug pod" and do retry +// Separate the Warning from Output: metadata.name: this is used in the Pod's hostname, which can result in surprising behavior; a DNS label is recommended: +// [must be no more than 63 characters]\ndevice name 'Warning: metadata.name: this is used in the Pod's hostname, which can result in surprising behavior; +// a DNS label is recommended: [must be no more than 63 characters]' longer than 127 characters\nerror: non-zero exit code from debug container +func DebugNodeRetryWithOptionsAndChrootWithStdErr(oc *CLI, nodeName string, options []string, cmd ...string) (string, string, error) { + var stdErr string + var stdOut string + var err error + errWait := wait.Poll(3*time.Second, 30*time.Second, func() (bool, error) { + stdOut, stdErr, err = debugNode(oc, nodeName, options, true, true, cmd...) + if err != nil { + return false, nil + } + return true, nil + }) + AssertWaitPollNoErr(errWait, fmt.Sprintf("Failed to debug node : %v", errWait)) + return stdOut, stdErr, err +} diff --git a/test/util/nutanix_client.go b/test/util/nutanix_client.go new file mode 100644 index 000000000..2062383a2 --- /dev/null +++ b/test/util/nutanix_client.go @@ -0,0 +1,233 @@ +package util + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "time" + + g "github.com/onsi/ginkgo/v2" + o "github.com/onsi/gomega" + nutanix "github.com/tecbiz-ch/nutanix-go-sdk" + "github.com/tecbiz-ch/nutanix-go-sdk/schema" + e2e "k8s.io/kubernetes/test/e2e/framework" +) + +// NutanixSession is an object representing an nutanix session +type NutanixSession struct { + nutanixClient *nutanix.Client + Username string + Password string + Endpoint string +} + +// Secret is an object representing secrets +type Secret struct { + Data struct { + Credentials string `json:"credentials"` + } `json:"data"` +} + +// Credential is an object representing credentials +type Credential struct { + Type string `json:"type"` + Data struct { + PrismCentral struct { + Username string `json:"username"` + Password string `json:"password"` + } `json:"prismCentral"` + } `json:"data"` +} + +// NewNutanixSession creates a new nutanix session from environment credentials +func NewNutanixSession(username, password, endpoint string) (*NutanixSession, error) { + configCreds := nutanix.Credentials{ + Username: username, + Password: password, + } + + opts := []nutanix.ClientOption{ + nutanix.WithCredentials(&configCreds), + nutanix.WithEndpoint(endpoint), + } + + client := nutanix.NewClient(opts...) 
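+	// Note: NewClient only assembles the client configuration here; no request
+	// is sent to Prism Central until an API call such as VM.List is made,
+	// which is why this constructor always returns a nil error.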
+ + nutanixSess := &NutanixSession{ + nutanixClient: client, + Username: username, + Password: password, + Endpoint: endpoint, + } + return nutanixSess, nil +} + +// GetNutanixCredentialFromCluster gets credentials like username, password, and endpoint URL from the cluster +func GetNutanixCredentialFromCluster(oc *CLI) (string, string, string, error) { + credentialJSON, getSecErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/nutanix-credentials", "-n", "openshift-machine-api", "-o", "json").Output() + if getSecErr != nil || credentialJSON == "" { + g.Skip("Failed to get credential to access Nutanix, skip the testing.") + } + + var secret Secret + errSecret := json.Unmarshal([]byte(credentialJSON), &secret) + o.Expect(errSecret).NotTo(o.HaveOccurred()) + + credentials := secret.Data.Credentials + decodedCred, decodeCredErr := base64.StdEncoding.DecodeString(credentials) + o.Expect(decodeCredErr).NotTo(o.HaveOccurred()) + + var creds []Credential + credErr := json.Unmarshal([]byte(decodedCred), &creds) + o.Expect(credErr).NotTo(o.HaveOccurred()) + + if len(creds) == 0 { + return "", "", "", fmt.Errorf("No nutanix credentials found") + } + + nutanixEndpointURL, nutanixEndpointURLErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("Infrastructure", "cluster", `-o=jsonpath={.spec.platformSpec.nutanix.prismCentral.address}`).Output() + o.Expect(nutanixEndpointURLErr).NotTo(o.HaveOccurred()) + + nutanixPort, nutanixPortLErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("Infrastructure", "cluster", `-o=jsonpath={.spec.platformSpec.nutanix.prismCentral.port}`).Output() + o.Expect(nutanixPortLErr).NotTo(o.HaveOccurred()) + + return creds[0].Data.PrismCentral.Username, creds[0].Data.PrismCentral.Password, nutanixEndpointURL + ":" + nutanixPort, nil +} + +// GetNutanixInstanceID get nutanix instance id +func (nutanixSess *NutanixSession) GetNutanixInstanceID(instanceName string) (string, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // First, retrieve the VM details using the List method + vms, vmsErr := nutanixSess.nutanixClient.VM.List(ctx, &schema.DSMetadata{Filter: fmt.Sprintf("vm_name==%s", instanceName)}) + o.Expect(vmsErr).NotTo(o.HaveOccurred()) + + if len(vms.Entities) > 0 { + instanceID := vms.Entities[0].Metadata.UUID + return instanceID, nil + } + + return "", fmt.Errorf("InstanceID not found: %s", instanceName) +} + +// GetNutanixInstanceState get nutanix powerstate for e.g. 
ON or OFF +func (nutanixSess *NutanixSession) GetNutanixInstanceState(instanceID string) (string, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vms, vmsErr := nutanixSess.nutanixClient.VM.List(ctx, &schema.DSMetadata{Filter: fmt.Sprintf("vm_name==%s", instanceID)}) + o.Expect(vmsErr).NotTo(o.HaveOccurred()) + + if len(vms.Entities) > 0 { + instanceStatus := vms.Entities[0].Status + powerState := *instanceStatus.Resources.PowerState + e2e.Logf("Power State: %s", powerState) + + // Check the power state + switch powerState { + case "ON": + // Instance is running + return "running", nil + case "OFF": + // Instance is stopped + return "stopped", nil + default: + return "", fmt.Errorf("Invalid power state: %s", powerState) + } + } + return "", nil +} + +// SetNutanixInstanceState changes the Nutanix power state, e.g., ON or OFF +func (nutanixSess *NutanixSession) SetNutanixInstanceState(targetState string, instanceUUID string) error { + // Create the request URL + url := fmt.Sprintf("https://%s/api/nutanix/v3/vms/%s", nutanixSess.Endpoint, instanceUUID) + + // Retry logic parameters + maxRetries := 5 + retryDelay := 30 * time.Second + + // Create HTTP client + client := &http.Client{} + + // Helper function to set common headers and authentication + setHeadersAndAuth := func(req *http.Request) { + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json") + req.SetBasicAuth(nutanixSess.Username, nutanixSess.Password) + } + + // Fetch the VM data + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return fmt.Errorf("error creating request: %v", err) + } + setHeadersAndAuth(req) + + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf("error sending request: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("request failed with status code %d", resp.StatusCode) + } + + // Read the response body + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return fmt.Errorf("error reading response body: %v", err) + } + + // Update the VM power state in the JSON payload + var vmData map[string]interface{} + err = json.Unmarshal(body, &vmData) + if err != nil { + return fmt.Errorf("error parsing response JSON: %v", err) + } + delete(vmData, "status") + vmData["spec"].(map[string]interface{})["resources"].(map[string]interface{})["power_state"] = targetState + + // Convert the modified data back to JSON + payload, err := json.Marshal(vmData) + if err != nil { + return fmt.Errorf("error creating request body: %v", err) + } + + for attempt := 0; attempt < maxRetries; attempt++ { + // Update the VM state + reqPut, err := http.NewRequest("PUT", url, bytes.NewBuffer(payload)) + if err != nil { + return fmt.Errorf("error creating request: %v", err) + } + setHeadersAndAuth(reqPut) + + respPut, err := client.Do(reqPut) + if err != nil { + return fmt.Errorf("error sending request: %v", err) + } + defer respPut.Body.Close() + + if respPut.StatusCode == http.StatusOK || respPut.StatusCode == http.StatusAccepted { + return nil + } else if respPut.StatusCode == http.StatusConflict && attempt < maxRetries-1 { + fmt.Printf("Conflict detected, retrying in %v seconds... 
(attempt %d/%d)\n", retryDelay.Seconds(), attempt+1, maxRetries)
+			time.Sleep(retryDelay)
+			continue
+		}
+
+		// Read the response body for debugging purposes
+		respBody, err := ioutil.ReadAll(respPut.Body)
+		if err != nil {
+			return fmt.Errorf("error reading response body: %v", err)
+		}
+		return fmt.Errorf("PUT request failed with status code %d: %s", respPut.StatusCode, string(respBody))
+	}
+
+	return fmt.Errorf("request failed after %d attempts", maxRetries)
+}
diff --git a/test/util/nutanix_restapi.go b/test/util/nutanix_restapi.go
new file mode 100644
index 000000000..9256923f7
--- /dev/null
+++ b/test/util/nutanix_restapi.go
@@ -0,0 +1,148 @@
+package util
+
+import (
+	"encoding/base64"
+	"fmt"
+	"net"
+	"os"
+	"os/exec"
+	"strings"
+	"time"
+
+	"github.com/tidwall/gjson"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+type NutanixClient struct {
+	nutanixToken string
+	nutanixHost  string
+}
+
+// GetNutanixCredFromCluster gets the Nutanix credentials from the cluster and
+// returns them base64-encoded as "username:password"
+func GetNutanixCredFromCluster(oc *CLI) (string, error) {
+	credential, getSecErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("secret/nutanix-credentials", "-n", "openshift-machine-api", "-o=jsonpath={.data.credentials}").Output()
+	if getSecErr != nil {
+		return "", fmt.Errorf("Get Nutanix credential Error")
+	}
+
+	creJson, err := base64.StdEncoding.DecodeString(credential)
+	if err != nil {
+		return "", err
+	}
+
+	result := gjson.Get(string(creJson), "0.data.prismCentral")
+	if !result.Exists() {
+		return "", fmt.Errorf("No Nutanix prismCentral credential data found")
+	}
+
+	username := result.Get("username").String()
+	password := result.Get("password").String()
+
+	if username != "" && password != "" {
+		return base64.StdEncoding.EncodeToString([]byte(username + ":" + password)), nil
+	}
+	return "", fmt.Errorf("No Nutanix credential string found")
+}
+
+// GetNutanixHostromCluster gets the Nutanix prismCentral host:port from the cluster
+func GetNutanixHostromCluster(oc *CLI) (string, error) {
+	host, getHostErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.spec.platformSpec.nutanix.prismCentral.address}").Output()
+	if getHostErr != nil {
+		return "", fmt.Errorf("Failed to get Nutanix prismCentral address")
+	}
+
+	port, getPortErr := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "cluster", "-o=jsonpath={.spec.platformSpec.nutanix.prismCentral.port}").Output()
+	if getPortErr != nil {
+		return "", fmt.Errorf("Failed to get Nutanix prismCentral port")
+	}
+
+	if host != "" && port != "" {
+		return net.JoinHostPort(host, port), nil
+	}
+
+	return "", fmt.Errorf("Failed to get the complete Nutanix prismCentral host:port info")
+}
+
+// InitNutanixClient initializes a Nutanix client by configuring the corresponding parameters
+func InitNutanixClient(oc *CLI) (*NutanixClient, error) {
+	encodedCre, err := GetNutanixCredFromCluster(oc)
+	if err != nil {
+		return nil, err
+	}
+	host, err := GetNutanixHostromCluster(oc)
+	if err != nil {
+		return nil, err
+	}
+	nutanixClient := NutanixClient{
+		nutanixToken: encodedCre,
+		nutanixHost:  host,
+	}
+
+	return &nutanixClient, nil
+}
+
+// GetNutanixVMUUID gets the Nutanix VM UUID for the given node name
+func (nt *NutanixClient) GetNutanixVMUUID(nodeName string) (string, error) {
+	cmdCurl := `curl -s -X POST --header "Content-Type: application/json" \
+	--header "Accept: application/json" \
+	--header "Authorization: Basic %v" \
+	"https://%v/api/nutanix/v3/vms/list" \
+	-d '{ "kind": "vm","filter": "","length": 60,"offset": 0}' |
+	jq -r '.entities[] | select(.spec.name == "'"%v"'") | 
.metadata.uuid' + ` + formattedCmd := fmt.Sprintf(cmdCurl, nt.nutanixToken, nt.nutanixHost, nodeName) + uuid, cmdErr := exec.Command("bash", "-c", formattedCmd).Output() + if cmdErr != nil || string(uuid) == "" { + return "", cmdErr + } + return strings.TrimRight(string(uuid), "\n"), nil +} + +// Get Nutanix VM state, general value would be "ON" or "OFF" +func (nt *NutanixClient) GetNutanixVMState(vmUUID string) (string, error) { + cmdCurl := `curl -s --header "Content-Type: application/json"\ + --header "Authorization: Basic %v" \ + "https://%v/api/nutanix/v3/vms/%v" \ + | jq -r '.spec.resources.power_state' + ` + formattedCmd := fmt.Sprintf(cmdCurl, nt.nutanixToken, nt.nutanixHost, vmUUID) + state, cmdErr := exec.Command("bash", "-c", formattedCmd).Output() + if cmdErr != nil || string(state) == "" { + return "", cmdErr + } + return strings.TrimRight(string(state), "\n"), nil +} + +// Change NutanixVMstate, target state should be "ON" or "OFF" +func (nt *NutanixClient) ChangeNutanixVMState(vmUUID string, targeState string) error { + cmdCurl := `curl -s --header "Content-Type: application/json" \ + --header "Accept: application/json" \ + --header "Authorization: Basic %v" \ + "https://%v/api/nutanix/v3/vms/%v" \ + | jq 'del(.status) | .spec.resources.power_state |= "%v"' > %v + ` + currentTime := time.Now() + dateTimeString := currentTime.Format("20060102") + randStr := GetRandomString() + filePath := "/tmp/" + randStr + dateTimeString + ".json" + formattedCmd := fmt.Sprintf(cmdCurl, nt.nutanixToken, nt.nutanixHost, vmUUID, targeState, filePath) + _, cmdErr := exec.Command("bash", "-c", formattedCmd).Output() + defer func() { + if err := os.RemoveAll(filePath); err != nil { + e2e.Logf("Error removing file %v: %v", filePath, err.Error()) + } + }() + if cmdErr != nil { + return cmdErr + } + + // Submit the payload to change the VM state + updateAPI := `curl -s -X 'PUT' --header "Content-Type: application/json" --header "Accept: application/json" --header "Authorization: Basic %v" "https://%v/api/nutanix/v3/vms/%v" -d @%v` + formattedUpdateCmd := fmt.Sprintf(updateAPI, nt.nutanixToken, nt.nutanixHost, vmUUID, filePath) + _, cmdErr = exec.Command("bash", "-c", formattedUpdateCmd).Output() + if cmdErr != nil { + return cmdErr + } + return nil +} diff --git a/test/util/oauthserver/helpers.go b/test/util/oauthserver/helpers.go new file mode 100644 index 000000000..5126c1656 --- /dev/null +++ b/test/util/oauthserver/helpers.go @@ -0,0 +1,66 @@ +package oauthserver + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + restclient "k8s.io/client-go/rest" + + osinv1 "github.com/openshift/api/osin/v1" + userv1 "github.com/openshift/api/user/v1" + userv1client "github.com/openshift/client-go/user/clientset/versioned/typed/user/v1" +) + +var ( + osinScheme = runtime.NewScheme() + codecs = serializer.NewCodecFactory(osinScheme) + encoder = codecs.LegacyCodec(osinv1.GroupVersion) +) + +func init() { + utilruntime.Must(osinv1.Install(osinScheme)) +} + +func GetRawExtensionForOsinProvider(obj runtime.Object) (*runtime.RawExtension, error) { + objBytes := encode(obj) + if objBytes == nil { + return nil, fmt.Errorf("unable to encode the object: %v", obj) + } + return &runtime.RawExtension{Raw: objBytes}, nil +} + +func GetUserForToken(config *restclient.Config, token, expectedUsername string) (*userv1.User, error) { + userConfig := 
restclient.AnonymousClientConfig(config) + userConfig.BearerToken = token + userClient, err := userv1client.NewForConfig(userConfig) + if err != nil { + return nil, err + } + + user, err := userClient.Users().Get(context.Background(), "~", metav1.GetOptions{}) + if err != nil { + return nil, err + } + + return user, err +} + +func GetDirPathFromConfigMapSecretName(name string) string { + return fmt.Sprintf("%s/%s", configObjectsDir, name) // always concat with / in case this is run on windows +} + +func GetPathFromConfigMapSecretName(name, key string) string { + return fmt.Sprintf("%s/%s/%s", configObjectsDir, name, key) +} + +func encode(obj runtime.Object) []byte { + bytes, err := runtime.Encode(encoder, obj) + if err != nil { + return nil + } + return bytes +} diff --git a/test/util/oauthserver/oauthserver.go b/test/util/oauthserver/oauthserver.go new file mode 100644 index 000000000..7ae96a6d5 --- /dev/null +++ b/test/util/oauthserver/oauthserver.go @@ -0,0 +1,531 @@ +package oauthserver + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + "math/rand" + "net/http" + "path" + "time" + + "github.com/RangelReale/osincli" + "github.com/davecgh/go-spew/spew" + + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/wait" + restclient "k8s.io/client-go/rest" + e2e "k8s.io/kubernetes/test/e2e/framework" + + configv1 "github.com/openshift/api/config/v1" + legacyconfigv1 "github.com/openshift/api/legacyconfig/v1" + oauthv1 "github.com/openshift/api/oauth/v1" + osinv1 "github.com/openshift/api/osin/v1" + configclient "github.com/openshift/client-go/config/clientset/versioned" + "github.com/openshift/library-go/pkg/config/helpers" + "github.com/openshift/library-go/pkg/crypto" + + "github.com/openshift/openshift-tests-private/test/extended/testdata" + exutil "github.com/openshift/openshift-tests-private/test/extended/util" + "github.com/openshift/openshift-tests-private/test/extended/util/oauthserver/tokencmd" +) + +const ( + serviceURLFmt = "https://test-oauth-svc.%s.svc" // fill in the namespace + + servingCertDirPath = "/var/config/system/secrets/serving-cert" + servingCertPathCert = "/var/config/system/secrets/serving-cert/tls.crt" + servingCertPathKey = "/var/config/system/secrets/serving-cert/tls.key" + + routerCertsDirPath = "/var/config/system/secrets/router-certs" + + sessionSecretDirPath = "/var/config/system/secrets/session-secret" + sessionSecretPath = "/var/config/system/secrets/session-secret/session" + + oauthConfigPath = "/var/config/system/configmaps/oauth-config" + serviceCADirPath = "/var/config/system/configmaps/service-ca" + + configObjectsDir = "/var/oauth/configobjects/" + + RouteName = "test-oauth-route" + SAName = "e2e-oauth" +) + +var ( + serviceCAPath = "/var/config/system/configmaps/service-ca/service-ca.crt" // has to be var so that we can use its address + + defaultProcMount = corev1.DefaultProcMount + volumesDefaultMode int32 = 420 +) + +type NewRequestTokenOptionsFunc func(username, password string) *tokencmd.RequestTokenOptions + +// DeployOAuthServer - deployes an instance of an OpenShift OAuth server +// very simplified for now +// returns OAuth server url, cleanup function, error +func DeployOAuthServer(oc *exutil.CLI, idps []osinv1.IdentityProvider, configMaps []corev1.ConfigMap, secrets []corev1.Secret) (NewRequestTokenOptionsFunc, func(), error) { + + var cleanupFuncs []func() + cleanupFunc := 
func() { + for _, f := range cleanupFuncs { + f() + } + } + + // create the CA bundle, Service, Route and SA + oauthServerDataDir := exutil.FixturePath("testdata", "oauthserver") + for _, res := range []string{"cabundle-cm.yaml", "oauth-sa.yaml", "oauth-network.yaml"} { + if err := oc.AsAdmin().Run("create").Args("-f", path.Join(oauthServerDataDir, res)).Execute(); err != nil { + return nil, cleanupFunc, err + } + e2e.Logf("Created resources defined in %v", res) + } + + kubeClient := oc.AdminKubeClient() + + // the oauth server needs access to kube-system configmaps/extension-apiserver-authentication + clusterRoleBinding, err := createClusterRoleBinding(oc) + if err != nil { + return nil, cleanupFunc, err + } + cleanupFuncs = append(cleanupFuncs, func() { + _ = oc.AsAdmin().Run("delete").Args("clusterrolebindings.rbac.authorization.k8s.io", clusterRoleBinding.Name).Execute() + }) + e2e.Logf("Created: %v %v", "ClusterRoleBinding", clusterRoleBinding.Name) + + // create the secrets and configmaps the OAuth server config requires to get the server going + for _, cm := range configMaps { + if _, err := kubeClient.CoreV1().ConfigMaps(oc.Namespace()).Create(context.Background(), &cm, metav1.CreateOptions{}); err != nil { + return nil, cleanupFunc, err + } + e2e.Logf("Created: %v %v/%v", "ConfigMap", oc.Namespace(), cm.Name) + } + for _, secret := range secrets { + if _, err := kubeClient.CoreV1().Secrets(oc.Namespace()).Create(context.Background(), &secret, metav1.CreateOptions{}); err != nil { + return nil, cleanupFunc, err + } + e2e.Logf("Created: %v %v/%v", secret.Kind, secret.Namespace, secret.Name) + } + + // generate a session secret for the oauth server + sessionSecret, err := randomSessionSecret() + if err != nil { + return nil, cleanupFunc, err + } + if _, err := kubeClient.CoreV1().Secrets(oc.Namespace()).Create(context.Background(), sessionSecret, metav1.CreateOptions{}); err != nil { + return nil, cleanupFunc, err + } + e2e.Logf("Created: %v %v/%v", "Secret", oc.Namespace(), sessionSecret.Name) + + // get the route of the future OAuth server (defined in the oauth-network.yaml fixture above) + route, err := oc.AdminRouteClient().RouteV1().Routes(oc.Namespace()).Get(context.Background(), RouteName, metav1.GetOptions{}) + if err != nil { + return nil, cleanupFunc, err + } + routeURL := fmt.Sprintf("https://%s", route.Spec.Host) + + // prepare the config, inject it with the route URL and the IdP config we got + config, err := oauthServerConfig(oc, routeURL, idps) + if err != nil { + return nil, cleanupFunc, err + } + + configBytes := encode(config) + if configBytes == nil { + return nil, cleanupFunc, fmt.Errorf("error encoding the OSIN config") + } + + // store the config in a ConfigMap that's to be mounted into the server's pod + _, err = kubeClient.CoreV1().ConfigMaps(oc.Namespace()).Create(context.Background(), &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "oauth-config", + }, + Data: map[string]string{ + "oauth.conf": string(configBytes), + }, + }, metav1.CreateOptions{}) + if err != nil { + return nil, cleanupFunc, err + } + e2e.Logf("Created: %v %v/%v", "ConfigMap", oc.Namespace(), "oauth-config") + + // get the OAuth server image that's used in the cluster + image, err := getImage(oc) + if err != nil { + return nil, cleanupFunc, err + } + + // prepare the pod def, create secrets and CMs + oauthServerPod, err := oauthServerPod(configMaps, secrets, image) + if err != nil { + return nil, cleanupFunc, err + } + + // finally create the oauth server, wait till it starts 
running
+	if _, err := kubeClient.CoreV1().Pods(oc.Namespace()).Create(context.Background(), oauthServerPod, metav1.CreateOptions{}); err != nil {
+		return nil, cleanupFunc, err
+	}
+	e2e.Logf("Created: %v %v/%v", "Pod", oc.Namespace(), oauthServerPod.Name)
+
+	if err := waitForOAuthServerReady(oc); err != nil {
+		return nil, cleanupFunc, err
+	}
+	e2e.Logf("OAuth server is ready")
+
+	oauthClient, err := createOAuthClient(oc, routeURL)
+	if err != nil {
+		return nil, cleanupFunc, err
+	}
+	cleanupFuncs = append(cleanupFuncs, func() {
+		_ = oc.AsAdmin().Run("delete").Args("oauthclients.oauth.openshift.io", oauthClient.Name).Execute()
+	})
+	e2e.Logf("Created: %v %v/%v", oauthClient.Kind, oauthClient.Namespace, oauthClient.Name)
+
+	newRequestTokenOptionFunc := func(username, password string) *tokencmd.RequestTokenOptions {
+		return newRequestTokenOptions(restclient.AnonymousClientConfig(oc.AdminConfig()), routeURL, oc.Namespace(), username, password)
+	}
+
+	return newRequestTokenOptionFunc, cleanupFunc, nil
+}
+
+func waitForOAuthServerReady(oc *exutil.CLI) error {
+	if err := exutil.WaitForUserBeAuthorized(oc, "system:serviceaccount:"+oc.Namespace()+":e2e-oauth", "*", "*"); err != nil {
+		return err
+	}
+	if err := waitForOAuthServerPodReady(oc); err != nil {
+		return err
+	}
+	return waitForOAuthServerRouteReady(oc)
+}
+
+func waitForOAuthServerPodReady(oc *exutil.CLI) error {
+	e2e.Logf("Waiting for the OAuth server pod to be ready")
+	return wait.PollImmediateInfinite(1*time.Second, func() (bool, error) {
+		pod, err := oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()).Get(context.Background(), "test-oauth-server", metav1.GetOptions{})
+		if err != nil {
+			return false, err
+		}
+		if !exutil.CheckPodIsReady(*pod) {
+			e2e.Logf("OAuth server pod is not ready: %s\nContainer statuses: %s", pod.Status.Message, spew.Sdump(pod.Status.ContainerStatuses))
+			return false, nil
+		}
+		return true, nil
+	})
+}
+
+func waitForOAuthServerRouteReady(oc *exutil.CLI) error {
+	route, err := oc.AdminRouteClient().RouteV1().Routes(oc.Namespace()).Get(context.Background(), RouteName, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+	request, err := http.NewRequest(http.MethodHead, fmt.Sprintf("https://%s/healthz", route.Spec.Host), nil)
+	if err != nil {
+		return err
+	}
+	return wait.PollImmediate(time.Second, time.Minute, func() (done bool, err error) {
+		e2e.Logf("Waiting for the OAuth server route to be ready")
+		transport, err := restclient.TransportFor(restclient.AnonymousClientConfig(oc.AdminConfig()))
+		if err != nil {
+			e2e.Logf("Error getting transport: %v", err)
+			return false, err
+		}
+		response, err := transport.RoundTrip(request)
+		if response != nil && response.StatusCode == http.StatusOK {
+			return true, nil
+		}
+		if response != nil {
+			e2e.Logf("Waiting for the OAuth server route to be ready: %v", response.Status)
+		}
+		if err != nil {
+			e2e.Logf("Waiting for the OAuth server route to be ready: %v", err)
+		}
+		return false, nil
+	})
+}
+
+func oauthServerPod(configMaps []corev1.ConfigMap, secrets []corev1.Secret, image string) (*corev1.Pod, error) {
+	oauthServerAsset := testdata.MustAsset("test/extended/testdata/oauthserver/oauth-pod.yaml")
+
+	obj, err := helpers.ReadYAML(bytes.NewBuffer(oauthServerAsset), corev1.AddToScheme)
+	if err != nil {
+		return nil, err
+	}
+
+	oauthServerPod, ok := obj.(*corev1.Pod)
+	if !ok {
+		// previously this returned a nil error on a failed type assertion
+		return nil, fmt.Errorf("expected the oauth-pod.yaml fixture to decode into a *corev1.Pod, got %T", obj)
+	}
+
+	volumes := oauthServerPod.Spec.Volumes
+	volumeMounts := oauthServerPod.Spec.Containers[0].VolumeMounts
+
+	for _, cm := range 
configMaps { + volumes, volumeMounts = addCMMount(volumes, volumeMounts, &cm) + } + + for _, sec := range secrets { + volumes, volumeMounts = addSecretMount(volumes, volumeMounts, &sec) + } + + oauthServerPod.Spec.Volumes = volumes + oauthServerPod.Spec.Containers[0].VolumeMounts = volumeMounts + oauthServerPod.Spec.Containers[0].Image = image + + return oauthServerPod, nil +} + +func addCMMount(volumes []corev1.Volume, volumeMounts []corev1.VolumeMount, cm *corev1.ConfigMap) ([]corev1.Volume, []corev1.VolumeMount) { + volumes = append(volumes, corev1.Volume{ + Name: cm.ObjectMeta.Name, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{Name: cm.ObjectMeta.Name}, + DefaultMode: &volumesDefaultMode, + }, + }, + }) + + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: cm.ObjectMeta.Name, + MountPath: GetDirPathFromConfigMapSecretName(cm.ObjectMeta.Name), + ReadOnly: true, + }) + + return volumes, volumeMounts +} + +func addSecretMount(volumes []corev1.Volume, volumeMounts []corev1.VolumeMount, secret *corev1.Secret) ([]corev1.Volume, []corev1.VolumeMount) { + volumes = append(volumes, corev1.Volume{ + Name: secret.ObjectMeta.Name, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secret.ObjectMeta.Name, + DefaultMode: &volumesDefaultMode, + }, + }, + }) + + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: secret.ObjectMeta.Name, + MountPath: GetDirPathFromConfigMapSecretName(secret.ObjectMeta.Name), + ReadOnly: true, + }) + + return volumes, volumeMounts +} + +func oauthServerConfig(oc *exutil.CLI, routeURL string, idps []osinv1.IdentityProvider) (*osinv1.OsinServerConfig, error) { + adminConfigClient := configclient.NewForConfigOrDie(oc.AdminConfig()).ConfigV1() + + infrastructure, err := adminConfigClient.Infrastructures().Get(context.Background(), "cluster", metav1.GetOptions{}) + if err != nil { + return nil, err + } + + console, err := adminConfigClient.Consoles().Get(context.Background(), "cluster", metav1.GetOptions{}) + if err != nil { + return nil, err + } + + namedRouterCerts, err := routerCertsToSNIConfig(oc) + if err != nil { + return nil, err + } + + return &osinv1.OsinServerConfig{ + GenericAPIServerConfig: configv1.GenericAPIServerConfig{ + ServingInfo: configv1.HTTPServingInfo{ + ServingInfo: configv1.ServingInfo{ + BindAddress: "0.0.0.0:6443", + BindNetwork: "tcp4", + // we have valid serving certs provided by service-ca + // this is our main server cert which is used if SNI does not match + CertInfo: configv1.CertInfo{ + CertFile: servingCertPathCert, + KeyFile: servingCertPathKey, + }, + ClientCA: "", + NamedCertificates: namedRouterCerts, + MinTLSVersion: crypto.TLSVersionToNameOrDie(crypto.DefaultTLSVersion()), + CipherSuites: crypto.CipherSuitesToNamesOrDie(crypto.DefaultCiphers()), + }, + MaxRequestsInFlight: 1000, + RequestTimeoutSeconds: 5 * 60, // 5 minutes + }, + AuditConfig: configv1.AuditConfig{}, + KubeClientConfig: configv1.KubeClientConfig{ + KubeConfig: "", + ConnectionOverrides: configv1.ClientConnectionOverrides{ + QPS: 400, + Burst: 400, + }, + }, + }, + OAuthConfig: osinv1.OAuthConfig{ + MasterCA: &serviceCAPath, // we have valid serving certs provided by service-ca so we can use the service for loopback + MasterURL: fmt.Sprintf(serviceURLFmt, oc.Namespace()), + MasterPublicURL: routeURL, + LoginURL: infrastructure.Status.APIServerURL, + AssetPublicURL: console.Status.ConsoleURL, // set console route as 
valid 302 redirect for logout + AlwaysShowProviderSelection: false, + IdentityProviders: idps, + GrantConfig: osinv1.GrantConfig{ + Method: osinv1.GrantHandlerDeny, // force denial as this field must be set per OAuth client + ServiceAccountMethod: osinv1.GrantHandlerPrompt, + }, + SessionConfig: &osinv1.SessionConfig{ + SessionSecretsFile: sessionSecretPath, + SessionMaxAgeSeconds: 5 * 60, // 5 minutes + SessionName: "ssn", + }, + TokenConfig: osinv1.TokenConfig{ + AuthorizeTokenMaxAgeSeconds: 5 * 60, // 5 minutes + AccessTokenMaxAgeSeconds: 24 * 60 * 60, // 1 day + }, + }, + }, nil +} + +func routerCertsToSNIConfig(oc *exutil.CLI) ([]configv1.NamedCertificate, error) { + routerSecret, err := oc.AdminKubeClient().CoreV1().Secrets("openshift-config-managed").Get(context.Background(), "router-certs", metav1.GetOptions{}) + if err != nil { + return nil, err + } + localRouterSecret := routerSecret.DeepCopy() + localRouterSecret.ResourceVersion = "" + localRouterSecret.Namespace = oc.Namespace() + if _, err := oc.AdminKubeClient().CoreV1().Secrets(oc.Namespace()).Create(context.Background(), localRouterSecret, metav1.CreateOptions{}); err != nil { + return nil, err + } + + var out []configv1.NamedCertificate + for domain := range localRouterSecret.Data { + out = append(out, configv1.NamedCertificate{ + Names: []string{"*." + domain}, // ingress domain is always a wildcard + CertInfo: configv1.CertInfo{ // the cert and key are appended together + CertFile: routerCertsDirPath + "/" + domain, + KeyFile: routerCertsDirPath + "/" + domain, + }, + }) + } + return out, nil +} + +func randomSessionSecret() (*corev1.Secret, error) { + skey, err := newSessionSecretsJSON() + if err != nil { + return nil, err + } + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "session-secret", + Labels: map[string]string{ + "app": "test-oauth-server", + }, + }, + Data: map[string][]byte{ + "session": skey, + }, + }, nil +} + +// this is less random than the actual secret generated in cluster-authentication-operator +func newSessionSecretsJSON() ([]byte, error) { + const ( + sha256KeyLenBytes = sha256.BlockSize // max key size with HMAC SHA256 + aes256KeyLenBytes = 32 // max key size with AES (AES-256) + ) + + secrets := &legacyconfigv1.SessionSecrets{ + TypeMeta: metav1.TypeMeta{ + Kind: "SessionSecrets", + APIVersion: "v1", + }, + Secrets: []legacyconfigv1.SessionSecret{ + { + Authentication: randomString(sha256KeyLenBytes), // 64 chars + Encryption: randomString(aes256KeyLenBytes), // 32 chars + }, + }, + } + secretsBytes, err := json.Marshal(secrets) + if err != nil { + return nil, fmt.Errorf("error marshalling the session secret: %v", err) // should never happen + } + + return secretsBytes, nil +} + +// randomString - random string of A-Z chars with len size +func randomString(size int) string { + buffer := make([]byte, size) + for i := 0; i < size; i++ { + buffer[i] = byte(65 + rand.Intn(25)) + } + return base64.RawURLEncoding.EncodeToString(buffer) +} + +// getImage will grab the hypershift image version from openshift-authentication ns +func getImage(oc *exutil.CLI) (string, error) { + selector, _ := labels.Parse("app=oauth-openshift") + pods, err := oc.AdminKubeClient().CoreV1().Pods("openshift-authentication").List(context.Background(), metav1.ListOptions{LabelSelector: selector.String()}) + if err != nil { + return "", err + } + return pods.Items[0].Spec.Containers[0].Image, nil +} + +func newRequestTokenOptions(config *restclient.Config, oauthServerURL, oauthClientName, username, password 
string) *tokencmd.RequestTokenOptions { + options := tokencmd.NewRequestTokenOptions(config, nil, username, password, false) + // supply the info the client would otherwise ask from .well-known/oauth-authorization-server + oauthClientConfig := &osincli.ClientConfig{ + ClientId: oauthClientName, + AuthorizeUrl: fmt.Sprintf("%s/oauth/authorize", oauthServerURL), // TODO: the endpoints are defined in vendor/github.com/openshift/library-go/pkg/oauth/oauthdiscovery/urls.go + TokenUrl: fmt.Sprintf("%s/oauth/token", oauthServerURL), + RedirectUrl: fmt.Sprintf("%s/oauth/token/implicit", oauthServerURL), + } + if err := osincli.PopulatePKCE(oauthClientConfig); err != nil { + panic(err) + } + options.OsinConfig = oauthClientConfig + options.Issuer = oauthServerURL + return options +} + +func createClusterRoleBinding(oc *exutil.CLI) (*rbacv1.ClusterRoleBinding, error) { + return oc.AdminKubeClient().RbacV1().ClusterRoleBindings().Create(context.Background(), &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: oc.Namespace(), + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: "cluster-admin", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: SAName, + Namespace: oc.Namespace(), + }, + }, + }, metav1.CreateOptions{}) +} + +func createOAuthClient(oc *exutil.CLI, routeURL string) (*oauthv1.OAuthClient, error) { + return oc.AdminOAuthClient().OauthV1().OAuthClients(). + Create(context.Background(), &oauthv1.OAuthClient{ + ObjectMeta: metav1.ObjectMeta{ + Name: oc.Namespace(), + }, + GrantMethod: oauthv1.GrantHandlerAuto, + RedirectURIs: []string{fmt.Sprintf("%s/oauth/token/implicit", routeURL)}, + RespondWithChallenges: true, + }, metav1.CreateOptions{}) +} diff --git a/test/util/oauthserver/tokencmd/basicauth.go b/test/util/oauthserver/tokencmd/basicauth.go new file mode 100644 index 000000000..53eb505d0 --- /dev/null +++ b/test/util/oauthserver/tokencmd/basicauth.go @@ -0,0 +1,110 @@ +package tokencmd + +import ( + "encoding/base64" + "fmt" + "io" + "net/http" + "regexp" + "strings" + + "k8s.io/klog" +) + +type BasicChallengeHandler struct { + // Host is the server being authenticated to. Used only for displaying messages when prompting for username/password + Host string + + // Reader is used to prompt for username/password. If nil, no prompting is done + Reader io.Reader + // Writer is used to output prompts. If nil, stdout is used + Writer io.Writer + + // Username is the username to use when challenged. If empty, a prompt is issued to a non-nil Reader + Username string + // Password is the password to use when challenged. If empty, a prompt is issued to a non-nil Reader + Password string + + // handled tracks whether this handler has already handled a challenge. + handled bool + // prompted tracks whether this handler has already prompted for a username and/or password. 
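+	// Note: in this copy prompting is not implemented (HandleChallenge panics
+	// with "unsupported" when it would need to read from Reader), so prompted
+	// exists only to mirror the upstream handler's bookkeeping.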
+ prompted bool +} + +func (c *BasicChallengeHandler) CanHandle(headers http.Header) bool { + isBasic, _ := basicRealm(headers) + return isBasic +} +func (c *BasicChallengeHandler) HandleChallenge(requestURL string, headers http.Header) (http.Header, bool, error) { + if c.prompted { + klog.V(2).Info("already prompted for challenge, won't prompt again") + return nil, false, nil + } + if c.handled { + klog.V(2).Info("already handled basic challenge") + return nil, false, nil + } + + username := c.Username + password := c.Password + + missingUsername := len(username) == 0 + missingPassword := len(password) == 0 + + if (missingUsername || missingPassword) && c.Reader != nil { + panic("unsupported") + } + + if len(username) > 0 || len(password) > 0 { + // Basic auth does not support usernames containing colons + // http://tools.ietf.org/html/rfc2617#section-2 + if strings.Contains(username, ":") { + return nil, false, fmt.Errorf("username %s is invalid for basic auth", username) + } + responseHeaders := http.Header{} + responseHeaders.Set("Authorization", getBasicHeader(username, password)) + // remember so we don't re-handle non-interactively + c.handled = true + return responseHeaders, true, nil + } + + klog.V(2).Info("no username or password available") + return nil, false, nil +} +func (c *BasicChallengeHandler) CompleteChallenge(requestURL string, headers http.Header) error { + return nil +} + +func (c *BasicChallengeHandler) Release() error { + return nil +} + +// if any of these match a WWW-Authenticate header, it is a basic challenge +// capturing group 1 (if present) should contain the realm +var basicRegexes = []*regexp.Regexp{ + // quoted realm + regexp.MustCompile(`(?i)^\s*basic\s+realm\s*=\s*"(.*?)"\s*(,|$)`), + // token realm + regexp.MustCompile(`(?i)^\s*basic\s+realm\s*=\s*(.*?)\s*(,|$)`), + // no realm + regexp.MustCompile(`(?i)^\s*basic(?:\s+|$)`), +} + +func basicRealm(headers http.Header) (bool, string) { + for _, challengeHeader := range headers[http.CanonicalHeaderKey("WWW-Authenticate")] { + for _, r := range basicRegexes { + if matches := r.FindStringSubmatch(challengeHeader); matches != nil { + if len(matches) > 1 { + // We got a realm as well + return true, matches[1] + } + // No realm, but still basic + return true, "" + } + } + } + return false, "" +} +func getBasicHeader(username, password string) string { + return "Basic " + base64.StdEncoding.EncodeToString([]byte(username+":"+password)) +} diff --git a/test/util/oauthserver/tokencmd/basicauth_test.go b/test/util/oauthserver/tokencmd/basicauth_test.go new file mode 100644 index 000000000..1085eb04e --- /dev/null +++ b/test/util/oauthserver/tokencmd/basicauth_test.go @@ -0,0 +1,363 @@ +package tokencmd + +import ( + "bytes" + "errors" + "net/http" + "reflect" + "testing" +) + +var ( + AUTHORIZATION = http.CanonicalHeaderKey("Authorization") + WWW_AUTHENTICATE = http.CanonicalHeaderKey("WWW-Authenticate") +) + +type Challenge struct { + Headers http.Header + + ExpectedCanHandle bool + ExpectedHeaders http.Header + ExpectedHandled bool + ExpectedErr error + ExpectedPrompt string +} + +func TestHandleChallenge(t *testing.T) { + + basicChallenge := http.Header{WWW_AUTHENTICATE: []string{`Basic realm="myrealm"`}} + + testCases := map[string]struct { + Handler *BasicChallengeHandler + Challenges []Challenge + }{ + "non-interactive with no defaults": { + Handler: &BasicChallengeHandler{ + Host: "myhost", + Reader: nil, + Username: "", + Password: "", + }, + Challenges: []Challenge{ + { + Headers: basicChallenge, + 
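+				// no credentials and no prompting: the handler recognizes the basic
+				// challenge but declines to retry (nil headers, handled=false)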
ExpectedCanHandle: true, + ExpectedHeaders: nil, + ExpectedHandled: false, + ExpectedErr: nil, + ExpectedPrompt: "", + }, + }, + }, + + "non-interactive challenge with defaults": { + Handler: &BasicChallengeHandler{ + Host: "myhost", + Reader: nil, + Username: "myuser", + Password: "mypassword", + }, + Challenges: []Challenge{ + { + Headers: basicChallenge, + ExpectedCanHandle: true, + ExpectedHeaders: http.Header{AUTHORIZATION: []string{getBasicHeader("myuser", "mypassword")}}, + ExpectedHandled: true, + ExpectedErr: nil, + ExpectedPrompt: "", + }, + { + Headers: basicChallenge, + ExpectedCanHandle: true, + ExpectedHeaders: nil, + ExpectedHandled: false, + ExpectedErr: nil, + ExpectedPrompt: "", + }, + }, + }, + + "interactive challenge with default user": { + Handler: &BasicChallengeHandler{ + Host: "myhost", + Reader: bytes.NewBufferString("mypassword\n"), + Username: "myuser", + Password: "", + }, + Challenges: []Challenge{ + { + Headers: basicChallenge, + ExpectedCanHandle: true, + ExpectedHeaders: http.Header{AUTHORIZATION: []string{getBasicHeader("myuser", "mypassword")}}, + ExpectedHandled: true, + ExpectedErr: nil, + ExpectedPrompt: `Authentication required for myhost (myrealm) +Username: myuser +Password: `, + }, + }, + }, + + "interactive challenge": { + Handler: &BasicChallengeHandler{ + Host: "myhost", + Reader: bytes.NewBufferString("myuser\nmypassword\n"), + Username: "", + Password: "", + }, + Challenges: []Challenge{ + { + Headers: basicChallenge, + ExpectedCanHandle: true, + ExpectedHeaders: http.Header{AUTHORIZATION: []string{getBasicHeader("myuser", "mypassword")}}, + ExpectedHandled: true, + ExpectedErr: nil, + ExpectedPrompt: `Authentication required for myhost (myrealm) +Username: Password: `, + }, + { + Headers: basicChallenge, + ExpectedCanHandle: true, + ExpectedHeaders: nil, + ExpectedHandled: false, + ExpectedErr: nil, + ExpectedPrompt: ``, + }, + }, + }, + + "non-interactive challenge with reader defaults": { + Handler: &BasicChallengeHandler{ + Host: "myhost", + Reader: bytes.NewBufferString(""), + Username: "myuser", + Password: "mypassword", + }, + Challenges: []Challenge{ + { + Headers: basicChallenge, + ExpectedCanHandle: true, + ExpectedHeaders: http.Header{AUTHORIZATION: []string{getBasicHeader("myuser", "mypassword")}}, + ExpectedHandled: true, + ExpectedErr: nil, + ExpectedPrompt: "", + }, + { + Headers: basicChallenge, + ExpectedCanHandle: true, + ExpectedHeaders: nil, + ExpectedHandled: false, + ExpectedErr: nil, + ExpectedPrompt: "", + }, + }, + }, + + "invalid basic auth username": { + Handler: &BasicChallengeHandler{ + Host: "myhost", + Reader: bytes.NewBufferString(""), + Username: "system:admin", + Password: "mypassword", + }, + Challenges: []Challenge{ + { + Headers: basicChallenge, + ExpectedCanHandle: true, + ExpectedHeaders: nil, + ExpectedHandled: false, + ExpectedErr: errors.New("username system:admin is invalid for basic auth"), + ExpectedPrompt: "", + }, + }, + }, + "invalid basic auth username prompt": { + Handler: &BasicChallengeHandler{ + Host: "myhost", + Reader: bytes.NewBufferString(``), + Username: "system:admin", + Password: "", + }, + Challenges: []Challenge{ + { + Headers: basicChallenge, + ExpectedCanHandle: true, + ExpectedHeaders: nil, + ExpectedHandled: false, + ExpectedErr: errors.New("username system:admin is invalid for basic auth"), + ExpectedPrompt: `Authentication required for myhost (myrealm) +Username: system:admin +Password: `, + }, + }, + }, + } + + for k, tc := range testCases { + for i, challenge := range 
tc.Challenges { + out := &bytes.Buffer{} + tc.Handler.Writer = out + + canHandle := tc.Handler.CanHandle(challenge.Headers) + if canHandle != challenge.ExpectedCanHandle { + t.Errorf("%s: %d: Expected CanHandle=%v, got %v", k, i, challenge.ExpectedCanHandle, canHandle) + } + + if canHandle { + headers, handled, err := tc.Handler.HandleChallenge("", challenge.Headers) + if !reflect.DeepEqual(headers, challenge.ExpectedHeaders) { + t.Errorf("%s: %d: Expected headers\n\t%#v\ngot\n\t%#v", k, i, challenge.ExpectedHeaders, headers) + } + if handled != challenge.ExpectedHandled { + t.Errorf("%s: %d: Expected handled=%v, got %v", k, i, challenge.ExpectedHandled, handled) + } + if ((err == nil) != (challenge.ExpectedErr == nil)) || (err != nil && err.Error() != challenge.ExpectedErr.Error()) { + t.Errorf("%s: %d: Expected err=%v, got %v", k, i, challenge.ExpectedErr, err) + } + if out.String() != challenge.ExpectedPrompt { + t.Errorf("%s: %d: Expected prompt %q, got %q", k, i, challenge.ExpectedPrompt, out.String()) + } + } + } + } +} + +func TestBasicRealm(t *testing.T) { + + testCases := map[string]struct { + Headers http.Header + ExpectedBasic bool + ExpectedRealm string + }{ + "empty": { + Headers: http.Header{}, + ExpectedBasic: false, + ExpectedRealm: ``, + }, + + "non-challenge": { + Headers: http.Header{ + "test": []string{`value`}, + }, + ExpectedBasic: false, + ExpectedRealm: ``, + }, + + "non-basic": { + Headers: http.Header{ + WWW_AUTHENTICATE: []string{ + `basicrealm="myrealm"`, + `digest basic="realm"`, + }, + }, + ExpectedBasic: false, + ExpectedRealm: ``, + }, + + "basic multiple www-authenticate headers": { + Headers: http.Header{ + WWW_AUTHENTICATE: []string{ + `digest realm="digestrealm"`, + `basic realm="Foo"`, + `foo bar="baz"`, + }, + }, + ExpectedBasic: true, + ExpectedRealm: `Foo`, + }, + + "basic no realm": { + Headers: http.Header{ + WWW_AUTHENTICATE: []string{`basic`}, + }, + ExpectedBasic: true, + ExpectedRealm: ``, + }, + + "basic other param": { + Headers: http.Header{ + WWW_AUTHENTICATE: []string{`basic otherparam="othervalue"`}, + }, + ExpectedBasic: true, + ExpectedRealm: ``, + }, + + "basic token realm": { + Headers: http.Header{ + WWW_AUTHENTICATE: []string{`basic realm=Foo Bar `}, + }, + ExpectedBasic: true, + ExpectedRealm: `Foo Bar`, + }, + + "basic quoted realm": { + Headers: http.Header{ + WWW_AUTHENTICATE: []string{`basic realm="Foo Bar"`}, + }, + ExpectedBasic: true, + ExpectedRealm: `Foo Bar`, + }, + + "basic case-insensitive scheme": { + Headers: http.Header{ + WWW_AUTHENTICATE: []string{`BASIC realm="Foo"`}, + }, + ExpectedBasic: true, + ExpectedRealm: `Foo`, + }, + + "basic case-insensitive realm": { + Headers: http.Header{ + WWW_AUTHENTICATE: []string{`basic REALM="Foo"`}, + }, + ExpectedBasic: true, + ExpectedRealm: `Foo`, + }, + + "basic whitespace": { + Headers: http.Header{ + WWW_AUTHENTICATE: []string{` basic realm = "Foo\" Bar" `}, + }, + ExpectedBasic: true, + ExpectedRealm: `Foo\" Bar`, + }, + + "basic trailing comma": { + Headers: http.Header{ + WWW_AUTHENTICATE: []string{`basic realm="Foo", otherparam="value"`}, + }, + ExpectedBasic: true, + ExpectedRealm: `Foo`, + }, + + "realm containing quotes": { + Headers: http.Header{ + WWW_AUTHENTICATE: []string{`basic realm="F\"oo", otherparam="value"`}, + }, + ExpectedBasic: true, + ExpectedRealm: `F\"oo`, + }, + + "realm containing comma": { + Headers: http.Header{ + WWW_AUTHENTICATE: []string{`basic realm="Foo, bar", otherparam="value"`}, + }, + ExpectedBasic: true, + ExpectedRealm: `Foo, bar`, + 
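+			// kept intact because the quoted-realm regexp is tried before the
+			// token-realm one, so the comma is not treated as a delimiter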
}, + + // TODO: additional forms to support + // Basic param="value", realm="myrealm" + // Digest, Basic param="value", realm="myrealm" + } + + for k, tc := range testCases { + isBasic, realm := basicRealm(tc.Headers) + if isBasic != tc.ExpectedBasic { + t.Errorf("%s: Expected isBasicChallenge=%v, got %v", k, tc.ExpectedBasic, isBasic) + } + if realm != tc.ExpectedRealm { + t.Errorf("%s: Expected realm=%q, got %q", k, tc.ExpectedRealm, realm) + } + } +} diff --git a/test/util/oauthserver/tokencmd/multi.go b/test/util/oauthserver/tokencmd/multi.go new file mode 100644 index 000000000..8f2094a11 --- /dev/null +++ b/test/util/oauthserver/tokencmd/multi.go @@ -0,0 +1,105 @@ +package tokencmd + +import ( + "net/http" + + "k8s.io/klog" + + apierrs "k8s.io/apimachinery/pkg/api/errors" + utilerrors "k8s.io/apimachinery/pkg/util/errors" +) + +var _ = ChallengeHandler(&MultiHandler{}) + +// MultiHandler manages a series of authentication challenges +// it is single-use only, and not thread-safe +type MultiHandler struct { + // handler holds the selected handler. + // automatically populated with the first handler to successfully respond to HandleChallenge(), + // and used exclusively by CanHandle() and HandleChallenge() from that point forward. + handler ChallengeHandler + + // possibleHandlers holds handlers that could handle subsequent challenges. + // filtered down during HandleChallenge() by calling CanHandle() on each item. + possibleHandlers []ChallengeHandler + + // allHandlers holds all handlers, for purposes of delegating Release() calls + allHandlers []ChallengeHandler +} + +func NewMultiHandler(handlers ...ChallengeHandler) ChallengeHandler { + return &MultiHandler{ + possibleHandlers: handlers, + allHandlers: handlers, + } +} + +func (h *MultiHandler) CanHandle(headers http.Header) bool { + // If we've already selected a handler, it alone can decide whether we can handle the current request + if h.handler != nil { + return h.handler.CanHandle(headers) + } + + // Otherwise, return true if any of our handlers can handle this request + for _, handler := range h.possibleHandlers { + if handler.CanHandle(headers) { + return true + } + } + + return false +} + +func (h *MultiHandler) HandleChallenge(requestURL string, headers http.Header) (http.Header, bool, error) { + // If we've already selected a handler, it alone can handle all subsequent challenges (don't change horses in mid-stream) + if h.handler != nil { + return h.handler.HandleChallenge(requestURL, headers) + } + + // Otherwise, filter our list of handlers to the ones that can handle this request + applicable := []ChallengeHandler{} + for _, handler := range h.possibleHandlers { + if handler.CanHandle(headers) { + applicable = append(applicable, handler) + } + } + h.possibleHandlers = applicable + + // Then select the first available handler that successfully handles the request + var ( + retryHeaders http.Header + retry bool + err error + ) + for i, handler := range h.possibleHandlers { + retryHeaders, retry, err = handler.HandleChallenge(requestURL, headers) + + if err != nil { + klog.V(5).Infof("handler[%d] error: %v", i, err) + } + // If the handler successfully handled the challenge, or we have no other options, select it as our handler + if err == nil || i == len(h.possibleHandlers)-1 { + h.handler = handler + return retryHeaders, retry, err + } + } + + return nil, false, apierrs.NewUnauthorized("unhandled challenge") +} + +func (h *MultiHandler) CompleteChallenge(requestURL string, headers http.Header) error { + if h.handler != 
nil { + return h.handler.CompleteChallenge(requestURL, headers) + } + return nil +} + +func (h *MultiHandler) Release() error { + var errs []error + for _, handler := range h.allHandlers { + if err := handler.Release(); err != nil { + errs = append(errs, err) + } + } + return utilerrors.NewAggregate(errs) +} diff --git a/test/util/oauthserver/tokencmd/negotiate.go b/test/util/oauthserver/tokencmd/negotiate.go new file mode 100644 index 000000000..5d595a48a --- /dev/null +++ b/test/util/oauthserver/tokencmd/negotiate.go @@ -0,0 +1,121 @@ +package tokencmd + +import ( + "encoding/base64" + "errors" + "net/http" + "strings" + + "k8s.io/klog" +) + +// Negotiator defines the minimal interface needed to interact with GSSAPI to perform a negotiate challenge/response +type Negotiator interface { + // Load gives the negotiator a chance to load any resources needed to handle a challenge/response sequence. + // It may be invoked multiple times. If an error is returned, InitSecContext and IsComplete are not called, but Release() is. + Load() error + // InitSecContext returns the response token for a Negotiate challenge token from a given URL, + // or an error if no response token could be obtained or the incoming token is invalid. + InitSecContext(requestURL string, challengeToken []byte) (tokenToSend []byte, err error) + // IsComplete returns true if the negotiator is satisfied with the negotiation. + // This typically means gssapi returned GSS_S_COMPLETE to an initSecContext call. + IsComplete() bool + // Release gives the negotiator a chance to release any resources held during a challenge/response sequence. + // It is always invoked, even in cases where no challenges were received or handled. + Release() error +} + +// NegotiateChallengeHandler manages a challenge negotiation session +// it is single-host, single-use only, and not thread-safe +type NegotiateChallengeHandler struct { + negotiator Negotiator +} + +func NewNegotiateChallengeHandler(negotiator Negotiator) ChallengeHandler { + return &NegotiateChallengeHandler{negotiator: negotiator} +} + +func (c *NegotiateChallengeHandler) CanHandle(headers http.Header) bool { + // Make sure this is a negotiate request + if isNegotiate, _, err := getNegotiateToken(headers); err != nil || !isNegotiate { + return false + } + // Make sure our negotiator can initialize + if err := c.negotiator.Load(); err != nil { + return false + } + return true +} + +func (c *NegotiateChallengeHandler) HandleChallenge(requestURL string, headers http.Header) (http.Header, bool, error) { + // Get incoming token + _, incomingToken, err := getNegotiateToken(headers) + if err != nil { + return nil, false, err + } + + // Process the token + outgoingToken, err := c.negotiator.InitSecContext(requestURL, incomingToken) + if err != nil { + klog.V(5).Infof("InitSecContext returned error: %v", err) + return nil, false, err + } + + // Build the response headers + responseHeaders := http.Header{} + responseHeaders.Set("Authorization", "Negotiate "+base64.StdEncoding.EncodeToString(outgoingToken)) + return responseHeaders, true, nil +} + +func (c *NegotiateChallengeHandler) CompleteChallenge(requestURL string, headers http.Header) error { + if c.negotiator.IsComplete() { + return nil + } + klog.V(5).Infof("continue needed") + + // Get incoming token + isNegotiate, incomingToken, err := getNegotiateToken(headers) + if err != nil { + return err + } + if !isNegotiate { + return errors.New("client requires final negotiate token, none provided") + } + + // Process the token + _, err = 
c.negotiator.InitSecContext(requestURL, incomingToken) + if err != nil { + klog.V(5).Infof("InitSecContext returned error during final negotiation: %v", err) + return err + } + if !c.negotiator.IsComplete() { + return errors.New("InitSecContext did not indicate final negotiation completed") + } + return nil +} + +func (c *NegotiateChallengeHandler) Release() error { + return c.negotiator.Release() +} + +const negotiateScheme = "negotiate" + +func getNegotiateToken(headers http.Header) (bool, []byte, error) { + for _, challengeHeader := range headers[http.CanonicalHeaderKey("WWW-Authenticate")] { + // TODO: handle WWW-Authenticate headers containing more than one scheme + caseInsensitiveHeader := strings.ToLower(challengeHeader) + if caseInsensitiveHeader == negotiateScheme { + return true, nil, nil + } + if strings.HasPrefix(caseInsensitiveHeader, negotiateScheme+" ") { + payload := challengeHeader[len(negotiateScheme):] + payload = strings.Replace(payload, " ", "", -1) + data, err := base64.StdEncoding.DecodeString(payload) + if err != nil { + return false, nil, err + } + return true, data, nil + } + } + return false, nil, nil +} diff --git a/test/util/oauthserver/tokencmd/negotiate_helpers.go b/test/util/oauthserver/tokencmd/negotiate_helpers.go new file mode 100644 index 000000000..e1d061db3 --- /dev/null +++ b/test/util/oauthserver/tokencmd/negotiate_helpers.go @@ -0,0 +1,39 @@ +package tokencmd + +import ( + "errors" + "net/url" +) + +func getServiceName(sep rune, requestURL string) (string, error) { + u, err := url.Parse(requestURL) + if err != nil { + return "", err + } + + return "HTTP" + string(sep) + u.Hostname(), nil +} + +type negotiateUnsupported struct { + error +} + +func newUnsupportedNegotiator(name string) Negotiator { + return &negotiateUnsupported{error: errors.New(name + " support is not enabled")} +} + +func (n *negotiateUnsupported) Load() error { + return n +} + +func (n *negotiateUnsupported) InitSecContext(requestURL string, challengeToken []byte) ([]byte, error) { + return nil, n +} + +func (*negotiateUnsupported) IsComplete() bool { + return false +} + +func (n *negotiateUnsupported) Release() error { + return n +} diff --git a/test/util/oauthserver/tokencmd/negotiator_gssapi_unsupported.go b/test/util/oauthserver/tokencmd/negotiator_gssapi_unsupported.go new file mode 100644 index 000000000..85426017e --- /dev/null +++ b/test/util/oauthserver/tokencmd/negotiator_gssapi_unsupported.go @@ -0,0 +1,9 @@ +package tokencmd + +func GSSAPIEnabled() bool { + return false +} + +func NewGSSAPINegotiator(string) Negotiator { + return newUnsupportedNegotiator("GSSAPI") +} diff --git a/test/util/oauthserver/tokencmd/negotiator_sspi_unsupported.go b/test/util/oauthserver/tokencmd/negotiator_sspi_unsupported.go new file mode 100644 index 000000000..36df93fdc --- /dev/null +++ b/test/util/oauthserver/tokencmd/negotiator_sspi_unsupported.go @@ -0,0 +1,11 @@ +package tokencmd + +import "io" + +func SSPIEnabled() bool { + return false +} + +func NewSSPINegotiator(string, string, string, io.Reader) Negotiator { + return newUnsupportedNegotiator("SSPI") +} diff --git a/test/util/oauthserver/tokencmd/request_token.go b/test/util/oauthserver/tokencmd/request_token.go new file mode 100644 index 000000000..0aac44e0c --- /dev/null +++ b/test/util/oauthserver/tokencmd/request_token.go @@ -0,0 +1,451 @@ +package tokencmd + +import ( + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + + "github.com/RangelReale/osincli" 
+ + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + restclient "k8s.io/client-go/rest" + "k8s.io/klog" + + "github.com/openshift/library-go/pkg/oauth/oauthdiscovery" +) + +const ( + // csrfTokenHeader is a marker header that indicates we are not a browser that got tricked into requesting basic auth + // Corresponds to the header expected by basic-auth challenging authenticators + // Copied from pkg/auth/authenticator/challenger/passwordchallenger/password_auth_handler.go + csrfTokenHeader = "X-CSRF-Token" + + // Discovery endpoint for OAuth 2.0 Authorization Server Metadata + // See IETF Draft: + // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + // Copied from pkg/cmd/server/origin/nonapiserver.go + oauthMetadataEndpoint = "/.well-known/oauth-authorization-server" + + // openShiftCLIClientID is the name of the CLI OAuth client, copied from pkg/oauth/apiserver/auth.go + openShiftCLIClientID = "openshift-challenging-client" + + // pkce_s256 is sha256 hash per RFC7636, copied from github.com/RangelReale/osincli/pkce.go + pkce_s256 = "S256" + + // token fakes the missing osin.TOKEN const + token osincli.AuthorizeRequestType = "token" +) + +// ChallengeHandler handles responses to WWW-Authenticate challenges. +type ChallengeHandler interface { + // CanHandle returns true if the handler recognizes a challenge it thinks it can handle. + CanHandle(headers http.Header) bool + // HandleChallenge lets the handler attempt to handle a challenge. + // It is only invoked if CanHandle() returned true for the given headers. + // Returns response headers and true if the challenge is successfully handled. + // Returns false if the challenge was not handled, and an optional error in error cases. + HandleChallenge(requestURL string, headers http.Header) (http.Header, bool, error) + // CompleteChallenge is invoked with the headers from a successful server response + // received after having handled one or more challenges. + // Returns an error if the handler does not consider the challenge/response interaction complete. + CompleteChallenge(requestURL string, headers http.Header) error + // Release gives the handler a chance to release any resources held during a challenge/response sequence. + // It is always invoked, even in cases where no challenges were received or handled. + Release() error +} + +type RequestTokenOptions struct { + ClientConfig *restclient.Config + Handler ChallengeHandler + OsinConfig *osincli.ClientConfig + Issuer string + TokenFlow bool +} + +// RequestToken uses the cmd arguments to locate an openshift oauth server and attempts to authenticate via an +// OAuth code flow and challenge handling. It returns the access token if it gets one or an error if it does not. 
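+//
+// A minimal illustrative call site (everything other than RequestToken itself
+// is an assumption about the caller's environment):
+//
+//	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
+//	if err != nil {
+//		// handle error
+//	}
+//	token, err := RequestToken(cfg, os.Stdin, "myuser", "mypassword")
+//	if err != nil {
+//		// handle error
+//	}
+//	cfg.BearerToken = token // authenticate subsequent requests as myuser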
+func RequestToken(clientCfg *restclient.Config, reader io.Reader, defaultUsername string, defaultPassword string) (string, error) { + return NewRequestTokenOptions(clientCfg, reader, defaultUsername, defaultPassword, false).RequestToken() +} + +func NewRequestTokenOptions(clientCfg *restclient.Config, reader io.Reader, defaultUsername string, defaultPassword string, tokenFlow bool) *RequestTokenOptions { + // priority ordered list of challenge handlers + // the SPNEGO ones must come before basic auth + var handlers []ChallengeHandler + + if GSSAPIEnabled() { + klog.V(6).Info("GSSAPI Enabled") + handlers = append(handlers, NewNegotiateChallengeHandler(NewGSSAPINegotiator(defaultUsername))) + } + + if SSPIEnabled() { + klog.V(6).Info("SSPI Enabled") + handlers = append(handlers, NewNegotiateChallengeHandler(NewSSPINegotiator(defaultUsername, defaultPassword, clientCfg.Host, reader))) + } + + handlers = append(handlers, &BasicChallengeHandler{Host: clientCfg.Host, Reader: reader, Username: defaultUsername, Password: defaultPassword}) + + var handler ChallengeHandler + if len(handlers) == 1 { + handler = handlers[0] + } else { + handler = NewMultiHandler(handlers...) + } + + return &RequestTokenOptions{ + ClientConfig: clientCfg, + Handler: handler, + TokenFlow: tokenFlow, + } +} + +// SetDefaultOsinConfig overwrites RequestTokenOptions.OsinConfig with the default CLI +// OAuth client and PKCE support if the server supports S256 / a code flow is being used +func (o *RequestTokenOptions) SetDefaultOsinConfig() error { + if o.OsinConfig != nil { + return fmt.Errorf("osin config is already set to: %#v", *o.OsinConfig) + } + + // get the OAuth metadata directly from the api server + // we only want to use the ca data from our config + rt, err := restclient.TransportFor(o.ClientConfig) + if err != nil { + return err + } + + requestURL := strings.TrimRight(o.ClientConfig.Host, "/") + oauthMetadataEndpoint + resp, err := request(rt, requestURL, nil) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("couldn't get %v: unexpected response status %v", requestURL, resp.StatusCode) + } + + metadata := &oauthdiscovery.OauthAuthorizationServerMetadata{} + if err := json.NewDecoder(resp.Body).Decode(metadata); err != nil { + return err + } + + // use the metadata to build the osin config + config := &osincli.ClientConfig{ + ClientId: openShiftCLIClientID, + AuthorizeUrl: metadata.AuthorizationEndpoint, + TokenUrl: metadata.TokenEndpoint, + RedirectUrl: oauthdiscovery.OpenShiftOAuthTokenImplicitURL(metadata.Issuer), + } + if !o.TokenFlow && sets.NewString(metadata.CodeChallengeMethodsSupported...).Has(pkce_s256) { + if err := osincli.PopulatePKCE(config); err != nil { + return err + } + } + + o.OsinConfig = config + o.Issuer = metadata.Issuer + return nil +} + +// RequestToken locates an openshift oauth server and attempts to authenticate. +// It returns the access token if it gets one, or an error if it does not. +// It should only be invoked once on a given RequestTokenOptions instance. +// The Handler held by the options is released as part of this call. +// If RequestTokenOptions.OsinConfig is nil, it will be defaulted using SetDefaultOsinConfig. +// The caller is responsible for setting up the entire OsinConfig if the value is not nil. 
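+//
+// A sketch of driving the implicit (token) flow explicitly instead of the
+// default code flow (option values are placeholders):
+//
+//	opts := NewRequestTokenOptions(cfg, nil, "user", "pass", true) // tokenFlow=true
+//	token, err := opts.RequestToken()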
+func (o *RequestTokenOptions) RequestToken() (string, error) { + defer func() { + // Always release the handler + if err := o.Handler.Release(); err != nil { + // Release errors shouldn't fail the token request, just log + klog.V(4).Infof("error releasing handler: %v", err) + } + }() + + if o.OsinConfig == nil { + if err := o.SetDefaultOsinConfig(); err != nil { + return "", err + } + } + + // we are going to use this transport to talk + // with a server that may not be the api server + // thus we need to include the system roots + // in our ca data otherwise an external + // oauth server with a valid cert will fail with + // error: x509: certificate signed by unknown authority + rt, err := transportWithSystemRoots(o.Issuer, o.ClientConfig) + if err != nil { + return "", err + } + + client, err := osincli.NewClient(o.OsinConfig) + if err != nil { + return "", err + } + client.Transport = rt + authorizeRequest := client.NewAuthorizeRequest(osincli.CODE) // assume code flow to start with + + var oauthTokenFunc func(redirectURL string) (accessToken string, oauthError error) + if o.TokenFlow { + // access_token in fragment or error parameter + authorizeRequest.Type = token // manually override to token flow if necessary + oauthTokenFunc = oauthTokenFlow + } else { + // code or error parameter + oauthTokenFunc = func(redirectURL string) (accessToken string, oauthError error) { + return oauthCodeFlow(client, authorizeRequest, redirectURL) + } + } + + // requestURL holds the current URL to make requests to. This can change if the server responds with a redirect + requestURL := authorizeRequest.GetAuthorizeUrl().String() + // requestHeaders holds additional headers to add to the request. This can be changed by o.Handlers + requestHeaders := http.Header{} + // requestedURLSet/requestedURLList hold the URLs we have requested, to prevent redirect loops. Gets reset when a challenge is handled. + requestedURLSet := sets.NewString() + requestedURLList := []string{} + handledChallenge := false + + for { + // Make the request + resp, err := request(rt, requestURL, requestHeaders) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusUnauthorized { + if resp.Header.Get("WWW-Authenticate") != "" { + if !o.Handler.CanHandle(resp.Header) { + return "", apierrs.NewUnauthorized("unhandled challenge") + } + // Handle the challenge + newRequestHeaders, shouldRetry, err := o.Handler.HandleChallenge(requestURL, resp.Header) + if err != nil { + return "", err + } + if !shouldRetry { + return "", apierrs.NewUnauthorized("challenger chose not to retry the request") + } + // Remember if we've ever handled a challenge + handledChallenge = true + + // Reset request set/list. Since we're setting different headers, it is legitimate to request the same urls + requestedURLSet = sets.NewString() + requestedURLList = []string{} + // Use the response to the challenge as the new headers + requestHeaders = newRequestHeaders + continue + } + + // Unauthorized with no challenge + unauthorizedError := apierrs.NewUnauthorized("") + // Attempt to read body content and include as an error detail + if details, err := ioutil.ReadAll(resp.Body); err == nil && len(details) > 0 { + unauthorizedError.ErrStatus.Details = &metav1.StatusDetails{ + Causes: []metav1.StatusCause{ + {Message: string(details)}, + }, + } + } + + return "", unauthorizedError + } + + // if we've ever handled a challenge, see if the handler also considers the interaction complete. 
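+		// CompleteChallenge also lets the handler reject a response the server
+		// already accepted, e.g. when a final negotiate token fails verification;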
+ // this is required for negotiate flows with mutual authentication. + if handledChallenge { + if err := o.Handler.CompleteChallenge(requestURL, resp.Header); err != nil { + return "", err + } + } + + if resp.StatusCode == http.StatusFound { + redirectURL := resp.Header.Get("Location") + + // OAuth response case + accessToken, err := oauthTokenFunc(redirectURL) + if err != nil { + return "", err + } + if len(accessToken) > 0 { + return accessToken, nil + } + + // Non-OAuth response, just follow the URL + // add to our list of redirects + requestedURLList = append(requestedURLList, redirectURL) + // detect loops + if !requestedURLSet.Has(redirectURL) { + requestedURLSet.Insert(redirectURL) + requestURL = redirectURL + continue + } + return "", apierrs.NewInternalError(fmt.Errorf("redirect loop: %s", strings.Join(requestedURLList, " -> "))) + } + + // Unknown response + return "", apierrs.NewInternalError(fmt.Errorf("unexpected response: %d", resp.StatusCode)) + } +} + +// oauthTokenFlow attempts to extract an OAuth token from location's fragment's access_token value. +// It only returns an error if something "impossible" happens (location is not a valid URL) or a definite +// OAuth error is contained in the location URL. No error is returned if location does not contain a token. +// It is assumed that location was not part of the OAuth flow; it was a redirect that the client needs to follow +// as part of the challenge flow (an authenticating proxy for example) and not a redirect step in the OAuth flow. +func oauthTokenFlow(location string) (string, error) { + u, err := url.Parse(location) + if err != nil { + return "", err + } + + if oauthErr := oauthErrFromValues(u.Query()); oauthErr != nil { + return "", oauthErr + } + + // Grab the raw fragment ourselves, since the stdlib URL parsing decodes parts of it + fragment := "" + if parts := strings.SplitN(location, "#", 2); len(parts) == 2 { + fragment = parts[1] + } + fragmentValues, err := url.ParseQuery(fragment) + if err != nil { + return "", err + } + + return fragmentValues.Get("access_token"), nil +} + +// oauthCodeFlow performs the OAuth code flow if location has a code parameter. +// It only returns an error if something "impossible" happens (location is not a valid URL) +// or a definite OAuth error is encountered during the code flow. Other errors are assumed to be caused +// by location not being part of the OAuth flow; it was a redirect that the client needs to follow as part +// of the challenge flow (an authenticating proxy for example) and not a redirect step in the OAuth flow. 
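+//
+// For orientation, the two success redirects this file distinguishes look
+// roughly like the following (hosts and values are placeholders):
+//
+//	implicit flow: https://oauth.example.com/oauth/token/implicit#access_token=abc123&token_type=Bearer
+//	code flow:     https://oauth.example.com/oauth/token/implicit?code=xyz789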
+func oauthCodeFlow(client *osincli.Client, authorizeRequest *osincli.AuthorizeRequest, location string) (string, error) {
+	// Make a request out of the URL since that is what AuthorizeRequest.HandleRequest expects to extract data from
+	req, err := http.NewRequest(http.MethodGet, location, nil)
+	if err != nil {
+		return "", err
+	}
+
+	if err := req.ParseForm(); err != nil {
+		return "", err
+	}
+	if oauthErr := oauthErrFromValues(req.Form); oauthErr != nil {
+		return "", oauthErr
+	}
+	if len(req.Form.Get("code")) == 0 {
+		return "", nil // no code parameter so this is not part of the OAuth flow
+	}
+
+	// any errors after this are fatal because we are committed to an OAuth flow now
+	authorizeData, err := authorizeRequest.HandleRequest(req)
+	if err != nil {
+		return "", osinToOAuthError(err)
+	}
+
+	accessRequest := client.NewAccessRequest(osincli.AUTHORIZATION_CODE, authorizeData)
+	accessData, err := accessRequest.GetToken()
+	if err != nil {
+		return "", osinToOAuthError(err)
+	}
+
+	return accessData.AccessToken, nil
+}
+
+// osinToOAuthError creates a better error message for osincli.Error
+func osinToOAuthError(err error) error {
+	if osinErr, ok := err.(*osincli.Error); ok {
+		return createOAuthError(osinErr.Id, osinErr.Description)
+	}
+	return err
+}
+
+func oauthErrFromValues(values url.Values) error {
+	if errorCode := values.Get("error"); len(errorCode) > 0 {
+		errorDescription := values.Get("error_description")
+		return createOAuthError(errorCode, errorDescription)
+	}
+	return nil
+}
+
+func createOAuthError(errorCode, errorDescription string) error {
+	return fmt.Errorf("%s %s", errorCode, errorDescription)
+}
+
+func request(rt http.RoundTripper, requestURL string, requestHeaders http.Header) (*http.Response, error) {
+	// Build the request
+	req, err := http.NewRequest(http.MethodGet, requestURL, nil)
+	if err != nil {
+		return nil, err
+	}
+	for k, v := range requestHeaders {
+		req.Header[k] = v
+	}
+	req.Header.Set(csrfTokenHeader, "1")
+
+	// Make the request
+	return rt.RoundTrip(req)
+}
+
+func transportWithSystemRoots(issuer string, clientConfig *restclient.Config) (http.RoundTripper, error) {
+	// copy the config so we can freely mutate it
+	configWithSystemRoots := restclient.CopyConfig(clientConfig)
+
+	// explicitly unset CA cert information
+	// this will make the transport use the system roots or OS specific verification
+	// this is required to have reasonable behavior on windows (cannot get system roots)
+	// in general there is no good way to say "I want system roots plus this CA bundle"
+	// so we just try system roots first before using the kubeconfig CA bundle
+	configWithSystemRoots.CAFile = ""
+	configWithSystemRoots.CAData = nil
+
+	systemRootsRT, err := restclient.TransportFor(configWithSystemRoots)
+	if err != nil {
+		return nil, err
+	}
+
+	// build a request to probe the OAuth server CA
+	req, err := http.NewRequest(http.MethodHead, issuer, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// see if we get a certificate error when using the system roots
+	// we perform the check using this transport (instead of the kubeconfig based one)
+	// because it is most likely to work with a route (which is what the OAuth server uses in 4.0+)
+	// note that both transports are "safe" to use (in the sense that they have valid TLS configurations)
+	// thus the fallback case is not an "unsafe" operation
+	_, err = systemRootsRT.RoundTrip(req)
+	switch err.(type) {
+	case nil:
+		// no error meaning the system roots work with the OAuth server
+		klog.V(4).Info("using system roots as no error was 
encountered") + return systemRootsRT, nil + case x509.UnknownAuthorityError, x509.HostnameError, x509.CertificateInvalidError, x509.SystemRootsError, + tls.RecordHeaderError, *net.OpError: + // fallback to the CA in the kubeconfig since the system roots did not work + // we are very broad on the errors here to avoid failing when we should fallback + klog.V(4).Infof("falling back to kubeconfig CA due to possible x509 error: %v", err) + return restclient.TransportFor(clientConfig) + default: + switch err { + case io.EOF, io.ErrUnexpectedEOF, io.ErrNoProgress: + // also fallback on various io errors + klog.V(4).Infof("falling back to kubeconfig CA due to possible IO error: %v", err) + return restclient.TransportFor(clientConfig) + } + // unknown error, fail (ideally should never occur) + klog.V(4).Infof("unexpected error during system roots probe: %v", err) + return nil, err + } +} diff --git a/test/util/oauthserver/tokencmd/request_token_test.go b/test/util/oauthserver/tokencmd/request_token_test.go new file mode 100644 index 000000000..61b76030a --- /dev/null +++ b/test/util/oauthserver/tokencmd/request_token_test.go @@ -0,0 +1,729 @@ +package tokencmd + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/http/httptest" + "reflect" + "testing" + + "github.com/RangelReale/osincli" + + "k8s.io/apimachinery/pkg/util/diff" + restclient "k8s.io/client-go/rest" + + "github.com/openshift/library-go/pkg/oauth/oauthdiscovery" +) + +type unloadableNegotiator struct { + releaseCalls int +} + +func (n *unloadableNegotiator) Load() error { + return errors.New("Load failed") +} +func (n *unloadableNegotiator) InitSecContext(requestURL string, challengeToken []byte) (tokenToSend []byte, err error) { + return nil, errors.New("InitSecContext failed") +} +func (n *unloadableNegotiator) IsComplete() bool { + return false +} +func (n *unloadableNegotiator) Release() error { + n.releaseCalls++ + return errors.New("Release failed") +} + +type failingNegotiator struct { + releaseCalls int +} + +func (n *failingNegotiator) Load() error { + return nil +} +func (n *failingNegotiator) InitSecContext(requestURL string, challengeToken []byte) (tokenToSend []byte, err error) { + return nil, errors.New("InitSecContext failed") +} +func (n *failingNegotiator) IsComplete() bool { + return false +} +func (n *failingNegotiator) Release() error { + n.releaseCalls++ + return errors.New("Release failed") +} + +type successfulNegotiator struct { + rounds int + initSecContextCalls int + loadCalls int + releaseCalls int +} + +func (n *successfulNegotiator) Load() error { + n.loadCalls++ + return nil +} +func (n *successfulNegotiator) InitSecContext(requestURL string, challengeToken []byte) (tokenToSend []byte, err error) { + n.initSecContextCalls++ + + if n.initSecContextCalls > n.rounds { + return nil, fmt.Errorf("InitSecContext: expected %d calls, saw %d", n.rounds, n.initSecContextCalls) + } + + if n.initSecContextCalls == 1 { + if len(challengeToken) > 0 { + return nil, errors.New("expected empty token for first challenge") + } + } else { + expectedChallengeToken := fmt.Sprintf("challenge%d", n.initSecContextCalls) + if string(challengeToken) != expectedChallengeToken { + return nil, fmt.Errorf("expected challenge token '%s', got '%s'", expectedChallengeToken, string(challengeToken)) + } + } + + return []byte(fmt.Sprintf("response%d", n.initSecContextCalls)), nil +} +func (n *successfulNegotiator) IsComplete() bool { + return n.initSecContextCalls == n.rounds +} +func (n *successfulNegotiator) 
Release() error { + n.releaseCalls++ + return nil +} + +func TestRequestToken(t *testing.T) { + type req struct { + authorization string + method string + path string + } + type resp struct { + status int + location string + wwwAuthenticate []string + } + + type requestResponse struct { + expectedRequest req + serverResponse resp + } + + var verifyReleased func(test string, handler ChallengeHandler) + verifyReleased = func(test string, handler ChallengeHandler) { + switch handler := handler.(type) { + case *MultiHandler: + for _, subhandler := range handler.allHandlers { + verifyReleased(test, subhandler) + } + case *BasicChallengeHandler: + // we don't care + case *NegotiateChallengeHandler: + switch negotiator := handler.negotiator.(type) { + case *successfulNegotiator: + if negotiator.releaseCalls != 1 { + t.Errorf("%s: expected one call to Release(), saw %d", test, negotiator.releaseCalls) + } + case *failingNegotiator: + if negotiator.releaseCalls != 1 { + t.Errorf("%s: expected one call to Release(), saw %d", test, negotiator.releaseCalls) + } + case *unloadableNegotiator: + if negotiator.releaseCalls != 1 { + t.Errorf("%s: expected one call to Release(), saw %d", test, negotiator.releaseCalls) + } + default: + t.Errorf("%s: unrecognized negotiator: %#v", test, handler) + } + default: + t.Errorf("%s: unrecognized handler: %#v", test, handler) + } + } + + initialHead := req{"", http.MethodHead, "/"} + initialHeadResp := resp{http.StatusInternalServerError, "", nil} // value of status is ignored + + initialRequest := req{} + + basicChallenge1 := resp{401, "", []string{"Basic realm=foo"}} + basicRequest1 := req{"Basic bXl1c2VyOm15cGFzc3dvcmQ=", "", ""} // base64("myuser:mypassword") + basicChallenge2 := resp{401, "", []string{"Basic realm=seriously...foo"}} + + negotiateChallenge1 := resp{401, "", []string{"Negotiate"}} + negotiateRequest1 := req{"Negotiate cmVzcG9uc2Ux", "", ""} // base64("response1") + negotiateChallenge2 := resp{401, "", []string{"Negotiate Y2hhbGxlbmdlMg=="}} // base64("challenge2") + negotiateRequest2 := req{"Negotiate cmVzcG9uc2Uy", "", ""} // base64("response2") + + doubleChallenge := resp{401, "", []string{"Negotiate", "Basic realm=foo"}} + + successfulToken := "12345" + successfulLocation := fmt.Sprintf("/#access_token=%s", successfulToken) + success := resp{302, successfulLocation, nil} + successWithNegotiate := resp{302, successfulLocation, []string{"Negotiate Y2hhbGxlbmdlMg=="}} + + testcases := map[string]struct { + Handler ChallengeHandler + Requests []requestResponse + ExpectedToken string + ExpectedError string + }{ + // Defaulting basic handler + "defaulted basic handler, no challenge, success": { + Handler: &BasicChallengeHandler{Username: "myuser", Password: "mypassword"}, + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, success}, + }, + ExpectedToken: successfulToken, + }, + "defaulted basic handler, basic challenge, success": { + Handler: &BasicChallengeHandler{Username: "myuser", Password: "mypassword"}, + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, basicChallenge1}, + {basicRequest1, success}, + }, + ExpectedToken: successfulToken, + }, + "defaulted basic handler, basic+negotiate challenge, success": { + Handler: &BasicChallengeHandler{Username: "myuser", Password: "mypassword"}, + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, doubleChallenge}, + {basicRequest1, success}, + }, + ExpectedToken: successfulToken, + }, + "defaulted basic 
handler, basic challenge, failure": { + Handler: &BasicChallengeHandler{Username: "myuser", Password: "mypassword"}, + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, basicChallenge1}, + {basicRequest1, basicChallenge2}, + }, + ExpectedError: "challenger chose not to retry the request", + }, + "defaulted basic handler, negotiate challenge, failure": { + Handler: &BasicChallengeHandler{Username: "myuser", Password: "mypassword"}, + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, negotiateChallenge1}, + }, + ExpectedError: "unhandled challenge", + }, + "failing basic handler, basic challenge, failure": { + Handler: &BasicChallengeHandler{}, + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, basicChallenge1}, + }, + ExpectedError: "challenger chose not to retry the request", + }, + + // Prompting basic handler + "prompting basic handler, no challenge, success": { + Handler: &BasicChallengeHandler{Reader: bytes.NewBufferString("myuser\nmypassword\n")}, + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, success}, + }, + ExpectedToken: successfulToken, + }, + "prompting basic handler, basic challenge, success": { + Handler: &BasicChallengeHandler{Reader: bytes.NewBufferString("myuser\nmypassword\n")}, + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, basicChallenge1}, + {basicRequest1, success}, + }, + ExpectedToken: successfulToken, + }, + "prompting basic handler, basic+negotiate challenge, success": { + Handler: &BasicChallengeHandler{Reader: bytes.NewBufferString("myuser\nmypassword\n")}, + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, doubleChallenge}, + {basicRequest1, success}, + }, + ExpectedToken: successfulToken, + }, + "prompting basic handler, basic challenge, failure": { + Handler: &BasicChallengeHandler{Reader: bytes.NewBufferString("myuser\nmypassword\n")}, + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, basicChallenge1}, + {basicRequest1, basicChallenge2}, + }, + ExpectedError: "challenger chose not to retry the request", + }, + "prompting basic handler, negotiate challenge, failure": { + Handler: &BasicChallengeHandler{Reader: bytes.NewBufferString("myuser\nmypassword\n")}, + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, negotiateChallenge1}, + }, + ExpectedError: "unhandled challenge", + }, + + // negotiate handler + "negotiate handler, no challenge, success": { + Handler: &NegotiateChallengeHandler{negotiator: &successfulNegotiator{rounds: 1}}, + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, success}, + }, + ExpectedToken: successfulToken, + }, + "negotiate handler, negotiate challenge, success": { + Handler: &NegotiateChallengeHandler{negotiator: &successfulNegotiator{rounds: 1}}, + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, negotiateChallenge1}, + {negotiateRequest1, success}, + }, + ExpectedToken: successfulToken, + }, + "negotiate handler, negotiate challenge, 2 rounds, success": { + Handler: &NegotiateChallengeHandler{negotiator: &successfulNegotiator{rounds: 2}}, + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, negotiateChallenge1}, + {negotiateRequest1, negotiateChallenge2}, + {negotiateRequest2, success}, + }, + ExpectedToken: successfulToken, + }, + "negotiate handler, negotiate challenge, 
2 rounds, success with mutual auth": { + Handler: &NegotiateChallengeHandler{negotiator: &successfulNegotiator{rounds: 2}}, + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, negotiateChallenge1}, + {negotiateRequest1, successWithNegotiate}, + }, + ExpectedToken: successfulToken, + }, + "negotiate handler, negotiate challenge, 2 rounds expected, server success without client completion": { + Handler: &NegotiateChallengeHandler{negotiator: &successfulNegotiator{rounds: 2}}, + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, negotiateChallenge1}, + {negotiateRequest1, success}, + }, + ExpectedError: "client requires final negotiate token, none provided", + }, + + // Unloadable negotiate handler + "unloadable negotiate handler, no challenge, success": { + Handler: &NegotiateChallengeHandler{negotiator: &unloadableNegotiator{}}, + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, success}, + }, + ExpectedToken: successfulToken, + }, + "unloadable negotiate handler, negotiate challenge, failure": { + Handler: &NegotiateChallengeHandler{negotiator: &unloadableNegotiator{}}, + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, negotiateChallenge1}, + }, + ExpectedError: "unhandled challenge", + }, + "unloadable negotiate handler, basic challenge, failure": { + Handler: &NegotiateChallengeHandler{negotiator: &unloadableNegotiator{}}, + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, basicChallenge1}, + }, + ExpectedError: "unhandled challenge", + }, + + // Failing negotiate handler + "failing negotiate handler, no challenge, success": { + Handler: &NegotiateChallengeHandler{negotiator: &failingNegotiator{}}, + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, success}, + }, + ExpectedToken: successfulToken, + }, + "failing negotiate handler, negotiate challenge, failure": { + Handler: &NegotiateChallengeHandler{negotiator: &failingNegotiator{}}, + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, negotiateChallenge1}, + }, + ExpectedError: "InitSecContext failed", + }, + "failing negotiate handler, basic challenge, failure": { + Handler: &NegotiateChallengeHandler{negotiator: &failingNegotiator{}}, + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, basicChallenge1}, + }, + ExpectedError: "unhandled challenge", + }, + + // Negotiate+Basic fallback cases + "failing negotiate+prompting basic handler, no challenge, success": { + Handler: NewMultiHandler( + &NegotiateChallengeHandler{negotiator: &failingNegotiator{}}, + &BasicChallengeHandler{Reader: bytes.NewBufferString("myuser\nmypassword\n")}, + ), + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, success}, + }, + ExpectedToken: successfulToken, + }, + "failing negotiate+prompting basic handler, negotiate+basic challenge, success": { + Handler: NewMultiHandler( + &NegotiateChallengeHandler{negotiator: &failingNegotiator{}}, + &BasicChallengeHandler{Reader: bytes.NewBufferString("myuser\nmypassword\n")}, + ), + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, doubleChallenge}, + {basicRequest1, success}, + }, + ExpectedToken: successfulToken, + }, + "negotiate+failing basic handler, negotiate+basic challenge, success": { + Handler: NewMultiHandler( + &NegotiateChallengeHandler{negotiator: &successfulNegotiator{rounds: 2}}, + 
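+				// listed first, so MultiHandler selects and sticks with it over basic auth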
&BasicChallengeHandler{}, + ), + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, doubleChallenge}, + {negotiateRequest1, negotiateChallenge2}, + {negotiateRequest2, success}, + }, + ExpectedToken: successfulToken, + }, + "negotiate+basic handler, negotiate+basic challenge, prefers negotiation, success": { + Handler: NewMultiHandler( + &NegotiateChallengeHandler{negotiator: &successfulNegotiator{rounds: 2}}, + &BasicChallengeHandler{Reader: bytes.NewBufferString("myuser\nmypassword\n")}, + ), + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, doubleChallenge}, + {negotiateRequest1, negotiateChallenge2}, + {negotiateRequest2, success}, + }, + ExpectedToken: successfulToken, + }, + "negotiate+basic handler, negotiate+basic challenge, prefers negotiation, sticks with selected handler on failure": { + Handler: NewMultiHandler( + &NegotiateChallengeHandler{negotiator: &successfulNegotiator{rounds: 2}}, + &BasicChallengeHandler{Reader: bytes.NewBufferString("myuser\nmypassword\n")}, + ), + Requests: []requestResponse{ + {initialHead, initialHeadResp}, + {initialRequest, doubleChallenge}, + {negotiateRequest1, negotiateChallenge2}, + {negotiateRequest2, doubleChallenge}, + }, + ExpectedError: "InitSecContext: expected 2 calls, saw 3", + }, + } + + for k, tc := range testcases { + i := 0 + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + defer func() { + if err := recover(); err != nil { + t.Errorf("test %s panicked: %v", k, err) + } + }() + + if i >= len(tc.Requests) { + t.Errorf("%s: %d: more requests received than expected: %#v", k, i, req) + return + } + rr := tc.Requests[i] + i++ + + method := rr.expectedRequest.method + if len(method) == 0 { + method = http.MethodGet + } + if req.Method != method { + t.Errorf("%s: %d: Expected %s, got %s", k, i, method, req.Method) + return + } + + path := rr.expectedRequest.path + if len(path) == 0 { + path = "/oauth/authorize" + } + if req.URL.Path != path { + t.Errorf("%s: %d: Expected %s, got %s", k, i, path, req.URL.Path) + return + } + + if e, a := rr.expectedRequest.authorization, req.Header.Get("Authorization"); e != a { + t.Errorf("%s: %d: expected 'Authorization: %s', got 'Authorization: %s'", k, i, e, a) + return + } + + if len(rr.serverResponse.location) > 0 { + w.Header().Add("Location", rr.serverResponse.location) + } + for _, v := range rr.serverResponse.wwwAuthenticate { + w.Header().Add("WWW-Authenticate", v) + } + w.WriteHeader(rr.serverResponse.status) + })) + defer s.Close() + + opts := &RequestTokenOptions{ + ClientConfig: &restclient.Config{Host: s.URL}, + Handler: tc.Handler, + OsinConfig: &osincli.ClientConfig{ + ClientId: openShiftCLIClientID, + AuthorizeUrl: oauthdiscovery.OpenShiftOAuthAuthorizeURL(s.URL), + TokenUrl: oauthdiscovery.OpenShiftOAuthTokenURL(s.URL), + RedirectUrl: oauthdiscovery.OpenShiftOAuthTokenImplicitURL(s.URL), + }, + Issuer: s.URL, + TokenFlow: true, + } + token, err := opts.RequestToken() + if token != tc.ExpectedToken { + t.Errorf("%s: expected token '%s', got '%s'", k, tc.ExpectedToken, token) + } + errStr := "" + if err != nil { + errStr = err.Error() + } + if errStr != tc.ExpectedError { + t.Errorf("%s: expected error '%s', got '%s'", k, tc.ExpectedError, errStr) + } + if i != len(tc.Requests) { + t.Errorf("%s: expected %d requests, saw %d", k, len(tc.Requests), i) + } + verifyReleased(k, tc.Handler) + } +} + +func TestSetDefaultOsinConfig(t *testing.T) { + noHostChange := func(host string) string { 
return host } + for _, tc := range []struct { + name string + metadata *oauthdiscovery.OauthAuthorizationServerMetadata + hostWrapper func(host string) (newHost string) + tokenFlow bool + + expectPKCE bool + expectedConfig *osincli.ClientConfig + }{ + { + name: "code with PKCE support from server", + metadata: &oauthdiscovery.OauthAuthorizationServerMetadata{ + Issuer: "a", + AuthorizationEndpoint: "b", + TokenEndpoint: "c", + CodeChallengeMethodsSupported: []string{pkce_s256}, + }, + hostWrapper: noHostChange, + tokenFlow: false, + + expectPKCE: true, + expectedConfig: &osincli.ClientConfig{ + ClientId: openShiftCLIClientID, + AuthorizeUrl: "b", + TokenUrl: "c", + RedirectUrl: "a/oauth/token/implicit", + CodeChallengeMethod: pkce_s256, + }, + }, + { + name: "code without PKCE support from server", + metadata: &oauthdiscovery.OauthAuthorizationServerMetadata{ + Issuer: "a", + AuthorizationEndpoint: "b", + TokenEndpoint: "c", + CodeChallengeMethodsSupported: []string{"someotherstuff"}, + }, + hostWrapper: noHostChange, + tokenFlow: false, + + expectPKCE: false, + expectedConfig: &osincli.ClientConfig{ + ClientId: openShiftCLIClientID, + AuthorizeUrl: "b", + TokenUrl: "c", + RedirectUrl: "a/oauth/token/implicit", + }, + }, + { + name: "token with PKCE support from server", + metadata: &oauthdiscovery.OauthAuthorizationServerMetadata{ + Issuer: "a", + AuthorizationEndpoint: "b", + TokenEndpoint: "c", + CodeChallengeMethodsSupported: []string{pkce_s256}, + }, + hostWrapper: noHostChange, + tokenFlow: true, + + expectPKCE: false, + expectedConfig: &osincli.ClientConfig{ + ClientId: openShiftCLIClientID, + AuthorizeUrl: "b", + TokenUrl: "c", + RedirectUrl: "a/oauth/token/implicit", + }, + }, + { + name: "code with PKCE support from server, but wrong case", + metadata: &oauthdiscovery.OauthAuthorizationServerMetadata{ + Issuer: "a", + AuthorizationEndpoint: "b", + TokenEndpoint: "c", + CodeChallengeMethodsSupported: []string{"s256"}, // we are case sensitive so this is not valid + }, + hostWrapper: noHostChange, + tokenFlow: false, + + expectPKCE: false, + expectedConfig: &osincli.ClientConfig{ + ClientId: openShiftCLIClientID, + AuthorizeUrl: "b", + TokenUrl: "c", + RedirectUrl: "a/oauth/token/implicit", + }, + }, + { + name: "token without PKCE support from server", + metadata: &oauthdiscovery.OauthAuthorizationServerMetadata{ + Issuer: "a", + AuthorizationEndpoint: "b", + TokenEndpoint: "c", + CodeChallengeMethodsSupported: []string{"random"}, + }, + hostWrapper: noHostChange, + tokenFlow: true, + + expectPKCE: false, + expectedConfig: &osincli.ClientConfig{ + ClientId: openShiftCLIClientID, + AuthorizeUrl: "b", + TokenUrl: "c", + RedirectUrl: "a/oauth/token/implicit", + }, + }, + { + name: "host with extra slashes", + metadata: &oauthdiscovery.OauthAuthorizationServerMetadata{ + Issuer: "a", + AuthorizationEndpoint: "b", + TokenEndpoint: "c", + CodeChallengeMethodsSupported: []string{pkce_s256}, + }, + hostWrapper: func(host string) string { return host + "/////" }, + tokenFlow: false, + + expectPKCE: true, + expectedConfig: &osincli.ClientConfig{ + ClientId: openShiftCLIClientID, + AuthorizeUrl: "b", + TokenUrl: "c", + RedirectUrl: "a/oauth/token/implicit", + CodeChallengeMethod: pkce_s256, + }, + }, + { + name: "issuer with extra slashes", + metadata: &oauthdiscovery.OauthAuthorizationServerMetadata{ + Issuer: "a/////", + AuthorizationEndpoint: "b", + TokenEndpoint: "c", + CodeChallengeMethodsSupported: []string{pkce_s256}, + }, + hostWrapper: noHostChange, + tokenFlow: false, + + 
expectPKCE: true, + expectedConfig: &osincli.ClientConfig{ + ClientId: openShiftCLIClientID, + AuthorizeUrl: "b", + TokenUrl: "c", + RedirectUrl: "a/oauth/token/implicit", + CodeChallengeMethod: pkce_s256, + }, + }, + { + name: "code with PKCE support from server, more complex JSON", + metadata: &oauthdiscovery.OauthAuthorizationServerMetadata{ + Issuer: "arandomissuerthatisfun123!!!///", + AuthorizationEndpoint: "44authzisanawesomeendpoint", + TokenEndpoint: "&&buttokenendpointisprettygoodtoo", + CodeChallengeMethodsSupported: []string{pkce_s256}, + }, + hostWrapper: noHostChange, + tokenFlow: false, + + expectPKCE: true, + expectedConfig: &osincli.ClientConfig{ + ClientId: openShiftCLIClientID, + AuthorizeUrl: "44authzisanawesomeendpoint", + TokenUrl: "&&buttokenendpointisprettygoodtoo", + RedirectUrl: "arandomissuerthatisfun123!!!/oauth/token/implicit", + CodeChallengeMethod: pkce_s256, + }, + }, + } { + s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if req.Method != "GET" { + t.Errorf("%s: Expected GET, got %s", tc.name, req.Method) + return + } + if req.URL.Path != oauthMetadataEndpoint { + t.Errorf("%s: Expected metadata endpoint, got %s", tc.name, req.URL.Path) + return + } + data, err := json.Marshal(tc.metadata) + if err != nil { + t.Errorf("%s: unexpected json error: %v", tc.name, err) + return + } + w.Write(data) + })) + defer s.Close() + + opts := &RequestTokenOptions{ + ClientConfig: &restclient.Config{Host: tc.hostWrapper(s.URL)}, + TokenFlow: tc.tokenFlow, + } + if err := opts.SetDefaultOsinConfig(); err != nil { + t.Errorf("%s: unexpected SetDefaultOsinConfig error: %v", tc.name, err) + continue + } + + // check PKCE data + if tc.expectPKCE { + if len(opts.OsinConfig.CodeChallenge) == 0 || len(opts.OsinConfig.CodeChallengeMethod) == 0 || len(opts.OsinConfig.CodeVerifier) == 0 { + t.Errorf("%s: did not set PKCE", tc.name) + continue + } + } else { + if len(opts.OsinConfig.CodeChallenge) != 0 || len(opts.OsinConfig.CodeChallengeMethod) != 0 || len(opts.OsinConfig.CodeVerifier) != 0 { + t.Errorf("%s: incorrectly set PKCE", tc.name) + continue + } + } + + // blindly unset random PKCE data since we already checked for it + opts.OsinConfig.CodeChallenge = "" + opts.OsinConfig.CodeVerifier = "" + + // compare the configs to see if they match + if !reflect.DeepEqual(*tc.expectedConfig, *opts.OsinConfig) { + t.Errorf("%s: expected osin config does not match, %s", tc.name, diff.ObjectDiff(*tc.expectedConfig, *opts.OsinConfig)) + } + } +} diff --git a/test/util/oc_copy.go b/test/util/oc_copy.go new file mode 100644 index 000000000..2a51473be --- /dev/null +++ b/test/util/oc_copy.go @@ -0,0 +1,139 @@ +package util + +import ( + "context" + "net/url" + "strings" + + kauthnv1 "k8s.io/api/authentication/v1" + + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/third_party/forked/golang/netutil" + + kauthnv1typedclient "k8s.io/client-go/kubernetes/typed/authentication/v1" + restclient "k8s.io/client-go/rest" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +// getClusterNicknameFromConfig returns host:port of the clientConfig.Host, with .'s replaced by -'s +// TODO this is copied from pkg/client/config/smart_merge.go, looks like a good library-go candidate +func getClusterNicknameFromConfig(clientCfg *restclient.Config) (string, error) { + u, err := url.Parse(clientCfg.Host) + if err != nil { + return "", err + } + hostPort := netutil.CanonicalAddr(u) + + // we need a 
character other than "." to avoid conflicts; replace it with '-'
+	return strings.Replace(hostPort, ".", "-", -1), nil
+}
+
+// getUserNicknameFromConfig returns "username(as known by the server)/getClusterNicknameFromConfig". This allows tab completion for switching users to
+// work easily and obviously.
+func getUserNicknameFromConfig(clientCfg *restclient.Config) (string, error) {
+	userPartOfNick, err := getUserPartOfNickname(clientCfg)
+	if err != nil {
+		return "", err
+	}
+
+	clusterNick, err := getClusterNicknameFromConfig(clientCfg)
+	if err != nil {
+		return "", err
+	}
+
+	return userPartOfNick + "/" + clusterNick, nil
+}
+
+func getUserPartOfNickname(clientCfg *restclient.Config) (string, error) {
+	authClient, err := kauthnv1typedclient.NewForConfig(clientCfg)
+	if err != nil {
+		return "", err
+	}
+
+	var username string
+	selfSubjectReview, err := authClient.SelfSubjectReviews().Create(context.Background(), &kauthnv1.SelfSubjectReview{}, metav1.CreateOptions{})
+	if kerrors.IsNotFound(err) || kerrors.IsForbidden(err) {
+		// if we're talking to kube (or likely talking to kube), take a best guess consistent with login
+		switch {
+		case len(clientCfg.BearerToken) > 0:
+			username = clientCfg.BearerToken
+		case len(clientCfg.Username) > 0:
+			username = clientCfg.Username
+		}
+	} else if err != nil {
+		return "", err
+	} else {
+		username = selfSubjectReview.Status.UserInfo.Username
+	}
+
+	return username, nil
+}
+
+// getContextNicknameFromConfig returns "namespace/getClusterNicknameFromConfig/username(as known by the server)". This allows tab completion for switching projects/contexts
+// to work easily. The first stanza is the most selective, on project. The second stanza is the next most selective, on cluster name. The chances of a user having
+// one project on a single server that they want to operate against with two identities are low, so username is last.
+func getContextNicknameFromConfig(namespace string, clientCfg *restclient.Config) (string, error) {
+	userPartOfNick, err := getUserPartOfNickname(clientCfg)
+	if err != nil {
+		return "", err
+	}
+
+	clusterNick, err := getClusterNicknameFromConfig(clientCfg)
+	if err != nil {
+		return "", err
+	}
+
+	return namespace + "/" + clusterNick + "/" + userPartOfNick, nil
+}
+
+// createConfig takes a clientCfg and builds a kubeconfig-style config from it.
+func createConfig(namespace string, clientCfg *restclient.Config) (*clientcmdapi.Config, error) { + clusterNick, err := getClusterNicknameFromConfig(clientCfg) + if err != nil { + return nil, err + } + + userNick, err := getUserNicknameFromConfig(clientCfg) + if err != nil { + return nil, err + } + + contextNick, err := getContextNicknameFromConfig(namespace, clientCfg) + if err != nil { + return nil, err + } + + config := clientcmdapi.NewConfig() + + credentials := clientcmdapi.NewAuthInfo() + credentials.Token = clientCfg.BearerToken + credentials.Exec = clientCfg.ExecProvider + credentials.ClientCertificate = clientCfg.TLSClientConfig.CertFile + if len(credentials.ClientCertificate) == 0 { + credentials.ClientCertificateData = clientCfg.TLSClientConfig.CertData + } + credentials.ClientKey = clientCfg.TLSClientConfig.KeyFile + if len(credentials.ClientKey) == 0 { + credentials.ClientKeyData = clientCfg.TLSClientConfig.KeyData + } + config.AuthInfos[userNick] = credentials + + cluster := clientcmdapi.NewCluster() + cluster.Server = clientCfg.Host + cluster.CertificateAuthority = clientCfg.CAFile + if len(cluster.CertificateAuthority) == 0 { + cluster.CertificateAuthorityData = clientCfg.CAData + } + cluster.InsecureSkipTLSVerify = clientCfg.Insecure + config.Clusters[clusterNick] = cluster + + context := clientcmdapi.NewContext() + context.Cluster = clusterNick + context.AuthInfo = userNick + context.Namespace = namespace + config.Contexts[contextNick] = context + config.CurrentContext = contextNick + + return config, nil +} diff --git a/test/util/openstack_util.go b/test/util/openstack_util.go new file mode 100644 index 000000000..e44858737 --- /dev/null +++ b/test/util/openstack_util.go @@ -0,0 +1,294 @@ +package util + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" + "github.com/gophercloud/gophercloud/openstack/identity/v3/users" + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers" + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects" + "github.com/gophercloud/gophercloud/pagination" + o "github.com/onsi/gomega" + yamlv3 "gopkg.in/yaml.v3" + e2e "k8s.io/kubernetes/test/e2e/framework" +) + +// A Osp represents object ... 
+type Osp struct { + client *gophercloud.ServiceClient +} + +// OpenstackCredentials the openstack credentials extracted from cluster +type OpenstackCredentials struct { + Clouds struct { + Openstack struct { + Auth struct { + AuthURL string `yaml:"auth_url"` + Password string `yaml:"password"` + ProjectID string `yaml:"project_id"` + ProjectName string `yaml:"project_name"` + UserDomainName string `yaml:"user_domain_name"` + Username string `yaml:"username"` + ApplicationCredentialID string `yaml:"application_credential_id"` + ApplicationCredentialSecret string `yaml:"application_credential_secret"` + } `yaml:"auth"` + EndpointType string `yaml:"endpoint_type"` + IdentityAPIVersion string `yaml:"identity_api_version"` + RegionName string `yaml:"region_name"` + Verify bool `yaml:"verify"` + } `yaml:"openstack"` + } `yaml:"clouds"` +} + +// GetOpenStackCredentials gets credentials from cluster +func GetOpenStackCredentials(oc *CLI) (*OpenstackCredentials, error) { + cred := &OpenstackCredentials{} + dirname := "/tmp/" + oc.Namespace() + "-creds" + defer os.RemoveAll(dirname) + err := os.MkdirAll(dirname, 0777) + o.Expect(err).NotTo(o.HaveOccurred()) + + _, err = oc.AsAdmin().WithoutNamespace().Run("extract").Args("secret/openstack-credentials", "-n", "kube-system", "--confirm", "--to="+dirname).Output() + if err != nil { + return cred, err + } + + confFile, err := ioutil.ReadFile(dirname + "/clouds.yaml") + if err == nil { + err = yamlv3.Unmarshal(confFile, cred) + } + return cred, err +} + +func NewOpenStackClient(cred *OpenstackCredentials, serviceType string) *gophercloud.ServiceClient { + var client *gophercloud.ServiceClient + var opts gophercloud.AuthOptions + + if cred.Clouds.Openstack.Auth.ApplicationCredentialID != "" && cred.Clouds.Openstack.Auth.ApplicationCredentialSecret != "" { + opts = gophercloud.AuthOptions{ + IdentityEndpoint: cred.Clouds.Openstack.Auth.AuthURL, + ApplicationCredentialID: cred.Clouds.Openstack.Auth.ApplicationCredentialID, + ApplicationCredentialSecret: cred.Clouds.Openstack.Auth.ApplicationCredentialSecret, + } + } else { + opts = gophercloud.AuthOptions{ + IdentityEndpoint: cred.Clouds.Openstack.Auth.AuthURL, + Username: cred.Clouds.Openstack.Auth.Username, + Password: cred.Clouds.Openstack.Auth.Password, + TenantID: cred.Clouds.Openstack.Auth.ProjectID, + DomainName: cred.Clouds.Openstack.Auth.UserDomainName, + } + } + + provider, err := openstack.AuthenticatedClient(opts) + o.Expect(err).NotTo(o.HaveOccurred()) + + switch serviceType { + case "identity": + client, err = openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{Region: cred.Clouds.Openstack.RegionName}) + case "object-store": + client, err = openstack.NewObjectStorageV1(provider, gophercloud.EndpointOpts{Region: cred.Clouds.Openstack.RegionName}) + case "compute": + client, err = openstack.NewComputeV2(provider, gophercloud.EndpointOpts{Region: cred.Clouds.Openstack.RegionName}) + } + o.Expect(err).NotTo(o.HaveOccurred()) + return client +} + +// GetAuthenticatedUserID gets current user ID +// some users don't have permission to list users, so here extract user ID from auth response +func GetAuthenticatedUserID(providerClient *gophercloud.ProviderClient) (string, error) { + //copied from https://github.com/gophercloud/gophercloud/blob/master/auth_result.go + res := providerClient.GetAuthResult() + if res == nil { + //ProviderClient did not use openstack.Authenticate(), e.g. 
because token
+	//was set manually with ProviderClient.SetToken()
+		return "", fmt.Errorf("no AuthResult available")
+	}
+	switch r := res.(type) {
+	case tokens3.CreateResult:
+		u, err := r.ExtractUser()
+		if err != nil {
+			return "", err
+		}
+		return u.ID, nil
+	default:
+		return "", fmt.Errorf("got unexpected AuthResult type %T", r)
+	}
+}
+
+// GetOpenStackUserIDAndDomainID returns the user ID and domain ID
+func GetOpenStackUserIDAndDomainID(cred *OpenstackCredentials) (string, string) {
+	client := NewOpenStackClient(cred, "identity")
+	userID, err := GetAuthenticatedUserID(client.ProviderClient)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	user, err := users.Get(client, userID).Extract()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	return userID, user.DomainID
+}
+
+// CreateOpenStackContainer creates a storage container in openstack
+func CreateOpenStackContainer(client *gophercloud.ServiceClient, name string) error {
+	pager := containers.List(client, &containers.ListOpts{Full: true, Prefix: name})
+	exist := false
+	// check if the container exists or not
+	err := pager.EachPage(func(page pagination.Page) (bool, error) {
+		containerNames, err := containers.ExtractNames(page)
+		o.Expect(err).NotTo(o.HaveOccurred())
+		for _, n := range containerNames {
+			if n == name {
+				exist = true
+				break
+			}
+		}
+		return true, nil
+	})
+	if err != nil {
+		return err
+	}
+	if exist {
+		err = EmptyOpenStackContainer(client, name)
+		o.Expect(err).NotTo(o.HaveOccurred())
+	}
+	// create the container
+	res := containers.Create(client, name, containers.CreateOpts{})
+	_, err = res.Extract()
+	return err
+}
+
+// DeleteOpenStackContainer deletes the storage container from openstack
+func DeleteOpenStackContainer(client *gophercloud.ServiceClient, name string) error {
+	err := EmptyOpenStackContainer(client, name)
+	if err != nil {
+		return err
+	}
+	response := containers.Delete(client, name)
+	_, err = response.Extract()
+	if err != nil {
+		return fmt.Errorf("error deleting container %s: %v", name, err)
+	}
+	e2e.Logf("Container %s is deleted", name)
+	return nil
+}
+
+// EmptyOpenStackContainer clears all the objects in the storage container
+func EmptyOpenStackContainer(client *gophercloud.ServiceClient, name string) error {
+	pager := objects.List(client, name, &objects.ListOpts{Full: true})
+	err := pager.EachPage(func(page pagination.Page) (bool, error) {
+		objectNames, err := objects.ExtractNames(page)
+		if err != nil {
+			return false, fmt.Errorf("error getting object names: %v", err)
+		}
+		for _, obj := range objectNames {
+			result := objects.Delete(client, name, obj, objects.DeleteOpts{})
+			_, err := result.Extract()
+			if err != nil {
+				return false, fmt.Errorf("hit error when deleting object %s: %v", obj, err)
+			}
+		}
+		return true, nil
+	})
+	if err != nil {
+		return fmt.Errorf("error deleting objects in container %s: %v", name, err)
+	}
+	e2e.Logf("deleted all object items in the container %s", name)
+	return nil
+}
+
+// GetOspInstance returns the name of the OSP instance matching instanceName, using the SDK
+func (osp *Osp) GetOspInstance(client *gophercloud.ServiceClient, instanceName string) (string, error) {
+	// List servers with the given name
+	allServers, err := ListServersByName(client, instanceName)
+
+	if err != nil || len(allServers) == 0 {
+		return "", fmt.Errorf("VM with name %s not found", instanceName)
+	}
+
+	// Return the name of the first matching server
+	serverName := allServers[0].Name
+	e2e.Logf("Virtual machine instance found: %s", serverName)
+	return serverName, nil
+}
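+
+// Illustrative usage (a sketch, not part of the helpers above): how these
+// OpenStack helpers are meant to compose inside a test. The instance name is
+// a hypothetical placeholder; every call is a helper defined in this file.
+//
+//	cred, err := GetOpenStackCredentials(oc)
+//	o.Expect(err).NotTo(o.HaveOccurred())
+//	computeClient := NewOpenStackClient(cred, "compute")
+//	osp := Osp{}
+//	vmName, err := osp.GetOspInstance(computeClient, "ostest-worker-0") // placeholder VM name
+//	o.Expect(err).NotTo(o.HaveOccurred())
+//	e2e.Logf("found instance %s", vmName)
+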
+// GetOspInstanceState returns the status of the OSP instance matching instanceName, using the SDK
+func (osp *Osp) GetOspInstanceState(client *gophercloud.ServiceClient, instanceName string) (string, error) {
+	// List servers with the given name
+	allServers, err := ListServersByName(client, instanceName)
+
+	if err != nil || len(allServers) == 0 {
+		return "", fmt.Errorf("VM with name %s not found", instanceName)
+	}
+
+	// Return the status of the first matching server
+	serverStatus := allServers[0].Status
+	e2e.Logf("Virtual machine instance state: %s", serverStatus)
+	return serverStatus, nil
+}
+
+// GetStopOspInstance stops the OSP instance matching instanceName, using the SDK
+func (osp *Osp) GetStopOspInstance(client *gophercloud.ServiceClient, instanceName string) error {
+	// List servers with the given name
+	allServers, err := ListServersByName(client, instanceName)
+
+	if err != nil || len(allServers) == 0 {
+		return fmt.Errorf("VM with name %s not found", instanceName)
+	}
+
+	// Stop the server
+	serverID := allServers[0].ID
+	err = startstop.Stop(client, serverID).ExtractErr()
+	if err != nil {
+		return fmt.Errorf("failed to stop VM: %v", err)
+	}
+
+	e2e.Logf("VM %s stopped successfully", instanceName)
+	return nil
+}
+
+// GetStartOspInstance starts the OSP instance matching instanceName, using the SDK
+func (osp *Osp) GetStartOspInstance(client *gophercloud.ServiceClient, instanceName string) error {
+	// List servers with the given name
+	allServers, err := ListServersByName(client, instanceName)
+
+	if err != nil || len(allServers) == 0 {
+		return fmt.Errorf("VM with name %s not found", instanceName)
+	}
+
+	// Start the server
+	serverID := allServers[0].ID
+	err = startstop.Start(client, serverID).ExtractErr()
+	if err != nil {
+		return fmt.Errorf("failed to start VM: %v", err)
+	}
+
+	e2e.Logf("VM %s started successfully", instanceName)
+	return nil
+}
+
+// ListServersByName retrieves a list of servers matching the given instance name.
+func ListServersByName(client *gophercloud.ServiceClient, instanceName string) ([]servers.Server, error) { + // Define the options for listing servers + opts := servers.ListOpts{ + Name: instanceName, + } + + // Retrieve all pages of servers matching the options + allPages, err := servers.List(client, opts).AllPages() + if err != nil { + return nil, fmt.Errorf("failed to list servers: %v", err) + } + + // Extract the servers from the retrieved pages + allServers, err := servers.ExtractServers(allPages) + if err != nil { + return nil, fmt.Errorf("failed to extract servers: %v", err) + } + + return allServers, nil +} diff --git a/test/util/pods.go b/test/util/pods.go new file mode 100644 index 000000000..9ed2f378d --- /dev/null +++ b/test/util/pods.go @@ -0,0 +1,280 @@ +package util + +import ( + "context" + "fmt" + "os/exec" + "strings" + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kutilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + e2e "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/pod" +) + +// WaitForNoPodsAvailable waits until there are no pods in the +// given namespace +func WaitForNoPodsAvailable(oc *CLI) error { + return wait.Poll(200*time.Millisecond, 3*time.Minute, func() (bool, error) { + pods, err := oc.KubeClient().CoreV1().Pods(oc.Namespace()).List(context.Background(), metav1.ListOptions{}) + if err != nil { + return false, err + } + + return len(pods.Items) == 0, nil + }) +} + +// RemovePodsWithPrefixes deletes pods whose name begins with the +// supplied prefixes +func RemovePodsWithPrefixes(oc *CLI, prefixes ...string) error { + e2e.Logf("Removing pods from namespace %s with prefix(es): %v", oc.Namespace(), prefixes) + pods, err := oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()).List(context.Background(), metav1.ListOptions{}) + if err != nil { + return err + } + errs := []error{} + for _, prefix := range prefixes { + for _, pod := range pods.Items { + if strings.HasPrefix(pod.Name, prefix) { + if err := oc.AdminKubeClient().CoreV1().Pods(oc.Namespace()).Delete(context.Background(), pod.Name, metav1.DeleteOptions{}); err != nil { + e2e.Logf("unable to remove pod %s/%s", oc.Namespace(), pod.Name) + errs = append(errs, err) + } + } + } + } + if len(errs) > 0 { + return kutilerrors.NewAggregate(errs) + } + return nil +} + +// CreateCentosExecPodOrFail creates a centos:7 pause pod used as a vessel for kubectl exec commands. +// Pod name is uniquely generated. 
+func CreateCentosExecPodOrFail(client kubernetes.Interface, ns, generateName string, tweak func(*v1.Pod)) *v1.Pod { + return pod.CreateExecPodOrFail(context.TODO(), client, ns, generateName, func(pod *v1.Pod) { + pod.Spec.Containers[0].Image = "centos:7" + pod.Spec.Containers[0].Command = []string{"sh", "-c", "trap exit TERM; while true; do sleep 5; done"} + pod.Spec.Containers[0].Args = nil + + if tweak != nil { + tweak(pod) + } + }) +} + +// If no container is provided (empty string "") it will default to the first container +func remoteShPod(oc *CLI, namespace string, podName string, needBash bool, needChroot bool, container string, cmd ...string) (string, error) { + var cargs []string + var containerArgs []string + if needBash { + cargs = []string{"-n", namespace, podName, "bash", "-c"} + } else if needChroot { + cargs = []string{"-n", namespace, podName, "chroot", "/rootfs"} + } else { + cargs = []string{"-n", namespace, podName} + } + + if container != "" { + containerArgs = []string{"-c", container} + } else { + containerArgs = []string{} + } + + allArgs := append(containerArgs, cargs...) + allArgs = append(allArgs, cmd...) + return oc.AsAdmin().WithoutNamespace().Run("rsh").Args(allArgs...).Output() +} + +// RemoteShContainer creates a remote shell of the given container inside the pod +func RemoteShContainer(oc *CLI, namespace string, podName string, container string, cmd ...string) (string, error) { + return remoteShPod(oc, namespace, podName, false, false, container, cmd...) +} + +// RemoteShPod creates a remote shell of the pod +func RemoteShPod(oc *CLI, namespace string, podName string, cmd ...string) (string, error) { + return remoteShPod(oc, namespace, podName, false, false, "", cmd...) +} + +// RemoteShPodWithChroot creates a remote shell of the pod with chroot +func RemoteShPodWithChroot(oc *CLI, namespace string, podName string, cmd ...string) (string, error) { + return remoteShPod(oc, namespace, podName, false, true, "", cmd...) +} + +// RemoteShPodWithBash creates a remote shell of the pod with bash +func RemoteShPodWithBash(oc *CLI, namespace string, podName string, cmd ...string) (string, error) { + return remoteShPod(oc, namespace, podName, true, false, "", cmd...) +} + +// RemoteShPodWithBashSpecifyContainer creates a remote shell of the pod with bash specifying container name +func RemoteShPodWithBashSpecifyContainer(oc *CLI, namespace string, podName string, containerName string, cmd ...string) (string, error) { + return remoteShPod(oc, namespace, podName, true, false, containerName, cmd...) 
+}
+
+// WaitAndGetSpecificPodLogs waits for and returns the pod logs matching the specific filter
+func WaitAndGetSpecificPodLogs(oc *CLI, namespace string, container string, podName string, filter string) (string, error) {
+	logs, err := GetSpecificPodLogs(oc, namespace, container, podName, filter)
+	if err != nil {
+		waitErr := wait.Poll(20*time.Second, 10*time.Minute, func() (bool, error) {
+			logs, err = GetSpecificPodLogs(oc, namespace, container, podName, filter)
+			if err != nil {
+				e2e.Logf("the err:%v, and try next round", err)
+				return false, nil
+			}
+			if logs != "" {
+				return true, nil
+			}
+			return false, nil
+		})
+		AssertWaitPollNoErr(waitErr, fmt.Sprintf("Pod logs do not contain %s", filter))
+	}
+	return logs, nil
+}
+
+// Pod describes a pod to create from a template; Parameters can be used to set the template parameters, except the pod name, which is provided via pod.Name
+type Pod struct {
+	Name       string
+	Namespace  string
+	Template   string
+	Parameters []string
+}
+
+// Create creates a pod on the basis of the Pod struct
+// For example: pod := Pod{Name: "PodName", Namespace: "NSName", Template: "PodTemplateLocation", Parameters: []string{"HOSTNAME=NODE_IP"}}
+// pod.Create(oc)
+// The pod name parameter must be NAME in the template file
+func (pod *Pod) Create(oc *CLI) {
+	e2e.Logf("Creating pod: %s", pod.Name)
+	params := []string{"--ignore-unknown-parameters=true", "-f", pod.Template, "-p", "NAME=" + pod.Name}
+	CreateNsResourceFromTemplate(oc, pod.Namespace, append(params, pod.Parameters...)...)
+	AssertPodToBeReady(oc, pod.Name, pod.Namespace)
+}
+
+// Delete deletes the pod
+func (pod *Pod) Delete(oc *CLI) error {
+	e2e.Logf("Deleting pod: %s", pod.Name)
+	return oc.AsAdmin().WithoutNamespace().Run("delete").Args("pod", pod.Name, "-n", pod.Namespace, "--ignore-not-found=true").Execute()
+
+}
+
+// AssertPodToBeReady polls the pod status until it is ready
+func AssertPodToBeReady(oc *CLI, podName string, namespace string) {
+	err := wait.Poll(10*time.Second, 3*time.Minute, func() (bool, error) {
+		stdout, err := oc.AsAdmin().Run("get").Args("pod", podName, "-n", namespace, "-o", "jsonpath='{.status.conditions[?(@.type==\"Ready\")].status}'").Output()
+		if err != nil {
+			e2e.Logf("the err:%v, and try next round", err)
+			return false, nil
+		}
+		if strings.Contains(stdout, "True") {
+			e2e.Logf("Pod %s is ready!", podName)
+			return true, nil
+		}
+		return false, nil
+	})
+	AssertWaitPollNoErr(err, fmt.Sprintf("Pod %s status is not ready!", podName))
+}
+
+// GetSpecificPodLogs returns the pod logs matching the specific filter
+func GetSpecificPodLogs(oc *CLI, namespace string, container string, podName string, filter string) (string, error) {
+	return GetSpecificPodLogsCombinedOrNot(oc, namespace, container, podName, filter, false)
+}
+
+// GetSpecificPodLogsCombinedOrNot returns the pod logs matching the specific filter, optionally combining stderr
+func GetSpecificPodLogsCombinedOrNot(oc *CLI, namespace string, container string, podName string, filter string, combined bool) (string, error) {
+	var cargs []string
+	if len(container) > 0 {
+		cargs = []string{"-n", namespace, "-c", container, podName}
+	} else {
+		cargs = []string{"-n", namespace, podName}
+	}
+	podLogs, err := oc.AsAdmin().WithoutNamespace().Run("logs").Args(cargs...).OutputToFile("podLogs.txt")
+	if err != nil {
+		e2e.Logf("unable to get the pod (%s) logs", podName)
+		return podLogs, err
+	}
+	var filterCmd = ""
+	if len(filter) > 0 {
+		filterCmd = " | grep -i " + filter
+	}
+	var filteredLogs []byte
+	var errCmd error
+	if combined {
+		filteredLogs, errCmd = exec.Command("bash",
"-c", "cat "+podLogs+filterCmd).CombinedOutput() + } else { + filteredLogs, errCmd = exec.Command("bash", "-c", "cat "+podLogs+filterCmd).Output() + } + return string(filteredLogs), errCmd +} + +// GetAllPods returns a list of the names of all pods in the cluster in a given namespace +func GetAllPods(oc *CLI, namespace string) ([]string, error) { + pods, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", namespace, "-o", "jsonpath='{.items[*].metadata.name}'").Output() + return strings.Split(strings.Trim(pods, "'"), " "), err +} + +// GetPodName returns the pod name +func GetPodName(oc *CLI, namespace string, podLabel string, node string) (string, error) { + args := []string{"pods", "-n", namespace, "-l", podLabel, + "--field-selector", "spec.nodeName=" + node, "-o", "jsonpath='{..metadata.name}'"} + daemonPod, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(args...).Output() + return strings.ReplaceAll(daemonPod, "'", ""), err +} + +// GetPodNodeName returns the name of the node the given pod is running on +func GetPodNodeName(oc *CLI, namespace string, podName string) (string, error) { + return oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", podName, "-n", namespace, "-o=jsonpath={.spec.nodeName}").Output() +} + +// LabelPod labels a given pod with a given label in a given namespace +func LabelPod(oc *CLI, namespace string, podName string, label string) error { + return oc.AsAdmin().WithoutNamespace().Run("label").Args("-n", namespace, "pod", podName, label).Execute() +} + +// GetAllPodsWithLabel get array of all pods for a given namespace and label +func GetAllPodsWithLabel(oc *CLI, namespace string, label string) ([]string, error) { + pods, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", namespace, "-l", label).Template("{{range .items}}{{.metadata.name}}{{\" \"}}{{end}}").Output() + if len(pods) == 0 { + return []string{}, err + } + return strings.Split(pods, " "), err +} + +// AssertAllPodsToBeReadyWithPollerParams assert all pods in NS are in ready state until timeout in a given namespace +// Pros: allow user to customize poller parameters +func AssertAllPodsToBeReadyWithPollerParams(oc *CLI, namespace string, interval, timeout time.Duration) { + err := wait.Poll(interval, timeout, func() (bool, error) { + + // get the status flag for all pods + // except the ones which are in Complete Status. 
+		// it uses the 'ne' operator, which is only compatible with oc 4.10+ versions
+		template := "'{{- range .items -}}{{- range .status.conditions -}}{{- if ne .reason \"PodCompleted\" -}}{{- if eq .type \"Ready\" -}}{{- .status}} {{\" \"}}{{- end -}}{{- end -}}{{- end -}}{{- end -}}'"
+		stdout, err := oc.AsAdmin().Run("get").Args("pods", "-n", namespace).Template(template).Output()
+		if err != nil {
+			e2e.Logf("the err:%v, and try next round", err)
+			return false, nil
+		}
+		if strings.Contains(stdout, "False") {
+			return false, nil
+		}
+		return true, nil
+	})
+	AssertWaitPollNoErr(err, fmt.Sprintf("Some Pods are not ready in NS %s!", namespace))
+}
+
+// AssertAllPodsToBeReady asserts that all pods in the given namespace are ready within the default timeout
+func AssertAllPodsToBeReady(oc *CLI, namespace string) {
+	AssertAllPodsToBeReadyWithPollerParams(oc, namespace, 10*time.Second, 4*time.Minute)
+}
+
+// GetPodNameInHostedCluster returns the pod name in the HyperShift hosted cluster
+func GetPodNameInHostedCluster(oc *CLI, namespace string, podLabel string, node string) (string, error) {
+	args := []string{"pods", "-n", namespace, "-l", podLabel,
+		"--field-selector", "spec.nodeName=" + node, "-o", "jsonpath='{..metadata.name}'"}
+	daemonPod, err := oc.AsAdmin().AsGuestKubeconf().Run("get").Args(args...).Output()
+	return strings.ReplaceAll(daemonPod, "'", ""), err
+}
diff --git a/test/util/prometheus_monitoring.go b/test/util/prometheus_monitoring.go
new file mode 100644
index 000000000..ec1225dab
--- /dev/null
+++ b/test/util/prometheus_monitoring.go
@@ -0,0 +1,270 @@
+package util
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	o "github.com/onsi/gomega"
+	"k8s.io/apimachinery/pkg/util/wait"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+const (
+	prometheusURL       = "https://prometheus-k8s.openshift-monitoring.svc:9091"
+	thanosURL           = "https://thanos-querier.openshift-monitoring.svc:9091"
+	monitorInstantQuery = "/api/v1/query"
+	monitorRangeQuery   = "/api/v1/query_range"
+	monitorAlerts       = "/api/v1/alerts"
+	monitorRules        = "/api/v1/rules"
+	monitorNamespace    = "openshift-monitoring"
+	prometheusK8s       = "prometheus-k8s"
+)
+
+// MonitorInstantQueryParams API doc
+// query parameters:
+//
+//	query=<string>: Prometheus expression query string.
+//	time=<rfc3339 | unix_timestamp>: Evaluation timestamp. Optional.
+//	timeout=<duration>: Evaluation timeout. Optional. Defaults to and is capped by the value of the -query.timeout flag.
+type MonitorInstantQueryParams struct {
+	Query   string
+	Time    string
+	Timeout string
+}
+
+// MonitorRangeQueryParams API doc
+// query range parameters:
+//
+//	query=<string>: Prometheus expression query string.
+//	start=<rfc3339 | unix_timestamp>: Start timestamp, inclusive.
+//	end=<rfc3339 | unix_timestamp>: End timestamp, inclusive.
+//	step=<duration | float>: Query resolution step width in duration format or float number of seconds.
+//	timeout=<duration>: Evaluation timeout. Optional. Defaults to and is capped by the value of the -query.timeout flag.
+type MonitorRangeQueryParams struct {
+	Query   string
+	Start   string
+	End     string
+	Step    string
+	Timeout string
+}
+
+// Monitorer defines the query functions a monitor implements
+type Monitorer interface {
+	SimpleQuery(query string) (string, error)
+	InstantQuery(queryParams MonitorInstantQueryParams) (string, error)
+	RangeQuery(queryParams MonitorRangeQueryParams) (string, error)
+	queryRules(query string) (string, error)
+	GetAllRules() (string, error)
+	GetAlertRules() (string, error)
+	GetRecordRules() (string, error)
+}
+
+// Monitor defines a monitor object. It will query thanos
+type Monitor struct {
+	url      string
+	Token    string
+	ocClient *CLI
+}
+
+// PrometheusMonitor defines a monitor object. It will query prometheus directly instead of thanos
+type PrometheusMonitor struct {
+	Monitor
+}
+
+// NewMonitor creates a monitor using the thanos URL
+func NewMonitor(oc *CLI) (*Monitor, error) {
+	var mo Monitor
+	var err error
+	mo.url = thanosURL
+	mo.ocClient = oc
+	mo.Token, err = GetSAToken(oc)
+	return &mo, err
+}
+
+// NewPrometheusMonitor creates a monitor using the prometheus URL
+func NewPrometheusMonitor(oc *CLI) (*PrometheusMonitor, error) {
+	var mo Monitor
+	var err error
+	mo.url = prometheusURL
+	mo.ocClient = oc
+	mo.Token, err = GetSAToken(oc)
+	return &PrometheusMonitor{Monitor: mo}, err
+}
+
+// SimpleQuery executes a simple query in prometheus. .../query?query=$query_to_execute
+func (mo *Monitor) SimpleQuery(query string) (string, error) {
+	queryParams := MonitorInstantQueryParams{Query: query}
+	return mo.InstantQuery(queryParams)
+}
+
+// InstantQuery executes a query in prometheus with time and timeout.
+//
+// Example: curl 'http://host:port/api/v1/query?query=up&time=2015-07-01T20:10:51.781Z'
+func (mo *Monitor) InstantQuery(queryParams MonitorInstantQueryParams) (string, error) {
+	queryArgs := []string{"curl", "-k", "-s", "-H", fmt.Sprintf("Authorization: Bearer %v", mo.Token)}
+
+	if queryParams.Query != "" {
+		queryArgs = append(queryArgs, []string{"--data-urlencode", "query=" + queryParams.Query}...)
+	}
+	if queryParams.Time != "" {
+		queryArgs = append(queryArgs, []string{"--data-urlencode", "time=" + queryParams.Time}...)
+	}
+	if queryParams.Timeout != "" {
+		queryArgs = append(queryArgs, []string{"--data-urlencode", "timeout=" + queryParams.Timeout}...)
+	}
+
+	queryArgs = append(queryArgs, mo.url+monitorInstantQuery)
+
+	// We don't want to print the token
+	mo.ocClient.NotShowInfo()
+	defer mo.ocClient.SetShowInfo()
+
+	return RemoteShPod(mo.ocClient, monitorNamespace, "statefulsets/"+prometheusK8s, queryArgs...)
+}
+
+// RangeQuery executes a query range in prometheus with start, end, step and timeout
+//
+// Example: curl 'http://host:port/api/v1/query_range?query=metricname&start=2015-07-01T20:10:30.781Z&end=2015-07-01T20:11:00.781Z&step=15s'
+func (mo *Monitor) RangeQuery(queryParams MonitorRangeQueryParams) (string, error) {
+	queryArgs := []string{"curl", "-k", "-s", "-H", fmt.Sprintf("Authorization: Bearer %v", mo.Token)}
+
+	if queryParams.Query != "" {
+		queryArgs = append(queryArgs, []string{"--data-urlencode", "query=" + queryParams.Query}...)
+	}
+	if queryParams.Start != "" {
+		queryArgs = append(queryArgs, []string{"--data-urlencode", "start=" + queryParams.Start}...)
+	}
+	if queryParams.End != "" {
+		queryArgs = append(queryArgs, []string{"--data-urlencode", "end=" + queryParams.End}...)
+	}
+	if queryParams.Step != "" {
+		queryArgs = append(queryArgs, []string{"--data-urlencode", "step=" + queryParams.Step}...)
+	}
+	if queryParams.Timeout != "" {
+		queryArgs = append(queryArgs, []string{"--data-urlencode", "timeout=" + queryParams.Timeout}...)
+	}
+
+	queryArgs = append(queryArgs, mo.url+monitorRangeQuery)
+
+	// We don't want to print the token
+	mo.ocClient.NotShowInfo()
+	defer mo.ocClient.SetShowInfo()
+
+	return RemoteShPod(mo.ocClient, monitorNamespace, "statefulsets/"+prometheusK8s, queryArgs...)
+}
+
+func (mo *Monitor) queryRules(query string) (string, error) {
+	queryArgs := []string{"curl", "-k", "-s", "-H", fmt.Sprintf("Authorization: Bearer %v", mo.Token)}
+	queryString := ""
+	if query != "" {
+		queryString = "?" + query
+	}
+
+	queryArgs = append(queryArgs, mo.url+monitorRules+queryString)
+
+	// We don't want to print the token
+	mo.ocClient.NotShowInfo()
+	defer mo.ocClient.SetShowInfo()
+
+	return RemoteShPod(mo.ocClient, monitorNamespace, "statefulsets/"+prometheusK8s, queryArgs...)
+}
+
+// GetAllRules returns all rules
+func (mo *Monitor) GetAllRules() (string, error) {
+	return mo.queryRules("")
+}
+
+// GetAlertRules returns all alerting rules
+func (mo *Monitor) GetAlertRules() (string, error) {
+	return mo.queryRules("type=alert")
+}
+
+// GetRecordRules returns all recording rules
+func (mo *Monitor) GetRecordRules() (string, error) {
+	return mo.queryRules("type=record")
+}
+
+// GetAlerts returns all alerts. It doesn't use the Alertmanager, and it returns alerts in 'pending' state too
+func (pmo *PrometheusMonitor) GetAlerts() (string, error) {
+
+	// We don't want to print the token
+	pmo.ocClient.NotShowInfo()
+	defer pmo.ocClient.SetShowInfo()
+
+	getCmd := "curl -k -s -H \"" + fmt.Sprintf("Authorization: Bearer %v", pmo.Token) + "\" " + pmo.url + monitorAlerts
+	return RemoteShPod(pmo.ocClient, monitorNamespace, "statefulsets/"+prometheusK8s, "sh", "-c", getCmd)
+}
+
+// GetSAToken gets a token for the prometheus-k8s service account in the openshift-monitoring namespace.
+// According to 2093780, the secret prometheus-k8s-token is removed from sa prometheus-k8s,
+// so from 4.11 the old `oc sa get-token` command won't work.
+// Please install an oc client with the same major version as the cluster.
+func GetSAToken(oc *CLI) (string, error) {
+	e2e.Logf("Getting a token assigned to prometheus-k8s from %s namespace...", monitorNamespace)
+	token, err := oc.AsAdmin().WithoutNamespace().Run("create").Args("token", prometheusK8s, "-n", monitorNamespace).Output()
+	if err != nil {
+		if strings.Contains(token, "unknown command") { // oc client is an old version; create token is not supported
+			e2e.Logf("oc create token is not supported by current client, use oc sa get-token instead")
+			token, err = oc.AsAdmin().WithoutNamespace().Run("sa").Args("get-token", prometheusK8s, "-n", monitorNamespace).Output()
+		} else {
+			return "", err
+		}
+	}
+
+	return token, err
+}
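+
+// Illustrative usage (a sketch, assuming a running cluster): driving the
+// Monitor helpers above from a test. The PromQL expression is a placeholder.
+//
+//	mon, err := NewMonitor(oc)
+//	o.Expect(err).NotTo(o.HaveOccurred())
+//	res, err := mon.SimpleQuery(`up{job="apiserver"}`) // placeholder query
+//	o.Expect(err).NotTo(o.HaveOccurred())
+//	e2e.Logf("query result: %s", res)
+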
+// InstantQueryWithRetry passes QueryParams to InstantQuery and polls it until it succeeds or times out.
+func (mo *Monitor) InstantQueryWithRetry(queryParams MonitorInstantQueryParams, timeDurationSec int) string {
+	var res string
+	var err error
+	err = wait.Poll(time.Duration(timeDurationSec/5)*time.Second, time.Duration(timeDurationSec)*time.Second, func() (bool, error) {
+		res, err = mo.InstantQuery(queryParams)
+		if err != nil {
+			e2e.Logf("Error sending InstantQuery: %v", err)
+			return false, nil
+		}
+		return true, nil
+	})
+	AssertWaitPollNoErr(err, fmt.Sprintf("Timed out polling for metric %s", queryParams.Query))
+	return res
+}
+
+type ContainerMemoryRSSType struct {
+	Data struct {
+		Result []struct {
+			Metric struct {
+				MetricName string `json:"__name__"`
+				Container  string `json:"container"`
+				Endpoint   string `json:"endpoint"`
+				ID         string `json:"id"`
+				Image      string `json:"image"`
+				Instance   string `json:"instance"`
+				Job        string `json:"job"`
+				MetricPath string `json:"metrics_path"`
+				Name       string `json:"name"`
+				Namespace  string `json:"namespace"`
+				Node       string `json:"node"`
+				Pod        string `json:"pod"`
+				Service    string `json:"service"`
+			} `json:"metric"`
+			Value []interface{} `json:"value"`
+		} `json:"result"`
+		ResultType string `json:"resultType"`
+	} `json:"data"`
+	Status string `json:"status"`
+}
+
+// ExtractSpecifiedValueFromMetricData4MemRSS extracts the value of the container_memory_rss metric from the result of a Prometheus query
+func ExtractSpecifiedValueFromMetricData4MemRSS(oc *CLI, metricResult string) (string, int) {
+	var ramMetricsInfo ContainerMemoryRSSType
+	jsonErr := json.Unmarshal([]byte(metricResult), &ramMetricsInfo)
+	o.Expect(jsonErr).NotTo(o.HaveOccurred())
+	e2e.Logf("Node: [%v], Pod Name: [%v], Status: [%v], Metric Name: [%v], Value: [%v]", ramMetricsInfo.Data.Result[0].Metric.Node, ramMetricsInfo.Data.Result[0].Metric.Pod, ramMetricsInfo.Status, ramMetricsInfo.Data.Result[0].Metric.MetricName, ramMetricsInfo.Data.Result[0].Value[1])
+	metricValue, err := strconv.Atoi(ramMetricsInfo.Data.Result[0].Value[1].(string))
+	o.Expect(err).NotTo(o.HaveOccurred())
+	return ramMetricsInfo.Data.Result[0].Metric.MetricName, metricValue
+}
diff --git a/test/util/psap.go b/test/util/psap.go
new file mode 100644
index 000000000..a98f387f9
--- /dev/null
+++ b/test/util/psap.go
@@ -0,0 +1,1155 @@
+package util
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	g "github.com/onsi/ginkgo/v2"
+	o "github.com/onsi/gomega"
+	"k8s.io/apimachinery/pkg/util/wait"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+//This will check if an operator deployment/daemonset is created successfully.
+//We will update the SRO test cases to use these common utils later.
+//example:
+//WaitOprResourceReady(oc, deployment, deployment-name, namespace, true, true)
+//WaitOprResourceReady(oc, statefulset, statefulset-name, namespace, false, false)
+//WaitOprResourceReady(oc, daemonset, daemonset-name, namespace, false, false)
+//If islongduration is true, it will wait up to 720s, otherwise 360s
+//If excludewinnode is true, skip checking the daemonset status on windows nodes
+//For daemonsets or deployments with random names, get the name before using this function
+
+// WaitOprResourceReady checks whether a deployment/daemonset/statefulset is ready
+func WaitOprResourceReady(oc *CLI, kind, name, namespace string, islongduration bool, excludewinnode bool) {
+	//If islongduration is true, wait up to 720s, otherwise 360s
+	var timeDurationSec int
+	if islongduration {
+		timeDurationSec = 720
+	} else {
+		timeDurationSec = 360
+	}
+
+	waitErr := wait.Poll(20*time.Second, time.Duration(timeDurationSec)*time.Second, func() (bool, error) {
+		var (
+			kindNames  string
+			err        error
+			isCreated  bool
+			desiredNum string
+			readyNum   string
+		)
+
+		//Check if deployment/daemonset/statefulset is created.
+		switch kind {
+		case "deployment", "statefulset":
+			kindNames, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(kind, name, "-n", namespace, "-oname").Output()
+			if strings.Contains(kindNames, "NotFound") || strings.Contains(kindNames, "No resources") || len(kindNames) == 0 || err != nil {
+				isCreated = false
+			} else {
+				//deployment/statefulset has been created, but may not be running; compare .status.readyReplicas with .status.replicas
+				isCreated = true
+				desiredNum, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args(kindNames, "-n", namespace, "-o=jsonpath={.status.readyReplicas}").Output()
+				readyNum, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args(kindNames, "-n", namespace, "-o=jsonpath={.status.replicas}").Output()
+			}
+		case "daemonset":
+			kindNames, err = oc.AsAdmin().WithoutNamespace().Run("get").Args(kind, name, "-n", namespace, "-oname").Output()
+			e2e.Logf("daemonset name is:" + kindNames)
+			if len(kindNames) == 0 || err != nil {
+				isCreated = false
+			} else {
+				//daemonset has been created, but may not be running; compare .status.desiredNumberScheduled with .status.numberReady
+				//if the two values are equal, the daemonset has successfully progressed
+				isCreated = true
+				desiredNum, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args(kindNames, "-n", namespace, "-o=jsonpath={.status.desiredNumberScheduled}").Output()
+				//If there are windows worker nodes, the desired daemonset count should be the linux nodes' count
+				_, WindowsNodeNum := CountNodeNumByOS(oc)
+				if WindowsNodeNum > 0 && excludewinnode {
+
+					//Exclude windows nodes
+					e2e.Logf("%v desiredNum is: %v", kindNames, desiredNum)
+					desiredLinuxWorkerNum, _ := strconv.Atoi(desiredNum)
+					e2e.Logf("desiredlinuxworkerNum is:%v", desiredLinuxWorkerNum)
+					desiredNum = strconv.Itoa(desiredLinuxWorkerNum - WindowsNodeNum)
+				}
+				readyNum, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args(kindNames, "-n", namespace, "-o=jsonpath={.status.numberReady}").Output()
+			}
+		default:
+			e2e.Logf("Invalid Resource Type")
+		}
+
+		e2e.Logf("desiredNum is: " + desiredNum + " readyNum is: " + readyNum)
+		//daemonset/deployment has been created, but may not be running; compare desiredNum with readyNum
+		//if isCreated is true and the two values are equal, the pods are ready
+		if isCreated && len(kindNames) != 0 && desiredNum == readyNum {
+			e2e.Logf("The %v has successfully progressed and is running normally", kindNames)
+			return true, nil
+		}
+		e2e.Logf("The %v is not ready or running normally", kindNames)
+		return false, nil
+
+	})
+	AssertWaitPollNoErr(waitErr, fmt.Sprintf("the pod of %v is not running", name))
+}
+
+// IsNodeLabeledByNFD checks whether NFD is installed, based on the cluster's node labels
+func IsNodeLabeledByNFD(oc *CLI) bool {
+	workNode, _ := GetFirstWorkerNode(oc)
+	Output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node", workNode, "-o", "jsonpath='{.metadata.annotations}'").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	if strings.Contains(Output, "nfd.node.kubernetes.io/feature-labels") {
+		e2e.Logf("NFD installed on openshift container platform and labeled nodes")
+		return true
+	}
+	return false
+}
+
+// CountNodeNumByOS counts how many Linux and Windows worker nodes there are
+func CountNodeNumByOS(oc *CLI) (linuxNum int, windowsNum int) {
+	//Count the windows nodes and linux nodes
+	linuxNodeNames, err := GetAllNodesbyOSType(oc, "linux")
+	o.Expect(err).NotTo(o.HaveOccurred())
+	windowsNodeNames, err := GetAllNodesbyOSType(oc, "windows")
+	o.Expect(err).NotTo(o.HaveOccurred())
+	e2e.Logf("linuxNodeNames is:%v", linuxNodeNames[:])
+	e2e.Logf("windowsNodeNames is:%v", windowsNodeNames[:])
+	linuxNum = len(linuxNodeNames)
+	windowsNum = len(windowsNodeNames)
+	e2e.Logf("Linux node is:%v, windows node is %v", linuxNum, windowsNum)
+	return linuxNum, windowsNum
+}
+
+// GetFirstLinuxMachineSets returns the name of the first machineset that is neither windows nor edge
+func GetFirstLinuxMachineSets(oc *CLI) string {
+	machinesets, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachineset, "-o=jsonpath={.items[*].metadata.name}", "-n", "openshift-machine-api").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+	var regularMachineset []string
+	machinesetsArray := strings.Split(machinesets, " ")
+	//Skip windows and edge machinesets
+	for _, machineset := range machinesetsArray {
+
+		if strings.Contains(machineset, "windows") || strings.Contains(machineset, "edge") {
+			continue
+		}
+		regularMachineset = append(regularMachineset, machineset)
+		e2e.Logf("Non-windows/edge machinesets so far: %v\n", regularMachineset)
+
+	}
+	e2e.Logf("regularMachineset is %v\n", regularMachineset)
+	return regularMachineset[0]
+}
+
+// InstallNFD attempts to install the Node Feature Discovery operator and verify that it is running
+func InstallNFD(oc *CLI, nfdNamespace string) {
+	var (
+		nfdNamespaceFile     = FixturePath("testdata", "psap", "nfd", "nfd-namespace.yaml")
+		nfdOperatorgroupFile = FixturePath("testdata", "psap", "nfd", "nfd-operatorgroup.yaml")
+		nfdSubFile           = FixturePath("testdata", "psap", "nfd", "nfd-sub.yaml")
+	)
+	// check if NFD namespace already exists
+	nsName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("namespace", nfdNamespace).Output()
+	// if namespace exists, check if NFD is installed - exit if it is, continue with installation otherwise
+	// if an error is thrown, namespace does not exist, create and continue with installation
+	if strings.Contains(nsName, "NotFound") || strings.Contains(nsName, "No resources") || err != nil {
+		e2e.Logf("NFD namespace not found - creating namespace and installing NFD ...")
+		CreateClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", nfdNamespaceFile)
+	} else {
+		e2e.Logf("NFD namespace found - checking if NFD is installed ...")
+	}
+
+	ogName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("OperatorGroup", "openshift-nfd", "-n", nfdNamespace).Output()
+	if strings.Contains(ogName, "NotFound") || strings.Contains(ogName, "No resources") || err
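+
+// Illustrative usage (a sketch): concrete invocations of WaitOprResourceReady,
+// mirroring the calls made by InstallNFD/CreateNFDInstance later in this file;
+// the namespace is a placeholder.
+//
+//	// wait up to 360s for the operator deployment, nothing excluded
+//	WaitOprResourceReady(oc, "deployment", "nfd-controller-manager", "openshift-nfd", false, false)
+//	// wait up to 360s for the worker daemonset, skipping windows nodes
+//	WaitOprResourceReady(oc, "daemonset", "nfd-worker", "openshift-nfd", false, true)
+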
!= nil { + // create NFD operator group from template + ApplyNsResourceFromTemplate(oc, nfdNamespace, "--ignore-unknown-parameters=true", "-f", nfdOperatorgroupFile) + } else { + e2e.Logf("NFD operatorgroup found - continue to check subscription ...") + } + + // get default channel and create subscription from template + channel, err := GetOperatorPKGManifestDefaultChannel(oc, "nfd", "openshift-marketplace") + o.Expect(err).NotTo(o.HaveOccurred()) + e2e.Logf("Channel: %v", channel) + // get default channel and create subscription from template + source, err := GetOperatorPKGManifestSource(oc, "nfd", "openshift-marketplace") + o.Expect(err).NotTo(o.HaveOccurred()) + e2e.Logf("Source: %v", source) + + subName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("Subscription", "-n", nfdNamespace).Output() + if strings.Contains(subName, "NotFound") || strings.Contains(subName, "No resources") || !strings.Contains(subName, "nfd") || err != nil { + // create NFD operator group from template + ApplyNsResourceFromTemplate(oc, nfdNamespace, "--ignore-unknown-parameters=true", "-f", nfdSubFile, "-p", "CHANNEL="+channel, "SOURCE="+source) + } else { + e2e.Logf("NFD subscription found - continue to check pod status ...") + } + + //Wait for NFD controller manager is ready + WaitOprResourceReady(oc, "deployment", "nfd-controller-manager", nfdNamespace, false, false) + +} + +// CreateNFDInstance used for create NFD Instance in different namespace +func CreateNFDInstance(oc *CLI, namespace string) { + + var ( + nfdInstanceFile = FixturePath("testdata", "psap", "nfd", "nfd-instance.yaml") + ) + // get cluster version and create NFD instance from template + clusterVersion, _, err := GetClusterVersion(oc) + o.Expect(err).NotTo(o.HaveOccurred()) + e2e.Logf("Cluster Version: %v", clusterVersion) + + nfdinstanceName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("NodeFeatureDiscovery", "nfd-instance", "-n", namespace).Output() + e2e.Logf("NFD Instance is: %v", nfdinstanceName) + if strings.Contains(nfdinstanceName, "NotFound") || strings.Contains(nfdinstanceName, "No resources") || err != nil { + // create NFD operator group from template + nfdInstanceImage := GetNFDInstanceImage(oc, namespace) + e2e.Logf("NFD instance image name: %v", nfdInstanceImage) + o.Expect(nfdInstanceImage).NotTo(o.BeEmpty()) + ApplyNsResourceFromTemplate(oc, namespace, "--ignore-unknown-parameters=true", "-f", nfdInstanceFile, "-p", "IMAGE="+nfdInstanceImage, "NAMESPACE="+namespace) + } else { + e2e.Logf("NFD instance found - continue to check pod status ...") + } + + //wait for NFD master and worker is ready + WaitOprResourceReady(oc, "deployment", "nfd-master", namespace, false, false) + WaitOprResourceReady(oc, "daemonset", "nfd-worker", namespace, false, true) +} + +// GetNFDVersionbyPackageManifest return NFD version +func GetNFDVersionbyPackageManifest(oc *CLI, namespace string) string { + nfdVersionOrigin, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("packagemanifest", "nfd", "-n", namespace, "-ojsonpath={.status.channels[*].currentCSVDesc.version}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(nfdVersionOrigin).NotTo(o.BeEmpty()) + nfdVersionArr := strings.Split(nfdVersionOrigin, ".") + nfdVersion := nfdVersionArr[0] + "." 
+ nfdVersionArr[1]
+	return nfdVersion
+}
+
+// GetNFDInstanceImage returns the correct image name from the manifest channel
+func GetNFDInstanceImage(oc *CLI, namespace string) string {
+	var nfdInstanceImage string
+	nfdInstanceImageStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("packagemanifest", "nfd", "-n", namespace, "-ojsonpath={.status.channels[*].currentCSVDesc.relatedImages}").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	o.Expect(nfdInstanceImageStr).NotTo(o.BeEmpty())
+
+	strTmp1 := strings.ReplaceAll(nfdInstanceImageStr, "[", ",")
+	strTmp2 := strings.ReplaceAll(strTmp1, "]", ",")
+	strTmp3 := strings.ReplaceAll(strTmp2, `"`, "")
+
+	nfdInstanceImageArr := strings.Split(strTmp3, ",")
+
+	//use the last image if multiple images were found
+	for i := 0; i < len(nfdInstanceImageArr); i++ {
+		if strings.Contains(nfdInstanceImageArr[i], "node-feature-discovery") {
+			nfdInstanceImage = nfdInstanceImageArr[i]
+		}
+	}
+	e2e.Logf("NFD instance image name: %v", nfdInstanceImage)
+	return nfdInstanceImage
+}
+
+// GetOperatorPKGManifestSource returns the operator packagemanifest's catalog source name
+func GetOperatorPKGManifestSource(oc *CLI, pkgManifestName, namespace string) (string, error) {
+	catalogSourceNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("catalogsource", "-n", namespace, "-o=jsonpath={.items[*].metadata.name}").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	if strings.Contains(catalogSourceNames, "qe-app-registry") || err != nil {
+		//If the catalogsource qe-app-registry exists, prefer it over redhat-operators, certified-operators, etc.
+		return "qe-app-registry", nil
+	}
+	sourceName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("packagemanifest", pkgManifestName, "-n", namespace, "-o=jsonpath={.status.catalogSource}").Output()
+	return sourceName, err
+}
+
+// GetOperatorPKGManifestDefaultChannel returns the operator packagemanifest's default channel
+func GetOperatorPKGManifestDefaultChannel(oc *CLI, pkgManifestName, namespace string) (string, error) {
+	channel, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("packagemanifest", pkgManifestName, "-n", namespace, "-o", "jsonpath={.status.defaultChannel}").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	return channel, err
+}
+
+// ApplyOperatorResourceByYaml - it's not a template yaml file; the yaml shouldn't include a namespace, we specify the namespace by parameter.
+func ApplyOperatorResourceByYaml(oc *CLI, namespace string, yamlfile string) {
+	if len(namespace) == 0 {
+		//Create cluster-wide resource
+		err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", yamlfile).Execute()
+		o.Expect(err).NotTo(o.HaveOccurred())
+	} else {
+		//Create namespace-wide resource
+		err := oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", yamlfile, "-n", namespace).Execute()
+		o.Expect(err).NotTo(o.HaveOccurred())
+	}
+}
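+
+// Illustrative usage (a sketch): applying and cleaning up a plain,
+// non-template yaml with the helpers above and below; the fixture path is a
+// hypothetical placeholder.
+//
+//	yamlFile := FixturePath("testdata", "psap", "some-resource.yaml") // placeholder path
+//	ApplyOperatorResourceByYaml(oc, "my-namespace", yamlFile)
+//	defer CleanupOperatorResourceByYaml(oc, "my-namespace", yamlFile)
+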
+func CleanupOperatorResourceByYaml(oc *CLI, namespace string, yamlfile string) {
+	if len(namespace) == 0 {
+		// Delete cluster-wide resource
+		err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", yamlfile).Execute()
+		o.Expect(err).NotTo(o.HaveOccurred())
+	} else {
+		// Delete namespace-wide resource
+		err := oc.AsAdmin().WithoutNamespace().Run("delete").Args("-f", yamlfile, "-n", namespace).Execute()
+		o.Expect(err).NotTo(o.HaveOccurred())
+	}
+}
+
+// AssertOprPodLogsbyFilterWithDuration asserts that pod logs match the given filter at least minimalMatch times within timeDurationSec
+func AssertOprPodLogsbyFilterWithDuration(oc *CLI, podName string, namespace string, filter string, timeDurationSec int, minimalMatch int) {
+	podList, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", namespace, "-oname").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	o.Expect(podList).To(o.ContainSubstring(podName))
+
+	e2e.Logf("Got pods list as below: \n" + podList)
+	// Filter pod names based on the deployment name
+	regexpoprname, _ := regexp.Compile(".*" + podName + ".*")
+	podListArry := regexpoprname.FindAllString(podList, -1)
+
+	podListSize := len(podListArry)
+	for i := 0; i < podListSize; i++ {
+		// Check the logs until the keywords matching the filter are found
+		waitErr := wait.Poll(15*time.Second, time.Duration(timeDurationSec)*time.Second, func() (bool, error) {
+			e2e.Logf("Verify the logs on %v", podListArry[i])
+			output, _ := oc.AsAdmin().WithoutNamespace().Run("logs").Args(podListArry[i], "-n", namespace).Output()
+			regexpstr, _ := regexp.Compile(".*" + filter + ".*")
+			loglines := regexpstr.FindAllString(output, -1)
+			matchNumber := len(loglines)
+			if strings.Contains(output, filter) && matchNumber >= minimalMatch {
+				// Print the last matched log entry
+				matchNumber = matchNumber - 1
+				e2e.Logf("The result is: %v", loglines[matchNumber])
+				return true, nil
+			}
+			e2e.Logf("Cannot find the keywords in pod logs by filter: %v", filter)
+			return false, nil
+		})
+		AssertWaitPollNoErr(waitErr, fmt.Sprintf("the pod of %v is not running", podName))
+	}
+}
+
+// AssertOprPodLogsbyFilter checks pod logs against the given filter
+func AssertOprPodLogsbyFilter(oc *CLI, podName string, namespace string, filter string, minimalMatch int) bool {
+	podList, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pods", "-n", namespace, "-oname").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	o.Expect(podList).To(o.ContainSubstring(podName))
+
+	e2e.Logf("Got pods list as below: \n" + podList)
+	// Filter pod names based on the deployment name
+	regexpoprname, _ := regexp.Compile(".*" + podName + ".*")
+	podListArry := regexpoprname.FindAllString(podList, -1)
+
+	podListSize := len(podListArry)
+	var isMatch bool
+	for i := 0; i < podListSize; i++ {
+		e2e.Logf("Verify the logs on %v", podListArry[i])
+		output, _ := oc.AsAdmin().WithoutNamespace().Run("logs").Args(podListArry[i], "-n", namespace).Output()
+		regexpstr, _ := regexp.Compile(".*" + filter + ".*")
+		loglines := regexpstr.FindAllString(output, -1)
+		matchNumber := len(loglines)
+		if strings.Contains(output, filter) && matchNumber >= minimalMatch {
+			// Print the last matched log entry
+			matchNumber = matchNumber - 1
+			e2e.Logf("The result is: %v", loglines[matchNumber])
+			isMatch = true
+		} else {
+			e2e.Logf("Cannot find the keywords in pod logs by filter: %v", filter)
+			isMatch = false
+		}
+	}
+	return isMatch
+}
+
+// WaitForNoPodsAvailableByKind used for checking that no pods of the given kind/name remain in a certain namespace
+func WaitForNoPodsAvailableByKind(oc *CLI, kind string, name string, namespace string) {
+	err := wait.Poll(10*time.Second, 180*time.Second, func() (bool, error) {
+		kindNames, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(kind, name, "-n", namespace, "-oname").Output()
+		if strings.Contains(kindNames, "NotFound") || strings.Contains(kindNames, "No resources") || len(kindNames) == 0 || err != nil {
+			// all matching pods are gone
+			e2e.Logf("All the pods have been terminated:\n %v", kindNames)
+			return true, nil
+		}
+		e2e.Logf("The pods are still terminating, waiting for a while: \n%v", kindNames)
+		return false, nil
+	})
+	AssertWaitPollNoErr(err, "the pods were not terminated within the timeout")
+}
+
+// InstallPAO attempts to install the Performance Add-On operator and verify that it is running
+func InstallPAO(oc *CLI, paoNamespace string) {
+	var (
+		paoNamespaceFile     = FixturePath("testdata", "psap", "pao", "pao-namespace.yaml")
+		paoOperatorgroupFile = FixturePath("testdata", "psap", "pao", "pao-operatorgroup.yaml")
+		paoSubFile           = FixturePath("testdata", "psap", "pao", "pao-subscription.yaml")
+	)
+	// check if the PAO namespace already exists
+	nsName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("namespace", paoNamespace).Output()
+	// if the namespace exists, check if PAO is installed - exit if it is, continue with installation otherwise
+	// if an error is thrown, the namespace does not exist; create it and continue with installation
+	if strings.Contains(nsName, "NotFound") || strings.Contains(nsName, "No resources") || err != nil {
+		e2e.Logf("PAO namespace not found - creating namespace and installing PAO ...")
+		CreateClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", paoNamespaceFile)
+	} else {
+		e2e.Logf("PAO namespace found - checking if PAO is installed ...")
+	}
+
+	ogName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("OperatorGroup", "openshift-performance-addon-operator", "-n", paoNamespace).Output()
+	if strings.Contains(ogName, "NotFound") || strings.Contains(ogName, "No resources") || err != nil {
+		// create PAO operator group from template
+		ApplyNsResourceFromTemplate(oc, paoNamespace, "--ignore-unknown-parameters=true", "-f", paoOperatorgroupFile)
+	} else {
+		e2e.Logf("PAO operatorgroup found - continue to check subscription ...")
+	}
+
+	// get the default channel and create the subscription from template
+	channel, err := GetOperatorPKGManifestDefaultChannel(oc, "performance-addon-operator", "openshift-marketplace")
+	o.Expect(err).NotTo(o.HaveOccurred())
+	e2e.Logf("Channel: %v", channel)
+	// get the catalog source name for the subscription
+	source, err := GetOperatorPKGManifestSource(oc, "performance-addon-operator", "openshift-marketplace")
+	o.Expect(err).NotTo(o.HaveOccurred())
+	e2e.Logf("Source: %v", source)
+
+	subName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("Subscription", "-n", paoNamespace).Output()
+	if strings.Contains(subName, "NotFound") || strings.Contains(subName, "No resources") || !strings.Contains(subName, "performance-operator") || err != nil {
+		// create PAO subscription from template
+		ApplyNsResourceFromTemplate(oc, paoNamespace, "--ignore-unknown-parameters=true", "-f", paoSubFile, "-p", "CHANNEL="+channel, "SOURCE="+source)
+	} else {
+		e2e.Logf("PAO subscription found - continue to check pod status ...")
+	}
+
+	// Wait for the PAO controller manager to be ready
+	WaitOprResourceReady(oc, "deployment", "performance-operator", paoNamespace, false, false)
+}
+
+// IsPAOInstalled checks whether the Performance Add-on Operator is installed
+func IsPAOInstalled(oc *CLI) bool {
+	var isInstalled bool
+	deployments, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", "-A").Output()
oc.AsAdmin().WithoutNamespace().Run("get").Args("deployment", "-A").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if strings.Contains(deployments, "performance-operator") { + isInstalled = true + } else { + e2e.Logf("PAO doesn't installed - will install pao ...") + isInstalled = false + } + return isInstalled +} + +// IsPAOInOperatorHub used for checking if PAO exist in OperatorHub +func IsPAOInOperatorHub(oc *CLI) bool { + var havePAO bool + packagemanifest, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("packagemanifest", "-A").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + if strings.Contains(packagemanifest, "performance-addon-operator") { + havePAO = true + } else { + e2e.Logf("No PAO packagemanifet detect in operatorhub - skip ...") + havePAO = false + } + return havePAO +} + +// StringToBASE64 Base64 Encode +func StringToBASE64(src string) string { + // plaintext, err := base64.StdEncoding.DecodeString(src) + stdEnc := base64.StdEncoding.EncodeToString([]byte(src)) + return string(stdEnc) +} + +// BASE64DecodeStr Base64 Decode +func BASE64DecodeStr(src string) string { + plaintext, err := base64.StdEncoding.DecodeString(src) + if err != nil { + return "" + } + return string(plaintext) +} + +// CreateMachinesetbyInstanceType used to create a machineset with specified machineset name and instance type +func CreateMachinesetbyInstanceType(oc *CLI, machinesetName string, instanceType string) { + // Get existing machinesets in cluster + ocGetMachineset, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachineset, "-n", "openshift-machine-api", "-oname").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(ocGetMachineset).NotTo(o.BeEmpty()) + e2e.Logf("Existing machinesets:\n%v", ocGetMachineset) + + // Get name of first machineset in existing machineset list + firstMachinesetName := GetFirstLinuxMachineSets(oc) + o.Expect(firstMachinesetName).NotTo(o.BeEmpty()) + e2e.Logf("Got %v from machineset list", firstMachinesetName) + + machinesetYamlOutput, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachineset, firstMachinesetName, "-n", "openshift-machine-api", "-oyaml").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(machinesetYamlOutput).NotTo(o.BeEmpty()) + + //Create machinset by specifying a machineset name + regMachineSet := regexp.MustCompile(firstMachinesetName) + newMachinesetYaml := regMachineSet.ReplaceAllString(machinesetYamlOutput, machinesetName) + + //Change instanceType to g4dn.xlarge + iaasPlatform := CheckPlatform(oc) + if iaasPlatform == "aws" || iaasPlatform == "alibabacloud" { + regInstanceType := regexp.MustCompile(`instanceType:.*`) + e2e.Logf("instanceType is %v inside CreateMachinesetbyInstanceType", instanceType) + newInstanceType := "instanceType: " + instanceType + newMachinesetYaml = regInstanceType.ReplaceAllString(newMachinesetYaml, newInstanceType) + } else if iaasPlatform == "gcp" { + regInstanceType := regexp.MustCompile(`machineType:.*`) + e2e.Logf("machineType is %v inside CreateMachinesetbyInstanceType", instanceType) + newInstanceType := "machineType: " + instanceType + newMachinesetYaml = regInstanceType.ReplaceAllString(newMachinesetYaml, newInstanceType) + } else if iaasPlatform == "azure" { + regInstanceType := regexp.MustCompile(`vmSize:.*`) + e2e.Logf("vmSize is %v inside CreateMachinesetbyInstanceType", instanceType) + newInstanceType := "vmSize: " + instanceType + newMachinesetYaml = regInstanceType.ReplaceAllString(newMachinesetYaml, newInstanceType) + } else if iaasPlatform == 
"ibmcloud" { + regInstanceType := regexp.MustCompile(`profile:.*`) + e2e.Logf("profile is %v inside CreateMachinesetbyInstanceType", instanceType) + newInstanceType := "profile: " + instanceType + newMachinesetYaml = regInstanceType.ReplaceAllString(newMachinesetYaml, newInstanceType) + } else { + e2e.Logf("unsupported instance: %v", instanceType) + } + + //Make sure the replicas is 1 + regReplicas := regexp.MustCompile(`replicas:.*`) + replicasNum := "replicas: 1" + newMachinesetYaml = regReplicas.ReplaceAllString(newMachinesetYaml, replicasNum) + + machinesetNewB := []byte(newMachinesetYaml) + + newMachinesetFileName := filepath.Join(e2e.TestContext.OutputDir, oc.Namespace()+"-"+machinesetName+"-new.yaml") + defer os.RemoveAll(newMachinesetFileName) + err = ioutil.WriteFile(newMachinesetFileName, machinesetNewB, 0o644) + o.Expect(err).NotTo(o.HaveOccurred()) + ApplyOperatorResourceByYaml(oc, "openshift-machine-api", newMachinesetFileName) +} + +// IsMachineSetExist check if machineset exist in OCP +func IsMachineSetExist(oc *CLI) bool { + + haveMachineSet := true + Output, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("machineset", "-n", "openshift-machine-api").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(Output).NotTo(o.BeEmpty()) + + if strings.Contains(Output, "No resources found") { + haveMachineSet = false + } + return haveMachineSet +} + +// GetMachineSetInstanceType used to get first machineset instance type +func GetMachineSetInstanceType(oc *CLI) string { + var ( + instanceType string + err error + ) + firstMachinesetName := GetFirstLinuxMachineSets(oc) + e2e.Logf("Got %v from machineset list", firstMachinesetName) + iaasPlatform := CheckPlatform(oc) + if iaasPlatform == "aws" { + instanceType, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("machineset", firstMachinesetName, "-n", "openshift-machine-api", "-ojsonpath={.spec.template.spec.providerSpec.value.instanceType}").Output() + } else if iaasPlatform == "azure" { + instanceType, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("machineset", firstMachinesetName, "-n", "openshift-machine-api", "-ojsonpath={.spec.template.spec.providerSpec.value.vmSize}").Output() + } else if iaasPlatform == "gcp" { + instanceType, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("machineset", firstMachinesetName, "-n", "openshift-machine-api", "-ojsonpath={.spec.template.spec.providerSpec.value.machineType}").Output() + } else if iaasPlatform == "ibmcloud" { + instanceType, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("machineset", firstMachinesetName, "-n", "openshift-machine-api", "-ojsonpath={.spec.template.spec.providerSpec.value.profile}").Output() + } else if iaasPlatform == "alibabacloud" { + instanceType, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("machineset", firstMachinesetName, "-n", "openshift-machine-api", "-ojsonpath={.spec.template.spec.providerSpec.value.instanceType}").Output() + } + o.Expect(err).NotTo(o.HaveOccurred()) + o.Expect(instanceType).NotTo(o.BeEmpty()) + return instanceType +} + +// GetNodeNameByMachineset used for get node name by machineset name +func GetNodeNameByMachineset(oc *CLI, machinesetName string) string { + + var machineName string + machinesetLabels, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachineset, machinesetName, "-n", "openshift-machine-api", "-ojsonpath={.spec.selector.matchLabels.machine\\.openshift\\.io/cluster-api-machineset}").Output() + o.Expect(err).NotTo(o.HaveOccurred()) + 
+	o.Expect(machinesetLabels).NotTo(o.BeEmpty())
+	machineNameStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(MapiMachine, "-l", "machine.openshift.io/cluster-api-machineset="+machinesetLabels, "-n", "openshift-machine-api", "-oname").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	o.Expect(machineNameStr).NotTo(o.BeEmpty())
+	machineNames := strings.Split(machineNameStr, "\n")
+	if len(machineNames) > 0 {
+		machineName = machineNames[0]
+	}
+
+	e2e.Logf("machineName is %v in GetNodeNameByMachineset", machineName)
+
+	nodeName, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(machineName, "-n", "openshift-machine-api", "-ojsonpath={.status.nodeRef.name}").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	o.Expect(nodeName).NotTo(o.BeEmpty())
+	return nodeName
+}
+
+// AssertIfMCPChangesAppliedByName checks the MCP of a given oc client and determines if the machine counts are as expected
+func AssertIfMCPChangesAppliedByName(oc *CLI, mcpName string, timeDurationSec int) {
+	err := wait.Poll(time.Duration(timeDurationSec/10)*time.Second, time.Duration(timeDurationSec)*time.Second, func() (bool, error) {
+		var (
+			mcpMachineCount         string
+			mcpReadyMachineCount    string
+			mcpUpdatedMachineCount  string
+			mcpDegradedMachineCount string
+			mcpUpdatingStatus       string
+			mcpUpdatedStatus        string
+			err                     error
+		)
+
+		mcpUpdatingStatus, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", mcpName, `-ojsonpath='{.status.conditions[?(@.type=="Updating")].status}'`).Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		o.Expect(mcpUpdatingStatus).NotTo(o.BeEmpty())
+		mcpUpdatedStatus, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", mcpName, `-ojsonpath='{.status.conditions[?(@.type=="Updated")].status}'`).Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		o.Expect(mcpUpdatedStatus).NotTo(o.BeEmpty())
+
+		// Do not check the error for the machine counts because SNO sometimes cannot access the API server while the node reboots
+		mcpMachineCount, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", mcpName, "-o=jsonpath={..status.machineCount}").Output()
+		mcpReadyMachineCount, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", mcpName, "-o=jsonpath={..status.readyMachineCount}").Output()
+		mcpUpdatedMachineCount, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", mcpName, "-o=jsonpath={..status.updatedMachineCount}").Output()
+		mcpDegradedMachineCount, _ = oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", mcpName, "-o=jsonpath={..status.degradedMachineCount}").Output()
+		if strings.Contains(mcpUpdatingStatus, "False") && strings.Contains(mcpUpdatedStatus, "True") && mcpMachineCount == mcpReadyMachineCount && mcpMachineCount == mcpUpdatedMachineCount && mcpDegradedMachineCount == "0" {
+			e2e.Logf("MachineConfigPool [%v] checks succeeded!", mcpName)
+			return true, nil
+		}
+
+		e2e.Logf("MachineConfigPool [%v] checks failed, the following values were found (all counts should be '%v'):\nmachineCount: %v\nmcpUpdatingStatus: %v\nmcpUpdatedStatus: %v\nreadyMachineCount: %v\nupdatedMachineCount: %v\nmcpDegradedMachineCount: %v\nRetrying...", mcpName, mcpMachineCount, mcpMachineCount, mcpUpdatingStatus, mcpUpdatedStatus, mcpReadyMachineCount, mcpUpdatedMachineCount, mcpDegradedMachineCount)
+		return false, nil
+	})
+	AssertWaitPollNoErr(err, "MachineConfigPool checks were not successful within timeout limit")
+}
+
+// DeleteMCAndMCPByName waits for nodes to return to the worker machine config pool and, once the specified mcp's machine count is zero, deletes the mc and mcp
+func DeleteMCAndMCPByName(oc *CLI, mcName string, mcpName string, timeDurationSec int) {
+	// Check if the labeled node has returned to the worker mcp, then delete the mc and mcp once the worker mcp is ready
+	e2e.Logf("Check if the labeled node returned to the worker mcp")
+	AssertIfMCPChangesAppliedByName(oc, "worker", timeDurationSec)
+
+	mcpNameList, _ := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp").Output()
+
+	if strings.Contains(mcpNameList, mcpName) {
+		// Confirm that the custom mcp machine count is 0
+		mcpMachineCount, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("mcp", mcpName, "-o=jsonpath={..status.machineCount}").Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		o.Expect(mcpMachineCount).NotTo(o.BeEmpty())
+		if mcpMachineCount == "0" {
+			oc.AsAdmin().WithoutNamespace().Run("delete").Args("mcp", mcpName, "--ignore-not-found").Execute()
+			oc.AsAdmin().WithoutNamespace().Run("delete").Args("mc", mcName, "--ignore-not-found").Execute()
+		}
+	} else {
+		e2e.Logf("The mcp [%v] has been deleted ...", mcpName)
+	}
+}
+
+// CreateCustomNodePoolInHypershift creates a custom nodepool via the hypershift CLI
+func CreateCustomNodePoolInHypershift(oc *CLI, cloudProvider, guestClusterName, nodePoolName, nodeCount, instanceType, upgradeType, clustersNS, defaultNodePoolName string) {
+	if cloudProvider == "aws" {
+		cmdString := fmt.Sprintf("hypershift create nodepool %s --cluster-name %s --name %s --node-count %s --instance-type %s --node-upgrade-type %s --namespace %s", cloudProvider, guestClusterName, nodePoolName, nodeCount, instanceType, upgradeType, clustersNS)
+		e2e.Logf("cmdString is %v", cmdString)
+		_, err := exec.Command("bash", "-c", cmdString).Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+	} else if cloudProvider == "azure" {
+		subnetID, err := oc.AsAdmin().Run("get").Args("-n", clustersNS, "nodepool", defaultNodePoolName, "-ojsonpath={.spec.platform.azure.subnetID}").Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		cmdString := fmt.Sprintf("hypershift create nodepool %s --cluster-name %s --name %s --node-count %s --instance-type %s --node-upgrade-type %s --nodepool-subnet-id %s --namespace %s", cloudProvider, guestClusterName, nodePoolName, nodeCount, instanceType, upgradeType, subnetID, clustersNS)
+		e2e.Logf("cmdString is %v", cmdString)
+		_, err = exec.Command("bash", "-c", cmdString).Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+	} else if cloudProvider == "aks" {
+		subnetID, err := oc.AsAdmin().Run("get").Args("-n", clustersNS, "nodepool", defaultNodePoolName, "-ojsonpath={.spec.platform.azure.subnetID}").Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		azMKPSKU, err := oc.AsAdmin().Run("get").Args("-n", clustersNS, "nodepool", defaultNodePoolName, "-ojsonpath={.spec.platform.azure.image.azureMarketplace.sku}").Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		azMKPVersion, err := oc.AsAdmin().Run("get").Args("-n", clustersNS, "nodepool", defaultNodePoolName, "-ojsonpath={.spec.platform.azure.image.azureMarketplace.version}").Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		azMKPOffer, err := oc.AsAdmin().Run("get").Args("-n", clustersNS, "nodepool", defaultNodePoolName, "-ojsonpath={.spec.platform.azure.image.azureMarketplace.offer}").Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		azMKPPublisher, err := oc.AsAdmin().Run("get").Args("-n", clustersNS, "nodepool", defaultNodePoolName, "-ojsonpath={.spec.platform.azure.image.azureMarketplace.publisher}").Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		cmdString := fmt.Sprintf("hypershift create nodepool azure --cluster-name %s --name %s --node-count %s --instance-type %s --node-upgrade-type %s --nodepool-subnet-id %s --namespace %s --marketplace-offer %s --marketplace-publisher %s --marketplace-sku %s --marketplace-version %s", guestClusterName, nodePoolName, nodeCount, instanceType, upgradeType, subnetID, clustersNS, azMKPOffer, azMKPPublisher, azMKPSKU, azMKPVersion)
+		e2e.Logf("cmdString is %v", cmdString)
+		_, err = exec.Command("bash", "-c", cmdString).Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+	} else {
+		e2e.Logf("Unsupported cloud provider: %v", cloudProvider)
+	}
+}
+
+// AssertIfNodePoolIsReadyByName checks if the Nodepool is ready
+func AssertIfNodePoolIsReadyByName(oc *CLI, nodePoolName string, timeDurationSec int, clustersNS string) {
+	o.Expect(timeDurationSec).Should(o.BeNumerically(">=", 10), "Disaster error: specify a value of timeDurationSec greater than 10.")
+
+	err := wait.Poll(time.Duration(timeDurationSec/10)*time.Second, time.Duration(timeDurationSec)*time.Second, func() (bool, error) {
+		var (
+			isNodePoolReady   string
+			isAllNodesHealthy string
+			err               error
+		)
+		isAllNodesHealthy, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("nodepool", nodePoolName, "-n", clustersNS, `-ojsonpath='{.status.conditions[?(@.type=="AllNodesHealthy")].status}'`).Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		o.Expect(isAllNodesHealthy).NotTo(o.BeEmpty())
+
+		isNodePoolReady, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("nodepool", nodePoolName, "-n", clustersNS, `-ojsonpath='{.status.conditions[?(@.type=="Ready")].status}'`).Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		o.Expect(isNodePoolReady).NotTo(o.BeEmpty())
+
+		// Only succeed when the nodepool is Ready and all of its nodes are healthy
+		if strings.Contains(isNodePoolReady, "True") && strings.Contains(isAllNodesHealthy, "True") {
+			return true, nil
+		}
+		e2e.Logf("Node Pool [%v] checks failed, the following values were found (Ready status should be True, found '%v')", nodePoolName, isNodePoolReady)
+		return false, nil
+	})
+	AssertWaitPollNoErr(err, "Nodepool checks were not successful within timeout limit")
+}
+
+// AssertIfNodePoolUpdatingConfigByName checks that the Nodepool has finished updating its config and is ready
+func AssertIfNodePoolUpdatingConfigByName(oc *CLI, nodePoolName string, timeDurationSec int, clustersNS string) {
+	o.Expect(timeDurationSec).Should(o.BeNumerically(">=", 10), "Disaster error: specify a value of timeDurationSec greater than 10.")
+
+	err := wait.Poll(time.Duration(timeDurationSec/10)*time.Second, time.Duration(timeDurationSec)*time.Second, func() (bool, error) {
+		var (
+			isNodePoolUpdatingConfig  string
+			isNodePoolAllNodesHealthy string
+			isNodePoolReady           string
+			err                       error
+		)
+		isNodePoolUpdatingConfig, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("nodepool", nodePoolName, "-n", clustersNS, `-ojsonpath='{.status.conditions[?(@.type=="UpdatingConfig")].status}'`).Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		o.Expect(isNodePoolUpdatingConfig).NotTo(o.BeEmpty())
+
+		isNodePoolAllNodesHealthy, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("nodepool", nodePoolName, "-n", clustersNS, `-ojsonpath='{.status.conditions[?(@.type=="AllNodesHealthy")].status}'`).Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		o.Expect(isNodePoolAllNodesHealthy).NotTo(o.BeEmpty())
+
+		isNodePoolReady, err = oc.AsAdmin().WithoutNamespace().Run("get").Args("nodepool", nodePoolName, "-n", clustersNS, `-ojsonpath='{.status.conditions[?(@.type=="Ready")].status}'`).Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		o.Expect(isNodePoolReady).NotTo(o.BeEmpty())
+
+		if !strings.Contains(isNodePoolUpdatingConfig, "True") && strings.Contains(isNodePoolAllNodesHealthy, "True") && strings.Contains(isNodePoolReady, "True") {
+			e2e.Logf("Node Pool [%v] status isNodePoolUpdatingConfig: %v isNodePoolAllNodesHealthy: %v isNodePoolReady: %v", nodePoolName, isNodePoolUpdatingConfig, isNodePoolAllNodesHealthy, isNodePoolReady)
+			return true, nil
+		}
+		e2e.Logf("Node Pool [%v] checks failed, the following values were found (UpdatingConfig status should not be True, found '%v')", nodePoolName, isNodePoolUpdatingConfig)
+		return false, nil
+	})
+	AssertWaitPollNoErr(err, "Nodepool checks were not successful within timeout limit")
+}
+
+// IsSNOCluster will check if OCP is a single node cluster
+func IsSNOCluster(oc *CLI) bool {
+	// Only 1 master, 1 worker node, and both have the same hostname.
+	masterNodes, _ := GetClusterNodesBy(oc, "master")
+	workerNodes, _ := GetClusterNodesBy(oc, "worker")
+	if len(masterNodes) == 1 && len(workerNodes) == 1 && masterNodes[0] == workerNodes[0] {
+		return true
+	}
+	return false
+}
+
+// IsOneMasterWithNWorkerNodes detects the single-master with 1-N worker nodes scenario
+func IsOneMasterWithNWorkerNodes(oc *CLI) bool {
+	topologyTypeStdOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("infrastructure", "-ojsonpath={.items[*].status.infrastructureTopology}").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	o.Expect(topologyTypeStdOut).NotTo(o.BeEmpty())
+	topologyType := strings.ToLower(topologyTypeStdOut)
+
+	masterNodes, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", "node-role.kubernetes.io/master", "-oname").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	o.Expect(masterNodes).NotTo(o.BeEmpty())
+	masterNodesArr := strings.Split(masterNodes, "\n")
+
+	workerNodes, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("nodes", "-l", "node-role.kubernetes.io/worker", "-oname").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	o.Expect(workerNodes).NotTo(o.BeEmpty())
+	workerNodesArr := strings.Split(workerNodes, "\n")
+	workerNums := len(workerNodesArr)
+
+	if workerNodesArr[0] == masterNodesArr[0] {
+		return topologyType == "singlereplica" && workerNums > 1
+	}
+	return topologyType == "singlereplica" && workerNums >= 1
+}
+
+// CheckAllNodepoolReadyByHostedClusterName used for checking if all nodepools are ready
+// eg. CheckAllNodepoolReadyByHostedClusterName(oc, "psap-qe-hcluster01", "clusters", 3600)
+func CheckAllNodepoolReadyByHostedClusterName(oc *CLI, nodePoolName, hostedClusterNS string, timeDurationSec int) bool {
+	var isMatch bool
+
+	err := wait.Poll(90*time.Second, time.Duration(timeDurationSec)*time.Second, func() (bool, error) {
+		nodesStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("--ignore-not-found", "np", nodePoolName, `-ojsonpath='{.status.conditions[?(@.type=="Ready")].status}'`, "--namespace", hostedClusterNS).Output()
+		o.Expect(err).ShouldNot(o.HaveOccurred())
+		e2e.Logf("The nodepool ready status is %v ...", nodesStatus)
+		if strings.Contains(nodesStatus, "True") && !strings.Contains(nodesStatus, "False") {
+			isMatch = true
+			return true, nil
+		}
+		return false, nil
+	})
+	AssertWaitPollNoErr(err, "the nodepool did not become ready within the timeout")
+	return isMatch
+}
+
+// getLastWorkerNodeByOsType returns the last worker node with the given OS type, linux or windows
+func getLastWorkerNodeByOsType(oc *CLI, ostype string) (string, error) {
+	nodes, err := GetClusterNodesBy(oc, "worker")
+	o.Expect(err).NotTo(o.HaveOccurred())
+	o.Expect(nodes).NotTo(o.BeEmpty())
+
+	totalNodeNum := len(nodes)
+
+	for i := totalNodeNum - 1; i >= 0; i-- {
+		// Skip nodes that are both worker and master in an OCP cluster with one master and [1-N] worker nodes
+		nodeLabels, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node/"+nodes[i], "-o", "jsonpath={.metadata.labels}").Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		o.Expect(nodeLabels).NotTo(o.BeEmpty())
+
+		regNodeLabels := regexp.MustCompile("control-plane|master")
+		isMaster := regNodeLabels.MatchString(nodeLabels)
+
+		stdout, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("node/"+nodes[i], `-ojsonpath='{.metadata.labels.kubernetes\.io/os}'`).Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+		o.Expect(stdout).NotTo(o.BeEmpty())
+
+		if strings.Trim(stdout, "'") == ostype && !isMaster {
+			return nodes[i], err
+		}
+	}
+	return "", err
+}
+
+// GetLastLinuxWorkerNode returns the last Linux worker node
+func GetLastLinuxWorkerNode(oc *CLI) (string, error) {
+	return getLastWorkerNodeByOsType(oc, "linux")
+}
+
+// ValidHypershiftAndGetGuestKubeConf4SecondHostedCluster checks that this is a hypershift env and gets the kubeconfig of the second hosted cluster
+// the first return value is the hosted cluster name
+// the second return value is the kubeconfig file of the hosted cluster
+// the third return value is the hostedcluster namespace in the mgmt cluster which contains the generated resources
+// if it is not a hypershift env, the test is skipped.
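+//
+// A minimal usage sketch (cleanup of the generated kubeconfig file is the caller's responsibility):
+//
+//	clusterName, kubeconfigFile, hostedClusterNS := ValidHypershiftAndGetGuestKubeConf4SecondHostedCluster(oc)
+//	defer os.Remove(kubeconfigFile)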
+func ValidHypershiftAndGetGuestKubeConf4SecondHostedCluster(oc *CLI) (string, string, string) {
+	if IsROSA() {
+		e2e.Logf("this is a ROSA env")
+		hostedClusterName, hostedclusterKubeconfig, hostedClusterNs := ROSAValidHypershiftAndGetGuestKubeConf(oc)
+		if len(hostedClusterName) == 0 || len(hostedclusterKubeconfig) == 0 || len(hostedClusterNs) == 0 {
+			g.Skip("this is a ROSA env, but the env is problematic, skip test run")
+		}
+		return hostedClusterName, hostedclusterKubeconfig, hostedClusterNs
+	}
+	operatorNS := GetHyperShiftOperatorNameSpace(oc)
+	if len(operatorNS) <= 0 {
+		g.Skip("there is no hypershift operator on host cluster, skip test run")
+	}
+
+	hostedclusterNS := GetHyperShiftHostedClusterNameSpace(oc)
+	if len(hostedclusterNS) <= 0 {
+		g.Skip("there is no hosted cluster NS in mgmt cluster, skip test run")
+	}
+
+	clusterNamesStr, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(
+		"-n", hostedclusterNS, "hostedclusters", "-o=jsonpath={.items[*].metadata.name}").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+	clusterNames := strings.Split(clusterNamesStr, " ")
+	e2e.Logf("clusterNames is: %v", clusterNames)
+	if len(clusterNames) < 2 {
+		g.Skip("there is no second hosted cluster, skip test run")
+	}
+
+	hypershiftPodStatus, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(
+		"-n", operatorNS, "pod", "-o=jsonpath={.items[0].status.phase}").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	o.Expect(hypershiftPodStatus).To(o.ContainSubstring("Running"))
+
+	// get the second hosted cluster to run the test
+	e2e.Logf("the hosted cluster names: %s, and will select the second", clusterNames)
+	clusterName := clusterNames[1]
+
+	hostedClusterKubeconfigFile := "/tmp/guestcluster-kubeconfig-" + clusterName + "-" + GetRandomString()
+	output, err := exec.Command("bash", "-c", fmt.Sprintf("hypershift create kubeconfig --name %s --namespace %s > %s",
+		clusterName, hostedclusterNS, hostedClusterKubeconfigFile)).Output()
+	e2e.Logf("the cmd output: %s", string(output))
+	o.Expect(err).NotTo(o.HaveOccurred())
+	e2e.Logf("created a new hosted cluster kubeconfig: %v", hostedClusterKubeconfigFile)
+	e2e.Logf("if you want the hostedcluster controlplane namespace, you can get it by combining %s and %s with -", hostedclusterNS, clusterName)
+	return clusterName, hostedClusterKubeconfigFile, hostedclusterNS
+}
+
+// Is3MasterNoDedicatedWorkerNode returns true if the cluster has three master and three worker nodes but no dedicated worker node.
+func Is3MasterNoDedicatedWorkerNode(oc *CLI) bool {
+	// In a compact cluster the three masters also act as the three workers.
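+	// The matchCount loop below counts how many worker names also appear in the master list; exactly 3 matches means no dedicated workers.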
+	masterNodes, err := GetClusterNodesBy(oc, "master")
+	o.Expect(err).NotTo(o.HaveOccurred())
+	workerNodes, err := GetClusterNodesBy(oc, "worker")
+	o.Expect(err).NotTo(o.HaveOccurred())
+	if len(masterNodes) != 3 || len(workerNodes) != 3 {
+		return false
+	}
+
+	matchCount := 0
+	for i := 0; i < len(workerNodes); i++ {
+		for j := 0; j < len(masterNodes); j++ {
+			if workerNodes[i] == masterNodes[j] {
+				matchCount++
+			}
+		}
+	}
+	return matchCount == 3
+}
+
+// converseInstanceType swaps sSubString and tSubString in the given instance type, whichever is present
+func converseInstanceType(currentInstanceType string, sSubString string, tSubString string) string {
+	var expectedInstanceType string
+	if strings.Contains(currentInstanceType, sSubString) {
+		expectedInstanceType = strings.ReplaceAll(currentInstanceType, sSubString, tSubString)
+	} else if strings.Contains(currentInstanceType, tSubString) {
+		expectedInstanceType = strings.ReplaceAll(currentInstanceType, tSubString, sSubString)
+	}
+	return expectedInstanceType
+}
+
+// SpecifyMachinesetWithDifferentInstanceType used for specifying an instance type that differs from the default one
+func SpecifyMachinesetWithDifferentInstanceType(oc *CLI) string {
+	var expectedInstanceType string
+	// Check the cloud provider name
+	iaasPlatform := CheckPlatform(oc)
+
+	// Get the instance type of the first machineset
+	currentInstanceType := GetMachineSetInstanceType(oc)
+
+	switch iaasPlatform {
+	case "aws":
+		// we use m6i.2xlarge as the default instance type; if the current machineset instanceType is "m6i.2xlarge", we use "m6i.xlarge"
+		expectedInstanceType = converseInstanceType(currentInstanceType, "2xlarge", "xlarge")
+		if len(expectedInstanceType) == 0 {
+			expectedInstanceType = "m6i.xlarge"
+		}
+	case "azure":
+		// we use Standard_DS3_v2 as the default instance type; if the current machineset instanceType is "Standard_DS3_v2", we use "Standard_DS2_v2"
+		expectedInstanceType = converseInstanceType(currentInstanceType, "DS3_v2", "DS2_v2")
+		if len(expectedInstanceType) == 0 {
+			expectedInstanceType = "Standard_DS2_v2"
+		}
+	case "gcp":
+		// we use n1-standard-4 as the default instance type; if the current machineset instanceType is "n1-standard-4", we use "n1-standard-2"
+		expectedInstanceType = converseInstanceType(currentInstanceType, "standard-4", "standard-2")
+		if len(expectedInstanceType) == 0 {
+			expectedInstanceType = "n1-standard-2"
+		}
+		e2e.Logf("currentInstanceType is %v, expectedInstanceType is %v", currentInstanceType, expectedInstanceType)
+	case "ibmcloud":
+		// we use bx2-4x16 as the default instance type; if the current machineset instanceType is "bx2-4x16", we use "bx2d-2x8"
+		expectedInstanceType = converseInstanceType(currentInstanceType, "4x16", "2x8")
+		if len(expectedInstanceType) == 0 {
+			expectedInstanceType = "bx2d-2x8"
+		}
+	case "alibabacloud":
+		// we use ecs.g6.xlarge as the default instance type; if the current machineset instanceType is "ecs.g6.xlarge", we use "ecs.g6.large"
+		expectedInstanceType = converseInstanceType(currentInstanceType, "xlarge", "large")
+		if len(expectedInstanceType) == 0 {
+			expectedInstanceType = "ecs.g6.large"
+		}
+	default:
+		e2e.Logf("Unsupported cloud provider specified, please check")
+		expectedInstanceType = ""
+	}
+	e2e.Logf("currentInstanceType is %v, expectedInstanceType is %v", currentInstanceType, expectedInstanceType)
+	return expectedInstanceType
+}
+
+// GetImagestreamImageName returns an imagestream's image repository name
+func GetImagestreamImageName(oc *CLI, imagestreamName string) string {
+	var imageName string
+
+	// Ignore the NotFound error; this returns an empty string so the caller can use another image if the imagestream doesn't exist
+	imageRepos, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("is", imagestreamName, "-n", "openshift", "-ojsonpath={.status.dockerImageRepository}").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+	if !strings.Contains(imageRepos, "NotFound") {
+		imageTags, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("is", imagestreamName, "-n", "openshift", "-ojsonpath={.status.tags[*].tag}").Output()
+		o.Expect(err).NotTo(o.HaveOccurred())
+
+		imageTagList := strings.Split(imageTags, " ")
+		// Because some imagestream tags are broken, find which image is available in a disconnected cluster.
+		for i := 0; i < len(imageTagList); i++ {
+			jsonpathStr := fmt.Sprintf(`-ojsonpath='{.status.tags[%v].conditions[?(@.status=="False")]}{.status.tags[%v].tag}'`, i, i)
+			stdOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("is", imagestreamName, "-n", "openshift", jsonpathStr).Output()
+			o.Expect(err).NotTo(o.HaveOccurred())
+			o.Expect(stdOut).NotTo(o.BeEmpty())
+			e2e.Logf("stdOut is: %v", stdOut)
+			if !strings.Contains(stdOut, "NotFound") {
+				imageTag := strings.ReplaceAll(stdOut, "'", "")
+				imageName = imageRepos + ":" + imageTag
+				break
+			}
+		}
+	}
+	return imageName
+}
+
+// GetRelicasByMachinesetName used for getting the replica count by machineset name
+func GetRelicasByMachinesetName(oc *CLI, machinesetName string) string {
+	machinesetReplicas, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("machineset", machinesetName, "-n", "openshift-machine-api", `-ojsonpath="{.spec.replicas}"`).Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	o.Expect(machinesetReplicas).NotTo(o.BeEmpty())
+
+	e2e.Logf("machinesetReplicas is %v in GetRelicasByMachinesetName", machinesetReplicas)
+	return machinesetReplicas
+}
+
+// CountLinuxWorkerNodeNumByOS counts the Linux worker nodes (rhcos + rhel)
+func CountLinuxWorkerNodeNumByOS(oc *CLI) (linuxNum int) {
+	// Count the rhcos and rhel worker nodes
+	rhcosWorkerNodes, err := GetAllWorkerNodesByOSID(oc, "rhcos")
+	o.Expect(err).NotTo(o.HaveOccurred())
+	rhelWorkerNodes, err := GetAllWorkerNodesByOSID(oc, "rhel")
+	o.Expect(err).NotTo(o.HaveOccurred())
+	e2e.Logf("rhcosWorkerNodes is:%v", rhcosWorkerNodes[:])
+	e2e.Logf("rhelWorkerNodes is:%v", rhelWorkerNodes[:])
+	rhcosNum := len(rhcosWorkerNodes)
+	rhelNum := len(rhelWorkerNodes)
+	e2e.Logf("rhcos node is:%v, rhel node is %v", rhcosNum, rhelNum)
+	return rhcosNum + rhelNum
+}
+
+// ShowSystemctlPropertyValueOfServiceUnitByName shows the value of a systemd service unit property on the given node
+func ShowSystemctlPropertyValueOfServiceUnitByName(oc *CLI, tunedNodeName string, ntoNamespace string, serviceUnit string, propertyName string) string {
+	var (
+		propertyValue string
+		err           error
+	)
+	// Example:
+	// Show all properties by systemctl show kubelet
+	// ExecMainStartTimestamp=Fri 2024-09-06 11:16:00 UTC
+	// ExecMainStartTimestampMonotonic=27894650
+	allProperties, err := DebugNodeWithOptionsAndChroot(oc, tunedNodeName, []string{"-q", "--to-namespace=" + ntoNamespace}, "systemctl", "show", serviceUnit)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	if strings.Contains(allProperties, propertyName) {
+		propertyValue, err = DebugNodeWithOptionsAndChroot(oc, tunedNodeName, []string{"-q", "--to-namespace=" + ntoNamespace}, "systemctl", "show", "-p", propertyName, serviceUnit)
+		o.Expect(err).NotTo(o.HaveOccurred())
+	} else {
+		o.Expect(strings.Contains(allProperties, propertyName)).To(o.BeTrue())
+	}
+	// It returns a string like ExecMainStartTimestampMonotonic=27894650
+	return strings.Trim(propertyValue, "\n")
+}
+
+// GetSystemctlServiceUnitTimestampByPropertyNameWithMonotonic extracts the monotonic timestamp value from a property string
+func GetSystemctlServiceUnitTimestampByPropertyNameWithMonotonic(propertyValue string) int {
+	var (
+		serviceUnitTimestamp    int
+		serviceUnitTimestampArr []string
+		err                     error
+	)
+
+	// Extract 27871378 from AssertTimestampMonotonic=27871378
+	serviceUnitTimestampArr = strings.Split(propertyValue, "=")
+	if len(serviceUnitTimestampArr) > 1 && strings.Contains(propertyValue, "Monotonic") {
+		serviceUnitTimestamp, err = strconv.Atoi(serviceUnitTimestampArr[1])
+		e2e.Logf("the serviceUnitTimestamp is [ %v ]", serviceUnitTimestamp)
+		o.Expect(err).NotTo(o.HaveOccurred())
+	}
+	return serviceUnitTimestamp
+}
+
+// CPUManagerStatebyNode returns the default cpuset and the cpusets assigned to the named container, read from /var/lib/kubelet/cpu_manager_state
+func CPUManagerStatebyNode(oc *CLI, namespace string, nodeName string, ContainerName string) (string, string) {
+	var (
+		PODCPUs string
+		CPUNums string
+	)
+
+	cpuManagerStateStdOut, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("node/"+nodeName, "-n", namespace, "-q", "--", "chroot", "host", "cat", "/var/lib/kubelet/cpu_manager_state").Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	o.Expect(cpuManagerStateStdOut).NotTo(o.BeEmpty())
+
+	var cpuManagerStateInfo map[string]interface{}
+	err = json.Unmarshal([]byte(cpuManagerStateStdOut), &cpuManagerStateInfo)
+	o.Expect(err).NotTo(o.HaveOccurred())
+
+	defaultCpuSet := fmt.Sprint(cpuManagerStateInfo["defaultCpuSet"])
+	o.Expect(defaultCpuSet).NotTo(o.BeEmpty())
+
+	Entries := fmt.Sprint(cpuManagerStateInfo["entries"])
+	o.Expect(Entries).NotTo(o.BeEmpty())
+
+	PODUUIDMapCPUs := strings.Split(Entries, " ")
+	Len := len(PODUUIDMapCPUs)
+	for i := 0; i < Len; i++ {
+		if strings.Contains(PODUUIDMapCPUs[i], ContainerName) {
+			PODUUIDMapCPU := strings.Split(PODUUIDMapCPUs[i], ":")
+			CPUNums = strings.Trim(PODUUIDMapCPU[len(PODUUIDMapCPU)-1], "]")
+			PODCPUs += CPUNums + " "
+		}
+	}
+	return defaultCpuSet, PODCPUs
+}
+
+// GetContainerIDByPODName returns the container ID of the etcd container in the given pod
+func GetContainerIDByPODName(oc *CLI, podName string, namespace string) string {
+	var containerID string
+	containerIDStdOut, err := oc.AsAdmin().WithoutNamespace().Run("get").Args("pod", podName, "-n", namespace, `-ojsonpath='{.status.containerStatuses[?(@.name=="etcd")].containerID}'`).Output()
+	o.Expect(err).NotTo(o.HaveOccurred())
+	o.Expect(containerIDStdOut).NotTo(o.BeEmpty())
+
+	containerIDArr := strings.Split(containerIDStdOut, "/")
+	Len := len(containerIDArr)
+	if Len > 0 {
+		containerID = containerIDArr[Len-1]
+	}
+	return containerID
+}
+
+// GetPODCPUSet returns the cpuset.cpus value from the pod container's cgroup on the node
+func GetPODCPUSet(oc *CLI, namespace string, nodeName string, containerID string) string {
+	podCPUSetStdDir, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("node/"+nodeName, "-n", namespace, "-q", "--", "chroot", "host", "find", "/sys/fs/cgroup/", "-name", "*crio-"+containerID+"*").Output()
+	e2e.Logf("The podCPUSetStdDir is [ %v ]", podCPUSetStdDir)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	o.Expect(podCPUSetStdDir).NotTo(o.BeEmpty())
+	podCPUSet, err := oc.AsAdmin().WithoutNamespace().Run("debug").Args("node/"+nodeName, "-n", namespace, "-q", "--", "chroot", "host", "cat", podCPUSetStdDir+"/cpuset.cpus").Output()
+	e2e.Logf("The podCPUSet is [ %v ]", podCPUSet)
+	o.Expect(err).NotTo(o.HaveOccurred())
+	o.Expect(podCPUSet).NotTo(o.BeEmpty())
+	return podCPUSet
+}
+
+// ImplStringArrayContains checks whether the given string is in the string array
+func ImplStringArrayContains(stringArray []string, name string) bool {
+	// iterate over the array and compare the given string to each element
+	for _, value := range stringArray {
+		if value == name {
+			return true
+		}
+	}
+	return false
+}
diff --git a/test/util/randstr.go b/test/util/randstr.go
new file mode 100644
index 000000000..c15b447eb
--- /dev/null
+++ b/test/util/randstr.go
@@ -0,0 +1,45 @@
+package util
+
+import (
+	"math/rand"
+	"time"
+	"unsafe"
+)
+
+const (
+	// 6 bits to represent a letter index
+	letterIDBits = 6
+	// All 1-bits, as many as letterIDBits
+	letterIDMask = 1<<letterIDBits - 1
+	// Number of letter indices fitting in 63 bits
+	letterIDMax = 63 / letterIDBits
+)
+
+var src = rand.NewSource(time.Now().UnixNano())
+
+// GetRandomString returns an 8-character random string of lowercase letters and digits
+func GetRandomString() string {
+	s := "abcdefghijklmnopqrstuvwxyz0123456789"
+	b := make([]byte, 8)
+	for i, cache, remain := len(b)-1, src.Int63(), letterIDMax; i >= 0; {
+		if remain == 0 {
+			cache, remain = src.Int63(), letterIDMax
+		}
+		if idx := int(cache & letterIDMask); idx < len(s) {
+			b[i] = s[idx]
+			i--
+		}
+		cache >>= letterIDBits
+		remain--
+	}
+	return *(*string)(unsafe.Pointer(&b))
+}
diff --git a/test/util/release.go b/test/util/release.go
new file mode 100644
index 000000000..1f1dc436e
--- /dev/null
+++ b/test/util/release.go
@@ -0,0 +1,168 @@
+package util
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"os/exec"
+
+	"github.com/tidwall/gjson"
+
+	"github.com/coreos/stream-metadata-go/stream"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/yaml"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+type CoreOSImageArch string
+
+const (
+	CoreOSBootImagesFile                      = "0000_50_installer_coreos-bootimages.yaml"
+	CoreOSBootImageArchX86_64 CoreOSImageArch = "x86_64"
+
+	ReleaseImageLatestEnv = "RELEASE_IMAGE_LATEST"
+)
+
+func (a CoreOSImageArch) String() string {
+	return string(a)
+}
+
+// ExtractCoreOSBootImagesConfigMap extracts the CoreOS boot images ConfigMap from the given release image
+func ExtractCoreOSBootImagesConfigMap(oc *CLI, releaseImage, pullSecretFile string) (*corev1.ConfigMap, error) {
+	stdout, _, err := oc.AsAdmin().WithoutNamespace().Run("adm", "release", "extract").Args(releaseImage, "--file", CoreOSBootImagesFile, "-a", pullSecretFile).Outputs()
+	if err != nil {
+		return nil, fmt.Errorf("failed to extract CoreOS boot images from release image: %v", err)
+	}
+
+	var coreOSBootImagesCM corev1.ConfigMap
+	if err = yaml.Unmarshal([]byte(stdout), &coreOSBootImagesCM); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal CoreOS boot images file content: %v", err)
+	}
+
+	return &coreOSBootImagesCM, nil
+}
+
+// GetRHCOSImageURLForAzureDisk retrieves the RHCOS URL for the specified architecture's Azure disk image
+func GetRHCOSImageURLForAzureDisk(oc *CLI, releaseImage string, pullSecretFile string, arch CoreOSImageArch) (string, error) {
+	coreOSBootImagesCM, err := ExtractCoreOSBootImagesConfigMap(oc, releaseImage, pullSecretFile)
+	if err != nil {
+		return "", fmt.Errorf("failed to extract CoreOS boot images ConfigMap: %v", err)
+	}
+
+	var coreOSBootImagesStream stream.Stream
+	if err = json.Unmarshal([]byte(coreOSBootImagesCM.Data["stream"]), &coreOSBootImagesStream); err != nil {
+		return "", fmt.Errorf("failed to unmarshal CoreOS bootimages stream data: %v", err)
+	}
+
+	return coreOSBootImagesStream.Architectures[arch.String()].RHELCoreOSExtensions.AzureDisk.URL, nil
+}
+
+func GetLatestReleaseImageFromEnv() string {
+	return os.Getenv(ReleaseImageLatestEnv)
+}
+
+// GetLatest4StableImage gets the latest 4-stable OCP image from the releasestream link
+// Returns an OCP image, for example quay.io/openshift-release-dev/ocp-release:4.11.0-fc.0-x86_64
+func GetLatest4StableImage() (string, error) {
+	outputCmd, err := exec.Command("bash", "-c", "curl -s -k https://amd64.ocp.releases.ci.openshift.org/api/v1/releasestream/4-stable/latest").Output()
+	if err != nil {
+		e2e.Logf("Encountered err: %v when trying to curl the releasestream page", err)
+		return "", err
+	}
+	latestImage := gjson.Get(string(outputCmd), `pullSpec`).String()
+	e2e.Logf("The latest 4-stable OCP image is %s", latestImage)
+	return latestImage, nil
+}
+
+// GetLatest4StableImageByStream gets the latest 4-stable OCP image from a specified releasestream link
+// GetLatest4StableImageByStream("multi", "4-stable-multi/latest?in=>4.16.0-0+<4.17.0-0")
+// GetLatest4StableImageByStream("amd64", "4-stable/latest")
+func GetLatest4StableImageByStream(arch string, stream string) (latestImage string, err error) {
+	url := fmt.Sprintf("https://%s.ocp.releases.ci.openshift.org/api/v1/releasestream/%s", arch, stream)
+	var resp *http.Response
+	var body []byte
+	resp, err = http.Get(url)
+	if err != nil {
+		err = fmt.Errorf("fail to get url %s, error: %v", url, err)
+		return "", err
+	}
+	defer resp.Body.Close()
+	body, err = io.ReadAll(resp.Body)
+	if err != nil {
+		err = fmt.Errorf("fail to parse the result, error: %v", err)
+		return "", err
+	}
+	latestImage = gjson.Get(string(body), `pullSpec`).String()
+	e2e.Logf("The latest 4-stable OCP image is %s", latestImage)
+	return latestImage, err
+}
+
+// GetLatestNightlyImage gets the latest nightly OCP image from the releasestream link
+// Input parameter release: OCP release version such as 4.19, 4.18, ..., 4.6
+// Returns an OCP image
+func GetLatestNightlyImage(release string) (string, error) {
+	var url string
+	switch release {
+	case "4.19", "4.18", "4.17", "4.16", "4.15", "4.14", "4.13", "4.12", "4.11", "4.10", "4.9", "4.8", "4.7", "4.6":
+		url = "https://amd64.ocp.releases.ci.openshift.org/api/v1/releasestream/" + release + ".0-0.nightly/latest"
+	default:
+		e2e.Logf("Inputted release version %s is not supported. Only versions from 4.19 to 4.6 are supported.", release)
+		return "", errors.New("not supported version of payload")
+	}
+	outputCmd, err := exec.Command("bash", "-c", "curl -s -k "+url).Output()
+	if err != nil {
+		e2e.Logf("Encountered err: %v when trying to curl the releasestream page", err)
+		return "", err
+	}
+	latestImage := gjson.Get(string(outputCmd), `pullSpec`).String()
+	e2e.Logf("The latest nightly OCP image for %s is: %s", release, latestImage)
+	return latestImage, nil
+}
+
+// GetLatestImage retrieves the pull spec of the latest image satisfying the arch - product - stream combination.
+// arch = "amd64", "arm64", "ppc64le", "s390x", "multi"
+// product = "ocp", "origin" (i.e. okd, which only supports the amd64 architecture)
+// Possible values for the stream parameter depend on arch and product.
+// See https://docs.ci.openshift.org/docs/getting-started/useful-links/#services for relevant release status pages.
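+//
+// The endpoint responds with JSON whose pullSpec field holds the image pull spec, e.g.
+// {"name": "4.14.0-0.nightly-...", "pullSpec": "registry.ci.openshift.org/ocp/release@sha256:..."} (shape illustrative).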
+//
+// Examples:
+// GetLatestImage("amd64", "ocp", "4.14.0-0.nightly")
+// GetLatestImage("arm64", "ocp", "4.14.0-0.nightly-arm64")
+// GetLatestImage("amd64", "origin", "4.14.0-0.okd")
+func GetLatestImage(arch, product, stream string) (string, error) {
+	switch arch {
+	case "amd64", "arm64", "ppc64le", "s390x", "multi":
+	default:
+		return "", fmt.Errorf("unsupported architecture %v", arch)
+	}
+
+	switch product {
+	case "ocp", "origin":
+	default:
+		return "", fmt.Errorf("unsupported product %v", product)
+	}
+
+	switch {
+	case product == "ocp":
+	case product == "origin" && arch == "amd64":
+	default:
+		return "", fmt.Errorf("the product - architecture combination: %v - %v is not supported", product, arch)
+	}
+
+	url := fmt.Sprintf("https://%v.%v.releases.ci.openshift.org/api/v1/releasestream/%v/latest",
+		arch, product, stream)
+	stdout, err := exec.Command("bash", "-c", "curl -s -k "+url).Output()
+	if err != nil {
+		return "", err
+	}
+	if !gjson.ValidBytes(stdout) {
+		return "", errors.New("curl does not return a valid json")
+	}
+	latestImage := gjson.GetBytes(stdout, "pullSpec").String()
+	e2e.Logf("Found latest image %v for architecture %v, product %v and stream %v", latestImage, arch, product, stream)
+	return latestImage, nil
+}
diff --git a/test/util/resource_op.go b/test/util/resource_op.go
new file mode 100644
index 000000000..28409f92e
--- /dev/null
+++ b/test/util/resource_op.go
@@ -0,0 +1,292 @@
+package util
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	o "github.com/onsi/gomega"
+	"k8s.io/apimachinery/pkg/util/wait"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+// DeleteLabelsFromSpecificResource deletes the custom labels from the specific resource
+func DeleteLabelsFromSpecificResource(oc *CLI, resourceKindAndName string, resourceNamespace string, labelNames ...string) (string, error) {
+	var cargs []string
+	if resourceNamespace != "" {
+		cargs = append(cargs, "-n", resourceNamespace)
+	}
+	cargs = append(cargs, resourceKindAndName)
+	cargs = append(cargs, StringsSliceElementsAddSuffix(labelNames, "-")...)
+	return oc.AsAdmin().WithoutNamespace().Run("label").Args(cargs...).Output()
+}
+
+// AddLabelsToSpecificResource adds the custom labels to the specific resource
+func AddLabelsToSpecificResource(oc *CLI, resourceKindAndName string, resourceNamespace string, labels ...string) (string, error) {
+	var cargs []string
+	if resourceNamespace != "" {
+		cargs = append(cargs, "-n", resourceNamespace)
+	}
+	cargs = append(cargs, resourceKindAndName)
+	cargs = append(cargs, labels...)
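+	// --overwrite allows the label command to update values of labels that already exist on the resource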
+	cargs = append(cargs, "--overwrite")
+	return oc.AsAdmin().WithoutNamespace().Run("label").Args(cargs...).Output()
+}
+
+// GetResourceSpecificLabelValue gets the specified label value from the resource and label name
+func GetResourceSpecificLabelValue(oc *CLI, resourceKindAndName string, resourceNamespace string, labelName string) (string, error) {
+	var cargs []string
+	if resourceNamespace != "" {
+		cargs = append(cargs, "-n", resourceNamespace)
+	}
+	cargs = append(cargs, resourceKindAndName, "-o=jsonpath={.metadata.labels."+labelName+"}")
+	return oc.AsAdmin().WithoutNamespace().Run("get").Args(cargs...).Output()
+}
+
+// AddAnnotationsToSpecificResource adds the custom annotations to the specific resource
+func AddAnnotationsToSpecificResource(oc *CLI, resourceKindAndName, resourceNamespace string, annotations ...string) (string, error) {
+	var cargs []string
+	if resourceNamespace != "" {
+		cargs = append(cargs, "-n", resourceNamespace)
+	}
+	cargs = append(cargs, resourceKindAndName)
+	cargs = append(cargs, annotations...)
+	cargs = append(cargs, "--overwrite")
+	return oc.AsAdmin().WithoutNamespace().Run("annotate").Args(cargs...).Output()
+}
+
+// RemoveAnnotationFromSpecificResource removes the specified annotation from the resource
+func RemoveAnnotationFromSpecificResource(oc *CLI, resourceKindAndName, resourceNamespace string, annotationName string) (string, error) {
+	var cargs []string
+	if resourceNamespace != "" {
+		cargs = append(cargs, "-n", resourceNamespace)
+	}
+	cargs = append(cargs, resourceKindAndName)
+	cargs = append(cargs, annotationName+"-")
+	return oc.AsAdmin().WithoutNamespace().Run("annotate").Args(cargs...).Output()
+}
+
+// GetAnnotationsFromSpecificResource gets the annotations from the specific resource
+func GetAnnotationsFromSpecificResource(oc *CLI, resourceKindAndName, resourceNamespace string) ([]string, error) {
+	var cargs []string
+	if resourceNamespace != "" {
+		cargs = append(cargs, "-n", resourceNamespace)
+	}
+	cargs = append(cargs, resourceKindAndName, "--list")
+	annotationsStr, getAnnotationsErr := oc.AsAdmin().WithoutNamespace().Run("annotate").Args(cargs...).Output()
+	if getAnnotationsErr != nil {
+		e2e.Logf(`Failed to get annotations from %s in namespace %s: "%v"`, resourceKindAndName, resourceNamespace, getAnnotationsErr)
+	}
+	return strings.Fields(annotationsStr), getAnnotationsErr
+}
+
+// IsSpecifiedAnnotationKeyExist judges whether the specified annotationKey exists on the resource
+func IsSpecifiedAnnotationKeyExist(oc *CLI, resourceKindAndName, resourceNamespace, annotationKey string) bool {
+	resourceAnnotations, getResourceAnnotationsErr := GetAnnotationsFromSpecificResource(oc, resourceKindAndName, resourceNamespace)
+	o.Expect(getResourceAnnotationsErr).NotTo(o.HaveOccurred())
+	isAnnotationKeyExist, _ := StringsSliceElementsHasPrefix(resourceAnnotations, annotationKey+"=", true)
+	return isAnnotationKeyExist
+}
+
+// StringsSliceContains judges whether the strings slice contains a specific element, returning a bool and the first matched index
+// If there is no match it returns (false, 0)
+func StringsSliceContains(stringsSlice []string, element string) (bool, int) {
+	for index, strElement := range stringsSlice {
+		if strElement == element {
+			return true, index
+		}
+	}
+	return false, 0
+}
+
+// StringsSliceElementsHasPrefix judges whether the strings slice contains an element which has the specific prefix
+// returns a bool and the first matched index
+// sequential order: -> sequentialFlag: "true"
+// reverse order: -> sequentialFlag: "false"
sequentialFlag: "false" +// If no matched return (false, 0) +func StringsSliceElementsHasPrefix(stringsSlice []string, elementPrefix string, sequentialFlag bool) (bool, int) { + if len(stringsSlice) == 0 { + return false, 0 + } + if sequentialFlag { + for index, strElement := range stringsSlice { + if strings.HasPrefix(strElement, elementPrefix) { + return true, index + } + } + } else { + for i := len(stringsSlice) - 1; i >= 0; i-- { + if strings.HasPrefix(stringsSlice[i], elementPrefix) { + return true, i + } + } + } + return false, 0 +} + +// StringsSliceElementsAddSuffix returns a new string slice all elements with the specific suffix added +func StringsSliceElementsAddSuffix(stringsSlice []string, suffix string) []string { + if len(stringsSlice) == 0 { + return []string{} + } + var newStringsSlice = make([]string, 0, 10) + for _, element := range stringsSlice { + newStringsSlice = append(newStringsSlice, element+suffix) + } + return newStringsSlice +} + +const ( + AsAdmin = true + AsUser = false + WithoutNamespace = true + WthNamespace = false + Immediately = true + NotImmediately = false + AllowEmpty = true + NotAllowEmpty = false + Appear = true + Disappear = false +) + +// GetFieldWithJsonpath gets the field of the resource per jsonpath +// interval and timeout is the inveterl and timeout of Poll +// immediately means if it wait first interval and then get +// allowEmpty means if the result allow empty string +// asAdmin means oc.AsAdmin() or not +// withoutNamespace means oc.WithoutNamespace() or not. +// for example, it is to get clusterresource +// GetFieldWithJsonpath(oc, 3*time.Second, 150*time.Second, exutil.NotImmediately, exutil.NotAllowEmpty, exutil.AsAdmin, exutil.WithoutNamespace, "operator", name, "-o", "jsonpath={.status}") +// if you want to get ns resource, could be +// GetFieldWithJsonpath(oc, 3*time.Second, 150*time.Second, exutil.NotImmediately, exutil.NotAllowEmpty, exutil.AsAdmin, exutil.WithoutNamespace, "-n", ns, "pod", name, "-o", "jsonpath={.status}") +// or if the ns is same to oc.Namespace, could be +// GetFieldWithJsonpath(oc, 3*time.Second, 150*time.Second, exutil.NotImmediately, exutil.AllowEmpty, exutil.AsAdmin, exutil.WithoutNamespace, "pod", name, "-o", "jsonpath={.status}") +func GetFieldWithJsonpath(oc *CLI, interval, timeout time.Duration, immediately, allowEmpty, asAdmin, withoutNamespace bool, parameters ...string) (string, error) { + var result string + var err error + usingJsonpath := false + for _, parameter := range parameters { + if strings.Contains(parameter, "jsonpath") { + usingJsonpath = true + } + } + if !usingJsonpath { + return "", fmt.Errorf("you do not use jsonpath to get field") + } + errWait := wait.PollUntilContextTimeout(context.TODO(), interval, timeout, immediately, func(ctx context.Context) (bool, error) { + result, err = ocAction(oc, "get", asAdmin, withoutNamespace, parameters...) + if err != nil || (!allowEmpty && strings.TrimSpace(result) == "") { + e2e.Logf("output is %v, error is %v, and try next", result, err) + return false, nil + } + return true, nil + }) + e2e.Logf("$oc get %v, the returned resource:%v", parameters, result) + // replace errWait because it is always timeout if it happned with wait.Poll + if errWait != nil { + errWait = fmt.Errorf("can not get resource with %v", parameters) + } + return result, errWait +} + +// CheckAppearance check if the resource appears or not. 
+// interval and timeout is the inveterl and timeout of Poll +// immediately means if it wait first interval and then check +// asAdmin means oc.AsAdmin() or not +// withoutNamespace means oc.WithoutNamespace() or not. +// appear means expect appear or not +// for example, expect pod in ns appear +// CheckAppearance(oc, 4*time.Second, 200*time.Second, exutil.NotImmediately, exutil.AsAdmin, exutil.WithoutNamespace, exutil.Appear, "-n", ns, "pod" name) +// if you expect pod in ns disappear, could be +// CheckAppearance(oc, 4*time.Second, 200*time.Second, exutil.NotImmediately, exutil.AsAdmin, exutil.WithoutNamespace, exutil.Disappear, "-n", ns, "pod" name) +func CheckAppearance(oc *CLI, interval, timeout time.Duration, immediately, asAdmin, withoutNamespace, appear bool, parameters ...string) bool { + if !appear { + parameters = append(parameters, "--ignore-not-found") + } + err := wait.PollUntilContextTimeout(context.TODO(), interval, timeout, immediately, func(ctx context.Context) (bool, error) { + output, err := ocAction(oc, "get", asAdmin, withoutNamespace, parameters...) + if err != nil { + e2e.Logf("the get error is %v, and try next", err) + return false, nil + } + e2e.Logf("output: %v", output) + if !appear && strings.Compare(output, "") == 0 { + return true, nil + } + if appear && strings.Compare(output, "") != 0 && !strings.Contains(strings.ToLower(output), "no resources found") { + return true, nil + } + return false, nil + }) + return err == nil +} + +// CleanupResource cleanup one resouce and check if it is not found. +// interval and timeout is the inveterl and timeout of Poll to check if it is not found +// asAdmin means oc.AsAdmin() or not +// withoutNamespace means oc.WithoutNamespace() or not. +// for example, cleanup cluster level resource +// CleanupResource(oc, 4*time.Second, 160*time.Second, exutil.AsAdmin, exutil.WithoutNamespace, "operator.operators.operatorframework.io", operator.Name) +// cleanup ns resource +// CleanupResource(oc, 4*time.Second, 160*time.Second, exutil.AsAdmin, exutil.WithoutNamespace, "-n", ns, "pod" name) +func CleanupResource(oc *CLI, interval, timeout time.Duration, asAdmin, withoutNamespace bool, parameters ...string) { + output, err := ocAction(oc, "delete", asAdmin, withoutNamespace, parameters...) + if err != nil && (strings.Contains(output, "NotFound") || strings.Contains(output, "No resources found")) { + e2e.Logf("the resource is deleted already") + return + } + o.Expect(err).NotTo(o.HaveOccurred()) + + err = wait.PollUntilContextTimeout(context.TODO(), interval, timeout, false, func(ctx context.Context) (bool, error) { + output, err := ocAction(oc, "get", asAdmin, withoutNamespace, parameters...) 
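+		// A NotFound / "No resources found" response here means the deletion
+		// has completed, so report success.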
+		if err != nil && (strings.Contains(output, "NotFound") || strings.Contains(output, "No resources found")) {
+			e2e.Logf("the resource is deleted successfully")
+			return true, nil
+		}
+		return false, nil
+	})
+	AssertWaitPollNoErr(err, fmt.Sprintf("can not remove %v", parameters))
+}
+
+// ocAction executes a basic oc command with the requested admin and namespace context
+func ocAction(oc *CLI, action string, asAdmin, withoutNamespace bool, parameters ...string) (string, error) {
+	if asAdmin && withoutNamespace {
+		return oc.AsAdmin().WithoutNamespace().Run(action).Args(parameters...).Output()
+	}
+	if asAdmin && !withoutNamespace {
+		return oc.AsAdmin().Run(action).Args(parameters...).Output()
+	}
+	if !asAdmin && withoutNamespace {
+		return oc.WithoutNamespace().Run(action).Args(parameters...).Output()
+	}
+	if !asAdmin && !withoutNamespace {
+		return oc.Run(action).Args(parameters...).Output()
+	}
+	return "", nil
+}
+
+// WaitForResourceUpdate waits for the resourceVersion of a resource to be updated
+func WaitForResourceUpdate(ctx context.Context, oc *CLI, interval, timeout time.Duration, kindAndName, namespace, oldResourceVersion string) error {
+	args := []string{kindAndName}
+	if len(namespace) > 0 {
+		args = append(args, "-n", namespace)
+	}
+	args = append(args, "-o=jsonpath={.metadata.resourceVersion}")
+	return wait.PollUntilContextTimeout(ctx, interval, timeout, true, func(ctx context.Context) (done bool, err error) {
+		resourceVersion, _, err := oc.AsAdmin().WithoutNamespace().Run("get").Args(args...).Outputs()
+		if err != nil {
+			e2e.Logf("Error getting current resourceVersion: %v", err)
+			return false, nil
+		}
+		if len(resourceVersion) == 0 {
+			return false, errors.New("obtained empty resourceVersion")
+		}
+		if resourceVersion == oldResourceVersion {
+			e2e.Logf("resourceVersion unchanged, keep polling")
+			return false, nil
+		}
+		return true, nil
+	})
+}
diff --git a/test/util/rosa.go b/test/util/rosa.go
new file mode 100644
index 000000000..e916b2770
--- /dev/null
+++ b/test/util/rosa.go
@@ -0,0 +1,52 @@
+package util
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"strings"
+
+	g "github.com/onsi/ginkgo/v2"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+// IsROSA determines whether this is a ROSA env; for now only Prow is supported
+func IsROSA() bool {
+	_, err := os.Stat(os.Getenv("SHARED_DIR") + "/cluster-id")
+	if err != nil && !os.IsExist(err) {
+		return false
+	}
+	return len(os.Getenv("TEST_ROSA_TOKEN")) > 0
+}
+
+// ROSALogin performs a rosa login; it skips the test when the token is unset and fails it when the login errors
+func ROSALogin() {
+	e2e.Logf("ROSA login")
+	if len(os.Getenv("TEST_ROSA_TOKEN")) == 0 {
+		g.Skip("env TEST_ROSA_TOKEN not set")
+	}
+	cmd := fmt.Sprintf(`rosa login --env "%s" --token "%s"`, os.Getenv("TEST_ROSA_LOGIN_ENV"), os.Getenv("TEST_ROSA_TOKEN"))
+	_, err := exec.Command("bash", "-c", cmd).Output()
+	if err != nil {
+		e2e.Failf("rosa cli login error: %s", err.Error())
+	}
+}
+
+// GetROSAClusterID gets the cluster ID for a ROSA-created cluster
+func GetROSAClusterID() string {
+	return os.Getenv("CLUSTER_ID")
+}
+
+// IsROSACluster checks if the cluster is running on ROSA
+func IsROSACluster(oc *CLI) bool {
+	// get the cluster resource
+	out, err := oc.AsAdmin().Run("get").Args("infrastructures.config.openshift.io/cluster", "-o", `jsonpath='{.status.platformStatus.aws.resourceTags[?(@.key=="red-hat-clustertype")].value}'`).Output()
+	if err != nil {
+		e2e.Failf("get infrastructure resource failed: %v", err)
+	}
+	e2e.Logf("red-hat-clustertype is: %s", out)
+	// check if the cluster is running on ROSA
+	return strings.Contains(out, "rosa")
+}
diff --git a/test/util/rosacli/cluster_command_config.go b/test/util/rosacli/cluster_command_config.go
new file mode 100644
index 000000000..a2768fdd0
--- /dev/null
+++ b/test/util/rosacli/cluster_command_config.go
@@ -0,0 +1,88 @@
+package rosacli
+
+import (
+	"fmt"
+	"os"
+	"strings"
+)
+
+type Command interface {
+	GetFullCommand() string
+	GetFlagValue(flag string, flagWithValue bool) string
+	AddFlags(flags ...string)
+	ReplaceFlagValue(flags map[string]string)
+	DeleteFlag(flag string, flagWithValue bool) error
+}
+
+type command struct {
+	cmd string
+}
+
+// RetrieveClusterCreationCommand gets the rosa command used to create the cluster from ${SHARED_DIR}/create_cluster.sh
+func RetrieveClusterCreationCommand() (Command, error) {
+	sharedDIR := os.Getenv("SHARED_DIR")
+	filePath := sharedDIR + "/create_cluster.sh"
+	fileContents, err := os.ReadFile(filePath)
+	if err != nil {
+		return nil, err
+	}
+	cmd := &command{
+		cmd: strings.Trim(string(fileContents), "\n"),
+	}
+	return cmd, nil
+}
+
+func (c *command) GetFullCommand() string {
+	return c.cmd
+}
+
+// ReplaceFlagValue replaces the value of any flag in the command according to the flag-to-value map passed in
+func (c *command) ReplaceFlagValue(flags map[string]string) {
+	elements := strings.Split(c.cmd, " ")
+	for i, e := range elements {
+		if value, ok := flags[e]; ok {
+			elements[i+1] = value
+		}
+	}
+	c.cmd = strings.Join(elements, " ")
+}
+
+// DeleteFlag deletes the given flag from the command
+func (c *command) DeleteFlag(flag string, flagWithValue bool) error {
+	elements := strings.Split(c.cmd, " ")
+	for i, e := range elements {
+		if e == flag {
+			if flagWithValue {
+				elements = append(elements[:i], elements[i+2:]...)
+			} else {
+				elements = append(elements[:i], elements[i+1:]...)
+			}
+			c.cmd = strings.Join(elements, " ")
+			return nil
+		}
+	}
+	return fmt.Errorf("cannot find flag %s in command %s", flag, c.cmd)
+}
+
+// GetFlagValue gets the value of a flag from the command
+func (c *command) GetFlagValue(flag string, flagWithValue bool) string {
+	elements := strings.Split(c.cmd, " ")
+	for i, e := range elements {
+		if e == flag {
+			if flagWithValue {
+				return elements[i+1]
+			} else {
+				return ""
+			}
+		}
+	}
+	return ""
+}
+
+// AddFlags appends flags to the command
+func (c *command) AddFlags(flags ...string) {
+	for _, flag := range flags {
+		// combine the command with a space
+		c.cmd += " " + flag
+	}
+}
diff --git a/test/util/rosacli/cluster_config.go b/test/util/rosacli/cluster_config.go
new file mode 100644
index 000000000..70ab0b3d6
--- /dev/null
+++ b/test/util/rosacli/cluster_config.go
@@ -0,0 +1,138 @@
+package rosacli
+
+import (
+	"encoding/json"
+	"fmt"
+	"os"
+	"path"
+
+	logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
+)
+
+type Version struct {
+	ChannelGroup string `json:"channel_group,omitempty"`
+	RawID        string `json:"raw_id,omitempty"`
+}
+
+type Encryption struct {
+	KmsKeyArn            string `json:"kms_key_arn,omitempty"`
+	EtcdEncryptionKmsArn string `json:"etcd_encryption_kms_arn,omitempty"`
+}
+
+type Properties struct {
+	ProvisionShardID string `json:"provision_shard_id,omitempty"`
+}
+
+type Sts struct {
+	RoleArn             string `json:"role_arn,omitempty"`
+	SupportRoleArn      string `json:"support_role_arn,omitempty"`
+	WorkerRoleArn       string `json:"worker_role_arn,omitempty"`
+	ControlPlaneRoleArn string `json:"control_plane_role_arn,omitempty"`
+	OidcConfigID        string `json:"oidc_config_id,omitempty"`
+	OperatorRolesPrefix string `json:"operator_roles_prefix,omitempty"`
+}
+
+type AWS struct {
+	Sts Sts `json:"sts,omitempty"`
+}
+
+type Proxy struct {
+	Enabled         bool   `json:"enabled,omitempty"`
+	Http            string `json:"http,omitempty"`
+	Https           string `json:"https,omitempty"`
+	TrustBundleFile string `json:"trust_bundle_file,omitempty"`
+}
+
+type Subnets struct {
+	PrivateSubnetIds string `json:"private_subnet_ids,omitempty"`
+	PublicSubnetIds  string `json:"public_subnet_ids,omitempty"`
+}
+
+type Nodes struct {
+	Replicas    string `json:"replicas,omitempty"`
+	MinReplicas string `json:"min_replicas,omitempty"`
+	MaxReplicas string `json:"max_replicas,omitempty"`
+}
+
+type Autoscaling struct {
+	Enabled bool `json:"enabled,omitempty"`
+}
+
+type ClusterConfig struct {
+	DisableScpChecks          bool        `json:"disable_scp_checks,omitempty"`
+	DisableWorkloadMonitoring bool        `json:"disable_workload_monitoring,omitempty"`
+	EnableCustomerManagedKey  bool        `json:"enable_customer_managed_key,omitempty"`
+	EtcdEncryption            bool        `json:"etcd_encryption,omitempty"`
+	Fips                      bool        `json:"fips,omitempty"`
+	Hypershift                bool        `json:"hypershift,omitempty"`
+	MultiAZ                   bool        `json:"multi_az,omitempty"`
+	Private                   bool        `json:"private,omitempty"`
+	PrivateLink               bool        `json:"private_link,omitempty"`
+	Sts                       bool        `json:"sts,omitempty"`
+	AuditLogArn               string      `json:"audit_log_arn,omitempty"`
+	AvailabilityZones         string      `json:"availability_zones,omitempty"`
+	DefaultMpLabels           string      `json:"default_mp_labels,omitempty"`
+	Ec2MetadataHttpTokens     string      `json:"ec2_metadata_http_tokens,omitempty"`
+	Name                      string      `json:"name,omitempty"`
+	Region                    string      `json:"region,omitempty"`
+	Tags                      string      `json:"tags,omitempty"`
+	WorkerDiskSize            string      `json:"worker_disk_size,omitempty"`
+	Autoscaling               Autoscaling `json:"autoscaling,omitempty"`
+	Aws                       AWS         `json:"aws,omitempty"`
+	Encryption                Encryption  `json:"encryption,omitempty"`
+	Nodes                     Nodes       `json:"nodes,omitempty"`
+	Properties                Properties  `json:"properties,omitempty"`
+	Proxy                     Proxy       `json:"proxy,omitempty"`
+	Subnets                   Subnets     `json:"subnets,omitempty"`
+	Version                   Version     `json:"version,omitempty"`
+}
+
+func ParseClusterProfile() (*ClusterConfig, error) {
+	filePath := getClusterConfigFile()
+	// Load the JSON file
+	data, err := os.ReadFile(filePath)
+	if err != nil {
+		return nil, fmt.Errorf("error reading JSON file: %v", err)
+	}
+
+	// Parse the JSON data into the ClusterConfig struct
+	var config ClusterConfig
+	err = json.Unmarshal(data, &config)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing JSON data: %v", err)
+	}
+
+	return &config, nil
+}
+
+// Get the cluster config file
+func getClusterConfigFile() string {
+	sharedDir := os.Getenv("SHARED_DIR")
+	return path.Join(sharedDir, "cluster-config")
+}
+
+func GetClusterID() (clusterID string) {
+	clusterID = getClusterIDENVExisted()
+	if clusterID != "" {
+		return
+	}
+
+	if _, err := os.Stat(getClusterIDFile()); err != nil {
+		logger.Errorf("Cluster id file does not exist")
+		return ""
+	}
+	fileCont, _ := os.ReadFile(getClusterIDFile())
+	clusterID = string(fileCont)
+	return
+}
+
+// Get the cluster id file
+func getClusterIDFile() string {
+	sharedDir := os.Getenv("SHARED_DIR")
+	return path.Join(sharedDir, "cluster-id")
+}
+
+// Get the clusterID env.
+func getClusterIDENVExisted() string { + return os.Getenv("CLUSTER_ID") +} diff --git a/test/util/rosacli/cluster_service.go b/test/util/rosacli/cluster_service.go new file mode 100644 index 000000000..fb3335057 --- /dev/null +++ b/test/util/rosacli/cluster_service.go @@ -0,0 +1,246 @@ +package rosacli + +import ( + "bytes" + "strings" + + logger "github.com/openshift/openshift-tests-private/test/extended/util/logext" + "gopkg.in/yaml.v3" +) + +type ClusterService interface { + ResourcesCleaner + + DescribeCluster(clusterID string) (bytes.Buffer, error) + ReflectClusterDescription(result bytes.Buffer) (*ClusterDescription, error) + DescribeClusterAndReflect(clusterID string) (*ClusterDescription, error) + List() (bytes.Buffer, error) + CreateDryRun(clusterName string, flags ...string) (bytes.Buffer, error) + EditCluster(clusterID string, flags ...string) (bytes.Buffer, error) + DeleteUpgrade(flags ...string) (bytes.Buffer, error) + + IsHostedCPCluster(clusterID string) (bool, error) + IsSTSCluster(clusterID string) (bool, error) + IsPrivateCluster(clusterID string) (bool, error) + IsUsingReusableOIDCConfig(clusterID string) (bool, error) + GetClusterVersion(clusterID string) (Version, error) + IsBYOVPCCluster(clusterID string) (bool, error) +} + +type clusterService struct { + ResourcesService +} + +func NewClusterService(client *Client) ClusterService { + return &clusterService{ + ResourcesService: ResourcesService{ + client: client, + }, + } +} + +// Struct for the 'rosa describe cluster' output +type ClusterDescription struct { + Name string `yaml:"Name,omitempty"` + ID string `yaml:"ID,omitempty"` + ExternalID string `yaml:"External ID,omitempty"` + OpenshiftVersion string `yaml:"OpenShift Version,omitempty"` + ChannelGroup string `yaml:"Channel Group,omitempty"` + DNS string `yaml:"DNS,omitempty"` + AWSAccount string `yaml:"AWS Account,omitempty"` + AWSBillingAccount string `yaml:"AWS Billing Account,omitempty"` + APIURL string `yaml:"API URL,omitempty"` + ConsoleURL string `yaml:"Console URL,omitempty"` + Region string `yaml:"Region,omitempty"` + MultiAZ string `yaml:"Multi-AZ,omitempty"` + State string `yaml:"State,omitempty"` + Private string `yaml:"Private,omitempty"` + Created string `yaml:"Created,omitempty"` + DetailsPage string `yaml:"Details Page,omitempty"` + ControlPlane string `yaml:"Control Plane,omitempty"` + ScheduledUpgrade string `yaml:"Scheduled Upgrade,omitempty"` + InfraID string `yaml:"Infra ID,omitempty"` + AdditionalTrustBundle string `yaml:"Additional trust bundle,omitempty"` + Ec2MetadataHttpTokens string `yaml:"Ec2 Metadata Http Tokens,omitempty"` + Availability []map[string]string `yaml:"Availability,omitempty"` + Nodes []map[string]interface{} `yaml:"Nodes,omitempty"` + Network []map[string]string `yaml:"Network,omitempty"` + Proxy []map[string]string `yaml:"Proxy,omitempty"` + STSRoleArn string `yaml:"Role (STS) ARN,omitempty"` + // STSExternalID string `yaml:"STS External ID,omitempty"` + SupportRoleARN string `yaml:"Support Role ARN,omitempty"` + OperatorIAMRoles []string `yaml:"Operator IAM Roles,omitempty"` + InstanceIAMRoles []map[string]string `yaml:"Instance IAM Roles,omitempty"` + ManagedPolicies string `yaml:"Managed Policies,omitempty"` + UserWorkloadMonitoring string `yaml:"User Workload Monitoring,omitempty"` + FIPSMod string `yaml:"FIPS mode,omitempty"` + OIDCEndpointURL string `yaml:"OIDC Endpoint URL,omitempty"` + PrivateHostedZone []map[string]string `yaml:"Private Hosted Zone,omitempty"` + AuditLogForwarding string `yaml:"Audit Log 
Forwarding,omitempty"`
+	ProvisioningErrorMessage string              `yaml:"Provisioning Error Message,omitempty"`
+	ProvisioningErrorCode    string              `yaml:"Provisioning Error Code,omitempty"`
+	LimitedSupport           []map[string]string `yaml:"Limited Support,omitempty"`
+	AuditLogRoleARN          string              `yaml:"Audit Log Role ARN,omitempty"`
+	FailedInflightChecks     string              `yaml:"Failed Inflight Checks,omitempty"`
+}
+
+func (c *clusterService) DescribeCluster(clusterID string) (bytes.Buffer, error) {
+	describe := c.client.Runner.
+		Cmd("describe", "cluster").
+		CmdFlags("-c", clusterID)
+
+	return describe.Run()
+}
+
+func (c *clusterService) DescribeClusterAndReflect(clusterID string) (res *ClusterDescription, err error) {
+	output, err := c.DescribeCluster(clusterID)
+	if err != nil {
+		return nil, err
+	}
+	return c.ReflectClusterDescription(output)
+}
+
+// Parse the result of 'rosa describe cluster' into the ClusterDescription struct
+func (c *clusterService) ReflectClusterDescription(result bytes.Buffer) (res *ClusterDescription, err error) {
+	var data []byte
+	res = new(ClusterDescription)
+	theMap, err := c.client.
+		Parser.
+		TextData.
+		Input(result).
+		Parse().
+		TransformOutput(func(str string) (newStr string) {
+			// Apply a transformation to avoid an issue with the list of Inflight checks below.
+			// It will consider the whole list as a single YAML literal block
+			newStr = strings.Replace(str, "Failed Inflight Checks:", "Failed Inflight Checks: |", 1)
+			newStr = strings.ReplaceAll(newStr, "\t", " ")
+			return
+		}).
+		YamlToMap()
+	if err != nil {
+		return
+	}
+	data, err = yaml.Marshal(&theMap)
+	if err != nil {
+		return
+	}
+	err = yaml.Unmarshal(data, res)
+	return res, err
+}
+
+func (c *clusterService) List() (bytes.Buffer, error) {
+	list := c.client.Runner.Cmd("list", "cluster")
+	return list.Run()
+}
+
+func (c *clusterService) CreateDryRun(clusterName string, flags ...string) (bytes.Buffer, error) {
+	combflags := append([]string{"-c", clusterName, "--dry-run"}, flags...)
+	createDryRun := c.client.Runner.
+		Cmd("create", "cluster").
+		CmdFlags(combflags...)
+	return createDryRun.Run()
+}
+
+func (c *clusterService) EditCluster(clusterID string, flags ...string) (bytes.Buffer, error) {
+	combflags := append([]string{"-c", clusterID}, flags...)
+	editCluster := c.client.Runner.
+		Cmd("edit", "cluster").
+		CmdFlags(combflags...)
+	return editCluster.Run()
+}
+
+func (c *clusterService) DeleteUpgrade(flags ...string) (bytes.Buffer, error) {
+	deleteUpgrade := c.client.Runner.
+		Cmd("delete", "upgrade").
+		CmdFlags(flags...)
+	return deleteUpgrade.Run()
+}
+
+func (c *clusterService) CleanResources(clusterID string) (errors []error) {
+	logger.Debugf("Nothing related to the cluster needs to be cleaned up here")
+	return
+}
+
+// Check if the cluster is a hosted-cp cluster
+func (c *clusterService) IsHostedCPCluster(clusterID string) (bool, error) {
+	jsonData, err := c.getJSONClusterDescription(clusterID)
+	if err != nil {
+		return false, err
+	}
+	return jsonData.DigBool("hypershift", "enabled"), nil
+}
+
+// Check if the cluster is an STS cluster; a hosted-cp cluster is also treated as an STS cluster
+func (c *clusterService) IsSTSCluster(clusterID string) (bool, error) {
+	jsonData, err := c.getJSONClusterDescription(clusterID)
+	if err != nil {
+		return false, err
+	}
+	return jsonData.DigBool("aws", "sts", "enabled"), nil
+}
+
+// Check if the cluster is a private cluster
+func (c *clusterService) IsPrivateCluster(clusterID string) (bool, error) {
+	jsonData, err := c.getJSONClusterDescription(clusterID)
+	if err != nil {
+		return false, err
+	}
+	return jsonData.DigString("api", "listening") == "internal", nil
+}
+
+// Check if the cluster is using a reusable oidc-config
+func (c *clusterService) IsUsingReusableOIDCConfig(clusterID string) (bool, error) {
+	jsonData, err := c.getJSONClusterDescription(clusterID)
+	if err != nil {
+		return false, err
+	}
+	return jsonData.DigBool("aws", "sts", "oidc_config", "reusable"), nil
+}
+
+// Get the cluster version
+func (c *clusterService) GetClusterVersion(clusterID string) (clusterVersion Version, err error) {
+	var clusterConfig *ClusterConfig
+	clusterConfig, err = ParseClusterProfile()
+	if err != nil {
+		return
+	}
+
+	if clusterConfig.Version.RawID != "" {
+		clusterVersion = clusterConfig.Version
+	} else {
+		// Else retrieve it from the cluster description
+		var jsonData *jsonData
+		jsonData, err = c.getJSONClusterDescription(clusterID)
+		if err != nil {
+			return
+		}
+		clusterVersion = Version{
+			RawID:        jsonData.DigString("version", "raw_id"),
+			ChannelGroup: jsonData.DigString("version", "channel_group"),
+		}
+	}
+	return
+}
+
+func (c *clusterService) getJSONClusterDescription(clusterID string) (*jsonData, error) {
+	c.client.Runner.JsonFormat()
+	// restore the default format even when the describe fails
+	defer c.client.Runner.UnsetFormat()
+	output, err := c.DescribeCluster(clusterID)
+	if err != nil {
+		logger.Errorf("failed to describe cluster %s: %v", clusterID, err)
+		return nil, err
+	}
+	return c.client.Parser.JsonData.Input(output).Parse(), nil
+}
+
+// Check if the cluster is a byo vpc cluster
+func (c *clusterService) IsBYOVPCCluster(clusterID string) (bool, error) {
+	jsonData, err := c.getJSONClusterDescription(clusterID)
+	if err != nil {
+		return false, err
+	}
+	if len(jsonData.DigString("aws", "subnet_ids")) > 0 {
+		return true, nil
+	}
+	return false, nil
+}
diff --git a/test/util/rosacli/cluster_utils.go b/test/util/rosacli/cluster_utils.go
new file mode 100644
index 000000000..777f44ff0
--- /dev/null
+++ b/test/util/rosacli/cluster_utils.go
@@ -0,0 +1,27 @@
+package rosacli
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+const (
+	ClusterDescriptionComputeDesired    = "Compute (desired)"
+	ClusterDescriptionComputeAutoscaled = "Compute (autoscaled)"
+)
+
+func RetrieveDesiredComputeNodes(clusterDescription *ClusterDescription) (nodesNb int, err error) {
+	if clusterDescription.Nodes[0][ClusterDescriptionComputeDesired] != nil {
+		var isInt bool
+		nodesNb, isInt = clusterDescription.Nodes[0][ClusterDescriptionComputeDesired].(int)
+		if !isInt {
+			err = fmt.Errorf("'%v' is not an integer value", clusterDescription.Nodes[0][ClusterDescriptionComputeDesired])
+		}
+	} else {
+		// Fall back to the autoscaled range and use its lower bound
+		autoscaleInfo := clusterDescription.Nodes[0][ClusterDescriptionComputeAutoscaled].(string)
+		nodesNb, err = strconv.Atoi(strings.Split(autoscaleInfo, "-")[0])
+	}
+	return
+}
diff --git a/test/util/rosacli/cmd_client.go b/test/util/rosacli/cmd_client.go
new file mode 100644
index 000000000..3f7125284
--- /dev/null
+++ b/test/util/rosacli/cmd_client.go
@@ -0,0 +1,95 @@
+package rosacli
+
+import (
+	"bytes"
+	"errors"
+)
+
+type ResourcesCleaner interface {
+	CleanResources(clusterID string) []error
+}
+
+type
CLDNamedResourceService interface { + ResourcesCleaner + + List(clusterID string) (bytes.Buffer, error) + Describe(clusterID string, name string) (bytes.Buffer, error) + Create(clusterID string, name string, flags ...string) (bytes.Buffer, error) + Edit(clusterID string, name string, flags ...string) (bytes.Buffer, error) + Delete(clusterID string, name string) (bytes.Buffer, error) +} + +type ResourcesService struct { + client *Client +} + +type Client struct { + // Clients + Runner *runner + Parser *Parser + + // services + // Keep in alphabetical order + Cluster ClusterService + IDP IDPService + Ingress IngressService + KubeletConfig KubeletConfigService + MachinePool MachinePoolService + MachinePoolUpgrade MachinePoolUpgradeService + NetworkVerifier NetworkVerifierService + OCMResource OCMResourceService + TuningConfig TuningConfigService + User UserService + Version VersionService +} + +func NewClient() *Client { + runner := NewRunner() + parser := NewParser() + + client := &Client{ + Runner: runner, + Parser: parser, + } + + // Keep in alphabetical order + client.Cluster = NewClusterService(client) + client.IDP = NewIDPService(client) + client.Ingress = NewIngressService(client) + client.KubeletConfig = NewKubeletConfigService(client) + client.MachinePool = NewMachinePoolService(client) + client.MachinePoolUpgrade = NewMachinePoolUpgradeService(client) + client.NetworkVerifier = NewNetworkVerifierService(client) + client.OCMResource = NewOCMResourceService(client) + client.TuningConfig = NewTuningConfigService(client) + client.User = NewUserService(client) + client.Version = NewVersionService(client) + + return client +} + +func NewSensitiveClient() *Client { + client := NewClient() + client.Runner.Sensitive(true) + return client +} + +func (c *Client) CleanResources(clusterID string) error { + var errorList []error + + // Keep in logical order + errorList = append(errorList, c.Version.CleanResources(clusterID)...) + errorList = append(errorList, c.TuningConfig.CleanResources(clusterID)...) + errorList = append(errorList, c.MachinePoolUpgrade.CleanResources(clusterID)...) + errorList = append(errorList, c.MachinePool.CleanResources(clusterID)...) + errorList = append(errorList, c.Ingress.CleanResources(clusterID)...) + errorList = append(errorList, c.NetworkVerifier.CleanResources(clusterID)...) + errorList = append(errorList, c.KubeletConfig.CleanResources(clusterID)...) + errorList = append(errorList, c.User.CleanResources(clusterID)...) + errorList = append(errorList, c.IDP.CleanResources(clusterID)...) + errorList = append(errorList, c.OCMResource.CleanResources(clusterID)...) + errorList = append(errorList, c.Cluster.CleanResources(clusterID)...) + + return errors.Join(errorList...) 
+ +} diff --git a/test/util/rosacli/cmd_parser.go b/test/util/rosacli/cmd_parser.go new file mode 100644 index 000000000..880179e52 --- /dev/null +++ b/test/util/rosacli/cmd_parser.go @@ -0,0 +1,388 @@ +package rosacli + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "regexp" + "strconv" + "strings" + + logger "github.com/openshift/openshift-tests-private/test/extended/util/logext" + "gopkg.in/yaml.v3" +) + +type Parser struct { + JsonData *jsonData + TableData *tableData + TextData *textData +} + +func NewParser() *Parser { + jsonD := new(jsonData) + tableD := new(tableData) + textD := new(textData) + + p := &Parser{ + JsonData: jsonD, + TableData: tableD, + TextData: textD, + } + return p +} + +type jsonData struct { + input bytes.Buffer + output interface{} +} + +type tableData struct { + input bytes.Buffer + output []map[string]interface{} +} + +type textData struct { + input bytes.Buffer + output string + tip string +} + +// Read the cmd input and return the []byte array +func ReadLines(in bytes.Buffer) [][]byte { + lines := [][]byte{} + var line []byte + var err error + for err == nil { + line, err = in.ReadBytes('\n') + lines = append(lines, line) + } + return lines +} + +func (jd *jsonData) Input(input bytes.Buffer) *jsonData { + jd.input = input + return jd +} +func (jd *jsonData) Output() interface{} { + return jd.output +} + +func (td *textData) Input(input bytes.Buffer) *textData { + td.input = input + return td +} +func (td *textData) Output() string { + return td.output +} +func (td *textData) Tip() string { + return td.tip +} + +func (tad *tableData) Input(input bytes.Buffer) *tableData { + tad.input = input + return tad +} +func (tad *tableData) Output() []map[string]interface{} { + return tad.output +} + +// It extracts the useful result struct as a map and the message as a string +func (td *textData) Parse() *textData { + var tips bytes.Buffer + var results bytes.Buffer + + input := td.input + lines := ReadLines(input) + reg1 := regexp.MustCompile(`.*[IEW].*:\x20\S.*\s+\S+`) + reg2 := regexp.MustCompile("^```\\s*") + for _, line := range lines { + strline := string(line) + if reg2.FindString(strline) != "" { + continue + } + result := reg1.FindString(strline) + if result == "" { + results.WriteString(strline) + } else { + tips.WriteString(strline) + } + } + + td.output = results.String() + td.tip = tips.String() + return td +} + +// TransformOutput allows to transform the string before it would be parsed +func (td *textData) TransformOutput(transformFunc func(str string) string) *textData { + td.output = transformFunc(td.output) + return td +} + +func (td *textData) YamlToMap() (res map[string]interface{}, err error) { + res = make(map[string]interface{}) + + // Escape value(s) with quote due to https://github.com/go-yaml/yaml/issues/784 + // This happens sometimes in NodePool Message like `WaitingForAvailableMachines: InstanceNotReady,WaitingForNodeRef` + // This would fail to unmarshal due to the `: ` in the value ... + escapedOutput, err := escapeYamlStringValues(td.output) + if err != nil { + return + } + err = yaml.Unmarshal([]byte(escapedOutput), &res) + return +} + +// escapeYamlStringValues escapes yaml values if they contain any special characters: https://www.yaml.info/learn/quote.html#noplain +// Checks have to be completed on demande... 
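+//
+// An illustrative sketch of the intended behavior (assumed from the NodePool
+// message example mentioned above): the line
+//
+//	Message: WaitingForAvailableMachines: InstanceNotReady
+//
+// is rewritten to
+//
+//	Message: 'WaitingForAvailableMachines: InstanceNotReady'
+//
+// so the YAML parser reads the value as one quoted scalar instead of a nested
+// mapping.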
+func escapeYamlStringValues(input string) (string, error) { + var lines []string + scanner := bufio.NewScanner(strings.NewReader(input)) + for scanner.Scan() { + line := scanner.Text() + key, value, found := strings.Cut(line, ":") + if found { + value = strings.TrimSpace(value) + + // Checks to perform + if !strings.HasPrefix(value, "'") && strings.Contains(value, ": ") { + line = fmt.Sprintf("%s: '%s'", key, value) + } + } + lines = append(lines, line) + } + return strings.Join(lines, "\n"), scanner.Err() +} + +func (td *textData) JsonToMap() (map[string]interface{}, error) { + res := make(map[string]interface{}) + err := json.Unmarshal([]byte(td.output), &res) + return res, err +} + +// Parse the cmd table ouptut title +func tableTitle(titleLine []byte) map[int]string { + offsetMap := map[int]string{} + var elem []byte + startOffset := 0 + startCount := true + + for offset, char := range titleLine { + if offset == len(titleLine)-1 { + if len(elem) != 0 { + key := string(elem) + offsetMap[startOffset] = key + elem = []byte{} + startCount = true + } + } + if char != ' ' { + elem = append(elem, char) + if startCount { + startOffset = offset + startCount = false + } + } else { + if offset == 0 { + continue + } else if titleLine[offset-1] == ' ' { + if len(elem) != 0 { + key := string(elem) + offsetMap[startOffset] = key + elem = []byte{} + startCount = true + } + } else { + elem = append(elem, char) + } + } + } + for key, val := range offsetMap { + offsetMap[key] = strings.TrimRight(val, " ") + } + return offsetMap +} + +// Parse the cmd table ouptut line +func tableLine(line []byte, offsetMap map[int]string) map[string]interface{} { + resultMap := map[string]interface{}{} + var elemValue string + for offset, key := range offsetMap { + if offset >= len(line) { + resultMap[key] = "" + continue + } + for subOff, char := range line[offset:] { + if subOff == len(line[offset:])-1 { + elemValue = strings.TrimRight(string(line[offset:]), "\n") + elemValue = strings.TrimLeft(elemValue, " ") + resultMap[key] = elemValue + } + if char == ' ' && line[subOff+1+offset] == ' ' { + elemValue = strings.TrimRight(string(line[offset:subOff+offset]), " ") + elemValue = strings.TrimLeft(elemValue, " ") + resultMap[key] = elemValue + break + } + } + } + return resultMap +} + +// Parse the table output of the rosa cmd +func (tab *tableData) Parse() *tableData { + var results bytes.Buffer + input := tab.input + lines := ReadLines(input) + reg1 := regexp.MustCompile(`.*[IEW].*:\x20\S.*\s+\S+`) + reg2 := regexp.MustCompile("^```\\s*") + reg3 := regexp.MustCompile(`time=(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z) level=(\w+) msg=(.*)`) + for _, line := range lines { + strline := string(line) + if reg2.FindString(strline) != "" || reg3.FindString(strline) != "" { + continue + } + result := reg1.FindString(strline) + if result == "" { + results.WriteString(strline) + } + } + lines = ReadLines(results) + titleLine := lines[0] + offsetMap := tableTitle(titleLine) + + result := []map[string]interface{}{} + if len(lines) >= 2 { + for _, line := range lines[1 : len(lines)-1] { + result = append(result, tableLine(line, offsetMap)) + } + } + tab.output = result + return tab +} + +func (jd *jsonData) Parse() *jsonData { + var object map[string]interface{} + err := json.Unmarshal(jd.input.Bytes(), &object) + if err != nil { + logger.Errorf(" error in Parse is %v", err) + } + + jd.output = object + return jd +} + +func (jd *jsonData) ParseList(jsonStr string) *jsonData { + var object []map[string]interface{} + err := 
json.Unmarshal(jd.input.Bytes(), &object) + if err != nil { + logger.Errorf(" error in Parse is %v", err) + } + + jd.output = object + return jd +} + +func (jd *jsonData) DigObject(keys ...interface{}) interface{} { + value := dig(jd.output, keys) + return value +} +func (jd *jsonData) DigString(keys ...interface{}) string { + switch result := dig(jd.output, keys).(type) { + case nil: + return "" + case string: + return result + case fmt.Stringer: + return result.String() + default: + return fmt.Sprintf("%s", result) + } +} +func (jd *jsonData) DigBool(keys ...interface{}) bool { + switch result := dig(jd.output, keys).(type) { + case nil: + return false + case bool: + return result + case string: + b, err := strconv.ParseBool(result) + if err != nil { + return false + } + return b + default: + return false + } +} + +func (jd *jsonData) DigFloat(keys ...interface{}) float64 { + value := dig(jd.output, keys) + result := value.(float64) + return result +} + +func dig(object interface{}, keys []interface{}) interface{} { + if object == nil || len(keys) == 0 { + return nil + } + switch key := keys[0].(type) { + case string: + switch data := object.(type) { + case map[string]interface{}: + value := data[key] + if len(keys) == 1 { + return value + } + return dig(value, keys[1:]) + } + case int: + switch data := object.(type) { + case []interface{}: + value := data[key] + if len(keys) == 1 { + return value + } + return dig(value, keys[1:]) + } + } + return nil +} + +// mapStructure will map the map to the address of the structre *i +func MapStructure(m map[string]interface{}, i interface{}) error { + m = ConvertMapKey(m) + jsonbody, err := json.Marshal(m) + if err != nil { + return err + } + err = json.Unmarshal(jsonbody, i) + if err != nil { + return err + } + return nil +} + +func ConvertMapKey(m map[string]interface{}) map[string]interface{} { + for k, v := range m { + m[k] = Convert(v) + } + return m +} + +func Convert(i interface{}) interface{} { + switch x := i.(type) { + case map[interface{}]interface{}: + m2 := map[string]interface{}{} + for k, v := range x { + m2[k.(string)] = Convert(v) + } + return m2 + case []interface{}: + for i, v := range x { + x[i] = Convert(v) + } + } + return i +} diff --git a/test/util/rosacli/cmd_runner.go b/test/util/rosacli/cmd_runner.go new file mode 100644 index 000000000..898356f32 --- /dev/null +++ b/test/util/rosacli/cmd_runner.go @@ -0,0 +1,224 @@ +package rosacli + +import ( + "bytes" + "fmt" + "os/exec" + "strings" + "time" + + logger "github.com/openshift/openshift-tests-private/test/extended/util/logext" +) + +const ( + defaultRunnerFormat = "text" + jsonRunnerFormat = "json" + yamlRunnerFormat = "yaml" +) + +type runner struct { + cmds []string + cmdArgs []string + runnerCfg *runnerConfig + sensitive bool +} + +type runnerConfig struct { + format string + color string + debug bool +} + +func NewRunner() *runner { + runner := &runner{ + runnerCfg: &runnerConfig{ + format: "text", + debug: false, + color: "auto", + }, + } + return runner +} + +func (r *runner) Copy() *runner { + return &runner{ + runnerCfg: r.runnerCfg.Copy(), + sensitive: r.sensitive, + } +} + +func (rc *runnerConfig) Copy() *runnerConfig { + return &runnerConfig{ + format: rc.format, + color: rc.color, + debug: rc.debug, + } +} + +func (r *runner) Sensitive(sensitive bool) *runner { + r.sensitive = sensitive + return r +} + +func (r *runner) format(format string) *runner { + r.runnerCfg.format = format + return r +} + +func (r *runner) Debug(debug bool) *runner { + 
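+	// When enabled, GenerateCmdFlags appends --debug to every generated rosa command.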
+	r.runnerCfg.debug = debug
+	return r
+}
+
+func (r *runner) Color(color string) *runner {
+	r.runnerCfg.color = color
+	return r
+}
+
+func (r *runner) JsonFormat() *runner {
+	return r.format(jsonRunnerFormat)
+}
+
+func (r *runner) YamlFormat() *runner {
+	return r.format(yamlRunnerFormat)
+}
+
+func (r *runner) UnsetFormat() *runner {
+	return r.format(defaultRunnerFormat)
+}
+
+func (r *runner) Cmd(commands ...string) *runner {
+	r.cmds = commands
+	return r
+}
+
+func (r *runner) CmdFlags(cmdFlags ...string) *runner {
+	var cmdArgs []string
+	cmdArgs = append(cmdArgs, cmdFlags...)
+	r.cmdArgs = cmdArgs
+	return r
+}
+
+func (r *runner) AddCmdFlags(cmdFlags ...string) *runner {
+	cmdArgs := append(r.cmdArgs, cmdFlags...)
+	r.cmdArgs = cmdArgs
+	return r
+}
+
+func (r *runner) UnsetBoolFlag(flag string) *runner {
+	var newCmdArgs []string
+	cmdArgs := r.cmdArgs
+	for _, vv := range cmdArgs {
+		if vv == flag {
+			continue
+		}
+		newCmdArgs = append(newCmdArgs, vv)
+	}
+
+	r.cmdArgs = newCmdArgs
+	return r
+}
+
+func (r *runner) UnsetFlag(flag string) *runner {
+	cmdArgs := r.cmdArgs
+	flagIndex := -1
+	for n, key := range cmdArgs {
+		if key == flag {
+			flagIndex = n
+			break
+		}
+	}
+	// Nothing to do when the flag is absent; without this guard the slicing
+	// below would wrongly drop the first two arguments.
+	if flagIndex < 0 {
+		return r
+	}
+
+	cmdArgs = append(cmdArgs[:flagIndex], cmdArgs[flagIndex+2:]...)
+	r.cmdArgs = cmdArgs
+	return r
+}
+
+func (r *runner) ReplaceFlag(flag string, value string) *runner {
+	cmdArgs := r.cmdArgs
+	for n, key := range cmdArgs {
+		if key == flag {
+			cmdArgs[n+1] = value
+			break
+		}
+	}
+
+	r.cmdArgs = cmdArgs
+	return r
+}
+
+func (rc *runnerConfig) GenerateCmdFlags() (flags []string) {
+	if rc.format == jsonRunnerFormat || rc.format == yamlRunnerFormat {
+		flags = append(flags, "--output", rc.format)
+	}
+	if rc.debug {
+		flags = append(flags, "--debug")
+	}
+	if rc.color != "auto" {
+		flags = append(flags, "--color", rc.color)
+	}
+	return
+}
+
+func (r *runner) Run() (bytes.Buffer, error) {
+	rosacmd := "rosa"
+	cmdElements := r.cmds
+	if len(r.cmdArgs) > 0 {
+		cmdElements = append(cmdElements, r.cmdArgs...)
+	}
+	cmdElements = append(cmdElements, r.runnerCfg.GenerateCmdFlags()...)
+
+	var output bytes.Buffer
+	var err error
+	retry := 0
+	for {
+		if retry > 4 {
+			err = fmt.Errorf("executing failed: %s", output.String())
+			return output, err
+		}
+		if r.sensitive {
+			logger.Infof("Running command: rosa %s", strings.Join(cmdElements[:2], " "))
+		} else {
+			logger.Infof("Running command: rosa %s", strings.Join(cmdElements, " "))
+		}
+
+		output.Reset()
+		cmd := exec.Command(rosacmd, cmdElements...)
+		cmd.Stdout = &output
+		cmd.Stderr = cmd.Stdout
+
+		err = cmd.Run()
+		if !r.sensitive {
+			logger.Infof("Combined stdout and stderr:\n%s", output.String())
+		}
+
+		if strings.Contains(output.String(), "Not able to get authentication token") {
+			retry++
+			logger.Warnf("[Retry] Not able to get authentication token! Sleeping 5s before retry %d", retry)
+			time.Sleep(5 * time.Second)
+			continue
+		}
+		return output, err
+	}
+}
+
+func (r *runner) RunCMD(command []string) (bytes.Buffer, error) {
+	var output bytes.Buffer
+	var err error
+
+	if !r.sensitive {
+		logger.Infof("Running command: %s", strings.Join(command, " "))
+	} else {
+		logger.Infof("%s command is running", command[0])
+	}
+	output.Reset()
+	cmd := exec.Command(command[0], command[1:]...)
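+	// Point both stdout and stderr at the same buffer so the caller gets the
+	// combined output in one place.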
+	cmd.Stdout = &output
+	cmd.Stderr = cmd.Stdout
+
+	err = cmd.Run()
+	if !r.sensitive {
+		logger.Infof("Combined stdout and stderr:\n%s", output.String())
+	}
+
+	return output, err
+}
diff --git a/test/util/rosacli/defaults.go b/test/util/rosacli/defaults.go
new file mode 100644
index 000000000..827bc6155
--- /dev/null
+++ b/test/util/rosacli/defaults.go
@@ -0,0 +1,4 @@
+package rosacli
+
+var DefaultClassicWorkerPool = "worker"
+var DefaultHostedWorkerPool = "workers"
diff --git a/test/util/rosacli/file_utils.go b/test/util/rosacli/file_utils.go
new file mode 100644
index 000000000..24f92d3f5
--- /dev/null
+++ b/test/util/rosacli/file_utils.go
@@ -0,0 +1,29 @@
+package rosacli
+
+import (
+	"os"
+
+	logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
+)
+
+func CreateTempFileWithContent(fileContent string) (string, error) {
+	return CreateTempFileWithPrefixAndContent("tmpfile", fileContent)
+}
+
+func CreateTempFileWithPrefixAndContent(prefix string, fileContent string) (string, error) {
+	f, err := os.CreateTemp("", prefix+"-")
+	if err != nil {
+		return "", err
+	}
+	// only the file name is needed; the content is written separately below
+	defer f.Close()
+	return CreateFileWithContent(f.Name(), fileContent)
+}
+
+// Write a string to a file
+func CreateFileWithContent(fileAbsPath string, content string) (string, error) {
+	err := os.WriteFile(fileAbsPath, []byte(content), 0644)
+	if err != nil {
+		logger.Errorf("Failed to write to file: %s", err)
+		return "", err
+	}
+	return fileAbsPath, err
+}
diff --git a/test/util/rosacli/idp_service.go b/test/util/rosacli/idp_service.go
new file mode 100644
index 000000000..77f6742a2
--- /dev/null
+++ b/test/util/rosacli/idp_service.go
@@ -0,0 +1,130 @@
+package rosacli
+
+import (
+	"bytes"
+
+	logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
+)
+
+type IDPService interface {
+	ResourcesCleaner
+
+	ReflectIDPList(result bytes.Buffer) (idplist IDPList, err error)
+	CreateIDP(clusterID string, idpName string, idflags ...string) (bytes.Buffer, error)
+	ListIDP(clusterID string) (IDPList, bytes.Buffer, error)
+	DeleteIDP(clusterID string, idpName string) (bytes.Buffer, error)
+}
+
+type idpService struct {
+	ResourcesService
+
+	idps map[string][]string
+}
+
+func NewIDPService(client *Client) IDPService {
+	return &idpService{
+		ResourcesService: ResourcesService{
+			client: client,
+		},
+		idps: make(map[string][]string),
+	}
+}
+
+// Struct for the 'rosa list idp' output
+type IDP struct {
+	Name    string `json:"NAME,omitempty"`
+	Type    string `json:"TYPE,omitempty"`
+	AuthURL string `json:"AUTH URL,omitempty"`
+}
+type IDPList struct {
+	IDPs []IDP `json:"IDPs,omitempty"`
+}
+
+// Parse the result of 'rosa list idp' into the IDPList struct
+func (is *idpService) ReflectIDPList(result bytes.Buffer) (idplist IDPList, err error) {
+	idplist = IDPList{}
+	theMap := is.client.Parser.TableData.Input(result).Parse().Output()
+	for _, idpItem := range theMap {
+		idp := &IDP{}
+		err = MapStructure(idpItem, idp)
+		if err != nil {
+			return
+		}
+		idplist.IDPs = append(idplist.IDPs, *idp)
+	}
+	return idplist, err
+}
+
+// Check whether an idp with the given name exists in the IDPList
+func (idps IDPList) IsExist(idpName string) (existed bool) {
+	existed = false
+	for _, idp := range idps.IDPs {
+		if idp.Name == idpName {
+			existed = true
+			break
+		}
+	}
+	return
+}
+
+// Get the specified idp by name
+func (idps IDPList) Idp(idpName string) (idp IDP) {
+	for _, idp := range idps.IDPs {
+		if idp.Name == idpName {
+			return idp
+		}
+	}
+	return
+}
+
+// Create idp
+func (is *idpService) CreateIDP(clusterID string,
name string, flags ...string) (output bytes.Buffer, err error) { + output, err = is.client.Runner. + Cmd("create", "idp"). + CmdFlags(append(flags, "-c", clusterID, "--name", name)...). + Run() + if err == nil { + is.idps[clusterID] = append(is.idps[clusterID], name) + } + return +} + +// Delete idp +func (is *idpService) DeleteIDP(clusterID string, idpName string) (output bytes.Buffer, err error) { + output, err = is.client.Runner. + Cmd("delete", "idp", idpName). + CmdFlags("-c", clusterID, "-y"). + Run() + if err == nil { + is.idps[clusterID] = RemoveFromStringSlice(is.idps[clusterID], idpName) + } + return +} + +// list idp +func (is *idpService) ListIDP(clusterID string) (IDPList, bytes.Buffer, error) { + listIDP := is.client.Runner. + Cmd("list", "idp"). + CmdFlags("-c", clusterID) + + output, err := listIDP.Run() + if err != nil { + return IDPList{}, output, err + } + idpList, err := is.ReflectIDPList(output) + return idpList, output, err +} + +func (is *idpService) CleanResources(clusterID string) (errors []error) { + var idpsToDel []string + idpsToDel = append(idpsToDel, is.idps[clusterID]...) + for _, idpName := range idpsToDel { + logger.Infof("Remove remaining idp '%s'", idpName) + _, err := is.DeleteIDP(clusterID, idpName) + if err != nil { + errors = append(errors, err) + } + } + + return +} diff --git a/test/util/rosacli/idp_utils.go b/test/util/rosacli/idp_utils.go new file mode 100644 index 000000000..1647a00dc --- /dev/null +++ b/test/util/rosacli/idp_utils.go @@ -0,0 +1,35 @@ +package rosacli + +import ( + "fmt" + "os/exec" + "strings" + + logger "github.com/openshift/openshift-tests-private/test/extended/util/logext" +) + +// Generate htpasspwd key value pair, return with a string +func GenerateHtpasswdPair(user string, pass string) (string, string, string, error) { + generateCMD := fmt.Sprintf("htpasswd -Bbn %s %s", user, pass) + output, err := exec.Command("bash", "-c", generateCMD).Output() + htpasswdPair := strings.TrimSpace(string(output)) + parts := strings.SplitN(htpasswdPair, ":", 2) + if err != nil { + logger.Errorf("Fail to generate htpasswd file: %v", err) + return "", "", "", err + } + return htpasswdPair, parts[0], parts[1], nil +} + +// generate Htpasswd user-password Pairs +func GenerateMultipleHtpasswdPairs(pairNum int) ([]string, error) { + multipleuserPasswd := []string{} + for i := 0; i < pairNum; i++ { + userPasswdPair, _, _, err := GenerateHtpasswdPair(GenerateRandomString(6), GenerateRandomString(6)) + if err != nil { + return multipleuserPasswd, err + } + multipleuserPasswd = append(multipleuserPasswd, userPasswdPair) + } + return multipleuserPasswd, nil +} diff --git a/test/util/rosacli/ingress_service.go b/test/util/rosacli/ingress_service.go new file mode 100644 index 000000000..1ba1fc7dd --- /dev/null +++ b/test/util/rosacli/ingress_service.go @@ -0,0 +1,114 @@ +package rosacli + +import ( + "bytes" + + logger "github.com/openshift/openshift-tests-private/test/extended/util/logext" +) + +type IngressService interface { + ResourcesCleaner + + EditIngress(clusterID string, ingressID string, flags ...string) (bytes.Buffer, error) + ListIngress(clusterID string, flags ...string) (bytes.Buffer, error) + DeleteIngress(clusterID string, ingressID string) (bytes.Buffer, error) + ReflectIngressList(result bytes.Buffer) (res *IngressList, err error) +} + +type ingressService struct { + ResourcesService + + ingress map[string][]string +} + +func NewIngressService(client *Client) IngressService { + return &ingressService{ + ResourcesService: 
ResourcesService{
+			client: client,
+		},
+		ingress: make(map[string][]string),
+	}
+}
+
+func (i *ingressService) CleanResources(clusterID string) (errors []error) {
+	var igsToDel []string
+	igsToDel = append(igsToDel, i.ingress[clusterID]...)
+	for _, igID := range igsToDel {
+		logger.Infof("Remove remaining ingress '%s'", igID)
+		_, err := i.DeleteIngress(clusterID, igID)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+
+	return
+}
+
+// Struct for the 'rosa list ingress' output; json tags match the table
+// columns consumed by MapStructure, as in the other services
+type IngressList struct {
+	Ingresses []Ingress `json:"Ingresses,omitempty"`
+}
+type Ingress struct {
+	ID                string `json:"ID,omitempty"`
+	ApplicationRouter string `json:"APPLICATION ROUTER,omitempty"`
+	Private           string `json:"PRIVATE,omitempty"`
+	Default           string `json:"DEFAULT,omitempty"`
+	RouteSelectors    string `json:"ROUTE SELECTORS,omitempty"`
+	LBType            string `json:"LB-TYPE,omitempty"`
+}
+
+// Get the specified ingress by ingress id
+func (inl IngressList) Ingress(id string) (in *Ingress) {
+	for _, inItem := range inl.Ingresses {
+		if inItem.ID == id {
+			in = &inItem
+			return
+		}
+	}
+	return
+}
+
+// Edit the cluster ingress
+func (i *ingressService) EditIngress(clusterID string, ingressID string, flags ...string) (bytes.Buffer, error) {
+	combflags := append([]string{"-c", clusterID}, flags...)
+	editIngress := i.client.Runner.
+		Cmd("edit", "ingress", ingressID).
+		CmdFlags(combflags...)
+	return editIngress.Run()
+}
+
+// List the cluster ingresses
+func (i *ingressService) ListIngress(clusterID string, flags ...string) (bytes.Buffer, error) {
+	combflags := append([]string{"-c", clusterID}, flags...)
+	listIngress := i.client.Runner.
+		Cmd("list", "ingress").
+		CmdFlags(combflags...)
+	return listIngress.Run()
+}
+
+// Parse the result of 'rosa list ingress' into the IngressList struct
+func (i *ingressService) ReflectIngressList(result bytes.Buffer) (res *IngressList, err error) {
+	res = &IngressList{}
+	theMap := i.client.Parser.TableData.Input(result).Parse().Output()
+	for _, ingressItem := range theMap {
+		in := &Ingress{}
+		err = MapStructure(ingressItem, in)
+		if err != nil {
+			return
+		}
+		res.Ingresses = append(res.Ingresses, *in)
+	}
+	return res, err
+}
+
+// Delete the ingress
+func (i *ingressService) DeleteIngress(clusterID string, ingressID string) (output bytes.Buffer, err error) {
+	output, err = i.client.Runner.
+		Cmd("delete", "ingress", ingressID).
+		CmdFlags("-c", clusterID, "-y").
+		Run()
+	if err == nil {
+		i.ingress[clusterID] = RemoveFromStringSlice(i.ingress[clusterID], ingressID)
+	}
+	return
+}
diff --git a/test/util/rosacli/kubelet_config_service.go b/test/util/rosacli/kubelet_config_service.go
new file mode 100644
index 000000000..41fb1d99f
--- /dev/null
+++ b/test/util/rosacli/kubelet_config_service.go
@@ -0,0 +1,102 @@
+package rosacli
+
+import (
+	"bytes"
+
+	logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
+	"gopkg.in/yaml.v3"
+)
+
+type KubeletConfigService interface {
+	ResourcesCleaner
+
+	DescribeKubeletConfig(clusterID string) (bytes.Buffer, error)
+	ReflectKubeletConfigDescription(result bytes.Buffer) *KubeletConfigDescription
+	EditKubeletConfig(clusterID string, flags ...string) (bytes.Buffer, error)
+	DeleteKubeletConfig(clusterID string, flags ...string) (bytes.Buffer, error)
+	CreateKubeletConfig(clusterID string, flags ...string) (bytes.Buffer, error)
+}
+
+type kubeletConfigService struct {
+	ResourcesService
+
+	created map[string]bool
+}
+
+func NewKubeletConfigService(client *Client) KubeletConfigService {
+	return &kubeletConfigService{
+		ResourcesService: ResourcesService{
+			client: client,
+		},
+		created: make(map[string]bool),
+	}
+}
+
+// Struct for the 'rosa describe kubeletconfig' output
+type KubeletConfigDescription struct {
+	PodPidsLimit int `yaml:"Pod Pids Limit,omitempty"`
+}
+
+// Describe the kubeletconfig
+func (k *kubeletConfigService) DescribeKubeletConfig(clusterID string) (bytes.Buffer, error) {
+	describe := k.client.Runner.
+		Cmd("describe", "kubeletconfig").
+		CmdFlags("-c", clusterID)
+
+	return describe.Run()
+}
+
+// Parse the result of 'rosa describe kubeletconfig' into the KubeletConfigDescription struct
+func (k *kubeletConfigService) ReflectKubeletConfigDescription(result bytes.Buffer) *KubeletConfigDescription {
+	res := new(KubeletConfigDescription)
+	theMap, _ := k.client.Parser.TextData.Input(result).Parse().YamlToMap()
+	data, _ := yaml.Marshal(&theMap)
+	yaml.Unmarshal(data, res)
+	return res
+}
+
+// Edit the kubeletconfig
+func (k *kubeletConfigService) EditKubeletConfig(clusterID string, flags ...string) (bytes.Buffer, error) {
+	combflags := append([]string{"-c", clusterID}, flags...)
+	editKubeletConfig := k.client.Runner.
+		Cmd("edit", "kubeletconfig").
+		CmdFlags(combflags...)
+	return editKubeletConfig.Run()
+}
+
+// Delete the kubeletconfig
+func (k *kubeletConfigService) DeleteKubeletConfig(clusterID string, flags ...string) (output bytes.Buffer, err error) {
+	combflags := append([]string{"-c", clusterID}, flags...)
+	deleteKubeletConfig := k.client.Runner.
+		Cmd("delete", "kubeletconfig").
+		CmdFlags(combflags...)
+	output, err = deleteKubeletConfig.Run()
+	if err == nil {
+		k.created[clusterID] = false
+	}
+	return
+}
+
+// Create the kubeletconfig
+func (k *kubeletConfigService) CreateKubeletConfig(clusterID string, flags ...string) (output bytes.Buffer, err error) {
+	combflags := append([]string{"-c", clusterID}, flags...)
+	createKubeletConfig := k.client.Runner.
+		Cmd("create", "kubeletconfig").
+		CmdFlags(combflags...)
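+	// Run the command; on success, record that this cluster now has a
+	// kubeletconfig so CleanResources can delete it during cleanup.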
+	output, err = createKubeletConfig.Run()
+	if err == nil {
+		k.created[clusterID] = true
+	}
+	return
+}
+
+func (k *kubeletConfigService) CleanResources(clusterID string) (errors []error) {
+	if k.created[clusterID] {
+		logger.Infof("Remove remaining kubelet config")
+		_, err := k.DeleteKubeletConfig(clusterID)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return
+}
diff --git a/test/util/rosacli/machinepool_service.go b/test/util/rosacli/machinepool_service.go
new file mode 100644
index 000000000..5f7f88ac1
--- /dev/null
+++ b/test/util/rosacli/machinepool_service.go
@@ -0,0 +1,274 @@
+package rosacli
+
+import (
+	"bytes"
+
+	logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
+	"gopkg.in/yaml.v3"
+)
+
+type MachinePoolService interface {
+	ResourcesCleaner
+
+	ListMachinePool(clusterID string) (bytes.Buffer, error)
+	DescribeMachinePool(clusterID string, mpID string) (bytes.Buffer, error)
+	CreateMachinePool(clusterID string, name string, flags ...string) (bytes.Buffer, error)
+	EditMachinePool(clusterID string, machinePoolName string, flags ...string) (bytes.Buffer, error)
+	DeleteMachinePool(clusterID string, machinePoolName string) (bytes.Buffer, error)
+
+	ReflectMachinePoolList(result bytes.Buffer) (mpl MachinePoolList, err error)
+	ReflectMachinePoolDescription(result bytes.Buffer) (*MachinePoolDescription, error)
+
+	ReflectNodePoolList(result bytes.Buffer) (*NodePoolList, error)
+	ListAndReflectNodePools(clusterID string) (*NodePoolList, error)
+	ReflectNodePoolDescription(result bytes.Buffer) (npd *NodePoolDescription, err error)
+	DescribeAndReflectNodePool(clusterID string, name string) (*NodePoolDescription, error)
+
+	RetrieveHelpForCreate() (bytes.Buffer, error)
+}
+
+type machinepoolService struct {
+	ResourcesService
+
+	machinePools map[string][]string
+}
+
+func NewMachinePoolService(client *Client) MachinePoolService {
+	return &machinepoolService{
+		ResourcesService: ResourcesService{
+			client: client,
+		},
+		machinePools: make(map[string][]string),
+	}
+}
+
+// Struct for the 'rosa list machinepool' output for non-hosted-cp clusters
+type MachinePool struct {
+	ID               string `json:"ID,omitempty"`
+	AutoScaling      string `json:"AUTOSCALING,omitempty"`
+	Replicas         string `json:"REPLICAS,omitempty"`
+	InstanceType     string `json:"INSTANCE TYPE,omitempty"`
+	Labels           string `json:"LABELS,omitempty"`
+	Taints           string `json:"TAINTS,omitempty"`
+	AvalaiblityZones string `json:"AVAILABILITY ZONES,omitempty"`
+	Subnets          string `json:"SUBNETS,omitempty"`
+	SpotInstances    string `json:"SPOT INSTANCES,omitempty"`
+	DiskSize         string `json:"DISK SIZE,omitempty"`
+}
+type MachinePoolList struct {
+	MachinePools []MachinePool `json:"MachinePools,omitempty"`
+}
+
+// Struct for the 'rosa describe machinepool' output for non-hosted-cp clusters
+type MachinePoolDescription struct {
+	ID               string `yaml:"ID,omitempty"`
+	ClusterID        string `yaml:"Cluster ID,omitempty"`
+	AutoScaling      string `yaml:"Autoscaling,omitempty"`
+	Replicas         string `yaml:"Replicas,omitempty"`
+	InstanceType     string `yaml:"Instance type,omitempty"`
+	Labels           string `yaml:"Labels,omitempty"`
+	Taints           string `yaml:"Taints,omitempty"`
+	AvailablityZones string `yaml:"Availability zones,omitempty"`
+	Subnets          string `yaml:"Subnets,omitempty"`
+	SpotInstances    string `yaml:"Spot instances,omitempty"`
+	DiskSize         string `yaml:"Disk size,omitempty"`
+	SecurityGroupIDs string `yaml:"Security Group IDs,omitempty"`
+}
+
+// Struct for the 'rosa list machinepool' output for hosted-cp clusters
+type NodePool struct {
ID string `json:"ID,omitempty"` + AutoScaling string `json:"AUTOSCALING,omitempty"` + Replicas string `json:"REPLICAS,omitempty"` + InstanceType string `json:"INSTANCE TYPE,omitempty"` + Labels string `json:"LABELS,omitempty"` + Taints string `json:"TAINTS,omitempty"` + AvalaiblityZones string `json:"AVAILABILITY ZONES,omitempty"` + Subnet string `json:"SUBNET,omitempty"` + Version string `json:"VERSION,omitempty"` + AutoRepair string `json:"AUTOREPAIR,omitempty"` + TuningConfigs string `json:"TUNING CONFIGS,omitempty"` + Message string `json:"MESSAGE,omitempty"` +} + +type NodePoolList struct { + NodePools []NodePool `json:"NodePools,omitempty"` +} + +type NodePoolDescription struct { + ID string `yaml:"ID,omitempty"` + ClusterID string `yaml:"Cluster ID,omitempty"` + AutoScaling string `yaml:"Autoscaling,omitempty"` + DesiredReplicas string `yaml:"Desired replicas,omitempty"` + CurrentReplicas string `yaml:"Current replicas,omitempty"` + InstanceType string `yaml:"Instance type,omitempty"` + Labels string `yaml:"Labels,omitempty"` + Taints string `yaml:"Taints,omitempty"` + AvalaiblityZones string `yaml:"Availability zone,omitempty"` + Subnet string `yaml:"Subnet,omitempty"` + Version string `yaml:"Version,omitempty"` + AutoRepair string `yaml:"Autorepair,omitempty"` + TuningConfigs string `yaml:"Tuning configs,omitempty"` + Message string `yaml:"Message,omitempty"` + ScheduledUpgrade string `yaml:"Scheduled upgrade,omitempty"` +} + +// Create MachinePool +func (m *machinepoolService) CreateMachinePool(clusterID string, name string, flags ...string) (output bytes.Buffer, err error) { + output, err = m.client.Runner. + Cmd("create", "machinepool"). + CmdFlags(append(flags, "-c", clusterID, "--name", name)...). + Run() + if err == nil { + m.machinePools[clusterID] = append(m.machinePools[clusterID], name) + } + return +} + +// List MachinePool +func (m *machinepoolService) ListMachinePool(clusterID string) (bytes.Buffer, error) { + listMachinePool := m.client.Runner. + Cmd("list", "machinepool"). + CmdFlags("-c", clusterID) + return listMachinePool.Run() +} + +// Describe MachinePool +func (m *machinepoolService) DescribeMachinePool(clusterID string, mpID string) (bytes.Buffer, error) { + describeMp := m.client.Runner. + Cmd("describe", "machinepool"). + CmdFlags(mpID, "-c", clusterID) + return describeMp.Run() +} + +// Delete MachinePool +func (m *machinepoolService) DeleteMachinePool(clusterID string, machinePoolName string) (output bytes.Buffer, err error) { + output, err = m.client.Runner. + Cmd("delete", "machinepool"). + CmdFlags("-c", clusterID, machinePoolName, "-y"). + Run() + if err == nil { + m.machinePools[clusterID] = RemoveFromStringSlice(m.machinePools[clusterID], machinePoolName) + } + return +} + +// Edit MachinePool +func (m *machinepoolService) EditMachinePool(clusterID string, machinePoolName string, flags ...string) (bytes.Buffer, error) { + editMachinePool := m.client.Runner. + Cmd("edit", "machinepool", machinePoolName). + CmdFlags(append(flags, "-c", clusterID)...) 
+
+	return editMachinePool.Run()
+}
+
+// Parse the result of 'rosa list machinepool' to the MachinePoolList struct
+func (m *machinepoolService) ReflectMachinePoolList(result bytes.Buffer) (mpl MachinePoolList, err error) {
+	mpl = MachinePoolList{}
+	theMap := m.client.Parser.TableData.Input(result).Parse().Output()
+	for _, machinepoolItem := range theMap {
+		mp := &MachinePool{}
+		err = MapStructure(machinepoolItem, mp)
+		if err != nil {
+			return
+		}
+		mpl.MachinePools = append(mpl.MachinePools, *mp)
+	}
+	return mpl, err
+}
+
+// Parse the result of 'rosa describe machinepool' to the MachinePoolDescription struct
+func (m *machinepoolService) ReflectMachinePoolDescription(result bytes.Buffer) (mp *MachinePoolDescription, err error) {
+	mp = new(MachinePoolDescription)
+	theMap, err := m.client.Parser.TextData.Input(result).Parse().YamlToMap()
+	if err != nil {
+		return nil, err
+	}
+	data, err := yaml.Marshal(&theMap)
+	if err != nil {
+		return nil, err
+	}
+	err = yaml.Unmarshal(data, mp)
+	return mp, err
+}
+
+func (m *machinepoolService) CleanResources(clusterID string) (errors []error) {
+	var mpsToDel []string
+	mpsToDel = append(mpsToDel, m.machinePools[clusterID]...)
+	for _, mpID := range mpsToDel {
+		logger.Infof("Remove remaining machinepool '%s'", mpID)
+		_, err := m.DeleteMachinePool(clusterID, mpID)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+
+	return
+}
+
+// Get specified machinepool by machinepool id
+func (mpl MachinePoolList) Machinepool(id string) (mp *MachinePool) {
+	for i := range mpl.MachinePools {
+		if mpl.MachinePools[i].ID == id {
+			return &mpl.MachinePools[i]
+		}
+	}
+	return
+}
+
+func (m *machinepoolService) ListAndReflectNodePools(clusterID string) (npl *NodePoolList, err error) {
+	output, err := m.ListMachinePool(clusterID)
+	if err != nil {
+		return nil, err
+	}
+	return m.ReflectNodePoolList(output)
+}
+
+func (m *machinepoolService) DescribeAndReflectNodePool(clusterID string, mpID string) (*NodePoolDescription, error) {
+	output, err := m.DescribeMachinePool(clusterID, mpID)
+	if err != nil {
+		return nil, err
+	}
+	return m.ReflectNodePoolDescription(output)
+}
+
+func (m *machinepoolService) ReflectNodePoolList(result bytes.Buffer) (npl *NodePoolList, err error) {
+	npl = new(NodePoolList)
+	theMap := m.client.Parser.TableData.Input(result).Parse().Output()
+	for _, nodepoolItem := range theMap {
+		np := &NodePool{}
+		err = MapStructure(nodepoolItem, np)
+		if err != nil {
+			return
+		}
+		npl.NodePools = append(npl.NodePools, *np)
+	}
+	return npl, err
+}
+
+// Retrieve the help output of 'rosa create machinepool'
+func (m *machinepoolService) RetrieveHelpForCreate() (output bytes.Buffer, err error) {
+	return m.client.Runner.Cmd("create", "machinepool").CmdFlags("-h").Run()
+}
+
+// Parse the result of 'rosa describe machinepool' to the NodePoolDescription struct
+func (m *machinepoolService) ReflectNodePoolDescription(result bytes.Buffer) (*NodePoolDescription, error) {
+	theMap, err := m.client.Parser.TextData.Input(result).Parse().YamlToMap()
+	if err != nil {
+		return nil, err
+	}
+	data, err := yaml.Marshal(&theMap)
+	if err != nil {
+		return nil, err
+	}
+	npd := new(NodePoolDescription)
+	err = yaml.Unmarshal(data, npd)
+	return npd, err
+}
+
+// Get specified nodepool by nodepool id
+func (npl NodePoolList) Nodepool(id string) (np *NodePool) {
+	for i := range npl.NodePools {
+		if npl.NodePools[i].ID == id {
+			return &npl.NodePools[i]
+		}
+	}
+	return
+}
diff --git a/test/util/rosacli/machinepool_upgrade_service.go b/test/util/rosacli/machinepool_upgrade_service.go
new file mode 100644
index 000000000..733c9dc96
--- /dev/null
+++ b/test/util/rosacli/machinepool_upgrade_service.go
@@ -0,0 +1,198 @@
+package rosacli
+
+import (
+	"bytes"
+
+	logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
+	"gopkg.in/yaml.v3"
+)
+
+type MachinePoolUpgradeService interface {
+	ResourcesCleaner
+
+	ListUpgrades(clusterID string, mpID string) (bytes.Buffer, error)
+	ReflectUpgradesList(result bytes.Buffer) (*MachinePoolUpgradeList, error)
+	ListAndReflectUpgrades(clusterID string, mpID string) (*MachinePoolUpgradeList, error)
+
+	// Create a manual upgrade. `version`, `scheduleDate` and `scheduleTime` are optional; if either `scheduleDate` or `scheduleTime` is provided, both must be.
+	CreateManualUpgrade(clusterID string, mpID string, version string, scheduleDate string, scheduleTime string) (bytes.Buffer, error)
+	// Create an automatic upgrade based on the given cron expression.
+	CreateAutomaticUpgrade(clusterID string, mpID string, scheduleCron string) (bytes.Buffer, error)
+
+	DescribeUpgrade(clusterID string, mpID string) (bytes.Buffer, error)
+	ReflectUpgradeDescription(result bytes.Buffer) (*MachinePoolUpgradeDescription, error)
+	DescribeAndReflectUpgrade(clusterID string, mpID string) (*MachinePoolUpgradeDescription, error)
+
+	DeleteUpgrade(clusterID string, mpID string) (bytes.Buffer, error)
+
+	RetrieveHelpForCreate() (bytes.Buffer, error)
+	RetrieveHelpForDescribe() (bytes.Buffer, error)
+	RetrieveHelpForList() (bytes.Buffer, error)
+	RetrieveHelpForDelete() (bytes.Buffer, error)
+}
+
+type machinePoolUpgradeService struct {
+	ResourcesService
+
+	machinePools map[string][]string
+}
+
+func NewMachinePoolUpgradeService(client *Client) MachinePoolUpgradeService {
+	return &machinePoolUpgradeService{
+		ResourcesService: ResourcesService{
+			client: client,
+		},
+
+		machinePools: make(map[string][]string),
+	}
+}
+
+type MachinePoolUpgrade struct {
+	Version string `json:"VERSION,omitempty"`
+	Notes string `json:"NOTES,omitempty"`
+}
+type MachinePoolUpgradeList struct {
+	MachinePoolUpgrades []MachinePoolUpgrade `json:"MachinePoolUpgrades,omitempty"`
+}
+
+// Struct for the 'rosa describe upgrades' output for non-hosted-cp clusters
+type MachinePoolUpgradeDescription struct {
+	ID string `yaml:"ID,omitempty"`
+	ClusterID string `yaml:"Cluster ID,omitempty"`
+	ScheduleType string `yaml:"Schedule Type,omitempty"`
+	NextRun string `yaml:"Next Run,omitempty"`
+	UpgradeState string `yaml:"Upgrade State,omitempty"`
+	Version string `yaml:"Version,omitempty"`
+}
+
+// List MachinePool upgrades
+func (mpus *machinePoolUpgradeService) ListUpgrades(clusterID string, mpID string) (bytes.Buffer, error) {
+	listUpgrades := mpus.client.Runner.
+		Cmd("list", "upgrades").
+		CmdFlags("-c", clusterID, "--machinepool", mpID)
+	return listUpgrades.Run()
+}
+
+// Parse the result of 'rosa list upgrades --machinepool' to the MachinePoolUpgradeList struct
+func (mpus *machinePoolUpgradeService) ReflectUpgradesList(result bytes.Buffer) (mpul *MachinePoolUpgradeList, err error) {
+	mpul = &MachinePoolUpgradeList{}
+	theMap := mpus.client.Parser.TableData.Input(result).Parse().Output()
+	for _, machinepoolItem := range theMap {
+		mpu := &MachinePoolUpgrade{}
+		err = MapStructure(machinepoolItem, mpu)
+		if err != nil {
+			return
+		}
+		mpul.MachinePoolUpgrades = append(mpul.MachinePoolUpgrades, *mpu)
+	}
+	return mpul, err
+}
+
+func (mpus *machinePoolUpgradeService) ListAndReflectUpgrades(clusterID string, mpID string) (mpul *MachinePoolUpgradeList, err error) {
+	output, err := mpus.ListUpgrades(clusterID, mpID)
+	if err != nil {
+		return nil, err
+	}
+	return mpus.ReflectUpgradesList(output)
+}
+
+func (mpus *machinePoolUpgradeService) CreateManualUpgrade(clusterID string, mpID string, version string, scheduleDate string, scheduleTime string) (output bytes.Buffer, err error) {
+	var flags []string
+	if version != "" {
+		flags = append(flags, "--version", version)
+	}
+
+	if scheduleDate != "" {
+		flags = append(flags, "--schedule-date", scheduleDate)
+		flags = append(flags, "--schedule-time", scheduleTime)
+	}
+
+	return mpus.create(clusterID, mpID, flags...)
+}
+
+func (mpus *machinePoolUpgradeService) CreateAutomaticUpgrade(clusterID string, mpID string, scheduleCron string) (output bytes.Buffer, err error) {
+	return mpus.create(clusterID, mpID, "--schedule", scheduleCron)
+}
+
+func (mpus *machinePoolUpgradeService) create(clusterID string, mpID string, flags ...string) (output bytes.Buffer, err error) {
+	output, err = mpus.client.Runner.
+		Cmd("upgrade", "machinepool", mpID).
+		CmdFlags(append(flags, "-c", clusterID)...).
+		Run()
+	if err == nil {
+		mpus.machinePools[clusterID] = append(mpus.machinePools[clusterID], mpID)
+	}
+	return
+}
+
+// Describe MachinePool upgrade
+func (mpus *machinePoolUpgradeService) DescribeUpgrade(clusterID string, mpID string) (bytes.Buffer, error) {
+	describeMp := mpus.client.Runner.
+		Cmd("describe", "upgrade").
+		CmdFlags("-c", clusterID, "--machinepool", mpID)
+	return describeMp.Run()
+}
+
+// Parse the result of 'rosa describe upgrade --machinepool' to the MachinePoolUpgradeDescription struct
+func (mpus *machinePoolUpgradeService) ReflectUpgradeDescription(result bytes.Buffer) (*MachinePoolUpgradeDescription, error) {
+	theMap, err := mpus.client.Parser.TextData.Input(result).Parse().YamlToMap()
+	if err != nil {
+		return nil, err
+	}
+	data, err := yaml.Marshal(&theMap)
+	if err != nil {
+		return nil, err
+	}
+	mpud := new(MachinePoolUpgradeDescription)
+	err = yaml.Unmarshal(data, mpud)
+	return mpud, err
+}
+
+func (mpus *machinePoolUpgradeService) DescribeAndReflectUpgrade(clusterID string, mpID string) (*MachinePoolUpgradeDescription, error) {
+	output, err := mpus.DescribeUpgrade(clusterID, mpID)
+	if err != nil {
+		return nil, err
+	}
+	return mpus.ReflectUpgradeDescription(output)
+}
+
+func (mpus *machinePoolUpgradeService) DeleteUpgrade(clusterID string, mpID string) (output bytes.Buffer, err error) {
+	output, err = mpus.client.Runner.
+		Cmd("delete", "upgrade").
+		CmdFlags("-c", clusterID, "--machinepool", mpID, "-y").Run()
+	if err == nil {
+		mpus.machinePools[clusterID] = RemoveFromStringSlice(mpus.machinePools[clusterID], mpID)
+	}
+	return
+}
+
+// Retrieve the help output of 'rosa upgrade machinepool'
+func (mpus *machinePoolUpgradeService) RetrieveHelpForCreate() (output bytes.Buffer, err error) {
+	return mpus.client.Runner.Cmd("upgrade", "machinepool").CmdFlags("-h").Run()
+}
+
+func (mpus *machinePoolUpgradeService) RetrieveHelpForList() (output bytes.Buffer, err error) {
+	return mpus.client.Runner.Cmd("list", "upgrades").CmdFlags("-h").Run()
+}
+
+func (mpus *machinePoolUpgradeService) RetrieveHelpForDescribe() (output bytes.Buffer, err error) {
+	return mpus.client.Runner.Cmd("describe", "upgrade").CmdFlags("-h").Run()
+}
+
+func (mpus *machinePoolUpgradeService) RetrieveHelpForDelete() (output bytes.Buffer, err error) {
+	return mpus.client.Runner.Cmd("delete", "upgrade").CmdFlags("-h").Run()
+}
+
+func (mpus *machinePoolUpgradeService) CleanResources(clusterID string) (errors []error) {
+	var mpsToDel []string
+	mpsToDel = append(mpsToDel, mpus.machinePools[clusterID]...)
+	for _, mpID := range mpsToDel {
+		logger.Infof("Remove remaining machinepool upgrade on '%s'", mpID)
+		_, err := mpus.DeleteUpgrade(clusterID, mpID)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+
+	return
+}
diff --git a/test/util/rosacli/network_verifier_service.go b/test/util/rosacli/network_verifier_service.go
new file mode 100644
index 000000000..be545433a
--- /dev/null
+++ b/test/util/rosacli/network_verifier_service.go
@@ -0,0 +1,60 @@
+package rosacli
+
+import (
+	"bytes"
+
+	logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
+)
+
+type NetworkVerifierService interface {
+	ResourcesCleaner
+	CreateNetworkVerifierWithCluster(clusterID string, flags ...string) (bytes.Buffer, error)
+	CreateNetworkVerifierWithSubnets(flags ...string) (bytes.Buffer, error)
+	GetNetworkVerifierStatus(flags ...string) (bytes.Buffer, error)
+}
+
+type networkVerifierService struct {
+	ResourcesService
+
+	nv map[string]string
+}
+
+func NewNetworkVerifierService(client *Client) NetworkVerifierService {
+	return &networkVerifierService{
+		ResourcesService: ResourcesService{
+			client: client,
+		},
+		nv: make(map[string]string),
+	}
+}
+
+func (nv *networkVerifierService) CreateNetworkVerifierWithCluster(clusterID string, flags ...string) (bytes.Buffer, error) {
+	combflags := append([]string{"-c", clusterID}, flags...)
+	createNetworkVerifier := nv.client.Runner.
+		Cmd("verify", "network").
+		CmdFlags(combflags...)
+
+	return createNetworkVerifier.Run()
+}
+
+func (nv *networkVerifierService) CreateNetworkVerifierWithSubnets(flags ...string) (bytes.Buffer, error) {
+	createNetworkVerifier := nv.client.Runner.
+		Cmd("verify", "network").
+		CmdFlags(flags...)
+
+	return createNetworkVerifier.Run()
+}
+
+func (nv *networkVerifierService) GetNetworkVerifierStatus(flags ...string) (bytes.Buffer, error) {
+	combflags := append([]string{"--watch", "--status-only"}, flags...)
+	getNetworkVerifierStatus := nv.client.Runner.
+		Cmd("verify", "network").
+		CmdFlags(combflags...)
+
+	return getNetworkVerifierStatus.Run()
+}
+
+func (nv *networkVerifierService) CleanResources(clusterID string) (errors []error) {
+	logger.Debugf("Nothing to clean in NetworkVerifier Service")
+	return
+}
diff --git a/test/util/rosacli/ocm_resource_service.go b/test/util/rosacli/ocm_resource_service.go
new file mode 100644
index 000000000..9948e4cda
--- /dev/null
+++ b/test/util/rosacli/ocm_resource_service.go
@@ -0,0 +1,512 @@
+package rosacli
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
+)
+
+var RoleTypeSuffixMap = map[string]string{
+	"Installer": "Installer-Role",
+	"Support": "Support-Role",
+	"Control plane": "ControlPlane-Role",
+	"Worker": "Worker-Role",
+}
+
+type OCMResourceService interface {
+	ResourcesCleaner
+
+	ListRegion(flags ...string) ([]*CloudRegion, bytes.Buffer, error)
+	ReflectRegionList(result bytes.Buffer) (regions []*CloudRegion, err error)
+
+	ListUserRole() (UserRoleList, bytes.Buffer, error)
+	DeleteUserRole(flags ...string) (bytes.Buffer, error)
+	LinkUserRole(flags ...string) (bytes.Buffer, error)
+	UnlinkUserRole(flags ...string) (bytes.Buffer, error)
+	CreateUserRole(flags ...string) (bytes.Buffer, error)
+	ReflectUserRoleList(result bytes.Buffer) (url UserRoleList, err error)
+
+	Whoami() (bytes.Buffer, error)
+	ReflectAccountsInfo(result bytes.Buffer) *AccountsInfo
+
+	CreateAccountRole(flags ...string) (bytes.Buffer, error)
+	ReflectAccountRoleList(result bytes.Buffer) (arl AccountRoleList, err error)
+	DeleteAccountRole(flags ...string) (bytes.Buffer, error)
+	ListAccountRole() (AccountRoleList, bytes.Buffer, error)
+	UpgradeAccountRole(flags ...string) (bytes.Buffer, error)
+
+	ListOCMRole() (OCMRoleList, bytes.Buffer, error)
+	DeleteOCMRole(flags ...string) (bytes.Buffer, error)
+	LinkOCMRole(flags ...string) (bytes.Buffer, error)
+	UnlinkOCMRole(flags ...string) (bytes.Buffer, error)
+	CreateOCMRole(flags ...string) (bytes.Buffer, error)
+	ReflectOCMRoleList(result bytes.Buffer) (orl OCMRoleList, err error)
+
+	ListOIDCConfig() (OIDCConfigList, bytes.Buffer, error)
+	DeleteOIDCConfig(flags ...string) (bytes.Buffer, error)
+	CreateOIDCConfig(flags ...string) (bytes.Buffer, error)
+	ReflectOIDCConfigList(result bytes.Buffer) (oidclist OIDCConfigList, err error)
+	GetOIDCIdFromList(providerURL string) (string, error)
+
+	DeleteOperatorRoles(flags ...string) (bytes.Buffer, error)
+	CreateOperatorRoles(flags ...string) (bytes.Buffer, error)
+
+	CreateOIDCProvider(flags ...string) (bytes.Buffer, error)
+}
+
+type ocmResourceService struct {
+	ResourcesService
+}
+
+func NewOCMResourceService(client *Client) OCMResourceService {
+	return &ocmResourceService{
+		ResourcesService: ResourcesService{
+			client: client,
+		},
+	}
+}
+
+// Struct for the 'rosa list region' output
+type CloudRegion struct {
+	ID string `json:"ID,omitempty"`
+	Name string `json:"NAME,omitempty"`
+	MultiAZSupported string `json:"MULTI-AZ SUPPORT,omitempty"`
+	HypershiftSupported string `json:"HOSTED-CP SUPPORT,omitempty"`
+}
+
+// Struct for the 'rosa list user-role' output
+type UserRole struct {
+	RoleName string `json:"ROLE NAME,omitempty"`
+	RoleArn string `json:"ROLE ARN,omitempty"`
+	Linded string `json:"LINKED,omitempty"`
+}
+
+type UserRoleList struct {
+	UserRoleList []UserRole `json:"UserRoleList,omitempty"`
+}
+
+// Struct for the 'rosa list ocm-role' output
+type OCMRole struct {
+	RoleName string `json:"ROLE NAME,omitempty"`
+	RoleArn string `json:"ROLE ARN,omitempty"`
+	Linded string `json:"LINKED,omitempty"`
+	Admin string `json:"ADMIN,omitempty"`
+	AwsManaged string `json:"AWS MANAGED,omitempty"`
+}
+
+type OCMRoleList struct {
+	OCMRoleList []OCMRole `json:"OCMRoleList,omitempty"`
+}
+type AccountsInfo struct {
+	AWSArn string `json:"AWS ARN,omitempty"`
+	AWSAccountID string `json:"AWS Account ID,omitempty"`
+	AWSDefaultRegion string `json:"AWS Default Region,omitempty"`
+	OCMApi string `json:"OCM API,omitempty"`
+	OCMAccountEmail string `json:"OCM Account Email,omitempty"`
+	OCMAccountID string `json:"OCM Account ID,omitempty"`
+	OCMAccountName string `json:"OCM Account Name,omitempty"`
+	OCMAccountUsername string `json:"OCM Account Username,omitempty"`
+	OCMOrganizationExternalID string `json:"OCM Organization External ID,omitempty"`
+	OCMOrganizationID string `json:"OCM Organization ID,omitempty"`
+	OCMOrganizationName string `json:"OCM Organization Name,omitempty"`
+}
+
+type AccountRole struct {
+	RoleName string `json:"ROLE NAME,omitempty"`
+	RoleType string `json:"ROLE TYPE,omitempty"`
+	RoleArn string `json:"ROLE ARN,omitempty"`
+	OpenshiftVersion string `json:"OPENSHIFT VERSION,omitempty"`
+	AWSManaged string `json:"AWS Managed,omitempty"`
+}
+type AccountRoleList struct {
+	AccountRoleList []*AccountRole `json:"AccountRoleList,omitempty"`
+}
+
+type OIDCConfig struct {
+	ID string `json:"ID,omitempty"`
+	Managed string `json:"MANAGED,omitempty"`
+	IssuerUrl string `json:"ISSUER URL,omitempty"`
+	SecretArn string `json:"SECRET ARN,omitempty"`
+}
+type OIDCConfigList struct {
+	OIDCConfigList []OIDCConfig `json:"OIDCConfigList,omitempty"`
+}
+
+// List regions
+func (ors *ocmResourceService) ListRegion(flags ...string) ([]*CloudRegion, bytes.Buffer, error) {
+	listRegion := ors.client.Runner
+	listRegion = listRegion.Cmd("list", "regions").CmdFlags(flags...)
+	output, err := listRegion.Run()
+	if err != nil {
+		return []*CloudRegion{}, output, err
+	}
+	rList, err := ors.ReflectRegionList(output)
+	return rList, output, err
+}
+
+// Parse the result of 'rosa list regions' to the CloudRegion struct
+func (ors *ocmResourceService) ReflectRegionList(result bytes.Buffer) (regions []*CloudRegion, err error) {
+	theMap := ors.client.Parser.TableData.Input(result).Parse().Output()
+	for _, regionItem := range theMap {
+		region := &CloudRegion{}
+		err = MapStructure(regionItem, region)
+		if err != nil {
+			return
+		}
+		regions = append(regions, region)
+	}
+	return
+}
+
+// Parse the result of 'rosa whoami' to the AccountsInfo struct
+func (ors *ocmResourceService) ReflectAccountsInfo(result bytes.Buffer) *AccountsInfo {
+	res := new(AccountsInfo)
+	theMap, _ := ors.client.Parser.TextData.Input(result).Parse().JsonToMap()
+	data, _ := json.Marshal(&theMap)
+	json.Unmarshal(data, res)
+	return res
+}
+
+// Parse the result of 'rosa list user-role' to the UserRoleList struct
+func (ors *ocmResourceService) ReflectUserRoleList(result bytes.Buffer) (url UserRoleList, err error) {
+	url = UserRoleList{}
+	theMap := ors.client.Parser.TableData.Input(result).Parse().Output()
+	for _, userroleItem := range theMap {
+		ur := &UserRole{}
+		err = MapStructure(userroleItem, ur)
+		if err != nil {
+			return
+		}
+		url.UserRoleList = append(url.UserRoleList, *ur)
+	}
+	return
+}
+
+// run `rosa list user-role` command
+func (ors *ocmResourceService) ListUserRole() (UserRoleList, bytes.Buffer, error) {
+	ors.client.Runner.cmdArgs = []string{}
+	listUserRole := ors.client.Runner.
+		Cmd("list", "user-role")
+	output, err := listUserRole.Run()
+	if err != nil {
+		return UserRoleList{}, output, err
+	}
+	uList, err := ors.ReflectUserRoleList(output)
+	return uList, output, err
+
+}
+
+// run `rosa delete user-role` command
+func (ors *ocmResourceService) DeleteUserRole(flags ...string) (bytes.Buffer, error) {
+	deleteUserRole := ors.client.Runner
+	deleteUserRole = deleteUserRole.Cmd("delete", "user-role").CmdFlags(flags...)
+	return deleteUserRole.Run()
+}
+
+// run `rosa link user-role` command
+func (ors *ocmResourceService) LinkUserRole(flags ...string) (bytes.Buffer, error) {
+	linkUserRole := ors.client.Runner
+	linkUserRole = linkUserRole.Cmd("link", "user-role").CmdFlags(flags...)
+	return linkUserRole.Run()
+}
+
+// run `rosa unlink user-role` command
+func (ors *ocmResourceService) UnlinkUserRole(flags ...string) (bytes.Buffer, error) {
+	unlinkUserRole := ors.client.Runner
+	unlinkUserRole = unlinkUserRole.Cmd("unlink", "user-role").CmdFlags(flags...)
+	return unlinkUserRole.Run()
+}
+
+// run `rosa create user-role` command
+func (ors *ocmResourceService) CreateUserRole(flags ...string) (bytes.Buffer, error) {
+	createUserRole := ors.client.Runner
+	createUserRole = createUserRole.Cmd("create", "user-role").CmdFlags(flags...)
+	return createUserRole.Run()
+}
+
+// run `rosa whoami` command
+func (ors *ocmResourceService) Whoami() (bytes.Buffer, error) {
+	ors.client.Runner.cmdArgs = []string{}
+	whoami := ors.client.Runner.Cmd("whoami")
+	return whoami.Run()
+}
+
+// Get specified user-role by user-role prefix and ocmAccountUsername
+func (url UserRoleList) UserRole(prefix string, ocmAccountUsername string) (userRoles UserRole) {
+	userRoleName := fmt.Sprintf("%s-User-%s-Role", prefix, ocmAccountUsername)
+	for _, roleItem := range url.UserRoleList {
+		if roleItem.RoleName == userRoleName {
+			logger.Infof("Found the user role %s", userRoleName)
+			return roleItem
+		}
+	}
+	return
+}
+
+// run `rosa create account-roles` command
+func (ors *ocmResourceService) CreateAccountRole(flags ...string) (bytes.Buffer, error) {
+	createAccountRole := ors.client.Runner
+	createAccountRole = createAccountRole.Cmd("create", "account-roles").CmdFlags(flags...)
+	return createAccountRole.Run()
+}
+
+// Parse the result of 'rosa list account-roles' to the AccountRoleList struct
+func (ors *ocmResourceService) ReflectAccountRoleList(result bytes.Buffer) (arl AccountRoleList, err error) {
+	arl = AccountRoleList{}
+	theMap := ors.client.Parser.TableData.Input(result).Parse().Output()
+	for _, accountRoleItem := range theMap {
+		ar := &AccountRole{}
+		err = MapStructure(accountRoleItem, ar)
+		if err != nil {
+			return
+		}
+		arl.AccountRoleList = append(arl.AccountRoleList, ar)
+	}
+	return
+}
+
+// run `rosa delete account-roles` command
+func (ors *ocmResourceService) DeleteAccountRole(flags ...string) (bytes.Buffer, error) {
+	deleteAccountRole := ors.client.Runner
+	deleteAccountRole = deleteAccountRole.Cmd("delete", "account-roles").CmdFlags(flags...)
+	return deleteAccountRole.Run()
+}
+
+// run `rosa list account-roles` command
+func (ors *ocmResourceService) ListAccountRole() (AccountRoleList, bytes.Buffer, error) {
+	ors.client.Runner.cmdArgs = []string{}
+	listAccountRole := ors.client.Runner.
+		Cmd("list", "account-roles")
+	output, err := listAccountRole.Run()
+	if err != nil {
+		return AccountRoleList{}, output, err
+	}
+	arl, err := ors.ReflectAccountRoleList(output)
+	return arl, output, err
+
+}
+
+// Get specified account roles by prefix
+func (arl AccountRoleList) AccountRoles(prefix string) (accountRoles []*AccountRole) {
+	for _, roleItem := range arl.AccountRoleList {
+		if strings.Contains(roleItem.RoleName, prefix) {
+			accountRoles = append(accountRoles, roleItem)
+		}
+	}
+	return
+}
+
+// Get specified account role by the arn
+func (arl AccountRoleList) AccountRole(arn string) (accountRole *AccountRole) {
+	for _, roleItem := range arl.AccountRoleList {
+		if roleItem.RoleArn == arn {
+			return roleItem
+		}
+	}
+	return
+}
+
+// run `rosa upgrade account-roles` command
+func (ors *ocmResourceService) UpgradeAccountRole(flags ...string) (bytes.Buffer, error) {
+	upgradeAccountRole := ors.client.Runner
+	upgradeAccountRole = upgradeAccountRole.Cmd("upgrade", "account-roles").CmdFlags(flags...)
+	return upgradeAccountRole.Run()
+}
+
+func (arl AccountRoleList) InstallerRole(prefix string, hostedcp bool) (accountRole *AccountRole) {
+	roleType := RoleTypeSuffixMap["Installer"]
+	if hostedcp {
+		roleType = "HCP-ROSA-" + roleType
+	}
+	for _, roleItem := range arl.AccountRoleList {
+		if hostedcp && strings.Contains(roleItem.RoleName, prefix) && strings.Contains(roleItem.RoleName, roleType) {
+			return roleItem
+		}
+		if !hostedcp && strings.Contains(roleItem.RoleName, prefix) && strings.Contains(roleItem.RoleName, roleType) && !strings.Contains(roleItem.RoleName, "HCP-ROSA-") {
+			return roleItem
+		}
+	}
+	return
+}
+
+// run `rosa create ocm-role` command
+func (ors *ocmResourceService) CreateOCMRole(flags ...string) (bytes.Buffer, error) {
+	createOCMRole := ors.client.Runner
+	createOCMRole = createOCMRole.Cmd("create", "ocm-role").CmdFlags(flags...)
+	return createOCMRole.Run()
+}
+
+// run `rosa list ocm-role` command
+func (ors *ocmResourceService) ListOCMRole() (OCMRoleList, bytes.Buffer, error) {
+	ors.client.Runner.cmdArgs = []string{}
+	listOCMRole := ors.client.Runner.
+		Cmd("list", "ocm-role")
+	output, err := listOCMRole.Run()
+	if err != nil {
+		return OCMRoleList{}, output, err
+	}
+	orl, err := ors.ReflectOCMRoleList(output)
+	return orl, output, err
+}
+
+// run `rosa delete ocm-role` command
+func (ors *ocmResourceService) DeleteOCMRole(flags ...string) (bytes.Buffer, error) {
+	deleteOCMRole := ors.client.Runner
+	deleteOCMRole = deleteOCMRole.Cmd("delete", "ocm-role").CmdFlags(flags...)
+	return deleteOCMRole.Run()
+}
+
+// run `rosa link ocm-role` command
+func (ors *ocmResourceService) LinkOCMRole(flags ...string) (bytes.Buffer, error) {
+	linkOCMRole := ors.client.Runner
+	linkOCMRole = linkOCMRole.Cmd("link", "ocm-role").CmdFlags(flags...)
+	return linkOCMRole.Run()
+}
+
+// run `rosa unlink ocm-role` command
+func (ors *ocmResourceService) UnlinkOCMRole(flags ...string) (bytes.Buffer, error) {
+	unlinkOCMRole := ors.client.Runner
+	unlinkOCMRole = unlinkOCMRole.Cmd("unlink", "ocm-role").CmdFlags(flags...)
+	return unlinkOCMRole.Run()
+}
+
+// Parse the result of 'rosa list ocm-role' to the OCMRoleList struct
+func (ors *ocmResourceService) ReflectOCMRoleList(result bytes.Buffer) (orl OCMRoleList, err error) {
+	orl = OCMRoleList{}
+	theMap := ors.client.Parser.TableData.Input(result).Parse().Output()
+	for _, ocmRoleItem := range theMap {
+		or := &OCMRole{}
+		err = MapStructure(ocmRoleItem, or)
+		if err != nil {
+			return
+		}
+		orl.OCMRoleList = append(orl.OCMRoleList, *or)
+	}
+	return
+}
+
+// Get specified ocm-role by ocm-role prefix and the OCM organization external ID
+func (url OCMRoleList) OCMRole(prefix string, ocmOrganizationExternalID string) (userRoles OCMRole) {
+	ocmRoleName := fmt.Sprintf("%s-OCM-Role-%s", prefix, ocmOrganizationExternalID)
+	for _, roleItem := range url.OCMRoleList {
+		if roleItem.RoleName == ocmRoleName {
+			logger.Infof("Found the OCM role %s", ocmRoleName)
+			return roleItem
+		}
+	}
+	return
+}
+
+// Get the ocm-role which is linked to the organization
+func (url OCMRoleList) FindLinkedOCMRole() (userRoles OCMRole) {
+	for _, roleItem := range url.OCMRoleList {
+		if roleItem.Linded == "Yes" {
+			logger.Infof("Found a linked OCM role %s", roleItem.RoleName)
+			return roleItem
+		}
+	}
+	return
+}
+
+// run `rosa create oidc-config` command
+func (ors *ocmResourceService) CreateOIDCConfig(flags ...string) (bytes.Buffer, error) {
+	createOIDCConfig := ors.client.Runner
+	createOIDCConfig = createOIDCConfig.Cmd("create", "oidc-config").CmdFlags(flags...)
+	return createOIDCConfig.Run()
+}
+
+// run `rosa list oidc-config` command
+func (ors *ocmResourceService) ListOIDCConfig() (OIDCConfigList, bytes.Buffer, error) {
+	ors.client.Runner.cmdArgs = []string{}
+	listOIDCConfig := ors.client.Runner.
+		Cmd("list", "oidc-config")
+	output, err := listOIDCConfig.Run()
+	if err != nil {
+		return OIDCConfigList{}, output, err
+	}
+	oidcl, err := ors.ReflectOIDCConfigList(output)
+	return oidcl, output, err
+
+}
+
+// run `rosa delete oidc-config` command
+func (ors *ocmResourceService) DeleteOIDCConfig(flags ...string) (bytes.Buffer, error) {
+	deleteOIDCConfig := ors.client.Runner
+	deleteOIDCConfig = deleteOIDCConfig.Cmd("delete", "oidc-config").CmdFlags(flags...)
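+	// Illustrative flags a caller might pass: "--oidc-config-id", id, "--mode", "auto", "-y".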
+	return deleteOIDCConfig.Run()
+}
+
+// Parse the result of 'rosa list oidc-config' to the OIDCConfigList struct
+func (ors *ocmResourceService) ReflectOIDCConfigList(result bytes.Buffer) (oidcl OIDCConfigList, err error) {
+	oidcl = OIDCConfigList{}
+	theMap := ors.client.Parser.TableData.Input(result).Parse().Output()
+	for _, oidcConfigItem := range theMap {
+		oidc := &OIDCConfig{}
+		err = MapStructure(oidcConfigItem, oidc)
+		if err != nil {
+			return
+		}
+		oidcl.OIDCConfigList = append(oidcl.OIDCConfigList, *oidc)
+	}
+	return
+}
+
+// Get the oidc id by the provider url
+func (ors *ocmResourceService) GetOIDCIdFromList(providerURL string) (string, error) {
+	oidcConfigList, _, err := ors.ListOIDCConfig()
+	if err != nil {
+		return "", err
+	}
+	for _, item := range oidcConfigList.OIDCConfigList {
+		if strings.Contains(item.IssuerUrl, providerURL) {
+			return item.ID, nil
+		}
+	}
+	logger.Warnf("No OIDC config with the URL %s was found.", providerURL)
+	return "", nil
+}
+
+// Get specified oidc-config by oidc-config-id
+func (oidcl OIDCConfigList) OIDCConfig(id string) (oidc OIDCConfig) {
+	for _, item := range oidcl.OIDCConfigList {
+		if item.ID == id {
+			return item
+		}
+	}
+	return
+}
+
+// run `rosa create operator-roles` command
+func (ors *ocmResourceService) CreateOperatorRoles(flags ...string) (bytes.Buffer, error) {
+	createOperatorRoles := ors.client.Runner
+	createOperatorRoles = createOperatorRoles.Cmd("create", "operator-roles").CmdFlags(flags...)
+	return createOperatorRoles.Run()
+}
+
+// run `rosa delete operator-roles` command
+func (ors *ocmResourceService) DeleteOperatorRoles(flags ...string) (bytes.Buffer, error) {
+	deleteOperatorRoles := ors.client.Runner
+	deleteOperatorRoles = deleteOperatorRoles.Cmd("delete", "operator-roles").CmdFlags(flags...)
+	return deleteOperatorRoles.Run()
+}
+
+// run `rosa create oidc-provider` command
+func (ors *ocmResourceService) CreateOIDCProvider(flags ...string) (bytes.Buffer, error) {
+	createOIDCProvider := ors.client.Runner
+	createOIDCProvider = createOIDCProvider.Cmd("create", "oidc-provider").CmdFlags(flags...)
+	return createOIDCProvider.Run()
+}
+
+func (ors *ocmResourceService) CleanResources(clusterID string) (errors []error) {
+	logger.Debugf("Nothing cluster-related was created here, so there is nothing to clean")
+	return
+}
diff --git a/test/util/rosacli/oidc_utils.go b/test/util/rosacli/oidc_utils.go
new file mode 100644
index 000000000..52cf00c08
--- /dev/null
+++ b/test/util/rosacli/oidc_utils.go
@@ -0,0 +1,45 @@
+package rosacli
+
+import (
+	"regexp"
+	"strings"
+
+	logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
+)
+
+// Split resources from the aws arn
+func SplitARNResources(v string) []string {
+	var parts []string
+	var offset int
+
+	for offset <= len(v) {
+		idx := strings.IndexAny(v[offset:], "/:")
+		if idx < 0 {
+			parts = append(parts, v[offset:])
+			break
+		}
+		parts = append(parts, v[offset:idx+offset])
+		offset += idx + 1
+	}
+	return parts
+}
+
+// Extract the oidc provider ARN from the output of `rosa create oidc-config --mode auto` and from common messages containing the arn
+func ExtractOIDCProviderARN(output string) string {
+	oidcProviderArnRE := regexp.MustCompile(`arn:aws:iam::[^']+:oidc-provider/[^']+`)
+	submatchall := oidcProviderArnRE.FindAllString(output, -1)
+	if len(submatchall) < 1 {
+		logger.Warnf("Cannot find a substring matching %s in input string %s! Please check the matching string", oidcProviderArnRE, output)
+		return ""
+	}
+	if len(submatchall) > 1 {
+		logger.Warnf("Found more than one substring matching %s! Please check this unexpected result and update the regex if needed.", oidcProviderArnRE)
+	}
+	return submatchall[0]
+}
+
+// Extract the oidc provider ID from the provider ARN
+func ExtractOIDCProviderIDFromARN(arn string) string {
+	splitElements := SplitARNResources(arn)
+	return splitElements[len(splitElements)-1]
+}
diff --git a/test/util/rosacli/roles_config.go b/test/util/rosacli/roles_config.go
new file mode 100644
index 000000000..ec8f30ad9
--- /dev/null
+++ b/test/util/rosacli/roles_config.go
@@ -0,0 +1,27 @@
+package rosacli
+
+import (
+	"os"
+	"strings"
+)
+
+// Get installer role arn from ${SHARED_DIR}/account-roles-arns
+func GetInstallerRoleArn(hostedcp bool) (string, error) {
+	sharedDIR := os.Getenv("SHARED_DIR")
+	filePath := sharedDIR + "/account-roles-arns"
+	fileContents, err := os.ReadFile(filePath)
+	if err != nil {
+		return "", err
+	}
+	lines := strings.Split(string(fileContents), "\n")
+	for i := range lines {
+		if hostedcp && strings.Contains(lines[i], "-HCP-ROSA-Installer-Role") {
+			return lines[i], nil
+		}
+		if !hostedcp && !strings.Contains(lines[i], "-ROSA-Installer-Role") && strings.Contains(lines[i], "-Installer-Role") {
+			return lines[i], nil
+		}
+	}
+	return "", nil
+}
diff --git a/test/util/rosacli/slice_utils.go b/test/util/rosacli/slice_utils.go
new file mode 100644
index 000000000..a3a9672ce
--- /dev/null
+++ b/test/util/rosacli/slice_utils.go
@@ -0,0 +1,27 @@
+package rosacli
+
+func RemoveFromStringSlice(slice []string, value string) []string {
+	var newSlice []string
+	for _, v := range slice {
+		if v != value {
+			newSlice = append(newSlice, v)
+		}
+	}
+	return newSlice
+}
+
+func SliceContains(slice []string, value string) bool {
+	for _, item := range slice {
+		if item == value {
+			return true
+		}
+	}
+	return false
+}
+
+func AppendToStringSliceIfNotExist(slice []string, value string) []string {
+	if !SliceContains(slice, value) {
+		slice = append(slice, value)
+	}
+	return slice
+}
diff --git a/test/util/rosacli/string_utils.go b/test/util/rosacli/string_utils.go
new file mode 100644
index 000000000..fcc3a913c
--- /dev/null
+++ b/test/util/rosacli/string_utils.go
@@ -0,0 +1,46 @@
+package rosacli
+
+import (
+	"fmt"
+	"math/rand"
+	"strings"
+	"time"
+)
+
+func ParseLabels(labels string) []string {
+	return ParseCommaSeparatedStrings(labels)
+}
+
+func ParseTaints(taints string) []string {
+	return ParseCommaSeparatedStrings(taints)
+}
+
+func ParseTuningConfigs(tuningConfigs string) []string {
+	return ParseCommaSeparatedStrings(tuningConfigs)
+}
+
+func ParseCommaSeparatedStrings(input string) (output []string) {
+	split := strings.Split(strings.ReplaceAll(input, " ", ""), ",")
+	for _, item := range split {
+		if strings.TrimSpace(item) != "" {
+			output = append(output, item)
+		}
+	}
+	return
+}
+
+// Generate random string
+func GenerateRandomString(n int) string {
+	const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+	rand.Seed(time.Now().UnixNano())
+
+	s := make([]byte, n)
+	for i := range s {
+		s[i] = letters[rand.Intn(len(letters))]
+	}
+	return string(s)
+}
+
+func GenerateRandomName(prefix string, n int) string {
+	return fmt.Sprintf("%s-%s", prefix, strings.ToLower(GenerateRandomString(n)))
+}
diff --git a/test/util/rosacli/tuning_config_service.go b/test/util/rosacli/tuning_config_service.go
new file mode 100644
index 000000000..721bcf5f7
--- /dev/null
+++ b/test/util/rosacli/tuning_config_service.go
@@ -0,0 +1,163 @@
+package rosacli
+
+import (
+	"bytes"
+	"os"
+
+	logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
+	"gopkg.in/yaml.v3"
+)
+
+type TuningConfigService interface {
+	ResourcesCleaner
+
+	CreateTuningConfig(clusterID string, tcName string, specContent string, flags ...string) (bytes.Buffer, error)
+	EditTuningConfig(clusterID string, tcName string, flags ...string) (bytes.Buffer, error)
+	DeleteTuningConfig(clusterID string, tcName string) (bytes.Buffer, error)
+
+	ListTuningConfigs(clusterID string) (bytes.Buffer, error)
+	ReflectTuningConfigList(result bytes.Buffer) (mpl *TuningConfigList, err error)
+	ListTuningConfigsAndReflect(clusterID string) (*TuningConfigList, error)
+
+	DescribeTuningConfig(clusterID string, tcID string) (bytes.Buffer, error)
+	ReflectTuningConfigDescription(result bytes.Buffer) (npd *TuningConfigDescription, err error)
+	DescribeTuningConfigAndReflect(clusterID string, tcID string) (*TuningConfigDescription, error)
+}
+
+type tuningConfigService struct {
+	ResourcesService
+
+	tuningConfigs map[string][]string
+}
+
+func NewTuningConfigService(client *Client) TuningConfigService {
+	return &tuningConfigService{
+		ResourcesService: ResourcesService{
+			client: client,
+		},
+		tuningConfigs: make(map[string][]string),
+	}
+}
+
+type TuningConfig struct {
+	ID string `json:"ID,omitempty"`
+	Name string `json:"NAME,omitempty"`
+}
+
+type TuningConfigList struct {
+	TuningConfigs []TuningConfig `json:"TuningConfigs,omitempty"`
+}
+
+// Struct for the 'rosa describe tuning-config' output
+type TuningConfigDescription struct {
+	Name string `yaml:"Name,omitempty"`
+	ID string `yaml:"ID,omitempty"`
+	Spec string `yaml:"Spec,omitempty"`
+}
+
+func (tcs *tuningConfigService) CreateTuningConfig(clusterID string, tcName string, specContent string, flags ...string) (output bytes.Buffer, err error) {
+	specPath, err := CreateTempFileWithContent(specContent)
+	if err != nil {
+		return *bytes.NewBufferString(""), err
+	}
+	defer os.Remove(specPath)
+	output, err = tcs.client.Runner.
+		Cmd("create", "tuning-config").
+		CmdFlags(append(flags, "-c", clusterID, "--name", tcName, "--spec-path", specPath)...).
+		Run()
+	if err == nil {
+		tcs.tuningConfigs[clusterID] = append(tcs.tuningConfigs[clusterID], tcName)
+	}
+	return
+}
+
+func (tcs *tuningConfigService) EditTuningConfig(clusterID string, tcID string, flags ...string) (bytes.Buffer, error) {
+	combflags := append([]string{"-c", clusterID}, flags...)
+	return tcs.client.Runner.
+		Cmd("edit", "tuning-configs", tcID).
+		CmdFlags(combflags...).
+		Run()
+}
+
+func (tcs *tuningConfigService) DeleteTuningConfig(clusterID string, tcName string) (output bytes.Buffer, err error) {
+	output, err = tcs.client.Runner.
+		Cmd("delete", "tuning-configs", tcName).
+		CmdFlags("-c", clusterID, "-y").
+		Run()
+	if err == nil {
+		tcs.tuningConfigs[clusterID] = RemoveFromStringSlice(tcs.tuningConfigs[clusterID], tcName)
+	}
+	return
+}
+
+func (tcs *tuningConfigService) ListTuningConfigs(clusterID string) (bytes.Buffer, error) {
+	list := tcs.client.Runner.Cmd("list", "tuning-configs").CmdFlags("-c", clusterID)
+	return list.Run()
+}
+
+func (tcs *tuningConfigService) ReflectTuningConfigList(result bytes.Buffer) (tcl *TuningConfigList, err error) {
+	tcl = &TuningConfigList{}
+	theMap := tcs.client.Parser.TableData.Input(result).Parse().Output()
+	for _, tcItem := range theMap {
+		tuningConfig := &TuningConfig{}
+		err = MapStructure(tcItem, tuningConfig)
+		if err != nil {
+			return
+		}
+		tcl.TuningConfigs = append(tcl.TuningConfigs, *tuningConfig)
+	}
+	return
+}
+
+func (tcs *tuningConfigService) ListTuningConfigsAndReflect(clusterID string) (*TuningConfigList, error) {
+	output, err := tcs.ListTuningConfigs(clusterID)
+	if err != nil {
+		return nil, err
+	}
+	return tcs.ReflectTuningConfigList(output)
+}
+
+func (tcs *tuningConfigService) DescribeTuningConfig(clusterID string, tcID string) (bytes.Buffer, error) {
+	describe := tcs.client.Runner.
+		Cmd("describe", "tuning-configs", tcID).
+		CmdFlags("-c", clusterID)
+
+	return describe.Run()
+}
+
+func (tcs *tuningConfigService) DescribeTuningConfigAndReflect(clusterID string, tcID string) (*TuningConfigDescription, error) {
+	output, err := tcs.DescribeTuningConfig(clusterID, tcID)
+	if err != nil {
+		return nil, err
+	}
+	return tcs.ReflectTuningConfigDescription(output)
+}
+
+func (tcs *tuningConfigService) ReflectTuningConfigDescription(result bytes.Buffer) (res *TuningConfigDescription, err error) {
+	var data []byte
+	res = &TuningConfigDescription{}
+	theMap, err := tcs.client.Parser.TextData.Input(result).Parse().YamlToMap()
+	if err != nil {
+		return
+	}
+	data, err = yaml.Marshal(&theMap)
+	if err != nil {
+		return
+	}
+	err = yaml.Unmarshal(data, res)
+	return res, err
+}
+
+func (tcs *tuningConfigService) CleanResources(clusterID string) (errors []error) {
+	var tcsToDel []string
+	tcsToDel = append(tcsToDel, tcs.tuningConfigs[clusterID]...)
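+	// Iterate over a copy: DeleteTuningConfig rewrites tcs.tuningConfigs[clusterID]
+	// on success, so ranging over the live slice could skip entries.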
+	for _, tcName := range tcsToDel {
+		logger.Infof("Remove remaining tuningconfig '%s'", tcName)
+		_, err := tcs.DeleteTuningConfig(clusterID, tcName)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+
+	return
+}
diff --git a/test/util/rosacli/user_service.go b/test/util/rosacli/user_service.go
new file mode 100644
index 000000000..d621de0a3
--- /dev/null
+++ b/test/util/rosacli/user_service.go
@@ -0,0 +1,180 @@
+package rosacli
+
+import (
+	"bytes"
+
+	logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
+)
+
+type UserService interface {
+	ResourcesCleaner
+
+	ListUsers(clusterID string) (GroupUserList, bytes.Buffer, error)
+	ReflectUsersList(result bytes.Buffer) (gul GroupUserList, err error)
+	RevokeUser(clusterID string, role string, user string, flags ...string) (bytes.Buffer, error)
+	GrantUser(clusterID string, role string, user string, flags ...string) (bytes.Buffer, error)
+	CreateAdmin(clusterID string) (bytes.Buffer, error)
+	DescribeAdmin(clusterID string) (bytes.Buffer, error)
+	DeleteAdmin(clusterID string) (bytes.Buffer, error)
+}
+
+type userService struct {
+	ResourcesService
+
+	usersGranted map[string][]*userRole
+	adminCreated []string
+}
+
+type userRole struct {
+	user string
+	role string
+}
+
+func NewUserService(client *Client) UserService {
+	return &userService{
+		ResourcesService: ResourcesService{
+			client: client,
+		},
+		usersGranted: make(map[string][]*userRole),
+	}
+}
+
+// Struct for the 'rosa list users' output
+type GroupUser struct {
+	ID string `json:"ID,omitempty"`
+	Groups string `json:"GROUPS,omitempty"`
+}
+type GroupUserList struct {
+	GroupUsers []GroupUser `json:"GroupUsers,omitempty"`
+}
+
+// Grant user
+func (us *userService) GrantUser(clusterID string, role string, user string, flags ...string) (output bytes.Buffer, err error) {
+	output, err = us.client.Runner.
+		Cmd("grant", "user", role).
+		CmdFlags(append(flags, "-c", clusterID, "--user", user)...).
+		Run()
+	if err == nil {
+		createdUserRole := &userRole{
+			user: user,
+			role: role,
+		}
+		us.usersGranted[clusterID] = append(us.usersGranted[clusterID], createdUserRole)
+	}
+	return
+}
+
+// Revoke user
+func (us *userService) RevokeUser(clusterID string, role string, user string, flags ...string) (output bytes.Buffer, err error) {
+	output, err = us.client.Runner.
+		Cmd("revoke", "user", role).
+		CmdFlags(append(flags, "-y", "-c", clusterID, "--user", user)...).
+		Run()
+	if err == nil {
+		var newRoles []*userRole
+		for _, createdUserRole := range us.usersGranted[clusterID] {
+			if createdUserRole.user != user || createdUserRole.role != role {
+				newRoles = append(newRoles, createdUserRole)
+			}
+		}
+		us.usersGranted[clusterID] = newRoles
+	}
+	return
+}
+
+// List users
+func (us *userService) ListUsers(clusterID string) (GroupUserList, bytes.Buffer, error) {
+	listUsers := us.client.Runner.
+		Cmd("list", "users").
+		CmdFlags("-c", clusterID)
+	output, err := listUsers.Run()
+	if err != nil {
+		return GroupUserList{}, output, err
+	}
+	gul, err := us.ReflectUsersList(output)
+	return gul, output, err
+}
+
+// Parse the result of 'rosa list users' to the GroupUserList struct
+func (us *userService) ReflectUsersList(result bytes.Buffer) (gul GroupUserList, err error) {
+	gul = GroupUserList{}
+	theMap := us.client.Parser.TableData.Input(result).Parse().Output()
+	for _, userItem := range theMap {
+		user := &GroupUser{}
+		err = MapStructure(userItem, user)
+		if err != nil {
+			return
+		}
+		gul.GroupUsers = append(gul.GroupUsers, *user)
+	}
+	return gul, err
+}
+
+// Get specified user by user name
+func (gl GroupUserList) User(userName string) (user GroupUser, err error) {
+	for _, userItem := range gl.GroupUsers {
+		if userItem.ID == userName {
+			user = userItem
+			return
+		}
+	}
+	return
+}
+
+// Create admin
+func (us *userService) CreateAdmin(clusterID string) (output bytes.Buffer, err error) {
+	createAdmin := us.client.Runner.
+		Cmd("create", "admin").
+		CmdFlags("-c", clusterID, "-y")
+
+	output, err = createAdmin.Run()
+	if err == nil {
+		us.adminCreated = AppendToStringSliceIfNotExist(us.adminCreated, clusterID)
+		logger.Infof("Add admin to cluster %v", clusterID)
+		logger.Infof("Admin created = %v", us.adminCreated)
+	}
+	return
+}
+
+// Describe admin
+func (us *userService) DescribeAdmin(clusterID string) (bytes.Buffer, error) {
+	describeAdmin := us.client.Runner.
+		Cmd("describe", "admin").
+		CmdFlags("-c", clusterID)
+
+	return describeAdmin.Run()
+}
+
+// Delete admin
+func (us *userService) DeleteAdmin(clusterID string) (output bytes.Buffer, err error) {
+	deleteAdmin := us.client.Runner.
+		Cmd("delete", "admin").
+		CmdFlags("-c", clusterID, "-y")
+
+	output, err = deleteAdmin.Run()
+	if err == nil {
+		us.adminCreated = RemoveFromStringSlice(us.adminCreated, clusterID)
+	}
+	return
+}
+
+func (us *userService) CleanResources(clusterID string) (errors []error) {
+	if SliceContains(us.adminCreated, clusterID) {
+		logger.Infof("Remove remaining admin")
+		if _, err := us.DeleteAdmin(clusterID); err != nil {
+			errors = append(errors, err)
+		}
+	}
+
+	var ugsToDel []*userRole
+	ugsToDel = append(ugsToDel, us.usersGranted[clusterID]...)
+	for _, grantedUserRole := range ugsToDel {
+		logger.Infof("Remove remaining granted user '%s' with role '%s'", grantedUserRole.user, grantedUserRole.role)
+		_, err := us.RevokeUser(clusterID, grantedUserRole.role, grantedUserRole.user)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+
+	return
+}
diff --git a/test/util/rosacli/version_service.go b/test/util/rosacli/version_service.go
new file mode 100644
index 000000000..dddc303f0
--- /dev/null
+++ b/test/util/rosacli/version_service.go
@@ -0,0 +1,188 @@
+package rosacli
+
+import (
+	"bytes"
+	"sort"
+
+	"github.com/Masterminds/semver"
+	logger "github.com/openshift/openshift-tests-private/test/extended/util/logext"
+)
+
+const VersionChannelGroupStable = "stable"
+const VersionChannelGroupNightly = "nightly"
+
+type VersionService interface {
+	ResourcesCleaner
+
+	ReflectVersions(result bytes.Buffer) (*OpenShiftVersionList, error)
+	ListVersions(channelGroup string, hostedCP bool, flags ...string) (bytes.Buffer, error)
+	ListAndReflectVersions(channelGroup string, hostedCP bool, flags ...string) (*OpenShiftVersionList, error)
+}
+
+type versionService struct {
+	ResourcesService
+}
+
+func NewVersionService(client *Client) VersionService {
+	return &versionService{
+		ResourcesService: ResourcesService{
+			client: client,
+		},
+	}
+}
+
+type OpenShiftVersion struct {
+	Version string `json:"VERSION,omitempty"`
+	Default string `json:"DEFAULT,omitempty"`
+	AvailableUpgrades string `json:"AVAILABLE UPGRADES,omitempty"`
+}
+
+type OpenShiftVersionList struct {
+	OpenShiftVersions []*OpenShiftVersion `json:"OpenShiftVersions,omitempty"`
+}
+
+// Reflect versions
+func (v *versionService) ReflectVersions(result bytes.Buffer) (versionList *OpenShiftVersionList, err error) {
+	versionList = &OpenShiftVersionList{}
+	theMap := v.client.Parser.TableData.Input(result).Parse().Output()
+	for _, item := range theMap {
+		version := &OpenShiftVersion{}
+		err = MapStructure(item, version)
+		if err != nil {
+			return versionList, err
+		}
+		versionList.OpenShiftVersions = append(versionList.OpenShiftVersions, version)
+	}
+	return versionList, err
+}
+
+// List versions: `rosa list versions` or `rosa list versions --hosted-cp`
+func (v *versionService) ListVersions(channelGroup string, hostedCP bool, flags ...string) (bytes.Buffer, error) {
+	listVersion := v.client.Runner.
+		Cmd("list", "versions").
+		CmdFlags(flags...)
+
+	if hostedCP {
+		listVersion.AddCmdFlags("--hosted-cp")
+	}
+
+	if channelGroup != "" {
+		listVersion.AddCmdFlags("--channel-group", channelGroup)
+	}
+
+	return listVersion.Run()
+}
+
+func (v *versionService) ListAndReflectVersions(channelGroup string, hostedCP bool, flags ...string) (versionList *OpenShiftVersionList, err error) {
+	var output bytes.Buffer
+	output, err = v.ListVersions(channelGroup, hostedCP, flags...)
+	if err != nil {
+		return versionList, err
+	}
+
+	versionList, err = v.ReflectVersions(output)
+	return versionList, err
+}
+
+func (v *versionService) CleanResources(clusterID string) (errors []error) {
+	logger.Debugf("Nothing to clean in Version Service")
+	return
+}
+
+// FindNearestBackwardMinorVersion returns the highest listed version whose minor is at or below `major.(minor-minorSub)` of the given version;
+// with `strict` set, only versions of exactly `major.(minor-minorSub)` match.
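+// Example: for version "4.14.5" with minorSub 2 and strict=true, it returns the
+// highest 4.12.z present in the list (nil if none is listed).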
+func (vl *OpenShiftVersionList) FindNearestBackwardMinorVersion(version string, minorSub int64, strict bool) (vs *OpenShiftVersion, err error) {
+	var baseVersionSemVer *semver.Version
+	baseVersionSemVer, err = semver.NewVersion(version)
+	if err != nil {
+		return
+	}
+	nvl, err := vl.FilterVersionsSameMajorAndEqualOrLowerThanMinor(baseVersionSemVer.Major(), baseVersionSemVer.Minor()-minorSub, strict)
+	if err != nil {
+		return
+	}
+	if nvl, err = nvl.Sort(true); err == nil && nvl.Len() > 0 {
+		vs = nvl.OpenShiftVersions[0]
+	}
+	return
+}
+
+// Sort sorts the version list from lowest to highest (or reversed when `reverse` is set)
+func (vl *OpenShiftVersionList) Sort(reverse bool) (nvl *OpenShiftVersionList, err error) {
+	versionListIndexMap := make(map[string]*OpenShiftVersion)
+	var semVerList []*semver.Version
+	var vSemVer *semver.Version
+	for _, version := range vl.OpenShiftVersions {
+		versionListIndexMap[version.Version] = version
+		if vSemVer, err = semver.NewVersion(version.Version); err != nil {
+			return
+		} else {
+			semVerList = append(semVerList, vSemVer)
+		}
+	}
+
+	if reverse {
+		sort.Sort(sort.Reverse(semver.Collection(semVerList)))
+	} else {
+		sort.Sort(semver.Collection(semVerList))
+	}
+
+	var sortedImageVersionList []*OpenShiftVersion
+	for _, semverVersion := range semVerList {
+		sortedImageVersionList = append(sortedImageVersionList, versionListIndexMap[semverVersion.Original()])
+	}
+
+	nvl = &OpenShiftVersionList{
+		OpenShiftVersions: sortedImageVersionList,
+	}
+
+	return
+}
+
+// FilterVersionsSameMajorAndEqualOrLowerThanMinor keeps the versions with the given major and a minor equal to or lower than `minor`, returning a new `OpenShiftVersionList` struct;
+// with `strict` set, only versions of exactly `major.minor` are kept
+func (vl *OpenShiftVersionList) FilterVersionsSameMajorAndEqualOrLowerThanMinor(major int64, minor int64, strict bool) (nvl *OpenShiftVersionList, err error) {
+	var filteredVersions []*OpenShiftVersion
+	var semverVersion *semver.Version
+	for _, version := range vl.OpenShiftVersions {
+		if semverVersion, err = semver.NewVersion(version.Version); err != nil {
+			return
+		} else if semverVersion.Major() == major &&
+			((strict && semverVersion.Minor() == minor) || (!strict && semverVersion.Minor() <= minor)) {
+			filteredVersions = append(filteredVersions, version)
+		}
+	}
+
+	nvl = &OpenShiftVersionList{
+		OpenShiftVersions: filteredVersions,
+	}
+
+	return
+}
+
+// FilterVersionsLowerThan keeps all versions lower than the given one
+func (vl *OpenShiftVersionList) FilterVersionsLowerThan(version string) (nvl *OpenShiftVersionList, err error) {
+	var givenSemVer *semver.Version
+	givenSemVer, err = semver.NewVersion(version)
+	if err != nil {
+		return
+	}
+
+	var filteredVersions []*OpenShiftVersion
+	var semverVersion *semver.Version
+	for _, version := range vl.OpenShiftVersions {
+		if semverVersion, err = semver.NewVersion(version.Version); err != nil {
+			return
+		} else if semverVersion.LessThan(givenSemVer) {
+			filteredVersions = append(filteredVersions, version)
+		}
+	}
+
+	nvl = &OpenShiftVersionList{
+		OpenShiftVersions: filteredVersions,
+	}
+
+	return
+}
+
+func (vl *OpenShiftVersionList) Len() int {
+	return len(vl.OpenShiftVersions)
+}
diff --git a/test/util/router.go b/test/util/router.go
new file mode 100644
index 000000000..e385f892a
--- /dev/null
+++ b/test/util/router.go
@@ -0,0 +1,82 @@
+package util
+
+import (
+	"context"
+	"time"
+
+	o "github.com/onsi/gomega"
+
+	corev1 "k8s.io/api/core/v1"
+	kapierrs "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+)
+
+func WaitForRouterInternalIP(oc *CLI) (string, error) {
+	return waitForNamedRouterServiceIP(oc, "router-internal-default")
+}
+
+func waitForRouterExternalIP(oc *CLI) (string, error) {
+	return waitForNamedRouterServiceIP(oc, "router-default")
+}
+
+func routerShouldHaveExternalService(oc *CLI) (bool, error) {
+	foundLoadBalancerServiceStrategyType := false
+	err := wait.PollImmediate(2*time.Second, 30*time.Second, func() (bool, error) {
+		ic, err := oc.AdminOperatorClient().OperatorV1().IngressControllers("openshift-ingress-operator").Get(context.Background(), "default", metav1.GetOptions{})
+		if kapierrs.IsNotFound(err) {
+			return false, nil
+		}
+		o.Expect(err).NotTo(o.HaveOccurred())
+		if ic.Status.EndpointPublishingStrategy == nil {
+			return false, nil
+		}
+		if ic.Status.EndpointPublishingStrategy.Type == "LoadBalancerService" {
+			foundLoadBalancerServiceStrategyType = true
+		}
+		return true, nil
+	})
+	return foundLoadBalancerServiceStrategyType, err
+}
+
+func WaitForRouterServiceIP(oc *CLI) (string, error) {
+	if useExternal, err := routerShouldHaveExternalService(oc); err != nil {
+		return "", err
+	} else if useExternal {
+		return waitForRouterExternalIP(oc)
+	}
+	return WaitForRouterInternalIP(oc)
+}
+
+func waitForNamedRouterServiceIP(oc *CLI, name string) (string, error) {
+	_, ns, err := GetRouterPodTemplate(oc)
+	if err != nil {
+		return "", err
+	}
+
+	// wait for the service to show up
+	var endpoint string
+	err = wait.PollImmediate(2*time.Second, 60*time.Second, func() (bool, error) {
+		svc, err := oc.AdminKubeClient().CoreV1().Services(ns).Get(context.Background(), name, metav1.GetOptions{})
+		if kapierrs.IsNotFound(err) {
+			return false, nil
+		}
+		o.Expect(err).NotTo(o.HaveOccurred())
+		if svc.Spec.Type == corev1.ServiceTypeLoadBalancer {
+			if len(svc.Status.LoadBalancer.Ingress) != 0 {
+				if len(svc.Status.LoadBalancer.Ingress[0].IP) != 0 {
+					endpoint = svc.Status.LoadBalancer.Ingress[0].IP
+					return true, nil
+				}
+				if len(svc.Status.LoadBalancer.Ingress[0].Hostname) != 0 {
+					endpoint = svc.Status.LoadBalancer.Ingress[0].Hostname
+					return true, nil
+				}
+			}
+			return false, nil
+		}
+		endpoint = svc.Spec.ClusterIP
+		return true, nil
+	})
+	return endpoint, err
+}
diff --git a/test/util/sar.go b/test/util/sar.go
new file mode 100644
index 000000000..fbda8be7d
--- /dev/null
+++ b/test/util/sar.go
@@ -0,0 +1,37 @@
+package util
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	authorizationapiv1 "k8s.io/api/authorization/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	kclientset "k8s.io/client-go/kubernetes"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+func WaitForSelfSAR(interval, timeout time.Duration, c kclientset.Interface, selfSAR authorizationapiv1.SelfSubjectAccessReviewSpec) error {
+	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
+		res, err := c.AuthorizationV1().SelfSubjectAccessReviews().Create(context.Background(),
+			&authorizationapiv1.SelfSubjectAccessReview{
+				Spec: selfSAR,
+			}, metav1.CreateOptions{})
+		if err != nil {
+			return false, err
+		}
+
+		if !res.Status.Allowed {
+			e2e.Logf("Waiting for SelfSAR (ResourceAttributes: %#v, NonResourceAttributes: %#v) to be allowed, current Status: %#v", selfSAR.ResourceAttributes, selfSAR.NonResourceAttributes, res.Status)
+			return false, nil
+		}
+
+		return true, nil
+	})
+	if err != nil {
+		return fmt.Errorf("failed to wait for SelfSAR (ResourceAttributes: %#v, NonResourceAttributes: %#v), err: %v", selfSAR.ResourceAttributes, selfSAR.NonResourceAttributes, err)
+	}
+
+	return nil
+}
diff --git a/test/util/ssh_client.go b/test/util/ssh_client.go
new file mode 100644
index 000000000..519d9b6b0
--- /dev/null
+++ b/test/util/ssh_client.go
@@ -0,0 +1,101 @@
+package util
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net"
+	"os"
+	"path/filepath"
+
+	"golang.org/x/crypto/ssh"
+
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+type SshClient struct {
+	User string
+	Host string
+	Port int
+	PrivateKey string
+}
+
+func (sshClient *SshClient) getConfig() (*ssh.ClientConfig, error) {
+	pemBytes, err := ioutil.ReadFile(sshClient.PrivateKey)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read private key %s: %v", sshClient.PrivateKey, err)
+	}
+	signer, err := ssh.ParsePrivateKey(pemBytes)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse private key: %v", err)
+	}
+	config := &ssh.ClientConfig{
+		User: sshClient.User,
+		Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)},
+		HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {
+			return nil
+		},
+	}
+	return config, nil
+}
+
+// Run runs cmd on the remote host.
+func (sshClient *SshClient) Run(cmd string) error {
+	combinedOutput, err := sshClient.RunOutput(cmd)
+	if err != nil {
+		return err
+	}
+	e2e.Logf("Successfully executed cmd '%s' with output:\n%s", cmd, combinedOutput)
+	return nil
+}
+
+// RunOutput runs cmd on the remote host and returns its combined standard output and standard error.
+func (sshClient *SshClient) RunOutput(cmd string) (string, error) {
+	config, err := sshClient.getConfig()
+	if err != nil {
+		return "", fmt.Errorf("failed to get SSH config: %v", err)
+	}
+
+	connection, err := ssh.Dial("tcp", fmt.Sprintf("%v:%v", sshClient.Host, sshClient.Port), config)
+	if err != nil {
+		return "", fmt.Errorf("failed to dial %s:%d: %v", sshClient.Host, sshClient.Port, err)
+	}
+	defer connection.Close()
+
+	session, err := connection.NewSession()
+	if err != nil {
+		return "", fmt.Errorf("failed to create session: %v", err)
+	}
+	defer session.Close()
+
+	combinedOutputBuffer := NewSynchronizedBuffer()
+	session.Stdout = combinedOutputBuffer
+	session.Stderr = combinedOutputBuffer
+
+	err = session.Run(cmd)
+	if err != nil {
+		return "", fmt.Errorf("failed to run cmd '%s': %v\n%s", cmd, err, combinedOutputBuffer.String())
+	}
+	return combinedOutputBuffer.String(), nil
+}
+
+func GetPrivateKey() (string, error) {
+	privateKey := os.Getenv("SSH_CLOUD_PRIV_KEY")
+	if privateKey == "" {
+		privateKey = filepath.Join("../internal/config/keys/", "openshift-qe.pem")
+	}
+	if _, err := os.Stat(privateKey); os.IsNotExist(err) {
+		return "", fmt.Errorf("private key file not found: %s", privateKey)
+	}
+	return privateKey, nil
+}
+
+func GetPublicKey() (string, error) {
+	publicKey := os.Getenv("SSH_CLOUD_PUB_KEY")
+	if publicKey == "" {
+		publicKey = filepath.Join("../internal/config/keys/", "openshift-qe.pub")
+	}
+	if _, err := os.Stat(publicKey); os.IsNotExist(err) {
+		return "", fmt.Errorf("public key file not found: %s", publicKey)
+	}
+	return publicKey, nil
+}
diff --git a/test/util/statefulsets.go b/test/util/statefulsets.go
new file mode 100644
index 000000000..843a32873
--- /dev/null
+++ b/test/util/statefulsets.go
@@ -0,0 +1,48 @@
+package util
+
+import (
+	"context"
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	kutilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/wait"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+// RemoveStatefulSets deletes the given stateful sets in a namespace
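+// It waits up to five minutes for each set's pods to terminate and returns an
+// aggregate of any deletion or wait errors.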
+func RemoveStatefulSets(oc *CLI, sets ...string) error {
+	errs := []error{}
+	for _, set := range sets {
+		e2e.Logf("Removing stateful set %s/%s", oc.Namespace(), set)
+		if err := oc.AdminKubeClient().AppsV1().StatefulSets(oc.Namespace()).Delete(context.Background(), set, metav1.DeleteOptions{}); err != nil {
+			e2e.Logf("Error occurred removing stateful set: %v", err)
+			errs = append(errs, err)
+		}
+
+		err := wait.PollImmediate(5*time.Second, 5*time.Minute, func() (bool, error) {
+			pods, err := GetStatefulSetPods(oc, set)
+			if err != nil {
+				e2e.Logf("Unable to get pods for statefulset/%s: %v", set, err)
+				return false, err
+			}
+			if len(pods.Items) > 0 {
+				e2e.Logf("Waiting for pods for statefulset/%s to terminate", set)
+				return false, nil
+			}
+			e2e.Logf("Pods for statefulset/%s have terminated", set)
+			return true, nil
+		})
+
+		if err != nil {
+			e2e.Logf("Error occurred waiting for pods to terminate for statefulset/%s: %v", set, err)
+			errs = append(errs, err)
+		}
+	}
+
+	if len(errs) != 0 {
+		return kutilerrors.NewAggregate(errs)
+	}
+
+	return nil
+}
diff --git a/test/util/synchronized_buffer.go b/test/util/synchronized_buffer.go
new file mode 100644
index 000000000..8ba928d96
--- /dev/null
+++ b/test/util/synchronized_buffer.go
@@ -0,0 +1,29 @@
+package util
+
+import (
+	"bytes"
+	"sync"
+)
+
+// SynchronizedBuffer wraps bytes.Buffer with a sync.Mutex for thread-safety.
+type SynchronizedBuffer struct {
+	buf bytes.Buffer
+	mu  sync.Mutex
+}
+
+// NewSynchronizedBuffer initializes an empty SynchronizedBuffer which is ready to use
+func NewSynchronizedBuffer() *SynchronizedBuffer {
+	return &SynchronizedBuffer{}
+}
+
+func (sb *SynchronizedBuffer) Write(p []byte) (n int, err error) {
+	sb.mu.Lock()
+	defer sb.mu.Unlock()
+	return sb.buf.Write(p)
+}
+
+func (sb *SynchronizedBuffer) String() string {
+	sb.mu.Lock()
+	defer sb.mu.Unlock()
+	return sb.buf.String()
+}
diff --git a/test/util/template.go b/test/util/template.go
new file mode 100644
index 000000000..62429a7cc
--- /dev/null
+++ b/test/util/template.go
@@ -0,0 +1,170 @@
+package util
+
+import (
+	"fmt"
+	"math/rand"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	o "github.com/onsi/gomega"
+
+	"github.com/ghodss/yaml"
+	"github.com/tidwall/pretty"
+	"k8s.io/apimachinery/pkg/util/wait"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+// ApplyClusterResourceFromTemplateWithError applies the changes to the cluster resource and returns an error if one occurred.
+// For ex: ApplyClusterResourceFromTemplateWithError(oc, "--ignore-unknown-parameters=true", "-f", "TEMPLATE LOCATION")
+func ApplyClusterResourceFromTemplateWithError(oc *CLI, parameters ...string) error {
+	return resourceFromTemplate(oc, false, true, "", parameters...)
+}
+
+// ApplyClusterResourceFromTemplate applies the changes to the cluster resource.
+// For ex: ApplyClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", "TEMPLATE LOCATION")
+func ApplyClusterResourceFromTemplate(oc *CLI, parameters ...string) {
+	resourceFromTemplate(oc, false, false, "", parameters...)
+}
+
+// ApplyNsResourceFromTemplate applies the changes to the namespaced resource.
+// No need to add a namespace parameter in the template file as it can be provided as a function argument.
+// For ex: ApplyNsResourceFromTemplate(oc, "NAMESPACE", "--ignore-unknown-parameters=true", "-f", "TEMPLATE LOCATION")
+func ApplyNsResourceFromTemplate(oc *CLI, namespace string, parameters ...string) {
+	resourceFromTemplate(oc, false, false, namespace, parameters...)
+}
+
+// CreateClusterResourceFromTemplateWithError creates a resource from the template and returns an error if one occurred.
+// For ex: CreateClusterResourceFromTemplateWithError(oc, "--ignore-unknown-parameters=true", "-f", "TEMPLATE LOCATION")
+func CreateClusterResourceFromTemplateWithError(oc *CLI, parameters ...string) error {
+	return resourceFromTemplate(oc, true, true, "", parameters...)
+}
+
+// CreateClusterResourceFromTemplate creates a resource from the template.
+// For ex: CreateClusterResourceFromTemplate(oc, "--ignore-unknown-parameters=true", "-f", "TEMPLATE LOCATION")
+func CreateClusterResourceFromTemplate(oc *CLI, parameters ...string) {
+	resourceFromTemplate(oc, true, false, "", parameters...)
+}
+
+// CreateNsResourceFromTemplate creates a namespaced resource from the template.
+// No need to add a namespace parameter in the template file as it can be provided as a function argument.
+// For ex: CreateNsResourceFromTemplate(oc, "NAMESPACE", "--ignore-unknown-parameters=true", "-f", "TEMPLATE LOCATION")
+func CreateNsResourceFromTemplate(oc *CLI, namespace string, parameters ...string) {
+	resourceFromTemplate(oc, true, false, namespace, parameters...)
+}
+
+func resourceFromTemplate(oc *CLI, create bool, returnError bool, namespace string, parameters ...string) error {
+	var configFile string
+	err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
+		fileName := GetRandomString() + "config.json"
+		stdout, _, err := oc.AsAdmin().Run("process").Args(parameters...).OutputsToFiles(fileName)
+		if err != nil {
+			e2e.Logf("processing failed: %v, trying the next round", err)
+			return false, nil
+		}
+
+		configFile = stdout
+		return true, nil
+	})
+	if returnError && err != nil {
+		e2e.Logf("failed to process %v", parameters)
+		return err
+	}
+	AssertWaitPollNoErr(err, fmt.Sprintf("failed to process %v", parameters))
+
+	e2e.Logf("the processed resource file is %s", configFile)
+
+	var resourceErr error
+	if create {
+		if namespace != "" {
+			resourceErr = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", configFile, "-n", namespace).Execute()
+		} else {
+			resourceErr = oc.AsAdmin().WithoutNamespace().Run("create").Args("-f", configFile).Execute()
+		}
+	} else {
+		if namespace != "" {
+			resourceErr = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", configFile, "-n", namespace).Execute()
+		} else {
+			resourceErr = oc.AsAdmin().WithoutNamespace().Run("apply").Args("-f", configFile).Execute()
+		}
+	}
+	if returnError && resourceErr != nil {
+		e2e.Logf("failed to create/apply resource: %v", resourceErr)
+		return resourceErr
+	}
+	AssertWaitPollNoErr(resourceErr, fmt.Sprintf("failed to create/apply resource: %v", resourceErr))
+	return nil
+}
+
+// GetRandomString creates a random 8-character alphanumeric string
+func GetRandomString() string {
+	chars := "abcdefghijklmnopqrstuvwxyz0123456789"
+	seed := rand.New(rand.NewSource(time.Now().UnixNano()))
+	buffer := make([]byte, 8)
+	for index := range buffer {
+		buffer[index] = chars[seed.Intn(len(chars))]
+	}
+	return string(buffer)
+}
+
+// ApplyResourceFromTemplateWithNonAdminUser applies a resource from a template as a normal (non-admin) user
+func ApplyResourceFromTemplateWithNonAdminUser(oc *CLI, parameters ...string) error {
+	var configFile string
+	err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
+		output, err := oc.Run("process").Args(parameters...).OutputToFile(GetRandomString() + "config.json")
+		if err != nil {
+			e2e.Logf("processing failed: %v, trying the next round", err)
+			return false, nil
+		}
+		configFile = output
+		return true, nil
+	})
+	AssertWaitPollNoErr(err, fmt.Sprintf("failed to process %v", parameters))
+
+	e2e.Logf("the processed resource file is %s", configFile)
+	return oc.WithoutNamespace().Run("apply").Args("-f", configFile).Execute()
+}
+
+// ProcessTemplate processes a template, given a file path and parameters, and returns the processed file path
+func ProcessTemplate(oc *CLI, parameters ...string) string {
+	var configFile string
+
+	err := wait.Poll(3*time.Second, 15*time.Second, func() (bool, error) {
+		output, err := oc.Run("process").Args(parameters...).OutputToFile(GetRandomString() + "config.json")
+		if err != nil {
+			e2e.Logf("processing failed: %v, trying the next round", err)
+			return false, nil
+		}
+		configFile = output
+		return true, nil
+	})
+
+	AssertWaitPollNoErr(err, fmt.Sprintf("failed to process %v", parameters))
+	e2e.Logf("the processed resource file is %s", configFile)
+	return configFile
+}
+
+// ParameterizedTemplateByReplaceToFile parameterizes a template via string replacement and writes the result to a new file
+func ParameterizedTemplateByReplaceToFile(oc *CLI, parameters ...string) string {
+	isParameterExist, pIndex := StringsSliceElementsHasPrefix(parameters, "-f", true)
+	o.Expect(isParameterExist).Should(o.BeTrue())
+	templateFileName := parameters[pIndex+1]
+	templateContentByte, readFileErr := os.ReadFile(templateFileName)
+	o.Expect(readFileErr).ShouldNot(o.HaveOccurred())
+	templateContentStr := string(templateContentByte)
+	isParameterExist, pIndex = StringsSliceElementsHasPrefix(parameters, "-p", true)
+	o.Expect(isParameterExist).Should(o.BeTrue())
+	for i := pIndex + 1; i < len(parameters); i++ {
+		if strings.Contains(parameters[i], "=") {
+			tempSlice := strings.Split(parameters[i], "=")
+			o.Expect(tempSlice).Should(o.HaveLen(2))
+			templateContentStr = strings.ReplaceAll(templateContentStr, "${"+tempSlice[0]+"}", tempSlice[1])
+		}
+	}
+	templateContentJSON, convertErr := yaml.YAMLToJSON([]byte(templateContentStr))
+	o.Expect(convertErr).NotTo(o.HaveOccurred())
+	configFile := filepath.Join(e2e.TestContext.OutputDir, oc.Namespace()+"-"+GetRandomString()+"config.json")
+	o.Expect(os.WriteFile(configFile, pretty.Pretty(templateContentJSON), 0644)).ShouldNot(o.HaveOccurred())
+	return configFile
+}
diff --git a/test/util/terraform.go b/test/util/terraform.go
new file mode 100644
index 000000000..df54e810f
--- /dev/null
+++ b/test/util/terraform.go
@@ -0,0 +1,184 @@
+package util
+
+import (
+	"context"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+
+	"github.com/hashicorp/hc-install/checkpoint"
+	"github.com/hashicorp/hc-install/product"
+	"github.com/hashicorp/terraform-exec/tfexec"
+
+	tfjson "github.com/hashicorp/terraform-json"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+// TerraformExec stores all attributes
+// about the Terraform installation and templates location.
+type TerraformExec struct {
+	tfbin *tfexec.Terraform
+}
+
+// NewTerraform finds the terraform binary to use, or sets up and installs
+// Terraform (if not already installed) in the system, so that it can be used
+// through the tfexec.Terraform struct.
+// Inputs:
+// - workingDir: Directory where the Terraform scripts are located
+//
+// Returns:
+// - A TerraformExec struct which can be used to invoke other Terraform
+// methods.
+func NewTerraform(workingDir string) (*TerraformExec, error) {
+
+	var execPath string
+	var err error
+
+	// Look for a terraform binary installed in $PATH
+	if execPath, err = exec.LookPath("terraform"); err != nil {
+
+		files, _ := filepath.Glob("/tmp/terraform_*/terraform")
+		if len(files) > 0 {
+			// If a /tmp/terraform_*/terraform binary exists, use it
+			execPath = files[0]
+			e2e.Logf("using existing terraform binary from %v", execPath)
+		} else {
+			// If not, install terraform in /tmp
+			installer := &checkpoint.LatestVersion{
+				Product: product.Terraform,
+			}
+			execPath, err = installer.Install(context.Background())
+			if err != nil {
+				e2e.Logf("terraform installation in /tmp failed: %v", err)
+				return nil, err
+			}
+			e2e.Logf("terraform installed in %v", execPath)
+		}
+	}
+
+	tfinit, err := tfexec.NewTerraform(workingDir, execPath)
+	if err != nil {
+		e2e.Logf("error setting up Terraform in working dir %v", workingDir)
+		return nil, err
+	}
+	return &TerraformExec{
+		tfbin: tfinit,
+	}, nil
+}
+
+// TerraformInit executes terraform init in the workingDir templates
+func (tf *TerraformExec) TerraformInit() error {
+
+	err := tf.tfbin.Init(context.Background())
+	if err != nil {
+		e2e.Logf("error in terraform init: %s", err)
+		return err
+	}
+
+	return nil
+}
+
+// TerraformInitWithUpgrade executes terraform init --upgrade in the workingDir templates
+func (tf *TerraformExec) TerraformInitWithUpgrade() error {
+
+	err := tf.tfbin.Init(context.Background(), tfexec.Upgrade(true))
+	if err != nil {
+		e2e.Logf("error in terraform init: %s", err)
+		return err
+	}
+
+	return nil
+}
+
+// TerraformShow executes the terraform show command
+// Returns:
+// - The Terraform state in a tfjson.State struct type
+// - Any error which could occur
+func (tf *TerraformExec) TerraformShow() (*tfjson.State, error) {
+
+	state, err := tf.tfbin.Show(context.Background())
+	if err != nil {
+		e2e.Logf("error in terraform show: %s", err)
+		return nil, err
+	}
+	return state, nil
+}
+
+// TerraformApply executes terraform apply in the workingDir templates
+// Inputs:
+// - vars: []string including all the vars to be passed during the
+// terraform apply execution. Format: ["host=master.ocp", "num_workers=3"]
+func (tf *TerraformExec) TerraformApply(vars ...string) error {
+
+	OptVarList := make([]tfexec.ApplyOption, len(vars))
+	// Convert the slice of strings into a slice of ApplyOption using the Var function
+	for i, valVar := range vars {
+		OptVarList[i] = tfexec.Var(valVar)
+	}
+
+	err := tf.tfbin.Apply(context.Background(), OptVarList...)
+	if err != nil {
+		e2e.Logf("error in terraform apply: %s", err)
+		return err
+	}
+	return nil
+}
+
+// TerraformOutput executes the terraform output command and returns a map of the
+// output values
+// Returns:
+// - Map of key:string and value:string including the output var name
+// and the corresponding value. For more information on output values
+// check: https://www.terraform.io/language/values/outputs
+// Example:
+// { 'instance_ip': '10.0.176.10', 'instance_dns': 'cool.worker.internal.aws.com' }
+func (tf *TerraformExec) TerraformOutput() (map[string]string, error) {
+
+	var cmdOutput map[string]tfexec.OutputMeta
+	mapReturn := make(map[string]string)
+
+	cmdOutput, err := tf.tfbin.Output(context.Background())
+	if err != nil {
+		return nil, err
+	}
+
+	for key, value := range cmdOutput {
+		mapReturn[key] = string(value.Value)
+	}
+
+	return mapReturn, nil
+
+}
+
+// TerraformDestroy runs terraform destroy in the workingDir templates directory.
+// Inputs:
+// - vars: []string A list of the vars passed to the terraform
+// destroy command, in the same way as in TerraformApply.
+// Format: ["host=master.ocp", "num_workers=3"]
+func (tf *TerraformExec) TerraformDestroy(vars ...string) error {
+
+	OptVarList := make([]tfexec.DestroyOption, len(vars))
+	// Convert the slice of strings into a slice of DestroyOption using the Var function
+	for i, valVar := range vars {
+		OptVarList[i] = tfexec.Var(valVar)
+	}
+
+	err := tf.tfbin.Destroy(context.Background(), OptVarList...)
+	if err != nil {
+		if strings.Contains(err.Error(), "failed to instantiate provider") {
+			// Remove the .terraform dir, rerun terraform init, then retry the destroy
+			os.RemoveAll(tf.tfbin.WorkingDir() + "/.terraform/")
+			tf.TerraformInit()
+			err = tf.tfbin.Destroy(context.Background(), OptVarList...)
+			if err == nil {
+				return nil
+			}
+		}
+		e2e.Logf("error in terraform destroy: %s", err)
+		return err
+	}
+
+	return nil
+}
diff --git a/test/util/terraform_test.go b/test/util/terraform_test.go
new file mode 100644
index 000000000..b848d6fb1
--- /dev/null
+++ b/test/util/terraform_test.go
@@ -0,0 +1,217 @@
+package util
+
+import (
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"testing"
+
+	tfjson "github.com/hashicorp/terraform-json"
+)
+
+func TestNewTerraform(t *testing.T) {
+
+	tfDir, err := ioutil.TempDir("", "tfTemp")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tfDir)
+
+	var tf *TerraformExec
+	tf, err = NewTerraform(tfDir)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	t.Logf("Using terraform binary from: %v", tf.tfbin.ExecPath())
+
+	if _, err := exec.LookPath("terraform"); err != nil {
+		// If the system where the test has run doesn't have terraform
+		// installed in $PATH then ensure it has been downloaded
+		files, _ := filepath.Glob("/tmp/terraform_*/terraform")
+		if _, err := os.Stat(files[0]); os.IsNotExist(err) {
+			t.Fatalf("Terraform binary was not downloaded")
+		}
+
+		zipFiles, _ := filepath.Glob("/tmp/terraform*zip*")
+		if len(zipFiles) == 0 {
+			t.Fatalf("Terraform zip file was not downloaded")
+		}
+	}
+
+}
+
+func TestRunAndDestroyTerraform(t *testing.T) {
+
+	tfDir, err := ioutil.TempDir("", "tfTemp")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tfDir)
+	tfFileContent := "terraform {\n" +
+		"  required_version = \">= 1.0.0\"\n" +
+		"}\n" +
+		"\n" +
+		"resource \"local_file\" \"temp\" {\n" +
+		"  filename = \"" + tfDir + "/temp\"\n" +
+		"  content = \"This is only a test\"\n" +
+		"}"
+
+	tfFile, err := os.Create(filepath.Join(tfDir, "main.tf"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	defer tfFile.Close()
+
+	_, err = tfFile.WriteString(tfFileContent)
+	if err != nil {
+		t.Fatal(err)
+	}
+	tfFile.Close()
+
+	var tf *TerraformExec
+	tf, err = NewTerraform(tfDir)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	t.Logf("Using terraform binary from: %v", tf.tfbin.ExecPath())
+
+	err = tf.TerraformInit()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// check that init was run and the provider directory and lock files were created
+	for _, file := range []string{"/.terraform", "/.terraform.lock.hcl"} {
+		if _, err := os.Stat(filepath.Join(tfDir, file)); os.IsNotExist(err) {
+			t.Fatalf("Terraform file %v is not present after terraform init", filepath.Join(tfDir, file))
+		}
+	}
+
+	err = tf.TerraformApply()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// check that apply was run and the state file and resource /temp files were created
+	for _, file := range []string{"/terraform.tfstate", "/temp"} {
+		if _, err := os.Stat(filepath.Join(tfDir, file)); os.IsNotExist(err) {
+			t.Fatalf("Terraform file %v is not present after terraform apply", filepath.Join(tfDir, file))
+		}
+	}
+
+	err = tf.TerraformDestroy()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// check that destroy was run and the created local_file /temp was removed
+	if _, err := os.Stat(filepath.Join(tfDir, "/temp")); os.IsExist(err) {
+		t.Fatalf("Terraform local_file /temp was not cleaned up")
+	}
+
+}
+
+func TestOutputAndShowTerraform(t *testing.T) {
+
+	tfDir, err := ioutil.TempDir("", "tfTemp")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(tfDir)
+	tfFileContent := "terraform {\n" +
+		"  required_version = \">= 1.0.0\"\n" +
+		"}\n" +
+		"\n" +
+		"variable \"text\" {\n" +
+		"  type = string\n" +
+		"  default = \"This is only a test\"\n" +
+		"}\n" +
+		"\n" +
+		"resource \"local_file\" \"temp\" {\n" +
+		"  filename = \"" + tfDir + "/temp\"\n" +
+		"  content = \"${var.text}\"\n" +
+		"}\n" +
+		"output \"text_output\" {\n" +
+		"  value = \"${local_file.temp.content}\"\n" +
+		"}"
+
+	tfFile, err := os.Create(filepath.Join(tfDir, "main.tf"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	defer tfFile.Close()
+
+	_, err = tfFile.WriteString(tfFileContent)
+	if err != nil {
+		t.Fatal(err)
+	}
+	tfFile.Close()
+
+	var tf *TerraformExec
+	tf, err = NewTerraform(tfDir)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	t.Logf("Using terraform binary from: %v", tf.tfbin.ExecPath())
+
+	err = tf.TerraformInitWithUpgrade()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	inputText := "Apply with input parameter"
+
+	tfArgs := []string{
+		"text=" + inputText,
+	}
+	err = tf.TerraformApply(tfArgs...)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// check that apply was run and the resource /temp file was created
+	for _, file := range []string{"/temp"} {
+		if _, err := os.Stat(filepath.Join(tfDir, file)); os.IsNotExist(err) {
+			t.Fatalf("Terraform file %v is not present after terraform apply", filepath.Join(tfDir, file))
+		}
+	}
+
+	var tfState *tfjson.State
+	tfState, err = tf.TerraformShow()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if tfState.Values.Outputs["text_output"].Value != inputText {
+		t.Fatalf("The Terraform state value for text_output is incorrect. Found: %v, Expected: %v", tfState.Values.Outputs["text_output"].Value, inputText)
+	}
+
+	var tfOutput map[string]string
+	tfOutput, err = tf.TerraformOutput()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if strings.Trim(tfOutput["text_output"], "\"") != inputText {
+		t.Fatalf("The Terraform output value for text_output is incorrect. Found: %v, Expected: %v", tfOutput["text_output"], inputText)
+	}
+
+	err = tf.TerraformDestroy(tfArgs...)
+ if err != nil { + t.Fatal(err) + } + + // check that destroy was run and the created local_file /temp was removed + if _, err := os.Stat(filepath.Join(tfDir, "/temp")); os.IsExist(err) { + t.Fatalf("Terraform local_file /temp was not cleaned up") + } + +} diff --git a/test/util/test.go b/test/util/test.go new file mode 100644 index 000000000..5a9a60de7 --- /dev/null +++ b/test/util/test.go @@ -0,0 +1,620 @@ +package util + +import ( + "context" + "flag" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "sort" + "strings" + "time" + + "github.com/onsi/ginkgo/v2" + "github.com/onsi/ginkgo/v2/types" + kapiv1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + kclientset "k8s.io/client-go/kubernetes" + rbacv1client "k8s.io/client-go/kubernetes/typed/rbac/v1" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" + conformancetestdata "k8s.io/kubernetes/test/conformance/testdata" + e2e "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/testfiles" + e2etestingmanifests "k8s.io/kubernetes/test/e2e/testing-manifests" + testfixtures "k8s.io/kubernetes/test/fixtures" + + // this appears to inexplicably auto-register global flags. + _ "k8s.io/kubernetes/test/e2e/storage/drivers" + + configv1 "github.com/openshift/api/config/v1" + projectv1 "github.com/openshift/api/project/v1" + configclient "github.com/openshift/client-go/config/clientset/versioned" + securityv1client "github.com/openshift/client-go/security/clientset/versioned" + "github.com/openshift/openshift-tests-private/pkg/version" +) + +const ( + EnvIsExternalOIDCCluster = "ENV_IS_EXTERNAL_OIDC_CLUSTER" + EnvIsKubernetesCluster = "ENV_IS_KUBERNETES_CLUSTER" +) + +var ( + reportFileName string + syntheticSuite string + quiet bool +) + +var TestContext *e2e.TestContextType = &e2e.TestContext + +var ( + IsExternalOIDCClusterFlag = "" + IsKubernetesClusterFlag = "" +) + +func InitStandardFlags() { + e2e.RegisterCommonFlags(flag.CommandLine) + e2e.RegisterClusterFlags(flag.CommandLine) + + // replaced by a bare import above. + //e2e.RegisterStorageFlags() +} + +func InitTest(dryRun bool) error { + InitDefaultEnvironmentVariables() + // interpret synthetic input in `--ginkgo.focus` and/or `--ginkgo.skip` + ginkgo.BeforeEach(checkSyntheticInput) + + TestContext.DeleteNamespace = os.Getenv("DELETE_NAMESPACE") != "false" + TestContext.VerifyServiceAccount = true + testfiles.AddFileSource(e2etestingmanifests.GetE2ETestingManifestsFS()) + testfiles.AddFileSource(testfixtures.GetTestFixturesFS()) + testfiles.AddFileSource(conformancetestdata.GetConformanceTestdataFS()) + TestContext.KubectlPath = "kubectl" + TestContext.KubeConfig = KubeConfigPath() + os.Setenv("KUBECONFIG", TestContext.KubeConfig) + + // "debian" is used when not set. At least GlusterFS tests need "custom". + // (There is no option for "rhel" or "centos".) 
+	TestContext.NodeOSDistro = "custom"
+	TestContext.MasterOSDistro = "custom"
+
+	// load and set the host variable for kubectl
+	if !dryRun {
+		clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(&clientcmd.ClientConfigLoadingRules{ExplicitPath: TestContext.KubeConfig}, &clientcmd.ConfigOverrides{})
+		cfg, err := clientConfig.ClientConfig()
+		if err != nil {
+			return err
+		}
+		TestContext.Host = cfg.Host
+	}
+
+	reportFileName = os.Getenv("TEST_REPORT_FILE_NAME")
+	if reportFileName == "" {
+		reportFileName = "junit"
+	}
+
+	quiet = os.Getenv("TEST_OUTPUT_QUIET") == "true"
+
+	// Ensure that Kube tests run privileged (like they do upstream)
+	TestContext.CreateTestingNS = createTestingNS
+
+	klog.V(2).Infof("Extended test version %s", version.Get().String())
+	return nil
+}
+
+func AnnotateTestSuite() {
+	// QE uses a different method to select cases, so there is no need to annotate them.
+	waitErr := wait.Poll(3*time.Second, 30*time.Second, func() (bool, error) {
+		out, err := kubectlCmd("get", "node").CombinedOutput()
+		if err != nil && strings.Contains(string(out), "Service Unavailable") {
+			e2e.Logf("Failed to get the cluster: %v, error: %v, trying again", string(out), err)
+			return false, nil
+		}
+		return true, nil
+	})
+	if waitErr != nil {
+		e2e.Logf("Failed to get the cluster")
+		os.Exit(1)
+	}
+
+	// currently QE does not need them; if needed, implement them differently in pkg/test/ginkgo/test.go
+	// testRenamer := newGinkgoTestRenamerFromGlobals(e2e.TestContext.Provider, getNetworkSkips())
+
+	// ginkgo.GetSuite().BuildTree()
+	// ginkgo.GetSuite().WalkTests(testRenamer.maybeRenameTest)
+}
+
+// PreDetermineExternalOIDCCluster checks up front whether the cluster is using external OIDC, to avoid checking it every time.
+func PreDetermineExternalOIDCCluster() (bool, error) {
+
+	clientConfig, err := e2e.LoadConfig(true)
+	if err != nil {
+		e2e.Logf("clientConfig err: %v", err)
+		return false, err
+	}
+	client, err := configclient.NewForConfig(clientConfig)
+	if err != nil {
+		e2e.Logf("client err: %v", err)
+		return false, err
+	}
+
+	var auth *configv1.Authentication
+	var errAuth error
+	err = wait.PollImmediate(3*time.Second, 9*time.Second, func() (bool, error) {
+		auth, errAuth = client.ConfigV1().Authentications().Get(context.Background(), "cluster", metav1.GetOptions{})
+		if errAuth != nil {
+			e2e.Logf("auth err: %v", errAuth)
+			return false, nil
+		}
+		return true, nil
+	})
+
+	if err != nil {
+		return false, errAuth
+	}
+
+	// auth.Spec.Type is optional.
if it does not exist, auth.Spec.Type is empty string + // if it exists and set as "", it is also empty string + e2e.Logf("Found authentication type used: %v", string(auth.Spec.Type)) + return string(auth.Spec.Type) == string(configv1.AuthenticationTypeOIDC), nil + + // keep it for possible usage + // var out []byte + // var err error + // waitErr := wait.PollImmediate(3*time.Second, 9*time.Second, func() (bool, error) { + // out, err = kubectlCmd("get", "authentication/cluster", "-o=jsonpath={.spec.type}").CombinedOutput() + // if err != nil { + // e2e.Logf("Fail to get the authentication/cluster, error: %v with %v, try again", err, string(out)) + // return false, nil + // } + // e2e.Logf("Found authentication type used: %v", string(out)) + // return true, nil + // }) + // if waitErr != nil { + // return false, fmt.Errorf("error checking if the cluster is using external OIDC: %v", string(out)) + // } + + // return string(out) == string(configv1.AuthenticationTypeOIDC), nil +} + +// PreDetermineK8sCluster checks if the active cluster is a Kubernetes cluster (as opposed to OpenShift). +func PreDetermineK8sCluster() (isK8s bool, err error) { + ctx := context.Background() + + kubeClient, err := e2e.LoadClientset(true) + if err != nil { + return false, fmt.Errorf("failed to load Kubernetes clientset: %w", err) + } + + err = wait.PollUntilContextTimeout(ctx, 3*time.Second, 9*time.Second, true, func(ctx context.Context) (done bool, err error) { + isOpenShift, isOCPErr := IsOpenShiftCluster(ctx, kubeClient.CoreV1().Namespaces()) + if isOCPErr != nil { + e2e.Logf("failed to check if the active cluster is OpenShift: %v", isOCPErr) + return false, nil + } + isK8s = !isOpenShift + return true, nil + }) + + if err != nil { + return false, fmt.Errorf("error during polling: %w", err) + } + + return isK8s, nil +} + +func PreSetEnvK8s() (res string) { + isK8s, err := PreDetermineK8sCluster() + switch { + case err != nil: + res = "unknown" + case isK8s: + res = "yes" + default: + res = "no" + } + _ = os.Setenv(EnvIsKubernetesCluster, res) + return res +} + +func PreSetEnvOIDCCluster() (res string) { + isOIDC, err := PreDetermineExternalOIDCCluster() + switch { + case err != nil: + res = "unknown" + case isOIDC: + res = "yes" + default: + res = "no" + } + _ = os.Setenv(EnvIsExternalOIDCCluster, res) + return res +} + +func kubectlCmd(args ...string) *exec.Cmd { + defaultArgs := []string{} + + // Reference a --server option so tests can run anywhere. + if TestContext.Host != "" { + defaultArgs = append(defaultArgs, "--"+clientcmd.FlagAPIServer+"="+TestContext.Host) + } + if TestContext.KubeConfig != "" { + defaultArgs = append(defaultArgs, "--"+clientcmd.RecommendedConfigPathFlag+"="+TestContext.KubeConfig) + + // Reference the KubeContext + if TestContext.KubeContext != "" { + defaultArgs = append(defaultArgs, "--"+clientcmd.FlagContext+"="+TestContext.KubeContext) + } + + } else { + if TestContext.CertDir != "" { + defaultArgs = append(defaultArgs, + fmt.Sprintf("--certificate-authority=%s", filepath.Join(TestContext.CertDir, "ca.crt")), + fmt.Sprintf("--client-certificate=%s", filepath.Join(TestContext.CertDir, "kubecfg.crt")), + fmt.Sprintf("--client-key=%s", filepath.Join(TestContext.CertDir, "kubecfg.key"))) + } + } + kubectlArgs := append(defaultArgs, args...) + + //We allow users to specify path to kubectl, so you can test either "kubectl" or "cluster/kubectl.sh" + //and so on. + cmd := exec.Command(TestContext.KubectlPath, kubectlArgs...) + + //caller will invoke this and wait on it. 
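+	// For example (hypothetical invocation, mirroring the call in AnnotateTestSuite):
+	//   out, err := kubectlCmd("get", "node").CombinedOutput()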
+ return cmd +} + +func getNetworkSkips() []string { + out, err := kubectlCmd("get", "network.operator.openshift.io", "cluster", "--template", "{{.spec.defaultNetwork.type}}{{if .spec.defaultNetwork.openshiftSDNConfig}} {{.spec.defaultNetwork.type}}/{{.spec.defaultNetwork.openshiftSDNConfig.mode}}{{end}}").CombinedOutput() + if err != nil { + e2e.Logf("Could not get network operator configuration: not adding any plugin-specific skips.") + return nil + } + return strings.Split(string(out), " ") +} + +func newGinkgoTestRenamerFromGlobals(provider string, networkSkips []string) *ginkgoTestRenamer { + var allLabels []string + matches := make(map[string]*regexp.Regexp) + stringMatches := make(map[string][]string) + excludes := make(map[string]*regexp.Regexp) + + for label, items := range testMaps { + sort.Strings(items) + allLabels = append(allLabels, label) + var remain []string + for _, item := range items { + re := regexp.MustCompile(item) + if p, ok := re.LiteralPrefix(); ok { + stringMatches[label] = append(stringMatches[label], p) + } else { + remain = append(remain, item) + } + } + if len(remain) > 0 { + matches[label] = regexp.MustCompile(strings.Join(remain, `|`)) + } + } + for label, items := range labelExcludes { + sort.Strings(items) + excludes[label] = regexp.MustCompile(strings.Join(items, `|`)) + } + sort.Strings(allLabels) + + if provider != "" { + excludedTests = append(excludedTests, fmt.Sprintf(`\[Skipped:%s\]`, provider)) + } + for _, network := range networkSkips { + excludedTests = append(excludedTests, fmt.Sprintf(`\[Skipped:Network/%s\]`, network)) + } + klog.V(4).Infof("openshift-tests-private excluded test regex is %q", strings.Join(excludedTests, `|`)) + excludedTestsFilter := regexp.MustCompile(strings.Join(excludedTests, `|`)) + + return &ginkgoTestRenamer{ + allLabels: allLabels, + stringMatches: stringMatches, + matches: matches, + excludes: excludes, + + excludedTestsFilter: excludedTestsFilter, + } +} + +type ginkgoTestRenamer struct { + allLabels []string + stringMatches map[string][]string + matches map[string]*regexp.Regexp + excludes map[string]*regexp.Regexp + + excludedTestsFilter *regexp.Regexp +} + +func (r *ginkgoTestRenamer) maybeRenameTest(name string, node types.TestSpec) { + labels := "" + for { + count := 0 + for _, label := range r.allLabels { + if strings.Contains(name, label) { + continue + } + + var hasLabel bool + for _, segment := range r.stringMatches[label] { + hasLabel = strings.Contains(name, segment) + if hasLabel { + break + } + } + if !hasLabel { + if re := r.matches[label]; re != nil { + hasLabel = r.matches[label].MatchString(name) + } + } + + if hasLabel { + // TODO: remove when we no longer need it + if re, ok := r.excludes[label]; ok && re.MatchString(name) { + continue + } + count++ + labels += " " + label + name += " " + label + } + } + if count == 0 { + break + } + } + + // if !r.excludedTestsFilter.MatchString(name) { + // isSerial := strings.Contains(name, "[Serial]") + // isConformance := strings.Contains(name, "[Conformance]") + // switch { + // case isSerial && isConformance: + // node.SetText(node.Text() + " [Suite:openshift/conformance/serial/minimal]") + // case isSerial: + // node.SetText(node.Text() + " [Suite:openshift/conformance/serial]") + // case isConformance: + // node.SetText(node.Text() + " [Suite:openshift/conformance/parallel/minimal]") + // default: + // node.SetText(node.Text() + " [Suite:openshift/conformance/parallel]") + // } + // } + // if strings.Contains(node.CodeLocation().FileName, 
"/origin/test/") && !strings.Contains(node.Text(), "[Suite:openshift") { + // node.SetText(node.Text() + " [Suite:openshift]") + // } + // if strings.Contains(node.CodeLocation().FileName, "/kubernetes/test/e2e/") { + // node.SetText(node.Text() + " [Suite:k8s]") + // } + // node.SetText(node.Text() + labels) +} + +// ProwGCPSetup makes sure certain required env vars are available in the case +// that extended tests are invoked directly via calls to ginkgo/extended.test +func InitDefaultEnvironmentVariables() { + if ad := os.Getenv("ARTIFACT_DIR"); len(strings.TrimSpace(ad)) == 0 { + os.Setenv("ARTIFACT_DIR", filepath.Join(os.TempDir(), "artifacts")) + } +} + +// TODO: Use either explicit tags (k8s.io) or https://github.com/onsi/ginkgo/v2/pull/228 to implement this. +// isPackage determines wether the test is in a package. Ideally would be implemented in ginkgo. +func isPackage(pkg string) bool { + return strings.Contains(ginkgo.CurrentSpecReport().FileName(), pkg) +} + +// TODO: For both is*Test functions, use either explicit tags (k8s.io) or https://github.com/onsi/ginkgo/v2/pull/228 +func isOriginTest() bool { + return isPackage("/origin/test/") +} + +func isKubernetesE2ETest() bool { + return isPackage("/kubernetes/test/e2e/") +} + +func testNameContains(name string) bool { + return strings.Contains(ginkgo.CurrentSpecReport().FullText(), name) +} + +func skipTestNamespaceCustomization() bool { + return (isPackage("/kubernetes/test/e2e/namespace.go") && (testNameContains("should always delete fast") || testNameContains("should delete fast enough"))) +} + +// createTestingNS ensures that kubernetes e2e tests have their service accounts in the privileged and anyuid SCCs +func createTestingNS(ctx context.Context, baseName string, c kclientset.Interface, labels map[string]string) (*kapiv1.Namespace, error) { + if !strings.HasPrefix(baseName, "e2e-") { + baseName = "e2e-" + baseName + } + + ns, err := e2e.CreateTestingNS(ctx, baseName, c, labels) + if err != nil { + return ns, err + } + + // Add anyuid and privileged permissions for upstream tests + if strings.HasPrefix(baseName, "e2e-k8s-") || (isKubernetesE2ETest() && !skipTestNamespaceCustomization()) { + clientConfig, err := getClientConfig(KubeConfigPath()) + if err != nil { + return ns, err + } + securityClient, err := securityv1client.NewForConfig(clientConfig) + if err != nil { + return ns, err + } + e2e.Logf("About to run a Kube e2e test, ensuring namespace is privileged") + // add the "privileged" scc to ensure pods that explicitly + // request extra capabilities are not rejected + addE2EServiceAccountsToSCC(securityClient, []kapiv1.Namespace{*ns}, "privileged") + // add the "anyuid" scc to ensure pods that don't specify a + // uid don't get forced into a range (mimics upstream + // behavior) + addE2EServiceAccountsToSCC(securityClient, []kapiv1.Namespace{*ns}, "anyuid") + // add the "hostmount-anyuid" scc to ensure pods using hostPath + // can execute tests + addE2EServiceAccountsToSCC(securityClient, []kapiv1.Namespace{*ns}, "hostmount-anyuid") + + // The intra-pod test requires that the service account have + // permission to retrieve service endpoints. 
+ rbacClient, err := rbacv1client.NewForConfig(clientConfig) + if err != nil { + return ns, err + } + addRoleToE2EServiceAccounts(rbacClient, []kapiv1.Namespace{*ns}, "view") + + // in practice too many kube tests ignore scheduling constraints + allowAllNodeScheduling(c, ns.Name) + } + + return ns, err +} + +var ( + testMaps = map[string][]string{ + // tests that are known flaky + "[Flaky]": { + `Job should run a job to completion when tasks sometimes fail and are not locally restarted`, // seems flaky, also may require too many resources + `openshift mongodb replication creating from a template`, // flaking on deployment + + // TODO(node): test works when run alone, but not in the suite in CI + `\[Feature:HPA\] Horizontal pod autoscaling \(scale resource: CPU\) \[sig-autoscaling\] ReplicationController light Should scale from 1 pod to 2 pods`, + }, + // tests that must be run without competition + "[Serial]": { + `\[Disruptive\]`, + `\[Exclusive\]`, + }, + } + + // labelExcludes temporarily block tests out of a specific suite + labelExcludes = map[string][]string{} + + excludedTests = []string{ + `\[Disabled:`, + `\[Disruptive\]`, + `\[Exclusive\]`, + `\[Skipped\]`, + `\[Slow\]`, + `\[Flaky\]`, + `\[local\]`, + `\[Suite:openshift/test-cmd\]`, + } +) + +// checkSyntheticInput selects tests based on synthetic skips or focuses +func checkSyntheticInput() { + checkSuiteSkips() +} + +// checkSuiteSkips ensures Origin/Kubernetes synthetic skip labels are applied +// DEPRECATED: remove in a future release +func checkSuiteSkips() { + suiteConfig, _ := ginkgo.GinkgoConfiguration() + switch { + case isOriginTest(): + skip := strings.Join(suiteConfig.SkipStrings, "|") + if strings.Contains(skip, "Synthetic Origin") { + ginkgo.Skip("skipping all openshift/origin tests") + } + case isKubernetesE2ETest(): + skip := strings.Join(suiteConfig.SkipStrings, "|") + if strings.Contains(skip, "Synthetic Kubernetes") { + ginkgo.Skip("skipping all k8s.io/kubernetes tests") + } + } +} + +var longRetry = wait.Backoff{Steps: 100} + +// allowAllNodeScheduling sets the annotation on namespace that allows all nodes to be scheduled onto. +func allowAllNodeScheduling(c kclientset.Interface, namespace string) { + err := retry.RetryOnConflict(longRetry, func() error { + ns, err := c.CoreV1().Namespaces().Get(context.Background(), namespace, metav1.GetOptions{}) + if err != nil { + return err + } + if ns.Annotations == nil { + ns.Annotations = make(map[string]string) + } + ns.Annotations[projectv1.ProjectNodeSelector] = "" + _, err = c.CoreV1().Namespaces().Update(context.Background(), ns, metav1.UpdateOptions{}) + return err + }) + if err != nil { + FatalErr(err) + } +} + +func addE2EServiceAccountsToSCC(securityClient securityv1client.Interface, namespaces []kapiv1.Namespace, sccName string) { + // Because updates can race, we need to set the backoff retries to be > than the number of possible + // parallel jobs starting at once. Set very high to allow future high parallelism. 
+	err := retry.RetryOnConflict(longRetry, func() error {
+		scc, err := securityClient.SecurityV1().SecurityContextConstraints().Get(context.Background(), sccName, metav1.GetOptions{})
+		if err != nil {
+			if apierrs.IsNotFound(err) {
+				return nil
+			}
+			return err
+		}
+
+		for _, ns := range namespaces {
+			if isE2ENamespace(ns.Name) {
+				scc.Groups = append(scc.Groups, fmt.Sprintf("system:serviceaccounts:%s", ns.Name))
+			}
+		}
+		if _, err := securityClient.SecurityV1().SecurityContextConstraints().Update(context.Background(), scc, metav1.UpdateOptions{}); err != nil {
+			return err
+		}
+		return nil
+	})
+	if err != nil {
+		FatalErr(err)
+	}
+}
+
+func isE2ENamespace(ns string) bool {
+	return true
+	//return strings.HasPrefix(ns, "e2e-") ||
+	//	strings.HasPrefix(ns, "aggregator-") ||
+	//	strings.HasPrefix(ns, "csi-") ||
+	//	strings.HasPrefix(ns, "deployment-") ||
+	//	strings.HasPrefix(ns, "disruption-") ||
+	//	strings.HasPrefix(ns, "gc-") ||
+	//	strings.HasPrefix(ns, "kubectl-") ||
+	//	strings.HasPrefix(ns, "proxy-") ||
+	//	strings.HasPrefix(ns, "provisioning-") ||
+	//	strings.HasPrefix(ns, "statefulset-") ||
+	//	strings.HasPrefix(ns, "services-")
+}
+
+func addRoleToE2EServiceAccounts(rbacClient rbacv1client.RbacV1Interface, namespaces []kapiv1.Namespace, roleName string) {
+	err := retry.RetryOnConflict(longRetry, func() error {
+		for _, ns := range namespaces {
+			if isE2ENamespace(ns.Name) && ns.Status.Phase != kapiv1.NamespaceTerminating {
+				_, err := rbacClient.RoleBindings(ns.Name).Create(context.Background(), &rbacv1.RoleBinding{
+					ObjectMeta: metav1.ObjectMeta{GenerateName: "default-" + roleName, Namespace: ns.Name},
+					RoleRef: rbacv1.RoleRef{
+						Kind: "ClusterRole",
+						Name: roleName,
+					},
+					Subjects: []rbacv1.Subject{
+						{Name: "default", Namespace: ns.Name, Kind: rbacv1.ServiceAccountKind},
+					},
+				}, metav1.CreateOptions{})
+				if err != nil {
+					e2e.Logf("Warning: Failed to add role to e2e service account: %v", err)
+				}
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		FatalErr(err)
+	}
+}
diff --git a/test/util/test_env.go b/test/util/test_env.go
new file mode 100644
index 000000000..e8dc25162
--- /dev/null
+++ b/test/util/test_env.go
@@ -0,0 +1,90 @@
+package util
+
+import (
+	"os"
+	"path"
+	"sync"
+)
+
+const (
+	TestEnvProw TestEnvType = 1 << iota
+	TestEnvJenkins
+	TestEnvLocal
+
+	ArtifactDirEnvProw = "ARTIFACT_DIR"
+
+	PullSecretDirEnvProw = "CLUSTER_PROFILE_DIR"
+	PullSecretFileNameProw = "pull-secret"
+	PullSecretLocationEnvLocal = "PULL_SECRET_LOCATION"
+)
+
+type TestEnvType int
+
+type TestEnv struct {
+	Type TestEnvType
+
+	ArtifactDir string
+	PullSecretLocation string
+}
+
+var (
+	globalTestEnv *TestEnv
+	globalTestEnvOnce sync.Once
+)
+
+// GetTestEnv gets an initialized *TestEnv in a thread-safe and lazy manner
+func GetTestEnv() *TestEnv {
+	globalTestEnvOnce.Do(func() {
+		globalTestEnv = initTestEnv()
+	})
+	return globalTestEnv
+}
+
+func initTestEnv() *TestEnv {
+	env := &TestEnv{}
+	env.getEnvType()
+	env.getPullSecretLocation()
+	env.getArtifactDir()
+	return env
+}
+
+func (t *TestEnv) IsRunningInProw() bool {
+	return t.Type == TestEnvProw
+}
+
+func (t *TestEnv) IsRunningInJenkins() bool {
+	return t.Type == TestEnvJenkins
+}
+
+func (t *TestEnv) IsRunningLocally() bool {
+	return t.Type == TestEnvLocal
+}
+
+func (t *TestEnv) getEnvType() {
+	switch {
+	case os.Getenv("JENKINS_HOME") != "":
+		t.Type = TestEnvJenkins
+	case os.Getenv("OPENSHIFT_CI") != "":
+		t.Type = TestEnvProw
+	default:
+		t.Type = TestEnvLocal
+	}
+}
+
+func (t *TestEnv)
getPullSecretLocation() { + switch { + case t.IsRunningInProw(): + t.PullSecretLocation = path.Join(os.Getenv(PullSecretDirEnvProw), PullSecretFileNameProw) + case t.IsRunningLocally(): + t.PullSecretLocation = os.Getenv(PullSecretLocationEnvLocal) + } +} + +func (t *TestEnv) getArtifactDir() { + switch { + case t.IsRunningInProw(): + t.ArtifactDir = os.Getenv(ArtifactDirEnvProw) + case t.IsRunningLocally(): + t.ArtifactDir = os.TempDir() + } +} diff --git a/test/util/upibaremetalrdu_client.go b/test/util/upibaremetalrdu_client.go new file mode 100644 index 000000000..ab3b5ee22 --- /dev/null +++ b/test/util/upibaremetalrdu_client.go @@ -0,0 +1,108 @@ +package util + +import ( + "context" + "fmt" + "time" + + "github.com/gebn/bmc" + "github.com/gebn/bmc/pkg/ipmi" + o "github.com/onsi/gomega" + e2e "k8s.io/kubernetes/test/e2e/framework" +) + +const ( + BMPoweredOn = "poweredon" + BMPoweredOff = "poweredoff" +) + +// RDU2Host models the RDU2 host (partial representation) +type RDU2Host struct { + Name string `yaml:"name"` + BmcAddress string `yaml:"bmc_address"` + BmcUser string `yaml:"bmc_user"` + BmcPassword string `yaml:"bmc_pass"` + BmcForwardedPort uint16 `yaml:"bmc_forwarded_port"` + Host string `yaml:"host"` + JumpHost string `yaml:"-"` +} + +// StopUPIbaremetalInstance power off the BM machine +func (h *RDU2Host) StopUPIbaremetalInstance() error { + e2e.Logf("UPI baremetal instance :: %v - %v - %v :: Shutting Down", h.Name, h.Host, h.BmcAddress) + return h.ipmiExec(ipmi.ChassisControlPowerOff) +} + +// StartUPIbaremetalInstance power on the BM machine +func (h *RDU2Host) StartUPIbaremetalInstance() error { + e2e.Logf("UPI baremetal instance :: %v - %v - %v :: Powering on", h.Name, h.Host, h.BmcAddress) + return h.ipmiExec(ipmi.ChassisControlPowerOn) +} + +// GetMachinePowerStatus returns the power status of the BM Machine +func (h *RDU2Host) GetMachinePowerStatus() (string, error) { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + transport, err := h.newTransport(ctx) + o.Expect(err).NotTo(o.HaveOccurred(), "Failed to connect to BMC") + defer transport.Close() + + e2e.Logf("connected to %v (%v - %v - %v) over IPMI v%v", transport.Address(), h.Name, h.Host, h.BmcAddress, transport.Version()) + sess, err := h.newSession(ctx, transport) + o.Expect(err).NotTo(o.HaveOccurred(), "Failed to create IPMI session") + defer sess.Close(ctx) + status, err := sess.GetChassisStatus(ctx) + o.Expect(err).NotTo(o.HaveOccurred(), fmt.Sprintf("Failed to get chassis status:: %v :: %v", + h.BmcAddress, err)) + if status.PoweredOn { + e2e.Logf("UPI baremetal instance :: %v - %v - %v :: poweredOn", h.Name, h.Host, h.BmcAddress) + return BMPoweredOn, nil + } + e2e.Logf("UPI baremetal instance :: %v - %v - %v :: poweredOff", h.Name, h.Host, h.BmcAddress) + return BMPoweredOff, nil +} + +// newTransport creates a new IPMI transport. It is the caller's responsibility to close it. +func (h *RDU2Host) newTransport(ctx context.Context) (bmc.SessionlessTransport, error) { + transport, err := bmc.Dial(ctx, fmt.Sprintf("%s:%d", h.JumpHost, h.BmcForwardedPort)) + o.Expect(err).NotTo(o.HaveOccurred()) + return transport, nil +} + +// newSession creates a new IPMI session. It is the caller's responsibility to close it. 
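+//
+// A minimal sketch of the expected pairing with newTransport, as done in
+// GetMachinePowerStatus (error handling elided):
+//
+//	transport, _ := h.newTransport(ctx)
+//	defer transport.Close()
+//	sess, _ := h.newSession(ctx, transport)
+//	defer sess.Close(ctx)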
+func (h *RDU2Host) newSession(ctx context.Context, transport bmc.SessionlessTransport) (bmc.Session, error) { + sess, err := transport.NewSession(ctx, &bmc.SessionOpts{ + Username: h.BmcUser, + Password: []byte(h.BmcPassword), + MaxPrivilegeLevel: ipmi.PrivilegeLevelAdministrator, + }) + o.Expect(err).NotTo(o.HaveOccurred()) + return sess, nil +} + +// ipmiExec executes the specified IPMI command on the BMC +func (h *RDU2Host) ipmiExec(cmd ipmi.ChassisControl) error { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + transport, err := h.newTransport(ctx) + o.Expect(err).NotTo(o.HaveOccurred(), "Failed to connect to BMC") + defer transport.Close() + + e2e.Logf("connected to %v (%v - %v - %v) over IPMI v%v", transport.Address(), h.Name, h.Host, h.BmcAddress, transport.Version()) + sess, err := h.newSession(ctx, transport) + o.Expect(err).NotTo(o.HaveOccurred(), "Failed to create IPMI session") + defer sess.Close(ctx) + + privup := &ipmi.SetSessionPrivilegeLevelCmd{ + Req: ipmi.SetSessionPrivilegeLevelReq{ + PrivilegeLevel: ipmi.PrivilegeLevelAdministrator, + }, + } + err = bmc.ValidateResponse(sess.SendCommand(ctx, privup)) + o.Expect(err).NotTo(o.HaveOccurred(), "Failed to set privilege level") + + if err := sess.ChassisControl(ctx, cmd); err != nil { + return err + } + return nil +} diff --git a/test/util/url/url.go b/test/util/url/url.go new file mode 100644 index 000000000..a3a5e9768 --- /dev/null +++ b/test/util/url/url.go @@ -0,0 +1,378 @@ +package url + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "path/filepath" + "strings" + "time" + + o "github.com/onsi/gomega" + + exutil "github.com/openshift/openshift-tests-private/test/extended/util" + v1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + kclientset "k8s.io/client-go/kubernetes" + e2e "k8s.io/kubernetes/test/e2e/framework" + e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" +) + +type Tester struct { + client kclientset.Interface + namespace string + podName string + errorPassThrough bool +} + +func NewTester(client kclientset.Interface, ns string) *Tester { + return &Tester{client: client, namespace: ns} +} + +func (ut *Tester) Close() { + if err := ut.client.CoreV1().Pods(ut.namespace).Delete(context.Background(), ut.podName, *metav1.NewDeleteOptions(1)); err != nil { + e2e.Logf("Failed to delete exec pod %s: %v", ut.podName, err) + } + ut.podName = "" +} + +func (ut *Tester) Response(test *Test) *Response { + responses := ut.Responses(test) + if responses == nil { + return nil + } + if len(responses) == 0 { + return nil + } + return responses[0] +} + +func (ut *Tester) Responses(tests ...*Test) []*Response { + if len(ut.podName) == 0 { + _, err := createExecPod(ut.client, ut.namespace, "execpod") + if err != nil && !apierrs.IsAlreadyExists(err) { + // exit even on error passthrough, unless the exec pod + // was already created by a test running in parallel + o.Expect(err).NotTo(o.HaveOccurred()) + } + ut.podName = "execpod" + } + // testToScript needs to run after creating the pod + // in case we need to rsync files for a post body + script := testsToScript(tests) + output, err := e2eoutput.RunHostCmd(ut.namespace, ut.podName, script) + if !ut.errorPassThrough { + o.Expect(err).NotTo(o.HaveOccurred()) + } + if err != nil { + return []*Response{ + { + Error: fmt.Sprintf("%#v", err), + }, + } + + } + responses, 
err := parseResponses(output) + if !ut.errorPassThrough { + o.Expect(err).NotTo(o.HaveOccurred()) + } + if err != nil { + return []*Response{ + { + Error: fmt.Sprintf("%#v", err), + Body: []byte(output), + }, + } + } + if len(responses) != len(tests) { + // exit even on error passthrough + o.Expect(fmt.Errorf("number of tests did not match number of responses: %d and %d", len(responses), len(tests))).NotTo(o.HaveOccurred()) + + } + return responses +} + +func (ut *Tester) WithErrorPassthrough(pt bool) *Tester { + ut.errorPassThrough = pt + return ut +} + +func (ut *Tester) Podname() string { + return ut.podName +} + +func (ut *Tester) Within(t time.Duration, tests ...*Test) { + var errs []error + failing := tests + err := wait.PollImmediate(time.Second, t, func() (bool, error) { + errs = errs[:0] + responses := ut.Responses(failing...) + var next []*Test + for i, res := range responses { + if err := failing[i].Test(i, res); err != nil { + next = append(next, failing[i]) + errs = append(errs, err) + } + } + e2e.Logf("%d/%d failed out of %d", len(errs), len(failing), len(tests)) + // perform one more loop if we haven't seen all tests pass at the same time + if len(next) == 0 && len(failing) != len(tests) { + failing = tests + return false, nil + } + failing = next + return len(errs) == 0, nil + }) + if len(errs) > 0 { + o.Expect(fmt.Errorf("%d/%d tests failed after %s: %v", len(errs), len(tests), t, errs)) + } + o.Expect(err).ToNot(o.HaveOccurred()) +} + +// createExecPod creates a simple centos:7 pod in a sleep loop used as a +// vessel for kubectl exec commands. +// Returns the name of the created pod. +func createExecPod(clientset kclientset.Interface, ns, name string) (string, error) { + e2e.Logf("Creating new exec pod") + immediate := int64(0) + execPod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Command: []string{"/bin/bash", "-c", "exec sleep 10000"}, + Name: "hostexec", + Image: "centos:7", + ImagePullPolicy: v1.PullIfNotPresent, + }, + }, + HostNetwork: false, + TerminationGracePeriodSeconds: &immediate, + }, + } + client := clientset.CoreV1() + created, err := client.Pods(ns).Create(context.Background(), execPod, metav1.CreateOptions{}) + if err != nil { + return "", err + } + err = wait.PollImmediate(e2e.Poll, 5*time.Minute, func() (bool, error) { + retrievedPod, err := client.Pods(execPod.Namespace).Get(context.Background(), created.Name, metav1.GetOptions{}) + if err != nil { + return false, nil + } + return retrievedPod.Status.Phase == v1.PodRunning, nil + }) + if err != nil { + return "", err + } + return created.Name, nil +} + +func testsToScript(tests []*Test) string { + testScripts := []string{ + "set -euo pipefail", + `function json_escape() {`, + ` python -c 'import json,sys; print json.dumps(sys.stdin.read())'`, + `}`, + } + for i, test := range tests { + testScripts = append(testScripts, test.ToShell(i)) + } + script := strings.Join(testScripts, "\n") + return script +} + +func parseResponses(out string) ([]*Response, error) { + var responses []*Response + d := json.NewDecoder(bytes.NewReader([]byte(out))) + for i := 0; ; i++ { + r := &Response{} + if err := d.Decode(r); err != nil { + if err == io.EOF { + return responses, nil + } + return nil, fmt.Errorf("response %d could not be decoded: %v", i, err) + } + + if i != r.Test { + return nil, fmt.Errorf("response %d does not match test body %d", i, r.Test) + } + + // parse the HTTP response + res, err := 
http.ReadResponse(bufio.NewReader(bytes.NewBufferString(r.Headers)), nil) + if err != nil { + return nil, fmt.Errorf("response %d was unparseable: %v\n%s", i, err, r.Headers) + } + if res.StatusCode != r.CURL.Code { + return nil, fmt.Errorf("response %d returned a different status code than was encoded in the headers:\n%s", i, r.Headers) + } + res.Body = ioutil.NopCloser(bytes.NewBuffer(r.Body)) + r.Response = res + + responses = append(responses, r) + } +} + +type Response struct { + Test int `json:"test"` + ReturnCode int `json:"rc"` + Error string `json:"error"` + + CURL CURL `json:"curl"` + Body []byte `json:"body"` + Headers string `json:"headers"` + + Response *http.Response +} + +type CURL struct { + Code int `json:"code"` +} + +type Test struct { + Name string + Req *http.Request + SkipVerify bool + // we capture this here vs. the httpRequest + // to facilitate passing to curl + PostBodyFile string + PodName string + Oc *exutil.CLI + + Wants []func(*http.Response) error +} + +func Expect(method, url string) *Test { + req, err := http.NewRequest(method, url, nil) + if err != nil { + panic(err) + } + return &Test{ + Req: req, + } +} + +func (ut *Test) WithBodyToUpload(filename, podname string, oc *exutil.CLI) *Test { + ut.PostBodyFile = filename + ut.PodName = podname + ut.Oc = oc + return ut +} + +func (ut *Test) WithToken(token string) *Test { + return ut.WithHeader("Authorization", "Bearer "+token) +} + +func (ut *Test) WithHeader(hdr, value string) *Test { + ut.Req.Header.Set(hdr, value) + return ut +} + +func (ut *Test) Through(addr string) *Test { + ut.Req.Header.Set("Host", ut.Req.URL.Host) + ut.Req.URL.Host = addr + return ut +} + +func (ut *Test) HasStatusCode(codes ...int) *Test { + ut.Wants = append(ut.Wants, func(res *http.Response) error { + for _, code := range codes { + if res.StatusCode == code { + return nil + } + } + return fmt.Errorf("status code %d not in %v", res.StatusCode, codes) + }) + return ut +} + +// RedirectsTo func +func (ut *Test) RedirectsTo(url string, codes ...int) *Test { + if len(codes) == 0 { + codes = []int{http.StatusFound, http.StatusPermanentRedirect, http.StatusTemporaryRedirect} + } + ut.HasStatusCode(codes...) 
+ ut.Wants = append(ut.Wants, func(res *http.Response) error { + location := res.Header.Get("Location") + if location != url { + return fmt.Errorf("Location header was %q, not %q", location, url) + } + return nil + }) + return ut +} + +// SkipTLSVerification func +func (ut *Test) SkipTLSVerification() *Test { + ut.SkipVerify = true + return ut +} + +// Test func +func (ut *Test) Test(i int, res *Response) error { + if len(res.Error) > 0 || res.ReturnCode != 0 { + return fmt.Errorf("test %d was not successful: %d %s", i, res.ReturnCode, res.Error) + } + for _, fn := range ut.Wants { + if err := fn(res.Response); err != nil { + return fmt.Errorf("test %d was not successful: %v", i, err) + } + } + if len(ut.Wants) == 0 { + if res.Response.StatusCode < 200 || res.Response.StatusCode >= 300 { + return fmt.Errorf("test %d did not return a 2xx status code: %d", i, res.Response.StatusCode) + } + } + return nil +} + +// ToShell func +func (ut *Test) ToShell(i int) string { + var lines []string + if len(ut.Name) > 0 { + lines = append(lines, fmt.Sprintf("# Test: %s (%d)", ut.Name, i)) + } else { + lines = append(lines, fmt.Sprintf("# Test: %d", i)) + } + var headers []string + for k, values := range ut.Req.Header { + for _, v := range values { + headers = append(headers, fmt.Sprintf("-H %q", k+":"+v)) + } + } + lines = append(lines, `rc=0`) + post := "" + if strings.ToLower(strings.Trim(ut.Req.Method, " ")) == "post" { + post = " -H 'Expect:' " + if len(ut.PostBodyFile) > 0 { + basename := filepath.Base(ut.PostBodyFile) + dirname := filepath.Dir(ut.PostBodyFile) + lastsubdir := filepath.Base(dirname) + err := ut.Oc.AsAdmin().Run("rsync").Args(dirname, ut.PodName+":"+"/tmp", "--strategy=tar").Execute() + o.Expect(err).NotTo(o.HaveOccurred()) + post = post + " -d @/tmp/" + lastsubdir + "/" + basename + } else { + post = post + " -d '' " + } + } + cmd := fmt.Sprintf(`curl -X %s %s %s -s -S -o /tmp/body -D /tmp/headers %q`, ut.Req.Method, strings.Join(headers, " "), post, ut.Req.URL) + cmd += ` -w '{"code":%{http_code}}'` + if ut.SkipVerify { + cmd += ` -k` + } + cmd += " 2>/tmp/error 1>/tmp/output || rc=$?" 
diff --git a/test/util/url/url_test.go b/test/util/url/url_test.go
new file mode 100644
index 000000000..081c3539d
--- /dev/null
+++ b/test/util/url/url_test.go
@@ -0,0 +1,13 @@
+package url
+
+import (
+	"fmt"
+	"testing"
+)
+
+// TestTestsToScript is a smoke test: it prints the generated script for
+// manual inspection.
+func TestTestsToScript(t *testing.T) {
+	tests := []*Test{
+		Expect("GET", "https://www.google.com"),
+	}
+	fmt.Println(testsToScript(tests))
+}
diff --git a/test/util/vmware_client.go b/test/util/vmware_client.go
new file mode 100644
index 000000000..f12d40eef
--- /dev/null
+++ b/test/util/vmware_client.go
@@ -0,0 +1,170 @@
+package util
+
+import (
+	"context"
+	"flag"
+	"net/url"
+	"time"
+
+	"github.com/vmware/govmomi"
+	"github.com/vmware/govmomi/find"
+	"github.com/vmware/govmomi/property"
+	"github.com/vmware/govmomi/vim25/mo"
+	"github.com/vmware/govmomi/vim25/types"
+
+	o "github.com/onsi/gomega"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+)
+
+// Vmware holds the connection settings for a vSphere endpoint.
+type Vmware struct {
+	GovmomiURL string
+}
+
+// ByName sorts virtual machines by name.
+type ByName []mo.VirtualMachine
+
+func (n ByName) Len() int           { return len(n) }
+func (n ByName) Swap(i, j int)      { n[i], n[j] = n[j], n[i] }
+func (n ByName) Less(i, j int) bool { return n[i].Name < n[j].Name }
+
+// Property paths used when querying virtual machines.
+const (
+	PropRuntimePowerState = "summary.runtime.powerState"
+	PropConfigTemplate    = "summary.config.template"
+)
+
+// Login connects and logs in to ESX or vCenter.
+func (vmware *Vmware) Login() (*Vmware, *govmomi.Client) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	flag.Parse()
+
+	// Parse URL from string
+	u, err := url.Parse(vmware.GovmomiURL)
+	if err != nil {
+		e2e.Logf("Error parsing vmware url")
+		o.Expect(err).NotTo(o.HaveOccurred())
+	}
+
+	// Connect and log in to ESX or vCenter
+	c, err := govmomi.NewClient(ctx, u, true)
+	if err != nil {
+		e2e.Logf("Error in login, please check vmware url")
+		o.Expect(err).NotTo(o.HaveOccurred())
+	}
+	return vmware, c
+}
+
+// GetVspheresInstance returns the name of the given virtual machine instance.
+func (vmware *Vmware) GetVspheresInstance(c *govmomi.Client, vmInstance string) (string, error) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	pc := property.DefaultCollector(c.Client)
+	vm, err := find.NewFinder(c.Client).VirtualMachine(ctx, vmInstance)
+	if err != nil {
+		return "", err
+	}
+
+	var vms []mo.VirtualMachine
+	err = pc.RetrieveOne(ctx, vm.Reference(), []string{"name"}, &vms)
+	if err != nil {
+		return "", err
+	}
+
+	e2e.Logf("Virtual machines found: %v", vms[0].Name)
+	return vms[0].Name, nil
+}
+
+// GetVspheresInstanceState returns the power state of the given virtual machine instance.
+func (vmware *Vmware) GetVspheresInstanceState(c *govmomi.Client, vmInstance string) (string, error) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	pc := property.DefaultCollector(c.Client)
+	vm, err := find.NewFinder(c.Client).VirtualMachine(ctx, vmInstance)
+	if err != nil {
+		return "", err
+	}
+
+	var vms []mo.VirtualMachine
+	err = pc.RetrieveOne(ctx, vm.Reference(), []string{"summary"}, &vms)
+	if err != nil {
+		return "", err
+	}
+
+	e2e.Logf("%s: %s\n", vms[0].Summary.Config.Name, vms[0].Summary.Runtime.PowerState)
+	return string(vms[0].Summary.Runtime.PowerState), nil
+}
+
+// StopVsphereInstance powers off the given virtual machine instance and waits
+// until it reports the poweredOff state.
+func (vmware *Vmware) StopVsphereInstance(c *govmomi.Client, vmInstance string) error {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	pc := property.DefaultCollector(c.Client)
+	vm, err := find.NewFinder(c.Client).VirtualMachine(ctx, vmInstance)
+	if err != nil {
+		return err
+	}
+	// power off the VM after a short delay, while property.Wait below watches
+	// for the resulting power-state change
+	go func() {
+		time.Sleep(time.Millisecond * 100)
+		vm.PowerOff(ctx)
+	}()
+
+	return property.Wait(ctx, pc, vm.Reference(), []string{"runtime.powerState"}, func(changes []types.PropertyChange) bool {
+		for _, change := range changes {
+			state := change.Val.(types.VirtualMachinePowerState)
+			e2e.Logf("%v", state)
+			if state == types.VirtualMachinePowerStatePoweredOff {
+				return true
+			}
+		}
+		// continue polling
+		return false
+	})
+}
+
+// StartVsphereInstance powers on the given virtual machine instance and waits
+// until it reports the poweredOn state.
+func (vmware *Vmware) StartVsphereInstance(c *govmomi.Client, vmInstance string) error {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	pc := property.DefaultCollector(c.Client)
+	vm, err := find.NewFinder(c.Client).VirtualMachine(ctx, vmInstance)
+	if err != nil {
+		return err
+	}
+
+	// power on the VM after a short delay, while property.Wait below watches
+	// for the resulting power-state change
+	go func() {
+		time.Sleep(time.Millisecond * 100)
+		vm.PowerOn(ctx)
+	}()
+
+	return property.Wait(ctx, pc, vm.Reference(), []string{"runtime.powerState"}, func(changes []types.PropertyChange) bool {
+		for _, change := range changes {
+			state := change.Val.(types.VirtualMachinePowerState)
+			e2e.Logf("%v", state)
+			if state == types.VirtualMachinePowerStatePoweredOn {
+				return true
+			}
+		}
+		// continue polling
+		return false
+	})
+}
+
+// GetVsphereConnectionLogout logs out of the vSphere connection.
+func (vmware *Vmware) GetVsphereConnectionLogout(c *govmomi.Client) error {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	return c.Logout(ctx)
+}
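+
+// Example (illustrative sketch only; the URL and VM inventory path are
+// hypothetical):
+//
+//	vmware := &Vmware{GovmomiURL: "https://user:pass@vcenter.example.com/sdk"}
+//	_, client := vmware.Login()
+//	defer vmware.GetVsphereConnectionLogout(client)
+//	state, err := vmware.GetVspheresInstanceState(client, "/dc/vm/worker-0")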
%#v\n", err) + } else { + e2e.Logf("deleted pvc %s\n", pvc.Name) + } + } + } +} + +func DumpPersistentVolumeInfo(oc *CLI) { + e2e.Logf("Dumping persistent volume info for cluster") + out, err := oc.AsAdmin().Run("get").Args("pv").Output() + if err != nil { + e2e.Logf("Error dumping persistent volume info: %v", err) + return + } + e2e.Logf("\n%s", out) + out, err = oc.AsAdmin().Run("get").Args("pv", "-o", "yaml").Output() + if err != nil { + e2e.Logf("Error dumping persistent volume info: %v", err) + return + } + e2e.Logf(out) + out, err = oc.AsAdmin().Run("get").Args("pvc", "-n", oc.Namespace()).Output() + if err != nil { + e2e.Logf("Error dumping persistent volume claim info: %v", err) + return + } + e2e.Logf("\n%s", out) + out, err = oc.AsAdmin().Run("get").Args("pvc", "-n", oc.Namespace(), "-o", "yaml").Output() + if err != nil { + e2e.Logf("Error dumping persistent volume claim info: %v", err) + return + } + e2e.Logf(out) + +} diff --git a/test/util/yaml.go b/test/util/yaml.go new file mode 100644 index 000000000..10726fe82 --- /dev/null +++ b/test/util/yaml.go @@ -0,0 +1,160 @@ +package util + +import ( + "encoding/json" + "io/ioutil" + "strconv" + "strings" + + "gopkg.in/yaml.v3" + e2e "k8s.io/kubernetes/test/e2e/framework" +) + +/* +YamlReplace define a YAML modification given. +Example: + + YamlReplace { + Path: 'spec.template.spec.imagePullSecrets', + Value: '- name: notmatch-secret', + } +*/ +type YamlReplace struct { + Path string // path to modify or create value + Value string // a string literal or YAML string (ex. 'name: frontend') to be set under the given path +} + +func convert(i interface{}) interface{} { + switch x := i.(type) { + case map[interface{}]interface{}: + //A map with key and value using arbitrary value + m2 := map[string]interface{}{} + for k, v := range x { + m2[k.(string)] = convert(v) + } + return m2 + + case map[string]interface{}: + //A map with string key and an arbitrary value + m2 := map[string]interface{}{} + for k, v := range x { + m2[k] = convert(v) + } + return m2 + + case []interface{}: + // Arbitrary type value + for i, v := range x { + x[i] = convert(v) + } + } + return i +} + +/* +Yaml2Json converts yaml file to json format. +Example: + + util.Yaml2Json(string(yamlFile)) +*/ +func Yaml2Json(s string) (string, error) { + var ( + body interface{} + errJson error + b []byte + ) + if err := yaml.Unmarshal([]byte(s), &body); err != nil { + e2e.Failf("Failed to unmarshal yaml with error: %v", err) + } + + body = convert(body) + + if b, errJson = json.Marshal(body); errJson != nil { + e2e.Failf("Failed to marshal json with error: %v", errJson) + } + return string(b), errJson +} + +/* +ModifyYamlFileContent modify the content of YAML file given the file path and a list of YamlReplace struct. 
+
+/*
+ModifyYamlFileContent modifies the content of a YAML file in place, given the
+file path and a list of YamlReplace structs.
+Example:
+
+	ModifyYamlFileContent(file, []YamlReplace{
+		{
+			Path:  "spec.template.spec.imagePullSecrets",
+			Value: "- name: notmatch-secret",
+		},
+	})
+*/
+func ModifyYamlFileContent(file string, replacements []YamlReplace) {
+	input, err := ioutil.ReadFile(file)
+	if err != nil {
+		e2e.Failf("read file %s failed: %v", file, err)
+	}
+
+	var doc yaml.Node
+	if err = yaml.Unmarshal(input, &doc); err != nil {
+		e2e.Failf("unmarshal yaml for file %s failed: %v", file, err)
+	}
+
+	for _, replacement := range replacements {
+		path := strings.Split(replacement.Path, ".")
+		value := yaml.Node{
+			Kind:  yaml.ScalarNode,
+			Value: replacement.Value,
+		}
+		setYamlValue(&doc, path, value)
+	}
+
+	output, err := yaml.Marshal(doc.Content[0])
+	if err != nil {
+		e2e.Failf("marshal yaml for file %s failed: %v", file, err)
+	}
+
+	if err = ioutil.WriteFile(file, output, 0o755); err != nil {
+		e2e.Failf("write file %s failed: %v", file, err)
+	}
+}
+
+// setYamlValue sets (or creates, if the path does not exist) a leaf yaml.Node
+// at the given path
+func setYamlValue(root *yaml.Node, path []string, value yaml.Node) {
+	if len(path) == 0 {
+		// at the leaf: if the value parses as non-empty YAML, splice in the
+		// parsed node; otherwise set it as a plain scalar
+		var valueParsed yaml.Node
+		if err := yaml.Unmarshal([]byte(value.Value), &valueParsed); err == nil && len(valueParsed.Content) > 0 {
+			*root = *valueParsed.Content[0]
+		} else {
+			*root = value
+		}
+		return
+	}
+	key := path[0]
+	rest := path[1:]
+	switch root.Kind {
+	case yaml.DocumentNode:
+		setYamlValue(root.Content[0], path, value)
+	case yaml.MappingNode:
+		for i := 0; i < len(root.Content); i += 2 {
+			if root.Content[i].Value == key {
+				setYamlValue(root.Content[i+1], rest, value)
+				return
+			}
+		}
+		// key not found: append the key with an empty mapping, then recurse
+		root.Content = append(root.Content, &yaml.Node{
+			Kind:  yaml.ScalarNode,
+			Value: key,
+		}, &yaml.Node{
+			Kind: yaml.MappingNode,
+		})
+		l := len(root.Content)
+		setYamlValue(root.Content[l-1], rest, value)
+	case yaml.SequenceNode:
+		index, err := strconv.Atoi(key)
+		if err != nil {
+			e2e.Failf("string to int failed: %v", err)
+		}
+		if index < len(root.Content) {
+			setYamlValue(root.Content[index], rest, value)
+		}
+	}
+}
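+
+// A path segment may also index into a sequence (illustrative sketch; the
+// file name and fields below are hypothetical):
+//
+//	ModifyYamlFileContent("deployment.yaml", []YamlReplace{
+//		{Path: "spec.template.spec.containers.0.image", Value: "quay.io/example/app:latest"},
+//	})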