Commit 9cc3209

asset/manifests: generate Cluster API infrastructure manifests
Signed-off-by: Vince Prignano <[email protected]>
1 parent a46827f commit 9cc3209

File tree: 4 files changed, +613 −0 lines changed

pkg/asset/manifests/aws/cluster.go

Lines changed: 227 additions & 0 deletions

```go
package aws

import (
	"context"
	"fmt"
	"time"

	"github.com/pkg/errors"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
	capa "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"

	"github.com/openshift/installer/pkg/asset/installconfig"
	"github.com/openshift/installer/pkg/asset/manifests/capiutils"
)

// GenerateClusterAssets generates the manifests for the cluster-api.
func GenerateClusterAssets(installConfig *installconfig.InstallConfig, clusterID *installconfig.ClusterID) (*capiutils.GenerateClusterAssetsOutput, error) {
	manifests := capiutils.Manifests{}
	mainCIDR := capiutils.CIDRFromInstallConfig(installConfig)

	zones, err := installConfig.AWS.AvailabilityZones(context.TODO())
	if err != nil {
		return nil, errors.Wrap(err, "failed to get availability zones")
	}

	awsCluster := &capa.AWSCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      clusterID.InfraID,
			Namespace: capiutils.Namespace,
		},
		Spec: capa.AWSClusterSpec{
			Region: installConfig.Config.AWS.Region,
			NetworkSpec: capa.NetworkSpec{
				VPC: capa.VPCSpec{
					CidrBlock:                  mainCIDR.String(),
					AvailabilityZoneUsageLimit: ptr.To(len(zones)),
					AvailabilityZoneSelection:  &capa.AZSelectionSchemeOrdered,
				},
				CNI: &capa.CNISpec{
					CNIIngressRules: capa.CNIIngressRules{
						{
							Description: "ICMP",
							Protocol:    capa.SecurityGroupProtocolICMP,
							FromPort:    -1,
							ToPort:      -1,
						},
						{
							Description: "Port 22 (TCP)",
							Protocol:    capa.SecurityGroupProtocolTCP,
							FromPort:    22,
							ToPort:      22,
						},
						{
							Description: "Port 4789 (UDP) for VXLAN",
							Protocol:    capa.SecurityGroupProtocolUDP,
							FromPort:    4789,
							ToPort:      4789,
						},
						{
							Description: "Port 6081 (UDP) for geneve",
							Protocol:    capa.SecurityGroupProtocolUDP,
							FromPort:    6081,
							ToPort:      6081,
						},
						{
							Description: "Port 500 (UDP) for IKE",
							Protocol:    capa.SecurityGroupProtocolUDP,
							FromPort:    500,
							ToPort:      500,
						},
						{
							Description: "Port 4500 (UDP) for IKE NAT",
							Protocol:    capa.SecurityGroupProtocolUDP,
							FromPort:    4500,
							ToPort:      4500,
						},
						{
							Description: "ESP",
							Protocol:    capa.SecurityGroupProtocolESP,
							FromPort:    -1,
							ToPort:      -1,
						},
						{
							Description: "Port 6441-6442 (TCP) for ovndb",
							Protocol:    capa.SecurityGroupProtocolTCP,
							FromPort:    6441,
							ToPort:      6442,
						},
						{
							Description: "Port 9000-9999 for node ports (TCP)",
							Protocol:    capa.SecurityGroupProtocolTCP,
							FromPort:    9000,
							ToPort:      9999,
						},
						{
							Description: "Port 9000-9999 for node ports (UDP)",
							Protocol:    capa.SecurityGroupProtocolUDP,
							FromPort:    9000,
							ToPort:      9999,
						},
						{
							Description: "Service node ports (TCP)",
							Protocol:    capa.SecurityGroupProtocolTCP,
							FromPort:    30000,
							ToPort:      32767,
						},
						{
							Description: "Service node ports (UDP)",
							Protocol:    capa.SecurityGroupProtocolUDP,
							FromPort:    30000,
							ToPort:      32767,
						},
					},
				},
				AdditionalControlPlaneIngressRules: []capa.IngressRule{
					{
						Description:              "MCS traffic from cluster network",
						Protocol:                 capa.SecurityGroupProtocolTCP,
						FromPort:                 22623,
						ToPort:                   22623,
						SourceSecurityGroupRoles: []capa.SecurityGroupRole{"node", "controlplane"},
					},
					{
						Description:              "controller-manager",
						Protocol:                 capa.SecurityGroupProtocolTCP,
						FromPort:                 10257,
						ToPort:                   10257,
						SourceSecurityGroupRoles: []capa.SecurityGroupRole{"controlplane", "node"},
					},
					{
						Description:              "kube-scheduler",
						Protocol:                 capa.SecurityGroupProtocolTCP,
						FromPort:                 10259,
						ToPort:                   10259,
						SourceSecurityGroupRoles: []capa.SecurityGroupRole{"controlplane", "node"},
					},
					{
						Description: "SSH everyone",
						Protocol:    capa.SecurityGroupProtocolTCP,
						FromPort:    22,
						ToPort:      22,
						CidrBlocks:  []string{"0.0.0.0/0"},
					},
				},
			},
			S3Bucket: &capa.S3Bucket{
				Name:                 fmt.Sprintf("openshift-bootstrap-data-%s", clusterID.InfraID),
				PresignedURLDuration: &metav1.Duration{Duration: 1 * time.Hour},
			},
			ControlPlaneLoadBalancer: &capa.AWSLoadBalancerSpec{
				Name:             ptr.To(clusterID.InfraID + "-ext"),
				LoadBalancerType: capa.LoadBalancerTypeNLB,
				Scheme:           &capa.ELBSchemeInternetFacing,
				AdditionalListeners: []capa.AdditionalListenerSpec{
					{
						Port:     22623,
						Protocol: capa.ELBProtocolTCP,
					},
				},
			},
		},
	}

	// If the install config has subnets, use them.
	if len(installConfig.AWS.Subnets) > 0 {
		privateSubnets, err := installConfig.AWS.PrivateSubnets(context.TODO())
		if err != nil {
			return nil, errors.Wrap(err, "failed to get private subnets")
		}
		for _, subnet := range privateSubnets {
			awsCluster.Spec.NetworkSpec.Subnets = append(awsCluster.Spec.NetworkSpec.Subnets, capa.SubnetSpec{
				ID:               subnet.ID,
				CidrBlock:        subnet.CIDR,
				AvailabilityZone: subnet.Zone.Name,
				IsPublic:         subnet.Public,
			})
		}

		publicSubnets, err := installConfig.AWS.PublicSubnets(context.TODO())
		if err != nil {
			return nil, errors.Wrap(err, "failed to get public subnets")
		}
		for _, subnet := range publicSubnets {
			awsCluster.Spec.NetworkSpec.Subnets = append(awsCluster.Spec.NetworkSpec.Subnets, capa.SubnetSpec{
				ID:               subnet.ID,
				CidrBlock:        subnet.CIDR,
				AvailabilityZone: subnet.Zone.Name,
				IsPublic:         subnet.Public,
			})
		}

		vpc, err := installConfig.AWS.VPC(context.TODO())
		if err != nil {
			return nil, errors.Wrap(err, "failed to get VPC")
		}
		awsCluster.Spec.NetworkSpec.VPC = capa.VPCSpec{
			ID: vpc,
		}
	}

	manifests = append(manifests, &capiutils.Manifest{Object: awsCluster, Filename: "02_infra-cluster.yaml"})

	id := &capa.AWSClusterControllerIdentity{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "default",
			Namespace: capiutils.Namespace,
		},
		Spec: capa.AWSClusterControllerIdentitySpec{
			AWSClusterIdentitySpec: capa.AWSClusterIdentitySpec{
				AllowedNamespaces: &capa.AllowedNamespaces{}, // Allow all namespaces.
			},
		},
	}
	manifests = append(manifests, &capiutils.Manifest{Object: id, Filename: "01_aws-cluster-controller-identity-default.yaml"})

	return &capiutils.GenerateClusterAssetsOutput{
		Manifests: manifests,
		InfrastructureRef: &corev1.ObjectReference{
			APIVersion: "infrastructure.cluster.x-k8s.io/v1beta2",
			Kind:       "AWSCluster",
			Name:       awsCluster.Name,
			Namespace:  awsCluster.Namespace,
		},
	}, nil
}
```
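Both generators lean on a small `capiutils` surface that is not shown in this diff (it is presumably among the other two changed files). Below is a minimal sketch of the shapes the code above assumes, inferred purely from usage; the `Namespace` value, the `client.Object` field type, and the body of `CIDRFromInstallConfig` are assumptions, not the committed implementation.

```go
package capiutils

import (
	"net"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/openshift/installer/pkg/asset/installconfig"
)

// Namespace is where the generated manifests are created.
// The concrete value here is an assumption; it is not visible in this diff.
const Namespace = "openshift-cluster-api-guests"

// Manifest pairs a Kubernetes object with the filename it is serialized to.
type Manifest struct {
	Object   client.Object // assumed type; every object passed in above satisfies it
	Filename string
}

// Manifests is the ordered list a platform generator builds up.
type Manifests []*Manifest

// GenerateClusterAssetsOutput is what each platform generator returns.
type GenerateClusterAssetsOutput struct {
	Manifests         Manifests
	InfrastructureRef *corev1.ObjectReference
}

// CIDRFromInstallConfig returns the primary machine-network CIDR.
// Assumed to read the first machine-network entry of the install config.
func CIDRFromInstallConfig(ic *installconfig.InstallConfig) *net.IPNet {
	return &ic.Config.Networking.MachineNetwork[0].CIDR.IPNet
}
```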
pkg/asset/manifests/azure/cluster.go

Lines changed: 122 additions & 0 deletions

```go
package azure

import (
	"github.com/pkg/errors"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	capz "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"

	"github.com/openshift/installer/pkg/asset/installconfig"
	"github.com/openshift/installer/pkg/asset/manifests/capiutils"
	"github.com/openshift/installer/pkg/asset/manifests/capiutils/cidr"
)

// GenerateClusterAssets generates the manifests for the cluster-api.
func GenerateClusterAssets(installConfig *installconfig.InstallConfig, clusterID *installconfig.ClusterID) (*capiutils.GenerateClusterAssetsOutput, error) {
	manifests := capiutils.Manifests{}
	mainCIDR := capiutils.CIDRFromInstallConfig(installConfig)

	session, err := installConfig.Azure.Session()
	if err != nil {
		return nil, errors.Wrap(err, "failed to create Azure session")
	}

	subnets, err := cidr.SplitIntoSubnetsIPv4(mainCIDR.String(), 2)
	if err != nil {
		return nil, errors.Wrap(err, "failed to split CIDR into subnets")
	}

	// CAPZ expects the capz-system namespace to exist.
	azureNamespace := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "capz-system"}}
	manifests = append(manifests, &capiutils.Manifest{Object: azureNamespace, Filename: "00_azure-namespace.yaml"})

	azureCluster := &capz.AzureCluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      clusterID.InfraID,
			Namespace: capiutils.Namespace,
		},
		Spec: capz.AzureClusterSpec{
			ResourceGroup: clusterID.InfraID,
			AzureClusterClassSpec: capz.AzureClusterClassSpec{
				SubscriptionID:   session.Credentials.SubscriptionID,
				Location:         installConfig.Config.Azure.Region,
				AzureEnvironment: string(installConfig.Azure.CloudName),
				IdentityRef: &corev1.ObjectReference{
					APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
					Kind:       "AzureClusterIdentity",
					Name:       clusterID.InfraID,
				},
			},
			NetworkSpec: capz.NetworkSpec{
				Vnet: capz.VnetSpec{
					ID: installConfig.Config.Azure.VirtualNetwork,
					VnetClassSpec: capz.VnetClassSpec{
						CIDRBlocks: []string{
							mainCIDR.String(),
						},
					},
				},
				Subnets: capz.Subnets{
					{
						SubnetClassSpec: capz.SubnetClassSpec{
							Name: "control-plane-subnet",
							Role: capz.SubnetControlPlane,
							CIDRBlocks: []string{
								subnets[0].String(),
							},
						},
					},
					{
						SubnetClassSpec: capz.SubnetClassSpec{
							Name: "worker-subnet",
							Role: capz.SubnetNode,
							CIDRBlocks: []string{
								subnets[1].String(),
							},
						},
					},
				},
			},
		},
	}
	manifests = append(manifests, &capiutils.Manifest{Object: azureCluster, Filename: "02_azure-cluster.yaml"})

	azureClientSecret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      clusterID.InfraID + "-azure-client-secret",
			Namespace: capiutils.Namespace,
		},
		StringData: map[string]string{
			"clientSecret": session.Credentials.ClientSecret,
		},
	}
	manifests = append(manifests, &capiutils.Manifest{Object: azureClientSecret, Filename: "01_azure-client-secret.yaml"})

	id := &capz.AzureClusterIdentity{
		ObjectMeta: metav1.ObjectMeta{
			Name:      clusterID.InfraID,
			Namespace: capiutils.Namespace,
		},
		Spec: capz.AzureClusterIdentitySpec{
			Type:              capz.ManualServicePrincipal,
			AllowedNamespaces: &capz.AllowedNamespaces{}, // Allow all namespaces.
			ClientID:          session.Credentials.ClientID,
			ClientSecret: corev1.SecretReference{
				Name:      azureClientSecret.Name,
				Namespace: azureClientSecret.Namespace,
			},
			TenantID: session.Credentials.TenantID,
		},
	}
	manifests = append(manifests, &capiutils.Manifest{Object: id, Filename: "01_azure-cluster-identity.yaml"})

	return &capiutils.GenerateClusterAssetsOutput{
		Manifests: manifests,
		InfrastructureRef: &corev1.ObjectReference{
			APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
			Kind:       "AzureCluster",
			Name:       azureCluster.Name,
			Namespace:  azureCluster.Namespace,
		},
	}, nil
}
```
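The only derived networking input on the Azure side is the split of the machine CIDR into two equal halves, one per subnet role. `cidr.SplitIntoSubnetsIPv4` is not part of this diff either; the sketch below illustrates the behavior the call above appears to rely on for a count of 2 (halve the prefix once). `splitIntoTwo` is a hypothetical stand-in, not the real helper.

```go
package main

import (
	"fmt"
	"net"
)

// splitIntoTwo mimics what cidr.SplitIntoSubnetsIPv4(s, 2) is assumed to do:
// extend the parent prefix by one bit, yielding two equal child subnets.
func splitIntoTwo(s string) ([]*net.IPNet, error) {
	_, parent, err := net.ParseCIDR(s)
	if err != nil {
		return nil, err
	}
	ones, bits := parent.Mask.Size()
	childMask := net.CIDRMask(ones+1, bits)

	// The first child shares the parent's network address.
	first := &net.IPNet{IP: parent.IP.Mask(childMask), Mask: childMask}

	// The second child starts at the parent's midpoint: set the new prefix bit.
	secondIP := make(net.IP, len(first.IP))
	copy(secondIP, first.IP)
	secondIP[ones/8] |= 1 << (7 - ones%8)
	second := &net.IPNet{IP: secondIP, Mask: childMask}

	return []*net.IPNet{first, second}, nil
}

func main() {
	subnets, err := splitIntoTwo("10.0.0.0/16")
	if err != nil {
		panic(err)
	}
	// These halves would feed control-plane-subnet and worker-subnet respectively.
	fmt.Println(subnets[0], subnets[1]) // 10.0.0.0/17 10.0.128.0/17
}
```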

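How these per-platform generators get wired up is outside this commit. Below is a hedged sketch of the dispatch a caller might perform with the two `GenerateClusterAssets` functions above; the enclosing package, the import aliases, and the platform switch are illustrative assumptions, not code from this change.

```go
package manifests

import (
	"fmt"

	"github.com/openshift/installer/pkg/asset/installconfig"
	awscapi "github.com/openshift/installer/pkg/asset/manifests/aws"
	azurecapi "github.com/openshift/installer/pkg/asset/manifests/azure"
	"github.com/openshift/installer/pkg/asset/manifests/capiutils"
)

// generateInfraManifests picks the platform-specific Cluster API generator.
// Hypothetical helper for illustration only.
func generateInfraManifests(ic *installconfig.InstallConfig, id *installconfig.ClusterID) (*capiutils.GenerateClusterAssetsOutput, error) {
	switch ic.Config.Platform.Name() {
	case "aws":
		return awscapi.GenerateClusterAssets(ic, id)
	case "azure":
		return azurecapi.GenerateClusterAssets(ic, id)
	default:
		return nil, fmt.Errorf("no Cluster API infrastructure generator for platform %q", ic.Config.Platform.Name())
	}
}
```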