Skip to content

Commit 4b04993

Browse files
authored
Merge pull request #482 from alexeldeib/ace/caks
feat: aks provider
2 parents 8d4a40f + e09bf5a commit 4b04993

File tree

50 files changed

+3088
-117
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

50 files changed

+3088
-117
lines changed

.dockerignore

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,8 @@
66
!/cloud/**
77
!/controllers/**
88
!/exp/**
9+
!/feature/**
910
!/pkg/**
1011
!/main.go
1112
!/go.mod
12-
!/go.sum
13+
!/go.sum

Makefile

Lines changed: 18 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -73,6 +73,7 @@ RBAC_ROOT ?= $(MANIFEST_ROOT)/rbac
7373
PULL_POLICY ?= Always
7474

7575
CLUSTER_TEMPLATE ?= cluster-template.yaml
76+
MANAGED_CLUSTER_TEMPLATE ?= cluster-template-aks.yaml
7677

7778
## --------------------------------------
7879
## Help
@@ -319,7 +320,7 @@ create-management-cluster: $(KUSTOMIZE) $(ENVSUBST)
319320
$(MAKE) kind-create
320321

321322
# Install cert manager and wait for availability
322-
kubectl create -f https://github.com/jetstack/cert-manager/releases/download/v0.11.1/cert-manager.yaml
323+
kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v0.11.1/cert-manager.yaml
323324
kubectl wait --for=condition=Available --timeout=5m apiservice v1beta1.webhook.cert-manager.io
324325

325326
# Deploy CAPI
@@ -347,16 +348,30 @@ create-workload-cluster: $(ENVSUBST)
347348
$(ENVSUBST) < $(TEMPLATES_DIR)/$(CLUSTER_TEMPLATE) | kubectl apply -f -
348349

349350
# Wait for the kubeconfig to become available.
350-
timeout 300 bash -c "while ! kubectl get secrets | grep $(CLUSTER_NAME)-kubeconfig; do sleep 1; done"
351+
timeout --foreground 300 bash -c "while ! kubectl get secrets | grep $(CLUSTER_NAME)-kubeconfig; do sleep 1; done"
351352
# Get kubeconfig and store it locally.
352353
kubectl get secrets $(CLUSTER_NAME)-kubeconfig -o json | jq -r .data.value | base64 --decode > ./kubeconfig
353-
timeout 600 bash -c "while ! kubectl --kubeconfig=./kubeconfig get nodes | grep master; do sleep 1; done"
354+
timeout --foreground 600 bash -c "while ! kubectl --kubeconfig=./kubeconfig get nodes | grep master; do sleep 1; done"
354355

355356
# Deploy calico
356357
kubectl --kubeconfig=./kubeconfig apply -f templates/addons/calico.yaml
357358

358359
@echo 'run "kubectl --kubeconfig=./kubeconfig ..." to work with the new target cluster'
359360

361+
.PHONY: create-aks-cluster
create-aks-cluster: $(KUSTOMIZE) $(ENVSUBST) ## Create a managed AKS cluster from the managed cluster template.
	# Create managed Cluster.
	$(ENVSUBST) < $(TEMPLATES_DIR)/$(MANAGED_CLUSTER_TEMPLATE) | kubectl apply -f -

	# Wait for the kubeconfig to become available.
	# --foreground lets Ctrl-C interrupt the wait loop when run from a TTY.
	timeout --foreground 300 bash -c "while ! kubectl get secrets | grep $(CLUSTER_NAME)-kubeconfig; do sleep 1; done"
	# Get kubeconfig and store it locally.
	kubectl get secrets $(CLUSTER_NAME)-kubeconfig -o json | jq -r .data.value | base64 --decode > ./kubeconfig
	# Wait until at least one node is visible through the new kubeconfig.
	timeout --foreground 600 bash -c "while ! kubectl --kubeconfig=./kubeconfig get nodes | grep master; do sleep 1; done"

	# No CNI is applied here: AKS manages networking for the cluster itself.
	@echo 'run "kubectl --kubeconfig=./kubeconfig ..." to work with the new target cluster'
373+
374+
360375
.PHONY: create-cluster
361376
create-cluster: create-management-cluster create-workload-cluster ## Create a workload development Kubernetes cluster on Azure in a kind management cluster.
362377

cloud/interfaces.go

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,3 +35,10 @@ type GetterService interface {
3535
Reconcile(ctx context.Context, spec interface{}) error
3636
Delete(ctx context.Context, spec interface{}) error
3737
}
38+
39+
// CredentialGetter is a GetterService which knows how to retrieve credentials for an Azure
// resource in a resource group.
type CredentialGetter interface {
	GetterService
	// GetCredentials returns the raw credential payload for the named cluster
	// in the given resource group (presumably a kubeconfig — confirm with implementers).
	GetCredentials(ctx context.Context, group string, cluster string) ([]byte, error)
}

cloud/scope/machinepool.go

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,7 @@ type (
5353
// MachinePoolScope defines a scope defined around a machine pool and its cluster.
5454
MachinePoolScope struct {
5555
logr.Logger
56+
AzureClients
5657
client client.Client
5758
patchHelper *patch.Helper
5859
Cluster *capiv1.Cluster
@@ -85,6 +86,10 @@ func NewMachinePoolScope(params MachinePoolScopeParams) (*MachinePoolScope, erro
8586
params.Logger = klogr.New()
8687
}
8788

89+
if err := params.AzureClients.setCredentials(params.AzureCluster.Spec.SubscriptionID); err != nil {
90+
return nil, errors.Wrap(err, "failed to create Azure session")
91+
}
92+
8893
helper, err := patch.NewHelper(params.AzureMachinePool, params.Client)
8994
if err != nil {
9095
return nil, errors.Wrap(err, "failed to init patch helper")
@@ -95,6 +100,7 @@ func NewMachinePoolScope(params MachinePoolScopeParams) (*MachinePoolScope, erro
95100
MachinePool: params.MachinePool,
96101
AzureCluster: params.AzureCluster,
97102
AzureMachinePool: params.AzureMachinePool,
103+
AzureClients: params.AzureClients,
98104
Logger: params.Logger,
99105
patchHelper: helper,
100106
}, nil

cloud/scope/managedcontrolplane.go

Lines changed: 100 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,100 @@
1+
/*
2+
Copyright 2020 The Kubernetes Authors.
3+
4+
Licensed under the Apache License, Version 2.0 (the "License");
5+
you may not use this file except in compliance with the License.
6+
You may obtain a copy of the License at
7+
8+
http://www.apache.org/licenses/LICENSE-2.0
9+
10+
Unless required by applicable law or agreed to in writing, software
11+
distributed under the License is distributed on an "AS IS" BASIS,
12+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
See the License for the specific language governing permissions and
14+
limitations under the License.
15+
*/
16+
17+
package scope
18+
19+
import (
20+
"context"
21+
22+
"github.com/go-logr/logr"
23+
"github.com/pkg/errors"
24+
"k8s.io/apimachinery/pkg/runtime"
25+
"k8s.io/klog/klogr"
26+
infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1alpha3"
27+
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
28+
expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
29+
30+
"sigs.k8s.io/cluster-api/util/patch"
31+
"sigs.k8s.io/controller-runtime/pkg/client"
32+
)
33+
34+
// ManagedControlPlaneScopeParams defines the input parameters used to create a new
// ManagedControlPlaneScope.
type ManagedControlPlaneScopeParams struct {
	// AzureClients carries the Azure API clients; credentials are set during scope creation.
	AzureClients
	Client           client.Client
	Logger           logr.Logger
	Cluster          *clusterv1.Cluster
	ControlPlane     *infrav1exp.AzureManagedControlPlane
	InfraMachinePool *infrav1exp.AzureManagedMachinePool
	MachinePool      *expv1.MachinePool
	// PatchTarget is the object the scope's patch helper tracks and patches.
	PatchTarget runtime.Object
}
45+
46+
// NewManagedControlPlaneScope creates a new Scope from the supplied parameters.
47+
// This is meant to be called for each reconcile iteration.
48+
func NewManagedControlPlaneScope(params ManagedControlPlaneScopeParams) (*ManagedControlPlaneScope, error) {
49+
if params.Cluster == nil {
50+
return nil, errors.New("failed to generate new scope from nil Cluster")
51+
}
52+
53+
if params.ControlPlane == nil {
54+
return nil, errors.New("failed to generate new scope from nil ControlPlane")
55+
}
56+
57+
if params.Logger == nil {
58+
params.Logger = klogr.New()
59+
}
60+
61+
if err := params.AzureClients.setCredentials(params.ControlPlane.Spec.SubscriptionID); err != nil {
62+
return nil, errors.Wrap(err, "failed to create Azure session")
63+
}
64+
65+
helper, err := patch.NewHelper(params.PatchTarget, params.Client)
66+
if err != nil {
67+
return nil, errors.Wrap(err, "failed to init patch helper")
68+
}
69+
70+
return &ManagedControlPlaneScope{
71+
Logger: params.Logger,
72+
Client: params.Client,
73+
AzureClients: params.AzureClients,
74+
Cluster: params.Cluster,
75+
ControlPlane: params.ControlPlane,
76+
MachinePool: params.MachinePool,
77+
InfraMachinePool: params.InfraMachinePool,
78+
PatchTarget: params.PatchTarget,
79+
patchHelper: helper,
80+
}, nil
81+
}
82+
83+
// ManagedControlPlaneScope defines the basic context for an actuator to operate upon.
type ManagedControlPlaneScope struct {
	logr.Logger
	Client client.Client
	// patchHelper tracks PatchTarget and computes/applies patches in PatchObject.
	patchHelper *patch.Helper

	// AzureClients holds the authenticated Azure API clients for this scope.
	AzureClients
	Cluster          *clusterv1.Cluster
	MachinePool      *expv1.MachinePool
	ControlPlane     *infrav1exp.AzureManagedControlPlane
	InfraMachinePool *infrav1exp.AzureManagedMachinePool
	// PatchTarget is the object persisted by PatchObject.
	PatchTarget runtime.Object
}
96+
97+
// PatchObject persists the cluster configuration and status.
98+
func (s *ManagedControlPlaneScope) PatchObject(ctx context.Context) error {
99+
return s.patchHelper.Patch(ctx, s.PatchTarget)
100+
}
Lines changed: 133 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,133 @@
1+
/*
2+
Copyright 2020 The Kubernetes Authors.
3+
4+
Licensed under the Apache License, Version 2.0 (the "License");
5+
you may not use this file except in compliance with the License.
6+
You may obtain a copy of the License at
7+
8+
http://www.apache.org/licenses/LICENSE-2.0
9+
10+
Unless required by applicable law or agreed to in writing, software
11+
distributed under the License is distributed on an "AS IS" BASIS,
12+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
See the License for the specific language governing permissions and
14+
limitations under the License.
15+
*/
16+
17+
package agentpools
18+
19+
import (
20+
"context"
21+
"fmt"
22+
23+
"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2020-02-01/containerservice"
24+
"github.com/google/go-cmp/cmp"
25+
"github.com/pkg/errors"
26+
"k8s.io/klog"
27+
azure "sigs.k8s.io/cluster-api-provider-azure/cloud"
28+
)
29+
30+
// Spec contains properties to create an agent pool.
type Spec struct {
	// Name is the name of the agent pool.
	Name string
	// ResourceGroup is the Azure resource group containing the managed cluster.
	ResourceGroup string
	// Cluster is the name of the managed (AKS) cluster owning this pool.
	Cluster string
	// Version is the orchestrator (Kubernetes) version for the pool's nodes;
	// nil presumably defers to the AKS default — confirm with the API.
	Version *string
	// SKU is the Azure VM size used for the pool's nodes.
	SKU string
	// Replicas is the desired number of nodes in the pool.
	Replicas int32
	// OSDiskSizeGB is the OS disk size, in GB, for each node.
	OSDiskSizeGB int32
}
40+
41+
// Get fetches an agent pool from Azure.
// spec must be a *Spec; on success the result is a containerservice.AgentPool.
func (s *Service) Get(ctx context.Context, spec interface{}) (interface{}, error) {
	agentPoolSpec, ok := spec.(*Spec)
	if !ok {
		return containerservice.AgentPool{}, errors.New("expected agent pool specification")
	}
	return s.Client.Get(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name)
}
49+
50+
// Reconcile idempotently creates or updates a agent pool, if possible.
51+
func (s *Service) Reconcile(ctx context.Context, spec interface{}) error {
52+
agentPoolSpec, ok := spec.(*Spec)
53+
if !ok {
54+
return errors.New("expected agent pool specification")
55+
}
56+
57+
profile := containerservice.AgentPool{
58+
ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{
59+
VMSize: containerservice.VMSizeTypes(agentPoolSpec.SKU),
60+
OsDiskSizeGB: &agentPoolSpec.OSDiskSizeGB,
61+
Count: &agentPoolSpec.Replicas,
62+
Type: containerservice.VirtualMachineScaleSets,
63+
OrchestratorVersion: agentPoolSpec.Version,
64+
},
65+
}
66+
67+
existingSpec, err := s.Get(ctx, spec)
68+
if err != nil && !azure.ResourceNotFound(err) {
69+
return errors.Wrapf(err, "failed to get existing agent pool")
70+
}
71+
existingPool, ok := existingSpec.(containerservice.AgentPool)
72+
if !ok {
73+
return errors.New("expected agent pool specification")
74+
}
75+
76+
// For updates, we want to pass whatever we find in the existing
77+
// cluster, normalized to reflect the input we originally provided.
78+
// AKS will populate defaults and read-only values, which we want
79+
// to strip/clean to match what we expect.
80+
isCreate := azure.ResourceNotFound(err)
81+
if isCreate {
82+
err = s.Client.CreateOrUpdate(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name, profile)
83+
if err != nil {
84+
return fmt.Errorf("failed to create or update agent pool, %#+v", err)
85+
}
86+
} else {
87+
// Normalize individual agent pools to diff in case we need to update
88+
existingProfile := containerservice.AgentPool{
89+
ManagedClusterAgentPoolProfileProperties: &containerservice.ManagedClusterAgentPoolProfileProperties{
90+
VMSize: existingPool.ManagedClusterAgentPoolProfileProperties.VMSize,
91+
OsDiskSizeGB: existingPool.ManagedClusterAgentPoolProfileProperties.OsDiskSizeGB,
92+
Count: existingPool.ManagedClusterAgentPoolProfileProperties.Count,
93+
Type: containerservice.VirtualMachineScaleSets,
94+
OrchestratorVersion: existingPool.ManagedClusterAgentPoolProfileProperties.OrchestratorVersion,
95+
},
96+
}
97+
98+
// Diff and check if we require an update
99+
diff := cmp.Diff(profile, existingProfile)
100+
if diff != "" {
101+
klog.V(2).Infof("Update required (+new -old):\n%s", diff)
102+
err = s.Client.CreateOrUpdate(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name, profile)
103+
if err != nil {
104+
return fmt.Errorf("failed to create or update agent pool, %#+v", err.Error())
105+
}
106+
} else {
107+
klog.V(2).Infof("Normalized and desired agent pool matched, no update needed")
108+
}
109+
}
110+
111+
return nil
112+
}
113+
114+
// Delete deletes the agent pool with the provided name, treating a
// not-found response as success so deletion is idempotent.
// spec must be a *Spec identifying the pool.
func (s *Service) Delete(ctx context.Context, spec interface{}) error {
	agentPoolSpec, ok := spec.(*Spec)
	if !ok {
		return errors.New("expected agent pool specification")
	}
	klog.V(2).Infof("deleting agent pool %s ", agentPoolSpec.Name)
	err := s.Client.Delete(ctx, agentPoolSpec.ResourceGroup, agentPoolSpec.Cluster, agentPoolSpec.Name)
	if err != nil {
		if azure.ResourceNotFound(err) {
			// already deleted
			return nil
		}
		return errors.Wrapf(err, "failed to delete agent pool %s in resource group %s", agentPoolSpec.Name, agentPoolSpec.ResourceGroup)
	}
	klog.V(2).Infof("Successfully deleted agent pool %s ", agentPoolSpec.Name)
	return nil
}

0 commit comments

Comments
 (0)