Skip to content

Commit 6704b54

Browse files
author
Alay Patel
authored
Add support for ingress and automation for developers to test on minikube (#56)
* add automation to create two minikube clusters * Add --endpoint flag to transfer-pvc subcommand * add hack/delete-clusters.sh * garbage collect resources after trying to follow logs * fixups: add changes to scripts from PR review Co-authored-by: David Zager <dzager@redhat.com> * update ingress endpoint to pass subdomain * fixups: update bash scripts with suggestions from code review round 2 Co-authored-by: David Zager <dzager@redhat.com> * fixups: run gofmt and goimports * bump go mod to use latest crane-lib
1 parent f45be82 commit 6704b54

File tree

5 files changed

+246
-43
lines changed

5 files changed

+246
-43
lines changed

cmd/transfer-pvc/transfer-pvc.go

Lines changed: 167 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -8,8 +8,8 @@ import (
88
"os"
99
"time"
1010

11-
"github.com/konveyor/crane-lib/state_transfer"
1211
"github.com/konveyor/crane-lib/state_transfer/endpoint"
12+
"github.com/konveyor/crane-lib/state_transfer/endpoint/ingress"
1313
"github.com/konveyor/crane-lib/state_transfer/endpoint/route"
1414
"github.com/konveyor/crane-lib/state_transfer/meta"
1515
metadata "github.com/konveyor/crane-lib/state_transfer/meta"
@@ -21,9 +21,12 @@ import (
2121
"github.com/sirupsen/logrus"
2222
"github.com/spf13/cobra"
2323
corev1 "k8s.io/api/core/v1"
24+
networkingv1 "k8s.io/api/networking/v1"
2425
"k8s.io/apimachinery/pkg/api/errors"
2526
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
27+
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
2628
"k8s.io/apimachinery/pkg/types"
29+
errorsutil "k8s.io/apimachinery/pkg/util/errors"
2730
"k8s.io/apimachinery/pkg/util/wait"
2831
"k8s.io/cli-runtime/pkg/genericclioptions"
2932
"k8s.io/client-go/kubernetes"
@@ -42,6 +45,7 @@ type TransferPVCOptions struct {
4245
DestinationContext string
4346
PVCName string
4447
PVCNamespace string
48+
Endpoint string
4549

4650
// TODO: add more fields for PVC mapping/think of a config file to get inputs?
4751
sourceContext *clientcmdapi.Context
@@ -83,6 +87,7 @@ func addFlagsForTransferPVCOptions(t *TransferPVCOptions, cmd *cobra.Command) {
8387
cmd.Flags().StringVar(&t.DestinationContext, "destination-context", "", "The name of destination context current kubeconfig")
8488
cmd.Flags().StringVar(&t.PVCNamespace, "pvc-namespace", "", "The namespace of the pvc which is to be transferred, if empty it will try to use the namespace in source-context, if both are empty it will error")
8589
cmd.Flags().StringVar(&t.PVCName, "pvc-name", "", "The pvc name which is to be transferred on the source")
90+
cmd.Flags().StringVar(&t.Endpoint, "endpoint", "nginx-ingress", "The type of networking endpoint to use to accept traffic in destination cluster. The options available are `nginx-ingress` and `route`")
8691
}
8792

8893
func (t *TransferPVCOptions) Complete(c *cobra.Command, args []string) error {
@@ -181,12 +186,6 @@ func (t *TransferPVCOptions) run() error {
181186
log.Fatal(err, "unable to get destination client")
182187
}
183188

184-
// quiesce the applications if needed on the source side
185-
err = state_transfer.QuiesceApplications(srcCfg, t.PVCNamespace)
186-
if err != nil {
187-
log.Fatal(err, "unable to quiesce application on source cluster")
188-
}
189-
190189
// set up the PVC on destination to receive the data
191190
pvc := &corev1.PersistentVolumeClaim{}
192191
err = srcClient.Get(context.TODO(), client.ObjectKey{Namespace: t.PVCNamespace, Name: t.PVCName}, pvc)
@@ -211,39 +210,15 @@ func (t *TransferPVCOptions) run() error {
211210
log.Fatal(err, "invalid pvc list")
212211
}
213212

214-
// create a route for data transfer
215-
// TODO: pass in subdomain instead of ""
216-
r := route.NewEndpoint(
217-
types.NamespacedName{
218-
Namespace: pvc.Namespace,
219-
Name: pvc.Name,
220-
}, route.EndpointTypePassthrough, metadata.Labels, "")
221-
e, err := endpoint.Create(r, destClient)
222-
if err != nil {
223-
log.Fatal(err, "unable to create route endpoint")
224-
}
225-
226-
_ = wait.PollUntil(time.Second*5, func() (done bool, err error) {
227-
e, err := route.GetEndpointFromKubeObjects(destClient, e.NamespacedName())
228-
if err != nil {
229-
log.Println(err, "unable to check route health, retrying...")
230-
return false, nil
231-
}
232-
ready, err := e.IsHealthy(destClient)
233-
if err != nil {
234-
log.Println(err, "unable to check route health, retrying...")
235-
return false, nil
236-
}
237-
return ready, nil
238-
}, make(<-chan struct{}))
239-
240-
e, err = route.GetEndpointFromKubeObjects(destClient, e.NamespacedName())
241-
if err != nil {
242-
log.Fatal(err, "unable to get the route object")
243-
} else {
244-
log.Println("route endpoint is created and is healthy")
213+
var e endpoint.Endpoint
214+
switch t.Endpoint {
215+
case "route":
216+
e = createAndWaitForRoute(pvc, destClient)
217+
case "nginx-ingress":
218+
e = createAndWaitForIngress(pvc, destClient)
219+
default:
220+
log.Fatalf("unsupported endpoint type %s\n", t.Endpoint)
245221
}
246-
247222
// create a stunnel transport to carry the data over the endpoint
248223

249224
s := stunnel.NewTransport(meta.NewNamespacedPair(
@@ -278,7 +253,7 @@ func (t *TransferPVCOptions) run() error {
278253
rsync.Username("root"),
279254
}
280255

281-
rsyncTransfer, err := rsync.NewTransfer(s, r, srcCfg, destCfg, pvcList, rsyncTransferOptions...)
256+
rsyncTransfer, err := rsync.NewTransfer(s, e, srcCfg, destCfg, pvcList, rsyncTransferOptions...)
282257
if err != nil {
283258
log.Fatal(err, "error creating rsync transfer")
284259
} else {
@@ -312,6 +287,80 @@ func (t *TransferPVCOptions) run() error {
312287
log.Fatal(err, "error following rsync client logs")
313288
}
314289

290+
log.Println("followed the logs, garbage collecting created resources on both source and destination")
291+
return garbageCollect(srcClient, destClient, map[string]string{"app": "crane2"}, t.Endpoint, t.PVCNamespace)
292+
}
293+
294+
func garbageCollect(srcClient client.Client, destClient client.Client, labels map[string]string, endpoint, namespace string) error {
295+
srcGVK := []client.Object{
296+
&corev1.Pod{},
297+
&corev1.ConfigMap{},
298+
&corev1.Secret{},
299+
}
300+
destGVK := []client.Object{
301+
&corev1.Pod{},
302+
&corev1.ConfigMap{},
303+
&corev1.Secret{},
304+
}
305+
switch endpoint {
306+
case "route":
307+
destGVK = append(destGVK, &routev1.Route{})
308+
case "nginx-ingress":
309+
destGVK = append(destGVK, &networkingv1.Ingress{})
310+
}
311+
312+
err := deleteResourcesForGVK(srcClient, srcGVK, labels, namespace)
313+
if err != nil {
314+
return err
315+
}
316+
317+
err = deleteResourcesForGVK(destClient, destGVK, labels, namespace)
318+
if err != nil {
319+
return err
320+
}
321+
322+
return deleteResourcesIteratively(destClient, []client.Object{
323+
&corev1.Service{
324+
TypeMeta: metav1.TypeMeta{
325+
Kind: "Service",
326+
APIVersion: corev1.SchemeGroupVersion.Version,
327+
},
328+
}}, labels, namespace)
329+
}
330+
331+
func deleteResourcesIteratively(c client.Client, iterativeTypes []client.Object, labels map[string]string, namespace string) error {
332+
listOptions := []client.ListOption{
333+
client.MatchingLabels(labels),
334+
client.InNamespace(namespace),
335+
}
336+
errs := []error{}
337+
for _, objList := range iterativeTypes {
338+
ulist := &unstructured.UnstructuredList{}
339+
ulist.SetGroupVersionKind(objList.GetObjectKind().GroupVersionKind())
340+
err := c.List(context.TODO(), ulist, listOptions...)
341+
if err != nil {
342+
// if we hit error with one api still try all others
343+
errs = append(errs, err)
344+
continue
345+
}
346+
for _, item := range ulist.Items {
347+
err = c.Delete(context.TODO(), &item, client.PropagationPolicy(metav1.DeletePropagationBackground))
348+
if err != nil {
349+
// if we hit error deleting on continue delete others
350+
errs = append(errs, err)
351+
}
352+
}
353+
}
354+
return errorsutil.NewAggregate(errs)
355+
}
356+
357+
func deleteResourcesForGVK(c client.Client, gvk []client.Object, labels map[string]string, namespace string) error {
358+
for _, obj := range gvk {
359+
err := c.DeleteAllOf(context.TODO(), obj, client.InNamespace(namespace), client.MatchingLabels(labels))
360+
if err != nil {
361+
return err
362+
}
363+
}
315364
return nil
316365
}
317366

@@ -334,6 +383,10 @@ func followClientLogs(srcConfig *rest.Config, c client.Client, namespace string,
334383
clientPod = &clientPodList.Items[0]
335384

336385
for _, containerStatus := range clientPod.Status.ContainerStatuses {
386+
if containerStatus.State.Terminated != nil && containerStatus.State.Terminated.ExitCode == 0 {
387+
log.Printf("container %s in pod %s completed successfully", containerStatus.Name, client.ObjectKey{Namespace: namespace, Name: clientPod.Name})
388+
break
389+
}
337390
if !containerStatus.Ready {
338391
log.Println(fmt.Errorf("container %s in pod %s is not ready", containerStatus.Name, client.ObjectKey{Namespace: namespace, Name: clientPod.Name}))
339392
return false, nil
@@ -370,6 +423,80 @@ func followClientLogs(srcConfig *rest.Config, c client.Client, namespace string,
370423
return err
371424
}
372425

426+
func createAndWaitForIngress(pvc *corev1.PersistentVolumeClaim, destClient client.Client) endpoint.Endpoint {
427+
// create an ingress for data transfer
428+
// TODO: add a config flag for subdomain
429+
r := ingress.NewEndpoint(
430+
types.NamespacedName{
431+
Namespace: pvc.Namespace,
432+
Name: pvc.Name,
433+
}, metadata.Labels, "crane.dev")
434+
e, err := endpoint.Create(r, destClient)
435+
if err != nil {
436+
log.Fatal(err, "unable to create endpoint")
437+
}
438+
439+
_ = wait.PollUntil(time.Second*5, func() (done bool, err error) {
440+
e, err := ingress.GetEndpointFromKubeObjects(destClient, e.NamespacedName())
441+
if err != nil {
442+
log.Println(err, "unable to check health, retrying...")
443+
return false, nil
444+
}
445+
ready, err := e.IsHealthy(destClient)
446+
if err != nil {
447+
log.Println(err, "unable to check health, retrying...")
448+
return false, nil
449+
}
450+
return ready, nil
451+
}, make(<-chan struct{}))
452+
453+
e, err = ingress.GetEndpointFromKubeObjects(destClient, e.NamespacedName())
454+
if err != nil {
455+
log.Fatal(err, "unable to get the ingress object")
456+
} else {
457+
log.Println("endpoint is created and is healthy")
458+
}
459+
460+
return e
461+
}
462+
463+
func createAndWaitForRoute(pvc *corev1.PersistentVolumeClaim, destClient client.Client) endpoint.Endpoint {
464+
// create a route for data transfer
465+
// TODO: pass in subdomain instead of ""
466+
r := route.NewEndpoint(
467+
types.NamespacedName{
468+
Namespace: pvc.Namespace,
469+
Name: pvc.Name,
470+
}, route.EndpointTypePassthrough, metadata.Labels, "")
471+
e, err := endpoint.Create(r, destClient)
472+
if err != nil {
473+
log.Fatal(err, "unable to create route endpoint")
474+
}
475+
476+
_ = wait.PollUntil(time.Second*5, func() (done bool, err error) {
477+
e, err := route.GetEndpointFromKubeObjects(destClient, e.NamespacedName())
478+
if err != nil {
479+
log.Println(err, "unable to check route health, retrying...")
480+
return false, nil
481+
}
482+
ready, err := e.IsHealthy(destClient)
483+
if err != nil {
484+
log.Println(err, "unable to check route health, retrying...")
485+
return false, nil
486+
}
487+
return ready, nil
488+
}, make(<-chan struct{}))
489+
490+
e, err = route.GetEndpointFromKubeObjects(destClient, e.NamespacedName())
491+
if err != nil {
492+
log.Fatal(err, "unable to get the route object")
493+
} else {
494+
log.Println("route endpoint is created and is healthy")
495+
}
496+
497+
return e
498+
}
499+
373500
func clearDestPVC(destPVC *corev1.PersistentVolumeClaim) {
374501
// TODO: some of this needs to be configuration option exposed to the user
375502
destPVC.ResourceVersion = ""

go.mod

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ go 1.16
55
require (
66
github.com/ghodss/yaml v1.0.0
77
github.com/jarcoal/httpmock v1.0.8
8-
github.com/konveyor/crane-lib v0.0.4
8+
github.com/konveyor/crane-lib v0.0.5
99
github.com/mitchellh/mapstructure v1.4.1
1010
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5
1111
github.com/openshift/api v0.0.0-20210625082935-ad54d363d274

go.sum

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -461,8 +461,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
461461
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
462462
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
463463
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
464-
github.com/konveyor/crane-lib v0.0.4 h1:CWGBC5MTmdlrEqu1F5eBSR0HRYBlo2QV/Y/bHguJPvM=
465-
github.com/konveyor/crane-lib v0.0.4/go.mod h1:C0H3dr85YlsaAt1Av7zFu4IPdwG4+SW7wEBFE+1udTw=
464+
github.com/konveyor/crane-lib v0.0.5 h1:qDpSvCJTy76lat1p03EuPW4EDsC+Yy3k/zhVzEJozEc=
465+
github.com/konveyor/crane-lib v0.0.5/go.mod h1:C0H3dr85YlsaAt1Av7zFu4IPdwG4+SW7wEBFE+1udTw=
466466
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
467467
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
468468
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=

hack/minikube-clusters-delete.sh

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
#!/usr/bin/env bash
2+
set +x
3+
4+
SRC_CLUSTER_NAME=src
5+
DEST_CLUSTER_NAME=dest
6+
7+
SOURCE_IP=$(minikube ip -p ${SRC_CLUSTER_NAME})
8+
DEST_IP=$(minikube ip -p ${DEST_CLUSTER_NAME})
9+
SOURCE_IP_RANGE="${SOURCE_IP%.*}.0/24"
10+
DEST_IP_RANGE="${DEST_IP%.*}.0/24"
11+
12+
sudo iptables -D FORWARD -p all -s $SOURCE_IP_RANGE -d $DEST_IP_RANGE -j ACCEPT
13+
sudo iptables -D FORWARD -p all -s $DEST_IP_RANGE -d $SOURCE_IP_RANGE -j ACCEPT
14+
15+
minikube delete -p ${SRC_CLUSTER_NAME}
16+
minikube delete -p ${DEST_CLUSTER_NAME}
17+

hack/minikube-clusters-start.sh

Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,59 @@
1+
#!/usr/bin/env bash
2+
set +x
3+
4+
SRC_CLUSTER_NAME=src
5+
DEST_CLUSTER_NAME=dest
6+
7+
minikube status -p ${SRC_CLUSTER_NAME} >> /dev/null
8+
if [[ $? == 0 ]]; then
9+
echo "run hack/minikube-clusters-delete.sh before running this script"; exit 1
10+
fi
11+
minikube status -p ${DEST_CLUSTER_NAME} >> /dev/null
12+
if [[ $? == 0 ]]; then
13+
echo "run hack/minikube-clusters-delete.sh before running this script"; exit 1
14+
fi
15+
16+
echo "create two minikube clusters"
17+
18+
minikube start -p ${SRC_CLUSTER_NAME}
19+
minikube start -p ${DEST_CLUSTER_NAME}
20+
21+
echo "clusters started, configuring networking between source and destination clusters"
22+
23+
SOURCE_IP=$(minikube ip -p ${SRC_CLUSTER_NAME})
24+
DEST_IP=$(minikube ip -p ${DEST_CLUSTER_NAME})
25+
SOURCE_IP_RANGE="${SOURCE_IP%.*}.0/24"
26+
DEST_IP_RANGE="${DEST_IP%.*}.0/24"
27+
28+
sudo iptables -I FORWARD 2 -p all -s $SOURCE_IP_RANGE -d $DEST_IP_RANGE -j ACCEPT
29+
sudo iptables -I FORWARD 3 -p all -s $DEST_IP_RANGE -d $SOURCE_IP_RANGE -j ACCEPT
30+
31+
minikube ssh -p ${SRC_CLUSTER_NAME} sudo ip r add $DEST_IP_RANGE via $(echo $SOURCE_IP | cut -d"." -f1-3).1
32+
minikube ssh -p ${DEST_CLUSTER_NAME} sudo ip r add $SOURCE_IP_RANGE via $(echo $DEST_IP | cut -d"." -f1-3).1
33+
34+
minikube ssh -p ${SRC_CLUSTER_NAME} "ping -c 4 ${DEST_IP}"
35+
if [ "$?" != 0 ];
36+
then
37+
echo "unable to set up networking"
38+
exit 1
39+
fi
40+
41+
echo "network setup successful, configuring nginx ingress on destination cluster"
42+
minikube addons -p ${DEST_CLUSTER_NAME} enable ingress
43+
44+
minikube update-context -p ${SRC_CLUSTER_NAME}
45+
46+
# this hack does not work if the script is run twice
47+
COREFILE=$(kubectl get cm -n kube-system coredns -ojson | jq '.data.Corefile')
48+
COREFILE=$(echo $COREFILE | sed s/'fallthrough\\n }\\n/& file \/etc\/coredns\/crane.db crane.dev\\n/')
49+
kubectl get cm -n kube-system coredns -ojson | jq ".data.Corefile = ${COREFILE}" | kubectl replace -f -
50+
51+
kubectl patch cm -n kube-system coredns --type='json' -p='[{"op": "replace", "path": "/data/crane.db", "value": "; crane.dev test file\ncrane.dev. IN SOA a.crane.dev. b.crane.dev. 2 604800 86400 2419200 604800\ncrane.dev. IN NS a.crane.dev.\ncrane.dev. IN NS b.crane.dev.\na.crane.dev. IN A 127.0.0.1\nb.crane.dev. IN A 127.0.0.1\n\n*.crane.dev. IN A DEST_IP\n"}]'
52+
kubectl get cm -n kube-system coredns -oyaml | sed "s/DEST_IP/${DEST_IP}/" | kubectl replace -f -
53+
54+
kubectl patch deploy -n kube-system coredns --type='json' -p='[{"op": "add", "path": "/spec/template/spec/volumes/0/configMap/items/1", "value": {"key": "crane.db", "path": "crane.db"}}]'
55+
56+
kubectl patch deploy --context=${DEST_CLUSTER_NAME} -n ingress-nginx ingress-nginx-controller --type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/12", "value": "--enable-ssl-passthrough"}]'
57+
58+
# force a rollout
59+
kubectl delete rs -n ingress-nginx --context=${DEST_CLUSTER_NAME} -l app.kubernetes.io/component=controller,app.kubernetes.io/instance=ingress-nginx

0 commit comments

Comments
 (0)