|
| 1 | +package core |
| 2 | + |
import (
	"errors"
	"fmt"
	"strings"

	"github.com/containers/kubernetes-mcp-server/pkg/api"
	internalk8s "github.com/containers/kubernetes-mcp-server/pkg/kubernetes"
	"github.com/google/jsonschema-go/jsonschema"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/rand"
	"sigs.k8s.io/yaml"
)
| 16 | + |
| 17 | +func initMustGatherPlan(o internalk8s.Openshift) []api.ServerTool { |
| 18 | + return []api.ServerTool{{ |
| 19 | + Tool: api.Tool{ |
| 20 | + Name: "must_gather_plan", |
| 21 | + Description: "Provides a detailed plan (read-only) to collect a must-gather bundle based on the flags/parameters supported by oc commands.", |
| 22 | + InputSchema: &jsonschema.Schema{ |
| 23 | + Type: "object", |
| 24 | + Properties: map[string]*jsonschema.Schema{ |
| 25 | + "image": { |
| 26 | + Type: "string", |
| 27 | + Description: "The image to use for the must-gather. Defaults to registry.redhat.io/openshift4/ose-must-gather:latest.", |
| 28 | + }, |
| 29 | + "dest_dir": { |
| 30 | + Type: "string", |
| 31 | + Description: "The destination directory for the output. Defaults to ./must-gather-results.", |
| 32 | + }, |
| 33 | + "node_name": { |
| 34 | + Type: "string", |
| 35 | + Description: "The node to gather information from.", |
| 36 | + }, |
| 37 | + "image_stream": { |
| 38 | + Type: "string", |
| 39 | + Description: "An image stream to use for the must-gather. (Not yet supported, use --image)", |
| 40 | + }, |
| 41 | + }, |
| 42 | + }, |
| 43 | + }, |
| 44 | + Handler: func(params api.ToolHandlerParams) (*api.ToolCallResult, error) { |
| 45 | + args := params.GetArguments() |
| 46 | + image, _ := args["image"].(string) |
| 47 | + destDir, _ := args["dest_dir"].(string) |
| 48 | + nodeName, _ := args["node_name"].(string) |
| 49 | + imageStream, _ := args["image_stream"].(string) |
| 50 | + |
| 51 | + if imageStream != "" { |
| 52 | + return nil, fmt.Errorf("the --image-stream parameter is not yet supported. Please use the --image parameter") |
| 53 | + } |
| 54 | + |
| 55 | + if image == "" { |
| 56 | + image = "registry.redhat.io/openshift4/ose-must-gather:latest" |
| 57 | + } |
| 58 | + if destDir == "" { |
| 59 | + destDir = "./must-gather-results" |
| 60 | + } |
| 61 | + |
| 62 | + suffix := rand.String(5) |
| 63 | + namespaceName := fmt.Sprintf("openshift-must-gather-%s", suffix) |
| 64 | + podName := fmt.Sprintf("must-gather-%s", suffix) |
| 65 | + serviceAccountName := "must-gather-admin" |
| 66 | + clusterRoleBindingName := fmt.Sprintf("%s-%s", namespaceName, serviceAccountName) |
| 67 | + |
| 68 | + namespace := &corev1.Namespace{ |
| 69 | + TypeMeta: metav1.TypeMeta{ |
| 70 | + APIVersion: "v1", |
| 71 | + Kind: "Namespace", |
| 72 | + }, |
| 73 | + ObjectMeta: metav1.ObjectMeta{ |
| 74 | + Name: namespaceName, |
| 75 | + }, |
| 76 | + } |
| 77 | + |
| 78 | + serviceAccount := &corev1.ServiceAccount{ |
| 79 | + TypeMeta: metav1.TypeMeta{ |
| 80 | + APIVersion: "v1", |
| 81 | + Kind: "ServiceAccount", |
| 82 | + }, |
| 83 | + ObjectMeta: metav1.ObjectMeta{ |
| 84 | + Name: serviceAccountName, |
| 85 | + Namespace: namespaceName, |
| 86 | + }, |
| 87 | + } |
| 88 | + |
| 89 | + clusterRoleBinding := &rbacv1.ClusterRoleBinding{ |
| 90 | + TypeMeta: metav1.TypeMeta{ |
| 91 | + APIVersion: "rbac.authorization.k8s.io/v1", |
| 92 | + Kind: "ClusterRoleBinding", |
| 93 | + }, |
| 94 | + ObjectMeta: metav1.ObjectMeta{ |
| 95 | + Name: clusterRoleBindingName, |
| 96 | + }, |
| 97 | + Subjects: []rbacv1.Subject{ |
| 98 | + { |
| 99 | + Kind: "ServiceAccount", |
| 100 | + Name: serviceAccountName, |
| 101 | + Namespace: namespaceName, |
| 102 | + }, |
| 103 | + }, |
| 104 | + RoleRef: rbacv1.RoleRef{ |
| 105 | + APIGroup: "rbac.authorization.k8s.io", |
| 106 | + Kind: "ClusterRole", |
| 107 | + Name: "cluster-admin", |
| 108 | + }, |
| 109 | + } |
| 110 | + |
| 111 | + pod := &corev1.Pod{ |
| 112 | + TypeMeta: metav1.TypeMeta{ |
| 113 | + APIVersion: "v1", |
| 114 | + Kind: "Pod", |
| 115 | + }, |
| 116 | + ObjectMeta: metav1.ObjectMeta{ |
| 117 | + Name: podName, |
| 118 | + Namespace: namespaceName, |
| 119 | + }, |
| 120 | + Spec: corev1.PodSpec{ |
| 121 | + Containers: []corev1.Container{ |
| 122 | + { |
| 123 | + Name: "must-gather", |
| 124 | + Image: image, |
| 125 | + Command: []string{ |
| 126 | + "/bin/sh", |
| 127 | + "-c", |
| 128 | + "/usr/bin/gather && sleep infinity", |
| 129 | + }, |
| 130 | + VolumeMounts: []corev1.VolumeMount{ |
| 131 | + { |
| 132 | + Name: "must-gather-output", |
| 133 | + MountPath: "/must-gather", |
| 134 | + }, |
| 135 | + }, |
| 136 | + }, |
| 137 | + }, |
| 138 | + Volumes: []corev1.Volume{ |
| 139 | + { |
| 140 | + Name: "must-gather-output", |
| 141 | + VolumeSource: corev1.VolumeSource{ |
| 142 | + EmptyDir: &corev1.EmptyDirVolumeSource{}, |
| 143 | + }, |
| 144 | + }, |
| 145 | + }, |
| 146 | + RestartPolicy: corev1.RestartPolicyNever, |
| 147 | + NodeName: nodeName, |
| 148 | + ServiceAccountName: serviceAccountName, |
| 149 | + }, |
| 150 | + } |
| 151 | + |
| 152 | + namespaceYaml, err := yaml.Marshal(namespace) |
| 153 | + if err != nil { |
| 154 | + return nil, fmt.Errorf("failed to marshal namespace to yaml: %w", err) |
| 155 | + } |
| 156 | + serviceAccountYaml, err := yaml.Marshal(serviceAccount) |
| 157 | + if err != nil { |
| 158 | + return nil, fmt.Errorf("failed to marshal service account to yaml: %w", err) |
| 159 | + } |
| 160 | + clusterRoleBindingYaml, err := yaml.Marshal(clusterRoleBinding) |
| 161 | + if err != nil { |
| 162 | + return nil, fmt.Errorf("failed to marshal cluster role binding to yaml: %w", err) |
| 163 | + } |
| 164 | + podYaml, err := yaml.Marshal(pod) |
| 165 | + if err != nil { |
| 166 | + return nil, fmt.Errorf("failed to marshal pod to yaml: %w", err) |
| 167 | + } |
| 168 | + |
| 169 | + var result strings.Builder |
| 170 | + result.WriteString("# Save the following content to a file (e.g., must-gather-plan.yaml) and apply it with 'kubectl apply -f must-gather-plan.yaml'\n") |
| 171 | + result.WriteString("# Monitor the pod's logs to see when the must-gather process is complete:\n") |
| 172 | + result.WriteString(fmt.Sprintf("# kubectl logs -f -n %s %s\n", namespaceName, podName)) |
| 173 | + result.WriteString("# Once the logs indicate completion, copy the results with:\n") |
| 174 | + result.WriteString(fmt.Sprintf("# kubectl cp -n %s %s:/must-gather %s\n", namespaceName, podName, destDir)) |
| 175 | + result.WriteString("# Finally, clean up the resources with:\n") |
| 176 | + result.WriteString(fmt.Sprintf("# kubectl delete ns %s\n", namespaceName)) |
| 177 | + result.WriteString(fmt.Sprintf("# kubectl delete clusterrolebinding %s\n", clusterRoleBindingName)) |
| 178 | + result.WriteString("---\n") |
| 179 | + result.Write(namespaceYaml) |
| 180 | + result.WriteString("---\n") |
| 181 | + result.Write(serviceAccountYaml) |
| 182 | + result.WriteString("---\n") |
| 183 | + result.Write(clusterRoleBindingYaml) |
| 184 | + result.WriteString("---\n") |
| 185 | + result.Write(podYaml) |
| 186 | + |
| 187 | + return api.NewToolCallResult(result.String(), nil), nil |
| 188 | + }, |
| 189 | + }} |
| 190 | +} |
0 commit comments