|
| 1 | +--- |
| 2 | +title: cdk8s through ArgoCD |
| 3 | +published: true |
| 4 | +--- |
| 5 | + |
| 6 | +- [Automatic deployment of kubernetes manifests described by cdk8s](#automatic-deployment-of-kubernetes-manifests-described-by-cdk8s) |
| 7 | + - [ArgoCD in short](#argocd-in-short) |
| 8 | + - [cdk8s in short](#cdk8s-in-short) |
| 9 | + - [An example](#an-example) |
| 10 | + - [Adding a cdk8s plugin to argocd](#adding-a-cdk8s-plugin-to-argocd) |
| 11 | + - [Dockerfile](#dockerfile) |
| 12 | + - [Plugging in as an argo-repo-server sidecar](#plugging-in-as-an-argo-repo-server-sidecar) |
| 13 | + - [ArgoCD documentation and further reading](#argocd-documentation-and-further-reading) |
| 14 | + - [The point of all this](#the-point-of-all-this) |
| 15 | + |
| 16 | + |
| 17 | +# Automatic deployment of kubernetes manifests described by cdk8s |
| 18 | + |
| 19 | +cdk8s is a tool from AWS that lets you deal with kubernetes manifests in an imperative way, which for a lot of people accelerates their investment in learning about and using kubernetes. Though perhaps not in line with the declarative nature of traditional gitops approaches, it is not really that different from jsonnet, helm and/or kustomize, and I think it simplifies a lot in terms of expressing not only what we want deployed but also how best to manage changes to that state. |
| 20 | + |
| 21 | +## ArgoCD in short |
| 22 | + |
| 23 | +Chances are if you are reading this you already know a bit about ArgoCD. It allows us to package kubernetes manifests in "applications", which helps to compartmentalize the things that you are managing inside kubernetes, as well as giving a cli and a UI at that abstraction level. For each application we can choose how we want to observe and synchronize the declared state of our application as ArgoCD applies it from git into kubernetes. |
| 24 | + |
| 25 | +https://argo-cd.readthedocs.io/en/stable/ |
| 26 | + |
| 27 | +## cdk8s in short |
| 28 | + |
| 29 | +cdk8s and similar tools allow you to generate the kubernetes json or yaml in a way that is a bit more advanced than kustomize. Whereas helm is a way to package apps for different types of consumption, and kustomize allows you to manipulate simple yaml further, cdk8s is a more heavyweight, ground-up yaml generation in different high level languages. |
| 30 | + |
| 31 | +In my example I am generating a deployment and a service with typescript, while also adding some extra npm packages for string manipulation. |
| 32 | + |
| 33 | +### An example |
| 34 | + |
| 35 | +``` typescript |
| 36 | +// A typescript app that when run through `cdk8s synth` becomes a deployment and a service |
| 37 | +import { Construct } from "constructs"; |
| 38 | +import { App, Chart, ChartProps } from "cdk8s"; |
| 39 | +import { |
| 40 | + IntOrString, |
| 41 | + KubeDeployment, |
| 42 | + KubeService, |
| 43 | + Quantity, |
| 44 | +} from "./imports/k8s"; |
| 45 | +import { kebabCase } from "lodash"; |
| 46 | + |
| 47 | +export class MyChart extends Chart { |
| 48 | + constructor( |
| 49 | + scope: Construct, |
| 50 | + id: string, |
| 51 | + props: ChartProps = { disableResourceNameHashes: true } |
| 52 | + ) { |
| 53 | + super(scope, id, props); |
| 54 | + |
| 55 | + const label = { |
| 56 | + app: "cdk8s-demo", |
| 57 | + demo: kebabCase("knowledge sharing"), |
| 58 | + }; |
| 59 | + new KubeDeployment(this, "deployment", { |
| 60 | + spec: { |
| 61 | + selector: { matchLabels: label }, |
| 62 | + replicas: 1, |
| 63 | + template: { |
| 64 | + metadata: { labels: label }, |
| 65 | + spec: { |
| 66 | + containers: [ |
| 67 | + { |
| 68 | + name: "echoserver", |
| 69 | + image: "ealen/echo-server:latest", |
| 70 | + ports: [{ containerPort: 80 }], |
| 71 | + resources: { |
| 72 | + limits: { |
| 73 | + cpu: Quantity.fromString("0.5"), |
| 74 | + memory: Quantity.fromString("256Mi"), |
| 75 | + }, |
| 76 | + requests: { |
| 77 | + cpu: Quantity.fromString("10m"), |
| 78 | + memory: Quantity.fromString("10Mi"), |
| 79 | + }, |
| 80 | + }, |
| 81 | + }, |
| 82 | + ], |
| 83 | + }, |
| 84 | + }, |
| 85 | + }, |
| 86 | + }); |
| 87 | + new KubeService(this, "service", { |
| 88 | + spec: { |
| 89 | + type: "ClusterIP", |
| 90 | + ports: [{ port: 80, targetPort: IntOrString.fromNumber(80) }], |
| 91 | + selector: label, |
| 92 | + }, |
| 93 | + }); |
| 94 | + } |
| 95 | +} |
| 96 | + |
| 97 | +const app = new App(); |
| 98 | +new MyChart(app, "cdk8s-demo"); |
| 99 | +app.synth(); |
| 100 | +``` |
| 101 | + |
| 102 | +After `cdk8s synth` which in the repo would be run as `npm run synth` there will be a file in the `dist` folder that looks like this. It is ready for deployment with `kubectl apply` |
| 103 | + |
| 104 | +``` yaml |
| 105 | +apiVersion: apps/v1 |
| 106 | +kind: Deployment |
| 107 | +metadata: |
| 108 | + name: cdk8s-demo-deployment |
| 109 | +spec: |
| 110 | + replicas: 1 |
| 111 | + selector: |
| 112 | + matchLabels: |
| 113 | + app: cdk8s-demo |
| 114 | + demo: knowledge-sharing |
| 115 | + template: |
| 116 | + metadata: |
| 117 | + labels: |
| 118 | + app: cdk8s-demo |
| 119 | + demo: knowledge-sharing |
| 120 | + spec: |
| 121 | + containers: |
| 122 | + - image: ealen/echo-server:latest |
| 123 | + name: echoserver |
| 124 | + ports: |
| 125 | + - containerPort: 80 |
| 126 | + resources: |
| 127 | + limits: |
| 128 | + cpu: "0.5" |
| 129 | + memory: 256Mi |
| 130 | + requests: |
| 131 | + cpu: 10m |
| 132 | + memory: 10Mi |
| 133 | +--- |
| 134 | +apiVersion: v1 |
| 135 | +kind: Service |
| 136 | +metadata: |
| 137 | + name: cdk8s-demo-service |
| 138 | +spec: |
| 139 | + ports: |
| 140 | + - port: 80 |
| 141 | + targetPort: 80 |
| 142 | + selector: |
| 143 | + app: cdk8s-demo |
| 144 | + demo: knowledge-sharing |
| 145 | + type: ClusterIP |
| 146 | + |
| 147 | +``` |
| 148 | + |
| 149 | +## Adding a cdk8s plugin to argocd |
| 150 | + |
| 151 | +While learning about cdk8s I was surprised to learn that it didn't work out of the box with ArgoCD, and that I couldn't find any pre-made plugins. |
| 152 | + |
| 153 | +I did however come across [this great post about cdk8s by Max Brenner](https://shipit.dev/posts/integrating-cdk8s-with-argocd.html) and also [their repository on how to run cdk8s in a container](https://github.com/brennerm/cdk8s-docker). |
| 154 | + |
| 155 | +### Dockerfile |
| 156 | + |
| 157 | +From there I built my own version of the typescript container such that it would work without running as root which argocd plugins are not allowed to do for good reason. |
| 158 | + |
| 159 | +``` Dockerfile |
| 160 | +# docker.io/dsoderlund/cdk8s:typescript |
| 161 | +FROM node:alpine |
| 162 | + |
| 163 | +RUN yarn global add cdk8s-cli && yarn cache clean |
| 164 | +RUN mkdir /files |
| 165 | +RUN mkdir /home/node/.npm-cache |
| 166 | +RUN chown -R 999:0 /home/node/.npm-cache |
| 167 | +WORKDIR /files |
| 168 | + |
| 169 | +ADD entrypoint-typescript.sh /entrypoint.sh |
| 170 | + |
| 171 | +ENV NPM_CONFIG_CACHE=/home/node/.npm-cache |
| 172 | +ENTRYPOINT ["/entrypoint.sh"] |
| 173 | +``` |
| 174 | + |
| 175 | +The entrypoint.sh script allows you to run this from the command line and have it perform the steps needed to synthesize the manifests and write them back to a volume you mount in docker; in the case of the plugin we will override this command. |
| 176 | + |
| 177 | +### Plugging in as an argo-repo-server sidecar |
| 178 | + |
| 179 | +So the idea here is that we want the argocd repository server to render the yaml for us by invoking `cdk8s synth` for an app, just like it does for helm, kustomize, or plain yaml. It should do this if it knows that it is a cdk8s-typescript style app, which is evident by the presence of the file `./imports/k8s.ts`. |
| 180 | + |
| 181 | +There is a working example you can clone and run in [my reference platform github repo](https://github.com/QuadmanSWE/ds-ref-platform/blob/main/2_platform/argocd/kustomization.yaml). Here is what the running app looks like through argocd. |
| 182 | + |
| 183 | + |
| 184 | + |
| 185 | +``` yaml |
| 186 | +# ... Removed for brevity, imagine an argocd helm values declaration. |
| 187 | +configs: |
| 188 | + cmp: |
| 189 | + create: true |
| 190 | + plugins: |
| 191 | + cdk8s-typescript: |
| 192 | + init: |
| 193 | + command: ["sh", "-c"] |
| 194 | + args: |
| 195 | + - > |
| 196 | + echo "init cdk8s-typescript" && |
| 197 | + npm install |
| 198 | + generate: |
| 199 | + command: ["sh", "-c"] |
| 200 | + args: |
| 201 | + - > |
| 202 | + cdk8s synth > /dev/null && |
| 203 | + cat dist/* |
| 204 | + discover: |
| 205 | + fileName: "./imports/k8s.ts" |
| 206 | +repoServer: |
| 207 | + extraContainers: |
| 208 | + - name: cdk8s-typescript |
| 209 | + command: |
| 210 | + - "/var/run/argocd/argocd-cmp-server" |
| 211 | + image: docker.io/dsoderlund/cdk8s:typescript |
| 212 | + securityContext: |
| 213 | + runAsNonRoot: true |
| 214 | + runAsUser: 999 |
| 215 | + volumeMounts: |
| 216 | + - mountPath: /tmp |
| 217 | + name: cmp-tmp |
| 218 | + - mountPath: /var/run/argocd |
| 219 | + name: var-files |
| 220 | + - mountPath: /home/argocd/cmp-server/plugins |
| 221 | + name: plugins |
| 222 | + - mountPath: /home/argocd/cmp-server/config/plugin.yaml |
| 223 | + name: argocd-cmp-cm |
| 224 | + subPath: cdk8s-typescript.yaml |
| 225 | + volumes: |
| 226 | + - name: argocd-cmp-cm |
| 227 | + configMap: |
| 228 | + name: argocd-cmp-cm |
| 229 | + - name: cmp-tmp |
| 230 | + emptyDir: {} |
| 231 | +``` |
| 232 | +
|
| 233 | +### ArgoCD documentation and further reading |
| 234 | +
|
| 235 | +[Here is the docs for how these config mangement plugins works.](https://argo-cd.readthedocs.io/en/stable/operator-manual/config-management-plugins/) I also recommend reading [this](https://dev.to/ali_nazari_f9773d74e0b0e4/using-cdk8s-ytt-or-gomplate-with-argocd-through-config-management-plugins-4f6g) and [this](https://codefresh.io/blog/using-argo-cds-new-config-management-plugins-to-build-kustomize-helm-and-more/) article which though using different approaches and a bit different end results than what I am after, do a great job in explaining what all the different parts are. |
| 236 | +
|
| 237 | +
|
| 238 | +## The point of all this |
| 239 | +
|
| 240 | +The act of making sure an application or piece of infrastructure is deployed as desired is the key to gitops; it should not have too strong opinions on how to express that desired state, as long as it can be rendered into clear, straightforward, unambiguous yaml. |
| 241 | +
|
| 242 | +Thus if we are managing a gitops solution we can create a great experience in managing both applications and infrastructure with rich high level tools without sacrificing the reconciliation loop of kubernetes. |
0 commit comments