Skip to content

Commit 15e2c8d

Browse files
authored
External Fleet agent recipe (elastic#8788)
Configuring Fleet to be accessible from both inside as well as outside the Kubernetes cluster is a bit tricky. This recipe aims at highlighting some of the gotchas.
1 parent 47da4d3 commit 15e2c8d

File tree

2 files changed

+359
-0
lines changed

2 files changed

+359
-0
lines changed

config/recipes/elastic-agent/README.asciidoc

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,3 +46,7 @@ Deploys single instance Elastic Agent Deployment in Fleet mode with APM integrat
4646
===== Synthetic monitoring - `synthetic-monitoring.yaml`
4747

4848
Deploys a Fleet-enrolled Elastic Agent that can be used for link:https://www.elastic.co/guide/en/observability/current/monitor-uptime-synthetics.html[Synthetic monitoring]. This Elastic Agent uses the `elastic-agent-complete` image. The agent policy still needs to be link:https://www.elastic.co/guide/en/observability/current/synthetics-private-location.html#synthetics-private-location-add[registered as private location] in Kibana.
49+
50+
===== Fleet Server exposed both internally and externally - `fleet-ingress-setup.yaml`
51+
52+
This example shows how to expose the Fleet Server to the outside world using a Kubernetes Ingress resource. The Fleet Server is configured to use custom TLS certificates, and all communications are secured with TLS. The same Fleet Server is also accessible from within the cluster, allowing agents to connect to it regardless of their location. Refer to the comments in the `fleet-ingress-setup.yaml` file for more details on how to set up the Ingress resource and TLS certificates to enable this configuration.
Lines changed: 355 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,355 @@
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
  name: kibana
spec:
  version: 9.1.0
  count: 1
  elasticsearchRef:
    name: elasticsearch
  config:
    # Agents enroll through the externally visible hostnames served by the
    # Ingress defined further down, so Fleet must advertise those URLs.
    xpack.fleet.agents.elasticsearch.hosts: ["https://es.example.com:443"]
    xpack.fleet.agents.fleet_server.hosts: ["https://fleet.example.com:443"]
    xpack.fleet.packages:
      - name: system
        version: latest
      - name: elastic_agent
        version: latest
      - name: fleet_server
        version: latest
      - name: kubernetes
        version: latest
      - name: apm
        version: latest
    xpack.fleet.agentPolicies:
      - name: Fleet Server on ECK policy
        id: eck-fleet-server
        namespace: elastic
        is_managed: true
        monitoring_enabled:
          - logs
          - metrics
        unenroll_timeout: 900
        package_policies:
          - name: fleet_server-1
            id: fleet_server-1
            package:
              name: fleet_server
      - name: Elastic Agent on ECK policy
        id: eck-agent
        namespace: elastic
        is_managed: true
        monitoring_enabled:
          - logs
          - metrics
        unenroll_timeout: 900
        package_policies:
          - package:
              name: system
            name: system-1
          - package:
              name: kubernetes
            name: kubernetes-1
---
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: elasticsearch
spec:
  version: 9.1.0
  nodeSets:
    - name: default-3
      count: 3
      config:
        # mmap is typically unavailable in constrained/container environments.
        node.store.allow_mmap: false
      volumeClaimTemplates:
        - metadata:
            name: elasticsearch-data
          spec:
            accessModes: ["ReadWriteOnce"]
            resources:
              requests:
                storage: 30Gi
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: elastic-ingress
  annotations:
    # Disable plain-text HTTP traffic; everything is served over TLS.
    kubernetes.io/ingress.allow-http: "false"
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
    nginx.ingress.kubernetes.io/proxy-ssl-verify: "off"
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    # Depending on the ingress implementation in your environment you may need
    # to specify the ingress class:
    # kubernetes.io/ingress.class: "example"
spec:
  # Alternatively use the ingressClassName field. Consult the documentation of
  # your ingress controller.
  # ingressClassName: example
  tls:
    # The assumption here is that these are certificates that are trusted both
    # by agents outside the cluster as well as inside. See the comments in the
    # Agent spec below for more details.
    - hosts: ["fleet.example.com"]
      secretName: fleet-server-acme
    - hosts: ["es.example.com"]
      secretName: es-acme
    - hosts: ["kb.example.com"]
      secretName: kb-acme
  rules:
    - host: "kb.example.com"
      http:
        paths:
          - path: "/"
            pathType: Prefix
            backend:
              service:
                name: kibana-kb-http
                port:
                  number: 5601
    - host: "es.example.com"
      http:
        paths:
          - path: "/"
            pathType: Prefix
            backend:
              service:
                name: elasticsearch-es-http
                port:
                  number: 9200
    - host: "fleet.example.com"
      http:
        paths:
          - path: "/"
            pathType: Prefix
            backend:
              service:
                name: fleet-server-agent-http
                port:
                  number: 8220
---
apiVersion: agent.k8s.elastic.co/v1alpha1
kind: Agent
metadata:
  name: fleet-server
spec:
  version: 9.1.0
  http:
    # Configuring the same certificates used for the ingress here has the
    # effect that the CA certificate that is expected in ca.crt inside this
    # secret is propagated to the agents and configured in the FLEET_CA
    # environment variable. Without this the agents would only trust the
    # self-signed certificates generated by ECK.
    tls:
      certificate:
        secretName: fleet-server-acme
  kibanaRef:
    name: kibana
  elasticsearchRefs:
    - name: elasticsearch
  mode: fleet
  fleetServerEnabled: true
  policyID: eck-fleet-server
  deployment:
    replicas: 1
    podTemplate:
      spec:
        containers:
          - name: agent
            env:
              # Force Elastic Agent to bootstrap itself through the public
              # Fleet Server URL. We are assuming here the certificates
              # configured above are only valid for the public URL.
              - name: FLEET_URL
                value: https://fleet.example.com:443
        serviceAccountName: fleet-server
        automountServiceAccountToken: true
        securityContext:
          runAsUser: 0
---
apiVersion: agent.k8s.elastic.co/v1alpha1
kind: Agent
metadata:
  name: elastic-agent
spec:
  config:
    fleet:
      enabled: true
    providers.kubernetes:
      add_resource_metadata:
        deployment: true
  version: 9.1.0
  kibanaRef:
    name: kibana
  fleetServerRef:
    name: fleet-server
  mode: fleet
  policyID: eck-agent
  daemonSet:
    podTemplate:
      spec:
        volumes:
          # Mount the ingress certificate secret so the agent can pick up the
          # CA alongside the ECK-provided trust.
          - name: fleet-ca
            secret:
              secretName: fleet-server-acme
        containers:
          - name: agent
            env:
              # Enroll through the public Fleet Server URL (https default
              # port 443).
              - name: FLEET_URL
                value: https://fleet.example.com
            volumeMounts:
              - name: fleet-ca
                mountPath: /mnt/extra
        serviceAccountName: elastic-agent
        hostNetwork: true
        dnsPolicy: ClusterFirstWithHostNet
        automountServiceAccountToken: true
        securityContext:
          runAsUser: 0
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: fleet-server
rules:
  - apiGroups: [""]
    resources:
      - pods
      - namespaces
      - nodes
    verbs:
      - get
      - watch
      - list
  - apiGroups: ["apps"]
    resources:
      - replicasets
    verbs:
      - get
      - watch
      - list
  - apiGroups: ["batch"]
    resources:
      - jobs
    verbs:
      - get
      - watch
      - list
  # Leases are used for leader election among agents.
  - apiGroups: ["coordination.k8s.io"]
    resources:
      - leases
    verbs:
      - get
      - create
      - update
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fleet-server
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: fleet-server
subjects:
  - kind: ServiceAccount
    name: fleet-server
    namespace: default
roleRef:
  kind: ClusterRole
  name: fleet-server
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: elastic-agent
rules:
  - apiGroups: [""]
    resources:
      - pods
      - nodes
      - namespaces
      - events
      - services
      - configmaps
    verbs:
      - get
      - watch
      - list
  # Leases are used for leader election among agents.
  - apiGroups: ["coordination.k8s.io"]
    resources:
      - leases
    verbs:
      - get
      - create
      - update
  # Non-resource metrics endpoint; listed once (the original draft repeated
  # this rule a second time further down).
  - nonResourceURLs:
      - "/metrics"
    verbs:
      - get
  - apiGroups: ["extensions"]
    resources:
      - replicasets
    verbs:
      - get
      - list
      - watch
  - apiGroups: ["apps"]
    resources:
      - statefulsets
      - deployments
      - replicasets
      - daemonsets
    verbs:
      - get
      - list
      - watch
  - apiGroups: [""]
    resources:
      - nodes/stats
    verbs:
      - get
  - apiGroups: ["batch"]
    resources:
      - jobs
      - cronjobs
    verbs:
      - get
      - list
      - watch
  - apiGroups: ["storage.k8s.io"]
    resources:
      - storageclasses
    verbs:
      - get
      - list
      - watch
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: elastic-agent
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: elastic-agent
subjects:
  - kind: ServiceAccount
    name: elastic-agent
    namespace: default
roleRef:
  kind: ClusterRole
  name: elastic-agent
  apiGroup: rbac.authorization.k8s.io

0 commit comments

Comments
 (0)