@@ -21,18 +21,18 @@ source "${KUBE_ROOT}/cluster/gce/config-common.sh"
 
 # Specifying KUBE_GCE_API_ENDPOINT will override the default GCE Compute API endpoint (https://www.googleapis.com/compute/v1/).
 # This endpoint has to be pointing to v1 api. For example, https://www.googleapis.com/compute/staging_v1/
-GCE_API_ENDPOINT=${KUBE_GCE_API_ENDPOINT:-}
-GCLOUD=gcloud
+export GCE_API_ENDPOINT=${KUBE_GCE_API_ENDPOINT:-}
+export GCLOUD=gcloud
 ZONE=${KUBE_GCE_ZONE:-us-central1-b}
-REGION=${ZONE%-*}
+export REGION=${ZONE%-*}
 RELEASE_REGION_FALLBACK=${RELEASE_REGION_FALLBACK:-false}
 REGIONAL_KUBE_ADDONS=${REGIONAL_KUBE_ADDONS:-true}
 NODE_SIZE=${NODE_SIZE:-n1-standard-2}
 NUM_NODES=${NUM_NODES:-3}
 NUM_WINDOWS_NODES=${NUM_WINDOWS_NODES:-0}
 MASTER_SIZE=${MASTER_SIZE:-n1-standard-$(get-master-size)}
 MASTER_MIN_CPU_ARCHITECTURE=${MASTER_MIN_CPU_ARCHITECTURE:-} # To allow choosing better architectures.
-MASTER_DISK_TYPE=pd-ssd
+export MASTER_DISK_TYPE=pd-ssd
 MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-$(get-master-disk-size)}
 MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)}
 NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
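
Everything in this hunk follows bash's ${VAR:-default} fallback pattern, so callers override a default simply by exporting the KUBE_-prefixed variable before bring-up. A minimal sketch of such an override (zone and sizes are illustrative, and a configured gcloud project is assumed):

    # Hypothetical overrides; REGION is then derived from the zone via ${ZONE%-*}.
    export KUBE_GCE_ZONE=europe-west1-b
    export NUM_NODES=5
    export NODE_SIZE=n1-standard-4
    cluster/kube-up.sh
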
@@ -53,7 +53,7 @@ NODE_LOCAL_SSDS_EXT=${NODE_LOCAL_SSDS_EXT:-}
 # Accelerators to be attached to each node. Format "type=<accelerator-type>,count=<accelerator-count>"
 # More information on available GPUs here - https://cloud.google.com/compute/docs/gpus/
 NODE_ACCELERATORS=${NODE_ACCELERATORS:-""}
-REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
+export REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
 PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
 PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}
 KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
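
NODE_ACCELERATORS uses the "type=<accelerator-type>,count=<accelerator-count>" format documented just above it; a hypothetical GPU configuration (the accelerator type is one example from the linked GPU docs):

    # Attach two K80 GPUs to each node (illustrative values).
    export NODE_ACCELERATORS="type=nvidia-tesla-k80,count=2"
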
@@ -84,34 +84,34 @@
 # Also please update corresponding image for node e2e at:
 # https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
 GCI_VERSION=${KUBE_GCI_VERSION:-cos-81-12871-59-0}
-MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
-MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
-NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
-NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-cos-cloud}
-NODE_SERVICE_ACCOUNT=${KUBE_GCE_NODE_SERVICE_ACCOUNT:-default}
+export MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
+export MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
+export NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
+export NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-cos-cloud}
+export NODE_SERVICE_ACCOUNT=${KUBE_GCE_NODE_SERVICE_ACCOUNT:-default}
 
 # KUBELET_TEST_ARGS are extra arguments passed to kubelet.
-KUBELET_TEST_ARGS=${KUBE_KUBELET_EXTRA_ARGS:-}
+export KUBELET_TEST_ARGS=${KUBE_KUBELET_EXTRA_ARGS:-}
 CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
-CONTAINER_RUNTIME_ENDPOINT=${KUBE_CONTAINER_RUNTIME_ENDPOINT:-}
+export CONTAINER_RUNTIME_ENDPOINT=${KUBE_CONTAINER_RUNTIME_ENDPOINT:-}
 CONTAINER_RUNTIME_NAME=${KUBE_CONTAINER_RUNTIME_NAME:-}
 LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-}
 if [[ "${CONTAINER_RUNTIME}" == "containerd" ]]; then
-  CONTAINER_RUNTIME_NAME=${KUBE_CONTAINER_RUNTIME_NAME:-containerd}
-  LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-ctr -n=k8s.io images import}
+  export CONTAINER_RUNTIME_NAME=${KUBE_CONTAINER_RUNTIME_NAME:-containerd}
+  export LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-ctr -n=k8s.io images import}
 fi
 
 # Ability to inject custom versions (Ubuntu OS images ONLY)
 # if KUBE_UBUNTU_INSTALL_CONTAINERD_VERSION or KUBE_UBUNTU_INSTALL_RUNC_VERSION
 # is set to empty then we do not override the version(s) and just
 # use whatever is in the default installation of containerd package
-UBUNTU_INSTALL_CONTAINERD_VERSION=${KUBE_UBUNTU_INSTALL_CONTAINERD_VERSION:-}
-UBUNTU_INSTALL_RUNC_VERSION=${KUBE_UBUNTU_INSTALL_RUNC_VERSION:-}
+export UBUNTU_INSTALL_CONTAINERD_VERSION=${KUBE_UBUNTU_INSTALL_CONTAINERD_VERSION:-}
+export UBUNTU_INSTALL_RUNC_VERSION=${KUBE_UBUNTU_INSTALL_RUNC_VERSION:-}
 
 # MASTER_EXTRA_METADATA is the extra instance metadata on master instance separated by commas.
-MASTER_EXTRA_METADATA=${KUBE_MASTER_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
+export MASTER_EXTRA_METADATA=${KUBE_MASTER_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
 # NODE_EXTRA_METADATA is the extra instance metadata on node instance separated by commas.
-NODE_EXTRA_METADATA=${KUBE_NODE_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
+export NODE_EXTRA_METADATA=${KUBE_NODE_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
 
 NETWORK=${KUBE_GCE_NETWORK:-default}
 # Enable network deletion by default (for kube-down), unless we're using 'default' network.
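
The containerd branch only fills in CONTAINER_RUNTIME_NAME and LOAD_IMAGE_COMMAND when the caller has not set them, so switching runtimes remains a one-variable change; a sketch:

    # Selecting containerd: the branch above then defaults the runtime name to
    # "containerd" and the image-load command to "ctr -n=k8s.io images import".
    export KUBE_CONTAINER_RUNTIME=containerd
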
@@ -126,16 +126,17 @@
 INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-kubernetes}"
 CLUSTER_NAME="${CLUSTER_NAME:-${INSTANCE_PREFIX}}"
 MASTER_NAME="${INSTANCE_PREFIX}-master"
-AGGREGATOR_MASTER_NAME="${INSTANCE_PREFIX}-aggregator"
-INITIAL_ETCD_CLUSTER="${MASTER_NAME}"
-MASTER_TAG="${INSTANCE_PREFIX}-master"
-NODE_TAG="${INSTANCE_PREFIX}-minion"
+export AGGREGATOR_MASTER_NAME="${INSTANCE_PREFIX}-aggregator"
+export INITIAL_ETCD_CLUSTER="${MASTER_NAME}"
+export MASTER_TAG="${INSTANCE_PREFIX}-master"
+export NODE_TAG="${INSTANCE_PREFIX}-minion"
 
 CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-$(get-cluster-ip-range)}"
 MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
 # NODE_IP_RANGE is used when ENABLE_IP_ALIASES=true or CREATE_CUSTOM_NETWORK=true.
 # It is the primary range in the subnet and is the range used for node instance IPs.
 NODE_IP_RANGE="$(get-node-ip-range)"
+export NODE_IP_RANGE
 
 # NOTE: Avoid giving nodes empty scopes, because kubelet needs a service account
 # in order to initialize properly.
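
Because every name and firewall tag in this hunk is derived from INSTANCE_PREFIX, renaming a cluster is a single override (the prefix here is illustrative):

    export KUBE_GCE_INSTANCE_PREFIX=dev-e2e
    # MASTER_NAME -> dev-e2e-master, MASTER_TAG -> dev-e2e-master,
    # NODE_TAG -> dev-e2e-minion, and CLUSTER_NAME defaults to dev-e2e.
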
@@ -147,27 +148,27 @@ EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-}"
 VOLUME_PLUGIN_DIR="${VOLUME_PLUGIN_DIR:-/home/kubernetes/flexvolume}"
 
 SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/16}" # formerly PORTAL_NET
-ALLOCATE_NODE_CIDRS=true
+export ALLOCATE_NODE_CIDRS=true
 
 # When set to true, Docker Cache is enabled by default as part of the cluster bring up.
-ENABLE_DOCKER_REGISTRY_CACHE=true
+export ENABLE_DOCKER_REGISTRY_CACHE=true
 
 # Optional: Deploy an L7 loadbalancer controller to fulfill Ingress requests:
 #   glbc - GCE L7 Load Balancer Controller
-ENABLE_L7_LOADBALANCING="${KUBE_ENABLE_L7_LOADBALANCING:-glbc}"
+export ENABLE_L7_LOADBALANCING="${KUBE_ENABLE_L7_LOADBALANCING:-glbc}"
 
 # Optional: Enable Metrics Server. Metrics Server should be enabled everywhere,
 # since it's a critical component, but in the first release we need a way to disable
 # this in case of stability issues.
 # TODO(piosz) remove this option once Metrics Server becomes a stable thing.
-ENABLE_METRICS_SERVER="${KUBE_ENABLE_METRICS_SERVER:-true}"
+export ENABLE_METRICS_SERVER="${KUBE_ENABLE_METRICS_SERVER:-true}"
 
 # Optional: Metadata agent to setup as part of the cluster bring up:
 #   none        - No metadata agent
 #   stackdriver - Stackdriver metadata agent
 # Metadata agent is a daemon set that provides metadata of kubernetes objects
 # running on the same node for exporting metrics and logs.
-ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"
+export ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"
 
 # One special node out of NUM_NODES would be created of this type if specified.
 # Useful for scheduling heapster in large clusters with nodes of small size.
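
Each addon toggle here pairs an internal variable with a KUBE_-prefixed override; assuming the option lists in the comments are exhaustive, turning addons off or swapping them is again just an environment override:

    # Illustrative: disable Metrics Server, deploy the Stackdriver metadata agent.
    export KUBE_ENABLE_METRICS_SERVER=false
    export KUBE_ENABLE_METADATA_AGENT=stackdriver
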
@@ -180,7 +181,7 @@ HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"
 NUM_ADDITIONAL_NODES="${NUM_ADDITIONAL_NODES:-}"
 ADDITIONAL_MACHINE_TYPE="${ADDITIONAL_MACHINE_TYPE:-}"
 
-MASTER_NODE_LABELS="${KUBE_MASTER_NODE_LABELS:-}"
+export MASTER_NODE_LABELS="${KUBE_MASTER_NODE_LABELS:-}"
 # NON_MASTER_NODE_LABELS are labels that will only be applied on non-master nodes.
 NON_MASTER_NODE_LABELS="${KUBE_NON_MASTER_NODE_LABELS:-}"
 WINDOWS_NON_MASTER_NODE_LABELS="${WINDOWS_NON_MASTER_NODE_LABELS:-}"
@@ -202,9 +203,9 @@
 
 # Optional: Enable netd.
 ENABLE_NETD="${KUBE_ENABLE_NETD:-false}"
-CUSTOM_NETD_YAML="${KUBE_CUSTOM_NETD_YAML:-}"
-CUSTOM_CALICO_NODE_DAEMONSET_YAML="${KUBE_CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}"
-CUSTOM_TYPHA_DEPLOYMENT_YAML="${KUBE_CUSTOM_TYPHA_DEPLOYMENT_YAML:-}"
+export CUSTOM_NETD_YAML="${KUBE_CUSTOM_NETD_YAML:-}"
+export CUSTOM_CALICO_NODE_DAEMONSET_YAML="${KUBE_CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}"
+export CUSTOM_TYPHA_DEPLOYMENT_YAML="${KUBE_CUSTOM_TYPHA_DEPLOYMENT_YAML:-}"
 
 # To avoid running netd on a node that is not configured appropriately,
 # label each Node so that the DaemonSet can run the Pods only on ready Nodes.
@@ -213,8 +214,8 @@ if [[ ${ENABLE_NETD:-} == "true" ]]; then
   NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}cloud.google.com/gke-netd-ready=true"
 fi
 
-ENABLE_NODELOCAL_DNS="${KUBE_ENABLE_NODELOCAL_DNS:-false}"
-LOCAL_DNS_IP="${KUBE_LOCAL_DNS_IP:-169.254.20.10}"
+export ENABLE_NODELOCAL_DNS="${KUBE_ENABLE_NODELOCAL_DNS:-false}"
+export LOCAL_DNS_IP="${KUBE_LOCAL_DNS_IP:-169.254.20.10}"
 
 # Enable metadata concealment by firewalling pod traffic to the metadata server
 # and run a proxy daemonset on nodes.
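
NodeLocal DNS is off by default; a hedged example of enabling it with the link-local address defaulted above:

    export KUBE_ENABLE_NODELOCAL_DNS=true
    # The node-local resolver then listens on LOCAL_DNS_IP (169.254.20.10 by default).
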
@@ -230,12 +231,12 @@ if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then
 fi
 
 # Optional: Enable node logging.
-ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}"
-LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-gcp}" # options: elasticsearch, gcp
+export ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}"
+export LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-gcp}" # options: elasticsearch, gcp
 
 # Optional: When set to true, Elasticsearch and Kibana will be set up as part of the cluster bring up.
-ENABLE_CLUSTER_LOGGING="${KUBE_ENABLE_CLUSTER_LOGGING:-true}"
-ELASTICSEARCH_LOGGING_REPLICAS=1
+export ENABLE_CLUSTER_LOGGING="${KUBE_ENABLE_CLUSTER_LOGGING:-true}"
+export ELASTICSEARCH_LOGGING_REPLICAS=1
 
 # Optional: Don't require https for registries in our local RFC1918 network
 if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} == "true" ]]; then
@@ -246,7 +247,7 @@
 RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"
 
 if [[ "${KUBE_FEATURE_GATES:-}" == "AllAlpha=true" ]]; then
-  RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-api/all=true}"
+  export RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-api/all=true}"
 fi
 
 # If feature gates includes AllAlpha or EndpointSlice, and EndpointSlice has not been disabled, add EndpointSlice controller to list of controllers to run.
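
The branch above couples the two alpha switches: setting only the feature-gate variable also defaults RUNTIME_CONFIG to turning on every API group. A sketch of an all-alpha test bring-up (throwaway clusters only):

    # AllAlpha=true also yields RUNTIME_CONFIG="api/all=true" via the branch above.
    export KUBE_FEATURE_GATES="AllAlpha=true"
    cluster/kube-up.sh
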
@@ -257,7 +258,7 @@
 # Optional: set feature gates
 FEATURE_GATES="${KUBE_FEATURE_GATES:-}"
 
-if [[ ! -z "${NODE_ACCELERATORS}" ]]; then
+if [[ -n "${NODE_ACCELERATORS}" ]]; then
   if [[ -z "${FEATURE_GATES:-}" ]]; then
     FEATURE_GATES="DevicePlugins=true"
   else
@@ -271,16 +272,16 @@
 # Optional: Install cluster DNS.
 # Set CLUSTER_DNS_CORE_DNS to 'false' to install kube-dns instead of CoreDNS.
 CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-true}"
-ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
-DNS_SERVER_IP="${KUBE_DNS_SERVER_IP:-10.0.0.10}"
-DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}"
-DNS_MEMORY_LIMIT="${KUBE_DNS_MEMORY_LIMIT:-170Mi}"
+export ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
+export DNS_SERVER_IP="${KUBE_DNS_SERVER_IP:-10.0.0.10}"
+export DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}"
+export DNS_MEMORY_LIMIT="${KUBE_DNS_MEMORY_LIMIT:-170Mi}"
 
 # Optional: Enable DNS horizontal autoscaler
-ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"
+export ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"
 
 # Optional: Install Kubernetes UI
-ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
+export ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
 
 # Optional: Install node problem detector.
 #   none - Not run node problem detector.
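
The DNS block follows the same override pattern; illustrative custom values (the service IP must fall inside SERVICE_CLUSTER_IP_RANGE, 10.0.0.0/16 by default):

    export KUBE_DNS_SERVER_IP=10.0.0.53
    export KUBE_DNS_DOMAIN=cluster.internal
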
@@ -290,7 +291,7 @@ if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]]; then
   # Enable standalone mode by default for gci.
   ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-standalone}"
 else
-  ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-daemonset}"
+  export ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-daemonset}"
 fi
 NODE_PROBLEM_DETECTOR_VERSION="${NODE_PROBLEM_DETECTOR_VERSION:-}"
 NODE_PROBLEM_DETECTOR_TAR_HASH="${NODE_PROBLEM_DETECTOR_TAR_HASH:-}"
@@ -304,10 +305,10 @@ CNI_STORAGE_URL_BASE="${CNI_STORAGE_URL_BASE:-https://storage.googleapis.com/k8s
 # Optional: Create autoscaler for cluster's nodes.
 ENABLE_CLUSTER_AUTOSCALER="${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}"
 if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
-  AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-}"
-  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-}"
-  AUTOSCALER_ENABLE_SCALE_DOWN="${KUBE_AUTOSCALER_ENABLE_SCALE_DOWN:-true}"
-  AUTOSCALER_EXPANDER_CONFIG="${KUBE_AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
+  export AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-}"
+  export AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-}"
+  export AUTOSCALER_ENABLE_SCALE_DOWN="${KUBE_AUTOSCALER_ENABLE_SCALE_DOWN:-true}"
+  export AUTOSCALER_EXPANDER_CONFIG="${KUBE_AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
 fi
 
 # Optional: Enable allocation of pod IPs using IP aliases.
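
AUTOSCALER_MIN_NODES and AUTOSCALER_MAX_NODES default to empty, so a caller enabling the autoscaler is expected to supply the bounds; a hypothetical configuration:

    export KUBE_ENABLE_CLUSTER_AUTOSCALER=true
    export KUBE_AUTOSCALER_MIN_NODES=1
    export KUBE_AUTOSCALER_MAX_NODES=10
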
@@ -319,28 +320,27 @@
 # new subnetwork will be created for the cluster.
 ENABLE_IP_ALIASES=${KUBE_GCE_ENABLE_IP_ALIASES:-false}
 NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-RangeAllocator}
-if [ ${ENABLE_IP_ALIASES} = true ]; then
+if [ "${ENABLE_IP_ALIASES}" = true ]; then
   # Number of Pods that can run on this node.
   MAX_PODS_PER_NODE=${MAX_PODS_PER_NODE:-110}
   # Size of ranges allocated to each node.
-  IP_ALIAS_SIZE="/$(get-alias-range-size ${MAX_PODS_PER_NODE})"
+  IP_ALIAS_SIZE="/$(get-alias-range-size "${MAX_PODS_PER_NODE}")"
+  export IP_ALIAS_SIZE
   IP_ALIAS_SUBNETWORK=${KUBE_GCE_IP_ALIAS_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-default}
   # If we're using custom network, use the subnet we already create for it as the one for ip-alias.
   # Note that this means SUBNETWORK would override KUBE_GCE_IP_ALIAS_SUBNETWORK in case of custom network.
   if [[ "${CREATE_CUSTOM_NETWORK}" == true ]]; then
-    IP_ALIAS_SUBNETWORK="${SUBNETWORK}"
+    export IP_ALIAS_SUBNETWORK="${SUBNETWORK}"
   fi
-  # Reserve the services IP space to avoid being allocated for other GCP resources.
-  SERVICE_CLUSTER_IP_SUBNETWORK=${KUBE_GCE_SERVICE_CLUSTER_IP_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-services}
-  NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-CloudAllocator}
+  export NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-CloudAllocator}
   SECONDARY_RANGE_NAME=${SECONDARY_RANGE_NAME:-}
   # Add to the provider custom variables.
   PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_IP_ALIASES"
   PROVIDER_VARS="${PROVIDER_VARS:-} NODE_IPAM_MODE"
   PROVIDER_VARS="${PROVIDER_VARS:-} SECONDARY_RANGE_NAME"
 elif [[ -n "${MAX_PODS_PER_NODE:-}" ]]; then
   # Should not have MAX_PODS_PER_NODE set for route-based clusters.
-  echo -e "${color_red}Cannot set MAX_PODS_PER_NODE for route-based projects for ${PROJECT}." >&2
+  echo -e "${color_red:-}Cannot set MAX_PODS_PER_NODE for route-based projects for ${PROJECT}." >&2
   exit 1
 fi
 
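
For context, get-alias-range-size comes from config-common.sh (sourced at the top of this file) and maps MAX_PODS_PER_NODE to a per-node mask. A rough sketch of the computation, not the verbatim helper: pick the smallest prefix whose range holds at least twice the pod count, so the default of 110 pods yields a /24 per node:

    # Sketch only: smallest prefix length p such that 2^(32-p) >= 2 * max_pods.
    alias_range_size_sketch() {
      local -r max_pods="$1"
      local pow
      for pow in $(seq 0 31); do
        if (( max_pods * 2 <= 2 ** pow )); then
          echo $((32 - pow))   # 110 pods: 220 <= 256 = 2^8, so /24
          return 0
        fi
      done
      return 1
    }
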
@@ -387,12 +387,12 @@ NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, kubenet
 # Network Policy plugin specific settings.
 NETWORK_POLICY_PROVIDER="${NETWORK_POLICY_PROVIDER:-none}" # calico
 
-NON_MASQUERADE_CIDR="0.0.0.0/0"
+export NON_MASQUERADE_CIDR="0.0.0.0/0"
 
 # How should the kubelet configure hairpin mode?
 HAIRPIN_MODE="${HAIRPIN_MODE:-hairpin-veth}" # promiscuous-bridge, hairpin-veth, none
 # Optional: if set to true, kube-up will configure the cluster to run e2e tests.
-E2E_STORAGE_TEST_ENVIRONMENT="${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}"
+export E2E_STORAGE_TEST_ENVIRONMENT="${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}"
 
 # Evict pods whenever compute resource availability on the nodes gets below a threshold.
 EVICTION_HARD="${EVICTION_HARD:-memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%}"
@@ -433,7 +433,7 @@
 
 # Fluentd requirements
 # YAML exists to trigger a configuration refresh when changes are made.
-FLUENTD_GCP_YAML_VERSION="v3.2.0"
+export FLUENTD_GCP_YAML_VERSION="v3.2.0"
 FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-1.6.17}"
 FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}"
 FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}"
@@ -476,7 +476,7 @@ ROTATE_CERTIFICATES="${ROTATE_CERTIFICATES:-}"
 # into kube-controller-manager via `--concurrent-service-syncs`
 CONCURRENT_SERVICE_SYNCS="${CONCURRENT_SERVICE_SYNCS:-}"
 
-SERVICEACCOUNT_ISSUER="https://kubernetes.io/${CLUSTER_NAME}"
+export SERVICEACCOUNT_ISSUER="https://kubernetes.io/${CLUSTER_NAME}"
 
 # Optional: Enable Node termination Handler for Preemptible and GPU VMs.
 # https://github.com/GoogleCloudPlatform/k8s-node-termination-handler
@@ -491,9 +491,9 @@
 WINDOWS_NODE_TAINTS="${WINDOWS_NODE_TAINTS:-node.kubernetes.io/os=win1809:NoSchedule}"
 
 # Whether to set up a private GCE cluster, i.e. a cluster where nodes have only private IPs.
-GCE_PRIVATE_CLUSTER="${KUBE_GCE_PRIVATE_CLUSTER:-false}"
-GCE_PRIVATE_CLUSTER_PORTS_PER_VM="${KUBE_GCE_PRIVATE_CLUSTER_PORTS_PER_VM:-}"
+export GCE_PRIVATE_CLUSTER="${KUBE_GCE_PRIVATE_CLUSTER:-false}"
+export GCE_PRIVATE_CLUSTER_PORTS_PER_VM="${KUBE_GCE_PRIVATE_CLUSTER_PORTS_PER_VM:-}"
 
 # Optional: Create apiserver konnectivity server and agent.
-ENABLE_EGRESS_VIA_KONNECTIVITY_SERVICE="${KUBE_ENABLE_EGRESS_VIA_KONNECTIVITY_SERVICE:-false}"
-KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE="${KUBE_KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}"
+export ENABLE_EGRESS_VIA_KONNECTIVITY_SERVICE="${KUBE_ENABLE_EGRESS_VIA_KONNECTIVITY_SERVICE:-false}"
+export KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE="${KUBE_KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}"