context) {
-
- // * Log custom resource
- log.info("LocustTest deleted: {}", resource.getMetadata().getName());
-
- log.debug(
- "Deleted in namespace: {}, \nCR with name: {}, and generation: {}, \nimage: {}, \nmaster command: {}, \nworker command: {}, \nreplicas: {} \nconfigMap:'{}'.",
- resource.getMetadata().getNamespace(),
- resource.getMetadata().getName(),
- resource.getMetadata().getGeneration(),
- resource.getSpec().getImage(),
- resource.getSpec().getMasterCommandSeed(),
- resource.getSpec().getWorkerCommandSeed(),
- resource.getSpec().getWorkerReplicas(),
- resource.getSpec().getConfigMap());
-
-
- // * Delete load generation resource
- // Delete Master node service
- deletionManager.deleteService(resource, MASTER);
-
- // Delete Master job
- deletionManager.deleteJob(resource, MASTER);
-
- // Delete Worker jobs
- deletionManager.deleteJob(resource, WORKER);
-
- return DeleteControl.defaultDelete();
- }
-
-}
diff --git a/src/main/java/com/locust/operator/controller/config/SysConfig.java b/src/main/java/com/locust/operator/controller/config/SysConfig.java
deleted file mode 100644
index bc3844d9..00000000
--- a/src/main/java/com/locust/operator/controller/config/SysConfig.java
+++ /dev/null
@@ -1,103 +0,0 @@
-package com.locust.operator.controller.config;
-
-import io.micronaut.context.annotation.Property;
-import jakarta.inject.Singleton;
-import lombok.Getter;
-import lombok.ToString;
-import org.apache.commons.lang3.math.NumberUtils;
-
-@Getter
-@ToString
-@Singleton
-public class SysConfig {
-
- // * Kafka
- @Property(name = "config.load-generation-pods.kafka.bootstrap-servers")
- private String kafkaBootstrapServers;
- @Property(name = "config.load-generation-pods.kafka.security.enabled")
- private boolean kafkaSecurityEnabled;
- @Property(name = "config.load-generation-pods.kafka.security.protocol")
- private String kafkaSecurityProtocol;
- @Property(name = "config.load-generation-pods.kafka.security.username")
- private String kafkaUsername;
- @Property(name = "config.load-generation-pods.kafka.security.password")
- private String kafkaUserPassword;
- @Property(name = "config.load-generation-pods.kafka.sasl.mechanism")
- private String kafkaSaslMechanism;
- @ToString.Exclude
- @Property(name = "config.load-generation-pods.kafka.sasl.jaas.config")
- private String kafkaSaslJaasConfig;
-
- // * Generated job characteristics
- /**
- * We use Object here to prevent automatic conversion from null to 0.
- *
- * See {@link #getTtlSecondsAfterFinished()} for understanding how the
- * value is converted to an integer.
- */
- @Property(name = "config.load-generation-jobs.ttl-seconds-after-finished")
- private Object ttlSecondsAfterFinished;
-
- // * Generated pod characteristics
- @Property(name = "config.load-generation-pods.resource.cpu-request")
- private String podCpuRequest;
- @Property(name = "config.load-generation-pods.resource.mem-request")
- private String podMemRequest;
- @Property(name = "config.load-generation-pods.resource.ephemeralStorage-request")
- private String podEphemeralStorageRequest;
- @Property(name = "config.load-generation-pods.resource.cpu-limit")
- private String podCpuLimit;
- @Property(name = "config.load-generation-pods.resource.mem-limit")
- private String podMemLimit;
- @Property(name = "config.load-generation-pods.resource.ephemeralStorage-limit")
- private String podEphemeralStorageLimit;
-
- // * Metrics exporter container characteristics
- @Property(name = "config.load-generation-pods.metricsExporter.image")
- private String metricsExporterImage;
- @Property(name = "config.load-generation-pods.metricsExporter.port")
- private Integer metricsExporterPort;
- @Property(name = "config.load-generation-pods.metricsExporter.pullPolicy")
- private String metricsExporterPullPolicy;
- @Property(name = "config.load-generation-pods.metricsExporter.resource.cpu-request")
- private String metricsExporterCpuRequest;
- @Property(name = "config.load-generation-pods.metricsExporter.resource.mem-request")
- private String metricsExporterMemRequest;
- @Property(name = "config.load-generation-pods.metricsExporter.resource.ephemeralStorage-request")
- private String metricsExporterEphemeralStorageRequest;
- @Property(name = "config.load-generation-pods.metricsExporter.resource.cpu-limit")
- private String metricsExporterCpuLimit;
- @Property(name = "config.load-generation-pods.metricsExporter.resource.mem-limit")
- private String metricsExporterMemLimit;
- @Property(name = "config.load-generation-pods.metricsExporter.resource.ephemeralStorage-limit")
- private String metricsExporterEphemeralStorageLimit;
-
- @Property(name = "config.load-generation-pods.affinity.enableCrInjection")
- private boolean affinityCrInjectionEnabled;
- @Property(name = "config.load-generation-pods.taintTolerations.enableCrInjection")
- private boolean tolerationsCrInjectionEnabled;
-
- /**
- * Value configured for setting Kubernetes Jobs' ttlSecondsAfterFinished property.
- * This method will try to convert the value to an integer or fail and report invalid values.
- * {@code null} or empty strings will result in a {@code null} return.
- *
- * @return either {@code null} or an integer value greater than or equal to 0
- */
- public Integer getTtlSecondsAfterFinished() {
- final String stringValue = String.valueOf(this.ttlSecondsAfterFinished);
-
- if (NumberUtils.isDigits(stringValue)) {
- return Integer.parseInt(stringValue);
- } else if (stringValue.isEmpty()) {
- return null;
- } else {
- throw new IllegalArgumentException(
- String.format(
- "Invalid value '%s' for property ttl-seconds-after-finished",
- stringValue
- )
- );
- }
- }
-}
diff --git a/src/main/java/com/locust/operator/controller/dto/LoadGenerationNode.java b/src/main/java/com/locust/operator/controller/dto/LoadGenerationNode.java
deleted file mode 100644
index e75e410d..00000000
--- a/src/main/java/com/locust/operator/controller/dto/LoadGenerationNode.java
+++ /dev/null
@@ -1,36 +0,0 @@
-package com.locust.operator.controller.dto;
-
-import com.locust.operator.customresource.internaldto.LocustTestAffinity;
-import com.locust.operator.customresource.internaldto.LocustTestToleration;
-import lombok.AccessLevel;
-import lombok.AllArgsConstructor;
-import lombok.Builder;
-import lombok.Data;
-import lombok.NoArgsConstructor;
-
-import java.util.List;
-import java.util.Map;
-
-@Data
-@Builder
-@AllArgsConstructor
-@NoArgsConstructor(access = AccessLevel.NONE)
-public class LoadGenerationNode {
-
- private String name;
- private Map labels;
- private Map annotations;
- private LocustTestAffinity affinity;
- private List tolerations;
- private Integer ttlSecondsAfterFinished;
- private List command;
- private OperationalMode operationalMode;
- private String image;
- private String imagePullPolicy;
- private List imagePullSecrets;
- private Integer replicas;
- private List ports;
- private String configMap;
- private String libConfigMap;
-
-}
diff --git a/src/main/java/com/locust/operator/controller/dto/MetricsExporterContainer.java b/src/main/java/com/locust/operator/controller/dto/MetricsExporterContainer.java
deleted file mode 100644
index ac1e10f0..00000000
--- a/src/main/java/com/locust/operator/controller/dto/MetricsExporterContainer.java
+++ /dev/null
@@ -1,22 +0,0 @@
-package com.locust.operator.controller.dto;
-
-import io.fabric8.kubernetes.api.model.ResourceRequirements;
-import lombok.AccessLevel;
-import lombok.AllArgsConstructor;
-import lombok.Builder;
-import lombok.Data;
-import lombok.NoArgsConstructor;
-
-@Data
-@Builder
-@AllArgsConstructor
-@NoArgsConstructor(access = AccessLevel.NONE)
-public class MetricsExporterContainer {
-
- private String containerName;
- private String containerImage;
- private String pullPolicy;
- private int exporterPort;
- private ResourceRequirements resourceRequirements;
-
-}
diff --git a/src/main/java/com/locust/operator/controller/dto/OperationalMode.java b/src/main/java/com/locust/operator/controller/dto/OperationalMode.java
deleted file mode 100644
index f187b730..00000000
--- a/src/main/java/com/locust/operator/controller/dto/OperationalMode.java
+++ /dev/null
@@ -1,14 +0,0 @@
-package com.locust.operator.controller.dto;
-
-import lombok.AllArgsConstructor;
-import lombok.Getter;
-
-@AllArgsConstructor
-public enum OperationalMode {
-
- MASTER("master"),
- WORKER("worker");
-
- @Getter
- public final String mode;
-}
diff --git a/src/main/java/com/locust/operator/controller/dto/OperatorType.java b/src/main/java/com/locust/operator/controller/dto/OperatorType.java
deleted file mode 100644
index ca96cb03..00000000
--- a/src/main/java/com/locust/operator/controller/dto/OperatorType.java
+++ /dev/null
@@ -1,15 +0,0 @@
-package com.locust.operator.controller.dto;
-
-import lombok.AllArgsConstructor;
-import lombok.Getter;
-
-@AllArgsConstructor
-public enum OperatorType {
-
- EXISTS("Exists"),
- EQUAL("Equal");
-
- @Getter
- public final String type;
-
-}
diff --git a/src/main/java/com/locust/operator/controller/utils/Constants.java b/src/main/java/com/locust/operator/controller/utils/Constants.java
deleted file mode 100644
index 25093553..00000000
--- a/src/main/java/com/locust/operator/controller/utils/Constants.java
+++ /dev/null
@@ -1,88 +0,0 @@
-package com.locust.operator.controller.utils;
-
-import lombok.NoArgsConstructor;
-
-import java.util.List;
-
-import static lombok.AccessLevel.PRIVATE;
-
-@NoArgsConstructor(access = PRIVATE)
-public class Constants {
-
- public static final String NODE_NAME_TEMPLATE = "%s-%s";
-
- // Master node constants
- public static final int MASTER_NODE_REPLICA_COUNT = 1;
- public static final int DEFAULT_WEB_UI_PORT = 8089;
- // 8089 -> Web interface
- // 5557, 5558 -> Node communication
- public static final List MASTER_NODE_PORTS = List.of(5557, 5558, DEFAULT_WEB_UI_PORT);
-
- public static final Integer WORKER_NODE_PORT = 8080;
- // Master node command template: %s -> Team test configuration
- public static final String MASTER_CMD_TEMPLATE = "%s "
- // Declare `master` operation mode & availability port
- + "--master --master-port=%d "
- // Number of workers to wait for before starting the test
- + "--expect-workers=%d "
- // Auto start the test while keeping the UI available
- + "--autostart --autoquit 60 "
- // Allow to automatically rebalance users if new workers are added or removed during a test run.
- + "--enable-rebalancing "
- // Log only the summary
- + "--only-summary ";
-
- // Worker node constants
- // When used, output will be: " --worker --master-port= --master-host="
- public static final String WORKER_CMD_TEMPLATE = "%s --worker --master-port=%d --master-host=%s";
-
- // Generic k8s constants
- public static final String APP_DEFAULT_LABEL = "performance-test-name";
- public static final String SERVICE_SELECTOR_LABEL = "performance-test-pod-name";
- public static final String MANAGED_BY_LABEL_KEY = "managed-by";
- public static final String MANAGED_BY_LABEL_VALUE = "locust-k8s-operator";
-
- // Environment variables names
- public static final String KAFKA_BOOTSTRAP_SERVERS = "KAFKA_BOOTSTRAP_SERVERS";
- public static final String KAFKA_SECURITY_ENABLED = "KAFKA_SECURITY_ENABLED";
- public static final String KAFKA_SECURITY_PROTOCOL_CONFIG = "KAFKA_SECURITY_PROTOCOL_CONFIG";
- public static final String KAFKA_SASL_MECHANISM = "KAFKA_SASL_MECHANISM";
- public static final String KAFKA_SASL_JAAS_CONFIG = "KAFKA_SASL_JAAS_CONFIG";
- public static final String KAFKA_USERNAME = "KAFKA_USERNAME";
- public static final String KAFKA_PASSWORD = "KAFKA_PASSWORD";
-
- // Service constants
- public static final String PORT_DEFAULT_NAME = "port";
- public static final String TCP_PROTOCOL = "TCP";
- public static final String METRICS_PORT_NAME = "prometheus-metrics";
-
- // Job constants
- public static final String DEFAULT_RESTART_POLICY = "Never";
- public static final int BACKOFF_LIMIT = 0;
- public static final String DEFAULT_MOUNT_PATH = "/lotest/src/";
- public static final String LIB_MOUNT_PATH = "/opt/locust/lib";
- public static final String CONTAINER_ARGS_SEPARATOR = " ";
-
- // Node Affinity constants
- public static final String DEFAULT_NODE_MATCH_EXPRESSION_OPERATOR = "In";
-
- // Metrics
- public static final String PROMETHEUS_IO_SCRAPE = "prometheus.io/scrape";
- public static final String PROMETHEUS_IO_PATH = "prometheus.io/path";
- public static final String PROMETHEUS_IO_PORT = "prometheus.io/port";
- public static final String PROMETHEUS_IO_ENDPOINT = "/metrics";
-
- // Metrics container
- public static final String EXPORTER_CONTAINER_NAME = "locust-metrics-exporter";
-
- public static final String EXPORTER_URI_ENV_VAR = "LOCUST_EXPORTER_URI";
- // localhost is used because the exporter container is in the same pod as the master container.
- // This means that they share the same network
- public static final String EXPORTER_URI_ENV_VAR_VALUE = String.format("http://localhost:%s", DEFAULT_WEB_UI_PORT);
-
- public static final String EXPORTER_PORT_ENV_VAR = "LOCUST_EXPORTER_WEB_LISTEN_ADDRESS";
-
- public static final String DEFAULT_RESOURCE_TARGET = "defaultTarget";
- public static final String METRICS_EXPORTER_RESOURCE_TARGET = "metricsExporter";
-
-}
diff --git a/src/main/java/com/locust/operator/controller/utils/LoadGenHelpers.java b/src/main/java/com/locust/operator/controller/utils/LoadGenHelpers.java
deleted file mode 100644
index 2c39f50f..00000000
--- a/src/main/java/com/locust/operator/controller/utils/LoadGenHelpers.java
+++ /dev/null
@@ -1,397 +0,0 @@
-package com.locust.operator.controller.utils;
-
-import com.locust.operator.controller.config.SysConfig;
-import com.locust.operator.controller.dto.LoadGenerationNode;
-import com.locust.operator.controller.dto.MetricsExporterContainer;
-import com.locust.operator.controller.dto.OperationalMode;
-import com.locust.operator.customresource.LocustTest;
-import com.locust.operator.customresource.internaldto.LocustTestAffinity;
-import com.locust.operator.customresource.internaldto.LocustTestToleration;
-import io.fabric8.kubernetes.api.model.Quantity;
-import io.fabric8.kubernetes.api.model.ResourceRequirements;
-import jakarta.inject.Singleton;
-import lombok.extern.slf4j.Slf4j;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-
-import static com.locust.operator.controller.dto.OperationalMode.MASTER;
-import static com.locust.operator.controller.dto.OperationalMode.WORKER;
-import static com.locust.operator.controller.utils.Constants.CONTAINER_ARGS_SEPARATOR;
-import static com.locust.operator.controller.utils.Constants.DEFAULT_RESOURCE_TARGET;
-import static com.locust.operator.controller.utils.Constants.EXPORTER_CONTAINER_NAME;
-import static com.locust.operator.controller.utils.Constants.KAFKA_BOOTSTRAP_SERVERS;
-import static com.locust.operator.controller.utils.Constants.KAFKA_PASSWORD;
-import static com.locust.operator.controller.utils.Constants.KAFKA_SASL_JAAS_CONFIG;
-import static com.locust.operator.controller.utils.Constants.KAFKA_SASL_MECHANISM;
-import static com.locust.operator.controller.utils.Constants.KAFKA_SECURITY_ENABLED;
-import static com.locust.operator.controller.utils.Constants.KAFKA_SECURITY_PROTOCOL_CONFIG;
-import static com.locust.operator.controller.utils.Constants.KAFKA_USERNAME;
-import static com.locust.operator.controller.utils.Constants.MASTER_CMD_TEMPLATE;
-import static com.locust.operator.controller.utils.Constants.MASTER_NODE_PORTS;
-import static com.locust.operator.controller.utils.Constants.MASTER_NODE_REPLICA_COUNT;
-import static com.locust.operator.controller.utils.Constants.METRICS_EXPORTER_RESOURCE_TARGET;
-import static com.locust.operator.controller.utils.Constants.NODE_NAME_TEMPLATE;
-import static com.locust.operator.controller.utils.Constants.WORKER_CMD_TEMPLATE;
-import static com.locust.operator.controller.utils.Constants.WORKER_NODE_PORT;
-
-@Slf4j
-@Singleton
-public class LoadGenHelpers {
-
- private final SysConfig config;
-
- public LoadGenHelpers(SysConfig config) {
- this.config = config;
- }
-
- /**
- * Parse an LocustTest resource and convert it a LoadGenerationNode object after: - Constructing the node operational command based on
- * the `mode` parameter - Set the replica count based on the `mode` parameter
- *
- * @param resource Custom resource object
- * @param mode Operational mode
- * @return Load generation node configuration
- */
- public LoadGenerationNode generateLoadGenNodeObject(LocustTest resource, OperationalMode mode) {
-
- return LoadGenerationNode.builder()
- .name(constructNodeName(resource, mode))
- .labels(constructNodeLabels(resource, mode))
- .annotations(constructNodeAnnotations(resource, mode))
- .affinity(getNodeAffinity(resource))
- .tolerations(getPodToleration(resource))
- .ttlSecondsAfterFinished(getTtlSecondsAfterFinished())
- .command(constructNodeCommand(resource, mode))
- .operationalMode(mode)
- .image(getNodeImage(resource))
- .imagePullPolicy(getNodeImagePullPolicy(resource))
- .imagePullSecrets(getNodeImagePullSecrets(resource))
- .replicas(getReplicaCount(resource, mode))
- .ports(getNodePorts(resource, mode))
- .configMap(getConfigMap(resource))
- .libConfigMap(getLibConfigMap(resource))
- .build();
-
- }
-
- private Integer getTtlSecondsAfterFinished() {
- return this.config.getTtlSecondsAfterFinished();
- }
-
- private List getPodToleration(LocustTest resource) {
-
- return config.isTolerationsCrInjectionEnabled() ? resource.getSpec().getTolerations() : null;
-
- }
-
- public String getConfigMap(LocustTest resource) {
-
- return resource.getSpec().getConfigMap();
-
- }
-
- public String getLibConfigMap(LocustTest resource) {
-
- return resource.getSpec().getLibConfigMap();
-
- }
-
- private String getNodeImage(LocustTest resource) {
-
- return resource.getSpec().getImage();
-
- }
-
- private String getNodeImagePullPolicy(LocustTest resource) {
- return resource.getSpec().getImagePullPolicy();
- }
-
- private List getNodeImagePullSecrets(LocustTest resource) {
- return resource.getSpec().getImagePullSecrets();
- }
-
- public LocustTestAffinity getNodeAffinity(LocustTest resource) {
-
- return config.isAffinityCrInjectionEnabled() ? resource.getSpec().getAffinity() : null;
-
- }
-
- public String constructNodeName(LocustTest customResource, OperationalMode mode) {
-
- return String
- .format(NODE_NAME_TEMPLATE, customResource.getMetadata().getName(), mode.getMode())
- .replace(".", "-");
-
- }
-
- /**
- * Constructs the labels to attach to the master and worker pods.
- *
- * @param customResource The custom resource object
- * @param mode The operational mode
- * @return A non-null, possibly empty map of labels
- */
- public Map constructNodeLabels(final LocustTest customResource, final OperationalMode mode) {
- final Map> labels = Optional.ofNullable(customResource.getSpec().getLabels())
- .orElse(new HashMap<>());
- final Map result;
- if (mode.equals(MASTER)) {
- result = labels.getOrDefault(MASTER.getMode(), new HashMap<>());
- } else {
- // Worker
- result = labels.getOrDefault(WORKER.getMode(), new HashMap<>());
- }
- log.debug("Labels attached to {} pod are {}", mode.getMode(), result);
- return result;
- }
-
- /**
- * Constructs the annotations to attach to the master and worker pods.
- *
- * @param customResource The custom resource object
- * @param mode The operational mode
- * @return A non-null, possibly empty map of annotations
- */
- public Map constructNodeAnnotations(final LocustTest customResource, final OperationalMode mode) {
- final Map> annotations = Optional.ofNullable(customResource.getSpec().getAnnotations())
- .orElse(new HashMap<>());
- final Map result;
- if (mode.equals(MASTER)) {
- result = annotations.getOrDefault(MASTER.getMode(), new HashMap<>());
- } else {
- // Worker
- result = annotations.getOrDefault(WORKER.getMode(), new HashMap<>());
- }
- log.debug("Annotations attached to {} pod are {}", mode.getMode(), result);
- return result;
- }
-
- /**
- * Construct node command based on mode of operation
- *
- * @param customResource Custom resource object
- * @param mode Operational mode
- * @return Node command
- */
- private List constructNodeCommand(LocustTest customResource, OperationalMode mode) {
-
- String cmd;
-
- if (mode.equals(MASTER)) {
- cmd = String.format(MASTER_CMD_TEMPLATE,
- customResource.getSpec().getMasterCommandSeed(),
- MASTER_NODE_PORTS.getFirst(),
- customResource.getSpec().getWorkerReplicas());
- } else {
- // worker
- cmd = String.format(WORKER_CMD_TEMPLATE,
- customResource.getSpec().getWorkerCommandSeed(),
- MASTER_NODE_PORTS.getFirst(),
- constructNodeName(customResource, MASTER)
- );
- }
-
- log.debug("Constructed command: {}", cmd);
- // Split the command on <\s> to match expected container args
- return List.of(cmd.split(CONTAINER_ARGS_SEPARATOR));
- }
-
- /**
- * Get Replica count based on mode of operation
- *
- * @param customResource Custom resource object
- * @param mode Operational mode
- * @return Replica count
- */
- private int getReplicaCount(LocustTest customResource, OperationalMode mode) {
-
- Integer replicaCount;
-
- if (mode.equals(MASTER)) {
- replicaCount = MASTER_NODE_REPLICA_COUNT;
- } else {
- replicaCount = customResource.getSpec().getWorkerReplicas();
- }
-
- log.debug("Replica count for node: {}, with mode: {}, is: {}", customResource.getMetadata().getName(), mode, replicaCount);
- return replicaCount;
-
- }
-
- private List getNodePorts(LocustTest customResource, OperationalMode mode) {
-
- List ports;
-
- if (mode.equals(MASTER)) {
- ports = MASTER_NODE_PORTS;
- } else {
- ports = Collections.singletonList(WORKER_NODE_PORT);
- }
-
- log.debug("Ports list for node: {}, with mode: {}, is: {}", customResource.getMetadata().getName(), mode, ports);
- return ports;
-
- }
-
- public Map generateContainerEnvironmentMap() {
- HashMap environmentMap = new HashMap<>();
-
- environmentMap.put(KAFKA_BOOTSTRAP_SERVERS, config.getKafkaBootstrapServers());
- environmentMap.put(KAFKA_SECURITY_ENABLED, String.valueOf(config.isKafkaSecurityEnabled()));
- environmentMap.put(KAFKA_SECURITY_PROTOCOL_CONFIG, config.getKafkaSecurityProtocol());
- environmentMap.put(KAFKA_SASL_MECHANISM, config.getKafkaSaslMechanism());
- environmentMap.put(KAFKA_SASL_JAAS_CONFIG, config.getKafkaSaslJaasConfig());
- environmentMap.put(KAFKA_USERNAME, config.getKafkaUsername());
- environmentMap.put(KAFKA_PASSWORD, config.getKafkaUserPassword());
-
- return environmentMap;
- }
-
- /**
- * Constructs a MetricsExporterContainer using the configuration settings and resource requirements.
- *
- * @return A MetricsExporterContainer instance configured with the specified settings and resource requirements.
- */
- public MetricsExporterContainer constructMetricsExporterContainer() {
- return new MetricsExporterContainer(
- EXPORTER_CONTAINER_NAME,
- config.getMetricsExporterImage(),
- config.getMetricsExporterPullPolicy(),
- config.getMetricsExporterPort(),
- this.getResourceRequirements(METRICS_EXPORTER_RESOURCE_TARGET)
-
- );
- }
-
- /**
- * Get resource request and limit for containers
- *
- * @return resource requirements
- */
- public ResourceRequirements getResourceRequirements(String target) {
-
- Map resourceRequests;
- Map resourceLimits;
-
- // Default target
- if (target.equals(DEFAULT_RESOURCE_TARGET)) {
-
- resourceRequests = this.getResourceRequests();
- resourceLimits = this.getResourceLimits();
-
- // If not default target, then the assumed target is a "Metrics Exporter" container!
- // + No need for "else if" in order to avoid unneeded checks and increased complexity
- // + in a future implementation if another "target" is introduced,
- // + the method should be updated and this comment removed.
- } else {
-
- resourceRequests = this.getMetricsExporterResourceRequests();
- resourceLimits = this.getMetricsExporterResourceLimits();
-
- }
-
- final var resourceRequest = new ResourceRequirements();
-
- // Add memory and cpu resource requests
- resourceRequest.setRequests(resourceRequests);
-
- // Add memory and cpu resource limits
- resourceRequest.setLimits(resourceLimits);
-
- return resourceRequest;
-
- }
-
- /**
- * Get requested resources based on configuration (defaults or HELM).
- *
- * @return the resources request to use
- */
- private Map getResourceRequests() {
- String memOverride = config.getPodMemRequest();
- String cpuOverride = config.getPodCpuRequest();
- String ephemeralOverride = config.getPodEphemeralStorageRequest();
-
- log.debug("Using resource requests - cpu: {}, mem: {}, ephemeral: {}", cpuOverride, memOverride, ephemeralOverride);
-
- return generateResourceOverrideMap(memOverride, cpuOverride, ephemeralOverride);
- }
-
- /**
- * Get resource limits based on configuration (defaults or HELM).
- *
- * @return the resource limits to use
- */
- private Map getResourceLimits() {
- String memOverride = config.getPodMemLimit();
- String cpuOverride = config.getPodCpuLimit();
- String ephemeralOverride = config.getPodEphemeralStorageLimit();
-
- log.debug("Using resource limits - cpu: {}, mem: {}, ephemeral: {}", cpuOverride, memOverride, ephemeralOverride);
-
- return generateResourceOverrideMap(memOverride, cpuOverride, ephemeralOverride);
- }
-
- /**
- * Get resources request for Metrics Exporter container.
- *
- * @return the resource requests to use
- */
- private Map getMetricsExporterResourceRequests() {
- String memOverride = config.getMetricsExporterMemRequest();
- String cpuOverride = config.getMetricsExporterCpuRequest();
- String ephemeralOverride = config.getMetricsExporterEphemeralStorageRequest();
-
- log.debug("Using resource requests for metrics exporter - cpu: {}, mem: {}, ephemeral: {}", cpuOverride, memOverride,
- ephemeralOverride);
-
- return generateResourceOverrideMap(memOverride, cpuOverride, ephemeralOverride);
- }
-
- /**
- * Get resource limits for Metrics Exporter container.
- *
- * @return the resource requests to use
- */
- private Map getMetricsExporterResourceLimits() {
- String memOverride = config.getMetricsExporterMemLimit();
- String cpuOverride = config.getMetricsExporterCpuLimit();
- String ephemeralOverride = config.getMetricsExporterEphemeralStorageLimit();
-
- log.debug("Using resource limits - cpu: {}, mem: {}, ephemeral: {}", cpuOverride, memOverride, ephemeralOverride);
-
- return generateResourceOverrideMap(memOverride, cpuOverride, ephemeralOverride);
- }
-
- /**
- * Generates a resource override map based on the provided memory, CPU, and ephemeral storage overrides.
- *
- * @param memOverride The memory override value to be used for the "memory" resource.
- * @param cpuOverride The CPU override value to be used for the "cpu" resource.
- * @param ephemeralOverride The ephemeral storage override value to be used for the "ephemeral-storage" resource. This value will be
- * applied only if the Kubernetes version supports "ephemeral-storage" requests.
- * @return A Map containing resource overrides for memory, CPU, and ephemeral storage.
- */
- private Map generateResourceOverrideMap(String memOverride, String cpuOverride, String ephemeralOverride) {
- Map resourceOverrideMap = new HashMap<>();
-
- Optional.ofNullable(memOverride)
- .filter(s -> !s.isBlank())
- .ifPresent(override -> resourceOverrideMap.put("memory", new Quantity(override)));
-
- Optional.ofNullable(cpuOverride)
- .filter(s -> !s.isBlank())
- .ifPresent(override -> resourceOverrideMap.put("cpu", new Quantity(override)));
-
- Optional.ofNullable(ephemeralOverride)
- .filter(s -> !s.isBlank())
- .ifPresent(override -> resourceOverrideMap.put("ephemeral-storage", new Quantity(override)));
-
- return resourceOverrideMap;
- }
-
-}
diff --git a/src/main/java/com/locust/operator/controller/utils/resource/manage/ResourceCreationHelpers.java b/src/main/java/com/locust/operator/controller/utils/resource/manage/ResourceCreationHelpers.java
deleted file mode 100644
index 841a0d04..00000000
--- a/src/main/java/com/locust/operator/controller/utils/resource/manage/ResourceCreationHelpers.java
+++ /dev/null
@@ -1,587 +0,0 @@
-package com.locust.operator.controller.utils.resource.manage;
-
-import com.locust.operator.controller.dto.LoadGenerationNode;
-import com.locust.operator.controller.dto.MetricsExporterContainer;
-import com.locust.operator.controller.utils.LoadGenHelpers;
-import io.fabric8.kubernetes.api.model.Affinity;
-import io.fabric8.kubernetes.api.model.AffinityBuilder;
-import io.fabric8.kubernetes.api.model.ConfigMapVolumeSource;
-import io.fabric8.kubernetes.api.model.ConfigMapVolumeSourceBuilder;
-import io.fabric8.kubernetes.api.model.Container;
-import io.fabric8.kubernetes.api.model.ContainerBuilder;
-import io.fabric8.kubernetes.api.model.ContainerPort;
-import io.fabric8.kubernetes.api.model.ContainerPortBuilder;
-import io.fabric8.kubernetes.api.model.EnvVar;
-import io.fabric8.kubernetes.api.model.EnvVarBuilder;
-import io.fabric8.kubernetes.api.model.LocalObjectReference;
-import io.fabric8.kubernetes.api.model.LocalObjectReferenceBuilder;
-import io.fabric8.kubernetes.api.model.NodeAffinity;
-import io.fabric8.kubernetes.api.model.NodeAffinityBuilder;
-import io.fabric8.kubernetes.api.model.NodeSelector;
-import io.fabric8.kubernetes.api.model.NodeSelectorBuilder;
-import io.fabric8.kubernetes.api.model.NodeSelectorRequirement;
-import io.fabric8.kubernetes.api.model.NodeSelectorRequirementBuilder;
-import io.fabric8.kubernetes.api.model.NodeSelectorTermBuilder;
-import io.fabric8.kubernetes.api.model.ObjectMeta;
-import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
-import io.fabric8.kubernetes.api.model.PodSpec;
-import io.fabric8.kubernetes.api.model.PodSpecBuilder;
-import io.fabric8.kubernetes.api.model.PodTemplateSpec;
-import io.fabric8.kubernetes.api.model.PodTemplateSpecBuilder;
-import io.fabric8.kubernetes.api.model.Service;
-import io.fabric8.kubernetes.api.model.ServiceBuilder;
-import io.fabric8.kubernetes.api.model.Toleration;
-import io.fabric8.kubernetes.api.model.TolerationBuilder;
-import io.fabric8.kubernetes.api.model.Volume;
-import io.fabric8.kubernetes.api.model.VolumeBuilder;
-import io.fabric8.kubernetes.api.model.VolumeMount;
-import io.fabric8.kubernetes.api.model.VolumeMountBuilder;
-import io.fabric8.kubernetes.api.model.batch.v1.Job;
-import io.fabric8.kubernetes.api.model.batch.v1.JobBuilder;
-import io.fabric8.kubernetes.api.model.batch.v1.JobSpec;
-import io.fabric8.kubernetes.api.model.batch.v1.JobSpecBuilder;
-import jakarta.inject.Singleton;
-import lombok.extern.slf4j.Slf4j;
-import lombok.val;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.stream.Collectors;
-
-import static com.locust.operator.controller.dto.OperationalMode.MASTER;
-import static com.locust.operator.controller.dto.OperatorType.EQUAL;
-import static com.locust.operator.controller.utils.Constants.APP_DEFAULT_LABEL;
-import static com.locust.operator.controller.utils.Constants.BACKOFF_LIMIT;
-import static com.locust.operator.controller.utils.Constants.DEFAULT_MOUNT_PATH;
-import static com.locust.operator.controller.utils.Constants.LIB_MOUNT_PATH;
-import static com.locust.operator.controller.utils.Constants.DEFAULT_NODE_MATCH_EXPRESSION_OPERATOR;
-import static com.locust.operator.controller.utils.Constants.DEFAULT_RESOURCE_TARGET;
-import static com.locust.operator.controller.utils.Constants.DEFAULT_RESTART_POLICY;
-import static com.locust.operator.controller.utils.Constants.DEFAULT_WEB_UI_PORT;
-import static com.locust.operator.controller.utils.Constants.EXPORTER_PORT_ENV_VAR;
-import static com.locust.operator.controller.utils.Constants.EXPORTER_URI_ENV_VAR;
-import static com.locust.operator.controller.utils.Constants.EXPORTER_URI_ENV_VAR_VALUE;
-import static com.locust.operator.controller.utils.Constants.MANAGED_BY_LABEL_KEY;
-import static com.locust.operator.controller.utils.Constants.MANAGED_BY_LABEL_VALUE;
-import static com.locust.operator.controller.utils.Constants.METRICS_PORT_NAME;
-import static com.locust.operator.controller.utils.Constants.PORT_DEFAULT_NAME;
-import static com.locust.operator.controller.utils.Constants.PROMETHEUS_IO_ENDPOINT;
-import static com.locust.operator.controller.utils.Constants.PROMETHEUS_IO_PATH;
-import static com.locust.operator.controller.utils.Constants.PROMETHEUS_IO_PORT;
-import static com.locust.operator.controller.utils.Constants.PROMETHEUS_IO_SCRAPE;
-import static com.locust.operator.controller.utils.Constants.SERVICE_SELECTOR_LABEL;
-import static com.locust.operator.controller.utils.Constants.TCP_PROTOCOL;
-
-@Slf4j
-@Singleton
-public class ResourceCreationHelpers {
-
- private final LoadGenHelpers loadGenHelpers;
-
- public ResourceCreationHelpers(LoadGenHelpers loadGenHelpers) {
- this.loadGenHelpers = loadGenHelpers;
- }
-
- /**
- * Prepare a Kubernetes Job.
- *
- * Reference: Kubernetes Job Docs
- *
- * @param nodeConfig Load generation configuration
- * @return Job
- */
- protected Job prepareJob(LoadGenerationNode nodeConfig, String testName) {
- return new JobBuilder()
- .withMetadata(prepareJobMetadata(nodeConfig))
- .withSpec(prepareJobSpec(nodeConfig, testName))
- .build();
- }
-
- /**
- * Prepare Kubernetes 'Job > Metadata'.
- *
- * Reference: Kubernetes Job Docs
- *
- * @param nodeConfig Load generation configuration
- * @return ObjectMeta
- */
- private ObjectMeta prepareJobMetadata(LoadGenerationNode nodeConfig) {
-
- // * Metadata
- ObjectMeta jobMeta = new ObjectMetaBuilder()
- .withName(nodeConfig.getName())
- .build();
-
- log.debug("Prepared Kubernetes 'Job > Metadata': {}", jobMeta);
-
- return jobMeta;
-
- }
-
- /**
- * Prepare Kubernetes 'Job > Spec'.
- *
- * Reference: Kubernetes Job Docs
- *
- * @param nodeConfig Load generation configuration
- * @return JobSpec
- */
- private JobSpec prepareJobSpec(LoadGenerationNode nodeConfig, String testName) {
-
- // * Job Spec configuration
- JobSpec jobSpec = new JobSpecBuilder()
- .withTtlSecondsAfterFinished(nodeConfig.getTtlSecondsAfterFinished())
-
- // Pods count
- // Setting the `Parallelism` attribute will result in k8s deploying pods to match the requested value
- // effectively enabling control over the deployed pod count.
- .withParallelism(nodeConfig.getReplicas())
-
- // Backoff limit
- .withBackoffLimit(BACKOFF_LIMIT)
-
- // Template
- .withTemplate(prepareSpecTemplate(nodeConfig, testName))
-
- .build();
-
- log.debug("Prepared Kubernetes 'Job > Spec': {}", jobSpec);
-
- return jobSpec;
-
- }
-
- /**
- * Prepare Kubernetes 'Job > Spec > Template'.
- *
- * Reference: Kubernetes Job Docs
- *
- * @param nodeConfig Load generation configuration
- * @return PodTemplateSpec
- */
- private PodTemplateSpec prepareSpecTemplate(LoadGenerationNode nodeConfig, String testName) {
-
- PodTemplateSpec specTemplate = new PodTemplateSpecBuilder()
- .withMetadata(prepareTemplateMetadata(nodeConfig, testName))
- .withSpec(prepareTemplateSpec(nodeConfig))
- .build();
-
- log.debug("Prepared Kubernetes 'Job > Spec > Template': {}", specTemplate);
-
- return specTemplate;
-
- }
-
- /**
- * Prepare Kubernetes 'Job > Spec > Template > Metadata'.
- *
- * Reference: Kubernetes Job Docs
- *
- * @param nodeConfig The node configuration object.
- * @param testName Test name.
- * @return PodTemplateSpec.
- */
- private ObjectMeta prepareTemplateMetadata(LoadGenerationNode nodeConfig, String testName) {
-
- ObjectMeta templateMeta = new ObjectMetaBuilder()
- // Labels
- .addToLabels(APP_DEFAULT_LABEL, testName)
- .addToLabels(SERVICE_SELECTOR_LABEL, nodeConfig.getName())
- .addToLabels(MANAGED_BY_LABEL_KEY, MANAGED_BY_LABEL_VALUE)
- .addToLabels(nodeConfig.getLabels())
-
- // Annotations
- // Enable Prometheus endpoint discovery by Prometheus server
- .addToAnnotations(PROMETHEUS_IO_SCRAPE, "true")
- .addToAnnotations(PROMETHEUS_IO_PATH, PROMETHEUS_IO_ENDPOINT)
- .addToAnnotations(PROMETHEUS_IO_PORT, String.valueOf(loadGenHelpers.constructMetricsExporterContainer().getExporterPort()))
- .addToAnnotations(nodeConfig.getAnnotations())
-
- .build();
-
- log.debug("Prepared Kubernetes 'Job > Spec > Template > Metadata': {}", templateMeta);
-
- return templateMeta;
-
- }
-
- /**
- * Prepare Kubernetes 'Job > Spec > Template > Spec'.
- *
- * Reference: Kubernetes Job Docs
- *
- * @param nodeConfig Load generation configuration
- * @return PodTemplateSpec
- */
- private PodSpec prepareTemplateSpec(LoadGenerationNode nodeConfig) {
-
- PodSpec templateSpec = new PodSpecBuilder()
- // images
- .withImagePullSecrets(prepareImagePullSecrets(nodeConfig))
-
- // Containers
- .withContainers(prepareContainerList(nodeConfig))
- .withVolumes(prepareVolumesList(nodeConfig))
- .withAffinity(prepareAffinity(nodeConfig))
- .withTolerations(prepareTolerations(nodeConfig))
- .withRestartPolicy(DEFAULT_RESTART_POLICY)
- .build();
-
- log.debug("Prepared Kubernetes 'Job > Spec > Template > Spec': {}", templateSpec);
-
- return templateSpec;
-
- }
-
- private List prepareImagePullSecrets(LoadGenerationNode nodeConfig) {
- final List references = new ArrayList<>();
-
- if (nodeConfig.getImagePullSecrets() != null) {
- references.addAll(
- nodeConfig.getImagePullSecrets()
- .stream()
- .map(secretName -> new LocalObjectReferenceBuilder().withName(secretName).build())
- .toList()
- );
- }
-
- log.debug("Prepared image pull secrets: {}", references);
-
- return references;
- }
-
- private List prepareVolumesList(LoadGenerationNode nodeConfig) {
-
- List volumeList = new ArrayList<>();
-
- if (nodeConfig.getConfigMap() != null) {
- volumeList.add(prepareVolume(nodeConfig));
- }
-
- if (nodeConfig.getLibConfigMap() != null) {
- volumeList.add(prepareLibVolume(nodeConfig));
- }
-
- return volumeList;
-
- }
-
- private Affinity prepareAffinity(LoadGenerationNode nodeConfig) {
-
- // Construct Affinity
- var affinityBuilder = new AffinityBuilder();
-
- //! Note for future feature extensions:
- //! When adding support for more "Affinity" options, the evaluation inside the `if` condition is to be split into several checks.
- if (nodeConfig.getAffinity() != null && nodeConfig.getAffinity().getNodeAffinity() != null) {
- affinityBuilder.withNodeAffinity(prepareNodeAffinity(nodeConfig));
- }
-
- var affinity = affinityBuilder.build();
- log.debug("Prepared pod affinity: '{}'", affinity);
-
- return affinity;
-
- }
-
- private NodeAffinity prepareNodeAffinity(LoadGenerationNode nodeConfig) {
-
- var nodeAffinityBuilder = new NodeAffinityBuilder();
-
- // Prepare Required during scheduling node selector
- var requiredDuringSchedulingNodeSelector = prepareRequiredDuringSchedulingNodeSelector(nodeConfig);
-
- nodeAffinityBuilder.withRequiredDuringSchedulingIgnoredDuringExecution(requiredDuringSchedulingNodeSelector);
-
- return nodeAffinityBuilder.build();
-
- }
-
- private NodeSelector prepareRequiredDuringSchedulingNodeSelector(LoadGenerationNode nodeConfig) {
-
- // Required during scheduling
- List matchExpressions = new ArrayList<>();
-
- final var requiredDuringScheduling = Optional.ofNullable(
- nodeConfig.getAffinity().getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution()).orElse(new HashMap<>());
-
- requiredDuringScheduling.forEach((requiredAffinityKey, requiredAffinityValue) -> matchExpressions
- .add(new NodeSelectorRequirementBuilder().withKey(requiredAffinityKey).withOperator(DEFAULT_NODE_MATCH_EXPRESSION_OPERATOR)
- .withValues(requiredAffinityValue).build()));
-
- var nodeSelectorTerms = new NodeSelectorTermBuilder().withMatchExpressions(matchExpressions).build();
-
- return new NodeSelectorBuilder().withNodeSelectorTerms(nodeSelectorTerms).build();
- }
-
- private List prepareTolerations(LoadGenerationNode nodeConfig) {
-
- List tolerations = new ArrayList<>();
-
- if (nodeConfig.getTolerations() != null) {
-
- // For each configured node toleration from the Custom Resource, build a toleration object and add it to list
- nodeConfig.getTolerations().forEach(nodeToleration -> {
- var tolerationBuilder = new TolerationBuilder();
- tolerationBuilder
- .withKey(nodeToleration.getKey())
- .withOperator(nodeToleration.getOperator())
- .withEffect(nodeToleration.getEffect());
-
- if (nodeToleration.getOperator().equals(EQUAL.getType())) {
- tolerationBuilder.withValue(nodeToleration.getValue());
- }
-
- tolerations.add(tolerationBuilder.build());
- });
- }
-
- log.debug("Prepared pod tolerations: '{}'", tolerations);
- return tolerations;
-
- }
-
- private static Volume prepareVolume(LoadGenerationNode nodeConfig) {
- return new VolumeBuilder()
- .withName(nodeConfig.getName())
- .withConfigMap(prepareConfigMapSource(nodeConfig))
- .build();
- }
-
- private static Volume prepareLibVolume(LoadGenerationNode nodeConfig) {
- return new VolumeBuilder()
- .withName("lib")
- .withConfigMap(prepareLibConfigMapSource(nodeConfig))
- .build();
- }
-
- private static ConfigMapVolumeSource prepareConfigMapSource(LoadGenerationNode nodeConfig) {
- return new ConfigMapVolumeSourceBuilder()
- .withName(nodeConfig.getConfigMap())
- .build();
- }
-
- private static ConfigMapVolumeSource prepareLibConfigMapSource(LoadGenerationNode nodeConfig) {
- return new ConfigMapVolumeSourceBuilder()
- .withName(nodeConfig.getLibConfigMap())
- .build();
- }
-
- private List prepareContainerList(LoadGenerationNode nodeConfig) {
-
- List constantsList = new ArrayList<>();
-
- // Load generation container
- constantsList.add(prepareLoadGenContainer(nodeConfig));
-
- // Inject metrics container only if `master`
- if (nodeConfig.getOperationalMode().equals(MASTER)) {
- constantsList.add(prepareMetricsExporterContainer(loadGenHelpers.constructMetricsExporterContainer()));
- }
-
- return constantsList;
-
- }
-
- /**
- * Prepare locust prometheus metrics exporter container.
- *
- * Reference for default exporter: locust exporter docs
- *
- * @param exporterContainer The metrics exporter container
- * @return Container
- */
- private Container prepareMetricsExporterContainer(final MetricsExporterContainer exporterContainer) {
-
- HashMap envMap = new HashMap<>();
-
- envMap.put(EXPORTER_URI_ENV_VAR, EXPORTER_URI_ENV_VAR_VALUE);
- envMap.put(EXPORTER_PORT_ENV_VAR, String.format(":%s", exporterContainer.getExporterPort()));
-
- Container container = new ContainerBuilder()
-
- // Name
- .withName(exporterContainer.getContainerName())
-
- // Image
- .withImage(exporterContainer.getContainerImage())
- .withImagePullPolicy(exporterContainer.getPullPolicy())
-
- // Resources
- .withResources(exporterContainer.getResourceRequirements())
-
- // Ports
- .withPorts(new ContainerPortBuilder().withContainerPort(exporterContainer.getExporterPort()).build())
-
- // Environment
- .withEnv(prepareContainerEnvironmentVariables(envMap))
-
- .build();
-
- log.debug("Prepared Kubernetes metrics exporter container: {}", container);
-
- return container;
- }
-
- /**
- * Prepare a load generation container.
- *
- * Reference: Kubernetes containers Docs
- *
- * @param nodeConfig Load generation configuration
- * @return Container
- */
- private Container prepareLoadGenContainer(LoadGenerationNode nodeConfig) {
- Container container = new ContainerBuilder()
-
- // Name
- .withName(nodeConfig.getName())
-
- // Resource config
- .withResources(loadGenHelpers.getResourceRequirements(DEFAULT_RESOURCE_TARGET))
-
- // Image
- .withImage(nodeConfig.getImage())
- .withImagePullPolicy(nodeConfig.getImagePullPolicy())
-
- // Ports
- .withPorts(prepareContainerPorts(nodeConfig.getPorts()))
-
- // Environment
- .withEnv(prepareContainerEnvironmentVariables(loadGenHelpers.generateContainerEnvironmentMap()))
-
- // Container command
- .withArgs(nodeConfig.getCommand())
-
- // Mount configMap as volume
- .withVolumeMounts(prepareVolumeMounts(nodeConfig))
-
- .build();
-
- log.debug("Prepared Kubernetes load generator container: {}", container);
-
- return container;
- }
-
- private List prepareVolumeMounts(LoadGenerationNode nodeConfig) {
-
- List mounts = new ArrayList<>();
-
- if (nodeConfig.getConfigMap() != null) {
- // Prepare main configMap mount
- mounts.add(new VolumeMountBuilder()
- .withName(nodeConfig.getName())
- .withMountPath(DEFAULT_MOUNT_PATH)
- .withReadOnly(false)
- .build());
- }
-
- if (nodeConfig.getLibConfigMap() != null) {
- // Prepare lib configMap mount
- mounts.add(new VolumeMountBuilder()
- .withName("lib")
- .withMountPath(LIB_MOUNT_PATH)
- .withReadOnly(false)
- .build());
- }
-
- return mounts;
-
- }
-
- /**
- * Prepare container Environment variable.
- *
- * Reference: Kubernetes containers Docs
- *
- * @param envMap Environment variable map
- * @return ContainerPort
- */
- private List prepareContainerEnvironmentVariables(Map envMap) {
-
- List containerEnvVars = envMap
- .entrySet()
- .stream()
- .map(entry -> new EnvVarBuilder()
- .withName(entry.getKey())
- .withValue(entry.getValue())
- .build())
- .collect(Collectors.toList());
-
- log.debug("Prepared container environment variable list: {}", containerEnvVars);
-
- return containerEnvVars;
-
- }
-
- /**
- * Prepare container ports.
- *
- * Reference: Kubernetes containers Docs
- *
- * @param portsList Container port list
- * @return ContainerPort
- */
- private List prepareContainerPorts(List portsList) {
-
- List containerPortList = portsList
- .stream()
- .map(port -> new ContainerPortBuilder().withContainerPort(port).build())
- .collect(Collectors.toList());
-
- log.debug("Prepared container ports list: {}", containerPortList);
-
- return containerPortList;
-
- }
-
- protected Service prepareService(LoadGenerationNode nodeConfig) {
-
- // Initial service configuration
- var serviceConfig = new ServiceBuilder()
-
- // Metadata
- .withNewMetadata()
- .withName(nodeConfig.getName())
- .endMetadata()
-
- // Spec
- .withNewSpec()
- .withSelector(Collections.singletonMap(SERVICE_SELECTOR_LABEL, nodeConfig.getName()));
-
- // Map ports
- nodeConfig.getPorts()
- .stream()
- .filter(port -> !port.equals(DEFAULT_WEB_UI_PORT))
- .forEach(port -> {
-
- val portName = PORT_DEFAULT_NAME + port;
-
- serviceConfig
- .addNewPort()
- .withName(portName)
- .withProtocol(TCP_PROTOCOL)
- .withPort(port)
- .endPort();
- });
-
- // Metrics port
- serviceConfig
- .addNewPort()
- .withName(METRICS_PORT_NAME)
- .withProtocol(TCP_PROTOCOL)
- .withPort(loadGenHelpers.constructMetricsExporterContainer().getExporterPort())
- .endPort();
-
- // Finalize building the service object
- var service = serviceConfig.endSpec();
-
- return service.build();
-
- }
-
-}
diff --git a/src/main/java/com/locust/operator/controller/utils/resource/manage/ResourceCreationManager.java b/src/main/java/com/locust/operator/controller/utils/resource/manage/ResourceCreationManager.java
deleted file mode 100644
index 5f947955..00000000
--- a/src/main/java/com/locust/operator/controller/utils/resource/manage/ResourceCreationManager.java
+++ /dev/null
@@ -1,59 +0,0 @@
-package com.locust.operator.controller.utils.resource.manage;
-
-import com.locust.operator.controller.dto.LoadGenerationNode;
-import io.fabric8.kubernetes.api.model.Service;
-import io.fabric8.kubernetes.api.model.batch.v1.Job;
-import io.fabric8.kubernetes.client.ConfigBuilder;
-import io.fabric8.kubernetes.client.KubernetesClient;
-import io.fabric8.kubernetes.client.KubernetesClientBuilder;
-import jakarta.inject.Singleton;
-import lombok.extern.slf4j.Slf4j;
-
-@Slf4j
-@Singleton
-public class ResourceCreationManager {
-
- private final ResourceCreationHelpers creationHelper;
-
- public ResourceCreationManager(ResourceCreationHelpers creationHelper) {
- this.creationHelper = creationHelper;
- }
-
- public void createJob(LoadGenerationNode nodeConfig, String namespace, String testName) {
-
- try (KubernetesClient client = new KubernetesClientBuilder().withConfig(new ConfigBuilder().build()).build()) {
-
- log.info("Creating Job for: {} in namespace: {}", nodeConfig.getName(), namespace);
-
- Job job = creationHelper.prepareJob(nodeConfig, testName);
- job = client.batch().v1().jobs().inNamespace(namespace).resource(job).serverSideApply();
-
- log.info("Created job with name: {}", job.getMetadata().getName());
- log.debug("Created job details: {}", job);
- } catch (Exception e) {
-
- log.error("Exception occurred during Job creation: {}", e.getLocalizedMessage(), e);
-
- }
-
- }
-
- public void createMasterService(LoadGenerationNode nodeConfig, String namespace) {
- try (KubernetesClient client = new KubernetesClientBuilder().withConfig(new ConfigBuilder().build()).build()) {
-
- log.info("Creating service for: {} in namespace: {}", nodeConfig.getName(), namespace);
-
- Service service = creationHelper.prepareService(nodeConfig);
-
- service = client.services().inNamespace(namespace).resource(service).create();
- log.info("Created service with name: {}", service.getMetadata().getName());
- log.debug("Created service {}", service);
-
- } catch (Exception e) {
-
- log.error("Exception occurred during service creation: {}", e.getLocalizedMessage(), e);
-
- }
- }
-
-}
diff --git a/src/main/java/com/locust/operator/controller/utils/resource/manage/ResourceDeletionManager.java b/src/main/java/com/locust/operator/controller/utils/resource/manage/ResourceDeletionManager.java
deleted file mode 100644
index 38c2a0e5..00000000
--- a/src/main/java/com/locust/operator/controller/utils/resource/manage/ResourceDeletionManager.java
+++ /dev/null
@@ -1,64 +0,0 @@
-package com.locust.operator.controller.utils.resource.manage;
-
-import com.locust.operator.controller.dto.OperationalMode;
-import com.locust.operator.controller.utils.LoadGenHelpers;
-import com.locust.operator.customresource.LocustTest;
-import io.fabric8.kubernetes.api.model.StatusDetails;
-import io.fabric8.kubernetes.client.ConfigBuilder;
-import io.fabric8.kubernetes.client.KubernetesClient;
-import io.fabric8.kubernetes.client.KubernetesClientBuilder;
-import jakarta.inject.Singleton;
-import lombok.extern.slf4j.Slf4j;
-import lombok.val;
-
-import java.util.List;
-import java.util.Optional;
-
-@Slf4j
-@Singleton
-public class ResourceDeletionManager {
-
- private final LoadGenHelpers loadGenHelpers;
-
- public ResourceDeletionManager(LoadGenHelpers loadGenHelpers) {
- this.loadGenHelpers = loadGenHelpers;
- }
-
- public Optional> deleteJob(LocustTest crdInstance, OperationalMode mode) {
-
- try (KubernetesClient client = new KubernetesClientBuilder().withConfig(new ConfigBuilder().build()).build()) {
-
- val namespace = crdInstance.getMetadata().getNamespace();
- val resourceName = loadGenHelpers.constructNodeName(crdInstance, mode);
- log.info("Deleting Job for: {} in namespace: {}", crdInstance.getMetadata().getName(), namespace);
- return Optional.ofNullable(client.batch().v1().jobs().inNamespace(namespace).withName(resourceName).delete());
-
- } catch (Exception e) {
-
- log.error("Exception occurred during Job deletion: {}", e.getLocalizedMessage(), e);
- return Optional.empty();
-
- }
-
- }
-
- public Optional> deleteService(LocustTest crdInstance, OperationalMode mode) {
-
- try (KubernetesClient client = new KubernetesClientBuilder().withConfig(new ConfigBuilder().build()).build()) {
-
- val namespace = crdInstance.getMetadata().getNamespace();
- val resourceName = loadGenHelpers.constructNodeName(crdInstance, mode);
-
- log.info("Deleting Service for: {} in namespace: {}", crdInstance.getMetadata().getName(), namespace);
- return Optional.ofNullable(client.services().inNamespace(namespace).withName(resourceName).delete());
-
- } catch (Exception e) {
-
- log.error("Exception occurred during Service deletion: {}", e.getLocalizedMessage(), e);
- return Optional.empty();
-
- }
-
- }
-
-}
diff --git a/src/main/java/com/locust/operator/customresource/LocustTest.java b/src/main/java/com/locust/operator/customresource/LocustTest.java
deleted file mode 100644
index f5dd194c..00000000
--- a/src/main/java/com/locust/operator/customresource/LocustTest.java
+++ /dev/null
@@ -1,23 +0,0 @@
-package com.locust.operator.customresource;
-
-import io.fabric8.kubernetes.api.model.Namespaced;
-import io.fabric8.kubernetes.client.CustomResource;
-import io.fabric8.kubernetes.model.annotation.Group;
-import io.fabric8.kubernetes.model.annotation.Version;
-
-import java.io.Serial;
-
-@Group(LocustTest.GROUP)
-@Version(LocustTest.VERSION)
-public class LocustTest extends CustomResource implements Namespaced {
-
- public static final String GROUP = "locust.io";
- public static final String VERSION = "v1";
-
- // Used during deserialization to verify that the sender and receiver of a serialized object
- // have loaded classes for that object that are compatible with respect to serialization.
- // Manually setting this avoids the automatic allocation and thus removes the chance of unexpected failure during runtime.
- @Serial
- private static final long serialVersionUID = 1;
-
-}
diff --git a/src/main/java/com/locust/operator/customresource/LocustTestSpec.java b/src/main/java/com/locust/operator/customresource/LocustTestSpec.java
deleted file mode 100644
index 8d12dc46..00000000
--- a/src/main/java/com/locust/operator/customresource/LocustTestSpec.java
+++ /dev/null
@@ -1,43 +0,0 @@
-package com.locust.operator.customresource;
-
-import com.fasterxml.jackson.annotation.JsonInclude;
-import com.fasterxml.jackson.annotation.JsonInclude.Include;
-import com.fasterxml.jackson.databind.JsonDeserializer;
-import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
-import com.locust.operator.customresource.internaldto.LocustTestAffinity;
-import com.locust.operator.customresource.internaldto.LocustTestToleration;
-import io.fabric8.kubernetes.api.model.KubernetesResource;
-import lombok.Data;
-
-import java.io.Serial;
-import java.util.List;
-import java.util.Map;
-
-// This @JsonDeserialize overrides the deserializer used in KubernetesResource,
-// in order to be able to deserialize correctly the fields in the 'spec' field of the json
-@JsonDeserialize(using = JsonDeserializer.None.class)
-@JsonInclude(Include.NON_NULL)
-@Data
-public class LocustTestSpec implements KubernetesResource {
-
- // Used during deserialization to verify that the sender and receiver of a serialized object
- // have loaded classes for that object that are compatible with respect to serialization.
- // Manually setting this avoids the automatic allocation and thus removes the chance of unexpected failure during runtime.
- @Serial
- private static final long serialVersionUID = 1;
-
- private Map> labels;
- private Map> annotations;
- private LocustTestAffinity affinity;
- private List tolerations;
-
- private String masterCommandSeed;
- private String workerCommandSeed;
- private Integer workerReplicas;
- private String configMap;
- private String libConfigMap;
- private String image;
- private String imagePullPolicy;
- private List imagePullSecrets;
-
-}
diff --git a/src/main/java/com/locust/operator/customresource/internaldto/LocustTestAffinity.java b/src/main/java/com/locust/operator/customresource/internaldto/LocustTestAffinity.java
deleted file mode 100644
index ea9d404c..00000000
--- a/src/main/java/com/locust/operator/customresource/internaldto/LocustTestAffinity.java
+++ /dev/null
@@ -1,19 +0,0 @@
-package com.locust.operator.customresource.internaldto;
-
-import com.fasterxml.jackson.annotation.JsonInclude;
-import com.fasterxml.jackson.annotation.JsonInclude.Include;
-import com.fasterxml.jackson.databind.JsonDeserializer;
-import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
-import lombok.Data;
-
-import java.io.Serializable;
-
-@JsonDeserialize(using = JsonDeserializer.None.class)
-@JsonInclude(Include.NON_NULL)
-@Data
-public class LocustTestAffinity implements Serializable {
-
- private static final long serialVersionUID = 1;
- private LocustTestNodeAffinity nodeAffinity;
-
-}
diff --git a/src/main/java/com/locust/operator/customresource/internaldto/LocustTestNodeAffinity.java b/src/main/java/com/locust/operator/customresource/internaldto/LocustTestNodeAffinity.java
deleted file mode 100644
index e1bb9038..00000000
--- a/src/main/java/com/locust/operator/customresource/internaldto/LocustTestNodeAffinity.java
+++ /dev/null
@@ -1,21 +0,0 @@
-package com.locust.operator.customresource.internaldto;
-
-import com.fasterxml.jackson.annotation.JsonInclude;
-import com.fasterxml.jackson.annotation.JsonInclude.Include;
-import com.fasterxml.jackson.databind.JsonDeserializer;
-import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
-import lombok.Data;
-
-import java.io.Serializable;
-import java.util.Map;
-
-@JsonDeserialize(using = JsonDeserializer.None.class)
-@JsonInclude(Include.NON_NULL)
-@Data
-public class LocustTestNodeAffinity implements Serializable {
-
- private static final long serialVersionUID = 1;
-
- private Map requiredDuringSchedulingIgnoredDuringExecution;
-
-}
diff --git a/src/main/java/com/locust/operator/customresource/internaldto/LocustTestToleration.java b/src/main/java/com/locust/operator/customresource/internaldto/LocustTestToleration.java
deleted file mode 100644
index c25f2a00..00000000
--- a/src/main/java/com/locust/operator/customresource/internaldto/LocustTestToleration.java
+++ /dev/null
@@ -1,24 +0,0 @@
-package com.locust.operator.customresource.internaldto;
-
-import com.fasterxml.jackson.annotation.JsonInclude;
-import com.fasterxml.jackson.annotation.JsonInclude.Include;
-import lombok.AllArgsConstructor;
-import lombok.Data;
-import lombok.NoArgsConstructor;
-
-import java.io.Serializable;
-
-@Data
-@NoArgsConstructor
-@AllArgsConstructor
-@JsonInclude(Include.NON_NULL)
-public class LocustTestToleration implements Serializable {
-
- private static final long serialVersionUID = 1;
-
- private String key;
- private String operator;
- private String value;
- private String effect;
-
-}
diff --git a/src/main/resources/application.yml b/src/main/resources/application.yml
deleted file mode 100644
index 562705f9..00000000
--- a/src/main/resources/application.yml
+++ /dev/null
@@ -1,81 +0,0 @@
-micronaut:
- application:
- name: locustK8sOperator
- server:
- port: ${APP_SERVER_PORT:8081}
-
- metrics:
- binders:
- web:
- enabled: ${METRICS_WEB_ENABLE:true}
- jvm:
- enabled: ${METRICS_JVM_ENABLE:true}
- uptime:
- enabled: ${METRICS_UPTIME_ENABLE:true}
- processor:
- enabled: ${METRICS_PROCESSOR_ENABLE:true}
- files:
- enabled: ${METRICS_FILES_ENABLE:true}
- logback:
- enabled: ${METRICS_LOGBACK_ENABLE:true}
- executor:
- enabled: ${METRICS_EXECUTOR_ENABLE:true}
- kafka:
- enabled: ${METRICS_KAFKA_ENABLE:false}
- export:
- prometheus:
- enabled: true
- descriptions: true
- step: ${METRICS_PROMETHEUS_STEP:`PT30S`}
- enabled: true
-netty:
- default:
- allocator:
- max-order: 3
-
----
-config:
- watcher:
- # 1m -> every 1 minute
- resyncPeriod: 1m
- k8s:
- namespace: ${K8S_NAMESPACE:default}
- load-generation-jobs:
- ttl-seconds-after-finished: ${JOB_TTL_SECONDS_AFTER_FINISHED:}
- load-generation-pods:
- affinity:
- enableCrInjection: ${ENABLE_AFFINITY_CR_INJECTION:false}
- taintTolerations:
- enableCrInjection: ${ENABLE_TAINT_TOLERATIONS_CR_INJECTION:false}
- resource:
- cpu-request: ${POD_CPU_REQUEST:`250m`}
- mem-request: ${POD_MEM_REQUEST:`128Mi`}
- ephemeralStorage-request: ${POD_EPHEMERAL_REQUEST:`30M`}
- cpu-limit: ${POD_CPU_LIMIT:`1000m`}
- mem-limit: ${POD_MEM_LIMIT:`1024Mi`}
- ephemeralStorage-limit: ${POD_EPHEMERAL_LIMIT:`50M`}
- metricsExporter:
- image: ${METRICS_EXPORTER_IMAGE:`containersol/locust_exporter:v0.5.0`}
- port: ${METRICS_EXPORTER_PORT:`9646`}
- pullPolicy: ${METRICS_EXPORTER_IMAGE_PULL_POLICY:`Always`}
- resource:
- cpu-request: ${METRICS_EXPORTER_CPU_REQUEST:`250m`}
- mem-request: ${METRICS_EXPORTER_MEM_REQUEST:`128Mi`}
- ephemeralStorage-request: ${METRICS_EXPORTER_EPHEMERAL_REQUEST:`30M`}
- cpu-limit: ${METRICS_EXPORTER_CPU_LIMIT:`1000m`}
- mem-limit: ${METRICS_EXPORTER_MEM_LIMIT:`1024Mi`}
- ephemeralStorage-limit: ${METRICS_EXPORTER_EPHEMERAL_LIMIT:`50M`}
-
- kafka:
- bootstrap-servers: ${KAFKA_BOOTSTRAP_SERVERS:`localhost:9092`}
- security:
- enabled: ${KAFKA_SECURITY_ENABLED:`false`}
- protocol: ${KAFKA_SECURITY_PROTOCOL_CONFIG:`SASL_PLAINTEXT`}
- username: ${KAFKA_USERNAME:`localKafkaUser`}
- password: ${KAFKA_PASSWORD:`localKafkaPassword`}
- sasl:
- mechanism: ${KAFKA_SASL_MECHANISM:`SCRAM-SHA-512`}
- jaas:
- config: ${KAFKA_SASL_JAAS_CONFIG:`placeholder`}
-
-
diff --git a/src/main/resources/logback.xml b/src/main/resources/logback.xml
deleted file mode 100644
index bbf9e194..00000000
--- a/src/main/resources/logback.xml
+++ /dev/null
@@ -1,14 +0,0 @@
-
-
-
-
-
- %green(%d{ISO8601}) %highlight(%-5level) [%blue(%t)] %yellow(%C{1}): %msg%n%throwable
-
-
-
-
-
-
-
diff --git a/src/test/java/com/locust/OperatorStarterTests.java b/src/test/java/com/locust/OperatorStarterTests.java
deleted file mode 100644
index e6c880d4..00000000
--- a/src/test/java/com/locust/OperatorStarterTests.java
+++ /dev/null
@@ -1,59 +0,0 @@
-package com.locust;
-
-import com.locust.operator.controller.LocustTestReconciler;
-import io.fabric8.kubeapitest.junit.EnableKubeAPIServer;
-import io.fabric8.kubeapitest.junit.KubeConfig;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.DisplayName;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.TestInstance;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-
-import static com.locust.operator.controller.TestFixtures.creatKubernetesClient;
-import static com.locust.operator.controller.TestFixtures.setupCustomResourceDefinition;
-
-@TestInstance(TestInstance.Lifecycle.PER_CLASS)
-@EnableKubeAPIServer(updateKubeConfigFile = true)
-public class OperatorStarterTests {
-
- @KubeConfig
- static String configYaml;
-
- @Mock
- private LocustTestReconciler reconciler;
-
- @BeforeAll
- void setup() {
- MockitoAnnotations.openMocks(this);
- setupCustomResourceDefinition(creatKubernetesClient(configYaml));
- }
-
- /**
- * Ideally this test should be replaced with a combination of
- * {@link io.micronaut.test.extensions.junit5.annotation.MicronautTest @MicronautTest} and a check that "Application.isRunning()"
- * returns true.
- *
- * The only reason this test was designed this way is that I am not able to find how to inject `k8sServerUrl` variable into the test
- * environment before @MicronautTest boots up the Application. If the application trys to boot without `k8sServerUrl` being injected, it
- * will throw an Exception.
- */
- @Test
- @DisplayName("Functional: Check operator startup core")
- void operatorStarterCore() {
-
- // * Setup
- var operatorStarter = new LocustTestOperatorStarter(reconciler);
-
- // * Act & Assert
- // Passing "null" to onApplicationEvent(ServerStartupEvent event) is safe since the event is not used by the "operatorStarter" logic.
- //executeWithK8sMockServer(k8sServerUrl, () -> operatorStarter.onApplicationEvent(null));
- operatorStarter.onApplicationEvent(null);
-
- // * Assert
- // This test doesn't need an explicit assertion statement since the onApplicationEvent() logic
- // will throw an exception if it doesn't manage to start the Operator.
-
- }
-
-}
diff --git a/src/test/java/com/locust/operator/controller/LocustTestReconcilerTests.java b/src/test/java/com/locust/operator/controller/LocustTestReconcilerTests.java
deleted file mode 100644
index 1a911b78..00000000
--- a/src/test/java/com/locust/operator/controller/LocustTestReconcilerTests.java
+++ /dev/null
@@ -1,194 +0,0 @@
-package com.locust.operator.controller;
-
-import com.locust.operator.controller.config.SysConfig;
-import com.locust.operator.controller.utils.LoadGenHelpers;
-import com.locust.operator.controller.utils.resource.manage.ResourceCreationHelpers;
-import com.locust.operator.controller.utils.resource.manage.ResourceCreationManager;
-import com.locust.operator.controller.utils.resource.manage.ResourceDeletionManager;
-import io.fabric8.kubeapitest.junit.EnableKubeAPIServer;
-import io.fabric8.kubeapitest.junit.KubeConfig;
-import io.fabric8.kubernetes.client.KubernetesClient;
-import lombok.extern.slf4j.Slf4j;
-import lombok.val;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.DisplayName;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.TestInstance;
-import org.junit.jupiter.api.extension.ExtendWith;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.mockito.junit.jupiter.MockitoExtension;
-
-import static com.locust.operator.controller.TestFixtures.DEFAULT_NAMESPACE;
-import static com.locust.operator.controller.TestFixtures.creatKubernetesClient;
-import static com.locust.operator.controller.TestFixtures.deleteLocustTestCrd;
-import static com.locust.operator.controller.TestFixtures.prepareLocustTest;
-import static com.locust.operator.controller.TestFixtures.setupCustomResourceDefinition;
-import static com.locust.operator.controller.utils.TestFixtures.setupSysconfigMock;
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.assertj.core.api.SoftAssertions.assertSoftly;
-
-@Slf4j
-@ExtendWith(MockitoExtension.class)
-@TestInstance(TestInstance.Lifecycle.PER_CLASS)
-@EnableKubeAPIServer(updateKubeConfigFile = true)
-class LocustTestReconcilerTests {
-
- @Mock
- private SysConfig sysConfig;
-
- @KubeConfig
- static String configYaml;
-
- private LocustTestReconciler locustTestReconciler;
- private KubernetesClient k8sTestClient;
-
- @BeforeAll
- void setupMethodMock() {
-
- // Mock configuration
- MockitoAnnotations.openMocks(this);
- var loadGenHelpers = new LoadGenHelpers(sysConfig);
- var creationHelper = new ResourceCreationHelpers(loadGenHelpers);
- var creationManager = new ResourceCreationManager(creationHelper);
- var deletionManager = new ResourceDeletionManager(loadGenHelpers);
- locustTestReconciler = new LocustTestReconciler(loadGenHelpers, creationManager, deletionManager);
-
- // Setup SysConfig mock
- setupSysconfigMock(sysConfig);
-
- // Setup and deploy the CRD
- k8sTestClient = creatKubernetesClient(configYaml);
-
- }
-
- @BeforeEach
- void setup() {
- // Setup and deploy the CRD
- setupCustomResourceDefinition(k8sTestClient);
- }
-
- @AfterEach
- void tearDown() throws InterruptedException {
- // Clean resources from cluster
- deleteLocustTestCrd(k8sTestClient);
- // Dirty loop until the CRD is deleted to avoid test failures due to the CRD not being deleted
- while (!k8sTestClient.apiextensions().v1().customResourceDefinitions().list().getItems().isEmpty()) {
- Thread.sleep(50);
- }
- }
-
- @Test
- @DisplayName("Functional: Reconcile - onAdd event")
- void reconcileOnAddEvent() {
- // * Setup
- val expectedJobCount = 2;
- val expectedServiceCount = 2;
- val resourceName = "team.perftest";
- val expectedMasterResourceName = "team-perftest-master"; // Based on the conversion logic
- val expectedWorkerResourceName = "team-perftest-worker"; // Based on the conversion logic
- val locustTest = prepareLocustTest(resourceName);
-
- // * Act
- // Passing "null" to context is safe as it is not used in the "reconcile()" method
- locustTestReconciler.reconcile(locustTest, null);
-
- // Get All Jobs created
- val jobList = k8sTestClient.batch().v1().jobs().inNamespace(DEFAULT_NAMESPACE).list();
- log.debug("Acquired Job list: {}", jobList);
-
- // Get All Services created
- val serviceList = k8sTestClient.services().inNamespace(DEFAULT_NAMESPACE).list();
- log.debug("Acquired Service list: {}", serviceList);
-
- // * Assert
- assertSoftly(softly -> {
- // Assert master/worker jobs have been created
- softly.assertThat(jobList.getItems().size()).isEqualTo(expectedJobCount);
- softly.assertThat(jobList.getItems().get(0).getMetadata().getName()).isEqualTo(expectedMasterResourceName);
- softly.assertThat(jobList.getItems().get(1).getMetadata().getName()).isEqualTo(expectedWorkerResourceName);
-
- // Assert master service have been created
- softly.assertThat(serviceList.getItems().size()).isEqualTo(expectedServiceCount);
- // checking for the second item as the first service is the default kubernetes service
- softly.assertThat(serviceList.getItems().get(1).getMetadata().getName()).isEqualTo(expectedMasterResourceName);
- });
-
- }
-
- @Test
- @DisplayName("Functional: Reconcile - NOOP to onUpdate event")
- void reconcileOnUpdateEvent() {
- // * Setup
- val resourceName = "team.perftest";
- val resourceGeneration = 1L;
- val workerReplicaCount = 100;
- val locustTest = prepareLocustTest(resourceName, workerReplicaCount, resourceGeneration);
-
- // Deploy CR
- // Passing "null" to context is safe as it is not used in the "reconcile()" method
- locustTestReconciler.reconcile(locustTest, null);
-
- // * Act
- // Increase worker count
- val updatedResourceGeneration = 2L;
- val updatedWorkerReplicaCount = 37;
- val updatedLocustTest = prepareLocustTest(resourceName, updatedWorkerReplicaCount, updatedResourceGeneration);
-
- // Update deployed CR
- // Passing "null" to context is safe as it is not used in the "reconcile()" method
- locustTestReconciler.reconcile(updatedLocustTest, null);
-
- // Get All Jobs created
- val jobList = k8sTestClient.batch().v1().jobs().inNamespace(DEFAULT_NAMESPACE).list();
- log.debug("Acquired Job list: {}", jobList);
-
- // * Assert
- // Assert NOOP on update
- assertThat(jobList.getItems().get(1).getSpec().getParallelism()).isEqualTo(workerReplicaCount);
-
- }
-
- @Test
- @DisplayName("Functional: Reconcile - cleanup onDelete event")
- void cleanupOnDeleteEvent() {
- // * Setup
- val expectedJobCount = 0;
- val expectedServiceCount = 1; // 1 Because of the default kubernetes service remaining post deletion
- val resourceName = "team.perftest";
- val expectedDefaultServiceName = "kubernetes";
- val locustTest = prepareLocustTest(resourceName);
-
- // Deploy CR
- // Passing "null" to context is safe as it is not used in the "reconcile()" method
- locustTestReconciler.reconcile(locustTest, null);
-
- // * Act
- // Delete CR
- // Passing "null" to context is safe as it is not used in the "cleanup()" method
- locustTestReconciler.cleanup(locustTest, null);
-
- // Get All Jobs created
- val jobList = k8sTestClient.batch().v1().jobs().inNamespace(DEFAULT_NAMESPACE).list();
- log.debug("Acquired Job list: {}", jobList);
-
- // Get All Services created
- val serviceList = k8sTestClient.services().inNamespace(DEFAULT_NAMESPACE).list();
- log.debug("Acquired Service list: {}", serviceList);
-
- // * Assert
- assertSoftly(softly -> {
- // Assert master/worker jobs have been deleted
- softly.assertThat(jobList.getItems().size()).isEqualTo(expectedJobCount);
-
- // Assert master service have been deleted
- softly.assertThat(serviceList.getItems().size()).isEqualTo(expectedServiceCount);
- softly.assertThat(serviceList.getItems().get(0).getMetadata().getName()).isEqualTo(expectedDefaultServiceName);
-
- });
-
- }
-
-}
diff --git a/src/test/java/com/locust/operator/controller/TestFixtures.java b/src/test/java/com/locust/operator/controller/TestFixtures.java
deleted file mode 100644
index 8e98bcf2..00000000
--- a/src/test/java/com/locust/operator/controller/TestFixtures.java
+++ /dev/null
@@ -1,187 +0,0 @@
-package com.locust.operator.controller;
-
-import com.locust.operator.customresource.LocustTest;
-import com.locust.operator.customresource.LocustTestSpec;
-import io.fabric8.kubernetes.api.model.ObjectMetaBuilder;
-import io.fabric8.kubernetes.api.model.StatusDetails;
-import io.fabric8.kubernetes.api.model.apiextensions.v1.CustomResourceDefinition;
-import io.fabric8.kubernetes.client.Config;
-import io.fabric8.kubernetes.client.KubernetesClient;
-import io.fabric8.kubernetes.client.KubernetesClientBuilder;
-import lombok.NoArgsConstructor;
-import lombok.SneakyThrows;
-import lombok.extern.slf4j.Slf4j;
-import lombok.val;
-
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static com.locust.operator.controller.dto.OperationalMode.MASTER;
-import static com.locust.operator.controller.dto.OperationalMode.WORKER;
-import static com.locust.operator.customresource.LocustTest.GROUP;
-import static com.locust.operator.customresource.LocustTest.VERSION;
-import static lombok.AccessLevel.PRIVATE;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-
-@Slf4j
-@NoArgsConstructor(access = PRIVATE)
-public class TestFixtures {
-
- public static final String CRD_FILE_PATH = "charts/locust-k8s-operator/crds/locust-test-crd.yaml";
- public static final String DEFAULT_API_VERSION = GROUP + "/" + VERSION;
- public static final String KIND = "LocustTest";
- public static final String DEFAULT_SEED_COMMAND = "--locustfile src/demo.py";
- public static final String DEFAULT_TEST_IMAGE = "xlocust:latest";
- public static final String DEFAULT_IMAGE_PULL_POLICY = "IfNotPresent";
- public static final List DEFAULT_IMAGE_PULL_SECRETS = Collections.emptyList();
- public static final String DEFAULT_TEST_CONFIGMAP = "demo-test-configmap";
- public static final String DEFAULT_NAMESPACE = "default";
- public static final int REPLICAS = 50;
- public static final long DEFAULT_CR_GENERATION = 1L;
- public static final Map DEFAULT_MASTER_LABELS = Map.of("role", "master");
- public static final Map DEFAULT_WORKER_LABELS = Map.of("role", "worker");
- public static final Map DEFAULT_MASTER_ANNOTATIONS = Map.of("locust.io/role", "master");
- public static final Map DEFAULT_WORKER_ANNOTATIONS = new HashMap<>();
-
- @SneakyThrows
- public static CustomResourceDefinition prepareCustomResourceDefinition(KubernetesClient k8sClient) {
-
- return loadCrdFile(Paths.get(CRD_FILE_PATH), k8sClient);
- }
-
- private static CustomResourceDefinition loadCrdFile(Path path, KubernetesClient k8sClient) throws IOException {
-
- // Purge HELM specific lines from CRD file
- ByteArrayInputStream inputStream = removeHelmSpecialLines(path);
-
- // Load CRD
- return k8sClient.apiextensions().v1()
- .customResourceDefinitions()
- .load(inputStream)
- .item();
- }
-
- /**
- * Removes HELM condition that is not supported when loading the CRD for the component tests.
- *
- * @param path Path to the CRD file
- * @return Processed file content
- */
- private static ByteArrayInputStream removeHelmSpecialLines(Path path) throws IOException {
-
- StringBuilder stringBuilder = new StringBuilder();
- Files.lines(path).filter(line -> !line.startsWith("{{"))
- .map(line -> line + "\n")
- .forEach(stringBuilder::append);
- return new ByteArrayInputStream(stringBuilder.toString().getBytes(UTF_8));
- }
-
- public static CustomResourceDefinition createCrd(CustomResourceDefinition crd, KubernetesClient k8sClient) {
- return k8sClient.apiextensions().v1().customResourceDefinitions().resource(crd).create();
- }
-
- public static List deleteLocustTestCrd(KubernetesClient k8sClient) {
-
- log.debug("Deleting LocustTest CRD instances");
-
- val crdClient = k8sClient.apiextensions().v1().customResourceDefinitions().withName("locusttests.locust.io");
- return crdClient.delete();
- }
-
- public static LocustTest prepareLocustTest(String resourceName) {
-
- return prepareLocustTest(resourceName, REPLICAS, DEFAULT_CR_GENERATION);
-
- }
-
- public static LocustTest prepareLocustTest(String resourceName, Integer replicas, Long generation) {
-
- var locustTest = new LocustTest();
-
- // API version
- locustTest.setApiVersion(DEFAULT_API_VERSION);
-
- // Kind
- locustTest.setKind(KIND);
-
- // Metadata
- locustTest.setMetadata(new ObjectMetaBuilder()
- .withName(resourceName)
- .withNamespace(DEFAULT_NAMESPACE)
- .withGeneration(generation)
- .build());
-
- // Spec
- var spec = new LocustTestSpec();
- spec.setMasterCommandSeed(DEFAULT_SEED_COMMAND);
- spec.setWorkerCommandSeed(DEFAULT_SEED_COMMAND);
- spec.setConfigMap(DEFAULT_TEST_CONFIGMAP);
- spec.setImage(DEFAULT_TEST_IMAGE);
- spec.setImagePullPolicy(DEFAULT_IMAGE_PULL_POLICY);
- spec.setImagePullSecrets(DEFAULT_IMAGE_PULL_SECRETS);
- spec.setWorkerReplicas(replicas);
-
- var labels = new HashMap>();
- labels.put(MASTER.getMode(), DEFAULT_MASTER_LABELS);
- labels.put(WORKER.getMode(), DEFAULT_WORKER_LABELS);
- spec.setLabels(labels);
-
- var annotations = new HashMap>();
- annotations.put(MASTER.getMode(), DEFAULT_MASTER_ANNOTATIONS);
- annotations.put(WORKER.getMode(), DEFAULT_WORKER_ANNOTATIONS);
- spec.setAnnotations(annotations);
-
- locustTest.setSpec(spec);
- log.debug("Created resource object:\n{}", locustTest);
-
- return locustTest;
-
- }
-
- /**
- * Creates a new instance of KubernetesClient using the provided YAML configuration.
- *
- * This method uses the KubernetesClientBuilder to create a new KubernetesClient. The builder is configured with a Config object, which
- * is created from the provided YAML configuration using the Config.fromKubeconfig method.
- *
- * @param configYaml A string representing the Kubernetes configuration in YAML format.
- * @return A new instance of KubernetesClient configured according to the provided YAML configuration.
- */
- public static KubernetesClient creatKubernetesClient(String configYaml) {
- // Instantiate a KubernetesClientBuilder, configure it with the provided YAML configuration
- return new KubernetesClientBuilder().
- withConfig(Config.fromKubeconfig(configYaml))
- .build();
- }
-
- /**
- * Prepares and creates a Custom Resource Definition (CRD) in the Kubernetes cluster associated with the provided client.
- *
- * This method first prepares a CRD using the prepareCustomResourceDefinition method. It then creates that CRD in the Kubernetes cluster
- * using the createCrd method. Both of these methods use the provided KubernetesClient to interact with the Kubernetes API.
- * After the CRD is created, it logs the details of the created CRD and returns it.
- *
- * @param testClient The KubernetesClient to use when interacting with the Kubernetes server API.
- * @return The created CustomResourceDefinition.
- */
- public static CustomResourceDefinition setupCustomResourceDefinition(KubernetesClient testClient) {
- // Prepare and create the Custom Resource Definition
- val expectedCrd = prepareCustomResourceDefinition(testClient);
-
- // Create the Custom Resource Definition
- val crd = createCrd(expectedCrd, testClient);
-
- // Log and return the created CRD
- log.debug("Created CRD details: {}", crd);
- return crd;
- }
-
-}
diff --git a/src/test/java/com/locust/operator/controller/utils/LoadGenHelpersTests.java b/src/test/java/com/locust/operator/controller/utils/LoadGenHelpersTests.java
deleted file mode 100644
index b9cf41e8..00000000
--- a/src/test/java/com/locust/operator/controller/utils/LoadGenHelpersTests.java
+++ /dev/null
@@ -1,86 +0,0 @@
-package com.locust.operator.controller.utils;
-
-import com.locust.operator.controller.config.SysConfig;
-import lombok.val;
-import org.junit.jupiter.api.DisplayName;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.TestInstance;
-import org.junit.jupiter.api.extension.ExtendWith;
-import org.mockito.junit.jupiter.MockitoExtension;
-import org.mockito.junit.jupiter.MockitoSettings;
-import org.mockito.quality.Strictness;
-
-import static com.locust.operator.controller.TestFixtures.prepareLocustTest;
-import static com.locust.operator.controller.dto.OperationalMode.MASTER;
-import static com.locust.operator.controller.dto.OperationalMode.WORKER;
-import static com.locust.operator.controller.utils.Constants.DEFAULT_RESOURCE_TARGET;
-import static com.locust.operator.controller.utils.TestFixtures.assertNodeConfig;
-import static com.locust.operator.controller.utils.TestFixtures.setupSysconfigMock;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-@ExtendWith(MockitoExtension.class)
-@TestInstance(TestInstance.Lifecycle.PER_CLASS)
-@MockitoSettings(strictness = Strictness.LENIENT)
-public class LoadGenHelpersTests {
-
- @Test
- @DisplayName("Functional: Master node configuration")
- void masterLoadConfigGeneration() {
-
- // * Setup
- final SysConfig config = mock(SysConfig.class);
- setupSysconfigMock(config);
- final LoadGenHelpers loadGenHelpers = new LoadGenHelpers(config);
- val resourceName = "qe.performanceTest";
- val operationalMode = MASTER;
- val locustTest = prepareLocustTest(resourceName);
-
- // * Act
- val generatedNodeConfig = loadGenHelpers.generateLoadGenNodeObject(locustTest, operationalMode);
-
- // * Assert
- assertNodeConfig(locustTest, generatedNodeConfig, operationalMode);
-
- }
-
- @Test
- @DisplayName("Functional: Worker node configuration")
- void workerLoadConfigGeneration() {
-
- // * Setup
- final SysConfig config = mock(SysConfig.class);
- setupSysconfigMock(config);
- final LoadGenHelpers loadGenHelpers = new LoadGenHelpers(config);
- val resourceName = "eq.test";
- val operationalMode = WORKER;
- val locustTest = prepareLocustTest(resourceName);
-
- // * Act
- val generatedNodeConfig = loadGenHelpers.generateLoadGenNodeObject(locustTest, operationalMode);
-
- // * Assert
- assertNodeConfig(locustTest, generatedNodeConfig, operationalMode);
-
- }
-
- @Test
- @DisplayName("Functional: Unbound CPU limit configuration")
- void unboundCpuLimitConfiguration() {
-
- // * Setup
- final SysConfig config = mock(SysConfig.class);
- setupSysconfigMock(config);
- when(config.getPodCpuLimit()).thenReturn("");
- final LoadGenHelpers loadGenHelpers = new LoadGenHelpers(config);
-
- // * Act
- val resourceRequirements = loadGenHelpers.getResourceRequirements(DEFAULT_RESOURCE_TARGET);
-
- // * Assert
- assertFalse(resourceRequirements.getLimits().containsKey("cpu"), "CPU limit should not be set when the config value is blank");
-
- }
-
-}
diff --git a/src/test/java/com/locust/operator/controller/utils/TestFixtures.java b/src/test/java/com/locust/operator/controller/utils/TestFixtures.java
deleted file mode 100644
index c5bb3242..00000000
--- a/src/test/java/com/locust/operator/controller/utils/TestFixtures.java
+++ /dev/null
@@ -1,408 +0,0 @@
-package com.locust.operator.controller.utils;
-
-import com.locust.operator.controller.config.SysConfig;
-import com.locust.operator.controller.dto.LoadGenerationNode;
-import com.locust.operator.controller.dto.MetricsExporterContainer;
-import com.locust.operator.controller.dto.OperationalMode;
-import com.locust.operator.customresource.LocustTest;
-import com.locust.operator.customresource.internaldto.LocustTestAffinity;
-import com.locust.operator.customresource.internaldto.LocustTestNodeAffinity;
-import com.locust.operator.customresource.internaldto.LocustTestToleration;
-import io.fabric8.kubernetes.api.model.KubernetesResourceList;
-import io.fabric8.kubernetes.api.model.LocalObjectReference;
-import io.fabric8.kubernetes.api.model.NamespaceBuilder;
-import io.fabric8.kubernetes.api.model.PodList;
-import io.fabric8.kubernetes.api.model.Quantity;
-import io.fabric8.kubernetes.api.model.ResourceRequirements;
-import io.fabric8.kubernetes.api.model.ServiceList;
-import io.fabric8.kubernetes.api.model.batch.v1.JobList;
-import io.fabric8.kubernetes.client.KubernetesClient;
-import lombok.NoArgsConstructor;
-import lombok.SneakyThrows;
-import lombok.extern.slf4j.Slf4j;
-import lombok.val;
-
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-
-import static com.github.stefanbirkner.systemlambda.SystemLambda.withEnvironmentVariable;
-import static com.locust.operator.controller.TestFixtures.REPLICAS;
-import static com.locust.operator.controller.dto.OperationalMode.MASTER;
-import static com.locust.operator.controller.dto.OperatorType.EQUAL;
-import static com.locust.operator.controller.utils.Constants.CONTAINER_ARGS_SEPARATOR;
-import static com.locust.operator.controller.utils.Constants.EXPORTER_CONTAINER_NAME;
-import static com.locust.operator.controller.utils.Constants.KAFKA_BOOTSTRAP_SERVERS;
-import static com.locust.operator.controller.utils.Constants.KAFKA_PASSWORD;
-import static com.locust.operator.controller.utils.Constants.KAFKA_SASL_JAAS_CONFIG;
-import static com.locust.operator.controller.utils.Constants.KAFKA_SASL_MECHANISM;
-import static com.locust.operator.controller.utils.Constants.KAFKA_SECURITY_ENABLED;
-import static com.locust.operator.controller.utils.Constants.KAFKA_SECURITY_PROTOCOL_CONFIG;
-import static com.locust.operator.controller.utils.Constants.KAFKA_USERNAME;
-import static lombok.AccessLevel.PRIVATE;
-import static org.assertj.core.api.SoftAssertions.assertSoftly;
-import static org.mockito.Mockito.when;
-
-@Slf4j
-@NoArgsConstructor(access = PRIVATE)
-public class TestFixtures {
-
- public static final List DEFAULT_MASTER_PORT_LIST = List.of(5557, 5558, 8089);
- public static final List DEFAULT_WORKER_PORT_LIST = List.of(8080);
- public static final Integer MASTER_REPLICA_COUNT = 1;
- public static final String DEFAULT_SEED_COMMAND = "--locustfile src/GQ/src/demo.py";
- public static final String DEFAULT_TEST_IMAGE = "xlocust:latest";
- public static final String DEFAULT_METRICS_IMAGE = "containersol/locust_exporter:v0.5.0";
- public static final int EXPECTED_GENERIC_RESOURCE_COUNT = 1;
- public static final int EXPECTED_SERVICE_RESOURCE_COUNT = 2;
- public static final String K8S_SERVER_URL_ENV_VAR = "KUBERNETES_MASTER";
- public static final String MOCK_KAFKA_BOOTSTRAP_VALUE = "localhost:9092";
- public static final boolean MOCK_SECURITY_VALUE = true;
- public static final boolean MOCK_AFFINITY_INJECTION_VALUE = true;
- public static final boolean MOCK_TOLERATION_INJECTION_VALUE = true;
- public static final String MOCK_SECURITY_PROTOCOL_VALUE = "SASL_PLAINTEXT";
- public static final String MOCK_SASL_MECHANISM_VALUE = "SCRAM-SHA-512";
- public static final String MOCK_SASL_JAAS_CONFIG_VALUE = "placeholder";
- public static final String MOCK_USERNAME = "localKafkaUser";
- public static final String MOCK_PASSWORD = "localKafkaPassword";
- public static final String MOCK_POD_MEM = "1024Mi";
- public static final String MOCK_POD_CPU = "1000m";
- public static final String MOCK_POD_EPHEMERAL_STORAGE = "50M";
- public static final Integer MOCK_POD_PORT = 9646;
- public static final Integer MOCK_TTL_SECONDS_AFTER_FINISHED = 60;
- public static final Map DEFAULT_MASTER_LABELS = Map.of("role", "master");
- public static final Map DEFAULT_WORKER_LABELS = Map.of("role", "worker");
- public static final Map DEFAULT_MASTER_ANNOTATIONS = Map.of("locust.io/role", "master");
- public static final Map DEFAULT_WORKER_ANNOTATIONS = new HashMap<>();
-
- public static void assertNodeConfig(LocustTest customResource, LoadGenerationNode generatedNodeConfig,
- OperationalMode mode) {
-
- String expectedConfigName = customResource.getMetadata().getName().replace('.', '-');
-
- Map expectedLabels = mode.equals(MASTER) ? DEFAULT_MASTER_LABELS : DEFAULT_WORKER_LABELS;
-
- Map expectedAnnotations = mode.equals(MASTER) ? DEFAULT_MASTER_ANNOTATIONS : DEFAULT_WORKER_ANNOTATIONS;
-
- Integer expectedReplicas = mode.equals(MASTER) ? MASTER_REPLICA_COUNT : customResource.getSpec().getWorkerReplicas();
-
- List expectedPortList = mode.equals(MASTER) ? DEFAULT_MASTER_PORT_LIST : DEFAULT_WORKER_PORT_LIST;
-
- assertSoftly(softly -> {
- softly.assertThat(generatedNodeConfig.getName()).contains(expectedConfigName);
- softly.assertThat(generatedNodeConfig.getLabels()).isEqualTo(expectedLabels);
- softly.assertThat(generatedNodeConfig.getAnnotations()).isEqualTo(expectedAnnotations);
- softly.assertThat(generatedNodeConfig.getTtlSecondsAfterFinished()).isEqualTo(MOCK_TTL_SECONDS_AFTER_FINISHED);
- softly.assertThat(generatedNodeConfig.getOperationalMode()).isEqualTo(mode);
- softly.assertThat(generatedNodeConfig.getPorts()).isEqualTo(expectedPortList);
- softly.assertThat(generatedNodeConfig.getReplicas()).isEqualTo(expectedReplicas);
- });
- }
-
- public static LoadGenerationNode prepareNodeConfig(String nodeName, OperationalMode mode) {
- var nodeConfig = LoadGenerationNode.builder()
- .name(nodeName)
- .labels(mode.equals(MASTER) ? DEFAULT_MASTER_LABELS : DEFAULT_WORKER_LABELS)
- .annotations(mode.equals(MASTER) ? DEFAULT_MASTER_ANNOTATIONS : DEFAULT_WORKER_ANNOTATIONS)
- .command(List.of(DEFAULT_SEED_COMMAND.split(CONTAINER_ARGS_SEPARATOR)))
- .operationalMode(mode)
- .image(DEFAULT_TEST_IMAGE)
- .replicas(mode.equals(MASTER) ? MASTER_REPLICA_COUNT : REPLICAS)
- .ports(mode.equals(MASTER) ? DEFAULT_MASTER_PORT_LIST : DEFAULT_WORKER_PORT_LIST)
- .build();
-
- log.debug("Created node configuration: {}", nodeConfig);
- return nodeConfig;
- }
-
- public static LoadGenerationNode prepareNodeConfigWithTtlSecondsAfterFinished(
- String nodeName, OperationalMode mode, Integer ttlSecondsAfterFinished) {
- var nodeConfig = LoadGenerationNode.builder()
- .name(nodeName)
- .labels(mode.equals(MASTER) ? DEFAULT_MASTER_LABELS : DEFAULT_WORKER_LABELS)
- .annotations(mode.equals(MASTER) ? DEFAULT_MASTER_ANNOTATIONS : DEFAULT_WORKER_ANNOTATIONS)
- .ttlSecondsAfterFinished(ttlSecondsAfterFinished)
- .command(List.of(DEFAULT_SEED_COMMAND.split(CONTAINER_ARGS_SEPARATOR)))
- .operationalMode(mode)
- .image(DEFAULT_TEST_IMAGE)
- .replicas(mode.equals(MASTER) ? MASTER_REPLICA_COUNT : REPLICAS)
- .ports(mode.equals(MASTER) ? DEFAULT_MASTER_PORT_LIST : DEFAULT_WORKER_PORT_LIST)
- .build();
-
- log.debug("Created node configuration: {}", nodeConfig);
- return nodeConfig;
- }
-
- public static LoadGenerationNode prepareNodeConfigWithNodeAffinity(String nodeName, OperationalMode mode, String affinityKey,
- String affinityValue) {
-
- // Init instances
- val nodeAffinity = new LocustTestNodeAffinity();
- val affinity = new LocustTestAffinity();
- val nodeConfig = prepareNodeConfig(nodeName, mode);
-
- // Set affinity
- nodeAffinity.setRequiredDuringSchedulingIgnoredDuringExecution(Map.of(affinityKey, affinityValue));
- affinity.setNodeAffinity(nodeAffinity);
-
- // Push affinity config to object
- nodeConfig.setAffinity(affinity);
- log.debug("Created node configuration with nodeAffinity: {}", nodeConfig);
-
- return nodeConfig;
-
- }
-
- public static LoadGenerationNode prepareNodeConfigWithTolerations(String nodeName, OperationalMode mode,
- LocustTestToleration toleration) {
-
- val nodeConfig = prepareNodeConfig(nodeName, mode);
- nodeConfig.setTolerations(Collections.singletonList(toleration));
-
- return nodeConfig;
-
- }
-
- public static LoadGenerationNode prepareNodeConfigWithPullPolicyAndSecrets(
- String nodeName, OperationalMode mode, String pullPolicy, List pullSecrets) {
-
- val nodeConfig = prepareNodeConfig(nodeName, mode);
- nodeConfig.setImagePullPolicy(pullPolicy);
- nodeConfig.setImagePullSecrets(pullSecrets);
-
- return nodeConfig;
-
- }
-
- public static void assertK8sServiceCreation(String nodeName, ServiceList serviceList) {
- assertK8sResourceCreation(nodeName, serviceList, EXPECTED_SERVICE_RESOURCE_COUNT);
- }
-
- public static > void assertK8sResourceCreation(String nodeName, T resourceList) {
- assertK8sResourceCreation(nodeName, resourceList, EXPECTED_GENERIC_RESOURCE_COUNT);
- }
-
- private static > void assertK8sResourceCreation(String nodeName, T resourceList,
- int expectedResourceCount) {
- val resourceNamesList = extractNames(resourceList);
- log.debug("Acquired resource list: {}", resourceNamesList);
-
- assertSoftly(softly -> {
- softly.assertThat(resourceList.getItems().size()).isEqualTo(expectedResourceCount);
- softly.assertThat(resourceNamesList).contains(nodeName);
- });
- }
-
- private static > List extractNames(T resourceList) {
- return resourceList.getItems().stream()
- .map(item -> item.getMetadata().getName())
- .collect(Collectors.toList());
- }
-
- public static void createNamespace(KubernetesClient testClient, String namespace) {
-
- testClient.namespaces()
- .resource(new NamespaceBuilder()
- .withNewMetadata()
- .withName(namespace)
- .endMetadata()
- .build())
- .serverSideApply();
- }
-
- public static void assertImagePullData(LoadGenerationNode nodeConfig, PodList podList) {
-
- podList.getItems().forEach(pod -> {
- final List references = pod.getSpec()
- .getImagePullSecrets()
- .stream()
- .map(LocalObjectReference::getName)
- .toList();
-
- assertSoftly(softly -> softly.assertThat(references).isEqualTo(nodeConfig.getImagePullSecrets()));
-
- pod.getSpec()
- .getContainers()
- .forEach(container -> assertSoftly(
- softly -> softly.assertThat(container.getImagePullPolicy()).isEqualTo(nodeConfig.getImagePullPolicy())));
- });
- }
-
- public static void assertK8sTtlSecondsAfterFinished(JobList jobList, Integer ttlSecondsAfterFinished) {
- jobList.getItems().forEach(job -> {
- val actualTtlSecondsAfterFinished = job.getSpec().getTtlSecondsAfterFinished();
- assertSoftly(softly -> softly.assertThat(actualTtlSecondsAfterFinished).isEqualTo(ttlSecondsAfterFinished));
- });
- }
-
- public static void assertK8sNodeAffinity(LoadGenerationNode nodeConfig, JobList jobList, String k8sNodeLabelKey) {
-
- jobList.getItems().forEach(job -> {
- val nodeSelectorTerms = job.getSpec().getTemplate().getSpec().getAffinity().getNodeAffinity()
- .getRequiredDuringSchedulingIgnoredDuringExecution().getNodeSelectorTerms();
-
- nodeSelectorTerms.forEach(selectorTerm -> {
- val actualSelectorKey = selectorTerm.getMatchExpressions().get(0).getKey();
- val actualSelectorValue = selectorTerm.getMatchExpressions().get(0).getValues().get(0);
- val desiredSelectorValue = nodeConfig.getAffinity().getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution()
- .get(k8sNodeLabelKey);
-
- assertSoftly(softly -> {
- softly.assertThat(actualSelectorKey).isEqualTo(k8sNodeLabelKey);
- softly.assertThat(actualSelectorValue).isEqualTo(desiredSelectorValue);
- });
- });
-
- });
-
- }
-
- public static void assertK8sTolerations(JobList jobList, LocustTestToleration expectedToleration) {
-
- jobList.getItems().forEach(job -> {
- val actualTolerations = job.getSpec().getTemplate().getSpec().getTolerations();
-
- assertSoftly(softly -> {
- softly.assertThat(actualTolerations.get(0).getKey()).isEqualTo(expectedToleration.getKey());
- softly.assertThat(actualTolerations.get(0).getEffect()).isEqualTo(expectedToleration.getEffect());
- softly.assertThat(actualTolerations.get(0).getOperator()).isEqualTo(expectedToleration.getOperator());
-
- if (expectedToleration.getOperator().equals(EQUAL.getType())) {
- softly.assertThat(actualTolerations.get(0).getValue()).isEqualTo(expectedToleration.getValue());
- }
- });
-
- });
-
- }
-
- /**
- * Method to run `runnable` methods while injection the "KUBERNETES_MASTER" in the run environment. This is required as the core methods
- * uses an internally created k8s client that searches for configuration in a specific order. Injecting the environment variable this
- * way allows the internal client to connect to the mock server.
- *
- * @param mockServerUrl Mock server URL
- * @param runnable Runnable object to run
- */
- @SneakyThrows
- public static void executeWithK8sMockServer(String mockServerUrl, Runnable runnable) {
-
- withEnvironmentVariable(K8S_SERVER_URL_ENV_VAR, mockServerUrl)
- .execute(runnable::run);
-
- }
-
- public static Map containerEnvironmentMap() {
- HashMap environmentMap = new HashMap<>();
-
- environmentMap.put(KAFKA_BOOTSTRAP_SERVERS, MOCK_KAFKA_BOOTSTRAP_VALUE);
- environmentMap.put(KAFKA_SECURITY_ENABLED, String.valueOf(MOCK_SECURITY_VALUE));
- environmentMap.put(KAFKA_SECURITY_PROTOCOL_CONFIG, MOCK_SECURITY_PROTOCOL_VALUE);
- environmentMap.put(KAFKA_SASL_MECHANISM, MOCK_SASL_MECHANISM_VALUE);
- environmentMap.put(KAFKA_SASL_JAAS_CONFIG, MOCK_SASL_JAAS_CONFIG_VALUE);
- environmentMap.put(KAFKA_USERNAME, MOCK_USERNAME);
- environmentMap.put(KAFKA_PASSWORD, MOCK_PASSWORD);
-
- return environmentMap;
-
- }
-
- public static MetricsExporterContainer mockMetricsExporterContainer() {
-
- // Set Resource overrides
- Map resourceOverrideMap = new HashMap<>();
-
- resourceOverrideMap.put("memory", new Quantity(MOCK_POD_MEM));
- resourceOverrideMap.put("cpu", new Quantity(MOCK_POD_CPU));
- resourceOverrideMap.put("ephemeral-storage", new Quantity(MOCK_POD_EPHEMERAL_STORAGE));
-
- // Construct resource request
- final var mockResourceRequest = new ResourceRequirements();
-
- mockResourceRequest.setRequests(resourceOverrideMap);
- mockResourceRequest.setLimits(resourceOverrideMap);
-
- return new MetricsExporterContainer(
- EXPORTER_CONTAINER_NAME,
- "containersol/locust_exporter:v0.5.0",
- "Always",
- 9646,
- mockResourceRequest
-
- );
- }
-
- public static void setupSysconfigMock(SysConfig mockedConfInstance) {
-
- // Kafka
- when(mockedConfInstance.getKafkaBootstrapServers())
- .thenReturn(MOCK_KAFKA_BOOTSTRAP_VALUE);
- when(mockedConfInstance.isKafkaSecurityEnabled())
- .thenReturn(MOCK_SECURITY_VALUE);
- when(mockedConfInstance.getKafkaSecurityProtocol())
- .thenReturn(MOCK_SECURITY_PROTOCOL_VALUE);
- when(mockedConfInstance.getKafkaUsername())
- .thenReturn(MOCK_USERNAME);
- when(mockedConfInstance.getKafkaUserPassword())
- .thenReturn(MOCK_PASSWORD);
- when(mockedConfInstance.getKafkaSaslMechanism())
- .thenReturn(MOCK_SASL_MECHANISM_VALUE);
- when(mockedConfInstance.getKafkaSaslJaasConfig())
- .thenReturn(MOCK_SASL_JAAS_CONFIG_VALUE);
-
- // Resource request :: Load generation node
- when(mockedConfInstance.getPodMemRequest())
- .thenReturn(MOCK_POD_MEM);
- when(mockedConfInstance.getPodCpuRequest())
- .thenReturn(MOCK_POD_CPU);
- when(mockedConfInstance.getPodEphemeralStorageRequest())
- .thenReturn(MOCK_POD_EPHEMERAL_STORAGE);
-
- // Resource request :: Metrics exporter
- when(mockedConfInstance.getMetricsExporterMemRequest())
- .thenReturn(MOCK_POD_MEM);
- when(mockedConfInstance.getMetricsExporterCpuRequest())
- .thenReturn(MOCK_POD_CPU);
- when(mockedConfInstance.getMetricsExporterEphemeralStorageRequest())
- .thenReturn(MOCK_POD_EPHEMERAL_STORAGE);
-
- // Port binding :: Metrics exporter
- when(mockedConfInstance.getMetricsExporterPort())
- .thenReturn(MOCK_POD_PORT);
-
- // Image :: Metrics exporter
- when(mockedConfInstance.getMetricsExporterImage())
- .thenReturn(DEFAULT_METRICS_IMAGE);
-
- // Job characteristics
- when(mockedConfInstance.getTtlSecondsAfterFinished())
- .thenReturn(MOCK_TTL_SECONDS_AFTER_FINISHED);
-
- // Resource limit :: Load generation node
- when(mockedConfInstance.getPodMemLimit())
- .thenReturn(MOCK_POD_MEM);
- when(mockedConfInstance.getPodCpuLimit())
- .thenReturn(MOCK_POD_CPU);
- when(mockedConfInstance.getPodEphemeralStorageLimit())
- .thenReturn(MOCK_POD_EPHEMERAL_STORAGE);
-
- // Resource limit :: Metrics exporter
- when(mockedConfInstance.getMetricsExporterMemLimit())
- .thenReturn(MOCK_POD_MEM);
- when(mockedConfInstance.getMetricsExporterCpuLimit())
- .thenReturn(MOCK_POD_CPU);
- when(mockedConfInstance.getMetricsExporterEphemeralStorageLimit())
- .thenReturn(MOCK_POD_EPHEMERAL_STORAGE);
-
- // Affinity
- when(mockedConfInstance.isAffinityCrInjectionEnabled())
- .thenReturn(MOCK_AFFINITY_INJECTION_VALUE);
-
- // Taints Toleration
- when(mockedConfInstance.isTolerationsCrInjectionEnabled())
- .thenReturn(MOCK_TOLERATION_INJECTION_VALUE);
- }
-
-}
diff --git a/src/test/java/com/locust/operator/controller/utils/resource/manage/ResourceCreationManagerTests.java b/src/test/java/com/locust/operator/controller/utils/resource/manage/ResourceCreationManagerTests.java
deleted file mode 100644
index 8ec4ead3..00000000
--- a/src/test/java/com/locust/operator/controller/utils/resource/manage/ResourceCreationManagerTests.java
+++ /dev/null
@@ -1,271 +0,0 @@
-package com.locust.operator.controller.utils.resource.manage;
-
-import com.locust.operator.controller.utils.LoadGenHelpers;
-import com.locust.operator.customresource.internaldto.LocustTestToleration;
-import io.fabric8.kubeapitest.junit.EnableKubeAPIServer;
-import io.fabric8.kubeapitest.junit.KubeConfig;
-import io.fabric8.kubernetes.client.KubernetesClient;
-import lombok.extern.slf4j.Slf4j;
-import lombok.val;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.DisplayName;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.TestInstance;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-
-import java.util.List;
-
-import static com.locust.operator.controller.TestFixtures.creatKubernetesClient;
-import static com.locust.operator.controller.dto.OperationalMode.MASTER;
-import static com.locust.operator.controller.utils.TestFixtures.assertImagePullData;
-import static com.locust.operator.controller.utils.TestFixtures.assertK8sNodeAffinity;
-import static com.locust.operator.controller.utils.TestFixtures.assertK8sResourceCreation;
-import static com.locust.operator.controller.utils.TestFixtures.assertK8sServiceCreation;
-import static com.locust.operator.controller.utils.TestFixtures.assertK8sTolerations;
-import static com.locust.operator.controller.utils.TestFixtures.assertK8sTtlSecondsAfterFinished;
-import static com.locust.operator.controller.utils.TestFixtures.containerEnvironmentMap;
-import static com.locust.operator.controller.utils.TestFixtures.createNamespace;
-import static com.locust.operator.controller.utils.TestFixtures.mockMetricsExporterContainer;
-import static com.locust.operator.controller.utils.TestFixtures.prepareNodeConfig;
-import static com.locust.operator.controller.utils.TestFixtures.prepareNodeConfigWithNodeAffinity;
-import static com.locust.operator.controller.utils.TestFixtures.prepareNodeConfigWithPullPolicyAndSecrets;
-import static com.locust.operator.controller.utils.TestFixtures.prepareNodeConfigWithTolerations;
-import static com.locust.operator.controller.utils.TestFixtures.prepareNodeConfigWithTtlSecondsAfterFinished;
-import static org.mockito.Mockito.when;
-
-@Slf4j
-@TestInstance(TestInstance.Lifecycle.PER_CLASS)
-@EnableKubeAPIServer(updateKubeConfigFile = true)
-public class ResourceCreationManagerTests {
-
- @Mock
- private LoadGenHelpers loadGenHelpers;
- private ResourceCreationManager CreationManager;
-
- @KubeConfig
- static String configYaml;
-
- KubernetesClient testClient;
-
- @BeforeAll
- void setupMethodMock() {
-
- MockitoAnnotations.openMocks(this);
- var creationHelper = new ResourceCreationHelpers(loadGenHelpers);
- CreationManager = new ResourceCreationManager(creationHelper);
- when(loadGenHelpers.generateContainerEnvironmentMap())
- .thenReturn(containerEnvironmentMap());
- when(loadGenHelpers.constructMetricsExporterContainer())
- .thenReturn(mockMetricsExporterContainer());
-
- testClient = creatKubernetesClient(configYaml);
- }
-
- @Test
- @DisplayName("Functional: Create a kubernetes Job")
- void createJobTest() {
-
- // * Setup
- val namespace = "default";
- val nodeName = "mnt-demo-test";
- val resourceName = "mnt.demo-test";
- val nodeConfig = prepareNodeConfig(nodeName, MASTER);
-
- // * Act
- CreationManager.createJob(nodeConfig, namespace, resourceName);
-
- // Get All Jobs created by the method
- val jobList = testClient.batch().v1().jobs().inNamespace(namespace).list();
- log.debug("Acquired Job list: {}", jobList);
-
- // * Assert
- assertK8sResourceCreation(nodeName, jobList);
-
- }
-
- @Test
- @DisplayName("Functional: Create a kubernetes Service")
- void createServiceTest() {
-
- // * Setup
- val namespace = "default";
- val nodeName = "act-kafka-test";
- val nodeConfig = prepareNodeConfig(nodeName, MASTER);
-
- // * Act
- CreationManager.createMasterService(nodeConfig, namespace);
-
- // Get All Services created by the method
- val serviceList = testClient.services().inNamespace(namespace).list();
- log.debug("Acquired Service list: {}", serviceList);
-
- // * Assert
- assertK8sServiceCreation(nodeName, serviceList);
-
- }
-
- @Test
- @DisplayName("Functional: Create a kubernetes Job with Default TTL Seconds After Finished")
- void createJobWithDefaultTtlSecondsAfterFinishedTest() {
-
- // * Setup
- val namespace = "ttl-ns";
- val nodeName = "ttl-demo-test";
- val resourceName = "ttl.demo-test";
- final Integer defaultTtlSecondsAfterFinished = null;
- val nodeConfig = prepareNodeConfigWithTtlSecondsAfterFinished(nodeName, MASTER, defaultTtlSecondsAfterFinished);
-
- // * Act
- CreationManager.createJob(nodeConfig, namespace, resourceName);
-
- // Get All Jobs created by the method
- val jobList = testClient.batch().v1().jobs().inNamespace(namespace).list();
- log.debug("Acquired Job list: {}", jobList);
-
- // * Assert
- assertK8sTtlSecondsAfterFinished(jobList, defaultTtlSecondsAfterFinished);
-
- }
-
- @Test
- @DisplayName("Functional: Create a kubernetes Job with TTL Seconds After Finished")
- void createJobWithTtlSecondsAfterFinishedTest() {
-
- // * Setup
- val namespace = "ttl-ns";
- val nodeName = "ttl-demo-test";
- val resourceName = "ttl.demo-test";
- val ttlSecondsAfterFinished = Integer.valueOf(120);
- val nodeConfig = prepareNodeConfigWithTtlSecondsAfterFinished(nodeName, MASTER, ttlSecondsAfterFinished);
-
- // * Act
- CreationManager.createJob(nodeConfig, namespace, resourceName);
-
- // Get All Jobs created by the method
- val jobList = testClient.batch().v1().jobs().inNamespace(namespace).list();
- log.debug("Acquired Job list: {}", jobList);
-
- // * Assert
- assertK8sTtlSecondsAfterFinished(jobList, ttlSecondsAfterFinished);
-
- }
-
- @Test
- @DisplayName("Functional: Create a kubernetes Job with Node Affinity")
- void createJobWithNodeAffinityTest() {
-
- // * Setup
- val namespace = "node-affinity";
- val nodeName = "locust-demo-test";
- val resourceName = "locust.demo-test";
- val k8sNodeLabelKey = "organisation.com/nodeLabel";
- val k8sNodeLabelValue = "performance-nodes";
- val nodeConfig = prepareNodeConfigWithNodeAffinity(nodeName, MASTER, k8sNodeLabelKey, k8sNodeLabelValue);
-
- // Create test namespace
- createNamespace(testClient, namespace);
-
- // * Act
- CreationManager.createJob(nodeConfig, namespace, resourceName);
-
- // Get All Jobs created by the method
- val jobList = testClient.batch().v1().jobs().inNamespace(namespace).list();
- log.debug("Acquired Job list: {}", jobList);
-
- // * Assert
- assertK8sResourceCreation(nodeName, jobList);
- assertK8sNodeAffinity(nodeConfig, jobList, k8sNodeLabelKey);
-
- }
-
- @Test
- @DisplayName("Functional: Create a kubernetes Job with Tolerations and Toleration Operator set to Equal")
- void createJobWithTolerationsAndOperatorEqualTest() {
- // * Setup
- val namespace = "taint-toleration-equal";
- val nodeName = "locust-demo-test";
- val resourceName = "locust.demo-test";
-
- // Toleration
- val tolerationKey = "taintA";
- val tolerationEffect = "NoSchedule";
- val tolerationEqualOperator = "Equal";
- val tolerationValue = "dedicatedToPerformance";
-
- // Create test namespace
- createNamespace(testClient, namespace);
-
- val toleration = new LocustTestToleration(tolerationKey, tolerationEqualOperator, tolerationValue, tolerationEffect);
- val nodeConfig = prepareNodeConfigWithTolerations(nodeName, MASTER, toleration);
-
- // * Act
- CreationManager.createJob(nodeConfig, namespace, resourceName);
-
- // Get All Jobs created by the method
- val jobList = testClient.batch().v1().jobs().inNamespace(namespace).list();
- log.debug("Acquired Job list: {}", jobList);
-
- // * Assert
- assertK8sResourceCreation(nodeName, jobList);
- assertK8sTolerations(jobList, toleration);
-
- }
-
- @Test
- @DisplayName("Functional: Create a kubernetes Job with Tolerations and Toleration Operator set to Exists")
- void createJobWithTolerationsAndOperatorExistsTest() {
- // * Setup
- val namespace = "taint-toleration-exists";
- val nodeName = "locust-demo-test";
- val resourceName = "locust.demo-test";
-
- // Toleration
- val tolerationKey = "taintA";
- val tolerationEffect = "NoSchedule";
- val tolerationEqualOperator = "Exists";
-
- val toleration = new LocustTestToleration(tolerationKey, tolerationEqualOperator, null, tolerationEffect);
- val nodeConfig = prepareNodeConfigWithTolerations(nodeName, MASTER, toleration);
-
- // Create test namespace
- createNamespace(testClient, namespace);
-
- // * Act
- CreationManager.createJob(nodeConfig, namespace, resourceName);
-
- // Get All Jobs created by the method
- val jobList = testClient.batch().v1().jobs().inNamespace(namespace).list();
- log.debug("Acquired Job list: {}", jobList);
-
- // * Assert
- assertK8sResourceCreation(nodeName, jobList);
- assertK8sTolerations(jobList, toleration);
-
- }
-
- @Test
- @DisplayName("Functional: Create a kubernetes Job with Pod image pull policy and secrets")
- void createJobWithPodImagePullPolicyAndSecrets() {
-
- // * Setup
- val namespace = "default";
- val nodeName = "mnt-demo-test";
- val resourceName = "mnt.demo-test";
- val nodeConfig = prepareNodeConfigWithPullPolicyAndSecrets(
- nodeName, MASTER, "Always", List.of("my-private-registry-secret", "gcr-cred-secret")
- );
-
- // * Act
- CreationManager.createJob(nodeConfig, namespace, resourceName);
-
- // Get All Pods created by the method
- val podList = testClient.pods().inNamespace(namespace).list();
- log.debug("Acquired Pod list: {}", podList);
-
- // * Assert
- assertImagePullData(nodeConfig, podList);
-
- }
-
-}
diff --git a/src/test/java/com/locust/operator/controller/utils/resource/manage/ResourceDeletionManagerTests.java b/src/test/java/com/locust/operator/controller/utils/resource/manage/ResourceDeletionManagerTests.java
deleted file mode 100644
index 6d92b228..00000000
--- a/src/test/java/com/locust/operator/controller/utils/resource/manage/ResourceDeletionManagerTests.java
+++ /dev/null
@@ -1,139 +0,0 @@
-package com.locust.operator.controller.utils.resource.manage;
-
-import com.locust.operator.controller.config.SysConfig;
-import com.locust.operator.controller.utils.LoadGenHelpers;
-import io.fabric8.kubernetes.client.KubernetesClient;
-import io.fabric8.kubernetes.client.server.mock.EnableKubernetesMockClient;
-import lombok.extern.slf4j.Slf4j;
-import lombok.val;
-import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.DisplayName;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.TestInstance;
-import org.junit.jupiter.api.extension.ExtendWith;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-import org.mockito.junit.jupiter.MockitoExtension;
-
-import java.util.Collections;
-
-import static com.locust.operator.controller.TestFixtures.prepareLocustTest;
-import static com.locust.operator.controller.dto.OperationalMode.MASTER;
-import static com.locust.operator.controller.utils.TestFixtures.executeWithK8sMockServer;
-import static com.locust.operator.controller.utils.TestFixtures.prepareNodeConfig;
-import static com.locust.operator.controller.utils.TestFixtures.setupSysconfigMock;
-import static org.assertj.core.api.AssertionsForClassTypes.assertThat;
-
-@Slf4j
-@ExtendWith(MockitoExtension.class)
-@TestInstance(TestInstance.Lifecycle.PER_CLASS)
-@EnableKubernetesMockClient(https = false, crud = true)
-public class ResourceDeletionManagerTests {
-
- @Mock
- private SysConfig sysConfig;
- private ResourceCreationManager creationManager;
- private ResourceDeletionManager deletionManager;
-
- String k8sServerUrl;
- KubernetesClient testClient;
-
- @BeforeAll
- void setupMethodMock() {
-
- MockitoAnnotations.openMocks(this);
- var loadGenHelpers = new LoadGenHelpers(sysConfig);
- var creationHelper = new ResourceCreationHelpers(loadGenHelpers);
- creationManager = new ResourceCreationManager(creationHelper);
- deletionManager = new ResourceDeletionManager(loadGenHelpers);
- setupSysconfigMock(sysConfig);
-
- }
-
- @BeforeEach
- void setup() {
- k8sServerUrl = testClient.getMasterUrl().toString();
- }
-
- @Test
- @DisplayName("Functional: Delete a kubernetes Job")
- void deleteJobTest() {
-
- // * Setup
- val namespace = "default";
- val nodeName = "mnt-demo-test-master";
- val resourceName = "mnt.demo-test";
- val nodeConfig = prepareNodeConfig(nodeName, MASTER);
- val locustTest = prepareLocustTest(resourceName);
-
- // * Act
- executeWithK8sMockServer(k8sServerUrl, () -> creationManager.createJob(nodeConfig, namespace, resourceName));
- executeWithK8sMockServer(k8sServerUrl, () -> deletionManager.deleteJob(locustTest, MASTER));
-
- // Get All Jobs created by the method
- val jobList = testClient.batch().v1().jobs().inNamespace(namespace).list();
- log.debug("Acquired Job list: {}", jobList);
-
- // * Assert
- assertThat(jobList.getItems().size()).isEqualTo(0);
-
- }
-
- @Test
- @DisplayName("Functional: Check that when Job deletion fails, an empty list is returned.")
- void deleteJobFailureReturnEmptyListTest() {
-
- // * Setup
- val resourceName = "mnt.demo-test";
- val locustTest = prepareLocustTest(resourceName);
-
- // * Act
- val deletedJobStatus = deletionManager.deleteJob(locustTest, MASTER).orElse(Collections.emptyList());
-
- // * Assert
- assertThat(deletedJobStatus.isEmpty()).isTrue();
-
- }
-
- @Test
- @DisplayName("Functional: Delete a kubernetes Service")
- void deleteServiceTest() {
-
- // * Setup
- val namespace = "default";
- val nodeName = "act-kafka-test-master";
- val resourceName = "act.kafka-test";
- val nodeConfig = prepareNodeConfig(nodeName, MASTER);
- val locustTest = prepareLocustTest(resourceName);
-
- // * Act
- executeWithK8sMockServer(k8sServerUrl, () -> creationManager.createMasterService(nodeConfig, namespace));
- executeWithK8sMockServer(k8sServerUrl, () -> deletionManager.deleteService(locustTest, MASTER));
-
- // Get All Jobs created by the method
- val serviceList = testClient.services().inNamespace(namespace).list();
- log.debug("Acquired Deployment list: {}", serviceList);
-
- // * Assert
- assertThat(serviceList.getItems().size()).isEqualTo(0);
-
- }
-
- @Test
- @DisplayName("Functional: Check that when Service deletion fails, empty list is returned")
- void deleteServiceFailureReturnEmptyListTest() {
-
- // * Setup
- val resourceName = "mnt.demo-test";
- val locustTest = prepareLocustTest(resourceName);
-
- // * Act
- val deletedServiceStatus = deletionManager.deleteService(locustTest, MASTER).orElse(Collections.emptyList());
-
- // * Assert
- assertThat(deletedServiceStatus.isEmpty()).isTrue();
-
- }
-
-}
diff --git a/src/test/resources/logback-test.xml b/src/test/resources/logback-test.xml
deleted file mode 100644
index 3f27c99f..00000000
--- a/src/test/resources/logback-test.xml
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
-
- false
-
-
- %green(%d{ISO8601}) %highlight(%-5level) [%blue(%t)] %yellow(%C{1}): %msg%n%throwable
-
-
-
-
-
-
-
-
-
-
-
diff --git a/test/e2e/.gitkeep b/test/e2e/.gitkeep
new file mode 100644
index 00000000..e69de29b
diff --git a/test/e2e/conversion/configmap.yaml b/test/e2e/conversion/configmap.yaml
new file mode 100644
index 00000000..1312ea5c
--- /dev/null
+++ b/test/e2e/conversion/configmap.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: e2e-test-scripts
+ namespace: default
+data:
+ locustfile.py: |
+ from locust import HttpUser, task, between
+
+ class E2ETestUser(HttpUser):
+ wait_time = between(1, 2)
+
+ @task
+ def hello(self):
+ self.client.get("/")
diff --git a/test/e2e/conversion/run-e2e.sh b/test/e2e/conversion/run-e2e.sh
new file mode 100755
index 00000000..e3289ebc
--- /dev/null
+++ b/test/e2e/conversion/run-e2e.sh
@@ -0,0 +1,174 @@
+#!/bin/bash
+set -e
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+NC='\033[0m' # No Color
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
+
+echo -e "${YELLOW}=== E2E Conversion Webhook Tests ===${NC}"
+echo "Project root: ${PROJECT_ROOT}"
+
+# Helper functions
+pass() {
+    echo -e "${GREEN}✓ PASS:${NC} $1"
+}
+
+fail() {
+    echo -e "${RED}✗ FAIL:${NC} $1"
+ exit 1
+}
+
+info() {
+    echo -e "${YELLOW}ℹ${NC} $1"
+}
+
+# Verify prerequisites
+info "Checking prerequisites..."
+command -v kubectl >/dev/null 2>&1 || fail "kubectl not found"
+command -v kind >/dev/null 2>&1 || fail "kind not found"
+
+# Verify cluster is running
+kubectl cluster-info >/dev/null 2>&1 || fail "Kubernetes cluster not reachable"
+pass "Cluster is reachable"
+
+# Verify operator is running
+info "Checking operator deployment..."
+kubectl wait --for=condition=Available deployment/locust-k8s-operator-controller-manager -n locust-k8s-operator-system --timeout=60s || fail "Operator not running"
+pass "Operator is running"
+
+# Verify storage version
+info "Verifying v2 is storage version..."
+STORAGE_VERSION=$(kubectl get crd locusttests.locust.io -o jsonpath='{.spec.versions[?(@.storage==true)].name}')
+if [ "$STORAGE_VERSION" != "v2" ]; then
+ fail "Storage version is '$STORAGE_VERSION', expected 'v2'"
+fi
+pass "v2 is storage version"
+
+# Cleanup any previous test resources
+info "Cleaning up previous test resources..."
+kubectl delete locusttest e2e-test-v1 e2e-test-v2 --ignore-not-found=true 2>/dev/null || true
+kubectl delete configmap e2e-test-scripts --ignore-not-found=true 2>/dev/null || true
+sleep 2
+
+# Create ConfigMap
+info "Creating test ConfigMap..."
+kubectl apply -f "${SCRIPT_DIR}/configmap.yaml"
+pass "ConfigMap created"
+
+echo ""
+echo -e "${YELLOW}=== Test 1: Create v1 CR ===${NC}"
+info "Creating v1 LocustTest..."
+kubectl apply -f "${SCRIPT_DIR}/v1-cr.yaml"
+sleep 3
+
+# Verify v1 CR is created and can be read
+V1_NAME=$(kubectl get locusttests.v1.locust.io e2e-test-v1 -o jsonpath='{.metadata.name}' 2>/dev/null || echo "")
+if [ "$V1_NAME" != "e2e-test-v1" ]; then
+ fail "v1 CR not created properly"
+fi
+pass "v1 CR created successfully"
+
+echo ""
+echo -e "${YELLOW}=== Test 2: Read v1 CR as v2 ===${NC}"
+info "Reading v1 CR via v2 API..."
+V2_IMAGE=$(kubectl get locusttests.v2.locust.io e2e-test-v1 -o jsonpath='{.spec.image}' 2>/dev/null || echo "")
+V2_WORKER_REPLICAS=$(kubectl get locusttests.v2.locust.io e2e-test-v1 -o jsonpath='{.spec.worker.replicas}' 2>/dev/null || echo "")
+V2_MASTER_CMD=$(kubectl get locusttests.v2.locust.io e2e-test-v1 -o jsonpath='{.spec.master.command}' 2>/dev/null || echo "")
+
+if [ "$V2_IMAGE" != "locustio/locust:2.20.0" ]; then
+ fail "v2 image mismatch: got '$V2_IMAGE'"
+fi
+if [ "$V2_WORKER_REPLICAS" != "2" ]; then
+ fail "v2 worker.replicas mismatch: got '$V2_WORKER_REPLICAS'"
+fi
+if [ "$V2_MASTER_CMD" != "locust" ]; then
+ fail "v2 master.command mismatch: got '$V2_MASTER_CMD'"
+fi
+pass "v1→v2 conversion works correctly"
+
+echo ""
+echo -e "${YELLOW}=== Test 3: Create v2 CR ===${NC}"
+info "Creating v2 LocustTest..."
+kubectl apply -f "${SCRIPT_DIR}/v2-cr.yaml"
+sleep 3
+
+V2_NAME=$(kubectl get locusttests.v2.locust.io e2e-test-v2 -o jsonpath='{.metadata.name}' 2>/dev/null || echo "")
+if [ "$V2_NAME" != "e2e-test-v2" ]; then
+ fail "v2 CR not created properly"
+fi
+pass "v2 CR created successfully"
+
+echo ""
+echo -e "${YELLOW}=== Test 4: Read v2 CR as v1 ===${NC}"
+info "Reading v2 CR via v1 API..."
+V1_IMAGE=$(kubectl get locusttests.v1.locust.io e2e-test-v2 -o jsonpath='{.spec.image}' 2>/dev/null || echo "")
+V1_WORKER_REPLICAS=$(kubectl get locusttests.v1.locust.io e2e-test-v2 -o jsonpath='{.spec.workerReplicas}' 2>/dev/null || echo "")
+V1_MASTER_CMD=$(kubectl get locusttests.v1.locust.io e2e-test-v2 -o jsonpath='{.spec.masterCommandSeed}' 2>/dev/null || echo "")
+
+if [ "$V1_IMAGE" != "locustio/locust:2.20.0" ]; then
+ fail "v1 image mismatch: got '$V1_IMAGE'"
+fi
+if [ "$V1_WORKER_REPLICAS" != "3" ]; then
+ fail "v1 workerReplicas mismatch: got '$V1_WORKER_REPLICAS'"
+fi
+if [ "$V1_MASTER_CMD" != "locust" ]; then
+ fail "v1 masterCommandSeed mismatch: got '$V1_MASTER_CMD'"
+fi
+pass "v2→v1 conversion works correctly"
+
+echo ""
+echo -e "${YELLOW}=== Test 5: Update v1 CR ===${NC}"
+info "Updating v1 CR workerReplicas..."
+kubectl patch locusttests.v1.locust.io e2e-test-v1 --type=merge -p '{"spec":{"workerReplicas":5}}'
+sleep 2
+
+# Verify update is reflected in v2 view
+V2_UPDATED_REPLICAS=$(kubectl get locusttests.v2.locust.io e2e-test-v1 -o jsonpath='{.spec.worker.replicas}' 2>/dev/null || echo "")
+if [ "$V2_UPDATED_REPLICAS" != "5" ]; then
+ fail "v2 worker.replicas not updated: got '$V2_UPDATED_REPLICAS'"
+fi
+pass "v1 update reflected in v2 view"
+
+echo ""
+echo -e "${YELLOW}=== Test 6: Verify Reconciler Creates Jobs ===${NC}"
+info "Checking if reconciler created Jobs for e2e-test-v2..."
+sleep 2
+
+# Check jobs for e2e-test-v2 (created via v2 API, should have jobs)
+MASTER_JOB=$(kubectl get jobs -l performance-test-pod-name=e2e-test-v2-master -o name 2>/dev/null | head -1)
+WORKER_JOB=$(kubectl get jobs -l performance-test-pod-name=e2e-test-v2-worker -o name 2>/dev/null | head -1)
+
+if [ -z "$MASTER_JOB" ]; then
+ fail "Master Job not created for e2e-test-v2"
+fi
+if [ -z "$WORKER_JOB" ]; then
+ fail "Worker Job not created for e2e-test-v2"
+fi
+pass "Reconciler created Jobs from v2 resources"
+
+echo ""
+echo -e "${YELLOW}=== Test 7: Verify Deprecation Warning ===${NC}"
+info "Checking deprecation warning on v1 API..."
+DEPRECATION_OUTPUT=$(kubectl get locusttests.v1.locust.io e2e-test-v1 2>&1)
+if echo "$DEPRECATION_OUTPUT" | grep -q "deprecated"; then
+ pass "Deprecation warning shown for v1 API"
+else
+ info "Note: Deprecation warning may not be visible in all kubectl versions"
+ pass "Deprecation warning test skipped (kubectl version dependent)"
+fi
+
+# Cleanup
+echo ""
+info "Cleaning up test resources..."
+kubectl delete locusttest e2e-test-v1 e2e-test-v2 --ignore-not-found=true 2>/dev/null || true
+kubectl delete configmap e2e-test-scripts --ignore-not-found=true 2>/dev/null || true
+
+echo ""
+echo -e "${GREEN}========================================${NC}"
+echo -e "${GREEN} All E2E Conversion Tests PASSED! ${NC}"
+echo -e "${GREEN}========================================${NC}"
diff --git a/test/e2e/conversion/v1-cr.yaml b/test/e2e/conversion/v1-cr.yaml
new file mode 100644
index 00000000..64130a1c
--- /dev/null
+++ b/test/e2e/conversion/v1-cr.yaml
@@ -0,0 +1,16 @@
+apiVersion: locust.io/v1
+kind: LocustTest
+metadata:
+ name: e2e-test-v1
+ namespace: default
+spec:
+ masterCommandSeed: "locust"
+ workerCommandSeed: "locust"
+ workerReplicas: 2
+ image: "locustio/locust:2.20.0"
+ configMap: "e2e-test-scripts"
+ labels:
+ master:
+ app: locust-master
+ worker:
+ app: locust-worker
diff --git a/test/e2e/conversion/v2-cr.yaml b/test/e2e/conversion/v2-cr.yaml
new file mode 100644
index 00000000..b062c627
--- /dev/null
+++ b/test/e2e/conversion/v2-cr.yaml
@@ -0,0 +1,19 @@
+apiVersion: locust.io/v2
+kind: LocustTest
+metadata:
+ name: e2e-test-v2
+ namespace: default
+spec:
+ image: "locustio/locust:2.20.0"
+ master:
+ command: "locust"
+ autostart: true
+ labels:
+ app: locust-master
+ worker:
+ command: "locust"
+ replicas: 3
+ labels:
+ app: locust-worker
+ testFiles:
+ configMapRef: "e2e-test-scripts"
diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go
new file mode 100644
index 00000000..51575c6d
--- /dev/null
+++ b/test/e2e/e2e_suite_test.go
@@ -0,0 +1,112 @@
+/*
+Copyright 2026.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "testing"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ "github.com/AbdelrhmanHamouda/locust-k8s-operator/test/utils"
+)
+
+var (
+ // Optional Environment Variables:
+ // - CERT_MANAGER_INSTALL_SKIP=true: Skips CertManager installation during test setup.
+ // These variables are useful if CertManager is already installed, avoiding
+ // re-installation and conflicts.
+ skipCertManagerInstall = os.Getenv("CERT_MANAGER_INSTALL_SKIP") == "true"
+ // isCertManagerAlreadyInstalled will be set to true when CertManager CRDs are found on the cluster
+ isCertManagerAlreadyInstalled = false
+
+ // projectImage is the name of the image which will be built and loaded
+ // with the source code changes to be tested.
+ projectImage = "example.com/locust-k8s-operator:v0.0.1"
+)
+
+// TestE2E runs the end-to-end (e2e) test suite for the project. These tests execute in an isolated,
+// temporary environment to validate project changes and are intended to be used in CI jobs.
+// The default setup requires Kind, builds/loads the Manager Docker image locally, and installs
+// CertManager.
+func TestE2E(t *testing.T) {
+ RegisterFailHandler(Fail)
+ _, _ = fmt.Fprintf(GinkgoWriter, "Starting locust-k8s-operator integration test suite\n")
+ RunSpecs(t, "e2e suite")
+}
+
+var _ = BeforeSuite(func() {
+ By("building the manager(Operator) image")
+ //nolint:gosec,lll // Test code with known safe projectImage
+ cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectImage))
+ _, err := utils.Run(cmd)
+ ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to build the manager(Operator) image")
+
+ // TODO(user): If you want to change the e2e test vendor from Kind, ensure the image is
+ // built and available before running the tests. Also, remove the following block.
+ By("loading the manager(Operator) image on Kind")
+ err = utils.LoadImageToKindClusterWithName(projectImage)
+ ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to load the manager(Operator) image into Kind")
+
+ // The e2e tests are intended to run on a temporary cluster that is created and destroyed for testing.
+ // To prevent errors when tests run in environments with CertManager already installed,
+ // we check for its presence before execution.
+ // Setup CertManager before the suite if not skipped and if not already installed
+ if !skipCertManagerInstall {
+ By("checking if cert manager is installed already")
+ isCertManagerAlreadyInstalled = utils.IsCertManagerCRDsInstalled()
+ if !isCertManagerAlreadyInstalled {
+ _, _ = fmt.Fprintf(GinkgoWriter, "Installing CertManager...\n")
+ Expect(utils.InstallCertManager()).To(Succeed(), "Failed to install CertManager")
+ } else {
+ _, _ = fmt.Fprintf(GinkgoWriter, "WARNING: CertManager is already installed. Skipping installation...\n")
+ }
+ }
+
+ By("deploying the operator")
+ //nolint:gosec,lll // Test code with known safe projectImage
+ cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectImage))
+ _, err = utils.Run(cmd)
+ ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Failed to deploy the operator")
+
+ By("waiting for the controller-manager deployment to be ready")
+ err = utils.WaitForControllerReady("locust-k8s-operator-system", "5m")
+ ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Controller-manager deployment not ready")
+
+ By("waiting for the webhook certificate to be ready")
+ err = utils.WaitForCertificateReady("locust-k8s-operator-system", "locust-k8s-operator-serving-cert", "2m")
+ ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Webhook certificate not ready")
+
+ By("waiting for the webhook service endpoint to be ready")
+ err = utils.WaitForWebhookReady("locust-k8s-operator-system", "locust-k8s-operator-webhook-service", "2m")
+ ExpectWithOffset(1, err).NotTo(HaveOccurred(), "Webhook service endpoint not ready")
+})
+
+var _ = AfterSuite(func() {
+ By("undeploying the operator")
+ cmd := exec.Command("make", "undeploy")
+ _, _ = utils.Run(cmd)
+
+ // Teardown CertManager after the suite if not skipped and if it was not already installed
+ if !skipCertManagerInstall && !isCertManagerAlreadyInstalled {
+ _, _ = fmt.Fprintf(GinkgoWriter, "Uninstalling CertManager...\n")
+ utils.UninstallCertManager()
+ }
+})
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
new file mode 100644
index 00000000..c31c604f
--- /dev/null
+++ b/test/e2e/e2e_test.go
@@ -0,0 +1,352 @@
+/*
+Copyright 2026.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ "github.com/AbdelrhmanHamouda/locust-k8s-operator/test/utils"
+)
+
+// namespace where the project is deployed in
+const namespace = "locust-k8s-operator-system"
+
+// serviceAccountName created for the project
+const serviceAccountName = "locust-k8s-operator-controller-manager"
+
+// metricsServiceName is the name of the metrics service of the project
+const metricsServiceName = "locust-k8s-operator-controller-manager-metrics-service"
+
+// metricsRoleBindingName is the name of the RBAC binding that will be created to allow access to the metrics data
+const metricsRoleBindingName = "locust-k8s-operator-metrics-binding"
+
+var _ = Describe("Manager", Ordered, func() {
+ var controllerPodName string
+
+ // Before running the tests, verify the controller is running.
+ // The actual deployment is handled in BeforeSuite (e2e_suite_test.go).
+ BeforeAll(func() {
+ By("verifying the namespace exists")
+ cmd := exec.Command("kubectl", "get", "ns", namespace)
+ _, err := utils.Run(cmd)
+ Expect(err).NotTo(HaveOccurred(), "Namespace should exist from BeforeSuite")
+ })
+
+ // After all tests have been executed, clean up test-specific resources.
+ // The operator undeployment is handled in AfterSuite (e2e_suite_test.go).
+ AfterAll(func() {
+ By("cleaning up the curl pod for metrics")
+ cmd := exec.Command("kubectl", "delete", "pod", "curl-metrics", "-n", namespace, "--ignore-not-found=true")
+ _, _ = utils.Run(cmd)
+ })
+
+ // After each test, check for failures and collect logs, events,
+ // and pod descriptions for debugging.
+ AfterEach(func() {
+ specReport := CurrentSpecReport()
+ if specReport.Failed() {
+ By("Fetching controller manager pod logs")
+ //nolint:gosec // Test code with validated pod name from test setup
+ cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace)
+ controllerLogs, err := utils.Run(cmd)
+ if err == nil {
+ _, _ = fmt.Fprintf(GinkgoWriter, "Controller logs:\n %s", controllerLogs)
+ } else {
+ _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Controller logs: %s", err)
+ }
+
+ By("Fetching Kubernetes events")
+ cmd = exec.Command("kubectl", "get", "events", "-n", namespace, "--sort-by=.lastTimestamp")
+ eventsOutput, err := utils.Run(cmd)
+ if err == nil {
+ _, _ = fmt.Fprintf(GinkgoWriter, "Kubernetes events:\n%s", eventsOutput)
+ } else {
+ _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get Kubernetes events: %s", err)
+ }
+
+ By("Fetching curl-metrics logs")
+ cmd = exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace)
+ metricsOutput, err := utils.Run(cmd)
+ if err == nil {
+ _, _ = fmt.Fprintf(GinkgoWriter, "Metrics logs:\n %s", metricsOutput)
+ } else {
+ _, _ = fmt.Fprintf(GinkgoWriter, "Failed to get curl-metrics logs: %s", err)
+ }
+
+ By("Fetching controller manager pod description")
+ //nolint:gosec // Test code with validated pod name from test setup
+ cmd = exec.Command("kubectl", "describe", "pod", controllerPodName, "-n", namespace)
+ podDescription, err := utils.Run(cmd)
+ if err == nil {
+ fmt.Println("Pod description:\n", podDescription)
+ } else {
+ fmt.Println("Failed to describe controller pod")
+ }
+
+ By("Fetching LocustTest CRs")
+ cmd = exec.Command("kubectl", "get", "locusttest", "-n", namespace, "-o", "yaml")
+ output, err := utils.Run(cmd)
+ if err == nil {
+ _, _ = fmt.Fprintf(GinkgoWriter, "LocustTest CRs:\n%s", output)
+ }
+
+ By("Fetching Jobs")
+ cmd = exec.Command("kubectl", "get", "jobs", "-n", namespace, "-o", "wide")
+ output, err = utils.Run(cmd)
+ if err == nil {
+ _, _ = fmt.Fprintf(GinkgoWriter, "Jobs:\n%s", output)
+ }
+
+ By("Fetching Services")
+ cmd = exec.Command("kubectl", "get", "services", "-n", namespace)
+ output, err = utils.Run(cmd)
+ if err == nil {
+ _, _ = fmt.Fprintf(GinkgoWriter, "Services:\n%s", output)
+ }
+ }
+ })
+
+ SetDefaultEventuallyTimeout(2 * time.Minute)
+ SetDefaultEventuallyPollingInterval(time.Second)
+
+ Context("Manager", func() {
+ It("should run successfully", func() {
+ By("validating that the controller-manager pod is running as expected")
+ verifyControllerUp := func(g Gomega) {
+ // Get the name of the controller-manager pod
+ cmd := exec.Command("kubectl", "get",
+ "pods", "-l", "control-plane=controller-manager",
+ "-o", "go-template={{ range .items }}"+
+ "{{ if not .metadata.deletionTimestamp }}"+
+ "{{ .metadata.name }}"+
+ "{{ \"\\n\" }}{{ end }}{{ end }}",
+ "-n", namespace,
+ )
+
+ podOutput, err := utils.Run(cmd)
+ g.Expect(err).NotTo(HaveOccurred(), "Failed to retrieve controller-manager pod information")
+ podNames := utils.GetNonEmptyLines(podOutput)
+ g.Expect(podNames).To(HaveLen(1), "expected 1 controller pod running")
+ controllerPodName = podNames[0]
+ g.Expect(controllerPodName).To(ContainSubstring("controller-manager"))
+
+ // Validate the pod's status
+ //nolint:gosec // Test code with validated pod name from test setup
+ cmd = exec.Command("kubectl", "get",
+ "pods", controllerPodName, "-o", "jsonpath={.status.phase}",
+ "-n", namespace,
+ )
+ output, err := utils.Run(cmd)
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(output).To(Equal("Running"), "Incorrect controller-manager pod status")
+ }
+ Eventually(verifyControllerUp).Should(Succeed())
+ })
+
+ It("should ensure the metrics endpoint is serving metrics", func() {
+ By("creating a ClusterRoleBinding for the service account to allow access to metrics")
+ // Delete existing binding if it exists (cleanup from previous runs)
+ //nolint:gosec // Test code with validated binding name from test setup
+ cleanupCmd := exec.Command("kubectl", "delete", "clusterrolebinding", metricsRoleBindingName,
+ "--ignore-not-found",
+ )
+ _, _ = utils.Run(cleanupCmd) // Ignore errors - binding may not exist
+
+ //nolint:gosec // Test code with validated namespace and service account from test setup
+ cmd := exec.Command("kubectl", "create", "clusterrolebinding", metricsRoleBindingName,
+ "--clusterrole=locust-k8s-operator-metrics-reader",
+ fmt.Sprintf("--serviceaccount=%s:%s", namespace, serviceAccountName),
+ )
+ _, err := utils.Run(cmd)
+ Expect(err).NotTo(HaveOccurred(), "Failed to create ClusterRoleBinding")
+
+ By("validating that the metrics service is available")
+ cmd = exec.Command("kubectl", "get", "service", metricsServiceName, "-n", namespace)
+ _, err = utils.Run(cmd)
+ Expect(err).NotTo(HaveOccurred(), "Metrics service should exist")
+
+ By("getting the service account token")
+ token, err := serviceAccountToken()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(token).NotTo(BeEmpty())
+
+ By("waiting for the metrics endpoint to be ready")
+ verifyMetricsEndpointReady := func(g Gomega) {
+ cmd := exec.Command("kubectl", "get", "endpoints", metricsServiceName, "-n", namespace)
+ output, err := utils.Run(cmd)
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(output).To(ContainSubstring("8443"), "Metrics endpoint is not ready")
+ }
+ Eventually(verifyMetricsEndpointReady).Should(Succeed())
+
+ By("verifying that the controller manager is serving the metrics server")
+ verifyMetricsServerStarted := func(g Gomega) {
+ //nolint:gosec // Test code with validated pod name from test setup
+ cmd := exec.Command("kubectl", "logs", controllerPodName, "-n", namespace)
+ output, err := utils.Run(cmd)
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(output).To(ContainSubstring("Starting metrics server"),
+ "Metrics server not yet started")
+ }
+ Eventually(verifyMetricsServerStarted).Should(Succeed())
+
+ By("creating the curl-metrics pod to access the metrics endpoint")
+ //nolint:gosec // Test code with validated namespace and service account from test setup
+ cmd = exec.Command("kubectl", "run", "curl-metrics", "--restart=Never",
+ "--namespace", namespace,
+ "--image=curlimages/curl:latest",
+ "--overrides",
+ fmt.Sprintf(`{
+ "spec": {
+ "containers": [{
+ "name": "curl",
+ "image": "curlimages/curl:latest",
+ "command": ["/bin/sh", "-c"],
+ "args": ["curl -v -k -H 'Authorization: Bearer %s' https://%s.%s.svc.cluster.local:8443/metrics"],
+ "securityContext": {
+ "allowPrivilegeEscalation": false,
+ "capabilities": {
+ "drop": ["ALL"]
+ },
+ "runAsNonRoot": true,
+ "runAsUser": 1000,
+ "seccompProfile": {
+ "type": "RuntimeDefault"
+ }
+ }
+ }],
+ "serviceAccount": "%s"
+ }
+ }`, token, metricsServiceName, namespace, serviceAccountName))
+ _, err = utils.Run(cmd)
+ Expect(err).NotTo(HaveOccurred(), "Failed to create curl-metrics pod")
+
+ By("waiting for the curl-metrics pod to complete.")
+ verifyCurlUp := func(g Gomega) {
+ cmd := exec.Command("kubectl", "get", "pods", "curl-metrics",
+ "-o", "jsonpath={.status.phase}",
+ "-n", namespace)
+ output, err := utils.Run(cmd)
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(output).To(Equal("Succeeded"), "curl pod in wrong status")
+ }
+ Eventually(verifyCurlUp, 5*time.Minute).Should(Succeed())
+
+ By("getting the metrics by checking curl-metrics logs")
+ metricsOutput := getMetricsOutput()
+ Expect(metricsOutput).To(ContainSubstring(
+ "controller_runtime_reconcile_total",
+ ))
+
+ By("cleaning up test resources")
+ // Delete the curl-metrics pod
+ //nolint:gosec // Test code with validated namespace from test setup
+ cleanupCmd = exec.Command("kubectl", "delete", "pod", "curl-metrics",
+ "-n", namespace,
+ "--ignore-not-found",
+ )
+ _, _ = utils.Run(cleanupCmd)
+
+ // Delete the ClusterRoleBinding
+ //nolint:gosec // Test code with validated binding name from test setup
+ cleanupCmd = exec.Command("kubectl", "delete", "clusterrolebinding", metricsRoleBindingName,
+ "--ignore-not-found",
+ )
+ _, _ = utils.Run(cleanupCmd)
+ })
+
+ // +kubebuilder:scaffold:e2e-webhooks-checks
+
+ // TODO: Customize the e2e test suite with scenarios specific to your project.
+ // Consider applying sample/CR(s) and check their status and/or verifying
+ // the reconciliation by using the metrics, i.e.:
+ // metricsOutput := getMetricsOutput()
+ // Expect(metricsOutput).To(ContainSubstring(
+ // fmt.Sprintf(`controller_runtime_reconcile_total{controller="%s",result="success"} 1`,
+ // strings.ToLower(<your-controller-kind>)),
+ // ))
+ })
+})
+
+// serviceAccountToken returns a token for the specified service account in the given namespace.
+// It uses the Kubernetes TokenRequest API to generate a token by directly sending a request
+// and parsing the resulting token from the API response.
+func serviceAccountToken() (string, error) {
+ //nolint:gosec // Not hardcoded credentials - this is a K8s API resource kind string
+ const tokenRequestRawString = `{
+ "apiVersion": "authentication.k8s.io/v1",
+ "kind": "TokenRequest"
+ }`
+
+ // Temporary file to store the token request
+ secretName := fmt.Sprintf("%s-token-request", serviceAccountName)
+ tokenRequestFile := filepath.Join("/tmp", secretName)
+ err := os.WriteFile(tokenRequestFile, []byte(tokenRequestRawString), os.FileMode(0o644))
+ if err != nil {
+ return "", err
+ }
+
+ var out string
+ verifyTokenCreation := func(g Gomega) {
+ // Execute kubectl command to create the token
+ //nolint:gosec // Test code with validated namespace and service account from test setup
+ cmd := exec.Command("kubectl", "create", "--raw", fmt.Sprintf(
+ "/api/v1/namespaces/%s/serviceaccounts/%s/token",
+ namespace,
+ serviceAccountName,
+ ), "-f", tokenRequestFile)
+
+ output, err := cmd.CombinedOutput()
+ g.Expect(err).NotTo(HaveOccurred())
+
+ // Parse the JSON output to extract the token
+ var token tokenRequest
+ err = json.Unmarshal(output, &token)
+ g.Expect(err).NotTo(HaveOccurred())
+
+ out = token.Status.Token
+ }
+ Eventually(verifyTokenCreation).Should(Succeed())
+
+ return out, err
+}
+
+// getMetricsOutput retrieves and returns the logs from the curl pod used to access the metrics endpoint.
+func getMetricsOutput() string {
+ By("getting the curl-metrics logs")
+ cmd := exec.Command("kubectl", "logs", "curl-metrics", "-n", namespace)
+ metricsOutput, err := utils.Run(cmd)
+ Expect(err).NotTo(HaveOccurred(), "Failed to retrieve logs from curl pod")
+ Expect(metricsOutput).To(ContainSubstring("< HTTP/1.1 200 OK"))
+ return metricsOutput
+}
+
+// tokenRequest is a simplified representation of the Kubernetes TokenRequest API response,
+// containing only the token field that we need to extract.
+type tokenRequest struct {
+ Status struct {
+ Token string `json:"token"`
+ } `json:"status"`
+}
diff --git a/test/e2e/kind-config.yaml b/test/e2e/kind-config.yaml
new file mode 100644
index 00000000..80a2901b
--- /dev/null
+++ b/test/e2e/kind-config.yaml
@@ -0,0 +1,11 @@
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+name: locust-webhook-test
+nodes:
+ - role: control-plane
+ kubeadmConfigPatches:
+ - |
+ kind: InitConfiguration
+ nodeRegistration:
+ kubeletExtraArgs:
+ node-labels: "ingress-ready=true"
diff --git a/test/e2e/locusttest_e2e_test.go b/test/e2e/locusttest_e2e_test.go
new file mode 100644
index 00000000..3aba129d
--- /dev/null
+++ b/test/e2e/locusttest_e2e_test.go
@@ -0,0 +1,237 @@
+/*
+Copyright 2026.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+ "fmt"
+ "os/exec"
+ "path/filepath"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ "github.com/AbdelrhmanHamouda/locust-k8s-operator/test/utils"
+)
+
+var _ = Describe("LocustTest", Ordered, func() {
+ const testNamespace = "locust-k8s-operator-system"
+ var testdataDir string
+
+ BeforeAll(func() {
+ var err error
+ testdataDir, err = filepath.Abs("testdata")
+ Expect(err).NotTo(HaveOccurred())
+
+ By("applying test ConfigMaps")
+ _, err = utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "configmaps", "test-config.yaml"))
+ Expect(err).NotTo(HaveOccurred(), "Failed to apply test ConfigMap")
+
+ _, err = utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "configmaps", "env-configmap.yaml"))
+ Expect(err).NotTo(HaveOccurred(), "Failed to apply env ConfigMap")
+ })
+
+ AfterAll(func() {
+ By("cleaning up test ConfigMaps")
+ _, _ = utils.DeleteFromFile(testNamespace, filepath.Join(testdataDir, "configmaps", "test-config.yaml"))
+ _, _ = utils.DeleteFromFile(testNamespace, filepath.Join(testdataDir, "configmaps", "env-configmap.yaml"))
+ })
+
+ AfterEach(func() {
+ if CurrentSpecReport().Failed() {
+ By("Fetching LocustTest CRs on failure")
+ cmd := exec.Command("kubectl", "get", "locusttest", "-n", testNamespace, "-o", "yaml")
+ output, _ := utils.Run(cmd)
+ _, _ = fmt.Fprintf(GinkgoWriter, "LocustTest CRs:\n%s", output)
+
+ By("Fetching Jobs on failure")
+ cmd = exec.Command("kubectl", "get", "jobs", "-n", testNamespace, "-o", "wide")
+ output, _ = utils.Run(cmd)
+ _, _ = fmt.Fprintf(GinkgoWriter, "Jobs:\n%s", output)
+
+ By("Fetching Services on failure")
+ cmd = exec.Command("kubectl", "get", "services", "-n", testNamespace)
+ output, _ = utils.Run(cmd)
+ _, _ = fmt.Fprintf(GinkgoWriter, "Services:\n%s", output)
+ }
+ })
+
+ Context("v2 API lifecycle", func() {
+ const crName = "e2e-test-basic"
+
+ AfterAll(func() {
+ By("cleaning up basic LocustTest CR")
+ _, _ = utils.DeleteFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-basic.yaml"))
+ // Wait for cleanup
+ Eventually(func() bool {
+ return !utils.ResourceExists("locusttest", testNamespace, crName)
+ }, 30*time.Second, time.Second).Should(BeTrue())
+ })
+
+ It("should create master Service on CR creation", func() {
+ By("applying the basic LocustTest CR")
+ _, err := utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-basic.yaml"))
+ Expect(err).NotTo(HaveOccurred())
+
+ By("waiting for master Service")
+ Eventually(func() bool {
+ return utils.ResourceExists("service", testNamespace, crName+"-master")
+ }, 60*time.Second, time.Second).Should(BeTrue())
+ })
+
+ It("should create master Job on CR creation", func() {
+ Eventually(func() bool {
+ return utils.ResourceExists("job", testNamespace, crName+"-master")
+ }, 60*time.Second, time.Second).Should(BeTrue())
+ })
+
+ It("should create worker Job on CR creation", func() {
+ Eventually(func() bool {
+ return utils.ResourceExists("job", testNamespace, crName+"-worker")
+ }, 60*time.Second, time.Second).Should(BeTrue())
+ })
+
+ It("should set owner references on created resources", func() {
+ owner, err := utils.GetOwnerReferenceName("job", testNamespace, crName+"-master")
+ Expect(err).NotTo(HaveOccurred())
+ Expect(owner).To(Equal(crName))
+
+ owner, err = utils.GetOwnerReferenceName("job", testNamespace, crName+"-worker")
+ Expect(err).NotTo(HaveOccurred())
+ Expect(owner).To(Equal(crName))
+
+ owner, err = utils.GetOwnerReferenceName("service", testNamespace, crName+"-master")
+ Expect(err).NotTo(HaveOccurred())
+ Expect(owner).To(Equal(crName))
+ })
+
+ It("should update status phase", func() {
+ Eventually(func() string {
+ phase, _ := utils.GetResourceField("locusttest", testNamespace, crName, ".status.phase")
+ return phase
+ }, 60*time.Second, time.Second).Should(Or(Equal("Pending"), Equal("Running")))
+ })
+
+ It("should clean up resources on CR deletion", func() {
+ By("deleting the LocustTest CR")
+ _, err := utils.DeleteFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-basic.yaml"))
+ Expect(err).NotTo(HaveOccurred())
+
+ By("verifying Jobs are deleted")
+ Eventually(func() bool {
+ return !utils.ResourceExists("job", testNamespace, crName+"-master")
+ }, 60*time.Second, time.Second).Should(BeTrue())
+
+ Eventually(func() bool {
+ return !utils.ResourceExists("job", testNamespace, crName+"-worker")
+ }, 60*time.Second, time.Second).Should(BeTrue())
+
+ By("verifying Service is deleted")
+ Eventually(func() bool {
+ return !utils.ResourceExists("service", testNamespace, crName+"-master")
+ }, 60*time.Second, time.Second).Should(BeTrue())
+ })
+ })
+
+ Context("with environment injection", func() {
+ const crName = "e2e-test-env"
+
+ AfterAll(func() {
+ By("cleaning up env LocustTest CR")
+ _, _ = utils.DeleteFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-with-env.yaml"))
+ Eventually(func() bool {
+ return !utils.ResourceExists("locusttest", testNamespace, crName)
+ }, 30*time.Second, time.Second).Should(BeTrue())
+ })
+
+ It("should create resources with env configuration", func() {
+ By("applying LocustTest with env config")
+ _, err := utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-with-env.yaml"))
+ Expect(err).NotTo(HaveOccurred())
+
+ By("waiting for master Job")
+ Eventually(func() bool {
+ return utils.ResourceExists("job", testNamespace, crName+"-master")
+ }, 60*time.Second, time.Second).Should(BeTrue())
+ })
+
+ It("should inject ConfigMap env vars via envFrom", func() {
+ Eventually(func() string {
+ envFrom, _ := utils.GetJobEnvFrom(testNamespace, crName+"-master", crName+"-master")
+ return envFrom
+ }, 30*time.Second, time.Second).Should(ContainSubstring("e2e-env-configmap"))
+ })
+
+ It("should inject inline env variables", func() {
+ Eventually(func() string {
+ env, _ := utils.GetJobContainerEnv(testNamespace, crName+"-master", crName+"-master")
+ return env
+ }, 30*time.Second, time.Second).Should(ContainSubstring("E2E_TEST_VAR"))
+ })
+ })
+
+ Context("with custom volumes", func() {
+ const crName = "e2e-test-volumes"
+
+ AfterAll(func() {
+ By("cleaning up volumes LocustTest CR")
+ _, _ = utils.DeleteFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-with-volumes.yaml"))
+ Eventually(func() bool {
+ return !utils.ResourceExists("locusttest", testNamespace, crName)
+ }, 30*time.Second, time.Second).Should(BeTrue())
+ })
+
+ It("should create resources with volume configuration", func() {
+ By("applying LocustTest with volumes")
+ _, err := utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-with-volumes.yaml"))
+ Expect(err).NotTo(HaveOccurred())
+
+ By("waiting for master Job")
+ Eventually(func() bool {
+ return utils.ResourceExists("job", testNamespace, crName+"-master")
+ }, 60*time.Second, time.Second).Should(BeTrue())
+ })
+
+ It("should mount volumes to master pod", func() {
+ Eventually(func() string {
+ volumes, _ := utils.GetJobVolumes(testNamespace, crName+"-master")
+ return volumes
+ }, 30*time.Second, time.Second).Should(ContainSubstring("test-data"))
+
+ Eventually(func() string {
+ mounts, _ := utils.GetJobVolumeMounts(testNamespace, crName+"-master", crName+"-master")
+ return mounts
+ }, 30*time.Second, time.Second).Should(ContainSubstring("/data"))
+ })
+
+ It("should mount volumes to worker pods", func() {
+ Eventually(func() bool {
+ return utils.ResourceExists("job", testNamespace, crName+"-worker")
+ }, 60*time.Second, time.Second).Should(BeTrue())
+
+ Eventually(func() string {
+ volumes, _ := utils.GetJobVolumes(testNamespace, crName+"-worker")
+ return volumes
+ }, 30*time.Second, time.Second).Should(ContainSubstring("test-data"))
+
+ Eventually(func() string {
+ mounts, _ := utils.GetJobVolumeMounts(testNamespace, crName+"-worker", crName+"-worker")
+ return mounts
+ }, 30*time.Second, time.Second).Should(ContainSubstring("/data"))
+ })
+ })
+})
diff --git a/test/e2e/otel_e2e_test.go b/test/e2e/otel_e2e_test.go
new file mode 100644
index 00000000..7aae8658
--- /dev/null
+++ b/test/e2e/otel_e2e_test.go
@@ -0,0 +1,116 @@
+/*
+Copyright 2026.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+ "fmt"
+ "os/exec"
+ "path/filepath"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ "github.com/AbdelrhmanHamouda/locust-k8s-operator/test/utils"
+)
+
+var _ = Describe("OpenTelemetry", Ordered, func() {
+ const testNamespace = "locust-k8s-operator-system"
+ const crName = "e2e-test-otel"
+ var testdataDir string
+
+ BeforeAll(func() {
+ var err error
+ testdataDir, err = filepath.Abs("testdata")
+ Expect(err).NotTo(HaveOccurred())
+
+ By("ensuring test ConfigMap exists")
+ _, err = utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "configmaps", "test-config.yaml"))
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ AfterAll(func() {
+ By("cleaning up OTel LocustTest CR")
+ _, _ = utils.DeleteFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-with-otel.yaml"))
+ Eventually(func() bool {
+ return !utils.ResourceExists("locusttest", testNamespace, crName)
+ }, 30*time.Second, time.Second).Should(BeTrue())
+ })
+
+ AfterEach(func() {
+ if CurrentSpecReport().Failed() {
+ By("Fetching LocustTest CRs on failure")
+ cmd := exec.Command("kubectl", "get", "locusttest", "-n", testNamespace, "-o", "yaml")
+ output, _ := utils.Run(cmd)
+ _, _ = fmt.Fprintf(GinkgoWriter, "LocustTest CRs:\n%s", output)
+
+ By("Fetching Jobs on failure")
+ cmd = exec.Command("kubectl", "get", "jobs", "-n", testNamespace, "-o", "yaml")
+ output, _ = utils.Run(cmd)
+ _, _ = fmt.Fprintf(GinkgoWriter, "Jobs:\n%s", output)
+ }
+ })
+
+ It("should create resources with OTel enabled", func() {
+ By("applying LocustTest with OTel config")
+ _, err := utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-with-otel.yaml"))
+ Expect(err).NotTo(HaveOccurred())
+
+ By("waiting for master Job")
+ Eventually(func() bool {
+ return utils.ResourceExists("job", testNamespace, crName+"-master")
+ }, 60*time.Second, time.Second).Should(BeTrue())
+ })
+
+ It("should add --otel flag when enabled", func() {
+ Eventually(func() string {
+ args, _ := utils.GetJobContainerArgs(testNamespace, crName+"-master", crName+"-master")
+ return args
+ }, 30*time.Second, time.Second).Should(ContainSubstring("--otel"))
+ })
+
+ It("should inject OTEL_* environment variables", func() {
+ Eventually(func() string {
+ env, _ := utils.GetJobContainerEnv(testNamespace, crName+"-master", crName+"-master")
+ return env
+ }, 30*time.Second, time.Second).Should(ContainSubstring("OTEL_EXPORTER_OTLP_ENDPOINT"))
+ })
+
+ It("should NOT deploy metrics sidecar when OTel enabled", func() {
+ Eventually(func() string {
+ containers, _ := utils.GetJobContainerNames(testNamespace, crName+"-master")
+ return containers
+ }, 30*time.Second, time.Second).ShouldNot(ContainSubstring("metrics-exporter"))
+ })
+
+ It("should have only one container (locust) in master pod", func() {
+ Eventually(func() string {
+ containers, _ := utils.GetJobContainerNames(testNamespace, crName+"-master")
+ return containers
+ }, 30*time.Second, time.Second).Should(Equal(crName + "-master"))
+ })
+
+ It("should exclude metrics port from Service when OTel enabled", func() {
+ Eventually(func() bool {
+ return utils.ResourceExists("service", testNamespace, crName+"-master")
+ }, 60*time.Second, time.Second).Should(BeTrue())
+
+ ports, err := utils.GetServicePorts(testNamespace, crName+"-master")
+ Expect(err).NotTo(HaveOccurred())
+ Expect(ports).NotTo(ContainSubstring("metrics"))
+ })
+})
diff --git a/test/e2e/testdata/configmaps/env-configmap.yaml b/test/e2e/testdata/configmaps/env-configmap.yaml
new file mode 100644
index 00000000..4db3f271
--- /dev/null
+++ b/test/e2e/testdata/configmaps/env-configmap.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: e2e-env-configmap
+data:
+ TARGET_HOST: "http://example.com"
+ TEST_MODE: "e2e"
diff --git a/test/e2e/testdata/configmaps/test-config.yaml b/test/e2e/testdata/configmaps/test-config.yaml
new file mode 100644
index 00000000..e7e7b51e
--- /dev/null
+++ b/test/e2e/testdata/configmaps/test-config.yaml
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: e2e-test-configmap
+data:
+ locustfile.py: |
+ from locust import HttpUser, task
+
+ class TestUser(HttpUser):
+ @task
+ def hello(self):
+ self.client.get("/")
diff --git a/test/e2e/testdata/v1/locusttest-basic.yaml b/test/e2e/testdata/v1/locusttest-basic.yaml
new file mode 100644
index 00000000..15db0f8d
--- /dev/null
+++ b/test/e2e/testdata/v1/locusttest-basic.yaml
@@ -0,0 +1,10 @@
+apiVersion: locust.io/v1
+kind: LocustTest
+metadata:
+ name: e2e-test-v1
+spec:
+ masterCommandSeed: "-f /lotest/src/locustfile.py"
+ workerCommandSeed: "-f /lotest/src/locustfile.py"
+ workerReplicas: 1
+ image: locustio/locust:latest
+ configMap: e2e-test-configmap
diff --git a/test/e2e/testdata/v2/locusttest-basic.yaml b/test/e2e/testdata/v2/locusttest-basic.yaml
new file mode 100644
index 00000000..768c3f76
--- /dev/null
+++ b/test/e2e/testdata/v2/locusttest-basic.yaml
@@ -0,0 +1,13 @@
+apiVersion: locust.io/v2
+kind: LocustTest
+metadata:
+ name: e2e-test-basic
+spec:
+ master:
+ command: "-f /lotest/src/locustfile.py"
+ worker:
+ replicas: 2
+ command: "-f /lotest/src/locustfile.py"
+ image: locustio/locust:latest
+ testFiles:
+ configMapRef: e2e-test-configmap
diff --git a/test/e2e/testdata/v2/locusttest-invalid.yaml b/test/e2e/testdata/v2/locusttest-invalid.yaml
new file mode 100644
index 00000000..3c40130d
--- /dev/null
+++ b/test/e2e/testdata/v2/locusttest-invalid.yaml
@@ -0,0 +1,13 @@
+apiVersion: locust.io/v2
+kind: LocustTest
+metadata:
+ name: e2e-test-invalid
+spec:
+ master:
+ command: "-f /lotest/src/locustfile.py"
+ worker:
+ replicas: 0
+ command: "-f /lotest/src/locustfile.py"
+ image: locustio/locust:latest
+ testFiles:
+ configMapRef: e2e-test-configmap
diff --git a/test/e2e/testdata/v2/locusttest-with-env.yaml b/test/e2e/testdata/v2/locusttest-with-env.yaml
new file mode 100644
index 00000000..a5c049bf
--- /dev/null
+++ b/test/e2e/testdata/v2/locusttest-with-env.yaml
@@ -0,0 +1,19 @@
+apiVersion: locust.io/v2
+kind: LocustTest
+metadata:
+ name: e2e-test-env
+spec:
+ master:
+ command: "-f /lotest/src/locustfile.py"
+ worker:
+ replicas: 1
+ command: "-f /lotest/src/locustfile.py"
+ image: locustio/locust:latest
+ testFiles:
+ configMapRef: e2e-test-configmap
+ env:
+ configMapRefs:
+ - name: e2e-env-configmap
+ variables:
+ - name: E2E_TEST_VAR
+ value: "test-value"
diff --git a/test/e2e/testdata/v2/locusttest-with-otel.yaml b/test/e2e/testdata/v2/locusttest-with-otel.yaml
new file mode 100644
index 00000000..e6ed73f2
--- /dev/null
+++ b/test/e2e/testdata/v2/locusttest-with-otel.yaml
@@ -0,0 +1,17 @@
+apiVersion: locust.io/v2
+kind: LocustTest
+metadata:
+ name: e2e-test-otel
+spec:
+ master:
+ command: "-f /lotest/src/locustfile.py"
+ worker:
+ replicas: 1
+ command: "-f /lotest/src/locustfile.py"
+ image: locustio/locust:latest
+ testFiles:
+ configMapRef: e2e-test-configmap
+ observability:
+ openTelemetry:
+ enabled: true
+ endpoint: "http://otel-collector:4317"
diff --git a/test/e2e/testdata/v2/locusttest-with-scheduling.yaml b/test/e2e/testdata/v2/locusttest-with-scheduling.yaml
new file mode 100644
index 00000000..7fd7eb01
--- /dev/null
+++ b/test/e2e/testdata/v2/locusttest-with-scheduling.yaml
@@ -0,0 +1,46 @@
+apiVersion: locust.io/v2
+kind: LocustTest
+metadata:
+ name: e2e-test-scheduling
+spec:
+ master:
+ command: "-f /lotest/src/locustfile.py"
+ worker:
+ replicas: 2
+ command: "-f /lotest/src/locustfile.py"
+ image: locustio/locust:latest
+ testFiles:
+ configMapRef: e2e-test-configmap
+ scheduling:
+ nodeSelector:
+ workload-type: performance-testing
+ tier: compute
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: kubernetes.io/arch
+ operator: In
+ values:
+ - amd64
+ - arm64
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: performance-test-name
+ operator: In
+ values:
+ - e2e-test-scheduling
+ topologyKey: kubernetes.io/hostname
+ tolerations:
+ - key: "performance-testing"
+ operator: "Equal"
+ value: "true"
+ effect: "NoSchedule"
+ - key: "high-load"
+ operator: "Exists"
+ effect: "PreferNoSchedule"
diff --git a/test/e2e/testdata/v2/locusttest-with-volumes.yaml b/test/e2e/testdata/v2/locusttest-with-volumes.yaml
new file mode 100644
index 00000000..c832c25c
--- /dev/null
+++ b/test/e2e/testdata/v2/locusttest-with-volumes.yaml
@@ -0,0 +1,20 @@
+apiVersion: locust.io/v2
+kind: LocustTest
+metadata:
+ name: e2e-test-volumes
+spec:
+ master:
+ command: "-f /lotest/src/locustfile.py"
+ worker:
+ replicas: 1
+ command: "-f /lotest/src/locustfile.py"
+ image: locustio/locust:latest
+ testFiles:
+ configMapRef: e2e-test-configmap
+ volumes:
+ - name: test-data
+ emptyDir: {}
+ volumeMounts:
+ - name: test-data
+ mountPath: /data
+ target: both
diff --git a/test/e2e/v1_compatibility_test.go b/test/e2e/v1_compatibility_test.go
new file mode 100644
index 00000000..9d39ad40
--- /dev/null
+++ b/test/e2e/v1_compatibility_test.go
@@ -0,0 +1,105 @@
+/*
+Copyright 2026.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+ "fmt"
+ "os/exec"
+ "path/filepath"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ "github.com/AbdelrhmanHamouda/locust-k8s-operator/test/utils"
+)
+
+var _ = Describe("v1 API Compatibility", Ordered, func() {
+ const testNamespace = "locust-k8s-operator-system"
+ const crName = "e2e-test-v1"
+ var testdataDir string
+
+ BeforeAll(func() {
+ var err error
+ testdataDir, err = filepath.Abs("testdata")
+ Expect(err).NotTo(HaveOccurred())
+
+ By("ensuring test ConfigMap exists")
+ _, err = utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "configmaps", "test-config.yaml"))
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ AfterAll(func() {
+ By("cleaning up v1 LocustTest CR")
+ _, _ = utils.DeleteFromFile(testNamespace, filepath.Join(testdataDir, "v1", "locusttest-basic.yaml"))
+ Eventually(func() bool {
+ return !utils.ResourceExists("locusttest", testNamespace, crName)
+ }, 30*time.Second, time.Second).Should(BeTrue())
+ })
+
+ AfterEach(func() {
+ if CurrentSpecReport().Failed() {
+ By("Fetching LocustTest CRs on failure")
+ cmd := exec.Command("kubectl", "get", "locusttest", "-n", testNamespace, "-o", "yaml")
+ output, _ := utils.Run(cmd)
+ _, _ = fmt.Fprintf(GinkgoWriter, "LocustTest CRs:\n%s", output)
+
+ By("Fetching Jobs on failure")
+ cmd = exec.Command("kubectl", "get", "jobs", "-n", testNamespace, "-o", "wide")
+ output, _ = utils.Run(cmd)
+ _, _ = fmt.Fprintf(GinkgoWriter, "Jobs:\n%s", output)
+ }
+ })
+
+ It("should accept v1 LocustTest CR", func() {
+ By("applying v1 LocustTest CR")
+ _, err := utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "v1", "locusttest-basic.yaml"))
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ It("should create resources from v1 CR", func() {
+ By("waiting for master Service")
+ Eventually(func() bool {
+ return utils.ResourceExists("service", testNamespace, crName+"-master")
+ }, 60*time.Second, time.Second).Should(BeTrue())
+
+ By("waiting for master Job")
+ Eventually(func() bool {
+ return utils.ResourceExists("job", testNamespace, crName+"-master")
+ }, 60*time.Second, time.Second).Should(BeTrue())
+
+ By("waiting for worker Job")
+ Eventually(func() bool {
+ return utils.ResourceExists("job", testNamespace, crName+"-worker")
+ }, 60*time.Second, time.Second).Should(BeTrue())
+ })
+
+ It("should allow reading v1 CR as v2", func() {
+ By("fetching v1 CR using v2 API version")
+ cmd := exec.Command("kubectl", "get", "locusttest.v2.locust.io",
+ crName, "-n", testNamespace, "-o", "yaml")
+ output, err := utils.Run(cmd)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(output).To(ContainSubstring("apiVersion: locust.io/v2"))
+ })
+
+ It("should have correct owner references", func() {
+ owner, err := utils.GetOwnerReferenceName("job", testNamespace, crName+"-master")
+ Expect(err).NotTo(HaveOccurred())
+ Expect(owner).To(Equal(crName))
+ })
+})
diff --git a/test/e2e/validation_e2e_test.go b/test/e2e/validation_e2e_test.go
new file mode 100644
index 00000000..885117b6
--- /dev/null
+++ b/test/e2e/validation_e2e_test.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2026.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+ "path/filepath"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+
+ "github.com/AbdelrhmanHamouda/locust-k8s-operator/test/utils"
+)
+
+var _ = Describe("Validation Webhook", Ordered, func() {
+ const testNamespace = "locust-k8s-operator-system"
+ var testdataDir string
+
+ BeforeAll(func() {
+ var err error
+ testdataDir, err = filepath.Abs("testdata")
+ Expect(err).NotTo(HaveOccurred())
+
+ By("ensuring test ConfigMap exists")
+ _, err = utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "configmaps", "test-config.yaml"))
+ Expect(err).NotTo(HaveOccurred())
+ })
+
+ AfterAll(func() {
+ By("cleaning up any leftover CRs")
+ _, _ = utils.DeleteFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-invalid.yaml"))
+ _, _ = utils.DeleteFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-basic.yaml"))
+ })
+
+ It("should reject CR with invalid workerReplicas (0)", func() {
+ By("applying invalid LocustTest CR with workerReplicas=0")
+ _, err := utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-invalid.yaml"))
+ Expect(err).To(HaveOccurred())
+ Expect(err.Error()).To(Or(
+ ContainSubstring("minimum"),
+ ContainSubstring("Invalid value"),
+ ContainSubstring("spec.worker.replicas"),
+ ))
+ })
+
+ It("should accept valid CR", func() {
+ By("applying valid LocustTest CR")
+ _, err := utils.ApplyFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-basic.yaml"))
+ Expect(err).NotTo(HaveOccurred())
+
+ By("verifying CR was created")
+ Eventually(func() bool {
+ return utils.ResourceExists("locusttest", testNamespace, "e2e-test-basic")
+ }, 30*time.Second, time.Second).Should(BeTrue())
+
+ By("cleaning up")
+ _, _ = utils.DeleteFromFile(testNamespace, filepath.Join(testdataDir, "v2", "locusttest-basic.yaml"))
+ })
+})
diff --git a/test/utils/utils.go b/test/utils/utils.go
new file mode 100644
index 00000000..2adfd73a
--- /dev/null
+++ b/test/utils/utils.go
@@ -0,0 +1,406 @@
+/*
+Copyright 2026.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utils
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "os"
+ "os/exec"
+ "strings"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2" // nolint:revive,staticcheck
+)
+
+const (
+ prometheusOperatorVersion = "v0.77.1"
+ prometheusOperatorURL = "https://github.com/prometheus-operator/prometheus-operator/" +
+ "releases/download/%s/bundle.yaml"
+
+ certmanagerVersion = "v1.16.3"
+ certmanagerURLTmpl = "https://github.com/cert-manager/cert-manager/releases/download/%s/cert-manager.yaml"
+)
+
+func warnError(err error) {
+ _, _ = fmt.Fprintf(GinkgoWriter, "warning: %v\n", err)
+}
+
+// Run executes the provided command within this context
+func Run(cmd *exec.Cmd) (string, error) {
+ dir, _ := GetProjectDir()
+ cmd.Dir = dir
+
+ cmd.Env = append(os.Environ(), "GO111MODULE=on")
+ command := strings.Join(cmd.Args, " ")
+ _, _ = fmt.Fprintf(GinkgoWriter, "running: %q\n", command)
+ output, err := cmd.CombinedOutput()
+ if err != nil {
+ return string(output), fmt.Errorf("%q failed with error %q: %w", command, string(output), err)
+ }
+
+ return string(output), nil
+}
+
+// InstallPrometheusOperator installs the Prometheus Operator to be used to export the enabled metrics.
+func InstallPrometheusOperator() error {
+ url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion)
+ cmd := exec.Command("kubectl", "create", "-f", url) //nolint:gosec // Test code with known safe prometheus URL
+ _, err := Run(cmd)
+ return err
+}
+
+// UninstallPrometheusOperator uninstalls the Prometheus Operator.
+func UninstallPrometheusOperator() {
+ url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion)
+ cmd := exec.Command("kubectl", "delete", "-f", url) //nolint:gosec // Test code with known safe prometheus URL
+ if _, err := Run(cmd); err != nil {
+ warnError(err)
+ }
+}
+
+// IsPrometheusCRDsInstalled checks if any Prometheus CRDs are installed
+// by verifying the existence of key CRDs related to Prometheus.
+func IsPrometheusCRDsInstalled() bool {
+ // List of common Prometheus CRDs
+ prometheusCRDs := []string{
+ "prometheuses.monitoring.coreos.com",
+ "prometheusrules.monitoring.coreos.com",
+ "prometheusagents.monitoring.coreos.com",
+ }
+
+ cmd := exec.Command("kubectl", "get", "crds", "-o", "custom-columns=NAME:.metadata.name")
+ output, err := Run(cmd)
+ if err != nil {
+ return false
+ }
+ crdList := GetNonEmptyLines(output)
+ for _, crd := range prometheusCRDs {
+ for _, line := range crdList {
+ if strings.Contains(line, crd) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// UninstallCertManager uninstalls the cert manager
+func UninstallCertManager() {
+ url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion)
+ cmd := exec.Command("kubectl", "delete", "-f", url) //nolint:gosec // Test code with known safe cert-manager URL
+ if _, err := Run(cmd); err != nil {
+ warnError(err)
+ }
+}
+
+// InstallCertManager installs the cert manager bundle.
+func InstallCertManager() error {
+ url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion)
+ cmd := exec.Command("kubectl", "apply", "-f", url) //nolint:gosec // Test code with known safe cert-manager URL
+ if _, err := Run(cmd); err != nil {
+ return err
+ }
+ // Wait for cert-manager-webhook to be ready, which can take time if cert-manager
+ // was re-installed after uninstalling on a cluster.
+ cmd = exec.Command("kubectl", "wait", "deployment.apps/cert-manager-webhook",
+ "--for", "condition=Available",
+ "--namespace", "cert-manager",
+ "--timeout", "5m",
+ )
+
+ if _, err := Run(cmd); err != nil {
+ return err
+ }
+
+ // Wait for all cert-manager pods to be ready
+ cmd = exec.Command("kubectl", "wait", "pods",
+ "--all",
+ "--for", "condition=Ready",
+ "--namespace", "cert-manager",
+ "--timeout", "5m",
+ )
+ if _, err := Run(cmd); err != nil {
+ return err
+ }
+
+ // Wait for cert-manager's own webhook to have its CA bundle injected
+ // This ensures the webhook is fully functional before we try to create Certificate resources
+ maxRetries := 60 // 60 retries * 2 seconds = 2 minutes
+ for i := 0; i < maxRetries; i++ {
+ cmd = exec.Command("kubectl", "-n", "cert-manager", "get",
+ "validatingwebhookconfigurations", "cert-manager-webhook",
+ "-o", "jsonpath={.webhooks[0].clientConfig.caBundle}",
+ )
+ output, err := Run(cmd)
+ if err == nil && len(output) > 0 {
+ // CA bundle is present, webhook is ready
+ return nil
+ }
+ time.Sleep(2 * time.Second)
+ }
+
+ return fmt.Errorf("timed out waiting for cert-manager webhook CA bundle to be injected")
+}
+
+// IsCertManagerCRDsInstalled checks if any Cert Manager CRDs are installed
+// by verifying the existence of key CRDs related to Cert Manager.
+func IsCertManagerCRDsInstalled() bool {
+ // List of common Cert Manager CRDs
+ certManagerCRDs := []string{
+ "certificates.cert-manager.io",
+ "issuers.cert-manager.io",
+ "clusterissuers.cert-manager.io",
+ "certificaterequests.cert-manager.io",
+ "orders.acme.cert-manager.io",
+ "challenges.acme.cert-manager.io",
+ }
+
+ // Execute the kubectl command to get all CRDs
+ cmd := exec.Command("kubectl", "get", "crds")
+ output, err := Run(cmd)
+ if err != nil {
+ return false
+ }
+
+ // Check if any of the Cert Manager CRDs are present
+ crdList := GetNonEmptyLines(output)
+ for _, crd := range certManagerCRDs {
+ for _, line := range crdList {
+ if strings.Contains(line, crd) {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// LoadImageToKindClusterWithName loads a local docker image to the kind cluster
+func LoadImageToKindClusterWithName(name string) error {
+ cluster := "kind"
+ if v, ok := os.LookupEnv("KIND_CLUSTER"); ok {
+ cluster = v
+ }
+ kindOptions := []string{"load", "docker-image", name, "--name", cluster}
+ cmd := exec.Command("kind", kindOptions...) //nolint:gosec // Test code with validated cluster name and image
+ _, err := Run(cmd)
+ return err
+}
+
+// GetNonEmptyLines converts given command output string into individual objects
+// according to line breakers, and ignores the empty elements in it.
+func GetNonEmptyLines(output string) []string {
+ var res []string
+ elements := strings.Split(output, "\n")
+ for _, element := range elements {
+ if element != "" {
+ res = append(res, element)
+ }
+ }
+
+ return res
+}
+
+// GetProjectDir will return the directory where the project is
+func GetProjectDir() (string, error) {
+ wd, err := os.Getwd()
+ if err != nil {
+ return wd, fmt.Errorf("failed to get current working directory: %w", err)
+ }
+ wd = strings.ReplaceAll(wd, "/test/e2e", "")
+ return wd, nil
+}
+
+// UncommentCode searches for target in the file and remove the comment prefix
+// of the target content. The target content may span multiple lines.
+func UncommentCode(filename, target, prefix string) error {
+ // false positive
+ // nolint:gosec
+ content, err := os.ReadFile(filename)
+ if err != nil {
+ return fmt.Errorf("failed to read file %q: %w", filename, err)
+ }
+ strContent := string(content)
+
+ idx := strings.Index(strContent, target)
+ if idx < 0 {
+		return fmt.Errorf("unable to find the code %q to be uncommented", target)
+ }
+
+ out := new(bytes.Buffer)
+ _, err = out.Write(content[:idx])
+ if err != nil {
+ return fmt.Errorf("failed to write to output: %w", err)
+ }
+
+ scanner := bufio.NewScanner(bytes.NewBufferString(target))
+ if !scanner.Scan() {
+ return nil
+ }
+ for {
+ if _, err = out.WriteString(strings.TrimPrefix(scanner.Text(), prefix)); err != nil {
+ return fmt.Errorf("failed to write to output: %w", err)
+ }
+ // Avoid writing a newline in case the previous line was the last in target.
+ if !scanner.Scan() {
+ break
+ }
+ if _, err = out.WriteString("\n"); err != nil {
+ return fmt.Errorf("failed to write to output: %w", err)
+ }
+ }
+
+ if _, err = out.Write(content[idx+len(target):]); err != nil {
+ return fmt.Errorf("failed to write to output: %w", err)
+ }
+
+ // false positive
+ // nolint:gosec
+ if err = os.WriteFile(filename, out.Bytes(), 0644); err != nil {
+ return fmt.Errorf("failed to write file %q: %w", filename, err)
+ }
+
+ return nil
+}
+
+// ApplyFromFile applies a Kubernetes resource from a YAML file
+func ApplyFromFile(namespace, path string) (string, error) {
+ cmd := exec.Command("kubectl", "apply", "-f", path, "-n", namespace)
+ return Run(cmd)
+}
+
+// DeleteFromFile deletes a Kubernetes resource from a YAML file
+func DeleteFromFile(namespace, path string) (string, error) {
+ cmd := exec.Command("kubectl", "delete", "-f", path, "-n", namespace, "--ignore-not-found")
+ return Run(cmd)
+}
+
+// WaitForResource waits for a resource to exist
+func WaitForResource(resourceType, namespace, name string, timeout string) error {
+ cmd := exec.Command("kubectl", "wait", resourceType, name,
+ "-n", namespace,
+ "--for=create",
+ "--timeout", timeout)
+ _, err := Run(cmd)
+ return err
+}
+
+// ResourceExists checks if a resource exists
+func ResourceExists(resourceType, namespace, name string) bool {
+ cmd := exec.Command("kubectl", "get", resourceType, name, "-n", namespace)
+ _, err := Run(cmd)
+ return err == nil
+}
+
+// GetResourceField retrieves a field from a resource using jsonpath
+func GetResourceField(resourceType, namespace, name, jsonpath string) (string, error) {
+ //nolint:gosec // Test code with validated kubectl parameters
+ cmd := exec.Command("kubectl", "get", resourceType, name,
+ "-n", namespace, "-o", fmt.Sprintf("jsonpath={%s}", jsonpath))
+ output, err := Run(cmd)
+ if err != nil {
+ return "", err
+ }
+ return strings.TrimSpace(output), nil
+}
+
+// GetOwnerReferenceName retrieves the owner reference name from a resource
+func GetOwnerReferenceName(resourceType, namespace, name string) (string, error) {
+ return GetResourceField(resourceType, namespace, name, ".metadata.ownerReferences[0].name")
+}
+
+// GetJobContainerEnv retrieves environment variables from a Job's container
+func GetJobContainerEnv(namespace, jobName, containerName string) (string, error) {
+ jsonpath := fmt.Sprintf(".spec.template.spec.containers[?(@.name==\"%s\")].env[*].name", containerName)
+ return GetResourceField("job", namespace, jobName, jsonpath)
+}
+
+// GetJobContainerCommand retrieves the command from a Job's container
+func GetJobContainerCommand(namespace, jobName, containerName string) (string, error) {
+ jsonpath := fmt.Sprintf(".spec.template.spec.containers[?(@.name==\"%s\")].command", containerName)
+ return GetResourceField("job", namespace, jobName, jsonpath)
+}
+
+// GetJobContainerArgs retrieves the args from a Job's container
+func GetJobContainerArgs(namespace, jobName, containerName string) (string, error) {
+ jsonpath := fmt.Sprintf(".spec.template.spec.containers[?(@.name==\"%s\")].args", containerName)
+ return GetResourceField("job", namespace, jobName, jsonpath)
+}
+
+// GetJobContainerNames retrieves all container names from a Job
+func GetJobContainerNames(namespace, jobName string) (string, error) {
+ return GetResourceField("job", namespace, jobName, ".spec.template.spec.containers[*].name")
+}
+
+// GetServicePorts retrieves port names from a Service
+func GetServicePorts(namespace, serviceName string) (string, error) {
+ return GetResourceField("service", namespace, serviceName, ".spec.ports[*].name")
+}
+
+// GetJobEnvFrom retrieves envFrom configuration from a Job's container
+func GetJobEnvFrom(namespace, jobName, containerName string) (string, error) {
+ jsonpath := fmt.Sprintf(".spec.template.spec.containers[?(@.name==\"%s\")].envFrom", containerName)
+ return GetResourceField("job", namespace, jobName, jsonpath)
+}
+
+// GetJobVolumes retrieves volume names from a Job
+func GetJobVolumes(namespace, jobName string) (string, error) {
+ return GetResourceField("job", namespace, jobName, ".spec.template.spec.volumes[*].name")
+}
+
+// GetJobVolumeMounts retrieves volume mount paths from a Job's container
+func GetJobVolumeMounts(namespace, jobName, containerName string) (string, error) {
+ jsonpath := fmt.Sprintf(".spec.template.spec.containers[?(@.name==\"%s\")].volumeMounts[*].mountPath", containerName)
+ return GetResourceField("job", namespace, jobName, jsonpath)
+}
+
+// WaitForControllerReady waits for the controller-manager deployment to be ready
+func WaitForControllerReady(namespace string, timeout string) error {
+ _, _ = fmt.Fprintf(GinkgoWriter, "Waiting for controller-manager deployment to be ready...\n")
+ cmd := exec.Command("kubectl", "wait", "deployment",
+ "-l", "control-plane=controller-manager",
+ "-n", namespace,
+ "--for=condition=Available",
+ "--timeout", timeout)
+ _, err := Run(cmd)
+ return err
+}
+
+// WaitForWebhookReady waits for the webhook service endpoint to be ready
+func WaitForWebhookReady(namespace, serviceName string, timeout string) error {
+ _, _ = fmt.Fprintf(GinkgoWriter, "Waiting for webhook service endpoint to be ready...\n")
+ cmd := exec.Command("kubectl", "wait", "endpoints", serviceName,
+ "-n", namespace,
+ "--for=jsonpath={.subsets[0].addresses[0].ip}",
+ "--timeout", timeout)
+ _, err := Run(cmd)
+ return err
+}
+
+// WaitForCertificateReady waits for the serving certificate to be ready
+func WaitForCertificateReady(namespace, certName string, timeout string) error {
+ _, _ = fmt.Fprintf(GinkgoWriter, "Waiting for certificate %s to be ready...\n", certName)
+ cmd := exec.Command("kubectl", "wait", "certificate", certName,
+ "-n", namespace,
+ "--for=condition=Ready",
+ "--timeout", timeout)
+ _, err := Run(cmd)
+ return err
+}